Commit 6a784739 authored by Spiros Koulouzis

Adding docker login

parent c9be5596
version: "2" version: "3.2"
services: services:
kamailio: kamailio:
#build: ./kamailio #build: ./kamailio
...@@ -104,8 +104,8 @@ services: ...@@ -104,8 +104,8 @@ services:
- "10048:10048/udp" - "10048:10048/udp"
- "10049:10049/udp" - "10049:10049/udp"
expose: #expose:
- "7711" #- "7711"
# volumes: # volumes:
# - ./rtpproxy/docker-entrypoint-init:/docker-entrypoint-init # - ./rtpproxy/docker-entrypoint-init:/docker-entrypoint-init
dns: dns:
...@@ -136,5 +136,5 @@ networks: ...@@ -136,5 +136,5 @@ networks:
driver: default driver: default
config: config:
- subnet: 192.168.2.0/24 - subnet: 192.168.2.0/24
ip_range: 192.168.2.0/24 #ip_range: 192.168.2.0/24
gateway: 192.168.2.1 #gateway: 192.168.2.1
@@ -134,8 +134,6 @@ services:
 - REDIS_PORT=${REDIS_PORT:-6379}
 - REDIS_PASSWORD=${REDIS_PASSWORD:-changeme}
 - DEFAULT_SIP=${DEFAULT_SIP:-${SIP_PEERNAME}}
-- MONITORING_SERVER = monitoring_server
-- MONITORING_ADAPTER = monitoring_adapter
 ports:
 - "8001:80/tcp"
@@ -165,4 +163,4 @@ networks:
 volumes:
 graphite_data:
 grafana_data:
 redis_data:
\ No newline at end of file
@@ -54,7 +54,9 @@ import nl.uva.sne.drip.commons.utils.Converter;
 import nl.uva.sne.drip.commons.utils.DRIPLogHandler;
 import nl.uva.sne.drip.drip.commons.data.v1.external.ConfigurationRepresentation;
 import nl.uva.sne.drip.drip.commons.data.v1.external.KeyPair;
+import nl.uva.sne.drip.drip.commons.data.v1.external.PlanResponse;
 import nl.uva.sne.drip.drip.commons.data.v1.external.ScaleRequest;
+import nl.uva.sne.drip.drip.commons.data.v1.external.ToscaRepresentation;
 import nl.uva.sne.drip.drip.commons.data.v1.external.ansible.AnsibleOutput;
 import nl.uva.sne.drip.drip.commons.data.v1.external.ansible.AnsibleResult;
 import nl.uva.sne.drip.drip.commons.data.v1.external.ansible.BenchmarkResult;
@@ -93,6 +95,12 @@ public class DeployService {
 @Autowired
 private BenchmarkResultService benchmarkResultService;
+@Autowired
+private ToscaService toscaService;
+@Autowired
+private PlannerService plannerService;
 private static final String[] CLOUD_SITE_NAMES = new String[]{"domain", "VMResourceID"};
 private static final String[] PUBLIC_ADRESS_NAMES = new String[]{"public_address", "publicAddress"};
 private final Logger logger;
@@ -138,9 +146,7 @@ public class DeployService {
 public DeployResponse deploySoftware(DeployRequest deployInfo) throws Exception {
 try (DRIPCaller deployer = new DeployerCaller(messageBrokerHost);) {
 Message deployerInvokationMessage = buildDeployerMessages(
-deployInfo.getProvisionID(),
-deployInfo.getManagerType().toLowerCase(),
-deployInfo.getConfigurationID(),
+deployInfo,
 null,
 null).get(0);
 ;
@@ -167,10 +173,9 @@ public class DeployService {
 }
 public Map<String, Object> getSwarmInfo(DeployResponse deployResp) throws JSONException, IOException, TimeoutException, InterruptedException {
+deployResp.setManagerType("swarm_info");
 Message deployerInvokationMessage = buildDeployerMessages(
-deployResp.getProvisionID(),
-"swarm_info",
-deployResp.getConfigurationID(),
+deployResp,
 null,
 null).get(0);
 Map<String, Object> info;
@@ -185,12 +190,20 @@ public class DeployService {
 return info;
 }
-private List<Message> buildDeployerMessages(String provisionID, String managerType, String configurationID, String serviceName, Integer numOfCont) throws JSONException {
+private List<Message> buildDeployerMessages(
+DeployRequest deployInfo,
+String serviceName,
+Integer numOfContainers) throws JSONException {
+String provisionID = deployInfo.getProvisionID();
+String managerType = deployInfo.getManagerType();
+String configurationID = deployInfo.getConfigurationID();
 ProvisionResponse pro = provisionService.findOne(provisionID);
 if (pro == null) {
 throw new NotFoundException();
 }
 List<String> loginKeysIDs = pro.getDeployerKeyPairIDs();
 List<Message> messages = new ArrayList<>();
 // if (loginKeysIDs == null || loginKeysIDs.isEmpty()) {
 // List<String> cloudConfIDs = pro.getCloudCredentialsIDs();
@@ -220,12 +233,14 @@ public class DeployService {
 }
 if (managerType.toLowerCase().equals("swarm") && configurationID != null) {
-MessageParameter composerParameter = createComposerParameter(configurationID);
+Map<String, String> dockerLogin = getDockerLogin(pro);
+MessageParameter composerParameter = createComposerParameter(configurationID, dockerLogin);
 parameters.add(composerParameter);
 }
 if (managerType.toLowerCase().equals("scale") && configurationID != null) {
-MessageParameter scaleParameter = createScaleParameter(configurationID, serviceName, numOfCont);
+MessageParameter scaleParameter = createScaleParameter(configurationID, serviceName, numOfContainers);
 parameters.add(scaleParameter);
 }
 if (managerType.toLowerCase().equals("swarm_info") && configurationID != null) {
@@ -278,10 +293,13 @@ public class DeployService {
 return createConfigurationParameter(configurationID, "ansible");
 }
-private MessageParameter createComposerParameter(String configurationID) throws JSONException {
+private MessageParameter createComposerParameter(String configurationID, Map<String, String> dockerLogin) throws JSONException {
 MessageParameter configurationParameter = createConfigurationParameter(configurationID, "composer");
 Map<String, String> attributes = new HashMap<>();
 attributes.put("name", configurationID);
+attributes.put("docker_login_username", dockerLogin.get("username"));
+attributes.put("docker_login_password", dockerLogin.get("password"));
+attributes.put("docker_login_registry", dockerLogin.get("registry"));
 configurationParameter.setAttributes(attributes);
 return configurationParameter;
 }
@@ -335,8 +353,11 @@ public class DeployService {
 throw new BadRequestException("Service name does not exist in this deployment");
 }
-Message message = buildDeployerMessages(deployment.getProvisionID(), "scale",
-confID, scaleReq.getScaleTargetName(), scaleReq.getNumOfInstances()).get(0);
+deployment.setManagerType("scale");
+Message message = buildDeployerMessages(deployment,
+scaleReq.getScaleTargetName(),
+scaleReq.getNumOfInstances()).get(0);
 message.setOwner(((User) SecurityContextHolder.getContext().getAuthentication().getPrincipal()).getUsername());
 try (DRIPCaller deployer = new DeployerCaller(messageBrokerHost);) {
 logger.info("Calling deployer");
@@ -673,4 +694,15 @@ public class DeployService {
 return resp;
 }
+private Map<String, String> getDockerLogin(ProvisionResponse pro) {
+String planID = pro.getPlanID();
+PlanResponse plan = plannerService.findOne(planID);
+ToscaRepresentation tosca = toscaService.findOne(plan.getToscaID());
+Map<String, Object> map = tosca.getKeyValue();
+map.get("repositories");
+HashMap dockerLogin = new HashMap();
+return dockerLogin;
+}
 }
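Note that the committed getDockerLogin() is still a stub: it fetches the TOSCA key/value map and looks up "repositories", but discards the result and returns an empty HashMap, so createComposerParameter() currently sends null credentials. A rough Python sketch of the lookup it appears to be heading toward, assuming a flat "repositories" entry with username/password/registry fields (that layout is an assumption, not something this diff shows):

    # Sketch only; the structure of the TOSCA "repositories" entry is hypothetical.
    def get_docker_login(tosca_key_value):
        repositories = tosca_key_value.get("repositories") or {}
        docker_login = {}
        # createComposerParameter() forwards exactly these three keys as
        # docker_login_username / docker_login_password / docker_login_registry.
        for key in ("username", "password", "registry"):
            if key in repositories:
                docker_login[key] = repositories[key]
        return docker_login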
@@ -35,7 +35,7 @@ if not getattr(logger, 'handler_set', None):
 retry=0
-def deploy_compose(vm, compose_file, compose_name):
+def deploy_compose(vm, compose_file, compose_name,docker_login):
 try:
 logger.info("Starting docker compose deployment on: "+vm.ip)
 paramiko.util.log_to_file("deployment.log")
@@ -46,7 +46,10 @@ def deploy_compose(vm, compose_file, compose_name):
 sftp.chdir('/tmp/')
 sftp.put(compose_file, "docker-compose.yml")
-stdin, stdout, stderr = ssh.exec_command("sudo docker stack deploy --compose-file /tmp/docker-compose.yml %s" % (compose_name))
+if(docker_login):
+stdin, stdout, stderr = ssh.exec_command("docker login -u "+docker_login['username']+" -p "+docker_login['password']+" "+docker_login['registry']+" && sudo sudo docker stack deploy --compose-file /tmp/docker-compose.yml %s" % (compose_name))
+else:
+stdin, stdout, stderr = ssh.exec_command("sudo docker stack deploy --compose-file /tmp/docker-compose.yml %s" % (compose_name))
 stdout.read()
 logger.info("Finished docker compose deployment on: "+vm.ip)
 except Exception as e:
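The committed remote command chains a plain docker login (with the password on the command line) with a doubled "sudo sudo" before docker stack deploy. A minimal sketch of a tightened variant of that branch, reusing the ssh, docker_login and compose_name names already in scope in deploy_compose, and assuming passwordless sudo on the VM and Docker 17.07+ for --password-stdin:

    # Sketch, not the committed code: single sudo, password piped over stdin
    # instead of appearing in the remote command line and shell history.
    login_cmd = "sudo docker login --username %s --password-stdin %s" % (
        docker_login['username'], docker_login['registry'])
    deploy_cmd = "sudo docker stack deploy --compose-file /tmp/docker-compose.yml %s" % compose_name
    stdin, stdout, stderr = ssh.exec_command(login_cmd + " && " + deploy_cmd)
    stdin.write(docker_login['password'] + "\n")
    stdin.channel.shutdown_write()  # send EOF so `docker login` stops reading stdin
    stdout.read()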
@@ -64,12 +67,12 @@ def deploy_compose(vm, compose_file, compose_name):
-def run(vm_list, compose_file, compose_name,rabbitmq_host,owner):
+def run(vm_list, compose_file, compose_name,rabbitmq_host,owner,docker_login):
 rabbit = DRIPLoggingHandler(host=rabbitmq_host, port=5672,user=owner)
 logger.addHandler(rabbit)
 for i in vm_list:
 if i.role == "master":
-ret = deploy_compose(i, compose_file, compose_name)
+ret = deploy_compose(i, compose_file, compose_name,docker_login)
 if "ERROR" in ret:
 return ret
 else:
...
@@ -95,6 +95,12 @@ def handleDelivery(message):
 compose_file = path + "docker-compose.yml"
 if not param["attributes"] == None and not param["attributes"]["name"] == None :
 compose_name = param["attributes"]["name"]
+if 'docker_login' in param["attributes"]:
+docker_login = {}
+docker_login['username'] = param["attributes"]["docker_login_username"]
+docker_login['password'] = param["attributes"]["docker_login_password"]
+docker_login['registry'] = param["attributes"]["docker_login_registry"]
+docker_login = param["attributes"]["docker_login"]
 else:
 current_milli_time = lambda: int(round(time.time() * 1000))
 compose_name = "service_"+str(current_milli_time())
@@ -117,7 +123,7 @@ def handleDelivery(message):
 if "ERROR" in ret: return ret
 ret = docker_swarm.run(vm_list,rabbitmq_host,owner)
 if "ERROR" in ret: return ret
-ret = docker_compose.run(vm_list, compose_file, compose_name,rabbitmq_host,owner)
+ret = docker_compose.run(vm_list, compose_file, compose_name,rabbitmq_host,owner,docker_login)
 return ret
 elif manager_type == "ansible":
 ret = ansible_playbook.run(vm_list,playbook,rabbitmq_host,owner)
...
@@ -82,8 +82,9 @@ def handle_delivery(message):
 def test_local():
 home = expanduser("~")
-transformer = DockerComposeTransformer(home+"/workspace/DRIP/docs/input_tosca_files/MOG/mog_tosca_v1.yml")
-compose = transformer.getnerate_compose()
+transformer = DockerComposeTransformer(home+"/workspace/DRIP/docs/input_tosca_files/BEIA/BEIAv3.yml")
+vresion = '2';
+compose = transformer.getnerate_compose(vresion)
 print yaml.dump(compose)
 with open(home+'/Downloads/docker-compose.yml', 'w') as outfile:
...
@@ -29,11 +29,11 @@ class DockerComposeTransformer:
 self.DOCKER_TYPE = 'Switch.nodes.Application.Container.Docker'
-def getnerate_compose(self):
+def getnerate_compose(self,version):
 # if self.tt:
 # return self.analize_tosca()
 # else:
-return self.analyze_yaml()
+return self.analyze_yaml(version)
 def get_node_types(self):
 return self.yaml_dict_tpl['node_types']
@@ -157,11 +157,11 @@ class DockerComposeTransformer:
 volumes.append(vol)
 return volumes
-def analyze_yaml(self):
+def analyze_yaml(self,version):
 docker_types = self.get_docker_types()
 node_templates = self.get_node_templates()
 services = {}
-services['version'] = '2'
+services['version'] = version
 services['services'] = {}
 all_volumes = []
 for node_template_key in node_templates:
...
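With the compose-file version now a parameter, callers choose the schema to emit: the compose file at the top of this commit moves to "3.2", which is the 3.x format docker stack deploy expects, while the committed test_local still passes '2'. A short usage sketch (the TOSCA path and the chosen version are illustrative, and DockerComposeTransformer is assumed to be importable in this module):

    # Usage sketch (Python 2, mirroring test_local above).
    import yaml
    transformer = DockerComposeTransformer("input_tosca_files/BEIA/BEIAv3.yml")
    compose = transformer.getnerate_compose('3.2')  # "3.2" matches the stack-deploy path
    print yaml.dump(compose)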