Commit 6a784739 authored by Spiros Koulouzis

Adding docker login

parent c9be5596
version: "2"
version: "3.2"
services:
kamailio:
#build: ./kamailio
@@ -104,8 +104,8 @@ services:
- "10048:10048/udp"
- "10049:10049/udp"
-expose:
-- "7711"
+#expose:
+#- "7711"
# volumes:
# - ./rtpproxy/docker-entrypoint-init:/docker-entrypoint-init
dns:
@@ -136,5 +136,5 @@ networks:
driver: default
config:
- subnet: 192.168.2.0/24
-ip_range: 192.168.2.0/24
-gateway: 192.168.2.1
+#ip_range: 192.168.2.0/24
+#gateway: 192.168.2.1
@@ -134,8 +134,6 @@ services:
- REDIS_PORT=${REDIS_PORT:-6379}
- REDIS_PASSWORD=${REDIS_PASSWORD:-changeme}
- DEFAULT_SIP=${DEFAULT_SIP:-${SIP_PEERNAME}}
-- MONITORING_SERVER = monitoring_server
-- MONITORING_ADAPTER = monitoring_adapter
ports:
- "8001:80/tcp"
@@ -165,4 +163,4 @@ networks:
volumes:
graphite_data:
grafana_data:
-redis_data:
+redis_data:
\ No newline at end of file
@@ -54,7 +54,9 @@ import nl.uva.sne.drip.commons.utils.Converter;
import nl.uva.sne.drip.commons.utils.DRIPLogHandler;
import nl.uva.sne.drip.drip.commons.data.v1.external.ConfigurationRepresentation;
import nl.uva.sne.drip.drip.commons.data.v1.external.KeyPair;
+import nl.uva.sne.drip.drip.commons.data.v1.external.PlanResponse;
import nl.uva.sne.drip.drip.commons.data.v1.external.ScaleRequest;
+import nl.uva.sne.drip.drip.commons.data.v1.external.ToscaRepresentation;
import nl.uva.sne.drip.drip.commons.data.v1.external.ansible.AnsibleOutput;
import nl.uva.sne.drip.drip.commons.data.v1.external.ansible.AnsibleResult;
import nl.uva.sne.drip.drip.commons.data.v1.external.ansible.BenchmarkResult;
@@ -93,6 +95,12 @@ public class DeployService {
@Autowired
private BenchmarkResultService benchmarkResultService;
+@Autowired
+private ToscaService toscaService;
+@Autowired
+private PlannerService plannerService;
private static final String[] CLOUD_SITE_NAMES = new String[]{"domain", "VMResourceID"};
private static final String[] PUBLIC_ADRESS_NAMES = new String[]{"public_address", "publicAddress"};
private final Logger logger;
@@ -138,9 +146,7 @@ public class DeployService {
public DeployResponse deploySoftware(DeployRequest deployInfo) throws Exception {
try (DRIPCaller deployer = new DeployerCaller(messageBrokerHost);) {
Message deployerInvokationMessage = buildDeployerMessages(
-deployInfo.getProvisionID(),
-deployInfo.getManagerType().toLowerCase(),
-deployInfo.getConfigurationID(),
+deployInfo,
null,
null).get(0);
;
@@ -167,10 +173,9 @@
}
public Map<String, Object> getSwarmInfo(DeployResponse deployResp) throws JSONException, IOException, TimeoutException, InterruptedException {
+deployResp.setManagerType("swarm_info");
Message deployerInvokationMessage = buildDeployerMessages(
-deployResp.getProvisionID(),
-"swarm_info",
-deployResp.getConfigurationID(),
+deployResp,
null,
null).get(0);
Map<String, Object> info;
@@ -185,12 +190,20 @@
return info;
}
-private List<Message> buildDeployerMessages(String provisionID, String managerType, String configurationID, String serviceName, Integer numOfCont) throws JSONException {
+private List<Message> buildDeployerMessages(
+DeployRequest deployInfo,
+String serviceName,
+Integer numOfContainers) throws JSONException {
+String provisionID = deployInfo.getProvisionID();
+String managerType = deployInfo.getManagerType();
+String configurationID = deployInfo.getConfigurationID();
ProvisionResponse pro = provisionService.findOne(provisionID);
if (pro == null) {
throw new NotFoundException();
}
List<String> loginKeysIDs = pro.getDeployerKeyPairIDs();
List<Message> messages = new ArrayList<>();
// if (loginKeysIDs == null || loginKeysIDs.isEmpty()) {
// List<String> cloudConfIDs = pro.getCloudCredentialsIDs();
@@ -220,12 +233,14 @@
}
if (managerType.toLowerCase().equals("swarm") && configurationID != null) {
-MessageParameter composerParameter = createComposerParameter(configurationID);
+Map<String, String> dockerLogin = getDockerLogin(pro);
+MessageParameter composerParameter = createComposerParameter(configurationID, dockerLogin);
parameters.add(composerParameter);
}
if (managerType.toLowerCase().equals("scale") && configurationID != null) {
-MessageParameter scaleParameter = createScaleParameter(configurationID, serviceName, numOfCont);
+MessageParameter scaleParameter = createScaleParameter(configurationID, serviceName, numOfContainers);
parameters.add(scaleParameter);
}
if (managerType.toLowerCase().equals("swarm_info") && configurationID != null) {
@@ -278,10 +293,13 @@
return createConfigurationParameter(configurationID, "ansible");
}
-private MessageParameter createComposerParameter(String configurationID) throws JSONException {
+private MessageParameter createComposerParameter(String configurationID, Map<String, String> dockerLogin) throws JSONException {
MessageParameter configurationParameter = createConfigurationParameter(configurationID, "composer");
Map<String, String> attributes = new HashMap<>();
attributes.put("name", configurationID);
+attributes.put("docker_login_username", dockerLogin.get("username"));
+attributes.put("docker_login_password", dockerLogin.get("password"));
+attributes.put("docker_login_registry", dockerLogin.get("registry"));
configurationParameter.setAttributes(attributes);
return configurationParameter;
}
@@ -335,8 +353,11 @@
throw new BadRequestException("Service name does not exist in this deployment");
}
-Message message = buildDeployerMessages(deployment.getProvisionID(), "scale",
-confID, scaleReq.getScaleTargetName(), scaleReq.getNumOfInstances()).get(0);
+deployment.setManagerType("scale");
+Message message = buildDeployerMessages(deployment,
+scaleReq.getScaleTargetName(),
+scaleReq.getNumOfInstances()).get(0);
message.setOwner(((User) SecurityContextHolder.getContext().getAuthentication().getPrincipal()).getUsername());
try (DRIPCaller deployer = new DeployerCaller(messageBrokerHost);) {
logger.info("Calling deployer");
@@ -673,4 +694,15 @@ public class DeployService {
return resp;
}
+private Map<String, String> getDockerLogin(ProvisionResponse pro) {
+String planID = pro.getPlanID();
+PlanResponse plan = plannerService.findOne(planID);
+ToscaRepresentation tosca = toscaService.findOne(plan.getToscaID());
+Map<String, Object> map = tosca.getKeyValue();
+map.get("repositories");
+HashMap dockerLogin = new HashMap();
+return dockerLogin;
+}
}
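
The new getDockerLogin helper looks up the plan and its TOSCA document and reads the `repositories` entry, but as committed it still returns an empty map, so the `docker_login_*` attributes passed to the composer stay null. The diff does not show how `repositories` is structured; the sketch below shows only one plausible mapping from such a section onto the username/password/registry map the deployer expects, and the field names (`credential`, `user`, `token`, `url`) are assumptions, not part of this commit.

```python
# Hypothetical sketch (not in this commit): map a TOSCA 'repositories' section
# onto the username/password/registry dict that createComposerParameter would
# forward as docker_login_* attributes. Field names below are assumed.
def extract_docker_login(tosca_key_value):
    repositories = tosca_key_value.get('repositories') or {}
    for name, repo in repositories.items():
        credential = repo.get('credential') if isinstance(repo, dict) else None
        if credential:
            return {'username': credential.get('user'),
                    'password': credential.get('token'),
                    'registry': repo.get('url')}
    return {}  # no private registry configured: deployer then skips docker login

# Assumed input shape:
# repositories:
#   private_registry:
#     url: registry.example.org
#     credential: {user: drip, token: secret}
```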
@@ -35,7 +35,7 @@ if not getattr(logger, 'handler_set', None):
retry=0
-def deploy_compose(vm, compose_file, compose_name):
+def deploy_compose(vm, compose_file, compose_name,docker_login):
try:
logger.info("Starting docker compose deployment on: "+vm.ip)
paramiko.util.log_to_file("deployment.log")
@@ -46,7 +46,10 @@ def deploy_compose(vm, compose_file, compose_name):
sftp.chdir('/tmp/')
sftp.put(compose_file, "docker-compose.yml")
-stdin, stdout, stderr = ssh.exec_command("sudo docker stack deploy --compose-file /tmp/docker-compose.yml %s" % (compose_name))
+if(docker_login):
+stdin, stdout, stderr = ssh.exec_command("docker login -u "+docker_login['username']+" -p "+docker_login['password']+" "+docker_login['registry']+" && sudo sudo docker stack deploy --compose-file /tmp/docker-compose.yml %s" % (compose_name))
+else:
+stdin, stdout, stderr = ssh.exec_command("sudo docker stack deploy --compose-file /tmp/docker-compose.yml %s" % (compose_name))
stdout.read()
logger.info("Finished docker compose deployment on: "+vm.ip)
except Exception as e:
@@ -64,12 +67,12 @@ def deploy_compose(vm, compose_file, compose_name):
-def run(vm_list, compose_file, compose_name,rabbitmq_host,owner):
+def run(vm_list, compose_file, compose_name,rabbitmq_host,owner,docker_login):
rabbit = DRIPLoggingHandler(host=rabbitmq_host, port=5672,user=owner)
logger.addHandler(rabbit)
for i in vm_list:
if i.role == "master":
-ret = deploy_compose(i, compose_file, compose_name)
+ret = deploy_compose(i, compose_file, compose_name,docker_login)
if "ERROR" in ret:
return ret
else:
......
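
On the deployer side, deploy_compose now chains a `docker login` in front of `docker stack deploy` on the Swarm master when credentials are present. The helper below is a minimal standalone sketch of that command construction under the same `username`/`password`/`registry` keys; it is not the committed implementation, and the paths and stack name are illustrative.

```python
# Sketch of the remote command string assembled for the Swarm master.
# compose_path and stack_name are illustrative arguments, not repo constants.
def build_stack_deploy_command(compose_path, stack_name, docker_login=None):
    deploy = "sudo docker stack deploy --compose-file %s %s" % (compose_path, stack_name)
    if not docker_login:
        return deploy
    login = "docker login -u %s -p %s %s" % (docker_login['username'],
                                             docker_login['password'],
                                             docker_login['registry'])
    # Chain the two so the stack is deployed in the same SSH session as the login.
    return login + " && " + deploy

# Example:
# build_stack_deploy_command("/tmp/docker-compose.yml", "service_1",
#                            {"username": "drip", "password": "secret",
#                             "registry": "registry.example.org"})
```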
@@ -95,6 +95,12 @@ def handleDelivery(message):
compose_file = path + "docker-compose.yml"
if not param["attributes"] == None and not param["attributes"]["name"] == None :
compose_name = param["attributes"]["name"]
+if 'docker_login' in param["attributes"]:
+docker_login = {}
+docker_login['username'] = param["attributes"]["docker_login_username"]
+docker_login['password'] = param["attributes"]["docker_login_password"]
+docker_login['registry'] = param["attributes"]["docker_login_registry"]
+docker_login = param["attributes"]["docker_login"]
else:
current_milli_time = lambda: int(round(time.time() * 1000))
compose_name = "service_"+str(current_milli_time())
@@ -117,7 +123,7 @@
if "ERROR" in ret: return ret
ret = docker_swarm.run(vm_list,rabbitmq_host,owner)
if "ERROR" in ret: return ret
-ret = docker_compose.run(vm_list, compose_file, compose_name,rabbitmq_host,owner)
+ret = docker_compose.run(vm_list, compose_file, compose_name,rabbitmq_host,owner,docker_login)
return ret
elif manager_type == "ansible":
ret = ansible_playbook.run(vm_list,playbook,rabbitmq_host,owner)
......
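
The composer attributes now carry docker_login_username, docker_login_password and docker_login_registry alongside the stack name. As a defensive variant of reading them, the sketch below assumes the same message format but returns None when no registry credentials are present, so the plain `docker stack deploy` path is used; it is a sketch, not the committed logic.

```python
# Sketch (assumed message format): read the docker_login_* attributes from the
# composer parameter; return None when no private registry is configured so the
# deployer falls back to a plain `docker stack deploy`.
def read_docker_login(attributes):
    attributes = attributes or {}
    username = attributes.get('docker_login_username')
    password = attributes.get('docker_login_password')
    registry = attributes.get('docker_login_registry')
    if username and password:
        return {'username': username, 'password': password, 'registry': registry or ''}
    return None

# Illustrative use inside handleDelivery:
# docker_login = read_docker_login(param.get("attributes"))
# ret = docker_compose.run(vm_list, compose_file, compose_name, rabbitmq_host, owner, docker_login)
```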
@@ -82,8 +82,9 @@ def handle_delivery(message):
def test_local():
home = expanduser("~")
-transformer = DockerComposeTransformer(home+"/workspace/DRIP/docs/input_tosca_files/MOG/mog_tosca_v1.yml")
-compose = transformer.getnerate_compose()
+transformer = DockerComposeTransformer(home+"/workspace/DRIP/docs/input_tosca_files/BEIA/BEIAv3.yml")
+vresion = '2';
+compose = transformer.getnerate_compose(vresion)
print yaml.dump(compose)
with open(home+'/Downloads/docker-compose.yml', 'w') as outfile:
......
@@ -29,11 +29,11 @@ class DockerComposeTransformer:
self.DOCKER_TYPE = 'Switch.nodes.Application.Container.Docker'
-def getnerate_compose(self):
+def getnerate_compose(self,version):
# if self.tt:
# return self.analize_tosca()
# else:
-return self.analyze_yaml()
+return self.analyze_yaml(version)
def get_node_types(self):
return self.yaml_dict_tpl['node_types']
@@ -157,11 +157,11 @@ class DockerComposeTransformer:
volumes.append(vol)
return volumes
-def analyze_yaml(self):
+def analyze_yaml(self,version):
docker_types = self.get_docker_types()
node_templates = self.get_node_templates()
services = {}
-services['version'] = '2'
+services['version'] = version
services['services'] = {}
services['services'] = {}
all_volumes = []
for node_template_key in node_templates:
......