Commit 1c71860a authored by Oceans's avatar Oceans

Merge branch 'package' of https://github.com/skoulouzis/DRIP into package

parents d073529a 4cc1deb3
......@@ -15,13 +15,13 @@
*/
package nl.uva.sne.drip.api.dao;
import nl.uva.sne.drip.commons.types.UserPublicKey;
import nl.uva.sne.drip.commons.types.LoginKey;
import org.springframework.data.mongodb.repository.MongoRepository;
/**
*
* @author S. Koulouzis
*/
public interface UserKeyDao extends MongoRepository<UserPublicKey, String> {
public interface UserKeyDao extends MongoRepository<LoginKey, String> {
}
......@@ -15,8 +15,13 @@
*/
package nl.uva.sne.drip.api.rest;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.annotation.security.RolesAllowed;
import nl.uva.sne.drip.commons.types.CloudCredentials;
import org.springframework.beans.factory.annotation.Autowired;
......@@ -26,9 +31,15 @@ import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.ResponseBody;
import org.springframework.web.bind.annotation.RestController;
import nl.uva.sne.drip.api.dao.CloudCredentialsDao;
import nl.uva.sne.drip.api.exception.BadRequestException;
import nl.uva.sne.drip.api.exception.NotFoundException;
import nl.uva.sne.drip.api.service.UserService;
import nl.uva.sne.drip.commons.types.LoginKey;
import org.apache.commons.io.FilenameUtils;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestBody;
import org.springframework.web.bind.annotation.RequestParam;
import org.springframework.web.multipart.MultipartFile;
/**
*
......@@ -42,20 +53,74 @@ public class CloudConfigurationController {
@Autowired
private CloudCredentialsDao cloudCredentialsDao;
// curl -H "Content-Type: application/json" -X POST -d '{"key":"my_secret_password","keyIdAlias":"geni","logineKys":[{"attributes":null,"key":"-----BEGINRSAPUBLICKEY-----\nMIIBCgKCAQEA+xGZ/wcz9ugFpP07Nspo6U17l0YhFiFpxxU4pTk3Lifz9R3zsIsu\nERwta7+fWIfxOo208ett/jhskiVodSEt3QBGh4XBipyWopKwZ93HHaDVZAALi/2A\n+xTBtWdEo7XGUujKDvC2/aZKukfjpOiUI8AhLAfjmlcD/UZ1QPh0mHsglRNCmpCw\nmwSXA9VNmhz+PiB+Dml4WWnKW/VHo2ujTXxq7+efMU4H2fny3Se3KYOsFPFGZ1TN\nQSYlFuShWrHPtiLmUdPoP6CV2mML1tk+l7DIIqXrQhLUKDACeM5roMx0kLhUWB8P\n+0uj1CNlNN4JRZlC7xFfqiMbFRU9Z4N6YwIDAQAB\n-----ENDRSAPUBLICKEY-----","type":"PUBLIC"},{"attributes":null,"key":"-----BEGINRSAPRIVATEKEY-----\nMIICXAIBAAKBgQCqGKukO1De7zhZj6+H0qtjTkVxwTCpvKe4eCZ0FPqri0cb2JZfXJ/DgYSF6vUp\nwmJG8wVQZKjeGcjDOL5UlsuusFncCzWBQ7RKNUSesmQRMSGkVb1/3j+skZ6UtW+5u09lHNsj6tQ5\n1s1SPrCBkedbNf0Tp0GbMJDyR4e9T04ZZwIDAQABAoGAFijko56+qGyN8M0RVyaRAXz++xTqHBLh\n3tx4VgMtrQ+WEgCjhoTwo23KMBAuJGSYnRmoBZM3lMfTKevIkAidPExvYCdm5dYq3XToLkkLv5L2\npIIVOFMDG+KESnAFV7l2c+cnzRMW0+b6f8mR1CJzZuxVLL6Q02fvLi55/mbSYxECQQDeAw6fiIQX\nGukBI4eMZZt4nscy2o12KyYner3VpoeE+Np2q+Z3pvAMd/aNzQ/W9WaI+NRfcxUJrmfPwIGm63il\nAkEAxCL5HQb2bQr4ByorcMWm/hEP2MZzROV73yF41hPsRC9m66KrheO9HPTJuo3/9s5p+sqGxOlF\nL0NDt4SkosjgGwJAFklyR1uZ/wPJjj611cdBcztlPdqoxssQGnh85BzCj/u3WqBpE2vjvyyvyI5k\nX6zk7S0ljKtt2jny2+00VsBerQJBAJGC1Mg5Oydo5NwD6BiROrPxGo2bpTbu/fhrT8ebHkTz2epl\nU9VQQSQzY1oZMVX8i1m5WUTLPz2yLJIBQVdXqhMCQBGoiuSoSjafUhV7i1cEGpb88h5NBYZzWXGZ\n37sJ5QsW+sJyoNde3xH8vdXhzU7eT82D6X/scw9RZz+/6rCJ4p0=\n-----ENDRSAPRIVATEKEY-----","type":"PRIVATE"}],"cloudProviderName":"exogeni"}' http://localhost:8080/drip-api/configuration
// curl -H "Content-Type: application/json" -X POST -d '{"key":"my_secret_password","keyIdAlias":"geni","logineKeys":[{"attributes":null,"key":"-----BEGINRSAPUBLICKEY-----\nMIIBCgKCAQEA+xGZ/wcz9ugFpP07Nspo6U17l0YhFiFpxxU4pTk3Lifz9R3zsIsu\nERwta7+fWIfxOo208ett/jhskiVodSEt3QBGh4XBipyWopKwZ93HHaDVZAALi/2A\n+xTBtWdEo7XGUujKDvC2/aZKukfjpOiUI8AhLAfjmlcD/UZ1QPh0mHsglRNCmpCw\nmwSXA9VNmhz+PiB+Dml4WWnKW/VHo2ujTXxq7+efMU4H2fny3Se3KYOsFPFGZ1TN\nQSYlFuShWrHPtiLmUdPoP6CV2mML1tk+l7DIIqXrQhLUKDACeM5roMx0kLhUWB8P\n+0uj1CNlNN4JRZlC7xFfqiMbFRU9Z4N6YwIDAQAB\n-----ENDRSAPUBLICKEY-----","type":"PUBLIC"},{"attributes":null,"key":"-----BEGINRSAPRIVATEKEY-----\nMIICXAIBAAKBgQCqGKukO1De7zhZj6+H0qtjTkVxwTCpvKe4eCZ0FPqri0cb2JZfXJ/DgYSF6vUp\nwmJG8wVQZKjeGcjDOL5UlsuusFncCzWBQ7RKNUSesmQRMSGkVb1/3j+skZ6UtW+5u09lHNsj6tQ5\n1s1SPrCBkedbNf0Tp0GbMJDyR4e9T04ZZwIDAQABAoGAFijko56+qGyN8M0RVyaRAXz++xTqHBLh\n3tx4VgMtrQ+WEgCjhoTwo23KMBAuJGSYnRmoBZM3lMfTKevIkAidPExvYCdm5dYq3XToLkkLv5L2\npIIVOFMDG+KESnAFV7l2c+cnzRMW0+b6f8mR1CJzZuxVLL6Q02fvLi55/mbSYxECQQDeAw6fiIQX\nGukBI4eMZZt4nscy2o12KyYner3VpoeE+Np2q+Z3pvAMd/aNzQ/W9WaI+NRfcxUJrmfPwIGm63il\nAkEAxCL5HQb2bQr4ByorcMWm/hEP2MZzROV73yF41hPsRC9m66KrheO9HPTJuo3/9s5p+sqGxOlF\nL0NDt4SkosjgGwJAFklyR1uZ/wPJjj611cdBcztlPdqoxssQGnh85BzCj/u3WqBpE2vjvyyvyI5k\nX6zk7S0ljKtt2jny2+00VsBerQJBAJGC1Mg5Oydo5NwD6BiROrPxGo2bpTbu/fhrT8ebHkTz2epl\nU9VQQSQzY1oZMVX8i1m5WUTLPz2yLJIBQVdXqhMCQBGoiuSoSjafUhV7i1cEGpb88h5NBYZzWXGZ\n37sJ5QsW+sJyoNde3xH8vdXhzU7eT82D6X/scw9RZz+/6rCJ4p0=\n-----ENDRSAPRIVATEKEY-----","type":"PRIVATE"}],"cloudProviderName":"exogeni"}' http://localhost:8080/drip-api/configuration
// curl -H "Content-Type: application/json" -X POST -d '{"key":"AKISAKISAKIS","keyIdAlias":"6J76J76J76J76J76J76J7","logineKys":[{"attributes":{"domain_name":"California"},"type":"PUBLIC","key":"-----BEGINRSAPRIVATEKEY-----\nMIIEpQIBAAKCAQEA3Tz2mr7SZiAMfQyuvBjM9Oi..Z1BjP5CE/Wm/Rr500P\nRK+Lh9x5eJPo5CAZ3/ANBE0sTK0ZsDGMak2m1g7..3VHqIxFTz0Ta1d+NAj\nwnLe4nOb7/eEJbDPkk05ShhBrJGBKKxb8n104o/..PdzbFMIyNjJzBM2o5y\n5A13wiLitEO7nco2WfyYkQzaxCw0AwzlkVHiIyC..71pSzkv6sv+4IDMbT/\nXpCo8L6wTarzrywnQsh+etLD6FtTjYbbrvZ8RQM..Hg2qxraAV++HNBYmNW\ns0duEdjUbJK+ZarypXI9TtnS4o1Ckj7POfljiQI..IBAFyidxtqRQyv5KrD\nkbJ+q+rsJxQlaipn2M4lGuQJEfIxELFDyd3XpxP..Un/82NZNXlPmRIopXs\n2T91jiLZEUKQw+n73j26adTbteuEaPGSrTZxBLR..yssO0wWomUyILqVeti\n6AkL0NJAuKcucHGqWVgUIa4g1haE0ilcm6dWUDo..fd+PpzdCJf1s4NdUWK\nYV2GJcutGQb+jqT5DTUqAgST7N8M28rwjK6nVMI..BUpP0xpPnuYDyPOw6x\n4hBt8DZQYyduzIXBXRBKNiNdv8fum68/5klHxp6..4HRkMUL958UVeljUsT\nBFQlO9UCgYEA/VqzXVzlz8K36VSTMPEhB5zBATV..PRiXtYK1YpYV4/jSUj\nvvT4hP8uoYNC+BlEMi98LtnxZIh0V4rqHDsScAq..VyeSLH0loKMZgpwFEm\nbEIDnEOD0nKrfT/9K9sPYgvB43wsLEtUujaYw3W..Liy0WKmB8CgYEA34xn\n1QlOOhHBn9Z8qYjoDYhvcj+a89tD9eMPhesfQFw..rsfGcXIonFmWdVygbe\n6Doihc+GIYIq/QP4jgMksE1ADvczJSke92ZfE2i..fitBpQERNJO0BlabfP\nALs5NssKNmLkWS2U2BHCbv4DzDXwiQB37KPOL1c..kBHfF2/htIs20d1UVL\n+PK+aXKwguI6bxLGZ3of0UH+mGsSl0mkp7kYZCm..OTQtfeRqP8rDSC7DgA\nkHc5ajYqh04AzNFaxjRo+M3IGICUaOdKnXd0Fda..QwfoaX4QlRTgLqb7AN\nZTzM9WbmnYoXrx17kZlT3lsCgYEAm757XI3WJVj..WoLj1+v48WyoxZpcai\nuv9bT4Cj+lXRS+gdKHK+SH7J3x2CRHVS+WH/SVC..DxuybvebDoT0TkKiCj\nBWQaGzCaJqZa+POHK0klvS+9ln0/6k539p95tfX..X4TCzbVG6+gJiX0ysz\nYfehn5MCgYEAkMiKuWHCsVyCab3RUf6XA9gd3qY..fCTIGtS1tR5PgFIV+G\nengiVoWc/hkj8SBHZz1n1xLN7KDf8ySU06MDggB..hJ+gXJKy+gf3mF5Kmj\nDtkpjGHQzPF6vOe907y5NQLvVFGXUq/FIJZxB8k..fJdHEm2M4=\n-----ENDRSAPRIVATEKEY-----"},{"attributes":{"domain_name":"Virginia"},"type":"PUBLIC","key":"-----BEGINRSAPRIVATEKEY-----\nMIICXAIBAAKBgQCqGKukO1De7zhZj6+H0qtjTkVxwTCpvKe4eCZ0FPqri0cb2JZfXJ/DgYSF6vUp\nwmJG8wVQZKjeGcjDOL5UlsuusFncCzWBQ7RKNUSesmQRMSGkVb1/3j+skZ
6UtW+5u09lHNsj6tQ5\n1s1SPrCBkedbNf0Tp0GbMJDyR4e9T04ZZwIDAQABAoGAFijko56+qGyN8M0RVyaRAXz++xTqHBLh\n3tx4VgMtrQ+WEgCjhoTwo23KMBAuJGSYnRmoBZM3lMfTKevIkAidPExvYCdm5dYq3XToLkkLv5L2\npIIVOFMDG+KESnAFV7l2c+cnzRMW0+b6f8mR1CJzZuxVLL6Q02fvLi55/mbSYxECQQDeAw6fiIQX\nGukBI4eMZZt4nscy2o12KyYner3VpoeE+Np2q+Z3pvAMd/aNzQ/W9WaI+NRfcxUJrmfPwIGm63il\nAkEAxCL5HQb2bQr4ByorcMWm/hEP2MZzROV73yF41hPsRC9m66KrheO9HPTJuo3/9s5p+sqGxOlF\nL0NDt4SkosjgGwJAFklyR1uZ/wPJjj611cdBcztlPdqoxssQGnh85BzCj/u3WqBpE2vjvyyvyI5k\nX6zk7S0ljKtt2jny2+00VsBerQJBAJGC1Mg5Oydo5NwD6BiROrPxGo2bpTbu/fhrT8ebHkTz2epl\nU9VQQSQzY1oZMVX8i1m5WUTLPz2yLJIBQVdXqhMCQBGoiuSoSjafUhV7i1cEGpb88h5NBYZzWXGZ\n37sJ5QsW+sJyoNde3xH8vdXhzU7eT82D6X/scw9RZz+/6rCJ4p0=\n-----ENDRSAPRIVATEKEY-----"}],"cloudProviderName":"ec2"}'
/**
 * Persists a new CloudCredentials document posted as JSON.
 * The secret key and the key-id alias are mandatory; anything else is
 * stored as received.
 *
 * @param cc the credentials to store
 * @return the ID of the newly stored document
 */
@RequestMapping(method = RequestMethod.POST)
@RolesAllowed({UserService.USER, UserService.ADMIN})
public @ResponseBody
String postConf(@RequestBody CloudCredentials cc) {
    // Validate the mandatory fields before touching the datastore.
    String missingField = null;
    if (cc.getKey() == null) {
        missingField = "key";
    } else if (cc.getKeyIdAlias() == null) {
        missingField = "keyIdAlias";
    }
    if (missingField != null) {
        throw new BadRequestException(missingField + " can't be null");
    }
    cloudCredentialsDao.save(cc);
    return cc.getId();
}
/**
 * Attaches an uploaded login-key file to an existing CloudCredentials
 * document.
 *
 * @param file the uploaded key file; its contents become the key material
 * @param id the ID of the CloudCredentials document to extend
 * @return the ID of the updated document, or {@code null} if reading the
 *         upload failed (the failure is logged)
 */
@RequestMapping(value = "/upload/{id}", method = RequestMethod.POST)
@RolesAllowed({UserService.USER, UserService.ADMIN})
public @ResponseBody
String addLogineKey(@RequestParam("file") MultipartFile file, @PathVariable("id") String id) {
    try {
        CloudCredentials cc = cloudCredentialsDao.findOne(id);
        if (cc == null) {
            throw new NotFoundException();
        }
        if (file.isEmpty()) {
            // BUG FIX: user-facing message typo "uplaod" -> "upload".
            throw new BadRequestException("Must upload a file");
        }
        String originalFileName = file.getOriginalFilename();
        byte[] bytes = file.getBytes();
        List<LoginKey> logInKeys = cc.getLoginKeys();
        if (logInKeys == null) {
            logInKeys = new ArrayList<>();
        }
        LoginKey key = new LoginKey();
        key.setKey(new String(bytes, "UTF-8"));
        // For EC2 the file name (minus extension) carries the domain the key
        // belongs to. equalsIgnoreCase also avoids the NPE the original
        // getCloudProviderName().toLowerCase() threw when the provider name
        // was null.
        if ("ec2".equalsIgnoreCase(cc.getCloudProviderName())) {
            Map<String, String> attributes = new HashMap<>();
            attributes.put("domain_name", FilenameUtils.removeExtension(originalFileName));
            key.setAttributes(attributes);
        }
        logInKeys.add(key);
        // NOTE(review): getter is getLoginKeys() but setter is
        // setLogineKeys() — presumably a typo in CloudCredentials; confirm.
        cc.setLogineKeys(logInKeys);
        cloudCredentialsDao.save(cc);
        return cloudCredentialsDao.findOne(id).getId();
    } catch (IOException ex) {
        Logger.getLogger(CloudConfigurationController.class.getName()).log(Level.SEVERE, null, ex);
    }
    return null;
}
@RequestMapping(value = "/{id}", method = RequestMethod.GET)
@RolesAllowed({UserService.USER, UserService.ADMIN})
public CloudCredentials get(@PathVariable("id") String id) {
return cloudCredentialsDao.findOne(id);
public @ResponseBody
CloudCredentials get(@PathVariable("id") String id) {
CloudCredentials cc = cloudCredentialsDao.findOne(id);
if (cc == null) {
throw new NotFoundException();
}
return cc;
}
/**
 * Deletes the CloudCredentials document with the given ID.
 *
 * @param id the document ID to remove
 * @return a confirmation message echoing the removed ID
 */
@RequestMapping(value = "/{id}", method = RequestMethod.DELETE)
@RolesAllowed({UserService.USER, UserService.ADMIN})
public @ResponseBody
String delete(@PathVariable("id") String id) {
    cloudCredentialsDao.delete(id);
    return String.format("Deleted :%s", id);
}
@RequestMapping(value = "/ids")
......
......@@ -15,45 +15,28 @@
*/
package nl.uva.sne.drip.api.rest;
import com.fasterxml.jackson.core.JsonProcessingException;
import java.io.ByteArrayOutputStream;
import nl.uva.sne.drip.api.service.SimplePlannerService;
import nl.uva.sne.drip.commons.types.ToscaRepresentation;
import java.io.File;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.concurrent.TimeoutException;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.annotation.security.RolesAllowed;
import nl.uva.sne.drip.api.dao.CloudCredentialsDao;
import nl.uva.sne.drip.api.rpc.PlannerCaller;
import nl.uva.sne.drip.commons.types.Message;
import nl.uva.sne.drip.commons.types.Parameter;
import nl.uva.sne.drip.commons.utils.Converter;
import nl.uva.sne.drip.api.exception.NotFoundException;
import org.json.JSONException;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.ResponseBody;
import org.springframework.web.bind.annotation.RestController;
import nl.uva.sne.drip.api.dao.ToscaDao;
import nl.uva.sne.drip.api.rpc.DRIPCaller;
import nl.uva.sne.drip.api.rpc.ProvisionerCaller;
import nl.uva.sne.drip.api.service.PlannerService;
import nl.uva.sne.drip.api.service.ToscaService;
import nl.uva.sne.drip.api.service.UserService;
import nl.uva.sne.drip.commons.types.CloudCredentials;
import nl.uva.sne.drip.commons.types.LoginKey;
import org.apache.commons.io.FilenameUtils;
import org.springframework.web.bind.annotation.RequestParam;
/**
*
......@@ -64,168 +47,65 @@ import org.apache.commons.io.FilenameUtils;
@Component
public class PlannerController {
@Value("${message.broker.host}")
private String messageBrokerHost;
@Autowired
private ToscaDao dao;
private ToscaService toscaService;
@Autowired
private SimplePlannerService simplePlannerService;
@Autowired
private CloudCredentialsDao cloudCredentialsDao;
private PlannerService plannerService;
// @Autowired
// PlannerService plannerService;
@RequestMapping(value = "/plan/{tosca_id}", method = RequestMethod.POST)
@RequestMapping(value = "/plan/{tosca_id}", method = RequestMethod.GET)
@RolesAllowed({UserService.USER, UserService.ADMIN})
public @ResponseBody
String plann(@PathVariable("tosca_id") String toscaId) {
DRIPCaller planner = null;
DRIPCaller provisioner = null;
List<DRIPCaller> dripComponetens = new ArrayList<>();
try {
Message plannerInvokationMessage = buildPlannerMessage(toscaId);
planner = new PlannerCaller(messageBrokerHost);
dripComponetens.add(planner);
Message plannerReturnedMessage = planner.call(plannerInvokationMessage);
String plan(@PathVariable("tosca_id") String toscaId) {
Message provisionerInvokationMessage = buildProvisionerMessage(plannerReturnedMessage, "58a7281c55363e65b3c9eb82");
provisioner = new ProvisionerCaller(messageBrokerHost);
dripComponetens.add(provisioner);
provisioner.call(provisionerInvokationMessage);
return "";
} catch (JSONException | IOException | TimeoutException | InterruptedException ex) {
Logger.getLogger(PlannerController.class.getName()).log(Level.SEVERE, null, ex);
} finally {
for (DRIPCaller drip : dripComponetens) {
if (drip != null) {
try {
drip.close();
} catch (IOException | TimeoutException ex) {
Logger.getLogger(PlannerController.class.getName()).log(Level.SEVERE, null, ex);
}
// ToscaRepresentation plan = simplePlannerService.getPlan(toscaId);
// return plan.getId();
ToscaRepresentation plan = plannerService.getPlan(toscaId);
if (plan == null) {
throw new NotFoundException("Could not make plan");
}
}
return plan.getId();
} catch (JSONException | IOException | TimeoutException | InterruptedException ex) {
Logger.getLogger(PlannerController.class.getName()).log(Level.SEVERE, null, ex);
}
return null;
}
private Message buildPlannerMessage(String toscaId) throws JSONException, UnsupportedEncodingException, IOException {
ToscaRepresentation t2 = dao.findOne(toscaId);
Map<String, Object> map = t2.getKvMap();
String ymlStr = Converter.map2YmlString(map);
ymlStr = ymlStr.replaceAll("\\uff0E", "\\.");
byte[] bytes = ymlStr.getBytes();
Message invokationMessage = new Message();
List parameters = new ArrayList();
Parameter fileArgument = new Parameter();
String charset = "UTF-8";
fileArgument.setValue(new String(bytes, charset));
fileArgument.setEncoding(charset);
fileArgument.setName("input");
parameters.add(fileArgument);
fileArgument = new Parameter();
bytes = Files.readAllBytes(Paths.get(System.getProperty("user.home") + File.separator + "Downloads/DRIP/example_a.yml"));
fileArgument.setValue(new String(bytes, charset));
fileArgument.setEncoding(charset);
fileArgument.setName("example");
parameters.add(fileArgument);
invokationMessage.setParameters(parameters);
invokationMessage.setCreationDate((System.currentTimeMillis()));
return invokationMessage;
}
private Message buildProvisionerMessage(Message plannerReturnedMessage, String cloudConfID) throws IOException, JsonProcessingException, JSONException {
Message invokationMessage = new Message();
List<Parameter> parameters = new ArrayList();
CloudCredentials cred = cloudCredentialsDao.findOne(cloudConfID);
Parameter conf = buildCloudConfParam(cred);
parameters.add(conf);
List<Parameter> certs = buildCertificatesParam(cred);
parameters.addAll(certs);
List<Parameter> topologies = buildTopologyParams(plannerReturnedMessage);
parameters.addAll(topologies);
invokationMessage.setParameters(parameters);
invokationMessage.setCreationDate((System.currentTimeMillis()));
return invokationMessage;
}
private Parameter buildCloudConfParam(CloudCredentials cred) throws JsonProcessingException, JSONException, IOException {
Parameter conf = null;
switch (cred.getCloudProviderName().toLowerCase()) {
case "ec2":
conf = buildEC2Conf(cred);
break;
@RequestMapping(value = "/{id}", method = RequestMethod.GET, params = {"format"})
@RolesAllowed({UserService.USER, UserService.ADMIN})
public @ResponseBody
String get(@PathVariable("id") String id, @RequestParam(value = "format") String format) {
try {
return toscaService.get(id, format, ToscaRepresentation.Type.PLAN);
} catch (JSONException ex) {
Logger.getLogger(ToscaController.class.getName()).log(Level.SEVERE, null, ex);
}
return conf;
return null;
}
private List<Parameter> buildCertificatesParam(CloudCredentials cred) {
List<LoginKey> loginKeys = cred.getLogineKys();
List<Parameter> parameters = new ArrayList<>();
for (LoginKey lk : loginKeys) {
String domainName = lk.getAttributes().get("domain_name");
if (domainName == null) {
domainName = lk.getAttributes().get("domain_name ");
}
Parameter cert = new Parameter();
cert.setName("certificate");
cert.setValue(lk.getKey());
Map<String, String> attributes = new HashMap<>();
attributes.put("filename", domainName);
cert.setAttributes(attributes);
parameters.add(cert);
}
return parameters;
@RequestMapping(value = "/{id}", method = RequestMethod.DELETE)
@RolesAllowed({UserService.USER, UserService.ADMIN})
public @ResponseBody
String delete(@PathVariable("id") String id) {
toscaService.delete(id, ToscaRepresentation.Type.PLAN);
return "Deleted tosca :" + id;
}
private List<Parameter> buildTopologyParams(Message plannerReturnedMessage) {
List<Parameter> returnedParams = plannerReturnedMessage.getParameters();
List<Parameter> parameters = new ArrayList();
for (Parameter param : returnedParams) {
Parameter topology = new Parameter();
String name = param.getName();
if (name.equals("planner_output_all.yml")) {
topology.setName("topology");
topology.setValue(param.getValue());
Map<String, String> attributes = new HashMap<>();
attributes.put("level", "0");
attributes.put("filename", FilenameUtils.removeExtension(name));
topology.setAttributes(attributes);
} else {
topology.setName("topology");
topology.setValue(param.getValue());
Map<String, String> attributes = new HashMap<>();
attributes.put("level", "1");
attributes.put("filename", FilenameUtils.removeExtension(name));
topology.setAttributes(attributes);
}
parameters.add(topology);
// http://localhost:8080/drip-api/tosca/ids
@RequestMapping(value = "/ids")
@RolesAllowed({UserService.USER, UserService.ADMIN})
public @ResponseBody
List<String> getIds() {
List<ToscaRepresentation> all = toscaService.findAll(ToscaRepresentation.Type.PLAN);
List<String> ids = new ArrayList<>();
for (ToscaRepresentation tr : all) {
ids.add(tr.getId());
}
return parameters;
return ids;
}
private Parameter buildEC2Conf(CloudCredentials cred) throws JsonProcessingException, JSONException, IOException {
Properties prop = Converter.getEC2Properties(cred);
ByteArrayOutputStream baos = new ByteArrayOutputStream();
prop.store(baos, null);
byte[] bytes = baos.toByteArray();
Parameter conf = new Parameter();
conf.setName("ec2.conf");
String charset = "UTF-8";
conf.setValue(new String(bytes, charset));
return conf;
}
}
/*
* Copyright 2017 S. Koulouzis, Wang Junchao, Huan Zhou, Yang Hu
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nl.uva.sne.drip.api.rest;
import nl.uva.sne.drip.commons.types.ProvisionRequest;
import com.fasterxml.jackson.core.JsonProcessingException;
import java.io.ByteArrayOutputStream;
import nl.uva.sne.drip.commons.types.ToscaRepresentation;
import java.io.IOException;
import java.util.ArrayList;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.Properties;
import java.util.Set;
import java.util.concurrent.TimeoutException;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.annotation.security.RolesAllowed;
import nl.uva.sne.drip.api.dao.CloudCredentialsDao;
import nl.uva.sne.drip.commons.types.Message;
import nl.uva.sne.drip.commons.types.Parameter;
import nl.uva.sne.drip.commons.utils.Converter;
import org.json.JSONException;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RequestMethod;
import org.springframework.web.bind.annotation.ResponseBody;
import org.springframework.web.bind.annotation.RestController;
import nl.uva.sne.drip.api.dao.ToscaDao;
import nl.uva.sne.drip.api.exception.NotFoundException;
import nl.uva.sne.drip.api.rpc.DRIPCaller;
import nl.uva.sne.drip.api.rpc.ProvisionerCaller;
import nl.uva.sne.drip.api.service.UserService;
import nl.uva.sne.drip.commons.types.CloudCredentials;
import nl.uva.sne.drip.commons.types.LoginKey;
import org.apache.commons.io.FilenameUtils;
import org.springframework.web.bind.annotation.RequestBody;
/**
*
* @author S. Koulouzis
*/
@RestController
@RequestMapping("/user/provisioner")
@Component
public class ProvisionController {

    @Value("${message.broker.host}")
    private String messageBrokerHost;

    @Autowired
    private ToscaDao toscaDao;

    @Autowired
    private CloudCredentialsDao cloudCredentialsDao;

    /**
     * Returns a sample {@link ProvisionRequest}.
     *
     * NOTE(review): the IDs below are hard-coded sample document IDs —
     * presumably placeholders for manual testing; confirm they should not be
     * looked up dynamically before relying on this endpoint.
     *
     * @return a request pre-filled with fixed IDs
     */
    @RequestMapping(value = "/get", method = RequestMethod.GET)
    @RolesAllowed({UserService.USER, UserService.ADMIN})
    public @ResponseBody
    ProvisionRequest get() {
        ProvisionRequest re = new ProvisionRequest();
        re.setCloudConfID("58a1f0a963d42f004b1d63ad");
        re.setPlanID("58ac1e70e4949b54f8ac1051");
        re.setUserKeyID("58a20be263d4a5898835676e");
        re.setUserScriptID("58a2112363d41754cca042b4");
        return re;
    }

    /**
     * Submits a provisioning request to the provisioner component over the
     * message broker.
     *
     * @param req references (by document ID) the cloud credentials, plan,
     *            user key and user script to provision with
     * @return an empty string on success, or {@code null} when the call
     *         failed (the failure is logged)
     */
    @RequestMapping(value = "/provision", method = RequestMethod.POST)
    @RolesAllowed({UserService.USER, UserService.ADMIN})
    public @ResponseBody
    String plann(@RequestBody ProvisionRequest req) {
        // try-with-resources guarantees the broker connection is closed even
        // when building or sending the message fails.
        try (DRIPCaller provisioner = new ProvisionerCaller(messageBrokerHost)) {
            Message provisionerInvokationMessage = buildProvisionerMessage(req);
            provisioner.call(provisionerInvokationMessage);
            return "";
        } catch (InterruptedException ex) {
            // Restore the interrupt flag so callers can observe it.
            Thread.currentThread().interrupt();
            Logger.getLogger(ProvisionController.class.getName()).log(Level.SEVERE, null, ex);
        } catch (IOException | TimeoutException | JSONException ex) {
            Logger.getLogger(ProvisionController.class.getName()).log(Level.SEVERE, null, ex);
        }
        return null;
    }

    /**
     * Assembles the broker message for the provisioner from the documents
     * referenced by the request: cloud configuration, login certificates,
     * topology (plan) files, user scripts and user keys.
     *
     * @throws NotFoundException if the referenced credentials do not exist
     */
    private Message buildProvisionerMessage(ProvisionRequest pReq) throws JSONException, IOException {
        Message invokationMessage = new Message();
        List<Parameter> parameters = new ArrayList<>();
        CloudCredentials cred = cloudCredentialsDao.findOne(pReq.getCloudConfID());
        if (cred == null) {
            throw new NotFoundException();
        }
        parameters.add(buildCloudConfParam(cred));
        parameters.addAll(buildCertificatesParam(cred));
        parameters.addAll(buildTopologyParams(pReq.getPlanID()));
        parameters.addAll(buildScriptParams(pReq.getUserScriptID()));
        // BUG FIX: the original added the user scripts a second time and
        // discarded the user keys; add the keys that were actually built.
        parameters.addAll(buildKeysParams(pReq.getUserKeyID()));
        invokationMessage.setParameters(parameters);
        invokationMessage.setCreationDate(System.currentTimeMillis());
        return invokationMessage;
    }

    /**
     * Builds the provider-specific configuration parameter. Only "ec2" is
     * handled at the moment; any other provider yields {@code null}.
     */
    private Parameter buildCloudConfParam(CloudCredentials cred) throws JsonProcessingException, JSONException, IOException {
        Parameter conf = null;
        switch (cred.getCloudProviderName().toLowerCase()) {
            case "ec2":
                conf = buildEC2Conf(cred);
                break;
        }
        return conf;
    }

    /**
     * Converts each stored login key into a "certificate" parameter whose
     * filename attribute is the key's domain name.
     */
    private List<Parameter> buildCertificatesParam(CloudCredentials cred) {
        List<LoginKey> loginKeys = cred.getLoginKeys();
        List<Parameter> parameters = new ArrayList<>();
        for (LoginKey lk : loginKeys) {
            String domainName = lk.getAttributes().get("domain_name");
            if (domainName == null) {
                // Deliberate fallback: tolerate a trailing space in the
                // attribute key as stored by some clients.
                domainName = lk.getAttributes().get("domain_name ");
            }
            Parameter cert = new Parameter();
            cert.setName("certificate");
            cert.setValue(lk.getKey());
            Map<String, String> attributes = new HashMap<>();
            attributes.put("filename", domainName);
            cert.setAttributes(attributes);
            parameters.add(cert);
        }
        return parameters;
    }

    /**
     * Builds one "topology" parameter for the plan and one for each of its
     * lower-level plans, serialized as YAML.
     *
     * @throws NotFoundException if the plan or a referenced lower-level plan
     *         does not exist (the original dereferenced a null lookup)
     */
    private List<Parameter> buildTopologyParams(String planID) throws JSONException {
        ToscaRepresentation plan = toscaDao.findOne(planID);
        if (plan == null) {
            throw new NotFoundException();
        }
        List<Parameter> parameters = new ArrayList<>();
        Parameter topology = new Parameter();
        topology.setName("topology");
        topology.setValue(Converter.map2YmlString(plan.getKvMap()));
        Map<String, String> attributes = new HashMap<>();
        attributes.put("level", String.valueOf(plan.getLevel()));
        attributes.put("filename", FilenameUtils.removeExtension(plan.getName()));
        topology.setAttributes(attributes);
        parameters.add(topology);
        Set<String> ids = plan.getLowerLevelIDs();
        for (String lowID : ids) {
            plan = toscaDao.findOne(lowID);
            if (plan == null) {
                throw new NotFoundException();
            }
            topology = new Parameter();
            topology.setName("topology");
            topology.setValue(Converter.map2YmlString(plan.getKvMap()));
            attributes = new HashMap<>();
            attributes.put("level", String.valueOf(plan.getLevel()));
            attributes.put("filename", FilenameUtils.removeExtension(plan.getName()));
            topology.setAttributes(attributes);
            parameters.add(topology);
        }
        return parameters;
    }

    /**
     * Serializes the EC2 credentials as a java.util.Properties file and wraps
     * it in an "ec2.conf" parameter.
     */
    private Parameter buildEC2Conf(CloudCredentials cred) throws JsonProcessingException, JSONException, IOException {
        Properties prop = Converter.getEC2Properties(cred);
        ByteArrayOutputStream baos = new ByteArrayOutputStream();
        prop.store(baos, null);
        byte[] bytes = baos.toByteArray();
        Parameter conf = new Parameter();
        conf.setName("ec2.conf");
        String charset = "UTF-8";
        conf.setValue(new String(bytes, charset));
        return conf;
    }

    // TODO: not implemented yet — currently aborts any provision attempt.
    private List<Parameter> buildScriptParams(String userScriptID) {
        throw new UnsupportedOperationException("Not supported yet.");
    }

    // TODO: not implemented yet — currently aborts any provision attempt.
    private List<Parameter> buildKeysParams(String userKeyID) {
        throw new UnsupportedOperationException("Not supported yet.");
    }
}
......@@ -16,16 +16,12 @@
package nl.uva.sne.drip.api.rest;
import nl.uva.sne.drip.commons.types.ToscaRepresentation;
import nl.uva.sne.drip.api.rpc.PlannerCaller;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeoutException;
import java.util.logging.Level;
import java.util.logging.Logger;
import javax.annotation.security.RolesAllowed;
import nl.uva.sne.drip.commons.utils.Converter;
import org.json.JSONException;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
......@@ -36,7 +32,8 @@ import org.springframework.web.bind.annotation.ResponseBody;
import org.springframework.web.bind.annotation.RestController;
import org.springframework.web.multipart.MultipartFile;
import org.springframework.web.bind.annotation.PathVariable;
import nl.uva.sne.drip.api.dao.ToscaDao;
import nl.uva.sne.drip.api.exception.BadRequestException;
import nl.uva.sne.drip.api.service.ToscaService;
import nl.uva.sne.drip.api.service.UserService;
/**
......@@ -48,89 +45,44 @@ import nl.uva.sne.drip.api.service.UserService;
@Component
public class ToscaController {
// @Value("${message.broker.host}")
// private String messageBrokerHost;
@Autowired
private ToscaDao dao;
private ToscaService toscaService;
// curl -X POST -F "file=@DRIP/input.yaml" localhost:8080/drip-api/upload
@RequestMapping(value = "/upload", method = RequestMethod.POST)
@RolesAllowed({UserService.USER, UserService.ADMIN})
public @ResponseBody
String toscaUpload(@RequestParam("file") MultipartFile file) {
PlannerCaller planner = null;
if (!file.isEmpty()) {
if (file.isEmpty()) {
throw new BadRequestException("Must uplaod a file");
}
try {
String originalFileName = file.getOriginalFilename();
String name = System.currentTimeMillis() + "_" + originalFileName;
byte[] bytes = file.getBytes();
String str = new String(bytes, "UTF-8");
str = str.replaceAll("\\.", "\uff0E");
Map<String, Object> map = Converter.ymlString2Map(str);
ToscaRepresentation t = new ToscaRepresentation();
t.setName(name);
t.setKvMap(map);
dao.save(t);
return t.getId();
return toscaService.save(file, ToscaRepresentation.Type.SIDE);
} catch (IOException | IllegalStateException ex) {
Logger.getLogger(ToscaController.class.getName()).log(Level.SEVERE, null, ex);
} finally {
if (planner != null) {
try {
planner.close();
} catch (IOException | TimeoutException ex) {
Logger.getLogger(ToscaController.class.getName()).log(Level.WARNING, null, ex);
}
}
}
}
return null;
}
// curl http://localhost:8080/drip-api/tosca/589e1160d9925f9dc127e882/?fromat=yaml
@RequestMapping(value = "/{id}", method = RequestMethod.GET, params = {"fromat"})
@RequestMapping(value = "/{id}", method = RequestMethod.GET, params = {"format"})
@RolesAllowed({UserService.USER, UserService.ADMIN})
public @ResponseBody
String get(@PathVariable("id") String id, @RequestParam(value = "fromat") String fromat) {
String get(@PathVariable("id") String id, @RequestParam(value = "format") String format) {
try {
Map<String, Object> map = dao.findOne(id).getKvMap();
if (fromat != null && fromat.equals("yml")) {
String ymlStr = Converter.map2YmlString(map);
ymlStr = ymlStr.replaceAll("\\uff0E", "\\.");
return ymlStr;
}
if (fromat != null && fromat.equals("json")) {
String jsonStr = Converter.map2JsonString(map);
jsonStr = jsonStr.replaceAll("\\uff0E", "\\.");
return jsonStr;
}
String ymlStr = Converter.map2YmlString(map);
ymlStr = ymlStr.replaceAll("\\uff0E", "\\.");
return ymlStr;
return toscaService.get(id, format,ToscaRepresentation.Type.SIDE);
} catch (JSONException ex) {
Logger.getLogger(ToscaController.class.getName()).log(Level.SEVERE, null, ex);
}
return null;
}
@RequestMapping(value = "/{id}", method = RequestMethod.GET)
@RolesAllowed({UserService.USER, UserService.ADMIN})
public @ResponseBody
ToscaRepresentation getToscaRepresentation(@PathVariable("id") String id) {
ToscaRepresentation tosca = dao.findOne(id);
return tosca;
}
@RequestMapping(value = "/{id}", method = RequestMethod.DELETE)
@RolesAllowed({UserService.USER, UserService.ADMIN})
public @ResponseBody
String delete(@PathVariable("id") String id) {
dao.delete(id);
toscaService.delete(id,ToscaRepresentation.Type.SIDE);
return "Deleted tosca :" + id;
}
......@@ -139,7 +91,7 @@ public class ToscaController {
@RolesAllowed({UserService.USER, UserService.ADMIN})
public @ResponseBody
List<String> getIds() {
List<ToscaRepresentation> all = dao.findAll();
List<ToscaRepresentation> all = toscaService.findAll(ToscaRepresentation.Type.SIDE);
List<String> ids = new ArrayList<>();
for (ToscaRepresentation tr : all) {
ids.add(tr.getId());
......
......@@ -47,6 +47,16 @@ public class UserController {
@Autowired
private UserService service;
/**
* Register new user. A normal user cannot create accounts, only the user
* with the 'ADMIN' role can do that.
*
* @param user
* @return Response on success: The ID of the newly register user. Response
* on fail: If the user name already exists, or the user name is 'null' or
* the password is 'null' there will be a 'BadRequestException'
*
*/
@RequestMapping(value = "/register", method = RequestMethod.POST)
@RolesAllowed({UserService.ADMIN})
public @ResponseBody
......@@ -74,8 +84,7 @@ public class UserController {
if (registeredUser == null) {
throw new NotFoundException("User " + user.getUsername() + " not found");
}
service.getDao().save(user);
return user.getId();
return this.register(user);
}
@RequestMapping(value = "/{id}", method = RequestMethod.GET)
......
......@@ -15,7 +15,7 @@
*/
package nl.uva.sne.drip.api.rest;
import nl.uva.sne.drip.commons.types.UserPublicKey;
import nl.uva.sne.drip.commons.types.LoginKey;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
......@@ -32,6 +32,7 @@ import org.springframework.web.bind.annotation.ResponseBody;
import org.springframework.web.bind.annotation.RestController;
import org.springframework.web.multipart.MultipartFile;
import nl.uva.sne.drip.api.dao.UserKeyDao;
import nl.uva.sne.drip.api.exception.NotFoundException;
import nl.uva.sne.drip.api.service.UserService;
import org.springframework.web.bind.annotation.PathVariable;
......@@ -59,9 +60,10 @@ public class UserPublicKeysController {
byte[] bytes = file.getBytes();
String key = new String(bytes, "UTF-8");
UserPublicKey upk = new UserPublicKey();
LoginKey upk = new LoginKey();
upk.setKey(key);
upk.setName(name);
upk.setType(LoginKey.Type.PUBLIC);
dao.save(upk);
return upk.getId();
......@@ -73,21 +75,25 @@ public class UserPublicKeysController {
}
// curl -H "Content-Type: application/json" -X POST -d '{"key":"ssh-rsa AAAAB3NzaDWBqs75i849MytgwgQcRYMcsXIki0yeYTKABH6JqoiyFBHtYlyh/EV1t6cujb9LyNP4J5EN4fPbtwKYvxecd0LojSPxl4wjQlfrHyg6iKUYB7hVzGqACMvgYZHrtHPfrdEmOGPplPVPpoaX2j+u0BZ0yYhrWMKjzyYZKa68yy5N18+Gq+1p83HfUDwIU9wWaUYdgEvDujqF6b8p3z6LDx9Ob+RanSMZSt+b8eZRcd+F2Oy/gieJEJ8kc152VIOv8UY1xB3hVEwVnSRGgrAsa+9PChfF6efXUGWiKf8KBlWgBOYsSTsOY4ks9zkXMnbcTdC+o7xspOkyIcWjv us@u\n","name":"id_rsa.pub"}' localhost:8080/drip-api/user_key/
@RequestMapping(method = RequestMethod.POST)
@RolesAllowed({UserService.USER, UserService.ADMIN})
public @ResponseBody
String postConf(UserPublicKey uk) throws JSONException {
String name = System.currentTimeMillis() + "_" + uk.getName();
uk.setName(name);
dao.save(uk);
return uk.getId();
}
// @RequestMapping(method = RequestMethod.POST)
// @RolesAllowed({UserService.USER, UserService.ADMIN})
// public @ResponseBody
// String postConf(LoginKey uk) throws JSONException {
// String name = System.currentTimeMillis() + "_" + uk.getName();
// uk.setName(name);
// dao.save(uk);
// return uk.getId();
// }
//curl localhost:8080/drip-api/user_key/58a20be263d4a5898835676e
@RequestMapping(value = "/{id}", method = RequestMethod.GET)
@RolesAllowed({UserService.USER, UserService.ADMIN})
public UserPublicKey get(@PathVariable("id") String id) {
return dao.findOne(id);
public LoginKey get(@PathVariable("id") String id) {
LoginKey key = dao.findOne(id);
if (key == null || !key.getType().equals(LoginKey.Type.PUBLIC)) {
throw new NotFoundException();
}
return key;
}
// localhost:8080/drip-api/user_key/ids
......@@ -95,11 +101,13 @@ public class UserPublicKeysController {
@RolesAllowed({UserService.USER, UserService.ADMIN})
public @ResponseBody
List<String> getIds() {
List<UserPublicKey> all = dao.findAll();
List<LoginKey> all = dao.findAll();
List<String> ids = new ArrayList<>();
for (UserPublicKey tr : all) {
for (LoginKey tr : all) {
if (tr.getType().equals(LoginKey.Type.PUBLIC)) {
ids.add(tr.getId());
}
}
return ids;
}
......
package nl.uva.sne.drip.api.rpc;
import com.fasterxml.jackson.core.JsonParser;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.rabbitmq.client.AMQP;
import com.rabbitmq.client.Channel;
......@@ -8,11 +9,17 @@ import com.rabbitmq.client.ConnectionFactory;
import com.rabbitmq.client.DefaultConsumer;
import com.rabbitmq.client.Envelope;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.UUID;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeoutException;
import nl.uva.sne.drip.commons.types.Message;
import nl.uva.sne.drip.commons.types.Parameter;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
/*
* Copyright 2017 S. Koulouzis, Wang Junchao, Huan Zhou, Yang Hu
......@@ -33,7 +40,7 @@ import nl.uva.sne.drip.commons.types.Message;
*
* @author S. Koulouzis
*/
public abstract class DRIPCaller {
public abstract class DRIPCaller implements AutoCloseable {
private final Connection connection;
private final Channel channel;
......@@ -75,6 +82,7 @@ public abstract class DRIPCaller {
return replyQueueName;
}
@Override
public void close() throws IOException, TimeoutException {
if (channel != null && channel.isOpen()) {
channel.close();
......@@ -84,9 +92,10 @@ public abstract class DRIPCaller {
}
}
public Message call(Message r) throws IOException, TimeoutException, InterruptedException {
public Message call(Message r) throws IOException, TimeoutException, InterruptedException, JSONException {
ObjectMapper mapper = new ObjectMapper();
mapper.configure(JsonParser.Feature.ALLOW_SINGLE_QUOTES, true);
String jsonInString = mapper.writeValueAsString(r);
//Build a correlation ID to distinguish responds
......@@ -109,8 +118,26 @@ public abstract class DRIPCaller {
}
});
String strResponse = response.take();
strResponse = strResponse.replaceAll("'null'", "null").replaceAll("\'", "\"").replaceAll(" ", "");
// System.err.println(strResponse);
return mapper.readValue(strResponse, Message.class);
JSONObject jsonObj = new JSONObject(strResponse);
Message responseMessage = new Message();
responseMessage.setCreationDate((Long) jsonObj.get("creationDate"));
JSONArray jsonParams = (JSONArray) jsonObj.get("parameters");
List<Parameter> parameters = new ArrayList<>();
for (int i = 0; i < jsonParams.length(); i++) {
JSONObject jsonParam = (JSONObject) jsonParams.get(i);
Parameter parameter = new Parameter();
parameter.setName(jsonParam.getString("name"));
parameter.setName(jsonParam.getString("value"));
parameters.add(parameter);
}
responseMessage.setParameters(parameters);
return responseMessage;//mapper.readValue(strResponse, Message.class);
}
}
......@@ -28,7 +28,7 @@ public class PlannerCaller extends DRIPCaller {
private static final String REQUEST_QUEUE_NAME = "planner_queue";
public PlannerCaller(String messageBrokerHost) throws IOException, TimeoutException {
super(messageBrokerHost,REQUEST_QUEUE_NAME);
super(messageBrokerHost, REQUEST_QUEUE_NAME);
}
}
......@@ -15,6 +15,20 @@
*/
package nl.uva.sne.drip.api.service;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.concurrent.TimeoutException;
import nl.uva.sne.drip.api.rpc.PlannerCaller;
import nl.uva.sne.drip.commons.types.Message;
import nl.uva.sne.drip.commons.types.Parameter;
import nl.uva.sne.drip.commons.types.ToscaRepresentation;
import nl.uva.sne.drip.commons.utils.Converter;
import org.json.JSONException;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
/**
......@@ -24,4 +38,50 @@ import org.springframework.stereotype.Service;
@Service
public class PlannerService {

    @Autowired
    private ToscaService toscaService;

    // Hostname of the AMQP broker, injected from application properties.
    @Value("${message.broker.host}")
    private String messageBrokerHost;

    /**
     * Requests a deployment plan for the stored TOSCA document with the given
     * id by sending it to the planner over the message broker.
     *
     * <p>NOTE(review): this method is visibly work in progress — the planner's
     * returned parameters are iterated but never consumed, the created
     * ToscaRepresentation gets a null kvMap, and the method always returns
     * null. TODO: map the planner output into the returned representation.
     *
     * @param toscaId id of the stored TOSCA document to plan for
     * @return currently always null (work in progress, see note)
     */
    public ToscaRepresentation getPlan(String toscaId) throws JSONException, UnsupportedEncodingException, IOException, TimeoutException, InterruptedException {
        // try-with-resources closes the AMQP channel/connection even on error.
        try (PlannerCaller planner = new PlannerCaller(messageBrokerHost)) {
            Message plannerInvokationMessage = buildPlannerMessage(toscaId);
            Message plannerReturnedMessage = planner.call(plannerInvokationMessage);
            List<Parameter> parameters = plannerReturnedMessage.getParameters();
            for (Parameter param : parameters) {
                // TODO: consume the planner output parameters.
            }
            ToscaRepresentation tr = new ToscaRepresentation();
            Map<String, Object> kvMap = null;
            tr.setKvMap(kvMap);
            tr.setType(ToscaRepresentation.Type.PLAN);
            return null;
        }
    }

    /**
     * Builds the request message for the planner: the stored document is
     * serialized to JSON and attached as the single "input" parameter.
     *
     * @param toscaId id of the stored TOSCA document
     * @return the message to send to the planner queue
     */
    private Message buildPlannerMessage(String toscaId) throws JSONException, UnsupportedEncodingException {
        ToscaRepresentation t2 = toscaService.getDao().findOne(toscaId);
        Map<String, Object> map = t2.getKvMap();
        String json = Converter.map2JsonString(map);
        // Stored keys use U+FF0E in place of '.' (Mongo key restriction);
        // translate back before sending.
        json = json.replaceAll("\\uff0E", "\\.");
        byte[] bytes = json.getBytes();
        Message invokationMessage = new Message();
        // Parameterized list instead of the previous raw List.
        List<Parameter> parameters = new ArrayList<>();
        Parameter jsonArgument = new Parameter();
        String charset = "UTF-8";
        jsonArgument.setValue(new String(bytes, charset));
        jsonArgument.setEncoding(charset);
        jsonArgument.setName("input");
        parameters.add(jsonArgument);
        invokationMessage.setParameters(parameters);
        invokationMessage.setCreationDate((System.currentTimeMillis()));
        return invokationMessage;
    }
}
/*
* Copyright 2017 S. Koulouzis, Wang Junchao, Huan Zhou, Yang Hu
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nl.uva.sne.drip.api.service;
import java.io.File;
import java.io.IOException;
import java.io.UnsupportedEncodingException;
import java.nio.file.Files;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.concurrent.TimeoutException;
import nl.uva.sne.drip.api.rpc.PlannerCaller;
import nl.uva.sne.drip.commons.types.Message;
import nl.uva.sne.drip.commons.types.Parameter;
import nl.uva.sne.drip.commons.types.ToscaRepresentation;
import nl.uva.sne.drip.commons.utils.Converter;
import org.json.JSONException;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;
/**
*
* @author S. Koulouzis
*/
@Service
public class SimplePlannerService {

    // Hostname of the AMQP broker, injected from application properties.
    @Value("${message.broker.host}")
    private String messageBrokerHost;

    @Autowired
    private ToscaService toscaService;

    /**
     * Asks the planner (via the message broker) to plan the TOSCA document
     * stored under the given id and stores the returned plan files.
     *
     * <p>The planner replies with one parameter per generated file. The file
     * named 'planner_output_all.yml' becomes the top-level plan (level 0);
     * every other file is stored as a level-1 plan and linked to the top
     * level through its id.
     *
     * @param toscaId id of the stored TOSCA document to plan for
     * @return the saved top-level plan representation
     */
    public ToscaRepresentation getPlan(String toscaId) throws JSONException, IOException, TimeoutException, InterruptedException {
        Message plannerInvokationMessage = buildSimplePlannerMessage(toscaId);
        ToscaRepresentation topLevel = new ToscaRepresentation();
        // try-with-resources: the AMQP channel/connection is closed even if
        // the call throws (the previous version leaked it on exception).
        try (PlannerCaller planner = new PlannerCaller(messageBrokerHost)) {
            Message plannerReturnedMessage = planner.call(plannerInvokationMessage);
            List<Parameter> toscaFiles = plannerReturnedMessage.getParameters();
            for (Parameter p : toscaFiles) {
                String originalFileName = p.getName();
                String name = System.currentTimeMillis() + "_" + originalFileName;
                if (originalFileName.equals("planner_output_all.yml")) {
                    topLevel.setName(name);
                    topLevel.setLevel(0);
                    topLevel.setKvMap(Converter.ymlString2Map(p.getValue()));
                } else {
                    ToscaRepresentation tr = new ToscaRepresentation();
                    tr.setName(name);
                    tr.setKvMap(Converter.ymlString2Map(p.getValue()));
                    tr.setLevel(1);
                    tr.setType(ToscaRepresentation.Type.PLAN);
                    toscaService.getDao().save(tr);
                    // Link the lower-level file to the top-level plan. Doing
                    // the save/link only in this branch fixes the NPE the old
                    // code hit when the top-level file arrived first (tr was
                    // still null) and avoids re-saving the previous file.
                    Set<String> ids = topLevel.getLowerLevelIDs();
                    if (ids == null) {
                        ids = new HashSet<>();
                    }
                    ids.add(tr.getId());
                    topLevel.setLowerLevelIDs(ids);
                }
            }
        }
        topLevel.setType(ToscaRepresentation.Type.PLAN);
        toscaService.getDao().save(topLevel);
        return topLevel;
    }

    /**
     * Builds the planner request: the stored document as YAML ("input") plus
     * an example file ("example").
     *
     * @param toscaId id of the stored TOSCA document
     * @return the message to send to the planner queue
     */
    private Message buildSimplePlannerMessage(String toscaId) throws JSONException, UnsupportedEncodingException, IOException {
        ToscaRepresentation t2 = toscaService.getDao().findOne(toscaId);
        Map<String, Object> map = t2.getKvMap();
        String ymlStr = Converter.map2YmlString(map);
        // Stored keys use U+FF0E in place of '.' (Mongo key restriction);
        // translate back before sending.
        ymlStr = ymlStr.replaceAll("\\uff0E", "\\.");

        Message invokationMessage = new Message();
        List<Parameter> parameters = new ArrayList<>();
        String charset = "UTF-8";

        Parameter fileArgument = new Parameter();
        // Pass the string directly; the old getBytes()/new String round-trip
        // mixed the platform charset with UTF-8.
        fileArgument.setValue(ymlStr);
        fileArgument.setEncoding(charset);
        fileArgument.setName("input");
        parameters.add(fileArgument);

        // TODO(review): hard-coded example path under the developer's home
        // directory; should come from configuration.
        fileArgument = new Parameter();
        byte[] bytes = Files.readAllBytes(Paths.get(System.getProperty("user.home") + File.separator + "Downloads/DRIP/example_a.yml"));
        fileArgument.setValue(new String(bytes, charset));
        fileArgument.setEncoding(charset);
        fileArgument.setName("example");
        parameters.add(fileArgument);

        invokationMessage.setParameters(parameters);
        invokationMessage.setCreationDate(System.currentTimeMillis());
        return invokationMessage;
    }
}
/*
* Copyright 2017 S. Koulouzis, Wang Junchao, Huan Zhou, Yang Hu
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package nl.uva.sne.drip.api.service;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Map;
import java.util.Set;
import nl.uva.sne.drip.api.dao.ToscaDao;
import nl.uva.sne.drip.api.exception.NotFoundException;
import nl.uva.sne.drip.commons.types.ToscaRepresentation;
import nl.uva.sne.drip.commons.utils.Converter;
import org.json.JSONException;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Service;
import org.springframework.web.multipart.MultipartFile;
/**
*
* @author S. Koulouzis
*/
@Service
public class ToscaService {

    @Autowired
    private ToscaDao dao;

    /**
     * Returns the stored TOSCA document with the given id, merged with all of
     * its lower-level documents, serialized as YAML or JSON.
     *
     * @param id the document id
     * @param format "json" for JSON output; anything else (including null or
     * "yml") yields YAML
     * @param type the expected document type; a document of another type is
     * treated as not found
     * @return the serialized, merged document
     * @throws NotFoundException if no document with this id and type exists
     */
    public String get(String id, String format, ToscaRepresentation.Type type) throws JSONException {
        ToscaRepresentation tosca = dao.findOne(id);
        if (tosca == null || !tosca.getType().equals(type)) {
            throw new NotFoundException();
        }
        Map<String, Object> map = tosca.getKvMap();
        // Merge in the lower-level documents, if any.
        Set<String> ids = tosca.getLowerLevelIDs();
        if (ids != null) {
            for (String lowID : ids) {
                map.putAll(dao.findOne(lowID).getKvMap());
            }
        }
        // Keys are stored with U+FF0E in place of '.' (Mongo key restriction);
        // translate back on the way out. YAML is the default representation,
        // which removes the old duplicated "yml" branch.
        if (format != null && format.equals("json")) {
            return Converter.map2JsonString(map).replaceAll("\\uff0E", "\\.");
        }
        return Converter.map2YmlString(map).replaceAll("\\uff0E", "\\.");
    }

    /**
     * Stores an uploaded YAML file as a TOSCA document of the given type.
     * The stored name is prefixed with a timestamp to keep it unique.
     *
     * @param file the uploaded YAML file
     * @param type the document type to store it under
     * @return the id of the newly stored document
     */
    public String save(MultipartFile file, ToscaRepresentation.Type type) throws IOException {
        String originalFileName = file.getOriginalFilename();
        String name = System.currentTimeMillis() + "_" + originalFileName;
        byte[] bytes = file.getBytes();
        String str = new String(bytes, "UTF-8");
        // '.' is not allowed in Mongo map keys; store it as U+FF0E.
        str = str.replaceAll("\\.", "\uff0E");
        Map<String, Object> map = Converter.ymlString2Map(str);
        ToscaRepresentation t = new ToscaRepresentation();
        t.setName(name);
        t.setKvMap(map);
        t.setType(type);
        dao.save(t);
        return t.getId();
    }

    /**
     * Deletes the document with the given id if it exists and has the given
     * type.
     *
     * @throws NotFoundException if the document does not exist (the old code
     * NPE'd on a missing id) or has a different type
     */
    public void delete(String id, ToscaRepresentation.Type type) {
        ToscaRepresentation tosca = dao.findOne(id);
        if (tosca == null || !tosca.getType().equals(type)) {
            throw new NotFoundException();
        }
        dao.delete(id);
    }

    /**
     * Returns all stored documents of the given type.
     *
     * @throws NotFoundException if the underlying DAO returns no list at all
     */
    public List<ToscaRepresentation> findAll(ToscaRepresentation.Type type) {
        List<ToscaRepresentation> all = dao.findAll();
        if (all == null) {
            throw new NotFoundException();
        }
        List<ToscaRepresentation> allType = new ArrayList<>();
        for (ToscaRepresentation tr : all) {
            if (tr.getType() != null && tr.getType().equals(type)) {
                allType.add(tr);
            }
        }
        return allType;
    }

    /** Exposes the DAO for callers that need direct access. */
    public ToscaDao getDao() {
        return dao;
    }
}
# address of the AMQP message broker (stale duplicate entry 172.17.0.2 removed)
message.broker.host=172.17.0.3
db.name=drip
db.username=drip-user
db.password=drip-pass
......@@ -31,9 +31,11 @@ public class CloudCredentials {
private String key;
private String keyIdAlias;
private List<LoginKey> logineKys;
private List<LoginKey> loginKeys;
private String cloudProviderName;
......@@ -74,17 +76,17 @@ public class CloudCredentials {
}
/**
* @return the logineKys
* @return the loginKeys
*/
public List<LoginKey> getLogineKys() {
return logineKys;
public List<LoginKey> getLoginKeys() {
return loginKeys;
}
/**
* @param logineKys the logineKys to set
* @param loginKeys the loginKeys to set
*/
public void setLogineKys(List<LoginKey> logineKys) {
this.logineKys = logineKys;
public void setLogineKeys(List<LoginKey> loginKeys) {
this.loginKeys = loginKeys;
}
/**
......
......@@ -16,6 +16,7 @@
package nl.uva.sne.drip.commons.types;
import java.util.Map;
import org.springframework.data.annotation.Id;
import org.springframework.data.mongodb.core.mapping.Document;
/**
......@@ -25,11 +26,37 @@ import org.springframework.data.mongodb.core.mapping.Document;
@Document
public class LoginKey {
    @Id
    private String id; // Mongo document id
    private Map<String, String> attributes; // optional free-form metadata
    private String key; // the key material itself
    private Type type; // PUBLIC or PRIVATE (see the Type enum below)
    private String name; // display/file name of the key

    /**
     * @return the name of the key
     */
    public String getName() {
        return this.name;
    }

    /**
     * @param name the name to set
     */
    public void setName(String name) {
        this.name = name;
    }

    /**
     * @return the id
     */
    public String getId() {
        return id;
    }

    /**
     * @param id the id to set
     */
    public void setId(String id) {
        this.id = id;
    }
public static enum Type {
PRIVATE,
......
......@@ -15,62 +15,74 @@
*/
package nl.uva.sne.drip.commons.types;
import org.springframework.data.annotation.Id;
import org.springframework.data.mongodb.core.mapping.Document;
/**
*
* @author S. Koulouzis
*/
@Document
public class UserPublicKey {
public class ProvisionRequest {
private String cloudConfID;
private String planID;
@Id
private String id;
private String userScriptID;
private String key;
private String userKeyID;
private String name;
/**
* @return the cloudConfID
*/
public String getCloudConfID() {
return cloudConfID;
}
/**
* @return the id
* @param cloudConfID the cloudConfID to set
*/
public String getId() {
return id;
public void setCloudConfID(String cloudConfID) {
this.cloudConfID = cloudConfID;
}
/**
* @param id the id to set
* @return the planID
*/
public void setId(String id) {
this.id = id;
public String getPlanID() {
return planID;
}
/**
* @return the key
* @param planID the planID to set
*/
public String getKey() {
return key;
public void setPlanID(String planID) {
this.planID = planID;
}
/**
* @param key the key to set
* @return the userScriptID
*/
public void setKey(String key) {
this.key = key;
public String getUserScriptID() {
return userScriptID;
}
/**
* @return the name
* @param userScriptID the userScriptID to set
*/
public String getName() {
return name;
public void setUserScriptID(String userScriptID) {
this.userScriptID = userScriptID;
}
/**
* @param name the name to set
* @return the userKeyID
*/
public void setName(String name) {
this.name = name;
public String getUserKeyID() {
return userKeyID;
}
/**
* @param userKeyID the userKeyID to set
*/
public void setUserKeyID(String userKeyID) {
this.userKeyID = userKeyID;
}
}
......@@ -16,6 +16,7 @@
package nl.uva.sne.drip.commons.types;
import java.util.Map;
import java.util.Set;
import org.springframework.data.annotation.Id;
import org.springframework.data.mongodb.core.mapping.Document;
......@@ -29,10 +30,35 @@ public class ToscaRepresentation {
@Id
private String id;
private Set<String> lowerLevelIDs;
private String name;
private Map<String, Object> kvMap;
private Integer level;
private Type type;
    /**
     * Distinguishes user-uploaded documents (SIDE) from planner output
     * (PLAN).
     */
    public static enum Type {

        PLAN,
        SIDE
    }

    /**
     * @return the type
     */
    public Type getType() {
        return type;
    }

    /**
     * @param type the type to set
     */
    public void setType(Type type) {
        this.type = type;
    }
public final String getId() {
return id;
}
......@@ -69,4 +95,32 @@ public class ToscaRepresentation {
this.name = name;
}
/**
* @return the level
*/
public Integer getLevel() {
return level;
}
/**
* @param level the level to set
*/
public void setLevel(Integer level) {
this.level = level;
}
/**
* @return the lowerLevelIDs
*/
public Set<String> getLowerLevelIDs() {
return lowerLevelIDs;
}
/**
* @param lowerLevelIDs the lowerLevelIDs to set
*/
public void setLowerLevelIDs(Set<String> lowerLevelIDs) {
this.lowerLevelIDs = lowerLevelIDs;
}
}
......@@ -30,10 +30,8 @@ import java.io.IOException;
import java.io.InputStream;
import java.io.PrintWriter;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.util.ArrayList;
import java.util.Enumeration;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
......@@ -382,7 +380,6 @@ public class Consumer extends DefaultConsumer {
byte[] bytes = Files.readAllBytes(Paths.get(f.getAbsolutePath()));
fileArguments.put("value", new String(bytes, charset));
parameters.add(fileArguments);
} else {
fileArguments.put("name", outputs.get(i).topologyName);
fileArguments.put("value", "ERROR::There is no output for topology " + outputs.get(i).topologyName);
......
'''
Created on Nov 20, 2015
@author: junchao
'''
import os
import sys
import re
import random
import networkx as nx
import numpy as np
import json
from subprocess import call
import time
from NewInstance import NewInstance
import collections
class Workflow():
    def add_entry_exit(self, g):
        """Add a synthetic entry node (0) and exit node (vertex_num-1) to g.

        Every source task gets a zero-weight edge from the entry node and
        every sink task a zero-weight edge to the exit node, so the DAG has
        a single start and end.

        NOTE(review): edges are added to self.G, not the parameter g; callers
        always pass self.G, so this only matters on reuse. Also assumes
        networkx 1.x where predecessors()/successors() return lists — confirm.
        """
        # add entry node to the graph
        g.add_node(0, est = -1, eft = -1, lft =-1)
        for vertex in g:
            if len(g.predecessors(vertex)) == 0 and not vertex == 0:
                self.G.add_weighted_edges_from([(0, vertex, 0)])
        # add exit node to the graph
        g.add_node(self.vertex_num-1, est = -1, eft = -1, lft =-1)
        startnodes = []
        for vertex in g:
            if len(g.successors(vertex)) == 0 and not vertex == self.vertex_num-1:
                startnodes.append(vertex)
        for node in startnodes:
            self.G.add_weighted_edges_from([(node, self.vertex_num-1, 0)])
    def init(self, content):
        """Build the workflow from a parsed JSON request.

        content carries 'workflow' (nodes/links), 'performance' (per-VM-type
        runtime rows as comma-separated strings), 'price' (comma-separated
        per-VM-type prices) and 'deadline' (task -> deadline). Sets up the
        DAG with entry/exit nodes, the performance matrix, the price vector,
        the deadline table and initial EST/EFT/LFT values.
        """
        self.G = nx.DiGraph()
        self.vertex_num =0
        for (key, value) in content['workflow'].items():
            if isinstance(value, list) :
                if key == 'nodes' :
                    self.vertex_num = len(value)
                    for node in value:
                        # est/eft/lft of -1 means "not yet computed"
                        self.G.add_node(node['name'], est = -1, eft = -1, lft =-1)
                if key == 'links' :
                    for link in value:
                        self.G.add_weighted_edges_from([(link['source'], link['target'], link['weight'])])
        print self.G.nodes
        #parse the performance matrix
        # Rows are padded with 0 at both ends for the synthetic entry/exit
        # nodes, which take no time on any VM.
        p = []
        od = collections.OrderedDict(sorted(content['performance'].items()))
        for (key, value) in od.items():
            row = []
            row.append(0)
            for i in value.split(','):
                row.append(int(i))
            row.append(0)
            print row
            p.append(row)
        self.p_table = np.matrix(p)
        #parse the price vector
        self.vm_price = []
        for i in content['price'].split(','):
            self.vm_price.append(int(i))
        #parse the deadline
        self.d_list = []
        for (key, value) in content['deadline'].items():
            self.d_list.append([int(key), int(value)])
        self.d_table = np.matrix(self.d_list)
        self.successful = 0
        # +2 accounts for the synthetic entry and exit nodes.
        self.vertex_num += 2
        self.add_entry_exit(self.G)
        #test whether the DAG contains cycles
        if len(list(nx.simple_cycles(self.G))) > 0:
            print "the DAG contains cycles"
            sys.exit()
        self.assigned_list = [-1]*(self.vertex_num)
        self.instances = []
        # Entry node starts (and finishes) at time 0; propagate EST forward
        # and LFT backward from the overall deadline (last row of d_table).
        self.G.node[0]['est'] = 0
        self.G.node[0]['eft'] = 0
        self.cal_est(0)
        self.G.node[self.vertex_num-1]['lft'] = self.d_table[self.d_table.shape[0]-1,1]#self.p_table[0, child], self.p_table.shape[0]
        self.cal_lft(self.vertex_num-1)
    def init1(self, workflow_file_name, performance_file_name, price_file_name, deadline_file_name):
        """File-based variant of init(): reads the workflow from a DOT file
        and the performance/price/deadline data from plain-text files.

        NOTE(review): 'pydot' is never imported in this file, so this method
        raises NameError as written; nx.from_pydot/edges_iter also only exist
        in old networkx 1.x releases — confirm before use.
        """
        #Initialization
        self.G=nx.DiGraph()
        self.vertex_num = 0
        self.successful = 0
        #Read the workflow information
        graph = pydot.graph_from_dot_file(workflow_file_name)
        nx_graph = nx.from_pydot(graph)
        self.G=nx.DiGraph()
        # DOT node ids are shifted by +1 so id 0 stays free for the entry node.
        for node in nx_graph:
            #print node
            self.G.add_node(int(node)+1, est = -1, eft = -1, lft =-1)
            self.vertex_num += 1
        #print nx_graph.edge
        #print workflow_file_name
        for link in nx_graph.edges_iter():
            #print link[0], link[1]
            self.G.add_weighted_edges_from([(int(link[0])+1, int(link[1])+1, int(float(nx_graph[link[0]][link[1]][0]['weight'])))])
        self.vertex_num += 2
        self.add_entry_exit(self.G)
        #test whether the DAG contains cycles
        if len(list(nx.simple_cycles(self.G))) > 0:
            print "the DAG contains cycles"
            sys.exit()
        #read performance table
        l = [ map(int,line.split(',')) for line in open(performance_file_name, 'r')]
        #append the entry and exit node information
        for row in l:
            row.insert(0, 0)
            row.insert(len(row),0)
        self.p_table = np.matrix(l)
        self.assigned_list = [-1]*(self.vertex_num)
        self.vm_price = map(int,open(price_file_name,'r').readline().split(','))
        self.instances = []
        self.d_list = [ map(int,line.split('\t')) for line in open(deadline_file_name, 'r')]
        tmpList = self.d_list[len(self.d_list)-1]
        self.d_table = np.matrix(tmpList)
        #deadline = open(deadline_file_name, 'r').readline()
        # Entry node starts at 0; propagate EST forward and LFT backward from
        # the overall deadline.
        self.G.node[0]['est'] = 0
        self.G.node[0]['eft'] = 0
        self.cal_est(0)
        self.G.node[self.vertex_num-1]['lft'] = self.d_table[self.d_table.shape[0]-1,1]#self.p_table[0, child], self.p_table.shape[0]
        self.cal_lft(self.vertex_num-1)
#The following two functions are to initialize the EST, EFT and LFT
#calculate the earliest start time and earliest finish time
    def cal_est(self, i):
        """Propagate earliest start/finish times (EST/EFT) forward from task i.

        A child's EST is the max over its parents of parent-EFT + edge weight;
        its EFT assumes the runtime of VM type 0 (row 0 of p_table). Recurses
        depth-first, only descending into a child whose EST increased.
        """
        for child in self.G.successors(i):
            est = self.G.node[i]['eft']+self.G[i][child]['weight']
            if est>self.G.node[child]['est']:
                self.G.node[child]['est'] = est
                self.G.node[child]['eft'] = est + self.p_table[0, child]
                self.cal_est(child)
    def cal_lft(self, d):
        """Propagate latest finish times (LFT) backward from task d.

        A parent's LFT is d's LFT minus d's runtime (VM type 0) minus the
        edge weight, further capped by the parent's own deadline if it has
        one in d_table. Recurses whenever a parent's LFT shrank (or was -1,
        i.e. uninitialized).
        """
        for parent in self.G.predecessors(d):
            lft = self.G.node[d]['lft'] - self.p_table[0, d] - self.G[parent][d]['weight']
            # Collect the task ids that have an explicit deadline.
            d_table_list = []
            for deadline in self.d_table:
                d_table_list.append(deadline[0, 0])
            if parent in d_table_list:
                deadline = self.d_table[d_table_list.index([parent]),1]
                if deadline < lft:
                    lft = deadline
            if self.G.node[parent]['lft'] == -1 or lft<self.G.node[parent]['lft']:
                # parent may not finish later
                self.G.node[parent]['lft'] = lft
                #print "call graphAssignLFT(",graph.node[parent]['name'],")"
                self.cal_lft(parent)
#Finding critical path
    def ic_pcp(self):
        """IC-PCP entry point: mark the synthetic entry and exit nodes as
        assigned (VM type 0), then assign every other task by repeatedly
        extracting partial critical paths from the exit node."""
        self.assigned_list[0] = 0
        self.assigned_list[self.vertex_num-1] = 0
        self.assign_parents(self.vertex_num-1)
def has_unassigned_parent(self, i):
for parent in self.G.predecessors(i):
if (self.assigned_list[parent] == -1):
return True
return False
    def assign_parents(self, i):
        """Assign VMs to all unassigned ancestors of task i (IC-PCP core).

        Repeatedly extracts the partial critical path (PCP) ending at i,
        assigns the whole path to the cheapest feasible VM type, refreshes
        EST/EFT forward and LFT backward, records the launched instance, and
        recurses into each path member.

        NOTE(review): NewInstance receives the whole assigned_vms list as its
        vm_type, but vm_type is indexed as a scalar elsewhere — confirm.
        """
        while (self.has_unassigned_parent(i)):
            if self.successful == 1: #resources cannot be met
                break
            pcp = []
            self.find_critical_path(i, pcp)
            assigned_vms = self.assign_path(pcp)
            if -1 in assigned_vms:
                print 'resource cannot be met'
                break
            # pcp is ordered child->ancestor; pcp[-1] is the earliest task.
            self.G.node[pcp[len(pcp)-1]]['eft'] = self.G.node[pcp[len(pcp)-1]]['est'] + self.p_table[assigned_vms[len(pcp)-1], pcp[len(pcp)-1]]
            self.update_est(pcp[len(pcp)-1], pcp, assigned_vms)
            self.update_lft(pcp[0], pcp, assigned_vms)
            #split according to the types of assigned VMs and add it to the new instance
            ni = NewInstance(assigned_vms, self.G.node[pcp[len(pcp)-1]]['est'], self.G.node[pcp[0]]['eft'], pcp)
            for j in xrange(len(pcp)):
                ni.cost = ni.cost + self.vm_price[assigned_vms[j]]
            self.instances.append(ni)
            for j in reversed(pcp): #also in the paper they didn't mention the order
                self.assign_parents(j)
#TODO: A very tricky thing on updating the EST and EFT.
    def update_est(self, i, pcp, assigned_vms):
        """Refresh EST/EFT of i's descendants after a PCP assignment.

        Children on the path use zero transfer cost from i when they are the
        path head, otherwise the recorded edge weight between consecutive
        path members; children off the path use the normal edge weight. A
        child is only updated (and recursed into) when no other parent would
        force a later start.
        """
        for child in self.G.successors(i):
            if child not in pcp:
                est = self.G.node[i]['eft']+self.G[i][child]['weight']
                if self.assigned_list[i] == -1:
                    # Child not assigned yet: estimate EFT with VM type 0.
                    eft = est + self.p_table[0, child]
                else:
                    eft = est + self.p_table[self.assigned_list[child], child]
            else:
                if pcp.index(child) == len(pcp) - 1:
                    # Same instance as i: data transfer is free.
                    est = self.G.node[i]['eft']
                    eft = est + self.p_table[self.assigned_list[child], child]
                else:
                    pos = pcp.index(child)
                    est = self.G.node[i]['eft'] + self.G[pcp[pos+1]][pcp[pos]]['weight']
                    eft = est + self.p_table[self.assigned_list[child], child]
            #decide whether the assignment will violate other parent data dependency
            all_smaller = True
            for parent in self.G.predecessors(child):
                if not parent == i:
                    if self.G.node[parent]['eft'] + self.G[parent][child]['weight'] > est:
                        all_smaller = False
            if all_smaller:
                self.G.node[child]['est'] = est
                self.G.node[child]['eft'] = eft
                self.update_est(child, pcp, assigned_vms)
    def update_lft(self, d, pcp, assigned_vms):
        """Refresh LFT of d's ancestors after a PCP assignment.

        Parents on the path skip the transfer cost when the adjacent path
        members run on the same VM type; parents off the path subtract the
        full runtime + edge weight. Recurses whenever a parent's LFT shrank.
        """
        for parent in self.G.predecessors(d):
            if parent not in pcp:
                if self.assigned_list[d] == -1:
                    lft = self.G.node[d]['lft'] - self.p_table[0, d] - self.G[parent][d]['weight']
                else:
                    lft = self.G.node[d]['lft'] - self.p_table[self.assigned_list[d], d] - self.G[parent][d]['weight']
            else:
                pos = pcp.index(parent)
                #if pos < len(pcp) -1:
                if assigned_vms[pos] == assigned_vms[pos-1]:#9, 6, 2
                    # Same VM type for both path members: no transfer cost.
                    lft = self.G.node[d]['lft'] - self.p_table[self.assigned_list[d], d]
                else:
                    lft = self.G.node[d]['lft'] - self.p_table[self.assigned_list[d], d] - self.G[pcp[pos]][pcp[pos-1]]['weight']
            if lft < self.G.node[parent]['lft']:
                self.G.node[parent]['lft'] = lft
                self.update_lft(parent, pcp, assigned_vms)
    def find_critical_path(self, i, pcp):
        """Extend pcp with the partial critical path ending at task i.

        The critical parent is the unassigned predecessor with the largest
        EFT + transfer time; the method walks that chain recursively, so on
        return pcp is ordered from i's parent back to the path's start.
        """
        cal_cost = 0
        critical_parent = -1
        for n in self.G.predecessors(i):
            if self.assigned_list[n] == -1: #parent of node i is not assigned
                if self.G.node[n]['eft'] + self.G.edge[n][i]['weight'] > cal_cost:
                    cal_cost = self.G.node[n]['eft'] + self.G.edge[n][i]['weight']
                    critical_parent = n
        if not critical_parent == -1:
            pcp.append(critical_parent)
            self.find_critical_path(critical_parent, pcp)
def exec_time_sum(self, pcp, vm_type):
sum = 0
for i in pcp:
sum += self.p_table[vm_type, i]
return sum
#look forward one step when assigning a vm to a pcp how the est varies
    def est_vary(self, pcp, d):
        """How much the EST of the path head would change if the path were
        co-located with task d (which then costs no data transfer).

        Returns original EST minus the largest parent-driven EST when the
        edge weight from d is dropped.
        """
        head_pcp = pcp[len(pcp)-1]
        original_est = self.G.node[head_pcp]['est']
        biggest_est = -1
        biggest_parent = -1
        for parent in self.G.predecessors(head_pcp):
            if parent == d:
                # Same instance: no transfer delay from this parent.
                est = self.G.node[parent]['eft']
            else:
                est = self.G.node[parent]['eft'] + self.G[parent][head_pcp]['weight']
            if biggest_est < est:
                biggest_est = est
                biggest_parent = parent
        return original_est-biggest_est
#choose the best existing available instance for the pcp
    def choose_exist_instance(self, pcp):
        """Pick the best already-launched instance to host pcp, or None.

        A candidate instance must host a parent of the path head (so
        co-location saves transfer time). Among candidates, prefer the one
        with the larger cost-weighted saving whose finish time still meets
        the head's LFT. If one is chosen its vm_end is extended to cover the
        path. NOTE(review): indexes p_table/vm_price with vm.vm_type as a
        scalar, but assign_parents stores a list there — confirm.
        """
        best_vm = None
        best_exec_time = -1
        best_vary_time = -1
        for vm in self.instances:
            head_pcp = pcp[len(pcp)-1]
            for parent in self.G.predecessors(head_pcp):
                head_exist_pcp = vm.task_list[0] #The last node of the previous critical path
                if parent == head_exist_pcp:
                    if best_vm == None:
                        best_vm = vm
                        exec_time = self.exec_time_sum(pcp, vm.vm_type)
                        best_exec_time = exec_time
                        best_vary_time = self.est_vary(pcp, head_exist_pcp)
                    else:
                        best_vm_head = vm.task_list[0]
                        # if assigned to the vm, what will the est be
                        exec_time = self.exec_time_sum(pcp, vm.vm_type)
                        # calculate the lft
                        varied_time = self.G.node[head_pcp]['est']-self.est_vary(pcp, head_exist_pcp)
                        lft = varied_time+exec_time
                        if (exec_time - self.est_vary(pcp, head_exist_pcp))*self.vm_price[vm.vm_type]> \
                            (best_exec_time - self.est_vary(pcp, best_vm.task_list[0]))*self.vm_price[best_vm.vm_type] \
                            and lft < self.G.node[head_pcp]['lft']: #also should not violate the lft
                            best_vm = vm
                            best_exec_time = exec_time
                            best_vary_time = varied_time
        if not best_vm == None:
            best_vm.vm_end = self.G.node[pcp[len(pcp)-1]]['est']-best_vary_time+best_exec_time
        return best_vm
    def assign_path(self, pcp):
        """Assign every task on pcp to the cheapest VM type that meets all
        LFTs, simulating a sequential run on one instance.

        Returns a list of the chosen VM type repeated len(pcp) times; the
        type is -1 when no VM type can satisfy the deadlines.
        """
        cheapest_vm = -1
        cheapest_sum = 9999999 #the initialized value should be a very large number
        for i in xrange(self.p_table.shape[0]): # use the shape of the performance table to identify how many VM types are there
            violate_LFT = 0
            # Walk the path from its earliest task (pcp[-1]) forward,
            # accumulating finish time and cost on VM type i.
            start = self.G.node[pcp[len(pcp)-1]]['est']
            cost_sum = 0
            for j in xrange(len(pcp)-1, -1, -1):
                cost_sum += self.vm_price[i]
                if j == len(pcp)-1:
                    start = start + self.p_table[i, pcp[j]]
                else:
                    start = start + self.p_table[i, pcp[j]]+ self.G[pcp[j+1]][pcp[j]]['weight']
                if self.G.node[pcp[j]]['lft'] < start:
                    violate_LFT = 1
            #launch a new instance of the cheapest service which can finish each task of P before its LFT
            if violate_LFT == 0 and cost_sum < cheapest_sum:
                cheapest_vm = i
                cheapest_sum = cost_sum
        for i in xrange(len(pcp)):
            self.assigned_list[pcp[i]] = cheapest_vm
        return [cheapest_vm]*len(pcp)
def generate_string(self, node):
s = "name "+str(node)+"\n"
for i in xrange(len(self.vm_price)):
s = s+str(self.p_table[i, node])+"\n"
s = s+"assigned vm: " + str(self.assigned_list[node]+1)
return s
def generateJSON(self):
content = {}
for i in xrange(1, self.vertex_num-1, 1):
if self.assigned_list[i] == 0:
content[str(i)] = 'large'
if self.assigned_list[i] == 1:
content[str(i)] = 'Medium'
if self.assigned_list[i] == 2:
content[str(i)] = 'Small'
return content
    #calculate the total execution cost
    def cal_cost(self):
        """Sum the cost accumulated on every launched VM instance."""
        cost = 0
        for vm in self.instances:
            cost = cost + vm.cost
        return cost
    def cal_cost1(self):
        """Alternative cost total: per-task price of each assigned VM type,
        skipping the artificial entry (index 0) and exit (last index) nodes."""
        cost = 0
        for i in range(1, len(self.assigned_list)-1):
            cost += self.vm_price[self.assigned_list[i]]
        return cost
    def has_edge_vm(self, vm1, vm2):
        """Return True when any task placed on vm1 has a workflow edge (in
        either direction) to any task placed on vm2."""
        for node1 in vm1.task_list:
            for node2 in vm2.task_list:
                if self.G.has_edge(node1, node2) or self.G.has_edge(node2, node1):
                    return True
        return False
class NewInstance(object):
    """A provisioned VM instance: its type, active time window, the
    critical-path tasks scheduled on it, and its accumulated cost."""

    def __init__(self, vm_type, vm_start, vm_end, pcp):
        # Attribute layout is relied on by the scheduler; keep it stable.
        self.vm_type, self.vm_start, self.vm_end = vm_type, vm_start, vm_end
        self.task_list = pcp
        self.cost = 0  # accumulated later, one price unit per assigned task
\ No newline at end of file
#!/usr/bin/env python
import pika
import networkx as nx
import sys
import numpy as np
import sys, argparse
import operator
import os
from toscaparser import *
from toscaparser.tosca_template import ToscaTemplate
import re
import getopt
from ICPCP import Workflow
import random
import time
import json
# Open a blocking AMQP connection to the RabbitMQ broker and declare the
# queue this planner serves. NOTE(review): the host is a hard-coded
# Docker-internal address — confirm it matches the deployed broker.
connection = pika.BlockingConnection(pika.ConnectionParameters(host='172.17.0.2'))
channel = connection.channel()
channel.queue_declare(queue='planner_queue')
def handleDelivery(message):
    """Parse a planner request message and return its parameters.

    `message` is a JSON string shaped like
    {"parameters": [{"name": ..., "value": ...}, ...]}.

    Returns a dict mapping each parameter name to its value. The original
    code computed the names and values and discarded them; returning the
    mapping makes the function useful and testable while staying backward
    compatible (existing callers ignore the return value).

    Raises ValueError on malformed JSON and KeyError when the expected
    "parameters"/"name"/"value" keys are missing.
    """
    parsed_json = json.loads(message)
    params = parsed_json["parameters"]
    result = {}
    for param in params:
        result[param["name"]] = param["value"]
    return result
def on_request(ch, method, props, body):
    """AMQP RPC callback: parse a TOSCA-derived request, run the IC-PCP
    planner on it, and publish the plan on the reply-to queue.

    NOTE(review): `response` below is assigned a placeholder *string* and
    then treated as a parsed message (`.get('parameters')...`), which will
    raise AttributeError at runtime. The parsed request `body` is presumably
    what should be used instead — a corrected variant of this handler exists
    elsewhere in this change set.
    """
    handleDelivery(body)
    print(" Message %s" % body)
    response = "AAAAAAAAAAAAAAAAAAAAAA"
    # BUG (see docstring): str has no .get(); this line cannot succeed.
    json1 = response.get('parameters')[0].get('value').get('topology_template').get('node_templates')
    deadline = 0
    for j in json1:
        #print json[j]
        # every non-connection node carries the QoS deadline; the last one wins
        if not json1[j]['type'] == "Switch.nodes.Application.Connection":
            deadline = int(re.search(r'\d+', json1[j]['properties']['QoS']['response_time']).group())
    #get the nodes from the json
    nodeDic = {}
    nodeDic1 = {}
    i = 1
    for j in json1:
        if not json1[j]['type'] == "Switch.nodes.Application.Connection":
            print j, json1[j]
            nodeDic[j] = i
            nodeDic1[i] = j
            i = i + 1
    #get the links from the json
    links = []
    for j in json1:
        if json1[j]['type'] == "Switch.nodes.Application.Connection":
            print json1[j]['properties']['source']['component_name']
            print json1[j]['properties']['target']['component_name']
            link= {}
            link['source'] = nodeDic[json1[j]['properties']['source']['component_name']]
            link['target'] = nodeDic[json1[j]['properties']['target']['component_name']]
            # NOTE(review): edge weight is random — confirm this placeholder
            # is intentional until real transfer times are available.
            link['weight'] = random.randint(1, 10)
            links.append(link)
    # compose the json as input of the workflow
    wfJson = {}
    wfJson['workflow'] = {}
    nodesList = []
    sorted_nodeDic = sorted(nodeDic.items(), key=operator.itemgetter(1))
    for key, value in sorted_nodeDic:
        v = {}
        v['name'] = value
        nodesList.append(v)
    wfJson['workflow']['nodes'] = nodesList
    wfJson['workflow']['links'] = links
    #print deadline
    # hard-coded price vector and deadline key; performance rows are stubs
    wfJson['price'] = "5,2,1"
    wfJson['deadline'] = {'2': deadline}
    #generate performance
    performance = {}
    for key, value in sorted_nodeDic:
        performance[str(value)] = "1,2,3"
    wfJson['performance'] = performance
    print wfJson
    #send request to the server
    start = time.time()
    wf = Workflow()
    wf.init(wfJson)
    wf.ic_pcp()
    #print content['workflow']
    #return
    res = wf.generateJSON()
    end = time.time()
    print (end - start)
    # generate the json files in the corresponding format as the
    outcontent = {}
    outcontent["creationDate"] = 1487002029722
    outcontent["parameters"] = []
    par1 = {}
    par1["url"] = "null"
    par1["encoding"] = "UTF-8"
    par1["value"] = res
    par1["attributes"] = "null"
    outcontent["parameters"].append(par1)
    response = outcontent
    ch.basic_publish(exchange='',
                     routing_key=props.reply_to,
                     properties=pika.BasicProperties(correlation_id = \
                                                     props.correlation_id),
                     body=str(response))
    ch.basic_ack(delivery_tag = method.delivery_tag)
# Deliver one unacked message at a time, then block serving RPC requests.
channel.basic_qos(prefetch_count=1)
channel.basic_consume(on_request, queue='planner_queue')
print(" [x] Awaiting RPC requests")
channel.start_consuming()
\ No newline at end of file
#https://github.com/skoulouzis/DRIP/blob/package/doc/json_samples/kbExampleMessage.json
import networkx as nx
import sys
import numpy as np
import sys, argparse
import operator
import os
from toscaparser import *
from toscaparser.tosca_template import ToscaTemplate
import re
import getopt
from ICPCP import Workflow
import random
import time
import json
def main(argv):
    """CLI entry point: read a planner request JSON file (-w/--workflow),
    build an IC-PCP workflow description from its TOSCA node templates, run
    the planner, and return the plan wrapped in the DRIP message envelope.

    NOTE(review): the -s/--SDI argument is parsed into SDI_file but never
    used — confirm whether it is dead or pending implementation.
    """
    workflow_file = ""
    try:
        opts, args = getopt.getopt(argv,"hw:s:",["workflow=", "SDI="])
    except getopt.GetoptError:
        print 'server.py -w <workflowfile> -s <SDI>'
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print 'server.py -w <workflowfile> -s <SDI>'
            sys.exit()
        elif opt in ("-w", "--workflow"):
            workflow_file = arg
        elif opt in ("-s", "--SDI"):
            SDI_file = arg
    data = {}
    print workflow_file
    with open(workflow_file) as data_file:
        data = json.load(data_file)
    #print data
    #path = "input.yaml"
    '''
    a_file = os.path.isfile(path)
    tosca = ToscaTemplate(path)
    #print tosca.tpl
    json = tosca.tpl.get('topology_template').get('node_templates')
    #print json
    '''
    json1 = data.get('parameters')[0].get('value').get('topology_template').get('node_templates')
    deadline = 0
    for j in json1:
        #print json[j]
        # every non-connection node carries the QoS deadline; the last one wins
        if not json1[j]['type'] == "Switch.nodes.Application.Connection":
            deadline = int(re.search(r'\d+', json1[j]['properties']['QoS']['response_time']).group())
    #get the nodes from the json
    nodeDic = {}
    nodeDic1 = {}
    i = 1
    for j in json1:
        if not json1[j]['type'] == "Switch.nodes.Application.Connection":
            print j, json1[j]
            nodeDic[j] = i
            nodeDic1[i] = j
            i = i + 1
    #get the links from the json
    links = []
    for j in json1:
        if json1[j]['type'] == "Switch.nodes.Application.Connection":
            print json1[j]['properties']['source']['component_name']
            print json1[j]['properties']['target']['component_name']
            link= {}
            link['source'] = nodeDic[json1[j]['properties']['source']['component_name']]
            link['target'] = nodeDic[json1[j]['properties']['target']['component_name']]
            # NOTE(review): placeholder random edge weight — confirm intent
            link['weight'] = random.randint(1, 10)
            links.append(link)
    # compose the json as input of the workflow
    wfJson = {}
    wfJson['workflow'] = {}
    nodesList = []
    sorted_nodeDic = sorted(nodeDic.items(), key=operator.itemgetter(1))
    for key, value in sorted_nodeDic:
        v = {}
        v['name'] = value
        nodesList.append(v)
    wfJson['workflow']['nodes'] = nodesList
    wfJson['workflow']['links'] = links
    #print deadline
    # hard-coded price vector / deadline key; performance rows are stubs
    wfJson['price'] = "5,2,1"
    wfJson['deadline'] = {'2': deadline}
    #generate performance
    performance = {}
    for key, value in sorted_nodeDic:
        performance[str(value)] = "1,2,3"
    wfJson['performance'] = performance
    print wfJson
    #send request to the server
    start = time.time()
    wf = Workflow()
    wf.init(wfJson)
    wf.ic_pcp()
    #print content['workflow']
    #return
    res = wf.generateJSON()
    end = time.time()
    print (end - start)
    # generate the json files in the corresponding format as the
    outcontent = {}
    outcontent["creationDate"] = 1487002029722
    outcontent["parameters"] = []
    par1 = {}
    par1["url"] = "null"
    par1["encoding"] = "UTF-8"
    par1["value"] = res
    par1["attributes"] = "null"
    outcontent["parameters"].append(par1)
    return outcontent
# Script entry point: forward command-line flags (minus the program name).
if __name__ == '__main__':
    main(sys.argv[1:])
\ No newline at end of file
......@@ -31,7 +31,7 @@ import java.util.logging.Logger;
public class RPCServer {
private static final String RPC_QUEUE_NAME = "planner_queue";
private static final String HOST = "172.17.0.2";
private static final String HOST = "172.17.0.3";
public static void main(String[] argv) {
start();
......
'''
Created on Nov 20, 2015
@author: junchao
'''
import os
import sys
import re
import random
import networkx as nx
import numpy as np
import json
from subprocess import call
import time
from NewInstance import NewInstance
import collections
class Workflow():
    """IC-PCP (IaaS Cloud Partial Critical Path) workflow scheduler.

    Holds a DAG of tasks (networkx DiGraph `G` with per-node 'est'/'eft'/'lft'
    attributes), a performance table `p_table` (rows = VM types, columns =
    tasks, padded with 0 for the artificial entry/exit nodes), a price vector
    `vm_price`, deadlines `d_table`, the per-task VM assignment
    `assigned_list`, and launched `instances`.
    """
    def add_entry_exit(self, g):
        """Add an artificial entry node (0) before all sources and an
        artificial exit node (vertex_num-1) after all sinks, wired with
        zero-weight edges."""
        # add entry node to the graph
        g.add_node(0, est = -1, eft = -1, lft =-1)
        for vertex in g:
            if len(g.predecessors(vertex)) == 0 and not vertex == 0:
                self.G.add_weighted_edges_from([(0, vertex, 0)])
        # add exit node to the graph
        g.add_node(self.vertex_num-1, est = -1, eft = -1, lft =-1)
        startnodes = []
        for vertex in g:
            if len(g.successors(vertex)) == 0 and not vertex == self.vertex_num-1:
                startnodes.append(vertex)
        for node in startnodes:
            self.G.add_weighted_edges_from([(node, self.vertex_num-1, 0)])
    def init(self, content):
        """Build the scheduler state from a parsed request dict `content`
        with keys 'workflow' (nodes/links), 'performance' (comma-separated
        per-task rows), 'price' (comma-separated), and 'deadline'."""
        self.G = nx.DiGraph()
        self.vertex_num =0
        for (key, value) in content['workflow'].items():
            if isinstance(value, list) :
                if key == 'nodes' :
                    self.vertex_num = len(value)
                    for node in value:
                        self.G.add_node(node['name'], est = -1, eft = -1, lft =-1)
                if key == 'links' :
                    for link in value:
                        self.G.add_weighted_edges_from([(link['source'], link['target'], link['weight'])])
        print self.G.nodes
        #parse the performance matrix
        p = []
        od = collections.OrderedDict(sorted(content['performance'].items()))
        for (key, value) in od.items():
            row = []
            # pad with 0 for the artificial entry node ...
            row.append(0)
            for i in value.split(','):
                row.append(int(i))
            # ... and the artificial exit node
            row.append(0)
            print row
            p.append(row)
        self.p_table = np.matrix(p)
        #parse the price vector
        self.vm_price = []
        for i in content['price'].split(','):
            self.vm_price.append(int(i))
        #parse the deadline
        self.d_list = []
        for (key, value) in content['deadline'].items():
            self.d_list.append([int(key), int(value)])
        self.d_table = np.matrix(self.d_list)
        self.successful = 0
        self.vertex_num += 2
        self.add_entry_exit(self.G)
        #test whether the DAG contains cycles
        if len(list(nx.simple_cycles(self.G))) > 0:
            print "the DAG contains cycles"
            sys.exit()
        self.assigned_list = [-1]*(self.vertex_num)
        self.instances = []
        # seed EST/EFT from the entry node, LFT from the final deadline
        self.G.node[0]['est'] = 0
        self.G.node[0]['eft'] = 0
        self.cal_est(0)
        self.G.node[self.vertex_num-1]['lft'] = self.d_table[self.d_table.shape[0]-1,1]#self.p_table[0, child], self.p_table.shape[0]
        self.cal_lft(self.vertex_num-1)
    def init1(self, workflow_file_name, performance_file_name, price_file_name, deadline_file_name):
        """File-based variant of init(): workflow from a DOT file, plus
        performance/price/deadline text files.

        NOTE(review): relies on `pydot` and nx.from_pydot, neither imported
        in this module — confirm this path is still exercised.
        """
        #Initialization
        self.G=nx.DiGraph()
        self.vertex_num = 0
        self.successful = 0
        #Read the workflow information
        graph = pydot.graph_from_dot_file(workflow_file_name)
        nx_graph = nx.from_pydot(graph)
        self.G=nx.DiGraph()
        for node in nx_graph:
            #print node
            # DOT node ids are shifted by +1 so 0 is free for the entry node
            self.G.add_node(int(node)+1, est = -1, eft = -1, lft =-1)
            self.vertex_num += 1
        #print nx_graph.edge
        #print workflow_file_name
        for link in nx_graph.edges_iter():
            #print link[0], link[1]
            self.G.add_weighted_edges_from([(int(link[0])+1, int(link[1])+1, int(float(nx_graph[link[0]][link[1]][0]['weight'])))])
        self.vertex_num += 2
        self.add_entry_exit(self.G)
        #test whether the DAG contains cycles
        if len(list(nx.simple_cycles(self.G))) > 0:
            print "the DAG contains cycles"
            sys.exit()
        #read performance table
        l = [ map(int,line.split(',')) for line in open(performance_file_name, 'r')]
        #append the entry and exit node information
        for row in l:
            row.insert(0, 0)
            row.insert(len(row),0)
        self.p_table = np.matrix(l)
        self.assigned_list = [-1]*(self.vertex_num)
        self.vm_price = map(int,open(price_file_name,'r').readline().split(','))
        self.instances = []
        self.d_list = [ map(int,line.split('\t')) for line in open(deadline_file_name, 'r')]
        tmpList = self.d_list[len(self.d_list)-1]
        self.d_table = np.matrix(tmpList)
        #deadline = open(deadline_file_name, 'r').readline()
        self.G.node[0]['est'] = 0
        self.G.node[0]['eft'] = 0
        self.cal_est(0)
        self.G.node[self.vertex_num-1]['lft'] = self.d_table[self.d_table.shape[0]-1,1]#self.p_table[0, child], self.p_table.shape[0]
        self.cal_lft(self.vertex_num-1)
    #The following two functions are to initialize the EST, EFT and LFT
    #calculate the earliest start time and earliest finish time
    def cal_est(self, i):
        """Recursively push earliest start/finish times (using the fastest
        VM row, index 0) from node i down to its successors."""
        for child in self.G.successors(i):
            est = self.G.node[i]['eft']+self.G[i][child]['weight']
            if est>self.G.node[child]['est']:
                self.G.node[child]['est'] = est
                self.G.node[child]['eft'] = est + self.p_table[0, child]
                self.cal_est(child)
    def cal_lft(self, d):
        """Recursively pull latest finish times from node d back to its
        predecessors, tightening with any per-node deadline in d_table."""
        for parent in self.G.predecessors(d):
            lft = self.G.node[d]['lft'] - self.p_table[0, d] - self.G[parent][d]['weight']
            d_table_list = []
            for deadline in self.d_table:
                d_table_list.append(deadline[0, 0])
            if parent in d_table_list:
                deadline = self.d_table[d_table_list.index([parent]),1]
                if deadline < lft:
                    lft = deadline
            if self.G.node[parent]['lft'] == -1 or lft<self.G.node[parent]['lft']:
                # parent may not finish later
                self.G.node[parent]['lft'] = lft
                #print "call graphAssignLFT(",graph.node[parent]['name'],")"
                self.cal_lft(parent)
    #Finding critical path
    def ic_pcp(self):
        """Top-level IC-PCP loop: mark entry/exit assigned, then schedule
        every partial critical path feeding the exit node."""
        self.assigned_list[0] = 0
        self.assigned_list[self.vertex_num-1] = 0
        self.assign_parents(self.vertex_num-1)
    def has_unassigned_parent(self, i):
        """True while any predecessor of i still lacks a VM assignment."""
        for parent in self.G.predecessors(i):
            if (self.assigned_list[parent] == -1):
                return True
        return False
    def assign_parents(self, i):
        """Repeatedly extract the partial critical path ending at i, assign
        it to a VM type, propagate EST/LFT updates, record the new instance,
        then recurse on every path member."""
        while (self.has_unassigned_parent(i)):
            if self.successful == 1: #resources cannot be met
                break
            pcp = []
            self.find_critical_path(i, pcp)
            assigned_vms = self.assign_path(pcp)
            if -1 in assigned_vms:
                print 'resource cannot be met'
                break
            self.G.node[pcp[len(pcp)-1]]['eft'] = self.G.node[pcp[len(pcp)-1]]['est'] + self.p_table[assigned_vms[len(pcp)-1], pcp[len(pcp)-1]]
            self.update_est(pcp[len(pcp)-1], pcp, assigned_vms)
            self.update_lft(pcp[0], pcp, assigned_vms)
            #split according to the types of assigned VMs and add it to the new instance
            ni = NewInstance(assigned_vms, self.G.node[pcp[len(pcp)-1]]['est'], self.G.node[pcp[0]]['eft'], pcp)
            for j in xrange(len(pcp)):
                ni.cost = ni.cost + self.vm_price[assigned_vms[j]]
            self.instances.append(ni)
            for j in reversed(pcp): #also in the paper they didn't mention the order
                self.assign_parents(j)
    #TODO: A very tricky thing on updating the EST and EFT.
    def update_est(self, i, pcp, assigned_vms):
        """After assigning `pcp`, propagate new earliest start/finish times
        from node i to successors, but only when every other parent of a
        child still finishes early enough (no dependency violated)."""
        for child in self.G.successors(i):
            if child not in pcp:
                est = self.G.node[i]['eft']+self.G[i][child]['weight']
                if self.assigned_list[i] == -1:
                    eft = est + self.p_table[0, child]
                else:
                    eft = est + self.p_table[self.assigned_list[child], child]
            else:
                if pcp.index(child) == len(pcp) - 1:
                    # same instance: no transfer cost between consecutive tasks
                    est = self.G.node[i]['eft']
                    eft = est + self.p_table[self.assigned_list[child], child]
                else:
                    pos = pcp.index(child)
                    est = self.G.node[i]['eft'] + self.G[pcp[pos+1]][pcp[pos]]['weight']
                    eft = est + self.p_table[self.assigned_list[child], child]
            #decide whether the assignment will violate other parent data dependency
            all_smaller = True
            for parent in self.G.predecessors(child):
                if not parent == i:
                    if self.G.node[parent]['eft'] + self.G[parent][child]['weight'] > est:
                        all_smaller = False
            if all_smaller:
                self.G.node[child]['est'] = est
                self.G.node[child]['eft'] = eft
                self.update_est(child, pcp, assigned_vms)
    def update_lft(self, d, pcp, assigned_vms):
        """After assigning `pcp`, propagate tightened latest finish times
        from node d back through its predecessors."""
        for parent in self.G.predecessors(d):
            if parent not in pcp:
                if self.assigned_list[d] == -1:
                    lft = self.G.node[d]['lft'] - self.p_table[0, d] - self.G[parent][d]['weight']
                else:
                    lft = self.G.node[d]['lft'] - self.p_table[self.assigned_list[d], d] - self.G[parent][d]['weight']
            else:
                pos = pcp.index(parent)
                #if pos < len(pcp) -1:
                # same VM type for consecutive path tasks: skip transfer cost
                if assigned_vms[pos] == assigned_vms[pos-1]:#9, 6, 2
                    lft = self.G.node[d]['lft'] - self.p_table[self.assigned_list[d], d]
                else:
                    lft = self.G.node[d]['lft'] - self.p_table[self.assigned_list[d], d] - self.G[pcp[pos]][pcp[pos-1]]['weight']
            if lft < self.G.node[parent]['lft']:
                self.G.node[parent]['lft'] = lft
                self.update_lft(parent, pcp, assigned_vms)
    def find_critical_path(self, i, pcp):
        """Append to `pcp` the chain of unassigned critical parents of i:
        at each step the parent with the largest eft + edge weight."""
        cal_cost = 0
        critical_parent = -1
        for n in self.G.predecessors(i):
            if self.assigned_list[n] == -1: #parent of node i is not assigned
                if self.G.node[n]['eft'] + self.G.edge[n][i]['weight'] > cal_cost:
                    cal_cost = self.G.node[n]['eft'] + self.G.edge[n][i]['weight']
                    critical_parent = n
        if not critical_parent == -1:
            pcp.append(critical_parent)
            self.find_critical_path(critical_parent, pcp)
    def exec_time_sum(self, pcp, vm_type):
        """Total execution time of every task in `pcp` on VM type vm_type."""
        sum = 0
        for i in pcp:
            sum += self.p_table[vm_type, i]
        return sum
    #look forward one step when assigning a vm to a pcp how the est varies
    def est_vary(self, pcp, d):
        """Return how much the head of `pcp` could start earlier if its
        parent d ran on the same instance (dropping the transfer edge)."""
        head_pcp = pcp[len(pcp)-1]
        original_est = self.G.node[head_pcp]['est']
        biggest_est = -1
        biggest_parent = -1
        for parent in self.G.predecessors(head_pcp):
            if parent == d:
                est = self.G.node[parent]['eft']
            else:
                est = self.G.node[parent]['eft'] + self.G[parent][head_pcp]['weight']
            if biggest_est < est:
                biggest_est = est
                biggest_parent = parent
        return original_est-biggest_est
    #choose the best existing available instance for the pcp
    def choose_exist_instance(self, pcp):
        """Among launched instances whose first task is a parent of the head
        of `pcp`, pick the one maximizing saved cost without violating the
        head's LFT; updates its vm_end and returns it (or None)."""
        best_vm = None
        best_exec_time = -1
        best_vary_time = -1
        for vm in self.instances:
            head_pcp = pcp[len(pcp)-1]
            for parent in self.G.predecessors(head_pcp):
                head_exist_pcp = vm.task_list[0] #The last node of the previous critical path
                if parent == head_exist_pcp:
                    if best_vm == None:
                        best_vm = vm
                        exec_time = self.exec_time_sum(pcp, vm.vm_type)
                        best_exec_time = exec_time
                        best_vary_time = self.est_vary(pcp, head_exist_pcp)
                    else:
                        best_vm_head = vm.task_list[0]
                        # if assigned to the vm, what will the est be
                        exec_time = self.exec_time_sum(pcp, vm.vm_type)
                        # calculate the lft
                        varied_time = self.G.node[head_pcp]['est']-self.est_vary(pcp, head_exist_pcp)
                        lft = varied_time+exec_time
                        if (exec_time - self.est_vary(pcp, head_exist_pcp))*self.vm_price[vm.vm_type]> \
                           (best_exec_time - self.est_vary(pcp, best_vm.task_list[0]))*self.vm_price[best_vm.vm_type] \
                           and lft < self.G.node[head_pcp]['lft']: #also should not violate the lft
                            best_vm = vm
                            best_exec_time = exec_time
                            best_vary_time = varied_time
        if not best_vm == None:
            best_vm.vm_end = self.G.node[pcp[len(pcp)-1]]['est']-best_vary_time+best_exec_time
        return best_vm
    def assign_path(self, pcp):
        """Pick the cheapest VM type that runs every task of `pcp` without
        violating any LFT; record it in assigned_list and return one type
        index per task (-1s when infeasible)."""
        cheapest_vm = -1
        cheapest_sum = 9999999 #the initialized value should be a very large number
        for i in xrange(self.p_table.shape[0]): # use the the shape of the performance table to identify how many VM types are there
            violate_LFT = 0
            start = self.G.node[pcp[len(pcp)-1]]['est']
            cost_sum = 0
            for j in xrange(len(pcp)-1, -1, -1):
                cost_sum += self.vm_price[i]
                if j == len(pcp)-1:
                    start = start + self.p_table[i, pcp[j]]
                else:
                    start = start + self.p_table[i, pcp[j]]+ self.G[pcp[j+1]][pcp[j]]['weight']
                if self.G.node[pcp[j]]['lft'] < start:
                    violate_LFT = 1
            #launch a new instance of the cheapest service which can finish each task of P before its LFT
            if violate_LFT == 0 and cost_sum < cheapest_sum:
                cheapest_vm = i
                cheapest_sum = cost_sum
        for i in xrange(len(pcp)):
            self.assigned_list[pcp[i]] = cheapest_vm
        return [cheapest_vm]*len(pcp)
    def generate_string(self, node):
        """Human-readable summary of `node`: name, per-VM-type times, and
        the assigned VM as a 1-based label."""
        s = "name "+str(node)+"\n"
        for i in xrange(len(self.vm_price)):
            s = s+str(self.p_table[i, node])+"\n"
        s = s+"assigned vm: " + str(self.assigned_list[node]+1)
        return s
    def generateJSON(self):
        """Map each real task id (string) to a VM size label; entry/exit
        nodes and indices outside 0..2 are omitted."""
        content = {}
        for i in xrange(1, self.vertex_num-1, 1):
            if self.assigned_list[i] == 0:
                content[str(i)] = 'large'
            if self.assigned_list[i] == 1:
                content[str(i)] = 'Medium'
            if self.assigned_list[i] == 2:
                content[str(i)] = 'Small'
        return content
    #calculate the total execution cost
    def cal_cost(self):
        """Sum of costs accumulated on launched instances."""
        cost = 0
        for vm in self.instances:
            cost = cost + vm.cost
        return cost
    def cal_cost1(self):
        """Alternative total: per-task price of each assigned type,
        excluding the artificial entry/exit nodes."""
        cost = 0
        for i in range(1, len(self.assigned_list)-1):
            cost += self.vm_price[self.assigned_list[i]]
        return cost
    def has_edge_vm(self, vm1, vm2):
        """True when any task on vm1 shares a workflow edge (either
        direction) with any task on vm2."""
        for node1 in vm1.task_list:
            for node2 in vm2.task_list:
                if self.G.has_edge(node1, node2) or self.G.has_edge(node2, node1):
                    return True
        return False
class NewInstance(object):
    """A provisioned VM instance: its type, the time window it is active,
    the critical-path tasks placed on it, and its accumulated cost."""

    def __init__(self, vm_type, vm_start, vm_end, pcp):
        # Attribute names/layout are part of the scheduler's contract.
        self.vm_type = vm_type
        self.vm_start, self.vm_end = vm_start, vm_end
        self.task_list = pcp
        # Cost starts at zero; the scheduler adds one price unit per task.
        self.cost = 0
\ No newline at end of file
#!/usr/bin/env python
import pika
import networkx as nx
import sys
import numpy as np
import sys, argparse
import operator
import os
from toscaparser import *
from toscaparser.tosca_template import ToscaTemplate
import re
import getopt
from ICPCP import Workflow
import random
import time
import json
# Open a blocking AMQP connection to the RabbitMQ broker and declare the
# queue this planner serves. NOTE(review): the host is a hard-coded
# Docker-internal address — confirm it matches the deployed broker.
connection = pika.BlockingConnection(pika.ConnectionParameters(host='172.17.0.3'))
channel = connection.channel()
channel.queue_declare(queue='planner_queue')
def on_request(ch, method, props, body):
    """AMQP RPC callback: parse the planner request from `body`, build an
    IC-PCP workflow from the embedded TOSCA node templates, run the planner,
    and publish the resulting plan to the reply-to queue.

    `body` is JSON whose parameters[0].value is itself a JSON string holding
    the TOSCA topology_template.
    """
    parsed_message = json.loads(body)
    value = parsed_message.get('parameters')[0].get('value')
    parsed_value = json.loads(value)
    json1 = parsed_value.get('topology_template').get('node_templates')
    deadline = 0
    for j in json1:
        #print json[j]
        # every non-connection node carries the QoS deadline; the last one wins
        if not json1[j]['type'] == "Switch.nodes.Application.Connection":
            deadline = int(re.search(r'\d+', json1[j]['properties']['QoS']['response_time']).group())
    #get the nodes from the json
    nodeDic = {}
    nodeDic1 = {}
    i = 1
    for j in json1:
        if not json1[j]['type'] == "Switch.nodes.Application.Connection":
            print j, json1[j]
            nodeDic[j] = i
            nodeDic1[i] = j
            i = i + 1
    #get the links from the json
    links = []
    for j in json1:
        if json1[j]['type'] == "Switch.nodes.Application.Connection":
            print json1[j]['properties']['source']['component_name']
            print json1[j]['properties']['target']['component_name']
            link= {}
            link['source'] = nodeDic[json1[j]['properties']['source']['component_name']]
            link['target'] = nodeDic[json1[j]['properties']['target']['component_name']]
            # NOTE(review): edge weight is random — confirm this placeholder
            # is intentional until real transfer times are available.
            link['weight'] = random.randint(1, 10)
            links.append(link)
    # compose the json as input of the workflow
    wfJson = {}
    wfJson['workflow'] = {}
    nodesList = []
    sorted_nodeDic = sorted(nodeDic.items(), key=operator.itemgetter(1))
    for key, value in sorted_nodeDic:
        v = {}
        v['name'] = value
        nodesList.append(v)
    wfJson['workflow']['nodes'] = nodesList
    wfJson['workflow']['links'] = links
    #print deadline
    # hard-coded price vector and deadline key; performance rows are stubs
    wfJson['price'] = "5,2,1"
    wfJson['deadline'] = {'2': deadline}
    #generate performance
    performance = {}
    for key, value in sorted_nodeDic:
        performance[str(value)] = "1,2,3"
    wfJson['performance'] = performance
    print wfJson
    #send request to the server
    start = time.time()
    wf = Workflow()
    wf.init(wfJson)
    wf.ic_pcp()
    #print content['workflow']
    #return
    res = wf.generateJSON()
    end = time.time()
    print (end - start)
    # generate the json files in the corresponding format as the
    outcontent = {}
    current_milli_time = lambda: int(round(time.time() * 1000))
    outcontent["creationDate"] = current_milli_time()
    outcontent["parameters"] = []
    par1 = {}
    par1["url"] = 'null'
    par1["encoding"] = "UTF-8"
    par1["name"] = "plan"
    par1["value"] = res
    par1["attributes"] = 'null'
    outcontent["parameters"].append(par1)
    response = outcontent
    print(response)
    ch.basic_publish(exchange='',
                     routing_key=props.reply_to,
                     properties=pika.BasicProperties(correlation_id = \
                                                     props.correlation_id),
                     body=str(response))
    ch.basic_ack(delivery_tag = method.delivery_tag)
# Deliver one unacked message at a time, then block serving RPC requests.
channel.basic_qos(prefetch_count=1)
channel.basic_consume(on_request, queue='planner_queue')
print(" [x] Awaiting RPC requests")
channel.start_consuming()
\ No newline at end of file
#https://github.com/skoulouzis/DRIP/blob/package/doc/json_samples/kbExampleMessage.json
import networkx as nx
import sys
import numpy as np
import sys, argparse
import operator
import os
from toscaparser import *
from toscaparser.tosca_template import ToscaTemplate
import re
import getopt
from ICPCP import Workflow
import random
import time
import json
def main(argv):
    """CLI entry point: read a planner request JSON file (-w/--workflow),
    build an IC-PCP workflow from its TOSCA node templates, run the planner,
    and return the plan in the DRIP message envelope.

    NOTE(review): -s/--SDI is parsed into SDI_file but never used — confirm
    whether it is dead or pending implementation.
    """
    workflow_file = ""
    try:
        opts, args = getopt.getopt(argv,"hw:s:",["workflow=", "SDI="])
    except getopt.GetoptError:
        print 'server.py -w <workflowfile> -s <SDI>'
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-h':
            print 'server.py -w <workflowfile> -s <SDI>'
            sys.exit()
        elif opt in ("-w", "--workflow"):
            workflow_file = arg
        elif opt in ("-s", "--SDI"):
            SDI_file = arg
    data = {}
    print workflow_file
    with open(workflow_file) as data_file:
        data = json.load(data_file)
    #print data
    #path = "input.yaml"
    '''
    a_file = os.path.isfile(path)
    tosca = ToscaTemplate(path)
    #print tosca.tpl
    json = tosca.tpl.get('topology_template').get('node_templates')
    #print json
    '''
    json1 = data.get('parameters')[0].get('value').get('topology_template').get('node_templates')
    deadline = 0
    for j in json1:
        #print json[j]
        # every non-connection node carries the QoS deadline; the last one wins
        if not json1[j]['type'] == "Switch.nodes.Application.Connection":
            deadline = int(re.search(r'\d+', json1[j]['properties']['QoS']['response_time']).group())
    #get the nodes from the json
    nodeDic = {}
    nodeDic1 = {}
    i = 1
    for j in json1:
        if not json1[j]['type'] == "Switch.nodes.Application.Connection":
            print j, json1[j]
            nodeDic[j] = i
            nodeDic1[i] = j
            i = i + 1
    #get the links from the json
    links = []
    for j in json1:
        if json1[j]['type'] == "Switch.nodes.Application.Connection":
            print json1[j]['properties']['source']['component_name']
            print json1[j]['properties']['target']['component_name']
            link= {}
            link['source'] = nodeDic[json1[j]['properties']['source']['component_name']]
            link['target'] = nodeDic[json1[j]['properties']['target']['component_name']]
            # NOTE(review): placeholder random edge weight — confirm intent
            link['weight'] = random.randint(1, 10)
            links.append(link)
    # compose the json as input of the workflow
    wfJson = {}
    wfJson['workflow'] = {}
    nodesList = []
    sorted_nodeDic = sorted(nodeDic.items(), key=operator.itemgetter(1))
    for key, value in sorted_nodeDic:
        v = {}
        v['name'] = value
        nodesList.append(v)
    wfJson['workflow']['nodes'] = nodesList
    wfJson['workflow']['links'] = links
    #print deadline
    # hard-coded price vector / deadline key; performance rows are stubs
    wfJson['price'] = "5,2,1"
    wfJson['deadline'] = {'2': deadline}
    #generate performance
    performance = {}
    for key, value in sorted_nodeDic:
        performance[str(value)] = "1,2,3"
    wfJson['performance'] = performance
    print wfJson
    #send request to the server
    start = time.time()
    wf = Workflow()
    wf.init(wfJson)
    wf.ic_pcp()
    #print content['workflow']
    #return
    res = wf.generateJSON()
    end = time.time()
    print (end - start)
    # generate the json files in the corresponding format as the
    outcontent = {}
    outcontent["creationDate"] = 1487002029722
    outcontent["parameters"] = []
    par1 = {}
    par1["url"] = "null"
    par1["encoding"] = "UTF-8"
    par1["value"] = res
    par1["attributes"] = "null"
    outcontent["parameters"].append(par1)
    return outcontent
# Script entry point: forward command-line flags (minus the program name).
if __name__ == '__main__':
    main(sys.argv[1:])
\ No newline at end of file
#!/usr/bin/env python
import pika
import random
import time
import json
# Connect once to the RabbitMQ broker and declare the RPC queue.
# A diff-merge artifact left two BlockingConnection calls to different hosts
# here; the first connection (172.17.0.2) was opened and then leaked when the
# second assignment rebound `connection`. Keep only the final-state host.
connection = pika.BlockingConnection(pika.ConnectionParameters(host='172.17.0.3'))
channel = connection.channel()
channel.queue_declare(queue='planner_queue')
def handleDelivery(message):
    """Parse a planner request message and return its parameters.

    `message` is a JSON string shaped like
    {"parameters": [{"name": ..., "value": ...}, ...]}.

    Returns a dict mapping each parameter name to its value. The original
    code computed the names and values and discarded them; returning the
    mapping makes the function useful and testable while staying backward
    compatible (existing callers ignore the return value).

    Raises ValueError on malformed JSON and KeyError when the expected
    "parameters"/"name"/"value" keys are missing.
    """
    parsed_json = json.loads(message)
    params = parsed_json["parameters"]
    result = {}
    for param in params:
        result[param["name"]] = param["value"]
    return result
def on_request(ch, method, props, body):
handleDelivery(body)
parsed_message = json.loads(body)
response = consume_message(parsed_message)
print(" Message %s" % body)
response = "AAAAAAAAAAAAAAAAAAAAAA"
ch.basic_publish(exchange='',
routing_key=props.reply_to,
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment