Commit cbe0c324 authored by Spiros Koulouzis's avatar Spiros Koulouzis

Changed the default disk size. Added disk size to the CloudsStorm VM.

parent 0662df0a
......@@ -29,15 +29,15 @@ topology_template:
repository: docker_hub
#topology:
#type: tosca.nodes.ARTICONF.VM.topology
#interfaces:
#CloudsStorm:
#provision:
#inputs:
#code_type: SEQ
#object_type: SubTopology
#properties:
#domain: UvA (Amsterdam, The Netherlands) XO Rack
#provider: ExoGENI
topology:
type: tosca.nodes.ARTICONF.VM.topology
interfaces:
CloudsStorm:
provision:
inputs:
code_type: SEQ
object_type: SubTopology
properties:
domain: UvA (Amsterdam, The Netherlands) XO Rack
provider: ExoGENI
......@@ -179,7 +179,7 @@ node_types:
disk_size:
type: scalar-unit.size
required: true
default: 40000 MB
default: 25000 MB
constraints:
- greater_or_equal: 15000 MB
mem_size:
......
......@@ -116,10 +116,10 @@ services:
ports:
- "30000:8080"
#sure-tosca:
#image: sure-tosca:3.0.0
#ports:
#- "8081:8081"
sure-tosca:
image: sure-tosca:3.0.0
ports:
- "8081:8081"
planner:
depends_on:
......@@ -129,14 +129,14 @@ services:
environment:
RABBITMQ_HOST: rabbit
provisioner:
depends_on:
- rabbit
- sure-tosca
image: provisioner:3.0.0
environment:
RABBITMQ_HOST: rabbit
SURE_TOSCA_BASE_PATH: http://sure-tosca:8081/tosca-sure/1.0.0
#provisioner:
#depends_on:
#- rabbit
#- sure-tosca
#image: provisioner:3.0.0
#environment:
#RABBITMQ_HOST: rabbit
#SURE_TOSCA_BASE_PATH: http://sure-tosca:8081/tosca-sure/1.0.0
deployer:
depends_on:
......
......@@ -16,8 +16,10 @@ import javax.validation.Valid;
import javax.servlet.http.HttpServletRequest;
import java.io.IOException;
import java.util.List;
import java.util.concurrent.TimeoutException;
import java.util.logging.Level;
import nl.uva.sne.drip.model.Exceptions.TypeExeption;
import nl.uva.sne.drip.service.DRIPService;
import nl.uva.sne.drip.service.ToscaTemplateService;
import nl.uva.sne.drip.sure.tosca.client.ApiException;
import org.springframework.beans.factory.annotation.Autowired;
......@@ -34,6 +36,9 @@ public class ToscaTemplateApiController implements ToscaTemplateApi {
@Autowired
private ToscaTemplateService toscaTemplateService;
@Autowired
private DRIPService dripService;
@org.springframework.beans.factory.annotation.Autowired
public ToscaTemplateApiController(ObjectMapper objectMapper, HttpServletRequest request) {
this.request = request;
......@@ -44,9 +49,9 @@ public class ToscaTemplateApiController implements ToscaTemplateApi {
String accept = request.getHeader("Accept");
if (accept != null && accept.contains("*/*")) {
try {
toscaTemplateService.deleteByID(id);
return new ResponseEntity<>("", HttpStatus.OK);
} catch (NotFoundException | IOException | ApiException | TypeExeption ex) {
String deleteedYemplateId = dripService.delete(id);
return new ResponseEntity<>(deleteedYemplateId, HttpStatus.OK);
} catch (NotFoundException | IOException | ApiException | TypeExeption | TimeoutException | InterruptedException ex) {
java.util.logging.Logger.getLogger(ToscaTemplateApiController.class.getName()).log(Level.SEVERE, null, ex);
return new ResponseEntity<>(HttpStatus.INTERNAL_SERVER_ERROR);
}
......
......@@ -61,7 +61,6 @@ public class DRIPService {
}
private String execute(ToscaTemplate toscaTemplate) throws JsonProcessingException, ApiException, IOException, TimeoutException, InterruptedException {
caller.init();
Logger.getLogger(DRIPService.class.getName()).log(Level.INFO, "toscaTemplate:\n{0}", toscaTemplate);
Message message = new Message();
......@@ -172,19 +171,19 @@ public class DRIPService {
return toscaTemplate;
}
void deleteActions(ToscaTemplate toscaTemplate) throws ApiException, TypeExeption, IOException {
helper.uploadToscaTemplate(toscaTemplate);
/**
 * Deletes the cloud resources (VMs) described by the stored ToscaTemplate
 * with the given id. For every VM topology in the template that is not
 * already in the DELETED state, the provisioner operation is set to DELETE
 * and the template is handed to {@code execute(...)}.
 *
 * @param id the id of the stored ToscaTemplate to delete resources for
 * @return the id returned by {@code execute(...)}, or {@code null} when the
 *         template contains no VM topologies at all
 * @throws NotFoundException if no template with the given id exists
 */
public String delete(String id) throws NotFoundException, IOException, JsonProcessingException, ApiException, TypeExeption, TimeoutException, InterruptedException {
    ToscaTemplate toscaTemplate = initExecution(id);
    List<NodeTemplateMap> vmTopologies = helper.getVMTopologyTemplates();
    if (vmTopologies != null) {
        for (NodeTemplateMap vmTopology : vmTopologies) {
            CloudsStormSubTopology.StatusEnum status = helper.getVMTopologyTemplateStatus(vmTopology);
            if (!status.equals(CloudsStormSubTopology.StatusEnum.DELETED)) {
                // Fix: log under this class (original logged under ToscaHelper) and use
                // the parameterized form instead of string concatenation, matching the
                // Logger usage elsewhere in DRIPService.
                Logger.getLogger(this.getClass().getName()).log(Level.FINE, "Deleting VMs from {0}", vmTopology);
                toscaTemplate = setProvisionerOperation(toscaTemplate, PROVISIONER_OPERATION.DELETE);
            }
        }
        // NOTE(review): execute() is invoked even when every topology was already
        // DELETED — presumably a no-op for the provisioner, but confirm.
        return execute(toscaTemplate);
    }
    return null;
}
}
......@@ -44,8 +44,7 @@ public class ToscaTemplateService {
@Autowired
private ToscaTemplateDAO dao;
@Autowired
private DRIPService dripService;
public String save(ToscaTemplate tt) {
dao.save(tt);
......@@ -85,7 +84,6 @@ public class ToscaTemplateService {
/**
 * Deletes the stored ToscaTemplate with the given id: first runs the delete
 * actions for the resources it describes, then removes it from the store.
 *
 * @param id the id of the ToscaTemplate to delete
 * @throws NotFoundException if no template with the given id exists
 */
// NOTE(review): this commit appears to remove the dripService.deleteActions(...)
// call (delete logic moved to DRIPService.delete) — confirm against the applied diff.
public void deleteByID(String id) throws JsonProcessingException, NotFoundException, IOException, ApiException, TypeExeption {
// Parse the stored YAML back into a ToscaTemplate before acting on it.
ToscaTemplate toscaTemplate = getYaml2ToscaTemplate(findByID(id));
dripService.deleteActions(toscaTemplate);
dao.deleteById(id);
}
......
......@@ -206,20 +206,27 @@ class CloudStormService {
}
private CloudsStormVM getBestMatchingCloudStormVM(NodeTemplateMap vmMap, String provider) throws IOException, Exception {
Double numOfCores = helper.getVMNumOfCores(vmMap);
Double memSize = helper.getVMNMemSize(vmMap);
String os = helper.getVMNOS(vmMap);
Double diskSize = helper.getVMNDiskSize(vmMap);
double[] requestedVector = convert2ArrayofDoubles(numOfCores, memSize, diskSize);
Double requestedNumOfCores = helper.getVMNumOfCores(vmMap);
Double requestedMemSize = helper.getVMNMemSize(vmMap);
String requestedOs = helper.getVMNOS(vmMap);
Double requestedDiskSize = helper.getVMNDiskSize(vmMap);
double[] requestedVector = convert2ArrayofDoubles(requestedNumOfCores, requestedMemSize, requestedDiskSize);
double min = Double.MAX_VALUE;
CloudsStormVM bestMatchingVM = null;
List<CloudsStormVM> vmInfos = cloudStormDAO.findVmMetaInfoByProvider(CloudProviderEnum.fromValue(provider));
for (CloudsStormVM vmInfo : vmInfos) {
if (os.toLowerCase().equals(vmInfo.getOstype().toLowerCase())) {
Double cloudsStormVMdiskSize = null;
if (requestedOs.toLowerCase().equals(vmInfo.getOstype().toLowerCase())) {
Double cloudsStormVMdiskSize;
if (vmInfo.getDiskSize() == null) {
cloudsStormVMdiskSize = Double.valueOf(7.0);
if (vmInfo.getExtraInfo() != null && vmInfo.getExtraInfo().containsKey("DiskSize")) {
int intSize = (int) vmInfo.getExtraInfo().get("DiskSize");
cloudsStormVMdiskSize = Double.valueOf(intSize);
vmInfo.setDiskSize(intSize);
} else {
cloudsStormVMdiskSize = 7.0;
}
} else {
cloudsStormVMdiskSize = Double.valueOf(vmInfo.getDiskSize());
}
......@@ -232,8 +239,9 @@ class CloudStormService {
}
}
}
if (bestMatchingVM != null && bestMatchingVM.getDiskSize() == null){
bestMatchingVM.setDiskSize(diskSize.intValue());
if (bestMatchingVM != null && bestMatchingVM.getDiskSize() == null
&& bestMatchingVM.getExtraInfo() == null && !bestMatchingVM.getExtraInfo().containsKey("DiskSize")) {
bestMatchingVM.setDiskSize(requestedDiskSize.intValue());
}
Logger.getLogger(CloudStormService.class.getName()).log(Level.INFO, "Found best matching VM: {0}", bestMatchingVM);
return bestMatchingVM;
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment