Commit 460256ee authored by Spiros Koulouzis's avatar Spiros Koulouzis

added disk size from vms

parent 8de510db
...@@ -26,17 +26,18 @@ topology_template: ...@@ -26,17 +26,18 @@ topology_template:
image: image:
type: tosca.artifacts.Deployment.Image.Container.Docker type: tosca.artifacts.Deployment.Image.Container.Docker
file: alogo53/ws-pema-lifewatch file: alogo53/ws-pema-lifewatch
repository: docker_hub repository: docker_hub
topology: #topology:
type: tosca.nodes.ARTICONF.VM.topology #type: tosca.nodes.ARTICONF.VM.topology
interfaces: #interfaces:
CloudsStorm: #CloudsStorm:
provision: #provision:
inputs: #inputs:
code_type: SEQ #code_type: SEQ
object_type: SubTopology #object_type: SubTopology
properties: #properties:
domain: UvA (Amsterdam, The Netherlands) XO Rack #domain: UvA (Amsterdam, The Netherlands) XO Rack
provider: ExoGENI #provider: ExoGENI
...@@ -179,7 +179,7 @@ node_types: ...@@ -179,7 +179,7 @@ node_types:
disk_size: disk_size:
type: scalar-unit.size type: scalar-unit.size
required: true required: true
default: 50000 MB default: 20000 MB
constraints: constraints:
- greater_or_equal: 15000 MB - greater_or_equal: 15000 MB
mem_size: mem_size:
......
...@@ -53,6 +53,7 @@ public class ToscaHelper { ...@@ -53,6 +53,7 @@ public class ToscaHelper {
private static final String VM_TYPE = "tosca.nodes.ARTICONF.VM.Compute"; private static final String VM_TYPE = "tosca.nodes.ARTICONF.VM.Compute";
private static final String VM_NUM_OF_CORES = "num_cores"; private static final String VM_NUM_OF_CORES = "num_cores";
private static final String MEM_SIZE = "mem_size"; private static final String MEM_SIZE = "mem_size";
private static final String DISK_SIZE = "disk_size";
private static final String VM_OS = "os"; private static final String VM_OS = "os";
private static final String VM_TOPOLOGY = "tosca.nodes.ARTICONF.VM.topology"; private static final String VM_TOPOLOGY = "tosca.nodes.ARTICONF.VM.topology";
private Integer id; private Integer id;
...@@ -147,6 +148,21 @@ public class ToscaHelper { ...@@ -147,6 +148,21 @@ public class ToscaHelper {
} }
/**
 * Returns the requested disk size, in GB, of a VM node template.
 *
 * <p>The {@code disk_size} property is a TOSCA {@code scalar-unit.size}
 * string such as {@code "20000 MB"}; it is parsed as
 * {@code "<number> <unit>"} and converted via {@code convertToGB}.
 *
 * @param vmMap the node-template map expected to wrap a node of type
 *              {@code tosca.nodes.ARTICONF.VM.Compute}
 * @return the disk size converted to GB
 * @throws Exception if the node is not of the VM compute type, if the
 *                   {@code disk_size} property is missing, or if the
 *                   scalar string is malformed
 */
public Double getVMNDiskSize(NodeTemplateMap vmMap) throws Exception {
    NodeTemplate vm = vmMap.getNodeTemplate();
    if (vm.getType().equals(VM_TYPE)) {
        // Fail with a clear message instead of an NPE when the property is absent.
        String diskScalar = (String) vm.getProperties().get(DISK_SIZE);
        if (diskScalar == null) {
            throw new Exception("NodeTemplate of type: " + VM_TYPE + " has no '" + DISK_SIZE + "' property");
        }
        // Tolerate extra/irregular whitespace between the number and the unit.
        String[] diskScalarArray = diskScalar.trim().split("\\s+");
        if (diskScalarArray.length < 2) {
            throw new Exception("Malformed '" + DISK_SIZE + "' value: '" + diskScalar
                    + "'. Expected '<number> <unit>' e.g. '20000 MB'");
        }
        String diskSize = diskScalarArray[0];
        String diskUnit = diskScalarArray[1];
        return convertToGB(Integer.valueOf(diskSize), diskUnit);
    } else {
        throw new Exception("NodeTemplate is not of type: " + VM_TYPE + " it is of type: " + vm.getType());
    }
}
public String getVMNOS(NodeTemplateMap vmMap) throws Exception { public String getVMNOS(NodeTemplateMap vmMap) throws Exception {
NodeTemplate vm = vmMap.getNodeTemplate(); NodeTemplate vm = vmMap.getNodeTemplate();
if (vm.getType().equals(VM_TYPE)) { if (vm.getType().equals(VM_TYPE)) {
...@@ -244,4 +260,5 @@ public class ToscaHelper { ...@@ -244,4 +260,5 @@ public class ToscaHelper {
} }
return "vm_user"; return "vm_user";
} }
} }
...@@ -34,6 +34,9 @@ def write_inventory_file(tmp_path, vms): ...@@ -34,6 +34,9 @@ def write_inventory_file(tmp_path, vms):
for vm_name in vms: for vm_name in vms:
attributes = vms[vm_name]['attributes'] attributes = vms[vm_name]['attributes']
role = attributes['role'] role = attributes['role']
if 'public_ip' not in attributes:
raise ValueError('VM: ' + vm_name + ' has no public_ip attribute')
if role == 'master': if role == 'master':
k8_master = attributes['public_ip'] k8_master = attributes['public_ip']
else: else:
......
...@@ -100,7 +100,7 @@ services: ...@@ -100,7 +100,7 @@ services:
#jupyter: #jupyter:
#ports: #ports:
#- "30003:8888" #- "30003:8888"
#image: jupyter/base-notebook #image: jupyter_base-notebook
manager: manager:
...@@ -129,22 +129,22 @@ services: ...@@ -129,22 +129,22 @@ services:
environment: environment:
RABBITMQ_HOST: rabbit RABBITMQ_HOST: rabbit
provisioner: #provisioner:
depends_on:
- rabbit
- sure-tosca
image: alogo53/provisioner:3.0.0
environment:
RABBITMQ_HOST: rabbit
SURE_TOSCA_BASE_PATH: http://sure-tosca:8081/tosca-sure/1.0.0
#deployer:
#depends_on: #depends_on:
#- rabbit #- rabbit
#- sure-tosca #- sure-tosca
#image: alogo53/deployer:3.0.0 #image: alogo53/provisioner:3.0.0
#environment: #environment:
#RABBITMQ_HOST: rabbit #RABBITMQ_HOST: rabbit
#SURE_TOSCA_BASE_PATH: http://sure-tosca:8081/tosca-sure/1.0.0
deployer:
depends_on:
- rabbit
- sure-tosca
image: alogo53/deployer:3.0.0
environment:
RABBITMQ_HOST: rabbit
#volumes: #volumes:
......
...@@ -3,7 +3,6 @@ package nl.uva.sne.drip.api; ...@@ -3,7 +3,6 @@ package nl.uva.sne.drip.api;
import nl.uva.sne.drip.model.tosca.Credential; import nl.uva.sne.drip.model.tosca.Credential;
import com.fasterxml.jackson.databind.ObjectMapper; import com.fasterxml.jackson.databind.ObjectMapper;
import io.swagger.annotations.*; import io.swagger.annotations.*;
import java.io.IOException;
import org.slf4j.Logger; import org.slf4j.Logger;
import org.slf4j.LoggerFactory; import org.slf4j.LoggerFactory;
import org.springframework.http.HttpStatus; import org.springframework.http.HttpStatus;
...@@ -13,11 +12,8 @@ import org.springframework.web.bind.annotation.RequestBody; ...@@ -13,11 +12,8 @@ import org.springframework.web.bind.annotation.RequestBody;
import javax.validation.Valid; import javax.validation.Valid;
import javax.servlet.http.HttpServletRequest; import javax.servlet.http.HttpServletRequest;
import java.util.List; import java.util.List;
import nl.uva.sne.drip.commons.utils.Converter;
import nl.uva.sne.drip.service.CredentialService; import nl.uva.sne.drip.service.CredentialService;
import org.springframework.beans.factory.annotation.Autowired; import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.RequestPart;
import org.springframework.web.multipart.MultipartFile;
@javax.annotation.Generated(value = "io.swagger.codegen.languages.SpringCodegen", date = "2019-10-10T17:15:46.465Z") @javax.annotation.Generated(value = "io.swagger.codegen.languages.SpringCodegen", date = "2019-10-10T17:15:46.465Z")
......
...@@ -3,5 +3,5 @@ pika==1.1.0 ...@@ -3,5 +3,5 @@ pika==1.1.0
names==0.3.0 names==0.3.0
networkx==2.4 networkx==2.4
pyyaml==5.3 pyyaml==5.3
tosca-parser ==1.7.0 tosca-parser==1.7.0
matplotlib==3.1.2 matplotlib==3.1.2
\ No newline at end of file
...@@ -164,7 +164,6 @@ DCMetaInfo: ...@@ -164,7 +164,6 @@ DCMetaInfo:
AMI: "ami-2581aa40" AMI: "ami-2581aa40"
- domain: "Frankfurt" - domain: "Frankfurt"
endpoint: "ec2.eu-central-1.amazonaws.com" endpoint: "ec2.eu-central-1.amazonaws.com"
country: Germany country: Germany
......
...@@ -209,14 +209,21 @@ class CloudStormService { ...@@ -209,14 +209,21 @@ class CloudStormService {
Double numOfCores = helper.getVMNumOfCores(vmMap); Double numOfCores = helper.getVMNumOfCores(vmMap);
Double memSize = helper.getVMNMemSize(vmMap); Double memSize = helper.getVMNMemSize(vmMap);
String os = helper.getVMNOS(vmMap); String os = helper.getVMNOS(vmMap);
double[] requestedVector = convert2ArrayofDoubles(numOfCores, memSize); Double diskSize = helper.getVMNDiskSize(vmMap);
double[] requestedVector = convert2ArrayofDoubles(numOfCores, memSize, diskSize);
double min = Double.MAX_VALUE; double min = Double.MAX_VALUE;
CloudsStormVM bestMatchingVM = null; CloudsStormVM bestMatchingVM = null;
List<CloudsStormVM> vmInfos = cloudStormDAO.findVmMetaInfoByProvider(CloudProviderEnum.fromValue(provider)); List<CloudsStormVM> vmInfos = cloudStormDAO.findVmMetaInfoByProvider(CloudProviderEnum.fromValue(provider));
for (CloudsStormVM vmInfo : vmInfos) { for (CloudsStormVM vmInfo : vmInfos) {
if (os.toLowerCase().equals(vmInfo.getOstype().toLowerCase())) { if (os.toLowerCase().equals(vmInfo.getOstype().toLowerCase())) {
double[] aveliableVector = convert2ArrayofDoubles(Double.valueOf(vmInfo.getCPU()), Double.valueOf(vmInfo.getMEM())); Double cloudsStormVMdiskSize = null;
if (vmInfo.getDiskSize() == null) {
cloudsStormVMdiskSize = Double.valueOf(7.0);
} else {
cloudsStormVMdiskSize = Double.valueOf(vmInfo.getDiskSize());
}
double[] aveliableVector = convert2ArrayofDoubles(Double.valueOf(vmInfo.getCPU()), Double.valueOf(vmInfo.getMEM()), cloudsStormVMdiskSize);
EuclideanDistance dist = new EuclideanDistance(); EuclideanDistance dist = new EuclideanDistance();
double res = dist.compute(requestedVector, aveliableVector); double res = dist.compute(requestedVector, aveliableVector);
if (res < min) { if (res < min) {
...@@ -225,7 +232,10 @@ class CloudStormService { ...@@ -225,7 +232,10 @@ class CloudStormService {
} }
} }
} }
Logger.getLogger(CloudStormService.class.getName()).log(Level.INFO, "Found best matching VM: " + bestMatchingVM); if (bestMatchingVM != null && bestMatchingVM.getDiskSize() == null){
bestMatchingVM.setDiskSize(diskSize.intValue());
}
Logger.getLogger(CloudStormService.class.getName()).log(Level.INFO, "Found best matching VM: {0}", bestMatchingVM);
return bestMatchingVM; return bestMatchingVM;
} }
...@@ -383,8 +393,8 @@ class CloudStormService { ...@@ -383,8 +393,8 @@ class CloudStormService {
return toscaTemplate; return toscaTemplate;
} }
private double[] convert2ArrayofDoubles(Double numOfCores, Double memSize) { private double[] convert2ArrayofDoubles(Double numOfCores, Double memSize, Double diskSize) {
double[] vector = new double[]{numOfCores, memSize}; double[] vector = new double[]{numOfCores, memSize, diskSize};
return vector; return vector;
} }
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment