Commit 8afa984c authored by Spiros Koulouzis

Add artifacts to be able to carry the CloudStorm information

parent 2bbc0d82
tosca_definitions_version: tosca_simple_yaml_1_0
artifacts:
  tosca.datatypes.ARTICONF.CloudStorm.Deployment:
    derived_from: tosca.artifacts.Deployment
    file_contents:
      type: string
      required: true
    encoding:
      type: string
      required: true
    file_ext:
      type: string
      required: false
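The new artifact type carries the generated CloudStorm files back on the node template as an encoded blob. A minimal sketch, assuming the plain Map-based node-template representation used further down in this commit (note that the Java code later tags the entry with the ENCODED_FILE_DATATYPE constant rather than this artifact type name):

// Hedged sketch: building an artifact entry with the fields defined above.
Map<String, Object> artifacts = new HashMap<>();            // node template artifacts map
Map<String, String> provisionedFiles = new HashMap<>();
String encodedZip = "UEsDBBQ...";                           // hypothetical base64 payload
provisionedFiles.put("type", "tosca.datatypes.ARTICONF.encodedFile"); // ENCODED_FILE_DATATYPE
provisionedFiles.put("file_contents", encodedZip);          // required
provisionedFiles.put("encoding", "base64");                 // required
provisionedFiles.put("file_ext", "zip");                    // optional
artifacts.put("provisioned_files", provisionedFiles);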
tosca_definitions_version: tosca_simple_yaml_1_0
data_types:
  tosca.datatypes.ARTICONF.Credential:
    derived_from: tosca.datatypes.Credential
    properties:
......
......@@ -50,13 +50,13 @@ interface_types:
description: Upload some input data or files to the remote resources. ObjectType currently can only be VM.
get:
description: Download or retrieve output results or files from the remote resources. ObjectType currently can only be VM.
vscale:
v_scale:
description: Vertically scale up or down to adapt the computing capability of the infrastructure by directly changing the capacity of some VM. ObjectType can only be VM and REQ.
hscale:
h_scale:
description: Horizontally scale out or in to adapt the computing capability of the infrastructure by changing the number of VMs. ObjectType can be SubTopology, VM or REQ.
recover:
description: Recover some failed sub-topologies. ObjectType can be SubTopology or REQ.
provision:
run:
description: Provision the defined objects (resources). ObjectType can be SubTopology or VM.
......
......@@ -13,6 +13,7 @@ node_types:
type: string
required: false
description: the current state of the node
tosca.nodes.ARTICONF.Container.Application.Docker:
derived_from: tosca.nodes.ARTICONF.Application
......@@ -164,7 +165,7 @@ node_types:
delete:
inputs:
code_type: SEQ
object_type: SubTopology
object_type: SubTopology
hscale:
inputs:
code_type: SEQ
......
......@@ -29,4 +29,6 @@ public class Constatnts {
public static final String VM_OS = "os";
public static final String VM_TOPOLOGY = "tosca.nodes.ARTICONF.VM.topology";
public static final String CLOUD_STORM_INTERFACE = "tosca.interfaces.ARTICONF.CloudsStorm";
public static final String ENCODED_FILE_DATATYPE = "tosca.datatypes.ARTICONF.encodedFile";
}
......@@ -15,16 +15,24 @@
*/
package nl.uva.sne.drip.commons.utils;
import java.io.FileNotFoundException;
import java.io.FileOutputStream;
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.nio.file.FileVisitResult;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.SimpleFileVisitor;
import java.nio.file.attribute.BasicFileAttributes;
import java.security.MessageDigest;
import java.security.NoSuchAlgorithmException;
import java.util.Base64;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
import java.util.zip.ZipEntry;
import java.util.zip.ZipOutputStream;
import org.json.JSONException;
import org.json.JSONObject;
import org.springframework.web.multipart.MultipartFile;
......@@ -92,4 +100,17 @@ public class Converter {
return new String(encodedBytes, StandardCharsets.UTF_8);
}
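// Recursively zips every regular file under sourceFolderPath into zipPath, using the path
// relative to sourceFolderPath as the entry name (directories themselves are not written as entries).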
public static void zipFolder(Path sourceFolderPath, Path zipPath) throws FileNotFoundException, IOException {
try (ZipOutputStream zos = new ZipOutputStream(new FileOutputStream(zipPath.toFile()))) {
Files.walkFileTree(sourceFolderPath, new SimpleFileVisitor<Path>() {
@Override
public FileVisitResult visitFile(Path file, BasicFileAttributes attrs) throws IOException {
zos.putNextEntry(new ZipEntry(sourceFolderPath.relativize(file).toString()));
Files.copy(file, zos);
zos.closeEntry();
return FileVisitResult.CONTINUE;
}
});
}
}
}
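A short usage sketch of the new helper, assuming hypothetical paths; this mirrors how CloudStormService packages the CloudStorm working directory later in this commit:

// Hedged sketch: zip a folder and base64-encode the archive.
Path sourceFolderPath = Paths.get("/tmp/Input-example/Infs/Topology");   // hypothetical path
Path zipPath = Paths.get("/tmp/Input-example/Infs/Topology.zip");        // hypothetical path
Converter.zipFolder(sourceFolderPath, zipPath);
String contentsAsBase64 = Converter.encodeFileToBase64Binary(zipPath.toFile().getAbsolutePath());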
......@@ -48,6 +48,7 @@ import nl.uva.sne.drip.sure.tosca.client.Configuration;
import org.springframework.beans.factory.annotation.Autowired;
import static nl.uva.sne.drip.commons.utils.Constatnts.*;
import nl.uva.sne.drip.model.cloud.storm.CloudsStormSubTopology.StatusEnum;
import nl.uva.sne.drip.model.cloud.storm.OpCode;
/**
*
......@@ -306,7 +307,7 @@ public class ToscaHelper {
public NodeTemplateMap setNodeCurrentState(NodeTemplateMap node, NODE_STATES nodeState) {
return setNodeState(node, "current_state", nodeState);
}
public NodeTemplateMap setNodeDesiredState(NodeTemplateMap node, NODE_STATES nodeState) {
return setNodeState(node, "desired_state", nodeState);
}
......@@ -334,7 +335,7 @@ public class ToscaHelper {
}
public static NODE_STATES cloudStormStatus2NodeState(StatusEnum cloudStormStatus) {
if(cloudStormStatus.equals(StatusEnum.FRESH)){
if (cloudStormStatus.equals(StatusEnum.FRESH)) {
return null;
}
String cloudStormStatusStr = cloudStormStatus.toString().toUpperCase();
......@@ -363,4 +364,48 @@ public class ToscaHelper {
return null;
}
public static OpCode.OperationEnum NodeDesiredState2CloudStormOperation(NODE_STATES nodeDesiredState) {
switch (nodeDesiredState) {
case RUNNING:
return OpCode.OperationEnum.PROVISION;
case DELETED:
return OpCode.OperationEnum.DELETE;
case STARTED:
return OpCode.OperationEnum.START;
case STOPPED:
return OpCode.OperationEnum.STOP;
case H_SCALED:
return OpCode.OperationEnum.HSCALE;
case V_SCALED:
return OpCode.OperationEnum.VSCALE;
default:
return null;
}
}
public static StatusEnum nodeCurrentState2CloudStormStatus(NODE_STATES currentState) {
if (currentState == null) {
return StatusEnum.FRESH;
}
switch (currentState) {
case RUNNING:
return StatusEnum.RUNNING;
case DELETED:
return StatusEnum.DELETED;
case STARTED:
return StatusEnum.RUNNING;
case STOPPED:
return StatusEnum.STOPPED;
case H_SCALED:
return StatusEnum.RUNNING;
case V_SCALED:
return StatusEnum.RUNNING;
case FAILED:
return StatusEnum.FAILED;
default:
return null;
}
}
}
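A short sketch of how the two new mappings fit together; the method and enum names are taken from this diff, the surrounding wiring is assumed:

// Hedged sketch: translate a TOSCA desired state into a CloudStorm operation, and a
// missing current state into the FRESH status.
ToscaHelper.NODE_STATES desired = ToscaHelper.NODE_STATES.RUNNING;
OpCode.OperationEnum operation = ToscaHelper.NodeDesiredState2CloudStormOperation(desired);      // PROVISION
CloudsStormSubTopology.StatusEnum status = ToscaHelper.nodeCurrentState2CloudStormStatus(null);  // FRESH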
......@@ -81,7 +81,6 @@ public class CloudsStormSubTopology {
this.topology = topology;
return this;
}
/**
* Get topology
*
......
......@@ -28,7 +28,8 @@ public class OpCode {
VSCALE("vscale"),
HSCALE("hscale"),
RECOVER("recover"),
START("start");
START("start"),
STOP("stop");
private String value;
......
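With STOP added, an operation code for stopping a sub-topology can be built the same way the provisioner builds the other codes; a sketch with a hypothetical sub-topology name:

// Hedged sketch: an OpCode that stops one sub-topology.
OpCode opCode = new OpCode();
opCode.setLog(Boolean.FALSE);
opCode.setObjectType(OpCode.ObjectTypeEnum.SUBTOPOLOGY);
opCode.setObjects("vm_topology0");                 // hypothetical sub-topology name
opCode.setOperation(OpCode.OperationEnum.STOP);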
{"owner":"user","creationDate":1584616813246,"toscaTemplate":{"tosca_definitions_version":"tosca_simple_yaml_1_0","tosca_default_namespace":null,"template_name":null,"topology_template":{"description":null,"inputs":null,"node_templates":{"compute":{"properties":{"disk_size":"10000 MB","mem_size":"1000 MB","num_cores":1,"os":"Ubuntu 18.04","user_name":"vm_user"},"interfaces":{"Standard":{"create":"dumy.yaml"}},"type":"tosca.nodes.ARTICONF.VM.Compute"},"compute_1":{"properties":{"disk_size":"10000 MB","mem_size":"1000 MB","num_cores":1,"os":"Ubuntu 18.04","user_name":"vm_user"},"interfaces":{"Standard":{"create":"dumy.yaml"}},"type":"tosca.nodes.ARTICONF.VM.Compute"},"kubernetes":{"requirements":[{"host":{"capability":"tosca.capabilities.ARTICONF.VM.topology","node":"topology","relationship":"tosca.relationships.HostedOn"}}],"interfaces":{"Kubernetes":{"configure":{"inputs":{"playbook":"https://raw.githubusercontent.com/skoulouzis/CONF/develop/ansible_playbooks/dashboard.yaml"}},"create":{"inputs":{"playbook":"https://raw.githubusercontent.com/skoulouzis/CONF/develop/ansible_playbooks/create_k8s.yml"}},"install":{"inputs":{"playbook":"https://raw.githubusercontent.com/skoulouzis/CONF/develop/ansible_playbooks/install_k8s.yml"}}}},"type":"tosca.nodes.ARTICONF.docker.Orchestrator.Kubernetes"},"topology":{"properties":{"domain":"Frankfurt","provider":"EC2"},"requirements":[{"vm":{"capability":"tosca.capabilities.ARTICONF.VM","node":"compute","relationship":"tosca.relationships.DependsOn"}},{"vm":{"capability":"tosca.capabilities.ARTICONF.VM","node":"compute_1","relationship":"tosca.relationships.DependsOn"}}],"interfaces":{"CloudsStorm":{"delete":{"inputs":{"code_type":"SEQ","object_type":"SubTopology"}},"hscale":{"inputs":{"code_type":"SEQ","object_type":"SubTopology"}},"provision":{"inputs":{"code_type":"SEQ","object_type":"SubTopology"}},"start":{"inputs":{"code_type":"SEQ","object_type":"SubTopology"}},"stop":{"inputs":{"code_type":"SEQ","object_type":"SubTopology"}}}},"type":"tosca.nodes.ARTICONF.VM.topology","attributes":{"credential":{"cloud_provider_name":"EC2","keys":{"aws_access_key_id":"XXXXXXXXXXXXXXXXXXX"},"token":"XXXXXXXXXXXXXXX","token_type":"access_key"},"desired_state":"PROVISION"}},"ws-pema":{"properties":{"ports":["30001:8080"]},"requirements":[{"host":{"capability":"tosca.capabilities.ARTICONF.docker.Orchestrator","node":"kubernetes","relationship":"tosca.relationships.HostedOn"}}],"type":"tosca.nodes.ARTICONF.Container.Application.Docker","artifacts":{"image":{"file":"alogo53/ws-pema-lifewatch","repository":"docker_hub","type":"tosca.artifacts.Deployment.Image.Container.Docker"}}}},"relationship_templates":null,"outputs":null,"groups":null,"substitution_mappings":null,"policies":null},"template_author":null,"template_version":null,"description":"TOSCA 
example","imports":[{"nodes":"https://raw.githubusercontent.com/skoulouzis/DRIP/develop/TOSCA/types/nodes.yaml"},{"data":"https://raw.githubusercontent.com/skoulouzis/CONF/develop/TOSCA/types/data.yml"},{"capabilities":"https://raw.githubusercontent.com/skoulouzis/DRIP/develop/TOSCA/types/capabilities.yaml"},{"policies":"https://raw.githubusercontent.com/skoulouzis/DRIP/develop/TOSCA/types/policies.yaml"},{"interfaces":"https://raw.githubusercontent.com/skoulouzis/DRIP/develop/TOSCA/types/interfaces.yml"}],"dsl_definitions":null,"node_types":null,"relationship_types":null,"relationship_templates":null,"capability_types":null,"artifact_types":null,"data_types":null,"interface_types":null,"policy_types":null,"group_types":null,"repositories":null}}
{"owner":"user","creationDate":1584628085846,"toscaTemplate":{"tosca_definitions_version":"tosca_simple_yaml_1_0","tosca_default_namespace":null,"template_name":null,"topology_template":{"description":null,"inputs":null,"node_templates":{"compute":{"properties":{"disk_size":"10000 MB","mem_size":"1000 MB","num_cores":1,"os":"Ubuntu 18.04","user_name":"vm_user"},"interfaces":{"Standard":{"create":"dumy.yaml"}},"type":"tosca.nodes.ARTICONF.VM.Compute"},"compute_1":{"properties":{"disk_size":"10000 MB","mem_size":"1000 MB","num_cores":1,"os":"Ubuntu 18.04","user_name":"vm_user"},"interfaces":{"Standard":{"create":"dumy.yaml"}},"type":"tosca.nodes.ARTICONF.VM.Compute"},"kubernetes":{"requirements":[{"host":{"capability":"tosca.capabilities.ARTICONF.VM.topology","node":"topology","relationship":"tosca.relationships.HostedOn"}}],"interfaces":{"Kubernetes":{"configure":{"inputs":{"playbook":"https://raw.githubusercontent.com/skoulouzis/CONF/develop/ansible_playbooks/dashboard.yaml"}},"create":{"inputs":{"playbook":"https://raw.githubusercontent.com/skoulouzis/CONF/develop/ansible_playbooks/create_k8s.yml"}},"install":{"inputs":{"playbook":"https://raw.githubusercontent.com/skoulouzis/CONF/develop/ansible_playbooks/install_k8s.yml"}}}},"type":"tosca.nodes.ARTICONF.docker.Orchestrator.Kubernetes"},"topology":{"properties":{"domain":"Frankfurt","provider":"EC2"},"requirements":[{"vm":{"capability":"tosca.capabilities.ARTICONF.VM","node":"compute","relationship":"tosca.relationships.DependsOn"}},{"vm":{"capability":"tosca.capabilities.ARTICONF.VM","node":"compute_1","relationship":"tosca.relationships.DependsOn"}}],"interfaces":{"CloudsStorm":{"delete":{"inputs":{"code_type":"SEQ","object_type":"SubTopology"}},"hscale":{"inputs":{"code_type":"SEQ","object_type":"SubTopology"}},"provision":{"inputs":{"code_type":"SEQ","object_type":"SubTopology"}},"start":{"inputs":{"code_type":"SEQ","object_type":"SubTopology"}},"stop":{"inputs":{"code_type":"SEQ","object_type":"SubTopology"}}}},"type":"tosca.nodes.ARTICONF.VM.topology","attributes":{"credential":{"cloud_provider_name":"EC2","keys":{"aws_access_key_id":"XXXXXXXXXXXXXXX"},"token":"XXXXXXXXXXXXXXXXXX","token_type":"access_key"},"desired_state":"RUNNING"}},"ws-pema":{"properties":{"ports":["30001:8080"]},"requirements":[{"host":{"capability":"tosca.capabilities.ARTICONF.docker.Orchestrator","node":"kubernetes","relationship":"tosca.relationships.HostedOn"}}],"type":"tosca.nodes.ARTICONF.Container.Application.Docker","artifacts":{"image":{"file":"alogo53/ws-pema-lifewatch","repository":"docker_hub","type":"tosca.artifacts.Deployment.Image.Container.Docker"}}}},"relationship_templates":null,"outputs":null,"groups":null,"substitution_mappings":null,"policies":null},"template_author":null,"template_version":null,"description":"TOSCA 
example","imports":[{"nodes":"https://raw.githubusercontent.com/skoulouzis/DRIP/develop/TOSCA/types/nodes.yaml"},{"data":"https://raw.githubusercontent.com/skoulouzis/CONF/develop/TOSCA/types/data.yml"},{"capabilities":"https://raw.githubusercontent.com/skoulouzis/DRIP/develop/TOSCA/types/capabilities.yaml"},{"policies":"https://raw.githubusercontent.com/skoulouzis/DRIP/develop/TOSCA/types/policies.yaml"},{"interfaces":"https://raw.githubusercontent.com/skoulouzis/DRIP/develop/TOSCA/types/interfaces.yml"}],"dsl_definitions":null,"node_types":null,"relationship_types":null,"relationship_templates":null,"capability_types":null,"artifact_types":null,"data_types":null,"interface_types":null,"policy_types":null,"group_types":null,"repositories":null}}
......@@ -161,13 +161,11 @@ public class DRIPService {
List<NodeTemplateMap> vmTopologies = helper.getVMTopologyTemplates();
for (NodeTemplateMap vmTopology : vmTopologies) {
CloudsStormSubTopology.StatusEnum status = helper.getVMTopologyTemplateStatus(vmTopology);
if (!status.equals(CloudsStormSubTopology.StatusEnum.DELETED)) {
toscaTemplate = setDesieredSate(toscaTemplate, vmTopologies, NODE_STATES.DELETED);
}
toscaTemplate = setDesieredSate(toscaTemplate, vmTopologies, NODE_STATES.DELETED);
}
return execute(toscaTemplate, provisionerQueueName);
}else{
} else {
}
return null;
......
......@@ -3,7 +3,7 @@
properties:
Operation:
type: "string"
enum: [provision , delete , execute , put , get , vscale , hscale , recover , start]
enum: [provision , delete , execute , put , get , vscale , hscale , recover , start, stop]
ObjectType:
type: "string"
enum: [SubTopology , VM , REQ]
......
......@@ -18,6 +18,7 @@ import java.io.File;
import java.io.FileNotFoundException;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.attribute.PosixFilePermission;
import java.util.ArrayList;
......@@ -29,6 +30,7 @@ import java.util.Properties;
import java.util.Set;
import java.util.logging.Level;
import java.util.logging.Logger;
import static nl.uva.sne.drip.commons.utils.Constatnts.ENCODED_FILE_DATATYPE;
import nl.uva.sne.drip.commons.utils.Converter;
import nl.uva.sne.drip.commons.utils.ToscaHelper;
import static nl.uva.sne.drip.commons.utils.ToscaHelper.cloudStormStatus2NodeState;
......@@ -191,7 +193,10 @@ class CloudStormService {
cloudsStormSubTopology.setDomain(domain);
cloudsStormSubTopology.setCloudProvider(provider);
cloudsStormSubTopology.setTopology(SUB_TOPOLOGY_NAME + i);
cloudsStormSubTopology.setStatus(CloudsStormSubTopology.StatusEnum.FRESH);
ToscaHelper.NODE_STATES currentState = helper.getNodeCurrentState(nodeTemplateMap);
ToscaHelper.NODE_STATES desiredState = helper.getNodeDesiredState(nodeTemplateMap);
cloudsStormSubTopology.setStatus(ToscaHelper.nodeCurrentState2CloudStormStatus(currentState));
CloudsStormVMs cloudsStormVMs = new CloudsStormVMs();
List<CloudsStormVM> vms = new ArrayList<>();
......@@ -304,25 +309,22 @@ class CloudStormService {
for (NodeTemplateMap vmTopologyMap : vmTopologiesMaps) {
ToscaHelper.NODE_STATES nodeCurrentState = helper.getNodeCurrentState(vmTopologyMap);
ToscaHelper.NODE_STATES nodeDesiredState = helper.getNodeDesiredState(vmTopologyMap);
if (nodeCurrentState != null && nodeCurrentState.equals(ToscaHelper.NODE_STATES.RUNNING)) {
//Already there
}
//Can provision
if (nodeCurrentState == null || nodeCurrentState.equals(ToscaHelper.NODE_STATES.DELETED)) {
Map<String, Object> provisionInterface = helper.getProvisionerInterfaceFromVMTopology(vmTopologyMap);
String operation = nodeDesiredState.toString().toLowerCase();
Map<String, Object> inputs = (Map<String, Object>) provisionInterface.get(operation);
inputs.put("object_type", cloudStormSubtopologies.get(i).getTopology());
OpCode opCode = new OpCode();
opCode.setLog(Boolean.FALSE);
opCode.setObjectType(OpCode.ObjectTypeEnum.SUBTOPOLOGY);
opCode.setObjects(cloudStormSubtopologies.get(i).getTopology());
opCode.setOperation(OpCode.OperationEnum.fromValue(operation));
InfrasCode infrasCode = new InfrasCode();
infrasCode.setCodeType(InfrasCode.CodeTypeEnum.SEQ);
infrasCode.setOpCode(opCode);
infrasCodes.add(infrasCode);
}
Map<String, Object> provisionInterface = helper.getProvisionerInterfaceFromVMTopology(vmTopologyMap);
OpCode.OperationEnum operation = ToscaHelper.NodeDesiredState2CloudStormOperation(nodeDesiredState);
Map<String, Object> inputs = (Map<String, Object>) provisionInterface.get(operation.toString().toLowerCase());
inputs.put("object_type", cloudStormSubtopologies.get(i).getTopology());
OpCode opCode = new OpCode();
opCode.setLog(Boolean.FALSE);
opCode.setObjectType(OpCode.ObjectTypeEnum.SUBTOPOLOGY);
opCode.setObjects(cloudStormSubtopologies.get(i).getTopology());
opCode.setOperation(operation);
InfrasCode infrasCode = new InfrasCode();
infrasCode.setCodeType(InfrasCode.CodeTypeEnum.SEQ);
infrasCode.setOpCode(opCode);
infrasCodes.add(infrasCode);
}
CloudsStormInfrasCode cloudsStormInfrasCode = new CloudsStormInfrasCode();
cloudsStormInfrasCode.setMode(CloudsStormInfrasCode.ModeEnum.LOCAL);
......@@ -341,8 +343,6 @@ class CloudStormService {
protected ToscaTemplate runCloudStorm(String tempInputDirPath) throws IOException, ApiException {
String[] args = new String[]{"run", tempInputDirPath};
standalone.MainAsTool.main(args);
// tempInputDirPath = "/tmp/Input-26386504078656";
CloudsStormTopTopology _top = objectMapper.readValue(new File(tempInputDirPath + TOPOLOGY_RELATIVE_PATH
+ TOP_TOPOLOGY_FILE_NAME),
CloudsStormTopTopology.class);
......@@ -354,12 +354,23 @@ class CloudStormService {
for (CloudsStormSubTopology subTopology : subTopologies) {
NodeTemplateMap vmTopologyMap = vmTopologiesMaps.get(i);
Map<String, Object> att = vmTopologyMap.getNodeTemplate().getAttributes();
if (att == null) {
att = new HashMap<>();
Map<String, Object> artifacts = vmTopologyMap.getNodeTemplate().getArtifacts();
if (artifacts == null) {
artifacts = new HashMap<>();
}
helper.setNodeCurrentState(vmTopologyMap, cloudStormStatus2NodeState(subTopology.getStatus()));
Map<String, String> provisionedFiles = new HashMap<>();
provisionedFiles.put("type", ENCODED_FILE_DATATYPE);
Path zipPath = Paths.get(tempInputDirPath + TOPOLOGY_RELATIVE_PATH + File.separator + TOPOLOGY_FOLDER_NAME + ".zip");
Path sourceFolderPath = Paths.get(tempInputDirPath + TOPOLOGY_RELATIVE_PATH);
Converter.zipFolder(sourceFolderPath, zipPath);
String cloudStormZipFileContentsAsBase64 = Converter.encodeFileToBase64Binary(zipPath.toFile().getAbsolutePath());
provisionedFiles.put("file_contents", cloudStormZipFileContentsAsBase64);
provisionedFiles.put("encoding", "base64");
provisionedFiles.put("file_ext", "zip");
artifacts.put("provisioned_files", provisionedFiles);
helper.setNodeCurrentState(vmTopologyMap, cloudStormStatus2NodeState(subTopology.getStatus()));
String rootKeyPairFolder = tempInputDirPath + TOPOLOGY_RELATIVE_PATH
+ File.separator + subTopology.getSshKeyPairId();
......
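The block above zips the whole CloudStorm working directory, base64-encodes it and attaches it to the VM topology node as a provisioned_files artifact. A minimal consumer-side sketch, not part of this commit, that restores the files from such an artifact using only JDK classes (java.util.Base64, java.util.zip.ZipInputStream, java.io.ByteArrayInputStream, java.nio.file.Files):

// Hedged sketch: decode and unpack a provisioned_files artifact created by the code above.
// Assumes the zip contains only file entries, as produced by Converter.zipFolder.
byte[] zipBytes = Base64.getDecoder().decode(provisionedFiles.get("file_contents"));
Path targetDir = Files.createTempDirectory("cloudstorm-");                 // hypothetical target
try (ZipInputStream zis = new ZipInputStream(new ByteArrayInputStream(zipBytes))) {
    for (ZipEntry entry = zis.getNextEntry(); entry != null; entry = zis.getNextEntry()) {
        Path out = targetDir.resolve(entry.getName());
        Files.createDirectories(out.getParent());
        Files.copy(zis, out);
        zis.closeEntry();
    }
}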
......@@ -9,6 +9,8 @@ import com.fasterxml.jackson.annotation.JsonInclude;
import com.fasterxml.jackson.core.JsonProcessingException;
import com.fasterxml.jackson.databind.ObjectMapper;
import com.fasterxml.jackson.databind.SerializationFeature;
import com.fasterxml.jackson.dataformat.yaml.YAMLFactory;
import com.fasterxml.jackson.dataformat.yaml.YAMLGenerator;
import com.jcraft.jsch.KeyPair;
import java.io.File;
import java.io.FileNotFoundException;
......@@ -17,7 +19,11 @@ import java.util.List;
import java.util.Map;
import nl.uva.sne.drip.commons.utils.ToscaHelper;
import nl.uva.sne.drip.model.Message;
import nl.uva.sne.drip.model.cloud.storm.CloudsStormInfrasCode;
import nl.uva.sne.drip.model.cloud.storm.CloudsStormSubTopology;
import nl.uva.sne.drip.model.cloud.storm.CloudsStormTopTopology;
import nl.uva.sne.drip.model.cloud.storm.InfrasCode;
import nl.uva.sne.drip.model.cloud.storm.OpCode;
import static nl.uva.sne.drip.provisioner.CloudStormService.APP_FOLDER_NAME;
import static nl.uva.sne.drip.provisioner.CloudStormService.INFRASTUCTURE_CODE_FILE_NAME;
import static nl.uva.sne.drip.provisioner.CloudStormService.INFS_FOLDER_NAME;
......@@ -35,11 +41,11 @@ import static org.junit.Assert.*;
/**
*
* @author alogo
* @author S. Koulouzis
*/
public class CloudStormServiceTest {
private static final String messageExampleDeleteRequestFilePath = ".." + File.separator + "example_messages" + File.separator + "message_example_provisioned.json";
private static final String messageExampleDeleteRequestFilePath = ".." + File.separator + "example_messages" + File.separator + "message_delete_request.json";
private static final String messageExampleProvisioneRequestFilePath = ".." + File.separator + "example_messages" + File.separator + "message_provision_request.json";
private final ObjectMapper objectMapper;
private String tempInputDirPath;
......@@ -52,7 +58,7 @@ public class CloudStormServiceTest {
private String providersDBTempInputDirPath;
public CloudStormServiceTest() {
this.objectMapper = new ObjectMapper();
this.objectMapper = new ObjectMapper(new YAMLFactory().disable(YAMLGenerator.Feature.WRITE_DOC_START_MARKER));
objectMapper.disable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS);
objectMapper.setSerializationInclusion(JsonInclude.Include.NON_NULL);
}
......@@ -186,21 +192,9 @@ public class CloudStormServiceTest {
public void testWriteCloudStormInfrasCodeFiles() throws Exception {
if (ToscaHelper.isServiceUp(sureToscaBasePath)) {
System.out.println("writeCloudStormInfrasCodeFiles");
CloudStormService instance = getService(messageExampleProvisioneRequestFilePath);
Map<String, Object> subTopologiesAndVMs = instance.writeCloudStormTopologyFiles(topologyTempInputDirPath);
assertNotNull(subTopologiesAndVMs);
assertTrue(new File(topologyTempInputDirPath + File.separator + TOP_TOPOLOGY_FILE_NAME).exists());
List<CloudsStormSubTopology> cloudStormSubtopologies = (List<CloudsStormSubTopology>) subTopologiesAndVMs.get("cloud_storm_subtopologies");
instance.writeCloudStormInfrasCodeFiles(infrasCodeTempInputDirPath, cloudStormSubtopologies);
assertTrue(new File(infrasCodeTempInputDirPath + File.separator + INFRASTUCTURE_CODE_FILE_NAME).exists());
instance = getService(messageExampleDeleteRequestFilePath);
subTopologiesAndVMs = instance.writeCloudStormTopologyFiles(topologyTempInputDirPath);
assertNotNull(subTopologiesAndVMs);
assertTrue(new File(topologyTempInputDirPath + File.separator + TOP_TOPOLOGY_FILE_NAME).exists());
cloudStormSubtopologies = (List<CloudsStormSubTopology>) subTopologiesAndVMs.get("cloud_storm_subtopologies");
instance.writeCloudStormInfrasCodeFiles(infrasCodeTempInputDirPath, cloudStormSubtopologies);
assertTrue(new File(infrasCodeTempInputDirPath + File.separator + INFRASTUCTURE_CODE_FILE_NAME).exists());
testWriteCloudStormInfrasFiles(messageExampleProvisioneRequestFilePath, CloudsStormSubTopology.StatusEnum.FRESH, OpCode.OperationEnum.PROVISION);
testWriteCloudStormInfrasFiles(messageExampleDeleteRequestFilePath, CloudsStormSubTopology.StatusEnum.RUNNING, OpCode.OperationEnum.DELETE);
}
}
......@@ -259,4 +253,30 @@ public class CloudStormServiceTest {
return new CloudStormService(RPCServer.getProp(), message.getToscaTemplate());
}
private void testWriteCloudStormInfrasFiles(String path, CloudsStormSubTopology.StatusEnum status, OpCode.OperationEnum opCode) throws IOException, JsonProcessingException, ApiException, Exception {
CloudStormService instance = getService(path);
initPaths();
Map<String, Object> subTopologiesAndVMs = instance.writeCloudStormTopologyFiles(topologyTempInputDirPath);
assertNotNull(subTopologiesAndVMs);
File topTopologyFile = new File(topologyTempInputDirPath + File.separator + TOP_TOPOLOGY_FILE_NAME);
assertTrue(topTopologyFile.exists());
CloudsStormTopTopology _top = objectMapper.readValue(new File(tempInputDirPath + TOPOLOGY_RELATIVE_PATH
+ TOP_TOPOLOGY_FILE_NAME),
CloudsStormTopTopology.class);
for (CloudsStormSubTopology cloudsStormSubTopology : _top.getTopologies()) {
assertEquals(status, cloudsStormSubTopology.getStatus());
}
List<CloudsStormSubTopology> cloudStormSubtopologies = (List<CloudsStormSubTopology>) subTopologiesAndVMs.get("cloud_storm_subtopologies");
instance.writeCloudStormInfrasCodeFiles(infrasCodeTempInputDirPath, cloudStormSubtopologies);
File infrasCodeFile = new File(infrasCodeTempInputDirPath + File.separator + INFRASTUCTURE_CODE_FILE_NAME);
assertTrue(infrasCodeFile.exists());
CloudsStormInfrasCode cloudsStormInfrasCode = objectMapper.readValue(infrasCodeFile, CloudsStormInfrasCode.class);
for (InfrasCode code : cloudsStormInfrasCode.getInfrasCodes()) {
assertEquals(opCode, code.getOpCode().getOperation());
}
}
}