Commit bb0e67be authored by Alfonso Orta's avatar Alfonso Orta

Merge branch 'develop' into 'staging'

Develop

See merge request !8
parents 21ce2a62 4cd08851
tosca_definitions_version: tosca_simple_yaml_1_0
description: TOSCA template
topology_template:
node_templates:
compute:
properties:
disk_size: "120 GB"
mem_size: "7 GB"
num_cores: 4
os: Ubuntu 18.04
user_name: vm_user
interfaces:
Standard:
create: dumy.yaml
type: tosca.nodes.QC.VM.Compute
compute_1:
properties:
disk_size: "120 GB"
mem_size: "7 GB"
num_cores: 4
os: Ubuntu 18.04
user_name: vm_user
interfaces:
Standard:
create: dumy.yaml
type: tosca.nodes.QC.VM.Compute
topology:
properties:
domain: Ireland
provider: EC2
requirements:
- vm:
capability: tosca.capabilities.QC.VM
node: compute
relationship: tosca.relationships.DependsOn
- vm:
capability: tosca.capabilities.QC.VM
node: compute_1
relationship: tosca.relationships.DependsOn
interfaces:
CloudsStorm:
delete:
inputs:
code_type: SEQ
object_type: SubTopology
hscale:
inputs:
code_type: SEQ
object_type: SubTopology
provision:
inputs:
code_type: SEQ
object_type: SubTopology
start:
inputs:
code_type: SEQ
object_type: SubTopology
stop:
inputs:
code_type: SEQ
object_type: SubTopology
type: tosca.nodes.QC.VM.topology
artifacts:
provisioned_files:
required: false
type: "string"
imports:
- nodes: https://raw.githubusercontent.com/qcdis-sdia/sdia-tosca/master/types/nodes.yaml
- data: https://raw.githubusercontent.com/qcdis-sdia/sdia-tosca/master/types/data.yml
- capabilities: https://raw.githubusercontent.com/qcdis-sdia/sdia-tosca/master/types/capabilities.yaml
- policies: https://raw.githubusercontent.com/qcdis-sdia/sdia-tosca/master/types/policies.yaml
- interfaces: https://raw.githubusercontent.com/qcdis-sdia/sdia-tosca/master/types/interfaces.yml
tosca_definitions_version: tosca_simple_yaml_1_0
description: TOSCA template
topology_template:
node_templates:
compute:
properties:
disk_size: "40 GB"
mem_size: "17500 MB"
num_cores: 2
os: Ubuntu 18.04
user_name: vm_user
interfaces:
Standard:
create: dumy.yaml
type: tosca.nodes.QC.VM.Compute
compute_1:
properties:
disk_size: "40 GB"
mem_size: "17500 MB"
num_cores: 2
os: Ubuntu 18.04
user_name: vm_user
interfaces:
Standard:
create: dumy.yaml
type: tosca.nodes.QC.VM.Compute
topology:
properties:
domain: Ireland
provider: EC2
requirements:
- vm:
capability: tosca.capabilities.QC.VM
node: compute
relationship: tosca.relationships.DependsOn
- vm:
capability: tosca.capabilities.QC.VM
node: compute_1
relationship: tosca.relationships.DependsOn
interfaces:
CloudsStorm:
delete:
inputs:
code_type: SEQ
object_type: SubTopology
hscale:
inputs:
code_type: SEQ
object_type: SubTopology
provision:
inputs:
code_type: SEQ
object_type: SubTopology
start:
inputs:
code_type: SEQ
object_type: SubTopology
stop:
inputs:
code_type: SEQ
object_type: SubTopology
type: tosca.nodes.QC.VM.topology
artifacts:
provisioned_files:
required: false
type: "string"
imports:
- nodes: https://raw.githubusercontent.com/qcdis-sdia/sdia-tosca/master/types/nodes.yaml
- data: https://raw.githubusercontent.com/qcdis-sdia/sdia-tosca/master/types/data.yml
- capabilities: https://raw.githubusercontent.com/qcdis-sdia/sdia-tosca/master/types/capabilities.yaml
- policies: https://raw.githubusercontent.com/qcdis-sdia/sdia-tosca/master/types/policies.yaml
- interfaces: https://raw.githubusercontent.com/qcdis-sdia/sdia-tosca/master/types/interfaces.yml
tosca_definitions_version: "tosca_simple_yaml_1_0"
topology_template:
node_templates:
compute:
properties:
disk_size: "120 GB"
mem_size: "7 GB"
num_cores: 4
os: "Ubuntu 18.04"
user_name: "vm_user"
interfaces:
Standard:
create: dumy.yaml
type: tosca.nodes.QC.VM.Compute
#compute_1:
#properties:
#disk_size: "40000 MB"
#mem_size: "1000 MB"
#num_cores: 1
#os: "Ubuntu 18.04"
#user_name: "vm_user"
#interfaces:
#Standard:
#create: "dumy.yaml"
#type: "tosca.nodes.QC.VM.Compute"
topology:
properties:
domain: "Ireland"
provider: "EC2"
requirements:
#- vm:
#capability: "tosca.capabilities.QC.VM"
#node: "compute"
#relationship: "tosca.relationships.DependsOn"
- vm:
capability: "tosca.capabilities.QC.VM"
node: "compute_1"
relationship: "tosca.relationships.DependsOn"
interfaces:
CloudsStorm:
delete:
inputs:
code_type: "SEQ"
object_type: "SubTopology"
hscale:
inputs:
code_type: "SEQ"
object_type: "SubTopology"
provision:
inputs:
code_type: "SEQ"
object_type: "SubTopology"
start:
inputs:
code_type: "SEQ"
object_type: "SubTopology"
stop:
inputs:
code_type: "SEQ"
object_type: "SubTopology"
type: "tosca.nodes.QC.VM.topology"
artifacts:
provisioned_files:
required: false
type: "string"
description: "TOSCA example"
imports:
- nodes: "https://raw.githubusercontent.com/qcdis-sdia/sdia-tosca/master/types/nodes.yaml"
- data: "https://raw.githubusercontent.com/qcdis-sdia/sdia-tosca/master/types/data.yml"
- capabilities: "https://raw.githubusercontent.com/qcdis-sdia/sdia-tosca/master/types/capabilities.yaml"
- policies: "https://raw.githubusercontent.com/qcdis-sdia/sdia-tosca/master/types/policies.yaml"
- interfaces: "https://raw.githubusercontent.com/qcdis-sdia/sdia-tosca/master/types/interfaces.yml"
tosca_definitions_version: tosca_simple_yaml_1_0
topology_template:
node_templates:
compute:
properties:
num_cores: 2
disk_size: 20 GB
mem_size: 4098 MB
os: Ubuntu 18.04
user_name: vm_user
interfaces:
Standard:
create: dumy.yaml
type: tosca.nodes.QC.VM.Compute
topology:
properties:
domain: Iraland
provider: EC2
requirements:
- vm:
capability: tosca.capabilities.QC.VM
node: compute
relationship: tosca.relationships.DependsOn
interfaces:
CloudsStorm:
delete:
inputs:
code_type: SEQ
object_type: SubTopology
hscale:
inputs:
code_type: SEQ
object_type: SubTopology
provision:
inputs:
code_type: SEQ
object_type: SubTopology
start:
inputs:
code_type: SEQ
object_type: SubTopology
stop:
inputs:
code_type: SEQ
object_type: SubTopology
type: tosca.nodes.QC.VM.topology
description: TOSCA template
imports:
- nodes: https://raw.githubusercontent.com/qcdis-sdia/sdia-tosca/master/types/nodes.yaml
- data: https://raw.githubusercontent.com/qcdis-sdia/sdia-tosca/master/types/data.yml
- capabilities: https://raw.githubusercontent.com/qcdis-sdia/sdia-tosca/master/types/capabilities.yaml
- policies: https://raw.githubusercontent.com/qcdis-sdia/sdia-tosca/master/types/policies.yaml
- interfaces: https://raw.githubusercontent.com/qcdis-sdia/sdia-tosca/master/types/interfaces.yml
#!/bin/bash
TOSCA_ID="5f8eb46c8ff9431bd55e931a"
BASE_URL="https://fry.lab.uvalight.net:30001/manager/"
USERNAME=deploy_tester
PASSWORD=edvbafeabvafdb
#
# # curl -s -D - -k -u $USERNAME:$PASSWORD -X GET "$BASE_URL/tosca_template/$TOSCA_ID" &> tosca_output
# RESPONSE=`cat tosca_output | head -n 1 | cut '-d ' '-f2'`
#
# if [ $RESPONSE != "200" ];
# then
# cat tosca_output
# exit -1
# fi
# TOSCA_CONTENTS=`cat tosca_output | tail -n +11`
# echo $TOSCA_CONTENTS
#
#
# # curl -s -D - -k -u $USERNAME:$PASSWORD -X GET "$BASE_URL/provisioner/provision/$TOSCA_ID" &> provision_output
# RESPONSE=`cat provision_output | head -n 1 | cut '-d ' '-f2'`
#
# if [ $RESPONSE != "200" ];
# then
# cat provision_output
# exit -1
# fi
# PROVISION_ID=`cat provision_output | tail -n +11`
# PROVISION_ID=5f8ec0a68ff9431bd55e931e
# echo $PROVISION_ID
#
#
# curl -s -D - -k -u $USERNAME:$PASSWORD -X GET "$BASE_URL/deployer/deploy/$PROVISION_ID" &> deploy_output
# RESPONSE=`cat deploy_output | head -n 1 | cut '-d ' '-f2'`
#
# if [ $RESPONSE != "200" ];
# then
# cat deploy_output
# exit -1
# fi
# DEPLOY_ID=`cat deploy_output | tail -n +11`
# echo $DEPLOY_ID
#
# #
#
# # curl -u $USERNAME:$PASSWORD -X GET "$BASE_URL/tosca_template/$DEPLOY_ID"
# #
# # curl -u $USERNAME:$PASSWORD -X DELETE "$BASE_URL/tosca_template/$DEPLOY_ID"
#
#
......@@ -84,20 +84,20 @@ def handle_delivery(message):
tosca_helper = ToscaHelper(sure_tosca_base_url, tosca_template_path)
# nodes_to_deploy = tosca_helper.get_application_nodes()
nodes_pairs = tosca_helper.get_deployment_node_pairs()
nodes = tosca_helper.get_deployment_node_pipeline()
deployService = DeployService(semaphore_base_url=semaphore_base_url, semaphore_username=semaphore_username,
semaphore_password=semaphore_password, vms=tosca_helper.get_vms())
try:
for node_pair in nodes_pairs:
updated_node = deployService.deploy(node_pair)
for node in nodes:
updated_node = deployService.deploy(node)
if isinstance(updated_node, list):
for node in updated_node:
tosca_template_dict = tosca_helper.set_node(node,tosca_template_dict)
logger.info("tosca_template_dict :" + json.dumps(tosca_template_dict))
# logger.info("tosca_template_dict :" + json.dumps(tosca_template_dict))
else:
tosca_template_dict = tosca_helper.set_node(updated_node, tosca_template_dict)
logger.info("tosca_template_dict :" + json.dumps(tosca_template_dict))
# logger.info("tosca_template_dict :" + json.dumps(tosca_template_dict))
response = {'toscaTemplate': tosca_template_dict}
output_current_milli_time = int(round(time.time() * 1000))
......@@ -114,7 +114,7 @@ def handle_delivery(message):
def threaded_function(args):
while not done:
connection.process_data_events()
sleep(5)
sleep(8)
if __name__ == "__main__":
......
import copy
import json
import logging
import os
import os.path
import tempfile
import time
import yaml
import re # noqa: F401
from pathlib import Path
import unittest
import sure_tosca_client
from sure_tosca_client import Configuration, ApiClient
from sure_tosca_client.api import default_api
from service.deploy_service import DeployService
from service.tosca_helper import ToscaHelper
class TestDeployer(unittest.TestCase):
def test(self):
logger = logging.getLogger(__name__)
message_file_path = str(Path.home()) + '/Downloads/message_deploy_request.json'
if os.path.isfile(message_file_path):
with open(message_file_path, 'r') as stream:
parsed_json_message = json.load(stream)
# owner = parsed_json_message['owner']
tosca_file_name = 'tosca_template'
tosca_template_dict = parsed_json_message['toscaTemplate']
tmp_path = tempfile.mkdtemp()
tosca_template_path = tmp_path + os.path.sep + 'toscaTemplate.yml'
with open(tosca_template_path, 'w') as outfile:
yaml.dump(tosca_template_dict, outfile, default_flow_style=False)
sure_tosca_base_url = 'http://127.0.0.1:8081/tosca-sure/1.0.0'
semaphore_base_url = 'http://127.0.0.1:3000/api'
tosca_service_is_up = ToscaHelper.service_is_up(sure_tosca_base_url)
semaphore_is_up = ToscaHelper.service_is_up(semaphore_base_url)
if tosca_service_is_up and semaphore_is_up:
tosca_helper = ToscaHelper(sure_tosca_base_url,tosca_template_path)
self.assertIsNotNone(tosca_helper.doc_id)
nodes_to_deploy = tosca_helper.get_application_nodes()
self.assertIsNotNone(nodes_to_deploy)
nodes_pairs = tosca_helper.get_deployment_node_pairs()
self.assertIsNotNone(nodes_pairs)
username = 'admin'
deployService = DeployService(polemarch_base_url=polemarch_base_url,polemarch_username=username,polemarch_password='admin',
semaphore_base_url=semaphore_base_url,semaphore_username=username,semaphore_password='password')
for node_pair in nodes_pairs:
deployService.deploy(node_pair)
def get_tosca_file(self, file_name):
tosca_path = "../../TOSCA/"
input_tosca_file_path = tosca_path + '/' + file_name
if not os.path.exists(input_tosca_file_path):
tosca_path = "../TOSCA/"
input_tosca_file_path = tosca_path + '/' + file_name
dir_path = os.path.dirname(os.path.realpath(__file__))
self.assertEqual(True, os.path.exists(input_tosca_file_path),
'Starting from: ' + dir_path + ' Input TOSCA file: ' + input_tosca_file_path + ' not found')
return input_tosca_file_path
if __name__ == '__main__':
import unittest
unittest.main()
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
#namespace: kubernetes-dashboard
namespace: default
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
#namespace: kubernetes-dashboard
namespace: default
#kube-system
# Copyright 2017 The Kubernetes Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: Namespace
metadata:
name: kubernetes-dashboard
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
type: NodePort
ports:
- port: 8443
nodePort: 30443
selector:
k8s-app: kubernetes-dashboard
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-certs
namespace: kubernetes-dashboard
type: Opaque
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-csrf
namespace: kubernetes-dashboard
type: Opaque
data:
csrf: ""
---
apiVersion: v1
kind: Secret
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-key-holder
namespace: kubernetes-dashboard
type: Opaque
---
kind: ConfigMap
apiVersion: v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard-settings
namespace: kubernetes-dashboard
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
rules:
# Allow Dashboard to get, update and delete Dashboard exclusive secrets.
- apiGroups: [""]
resources: ["secrets"]
resourceNames: ["kubernetes-dashboard-key-holder", "kubernetes-dashboard-certs", "kubernetes-dashboard-csrf"]
verbs: ["get", "update", "delete"]
# Allow Dashboard to get and update 'kubernetes-dashboard-settings' config map.
- apiGroups: [""]
resources: ["configmaps"]
resourceNames: ["kubernetes-dashboard-settings"]
verbs: ["get", "update"]
# Allow Dashboard to get metrics.
- apiGroups: [""]
resources: ["services"]
resourceNames: ["heapster", "dashboard-metrics-scraper"]
verbs: ["proxy"]
- apiGroups: [""]
resources: ["services/proxy"]
resourceNames: ["heapster", "http:heapster:", "https:heapster:", "dashboard-metrics-scraper", "http:dashboard-metrics-scraper"]
verbs: ["get"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
rules:
# Allow Metrics Scraper to get metrics from the Metrics server
- apiGroups: ["metrics.k8s.io"]
resources: ["pods", "nodes"]
verbs: ["get", "list", "watch"]
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: kubernetes-dashboard
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: kubernetes-dashboard
subjects:
- kind: ServiceAccount
name: kubernetes-dashboard
namespace: kubernetes-dashboard
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: kubernetes-dashboard
name: kubernetes-dashboard
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: kubernetes-dashboard
template:
metadata:
labels:
k8s-app: kubernetes-dashboard
spec:
containers:
- name: kubernetes-dashboard
image: kubernetesui/dashboard:v2.0.0-rc3
imagePullPolicy: Always
ports:
- containerPort: 8443
protocol: TCP
args:
- --auto-generate-certificates
- --namespace=kubernetes-dashboard
# Uncomment the following line to manually specify Kubernetes API server Host
# If not specified, Dashboard will attempt to auto discover the API server and connect
# to it. Uncomment only if the default does not work.
# - --apiserver-host=http://my-address:port
volumeMounts:
- name: kubernetes-dashboard-certs
mountPath: /certs
# Create on-disk volume to store exec logs
- mountPath: /tmp
name: tmp-volume
livenessProbe:
httpGet:
scheme: HTTPS
path: /
port: 8443
initialDelaySeconds: 30
timeoutSeconds: 30
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
volumes:
- name: kubernetes-dashboard-certs
secret:
secretName: kubernetes-dashboard-certs
- name: tmp-volume
emptyDir: {}
serviceAccountName: kubernetes-dashboard
nodeSelector:
"beta.kubernetes.io/os": linux
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
---
kind: Service
apiVersion: v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
ports:
- port: 8000
targetPort: 8000
selector:
k8s-app: dashboard-metrics-scraper
---
kind: Deployment
apiVersion: apps/v1
metadata:
labels:
k8s-app: dashboard-metrics-scraper
name: dashboard-metrics-scraper
namespace: kubernetes-dashboard
spec:
replicas: 1
revisionHistoryLimit: 10
selector:
matchLabels:
k8s-app: dashboard-metrics-scraper
template:
metadata:
labels:
k8s-app: dashboard-metrics-scraper
annotations:
seccomp.security.alpha.kubernetes.io/pod: 'runtime/default'
spec:
containers:
- name: dashboard-metrics-scraper
image: kubernetesui/metrics-scraper:v1.0.3
ports:
- containerPort: 8000
protocol: TCP
livenessProbe:
httpGet:
scheme: HTTP
path: /
port: 8000
initialDelaySeconds: 30
timeoutSeconds: 30
volumeMounts:
- mountPath: /tmp
name: tmp-volume
securityContext:
allowPrivilegeEscalation: false
readOnlyRootFilesystem: true
runAsUser: 1001
runAsGroup: 2001
serviceAccountName: kubernetes-dashboard
nodeSelector:
"beta.kubernetes.io/os": linux
# Comment the following tolerations if Dashboard must not be deployed on master
tolerations:
- key: node-role.kubernetes.io/master
effect: NoSchedule
volumes:
- name: tmp-volume
emptyDir: {}
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: NAME
name: NAME
namespace: default
spec:
selector:
matchLabels:
app: NAME
replicas: 1
template:
metadata:
labels:
app: NAME
spec:
containers:
- image: IMAGE_NAME
name: NAME
ports:
- containerPort: PORT
env:
- name: ENV_NAME
value: ENV_VAL
apiVersion: v1
kind: Service
metadata:
labels:
app: NAME
name: NAME
namespace: default
spec:
type: NodePort
ports:
- port: CONTAINER_PORT
nodePort: NODE_PORT
selector:
app: NAME
pika==1.1.0
names==0.3.0
networkx==2.4
requests==2.23.0
# requests==2.23.0
wheel==0.34.2
pyyaml==5.3.1
matplotlib==3.2.1
# ansible==2.9.6
certifi==2020.4.5.1
matplotlib==3.3.0
ansible==2.9.11
# certifi==2020.4.5.1
six==1.14.0
python_dateutil==2.8.1
setuptools==46.1.3
urllib3==1.25.8
# setuptools==46.1.3
# urllib3==1.25.8
kubernetes==11.0.0
\ No newline at end of file
This diff is collapsed.
......@@ -29,30 +29,33 @@ class DeployService:
self.master_ip = vm.node_template.attributes['public_ip']
break
def deploy(self,nodes_pair):
target = nodes_pair[0]
source = nodes_pair[1]
interface_types = tosca_helper.get_interface_types(source)
def deploy(self, application):
# target = nodes_pair[0]
# source = nodes_pair[1]
interface_types = tosca_helper.get_interface_types(application)
if interface_types:
ansible_service = AnsibleService(self.semaphore_base_url, self.semaphore_username, self.semaphore_password)
env_vars = self.get_env_vars(nodes_pair)
env_vars = self.get_env_vars(application)
if 'Standard' in interface_types:
task_outputs = ansible_service.execute(nodes_pair, 'Standard', self.vms, env_vars=env_vars)
source = self.set_attributes(task_outputs,source)
task_outputs = ansible_service.execute(application, 'Standard', self.vms, env_vars=env_vars)
application = self.set_attributes(task_outputs, application)
if 'Kubernetes' in interface_types:
task_outputs = ansible_service.execute(nodes_pair, 'Kubernetes', self.vms, env_vars=env_vars)
source = self.set_attributes(task_outputs,source)
task_outputs = ansible_service.execute(application, 'Kubernetes', self.vms, env_vars=env_vars)
application = self.set_attributes(task_outputs, application)
return application
return source
def get_env_vars(self, nodes_pair):
target = nodes_pair[0]
source = nodes_pair[1]
def get_env_vars(self, source):
# target = nodes_pair[0]
# source = nodes_pair[1]
env_vars = {'K8s_NAMESPACE': 'default'}
if source.node_template.type == 'tosca.nodes.QC.Container.Application.Docker':
env_vars['DOCKER_IMAGE'] = source.node_template.artifacts['image']['file']
env_vars['DOCKER_SERVICE_NAME'] = source.name
env_vars['CONTAINER_PORT'] = source.node_template.properties['ports'][0].split(':')[1]
env_vars['CONTAINER_PORT'] = '80'
if 'ports' in source.node_template.properties:
env_vars['CONTAINER_PORT'] = source.node_template.properties['ports'][0].split(':')[1]
if 'environment' in source.node_template.properties:
env_vars['DOCKER_ENV_VARIABLES'] = source.node_template.properties['environment']
return env_vars
def set_attributes(self, task_outputs,source):
......@@ -62,6 +65,8 @@ class DeployService:
source = self.set_kubernetes_attributes(source=source,task_outputs=task_outputs)
if source.node_template.type == 'tosca.nodes.QC.Container.Application.Docker':
source = self.set_docker_attributes(source=source, task_outputs=task_outputs)
if source.node_template.type == 'tosca.nodes.QC.Application.TIC':
source = self.set_tic_attributes(source=source, task_outputs=task_outputs)
# lst = list(nodes_pair)
# lst[1] = source
# nodes_pair = tuple(lst)
......@@ -154,6 +159,9 @@ class DeployService:
attributes['dashboard_url'] = dashboard_url
logger.info('source.node_template.attributes: ' + str(attributes))
return source
raise Exception(
'Did not find k8s_services and/or k8s_dashboard_token')
return None
def set_docker_attributes(self, source, task_outputs):
attributes = source.node_template.attributes
......@@ -169,3 +177,13 @@ class DeployService:
attributes['service_url'] = service_url
logger.info('source.node_template.attributes: ' + str(attributes))
return source
def set_tic_attributes(self, source, task_outputs):
attributes = source.node_template.attributes
if 'service_urls' not in source.node_template.attributes:
service_urls = []
attributes['service_urls'] = service_urls
for port in ['8090','9000','9090']:
service_urls.append('http://' + self.master_ip + ':' + str(port))
attributes['service_urls'] = service_urls
return source
......@@ -5,6 +5,8 @@ import urllib.request
from sure_tosca_client import Configuration, ApiClient, NodeTemplate
from sure_tosca_client.api import default_api
import networkx as nx
import matplotlib.pyplot as plt
class ToscaHelper:
......@@ -38,22 +40,30 @@ class ToscaHelper:
def get_application_nodes(self):
return self.tosca_client.get_node_templates(self.doc_id, type_name='tosca.nodes.QC.Application')
def get_deployment_node_pairs(self):
def get_deployment_node_pipeline(self):
nodes_to_deploy = self.get_application_nodes()
nodes_pairs = []
G = nx.DiGraph()
sorted_nodes = []
for node in nodes_to_deploy:
related_nodes = self.tosca_client.get_related_nodes(self.doc_id,node.name)
for related_node in related_nodes:
# We need to deploy the docker orchestrator on the VMs not the topology.
# But the topology is directly connected to the orchestrator not the VMs.
# So we explicitly get the VMs
# I don't like this solution but I can't think of something better.
if related_node.node_template.type == 'tosca.nodes.QC.VM.topology':
vms = self.get_vms()
related_node = vms
pair = (related_node, node)
nodes_pairs.append(pair)
return nodes_pairs
G.add_edge(node.name, related_node.name)
# # We need to deploy the docker orchestrator on the VMs not the topology.
# # But the topology is directly connected to the orchestrator not the VMs.
# # So we explicitly get the VMs
# # I don't like this solution but I can't think of something better.
# if related_node.node_template.type == 'tosca.nodes.QC.VM.topology':
# vms = self.get_vms()
# related_node = vms
# pair = (related_node, node)
# nodes_pairs.append(pair)
sorted_graph = sorted(G.in_degree, key=lambda x: x[1], reverse=True)
for node_tuple in sorted_graph:
node_name = node_tuple[0]
for node in nodes_to_deploy:
if node.name == node_name:
sorted_nodes.append(node)
return sorted_nodes
@classmethod
def service_is_up(cls, url):
......
......@@ -48,7 +48,7 @@ class TestDeployer(unittest.TestCase):
self.assertIsNotNone(tosca_helper.doc_id)
nodes_to_deploy = tosca_helper.get_application_nodes()
self.assertIsNotNone(nodes_to_deploy)
nodes_pairs = tosca_helper.get_deployment_node_pairs()
nodes_pairs = tosca_helper.get_deployment_node_pipeline()
self.assertIsNotNone(nodes_pairs)
username = 'admin'
......
-----BEGIN CERTIFICATE-----
MIIFRTCCAy2gAwIBAgIUSdwxBIoIhEgH/uLa8RI2rwg/XZ4wDQYJKoZIhvcNAQEL
BQAwMjEOMAwGA1UECgwFcWNkaXMxDjAMBgNVBAsMBXFjZGlzMRAwDgYDVQQDDAd1
bmEubmwvMB4XDTIwMDUxMTE0MDE1NVoXDTIwMDYxMDE0MDE1NVowMjEOMAwGA1UE
CgwFcWNkaXMxDjAMBgNVBAsMBXFjZGlzMRAwDgYDVQQDDAd1bmEubmwvMIICIjAN
BgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAsvBagSRSfrFgk9EYWXcRZUNnyBTW
h6GrbWYAfdnMvG2Xih//aaeMC+GqYCox207wKpmM0hocTuFxLA6j1qjY81U6TAtu
6OsJamsotT/587ov6/jkeoE0SfOPoFr+SsXWjvc4LBFR6Htm8qPyDxbVTeJbzs13
JnGhbWzoiUJCg3a5TR25mS4Jvem/YhbClARdpC3JgqQTBzq4BBSUKODFYxtQJNgF
62pXI29c3/DWIi/vxXwJeX+gRiaNezAuKRAFu37CdCaOEO0kTL5zj8Elm0AJzkUk
sxpC/NRYOqJmRVUc0qQxqzmNkWC5Zsbx9G6mfuxwNWhEJg1/eEwr5zKEDFl2VrC8
BnUV4Xisd1TQoFfdfgCmngY2O3LQaAy2LMI9J8FNStTZxP55S7QWKGF0wOTjbuDj
l3al5pAIqoQabA7PH1NtAJlQ15+8IudWLpjKrWF/eETj43ymgXfZR1S1Ct2ZTg5j
Yk5D9iCiqjKtt5uAYzKUc2eApTycmQ7jA/r5Ei0Qm1eeCobymeSUvC9SUwdqDwIQ
AB9T1HsAGY3rj56AZnhZPKMv2cY1CZz8TtQk4TrC8vcf92E6pqoD+vbE5gzyfoGW
MbDxAyqQNDiCLjoP4QL7mfAIRJ4SRGaFtIwtgdoalzGry4kojM3eX4YyLgYKcANy
/BLdzW5P1sxRW5cCAwEAAaNTMFEwHQYDVR0OBBYEFKPlVennli4sJhjlohRhtsYp
sJmeMB8GA1UdIwQYMBaAFKPlVennli4sJhjlohRhtsYpsJmeMA8GA1UdEwEB/wQF
MAMBAf8wDQYJKoZIhvcNAQELBQADggIBAKQBBAjdHDhfSLzdeTDyK1hX5KrWNCuV
1b9TiNUb88E0qrlnYVezw+/R3B36KwT9C7svk8HhaQZwnveQLWjs7HuK6CU7AUdJ
kW67bxNyIEH92p8VwIj7fPPEazNkALMUyzIcYWnULsaOZC22GQrIvbTsjH76q1IG
E0toNzja0992xxewI9h/Fk8Oy7v3w26kDRBx5vPCn43XQzhQMCNPdG4w3j/v4BQP
4jqPCHOBKDmAbXygzBQmzp2Zwnq1wVrlEiqERzXEGmcY4Jilt08CClRLhjs2+Rrj
ZHuAQVa8ULY3mgv3sptloeHMCcHEmNU93pDwYdug7/VEJ1rKeczFPOzjKaUDt4lx
uj3Wg3sN9wdCOKu4mCXZZjq7YRGkZtrNB+0XjIQbA/fG/jzvfQZQiGbrvEOjAplP
PY32ssRR+itgZ3UdhW6ALNmoIRbiq+igsCEytXB62eE4RkRkn+IEm7Hf5ub6z4d6
BBG3+BjlvYVyVwvp6aqtMqbfj8wS/147Bv9d4j6FSPbY4PyN8qzWHK4QvMhSmtSj
v8ENzbrhj0FE42FvaOOtLqWnxsXwrJ5oIj3reBKdNcZ07vnPy/GikcrIS1219aOa
dQrdXi4GfWogqNqTGCOOqbEjkLOmjOo6YTuqXkZH+Eg4hqucjf2VJEqUqsV3QKjf
SoaoXQYjAzNj
-----END CERTIFICATE-----
version: '3'
services:
rabbit:
image: rabbitmq:3.8-management
ports:
- "5671-5672:5671-5672"
- "15672:15672"
- "4369:4369"
- "15671:15671"
mongo:
image: mongo:4
ports:
- "27017:27017"
#nginx:
#image: nginx
#volumes:
#- ./nginx.conf:/etc/nginx/nginx.conf
#- ./cert.pem:/etc/nginx/cert.pem
#- ./privkey.pem:/etc/nginx/privkey.pem
##- ./www:/data/www
#ports:
#- "80:80"
#- "443:443"
##networks:
##frontend:
##ipv4_address: 172.20.0.2
#rabbit:
#image: rabbitmq:3.8-management
#ports:
#- "5671-5672:5671-5672"
#- "15672:15672"
#- "4369:4369"
#- "15671:15671"
##networks:
##frontend:
##ipv4_address: 172.20.0.3
mysql:
image: mysql:5.6
image: mysql:5.7 #works with 5.6 abd emaphore:2.4.1
environment:
MYSQL_RANDOM_ROOT_PASSWORD: 'yes'
MYSQL_DATABASE: semaphore
MYSQL_USER: semaphore
MYSQL_PASSWORD: semaphore
ports:
- "3306:3306"
#ports:
#- "3306:3306"
#networks:
#frontend:
#ipv4_address: 172.20.0.4
semaphore:
image: qcdis/docker_ansible_semaphore
image: ss
environment:
SEMAPHORE_DB_USER: semaphore
SEMAPHORE_DB_PASS: semaphore
......@@ -37,13 +52,57 @@ services:
SEMAPHORE_ADMIN_NAME: "Developer"
SEMAPHORE_ADMIN_EMAIL: admin@localhost
SEMAPHORE_ADMIN: admin
SEMAPHORE_WEB_ROOT: http://0.0.0.0:3000/semaphore
SEMAPHORE_WEB_ROOT: http://0.0.0.0:3000
ports:
- "3000:3000"
depends_on:
- mysql
sure-tosca:
image: qcdis/sure-tosca:3.0.0
ports:
- "8081:8081"
#networks:
#frontend:
#ipv4_address: 172.20.0.5
#mongo:
#image: mongo:4
#ports:
#- "27017:27017"
##networks:
##frontend:
##ipv4_address: 172.20.0.6
#sure-tosca:
#image: qcdis/sure-tosca
#ports:
#- "8081:8081"
##networks:
##frontend:
##ipv4_address: 172.20.0.7
#compute:
#image: ubuntu:18.04
#volumes:
#- "./run.sh:/tmp/run.sh"
#command: "/tmp/run.sh"
##networks:
##frontend:
##ipv4_address: 172.20.0.8
##compute_1:
##image: ubuntu:18.04
##command: "apt update && apt install openssh-server && tail -f /dev/null"
##networks:
##frontend:
##ipv4_address: 172.20.0.9
##networks:
##frontend:
##ipam:
##config:
##- subnet: 172.20.0.0/24
......@@ -14,14 +14,16 @@ services:
rabbit:
image: rabbitmq:3.8-management
ports:
ports:
- "5671-5672:5671-5672"
- "15672:15672"
- "4369:4369"
- "15671:15671"
- "15671:15671"
mysql:
image: mysql:5.6
image: mysql:5.7
volumes:
- ./mysql.cnf:/etc/mysql/mysql.conf.d/mysqld.cnf
environment:
MYSQL_RANDOM_ROOT_PASSWORD: 'yes'
MYSQL_DATABASE: semaphore
......@@ -29,7 +31,7 @@ services:
MYSQL_PASSWORD: semaphore
#ports:
#- "3306:3306"
semaphore:
image: qcdis/docker_ansible_semaphore
environment:
......@@ -48,62 +50,94 @@ services:
- "3000:3000"
depends_on:
- mysql
mongo:
image: mongo:4
ports:
- "27017:27017"
manager:
- "27017:27017"
mongo-express:
image: mongo-express
environment:
- ME_CONFIG_MONGODB_SERVER=mongo
- ME_CONFIG_MONGODB_PORT=27017
- ME_CONFIG_BASICAUTH_USERNAME=user
- ME_CONFIG_BASICAUTH_PASSWORD=pass
- ME_CONFIG_SITE_BASEURL=/mongo-express
- VCAP_APP_PORT=8082
depends_on:
- mongo
ports:
- "8082:8082"
#mongoclient:
#image: mongoclient/mongoclient
#environment:
#- MONGO_URL=mongodb://mongo:27017
#- ROOT_URL=http://mongoclient/mongoclient
#depends_on:
#- mongo
#ports:
#- "3001:3000"
sdia-orchestrator:
depends_on:
- rabbit
- mongo
- sure-tosca
image: qcdis/manager
image: qcdis/sdia-orchestrator
environment:
RABBITMQ_HOST: rabbit
MONGO_HOST: mongo
SURE_TOSCA_BASE_PATH: http://sure-tosca:8081/tosca-sure/1.0.0
CREDENTIAL_SECRET: 123
CREDENTIAL_SECRET: top_secret
ports:
- "8080:8080"
- "8080:8080"
sure-tosca:
image: qcdis/sure-tosca
ports:
ports:
- "8081:8081"
planner:
depends_on:
- rabbit
- sure-tosca
image: qcdis/planner
image: qcdis/planner
environment:
RABBITMQ_HOST: rabbit
provisioner:
depends_on:
- rabbit
- sure-tosca
image: qcdis/provisioner
image: qcdis/provisioner
environment:
RABBITMQ_HOST: rabbit
RABBITMQ_HOST: rabbit
SURE_TOSCA_BASE_PATH: http://sure-tosca:8081/tosca-sure/1.0.0
CLOUD_STORM_SECRET: 456
CREDENTIAL_SECRET: 123
CREDENTIAL_SECRET: top_secret
deployer:
depends_on:
- rabbit
- sure-tosca
image: qcdis/deployer
image: qcdis/deployer
environment:
RABBITMQ_HOST: rabbit
RABBITMQ_HOST: rabbit
SURE_TOSCA_BASE_PATH: http://sure-tosca:8081/tosca-sure/1.0.0
SEMAPHORE_BASE_PATH: http://semaphore:3000/api
#cadvisor:
#image: gcr.io/google-containers/cadvisor:latest
#ports:
#- 8083:8080
#volumes:
#- /:/rootfs:ro
#- /var/run:/var/run:rw
#- /sys:/sys:ro
#- /var/lib/docker/:/var/lib/docker:ro
# MySQL server configuration (my.cnf style), mounted into the mysql
# container/pod via the mysql-config ConfigMap.
[mysqld]
pid-file = /var/run/mysqld/mysqld.pid
socket = /var/run/mysqld/mysqld.sock
datadir = /var/lib/mysql
#log-error = /var/log/mysql/error.log
# By default we only accept connections from localhost
#bind-address = 127.0.0.1
# Disabling symbolic-links is recommended to prevent assorted security risks
symbolic-links=0
# Raised connection cap; default is much lower (151 in MySQL 5.x).
max_connections = 2048
......@@ -62,6 +62,7 @@ http {
location /manager {
add_header 'Access-Control-Allow-Origin' *;
proxy_pass http://manager:8080/manager;
proxy_set_header Host $http_host;
proxy_set_header X-Real-IP $remote_addr;
......@@ -95,6 +96,12 @@ http {
proxy_buffering off;
proxy_request_buffering off;
}
}
# Reverse-proxy the mongo-express UI under /mongo-express.
location /mongo-express {
    proxy_pass http://mongo-express:8082/mongo-express;
    proxy_set_header Host $host;
    # Fix: header name was misspelled 'X-Real_IP'; the /manager location
    # in this file uses the conventional 'X-Real-IP'.
    proxy_set_header X-Real-IP $remote_addr;
}
}
}
-----BEGIN PRIVATE KEY-----
MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQCy8FqBJFJ+sWCT
0RhZdxFlQ2fIFNaHoattZgB92cy8bZeKH/9pp4wL4apgKjHbTvAqmYzSGhxO4XEs
DqPWqNjzVTpMC27o6wlqayi1P/nzui/r+OR6gTRJ84+gWv5KxdaO9zgsEVHoe2by
o/IPFtVN4lvOzXcmcaFtbOiJQkKDdrlNHbmZLgm96b9iFsKUBF2kLcmCpBMHOrgE
FJQo4MVjG1Ak2AXralcjb1zf8NYiL+/FfAl5f6BGJo17MC4pEAW7fsJ0Jo4Q7SRM
vnOPwSWbQAnORSSzGkL81Fg6omZFVRzSpDGrOY2RYLlmxvH0bqZ+7HA1aEQmDX94
TCvnMoQMWXZWsLwGdRXheKx3VNCgV91+AKaeBjY7ctBoDLYswj0nwU1K1NnE/nlL
tBYoYXTA5ONu4OOXdqXmkAiqhBpsDs8fU20AmVDXn7wi51YumMqtYX94ROPjfKaB
d9lHVLUK3ZlODmNiTkP2IKKqMq23m4BjMpRzZ4ClPJyZDuMD+vkSLRCbV54KhvKZ
5JS8L1JTB2oPAhAAH1PUewAZjeuPnoBmeFk8oy/ZxjUJnPxO1CThOsLy9x/3YTqm
qgP69sTmDPJ+gZYxsPEDKpA0OIIuOg/hAvuZ8AhEnhJEZoW0jC2B2hqXMavLiSiM
zd5fhjIuBgpwA3L8Et3Nbk/WzFFblwIDAQABAoICACuuHIm10iV3KzoqKqV8OVTc
1XK0E4JcZrp30drm1eGRZxKiqPijm74ywiJjanQ8msfrX8LR+OMQiU3V7QyzfvI3
ddmmWNamuU+vBOrpyRxD8PrLcQqui5MZz4+3ZqfeD3gqxR2MxM/Zf9HvT2F7k2pF
vV+ILHPH/T32/fSzpgTtcGJwxSOtZ1divgM1xx3Wyv0O6EfpwXNcVBs64sfvxn9g
5Xl4+kjzVn4h6ywHYF5MEV3F35I3I2q8coEFy97eOGgCk3lDCB79pITPYOpQndt+
EGa33jOST5PkSf1WM1ztX/HTfwRrMjGwyNFb8yhV8nK9SM7guvIHDXzSK01uuWjZ
zH/0lWDqdTJPoIo0hY9nZi9myMLAB3gtXl5yCP2iNAAdRLHyAjjDS7+NmMssIxbL
FV0suj5D8E+q+4cWtFXN03P/FLB3tdODAk8CFOhSSTF6Sz8sD8We7rzL4tDPgx6z
W3TPpe5seFDDklHSaPBhq3a2FBpQYzA53zBZHcOtiT+qLLLZIywwTUqLMjdxmk7Y
h9ye1sQiD2rtgigKiaRJ5xXnS/Z5f42oHVbB5m5fO0vCD6TU/4xPg09kQ2JEWKyD
xto456iZ0APQrMEVRGI4gQkyR0lAIIJj177pVTMbIf1sYPc6rJ7UNUXNSxZQOTga
YltGBS0cOzu8F4DxexiBAoIBAQDcRx/eFNQ9tshQDWCRvXGKflmuBpB1ySIAoKCt
MuzKKETqBfOu8BU7bi+zu8f4ikKOfFiEyl2sJDVQt53XOP40fp5rz0ceg8fsV5F6
/Px4VTCzCubKM9nurelkcTU+pBQQle9Ne4KNptYiu0EFZkVEoiDrj2aqw7FLJpSH
tTZE1ySN8A41DmVf+PuBMYa4DsGbgxbD95qFMYCNEFVBbGKn8fpn5SfxYsPdQzsV
CB6Ori6T4H6x8mSX2QNE37Z5x4PSOftSM5JMpia7Adg/KHeZOucFb7EO3XSkhQI8
C/BBBs0Z/hHZpSjHnGyINqH4+W0zVDMEpzT+gyfegh0w/NOhAoIBAQDP9Qjpayon
ZlGh71uNul93CAvS/ZuuaRBHjG7wiCbb3CNPBlIk6wS1TjVLhl6qrhZKLLU55URy
Cc/KWtdy7jj6x0Xx4OBLTWjqH2AI6dDAZARCNMr4MTXmnc2Tlripae6JQhhkHUFs
FFHzrWW1caWyLZc5LlQhXdv0ivHdZVmMUgsCr4+R5xdBQjjlT8WtxU4aiWdRNiUh
/45CBpfFKCzwOsRLDg5FNgpMPK6ioE09iDAujyIrcNMpZkOtjed9KrAGHcXq1acl
K7H0lm2bnoe6aj97aRjMh0uyVzlAZYiBOG1oWaegOrbXX1owtdxJWUOZCIapmBro
T6NM3QnTNWQ3AoIBAQC1KT2ZPRIsy0XFKKtSUTavCykgSb/HTyrKz1A2AZriy+mZ
elmmd0dPAFj0/awByVvhqXx3gaM2bvT2NHz4w6O1cqlBy1AXVZQ32PEJ8ESHhrCt
n3qds7U5agh4Fe8PXIwv88/CrqB5dUvJdq2MDkdLofdnJCHwsU7/mnQvhCZkyXgD
z/kvQ262VqQp5ZyBhYlPJ6myc9G+Vy79VQB49PXSX91sUvfduzeQorlVm77d43zl
G423Nb3rcIwSudZuI1tTq2H5gfaBWWijBmpdzx9Fgz76ppg8vH8wyz9COSNXp4tJ
JD5z/DQkro5IHH5rsF9SBp6K5iVcaGuCwQnW/yfhAoIBAApL+rX8DWlIArC+9kyN
Nt7g8hzvW78GCr1QMcoNI8dtHf5ytKyJSoFjrvXfYF6ZIhdoIU0NkhcHb1d4qgRR
0VZxeYAhf1mbKp+1D/9A/IoaKa/Rh19EqIOTurMdGmWhDiOTtzt1y2B7nRcwtcGH
MAojgcJeDeJdEGAKZTFuLEHragonATfbNmaPzdtk2Mfi5hwgQ5Jc2PfW/Ic+ZlNn
ytnWPxPsTT6WD2p48riwsUJjtOcJRUrbWklJe/5i8VxcOb0DymH3VEBd6oDO1fyH
m/bZ/eyxZn3yEQhyky4iGOE1Fw28qrUfUyBU41VYG+Ex30v7hRdupZ5tGwvaQftI
QH8CggEAFqK8EIhfsuBBP4w7ELjrrXp57kMVQu1DBTDu8CWRhlRGbHI40+tp/2qw
iNCEwSYO6hFjhlOOFU1+6RWOzG2p3XcWJBqlEiVhmHreuXNlJS/DZickV+7lpQpJ
NMFGjUUJPozFjxDaMVSdg/G6gjLEYIwi9b3IHUAR302hpYfZZwGZw67NPvXn8tmn
EdV3N20E0AUM2utC5dJEOSnmiXG+iTDNipvElxS6H24rLpm3TDww0gDPsmVeQxuH
ZW1IiyxQm3XcS4gfUfdg5+qwBqtuFNn8nRy978/bLuDclDCxxZVcnjFnI2aAZKQH
eZ3DUvayu1QuVkGz3Y52NM1CbZmz9g==
-----END PRIVATE KEY-----
#!/bin/bash
# VM bootstrap: install and start an SSH server, ensure the vm_user
# account exists, and append the provisioning public key to its
# authorized_keys so the orchestrator can log in.
apt update -y
apt install openssh-server -y
service ssh start
# Fix: original used `getent passwd vm_user > /dev/null 2&>1`, which
# passes a literal `2` as an extra getent key (making the lookup always
# fail) and redirects to a file named `1`. Correct form is `2>&1`, and
# testing the command directly avoids the separate `$?` check.
if getent passwd vm_user > /dev/null 2>&1; then
    echo "user exists"
else
    useradd vm_user -s /bin/bash -m
fi
mkdir -p /home/vm_user/.ssh && touch /home/vm_user/.ssh/authorized_keys
echo 'c3NoLXJzYSBBQUFBQjNOemFDMXljMkVBQUFBREFRQUJBQUFBZ1FDSUJpNHRYeFZtaXdNL01YWWE5VEZRS1FRMjFIWjg0SnpReFRrai9PcXVoTkwwdVZmeWVHUFN6RUFVRnV5bWMrWk43cUJWRkhtOCtlSkpYd1pGRENoeENuWjBWZHFNS1ZzQkZWK2QxK3ZESXllZ0k4djBSVm42alFXOEV5UnlzTEprckRCdkdLa1UxcDNzNkVQMTEyVExZU3J2UXRYZHVMVHhONTNZMGY3QmV3PT0gZ2VuZXJhdGVkIHVzZXIgYWNjZWVzIGtleXMK' | base64 -d >> /home/vm_user/.ssh/authorized_keys
cat /home/vm_user/.ssh/authorized_keys
# Keep the container/VM process alive.
tail -f /dev/null
......@@ -198,19 +198,19 @@
"description": "TOSCA example",
"imports": [
{
"nodes": "https://raw.githubusercontent.com/QCDIS/sdia-tosca/master/types/nodes.yaml"
"nodes": "https://raw.githubusercontent.com/qcdis-sdia/sdia-tosca/master/types/nodes.yaml"
},
{
"data": "https://raw.githubusercontent.com/QCDIS/sdia-tosca/master/types/data.yml"
"data": "https://raw.githubusercontent.com/qcdis-sdia/sdia-tosca/master/types/data.yml"
},
{
"capabilities": "https://raw.githubusercontent.com/QCDIS/sdia-tosca/master/types/capabilities.yaml"
"capabilities": "https://raw.githubusercontent.com/qcdis-sdia/sdia-tosca/master/types/capabilities.yaml"
},
{
"policies": "https://raw.githubusercontent.com/QCDIS/sdia-tosca/master/types/policies.yaml"
"policies": "https://raw.githubusercontent.com/qcdis-sdia/sdia-tosca/master/types/policies.yaml"
},
{
"interfaces": "https://raw.githubusercontent.com/QCDIS/sdia-tosca/master/types/interfaces.yml"
"interfaces": "https://raw.githubusercontent.com/qcdis-sdia/sdia-tosca/master/types/interfaces.yml"
}
],
"dsl_definitions": null,
......
This diff is collapsed.
# ConfigMap bundling htpasswd basic-auth entries and the MySQL server
# configuration; mounted into pods in the `conf` namespace.
apiVersion: v1
kind: ConfigMap
metadata:
name: mysql-config
namespace: conf
data:
# Basic-auth users as apr1 (htpasswd MD5) hashes.
# NOTE(review): password hashes committed to VCS — confirm this is
# acceptable or move to a Secret.
htpasswd: |
alogo:$apr1$pbMniSeq$m4PZevv7VLULQLhiD2V2R0
conf_user:$apr1$sDBv9ugd$AV7m5Jeg0463jXaBxiZDs.
articonf_ui:$apr1$qFaau5L2$xgO53tciXFlrL/Z61nrzP.
# MySQL server configuration mounted at /etc/mysql/mysql.conf.d/.
mysqld.cnf: |
[mysqld]
pid-file = /var/run/mysqld/mysqld.pid
socket = /var/run/mysqld/mysqld.sock
datadir = /var/lib/mysql
#log-error = /var/log/mysql/error.log
# By default we only accept connections from localhost
#bind-address = 127.0.0.1
# Disabling symbolic-links is recommended to prevent assorted security risks
symbolic-links=0
max_connections = 2048
......@@ -37,9 +37,24 @@ spec:
value: "yes"
- name: MYSQL_USER
value: semaphore
image: mysql:5.6
image: mysql:5.7
name: mysql
imagePullPolicy: Always
resources: {}
volumeMounts:
- name: config-volume
mountPath: /etc/mysql/mysql.conf.d/
volumes:
- name: config-volume
configMap:
name: mysql-config
items:
- key: mysqld.cnf
path: mysqld.cnf
restartPolicy: Always
status: {}
......@@ -36,8 +36,8 @@ spec:
- name: SURE_TOSCA_BASE_PATH
value: http://sure-tosca:8081/tosca-sure/1.0.0
- name: CREDENTIAL_SECRET
value: top_secret
image: qcdis/manager:3.0.0
value: MGY0MGQ1MDFkYzg5ZGIxYjY4MjQ4MzQz
image: qcdis/manager
name: manager
imagePullPolicy: Always
ports:
......
......@@ -10,10 +10,16 @@ metadata:
io.kompose.service: manager
name: manager
spec:
type: NodePort
ports:
- name: "8080"
port: 8080
targetPort: 8080
- port: 8080
nodePort: 30000
protocol: TCP
name: http
#ports:
#- name: "8080"
#port: 8080
#targetPort: 8080
selector:
io.kompose.service: manager
status:
......
# Deployment for the mongo-express admin UI (kompose-generated).
# Basic-auth env vars are commented out in this variant, so the UI is
# unauthenticated at the container level.
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: conf
annotations:
kompose.cmd: kompose convert
kompose.version: 1.16.0 (0c01309)
creationTimestamp: null
labels:
io.kompose.service: mongo-express
name: mongo-express
spec:
selector:
matchLabels:
io.kompose.service: mongo-express
replicas: 1
strategy: {}
template:
metadata:
creationTimestamp: null
labels:
io.kompose.service: mongo-express
spec:
containers:
- env:
#- name: ME_CONFIG_BASICAUTH_PASSWORD
#value: ZjEwMjhiNmRiNDQ0N2UzMTU5OWFhNjkz
#- name: ME_CONFIG_BASICAUTH_USERNAME
#value: NGU4NWIwMDM0Njg1ZGRhYjhkMzgyNGQ3
- name: ME_CONFIG_MONGODB_PORT
value: "27017"
- name: ME_CONFIG_MONGODB_SERVER
value: mongo
- name: ME_CONFIG_SITE_BASEURL
value: /mongo-express
- name: VCAP_APP_PORT
value: "8082"
image: mongo-express
name: mongo-express
ports:
- containerPort: 8082
resources: {}
restartPolicy: Always
status: {}
# Deployment for the mongo-express admin UI (kompose-generated).
# NOTE(review): this manifest has the same name/namespace as the other
# mongo-express Deployment in this dump — applying both means the later
# one overwrites the earlier; presumably these are two revisions of the
# same file — confirm which is current.
apiVersion: apps/v1
kind: Deployment
metadata:
namespace: conf
annotations:
kompose.cmd: kompose convert
kompose.version: 1.16.0 (0c01309)
creationTimestamp: null
labels:
io.kompose.service: mongo-express
name: mongo-express
spec:
selector:
matchLabels:
io.kompose.service: mongo-express
replicas: 1
strategy: {}
template:
metadata:
creationTimestamp: null
labels:
io.kompose.service: mongo-express
spec:
containers:
- env:
# NOTE(review): basic-auth credentials committed in plain text —
# move to a Kubernetes Secret.
- name: ME_CONFIG_BASICAUTH_PASSWORD
value: pass
- name: ME_CONFIG_BASICAUTH_USERNAME
value: user
- name: ME_CONFIG_MONGODB_PORT
value: "27017"
- name: ME_CONFIG_MONGODB_SERVER
value: mongo
- name: ME_CONFIG_SITE_BASEURL
value: /mongo-express
- name: VCAP_APP_PORT
value: "8082"
image: mongo-express
name: mongo-express
ports:
- containerPort: 8082
resources: {}
restartPolicy: Always
status: {}
# ClusterIP Service exposing the mongo-express Deployment on port 8082
# inside the `conf` namespace (kompose-generated).
apiVersion: v1
kind: Service
metadata:
namespace: conf
annotations:
kompose.cmd: kompose convert
kompose.version: 1.16.0 (0c01309)
creationTimestamp: null
labels:
io.kompose.service: mongo-express
name: mongo-express
spec:
ports:
- name: "8082"
port: 8082
targetPort: 8082
selector:
io.kompose.service: mongo-express
status:
loadBalancer: {}
This diff is collapsed.
......@@ -12,6 +12,9 @@ spec:
nodePort: 30001
protocol: TCP
name: https
- port: 80
protocol: TCP
name: http
selector:
io.kompose.service: nginx
status:
......
......@@ -33,7 +33,11 @@ spec:
value: rabbit
- name: SURE_TOSCA_BASE_PATH
value: http://sure-tosca:8081/tosca-sure/1.0.0
image: qcdis/provisioner:3.0.0
- name: CLOUD_STORM_SECRET
value: ODlkYjgxM2RhNTAzMjExZTdiYWNhYWQ0
- name: CREDENTIAL_SECRET
value: MGY0MGQ1MDFkYzg5ZGIxYjY4MjQ4MzQz
image: qcdis/provisioner
name: provisioner
imagePullPolicy: Always
resources: {}
......
......@@ -51,7 +51,7 @@ spec:
value: /etc/semaphore
- name: SEMAPHORE_WEB_ROOT
value: http://0.0.0.0:3000
image: qcdis/docker_ansible_semaphore
image: qcdis/docker_ansible_semaphore:v2.5.1-2.9.9
name: semaphore
imagePullPolicy: Always
ports:
......
......@@ -15,15 +15,16 @@ import org.springframework.web.bind.annotation.RequestBody;
import javax.validation.Valid;
import javax.servlet.http.HttpServletRequest;
import java.util.List;
import java.util.logging.Level;
import javax.crypto.BadPaddingException;
import javax.crypto.IllegalBlockSizeException;
import javax.crypto.NoSuchPaddingException;
import nl.uva.sne.drip.service.CredentialService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.CrossOrigin;
@javax.annotation.Generated(value = "io.swagger.codegen.languages.SpringCodegen", date = "2019-10-10T17:15:46.465Z")
@CrossOrigin(origins = "*")
@Controller
public class CredentialApiController implements CredentialApi {
......@@ -46,27 +47,27 @@ public class CredentialApiController implements CredentialApi {
public ResponseEntity<String> createCredentials(@ApiParam(
value = "Created user object", required = true)
@Valid @RequestBody Credential body) {
String accept = request.getHeader("Accept");
if (accept != null && accept.contains("application/json")) {
// String accept = request.getHeader("Accept");
// if (accept != null && accept.contains("application/json")) {
try {
String id = credentialService.save(body);
return new ResponseEntity<>(id, HttpStatus.OK);
} catch (UnsupportedEncodingException | NoSuchAlgorithmException | NoSuchPaddingException | InvalidKeyException | IllegalBlockSizeException | BadPaddingException ex) {
return new ResponseEntity<>(HttpStatus.INTERNAL_SERVER_ERROR);
}
} else {
return new ResponseEntity<>(HttpStatus.NOT_ACCEPTABLE);
}
// } else {
// return new ResponseEntity<>(HttpStatus.NOT_ACCEPTABLE);
// }
}
@Override
public ResponseEntity<List<String>> getCredentialIDs() {
String accept = request.getHeader("Accept");
if (accept != null && accept.contains("application/json")) {
// String accept = request.getHeader("Accept");
// if (accept != null && accept.contains("application/json")) {
List<String> ids = credentialService.getAllIds();
return new ResponseEntity<>(ids, HttpStatus.OK);
}
return new ResponseEntity<>(HttpStatus.NOT_IMPLEMENTED);
// }
// return new ResponseEntity<>(HttpStatus.NOT_IMPLEMENTED);
}
}
......@@ -12,10 +12,12 @@ import org.springframework.web.bind.annotation.PathVariable;
import javax.servlet.http.HttpServletRequest;
import nl.uva.sne.drip.service.DRIPService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.CrossOrigin;
@javax.annotation.Generated(value = "io.swagger.codegen.languages.SpringCodegen", date = "2019-10-10T17:15:46.465Z")
@Controller
@CrossOrigin(origins = "*")
public class DeployerApiController implements DeployerApi {
private static final Logger log = LoggerFactory.getLogger(DeployerApiController.class);
......@@ -38,8 +40,8 @@ public class DeployerApiController implements DeployerApi {
@ApiParam(value = "ID of topolog template to deploy", required = true)
@PathVariable("id") String id) {
String accept = request.getHeader("Accept");
if (accept != null && accept.contains("")) {
// String accept = request.getHeader("Accept");
// if (accept != null && accept.contains("")) {
try {
String planedYemplateId = dripService.deploy(id, null);
return new ResponseEntity<>(planedYemplateId, HttpStatus.OK);
......@@ -49,9 +51,9 @@ public class DeployerApiController implements DeployerApi {
return new ResponseEntity<>(HttpStatus.INTERNAL_SERVER_ERROR);
}
} else {
return new ResponseEntity<>(HttpStatus.NOT_ACCEPTABLE);
}
// } else {
// return new ResponseEntity<>(HttpStatus.NOT_ACCEPTABLE);
// }
}
}
......@@ -15,8 +15,10 @@ import javax.servlet.http.HttpServletRequest;
import nl.uva.sne.drip.service.DRIPService;
import nl.uva.sne.drip.sure.tosca.client.ApiException;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.CrossOrigin;
@Controller
@CrossOrigin(origins = "*")
public class PlannerApiController implements PlannerApi {
private static final Logger log = LoggerFactory.getLogger(PlannerApiController.class);
......@@ -38,21 +40,26 @@ public class PlannerApiController implements PlannerApi {
public ResponseEntity<String> planToscaTemplateByID(@ApiParam(
value = "ID of topolog template to plan", required = true)
@PathVariable("id") String id) {
String accept = request.getHeader("Accept");
if (accept != null && accept.contains("text/plain")) {
try {
String planedYemplateId = dripService.plan(id);
return new ResponseEntity<>(planedYemplateId, HttpStatus.OK);
} catch (ApiException | NotFoundException | IOException | TimeoutException | InterruptedException ex) {
java.util.logging.Logger.getLogger(PlannerApiController.class.getName()).log(Level.SEVERE, null, ex);
return new ResponseEntity<>(HttpStatus.INTERNAL_SERVER_ERROR);
}
} else {
return new ResponseEntity<>(HttpStatus.NOT_ACCEPTABLE);
// String accept = request.getHeader("Accept");
// if (accept != null && accept.contains("text/plain")) {
try {
String planedYemplateId = dripService.plan(id);
return new ResponseEntity<>(planedYemplateId, HttpStatus.OK);
} catch (NotFoundException | java.util.NoSuchElementException ex) {
java.util.logging.Logger.getLogger(ToscaTemplateApiController.class.getName()).log(Level.WARNING, null, ex);
return new ResponseEntity<>(HttpStatus.NOT_FOUND);
} catch (TimeoutException ex) {
java.util.logging.Logger.getLogger(PlannerApiController.class.getName()).log(Level.SEVERE, null, ex);
return new ResponseEntity<>(HttpStatus.GATEWAY_TIMEOUT);
} catch (ApiException | IOException | InterruptedException ex) {
java.util.logging.Logger.getLogger(PlannerApiController.class.getName()).log(Level.SEVERE, null, ex);
return new ResponseEntity<>(HttpStatus.INTERNAL_SERVER_ERROR);
}
// } else {
// return new ResponseEntity<>(HttpStatus.NOT_ACCEPTABLE);
// }
}
}
......@@ -18,10 +18,12 @@ import nl.uva.sne.drip.model.Exceptions.TypeExeption;
import nl.uva.sne.drip.service.DRIPService;
import nl.uva.sne.drip.sure.tosca.client.ApiException;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.CrossOrigin;
@javax.annotation.Generated(value = "io.swagger.codegen.languages.SpringCodegen", date = "2019-10-10T17:15:46.465Z")
@Controller
@CrossOrigin(origins = "*")
public class ProvisionerApiController implements ProvisionerApi {
private static final Logger log = LoggerFactory.getLogger(ProvisionerApiController.class);
......@@ -43,8 +45,8 @@ public class ProvisionerApiController implements ProvisionerApi {
public ResponseEntity<String> provisionPlanToscaTemplateByID(
@ApiParam(value = "ID of topolog template to provision", required = true)
@PathVariable("id") String id) {
String accept = request.getHeader("Accept");
if (accept != null && accept.contains("text/plain")) {
// String accept = request.getHeader("Accept");
// if (accept != null && accept.contains("text/plain")) {
try {
String planedYemplateId = dripService.provision(id);
return new ResponseEntity<>(planedYemplateId, HttpStatus.OK);
......@@ -55,9 +57,9 @@ public class ProvisionerApiController implements ProvisionerApi {
java.util.logging.Logger.getLogger(ProvisionerApiController.class.getName()).log(Level.SEVERE, null, ex);
return ResponseEntity.status(HttpStatus.INTERNAL_SERVER_ERROR).body(ex.getMessage());
}
} else {
return new ResponseEntity<>(HttpStatus.NOT_ACCEPTABLE);
}
// } else {
// return new ResponseEntity<>(HttpStatus.NOT_ACCEPTABLE);
// }
}
}
......@@ -14,12 +14,14 @@ import nl.uva.sne.drip.service.ToscaTemplateService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.http.HttpStatus;
import org.springframework.web.bind.annotation.CrossOrigin;
import org.springframework.web.bind.annotation.PathVariable;
import org.springframework.web.bind.annotation.RequestParam;
@javax.annotation.Generated(value = "io.swagger.codegen.languages.SpringCodegen", date = "2019-10-10T17:15:46.465Z")
@Controller
@CrossOrigin(origins = "*")
public class ScalerApiController implements ScalerApi {
private static final Logger log = LoggerFactory.getLogger(ScalerApiController.class);
......
......@@ -38,6 +38,24 @@ public interface ToscaTemplateApi {
@RequestMapping(value = "/manager/tosca_template/{id}",
method = RequestMethod.DELETE)
ResponseEntity<String> deleteToscaTemplateByID(@ApiParam(value = "ID of topology template to return", required = true) @PathVariable("id") String id, @ApiParam(value = "The node(s) to delete") @Valid @RequestParam(value = "node_name", required = false) List<String> nodeName);
@ApiOperation(value = "Deletes all tosca topology templates", nickname = "deleteAllToscaTemplates",
notes = "If the topology is provisoned it will delete the provison (Infrastructure). If it is deployed it will delete the deploymet too (Application)", response = String.class, authorizations = {
@Authorization(value = "auth", scopes = {
@AuthorizationScope(scope = "read:ToscaTemplate", description = "read your topolog template")
,
@AuthorizationScope(scope = "write:ToscaTemplate", description = "modify topolog template in your account")
})
}, tags = {})
@ApiResponses(value = {
@ApiResponse(code = 200, message = "successful operation", response = String.class)
,
@ApiResponse(code = 404, message = "ToscaTemplate not found")})
@RequestMapping(value = "/manager/tosca_template/all",
method = RequestMethod.DELETE)
ResponseEntity<String> deleteAllToscaTemplates();
@ApiOperation(value = "Find topolog template by ID", nickname = "getToscaTemplateByID", notes = "Returns a single topolog template", response = String.class, authorizations = {
@Authorization(value = "auth", scopes = {
......
......@@ -23,11 +23,13 @@ import nl.uva.sne.drip.service.DRIPService;
import nl.uva.sne.drip.service.ToscaTemplateService;
import nl.uva.sne.drip.sure.tosca.client.ApiException;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.CrossOrigin;
import org.springframework.web.bind.annotation.RequestParam;
@javax.annotation.Generated(value = "io.swagger.codegen.languages.SpringCodegen", date = "2019-10-10T17:15:46.465Z")
@Controller
@CrossOrigin(origins = "*")
public class ToscaTemplateApiController implements ToscaTemplateApi {
private static final Logger log = LoggerFactory.getLogger(ToscaTemplateApiController.class);
......@@ -50,39 +52,58 @@ public class ToscaTemplateApiController implements ToscaTemplateApi {
@ApiParam(value = "ID of topology template to return", required = true)
@PathVariable("id") String id, @ApiParam(value = "The node(s) to delete")
@Valid @RequestParam(value = "node_names", required = false) List<String> nodeName) {
String accept = request.getHeader("Accept");
if (accept != null && accept.contains("text/plain")) {
try {
dripService.delete(id, nodeName);
return new ResponseEntity<>(id, HttpStatus.OK);
} catch (IOException | ApiException | TypeExeption | TimeoutException | InterruptedException ex) {
java.util.logging.Logger.getLogger(ToscaTemplateApiController.class.getName()).log(Level.SEVERE, null, ex);
return new ResponseEntity<>(HttpStatus.INTERNAL_SERVER_ERROR);
} catch (NotFoundException ex) {
return new ResponseEntity<>(HttpStatus.NOT_FOUND);
}
} else {
return new ResponseEntity<>(HttpStatus.NOT_ACCEPTABLE);
// String accept = request.getHeader("Accept");
// if (accept != null && accept.contains("text/plain")) {
try {
dripService.delete(id, nodeName);
return new ResponseEntity<>(id, HttpStatus.OK);
} catch (IOException | ApiException | TypeExeption | TimeoutException | InterruptedException ex) {
java.util.logging.Logger.getLogger(ToscaTemplateApiController.class.getName()).log(Level.SEVERE, null, ex);
return new ResponseEntity<>(HttpStatus.INTERNAL_SERVER_ERROR);
} catch (NotFoundException ex) {
return new ResponseEntity<>(HttpStatus.NOT_FOUND);
}
// } else {
// return new ResponseEntity<>(HttpStatus.NOT_ACCEPTABLE);
// }
}
@Override
public ResponseEntity<String> getToscaTemplateByID(@ApiParam(value = "ID of topolog template to return", required = true) @PathVariable("id") String id) {
String accept = request.getHeader("Accept");
if (accept != null && accept.contains("text/plain")) {
try {
String ymlStr = toscaTemplateService.findByID(id);
return new ResponseEntity<>(ymlStr, HttpStatus.OK);
} catch (JsonProcessingException | NotFoundException ex) {
java.util.logging.Logger.getLogger(ToscaTemplateApiController.class.getName()).log(Level.SEVERE, null, ex);
return new ResponseEntity<>(HttpStatus.INTERNAL_SERVER_ERROR);
public ResponseEntity<String> deleteAllToscaTemplates() {
List<String> ids = toscaTemplateService.getAllIds();
try {
for (String id : ids) {
dripService.delete(id, null);
}
return new ResponseEntity<>(HttpStatus.OK);
} catch (IOException | ApiException | TypeExeption | TimeoutException | InterruptedException ex) {
java.util.logging.Logger.getLogger(ToscaTemplateApiController.class.getName()).log(Level.SEVERE, null, ex);
return new ResponseEntity<>(HttpStatus.INTERNAL_SERVER_ERROR);
} catch (NotFoundException ex) {
return new ResponseEntity<>(HttpStatus.NOT_FOUND);
}
}
} else {
return new ResponseEntity<>(HttpStatus.NOT_ACCEPTABLE);
@Override
public ResponseEntity<String> getToscaTemplateByID(@ApiParam(value = "ID of topolog template to return", required = true)
@PathVariable("id") String id) {
// String accept = request.getHeader("Accept");
// if (accept != null && accept.contains("text/plain")) {
try {
java.util.logging.Logger.getLogger(ToscaTemplateApiController.class.getName()).log(Level.INFO, "Requestsed ID: {0}", id);
String ymlStr = toscaTemplateService.findByID(id);
return new ResponseEntity<>(ymlStr, HttpStatus.OK);
} catch (NotFoundException | java.util.NoSuchElementException ex) {
java.util.logging.Logger.getLogger(ToscaTemplateApiController.class.getName()).log(Level.WARNING, null, ex);
return new ResponseEntity<>(HttpStatus.NOT_FOUND);
} catch (JsonProcessingException ex) {
java.util.logging.Logger.getLogger(ToscaTemplateApiController.class.getName()).log(Level.SEVERE, null, ex);
return new ResponseEntity<>(HttpStatus.INTERNAL_SERVER_ERROR);
}
// } else {
// return new ResponseEntity<>(HttpStatus.NOT_ACCEPTABLE);
// }
}
@Override
......@@ -90,48 +111,48 @@ public class ToscaTemplateApiController implements ToscaTemplateApi {
value = "ID of topolog template to return", required = true)
@PathVariable("id") String id, @ApiParam(value = "file detail")
@Valid @RequestPart("file") MultipartFile file) {
String accept = request.getHeader("Accept");
if (accept != null && accept.contains("text/plain")) {
try {
id = toscaTemplateService.updateToscaTemplateByID(id, file);
return new ResponseEntity<>(id, HttpStatus.OK);
} catch (IOException e) {
log.error("Couldn't serialize response for content type ", e);
java.util.logging.Logger.getLogger(ToscaTemplateApiController.class.getName()).log(Level.SEVERE, null, e);
return new ResponseEntity<>(HttpStatus.INTERNAL_SERVER_ERROR);
}
} else {
return new ResponseEntity<>(HttpStatus.NOT_ACCEPTABLE);
// String accept = request.getHeader("Accept");
// if (accept != null && accept.contains("text/plain")) {
try {
id = toscaTemplateService.updateToscaTemplateByID(id, file);
return new ResponseEntity<>(id, HttpStatus.OK);
} catch (IOException e) {
log.error("Couldn't serialize response for content type ", e);
java.util.logging.Logger.getLogger(ToscaTemplateApiController.class.getName()).log(Level.SEVERE, null, e);
return new ResponseEntity<>(HttpStatus.INTERNAL_SERVER_ERROR);
}
// } else {
// return new ResponseEntity<>(HttpStatus.NOT_ACCEPTABLE);
// }
}
@Override
public ResponseEntity<String> uploadToscaTemplate(@ApiParam(value = "file detail") @Valid @RequestPart("file") MultipartFile file) {
String accept = request.getHeader("Accept");
if (accept != null && accept.contains("*/*")) {
try {
String id = toscaTemplateService.saveFile(file);
return new ResponseEntity<>(id, HttpStatus.OK);
} catch (IOException e) {
java.util.logging.Logger.getLogger(ToscaTemplateApiController.class.getName()).log(Level.SEVERE, null, e);
log.error("Couldn't serialize response for content type application/json", e);
return new ResponseEntity<>(HttpStatus.INTERNAL_SERVER_ERROR);
}
} else {
return new ResponseEntity<>(HttpStatus.NOT_ACCEPTABLE);
// String accept = request.getHeader("Accept");
// if (accept != null && accept.contains("*/*")) {
try {
String id = toscaTemplateService.saveFile(file);
return new ResponseEntity<>(id, HttpStatus.OK);
} catch (IOException e) {
java.util.logging.Logger.getLogger(ToscaTemplateApiController.class.getName()).log(Level.SEVERE, null, e);
log.error("Couldn't serialize response for content type application/json", e);
return new ResponseEntity<>(HttpStatus.INTERNAL_SERVER_ERROR);
}
// } else {
// return new ResponseEntity<>(HttpStatus.NOT_ACCEPTABLE);
// }
}
@Override
public ResponseEntity<List<String>> getToscaTemplateIDs() {
String accept = request.getHeader("Accept");
if (accept != null && accept.contains("application/json")) {
List<String> ids = toscaTemplateService.getAllIds();
return new ResponseEntity<>(ids, HttpStatus.NOT_IMPLEMENTED);
} else {
return new ResponseEntity<>(HttpStatus.NOT_ACCEPTABLE);
}
// String accept = request.getHeader("Accept");
// if (accept != null && accept.contains("application/json")) {
List<String> ids = toscaTemplateService.getAllIds();
return new ResponseEntity<>(ids, HttpStatus.OK);
// } else {
// return new ResponseEntity<>(HttpStatus.NOT_ACCEPTABLE);
// }
}
......
......@@ -17,10 +17,12 @@ import javax.validation.Valid;
import javax.servlet.http.HttpServletRequest;
import java.io.IOException;
import java.util.logging.Level;
import org.springframework.web.bind.annotation.CrossOrigin;
@javax.annotation.Generated(value = "io.swagger.codegen.languages.SpringCodegen", date = "2019-10-10T17:15:46.465Z")
@Controller
@CrossOrigin(origins = "*")
public class UserApiController implements UserApi {
private static final Logger log = LoggerFactory.getLogger(UserApiController.class);
......
......@@ -15,8 +15,12 @@ public class SwaggerDocumentationConfig {
ApiInfo apiInfo() {
return new ApiInfoBuilder()
.title("DRIP")
.description("The Dynamic Real-time infrastructure planner (DRIP) allows application developers to seamlessly plan a customized virtual infrastructure based on application level constraints on QoS and resource budgets, provisioning the virtual infrastructure, deploy application components onto the virtual infrastructure, and start execution on demand using TOSCA.")
.title("CONF")
.description("Allows application developers to seamlessly plan a customized "
+ "virtual infrastructure based on application level constraints "
+ "on QoS and resource budgets, provisioning the virtual infrastructure, "
+ "deploy application components onto the virtual infrastructure, "
+ "and start execution on demand using TOSCA.")
.license("Apache 2.0")
.licenseUrl("http://www.apache.org/licenses/LICENSE-2.0.html")
.termsOfServiceUrl("")
......
......@@ -9,9 +9,12 @@ import com.rabbitmq.client.ConnectionFactory;
import com.rabbitmq.client.DefaultConsumer;
import com.rabbitmq.client.Envelope;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.UUID;
import java.util.concurrent.ArrayBlockingQueue;
import java.util.concurrent.BlockingQueue;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.TimeoutException;
import java.util.logging.Level;
import java.util.logging.Logger;
......@@ -40,9 +43,9 @@ import org.springframework.stereotype.Service;
@Service
public class DRIPCaller implements AutoCloseable {
private Connection connection;
private Channel channel;
private String replyQueueName;
// private Connection connection;
// private Channel channel;
// private String replyQueueName;
private String requestQeueName;
private final ObjectMapper mapper;
private final ConnectionFactory factory;
......@@ -60,72 +63,93 @@ public class DRIPCaller implements AutoCloseable {
}
public void init() throws IOException, TimeoutException {
if (connection == null || !connection.isOpen()) {
connection = factory.newConnection();
channel = connection.createChannel();
// create a single callback queue per client not per requests.
replyQueueName = channel.queueDeclare().getQueue();
}
}
/**
* @return the connection
*/
public Connection getConnection() {
return connection;
}
/**
* @return the channel
*/
public Channel getChannel() {
return channel;
}
/**
* @return the replyQueueName
*/
public String getReplyQueueName() {
return replyQueueName;
// if (connection == null || !connection.isOpen()) {
// connection = factory.newConnection();
// }
}
// /**
// * @return the connection
// */
// public Connection getConnection() {
// return connection;
// }
// /**
// * @return the channel
// */
// public Channel getChannel() {
// return channel;
// }
// /**
// * @return the replyQueueName
// */
// public String getReplyQueueName() {
// return replyQueueName;
// }
@Override
public void close() throws IOException, TimeoutException {
if (channel != null && channel.isOpen()) {
channel.close();
}
if (connection != null && connection.isOpen()) {
connection.close();
}
// if (connection != null && connection.isOpen()) {
// connection.close();
// }
}
public Message call(Message r) throws IOException, TimeoutException, InterruptedException {
Channel channel = null;
Connection connection = null;
try {
String jsonInString = mapper.writeValueAsString(r);
int timeOut = 25;
if (getRequestQeueName().equals("planner")) {
timeOut = 5;
}
if (getRequestQeueName().equals("provisioner")) {
timeOut = 10;
}
connection = factory.newConnection();
String jsonInString = mapper.writeValueAsString(r);
//Build a correlation ID to distinguish responds
final String corrId = UUID.randomUUID().toString();
AMQP.BasicProperties props = new AMQP.BasicProperties.Builder()
.correlationId(corrId)
.replyTo(getReplyQueueName())
.build();
Logger.getLogger(DRIPCaller.class.getName()).log(Level.INFO, "Sending: {0} to queue: {1}", new Object[]{jsonInString, getRequestQeueName()});
getChannel().basicPublish("", getRequestQeueName(), props, jsonInString.getBytes("UTF-8"));
final BlockingQueue<String> response = new ArrayBlockingQueue(1);
getChannel().basicConsume(getReplyQueueName(), true, new DefaultConsumer(getChannel()) {
@Override
public void handleDelivery(String consumerTag, Envelope envelope, AMQP.BasicProperties properties, byte[] body) throws IOException {
if (properties.getCorrelationId().equals(corrId)) {
response.offer(new String(body, "UTF-8"));
channel = connection.createChannel();
Map<String, Object> args = new HashMap<>();
String replyQueueName = channel.queueDeclare().getQueue();
//Build a correlation ID to distinguish responds
final String corrId = UUID.randomUUID().toString();
AMQP.BasicProperties props = new AMQP.BasicProperties.Builder()
.correlationId(corrId)
.expiration(String.valueOf(timeOut * 60000))
.replyTo(replyQueueName)
.build();
Logger.getLogger(DRIPCaller.class.getName()).log(Level.INFO, "Sending: {0} to queue: {1}", new Object[]{jsonInString, getRequestQeueName()});
channel.basicPublish("", getRequestQeueName(), props, jsonInString.getBytes("UTF-8"));
final BlockingQueue<String> response = new ArrayBlockingQueue(1);
channel.basicConsume(replyQueueName, true, new DefaultConsumer(channel) {
@Override
public void handleDelivery(String consumerTag, Envelope envelope, AMQP.BasicProperties properties, byte[] body) throws IOException {
if (properties.getCorrelationId().equals(corrId)) {
response.offer(new String(body, "UTF-8"));
}
}
}
});
String resp = response.take();
Logger.getLogger(DRIPCaller.class.getName()).log(Level.INFO, "Got: {0}", resp);
});
// String resp = response.take();
return mapper.readValue(resp, Message.class);
String resp = response.poll(timeOut, TimeUnit.MINUTES);
Logger.getLogger(DRIPCaller.class.getName()).log(Level.INFO, "Got: {0}", resp);
if (resp == null) {
throw new TimeoutException("Timeout on qeue: " + getRequestQeueName());
}
return mapper.readValue(resp, Message.class);
} finally {
if (channel != null && channel.isOpen()) {
channel.close();
}
if (connection != null && connection.isOpen()) {
connection.close();
}
}
}
/**
......
......@@ -58,19 +58,28 @@ public class DRIPService {
@Value("${message.broker.queue.deployer}")
private String deployerQueueName;
private String execute(ToscaTemplate toscaTemplate, String requestQeueName) throws JsonProcessingException, ApiException, IOException, TimeoutException, InterruptedException {
caller.init();
Logger.getLogger(DRIPService.class.getName()).log(Level.INFO, "toscaTemplate:\n{0}", toscaTemplate);
Message message = new Message();
message.setOwner("user");
message.setCreationDate(System.currentTimeMillis());
message.setToscaTemplate(toscaTemplate);
caller.setRequestQeueName(requestQeueName);
Message response = caller.call(message);
ToscaTemplate updatedToscaTemplate = response.getToscaTemplate();
caller.close();
return toscaTemplateService.save(updatedToscaTemplate);
private String execute(ToscaTemplate toscaTemplate, String requestQeueName) throws IOException, TimeoutException, InterruptedException{
try {
caller.init();
// Logger.getLogger(DRIPService.class.getName()).log(Level.INFO, "toscaTemplate:\n{0}", toscaTemplate);
Message message = new Message();
message.setOwner("user");
message.setCreationDate(System.currentTimeMillis());
message.setToscaTemplate(toscaTemplate);
caller.setRequestQeueName(requestQeueName);
Message response = caller.call(message);
ToscaTemplate updatedToscaTemplate = response.getToscaTemplate();
return toscaTemplateService.save(updatedToscaTemplate);
} catch (IOException | TimeoutException | InterruptedException ex) {
throw ex;
}finally{
try {
caller.close();
} catch (IOException | TimeoutException ex) {
Logger.getLogger(DRIPService.class.getName()).log(Level.SEVERE, null, ex);
}
}
}
......@@ -162,13 +171,24 @@ public class DRIPService {
public String delete(String id, List<String> nodeNames) throws NotFoundException, IOException, JsonProcessingException, ApiException, TypeExeption, TimeoutException, InterruptedException {
ToscaTemplate toscaTemplate = initExecution(id);
boolean nothingToDelete = true;
//If no nodes are specified delete all the infrastructure
if (nodeNames == null || nodeNames.isEmpty()) {
List<NodeTemplateMap> vmTopologies = helper.getVMTopologyTemplates();
for (NodeTemplateMap vmTopology : vmTopologies) {
toscaTemplate = setDesieredSate(toscaTemplate, vmTopology, NODE_STATES.DELETED);
if (vmTopologies != null) {
for (NodeTemplateMap vmTopology : vmTopologies) {
NODE_STATES currentState = helper.getNodeCurrentState(vmTopology);
if (currentState != null && currentState != NODE_STATES.DELETED) {
nothingToDelete = false;
toscaTemplate = setDesieredSate(toscaTemplate, vmTopology, NODE_STATES.DELETED);
}
}
if (!nothingToDelete) {
this.toscaTemplateService.deleteByID(id);
return execute(toscaTemplate, provisionerQueueName);
}
}
return execute(toscaTemplate, provisionerQueueName);
return id;
} else {
}
......
......@@ -14,9 +14,7 @@ import com.fasterxml.jackson.dataformat.yaml.YAMLGenerator.Feature;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.Optional;
import java.util.logging.Level;
import java.util.logging.Logger;
import nl.uva.sne.drip.api.NotFoundException;
import nl.uva.sne.drip.dao.ToscaTemplateDAO;
import nl.uva.sne.drip.model.Exceptions.TypeExeption;
......@@ -76,6 +74,7 @@ public class ToscaTemplateService {
public String findByID(String id) throws JsonProcessingException, NotFoundException {
ToscaTemplate tt = dao.findById(id).get();
if (tt == null) {
java.util.logging.Logger.getLogger(ToscaTemplateService.class.getName()).log(Level.SEVERE, "ToscaTemplate with id: " + id + " not found");
throw new NotFoundException(404, "ToscaTemplate with id: " + id + " not found");
}
String ymlStr = objectMapper.writeValueAsString(tt);
......
......@@ -108,7 +108,7 @@ def handle_delivery(message, sys=None):
template_dict = tosca_helper.get_tosca_template_2_topology_template_dictionary(tosca_template)
Planner(yaml_dict_tpl=template_dict, spec_service=spec_service)
logger.info("template ----: \n" + yaml.dump(template_dict))
logger.debug("template ----: \n" + yaml.dump(template_dict))
response = {'toscaTemplate': template_dict}
output_current_milli_time = int(round(time.time() * 1000))
......@@ -130,14 +130,14 @@ if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
if sys.argv[1] == "test_local":
tosca_path = "../TOSCA/"
input_tosca_file_path = tosca_path + '/application_example_updated.yaml'
input_tosca_file_path = tosca_path + '/generated_tosca_description_a8a061322c3b4b5593c289df841727af.yaml'
conf = {'url': "http://host"}
spec_service = SpecService(conf)
test_planner = Planner(input_tosca_file_path, spec_service)
test_tosca_template = test_planner.resolve_requirements()
test_tosca_template = test_planner.set_node_templates_properties()
template_dict = tosca_helper.get_tosca_template_2_topology_template_dictionary(test_tosca_template)
logger.info("template ----: \n" + yaml.dump(template_dict))
logger.debug("template ----: \n" + yaml.dump(template_dict))
ToscaTemplate(yaml_dict_tpl=copy.deepcopy(template_dict))
......
This diff is collapsed.
import copy
from toscaparser.nodetemplate import NodeTemplate
from toscaparser.properties import Property
import networkx as nx
import logging
from service.specification_analyzer import SpecificationAnalyzer
from util import tosca_helper
def get_default_value(node_property):
    """Resolve a concrete value for a TOSCA node property.

    Resolution order:
      1. If the property is required and its value is still a schema dict
         (has 'required' and 'type' keys), return its declared default.
      2. Otherwise, if the property is required and has a concrete value,
         return that value.
      3. Return None when nothing can be resolved (or node_property is None).

    :param node_property: a toscaparser-style property object exposing
        ``name``, ``required``, ``value``, ``default`` and ``constraints``.
    :return: ``{name: value}`` dict, or None.
    """
    if node_property and node_property.required and isinstance(node_property.value, dict) and 'required' in \
            node_property.value and 'type' in node_property.value:
        if node_property.default:
            return {node_property.name: node_property.default}
        if node_property.constraints:
            for constraint in node_property.constraints:
                # NOTE(review): constraints are inspected but not resolved into
                # a value yet; log at debug level instead of the original
                # stdout print() debug leftover.
                logging.debug(constraint)
    if node_property and node_property.required and node_property.value:
        return {node_property.name: node_property.value}
    return None
class SimpleAnalyzer(SpecificationAnalyzer):
    """Concrete analyzer that expands VM nodes to satisfy minimum
    master/worker counts declared by orchestrator / GlusterFS nodes.
    """

    def __init__(self, tosca_template):
        super(SimpleAnalyzer, self).__init__(tosca_template)

    def set_relationship_occurrences(self):
        """Duplicate VM compute nodes until the template holds enough of
        them for the requested number of masters and workers, wiring each
        clone into the topology node's requirements.

        Returns the list of nodes that were added or modified.
        """
        return_nodes = []
        # Nodes that can declare minimum VM counts: docker orchestrators
        # and GlusterFS applications.
        nodes_with_min_vms = tosca_helper.get_nodes_by_type('tosca.nodes.QC.docker.Orchestrator',
                                                            self.tosca_template.nodetemplates, self.all_node_types,
                                                            self.all_custom_def)
        nodes_with_min_vms = nodes_with_min_vms + tosca_helper.get_nodes_by_type('tosca.nodes.QC.Application.GlusterFS',
                                                                                 self.tosca_template.nodetemplates,
                                                                                 self.all_node_types,
                                                                                 self.all_custom_def)
        min_masters_num = 0
        workers_num = 0
        if nodes_with_min_vms:
            # Accumulate requested counts, reading from the raw template
            # properties when present, otherwise from the property API.
            for node_with_min_vms in nodes_with_min_vms:
                if 'properties' in node_with_min_vms.entity_tpl:
                    if 'min_masters_num' in node_with_min_vms.entity_tpl['properties']:
                        min_masters_num = min_masters_num + node_with_min_vms.entity_tpl['properties'][
                            'min_masters_num']
                    if 'min_workers_num' in node_with_min_vms.entity_tpl['properties']:
                        workers_num = workers_num + node_with_min_vms.entity_tpl['properties']['min_workers_num']
                else:
                    min_masters_num = min_masters_num + node_with_min_vms.get_property_value('min_masters_num')
                    workers_num = workers_num + node_with_min_vms.get_property_value('min_workers_num')
        # NOTE(review): only negative counts are clamped to 1 here; a zero
        # count stays zero — confirm this is intended.
        if min_masters_num < 0:
            min_masters_num = 1
        topology_nodes = tosca_helper.get_nodes_by_type('tosca.nodes.QC.VM.topology',
                                                        self.tosca_template.nodetemplates, self.all_node_types,
                                                        self.all_custom_def)
        if topology_nodes:
            vm_nodes = tosca_helper.get_nodes_by_type('tosca.nodes.QC.VM.Compute',
                                                      self.tosca_template.nodetemplates, self.all_node_types,
                                                      self.all_custom_def)
            if vm_nodes:
                # Clone the first VM node until we have min_masters_num VMs.
                for i in range(len(vm_nodes), min_masters_num):
                    old_vm_name = vm_nodes[0].name
                    new_vm = copy.deepcopy(vm_nodes[0])
                    new_vm_name = new_vm.name + '_' + str(i)
                    new_vm.name = new_vm_name
                    # Re-key the clone's template entry under its new name.
                    templates = new_vm.templates.pop(old_vm_name)
                    new_vm.templates[new_vm_name] = templates
                    return_nodes.append(new_vm)
                    # Attach the clone to the topology via a copied VM
                    # capability requirement; only the first matching
                    # requirement is used (note the break).
                    for requirement in topology_nodes[0].requirements:
                        requirement_key = next(iter(requirement))
                        requirement_value = requirement[requirement_key]
                        if requirement_value['capability'] == 'tosca.capabilities.QC.VM':
                            new_requirement = copy.deepcopy(requirement)
                            new_requirement[requirement_key]['node'] = new_vm.name
                            topology_nodes[0].requirements.append(new_requirement)
                            return_nodes.append(topology_nodes[0])
                            break
                # Same cloning pass for workers; range starts from the current
                # VM count, so masters created above are not re-counted here.
                for i in range(len(vm_nodes), workers_num + 1):
                    old_vm_name = vm_nodes[0].name
                    new_vm = copy.deepcopy(vm_nodes[0])
                    new_vm_name = new_vm.name + '_' + str(i)
                    new_vm.name = new_vm_name
                    templates = new_vm.templates.pop(old_vm_name)
                    new_vm.templates[new_vm_name] = templates
                    return_nodes.append(new_vm)
                    for requirement in topology_nodes[0].requirements:
                        requirement_key = next(iter(requirement))
                        requirement_value = requirement[requirement_key]
                        if requirement_value['capability'] == 'tosca.capabilities.QC.VM':
                            new_requirement = copy.deepcopy(requirement)
                            new_requirement[requirement_key]['node'] = new_vm.name
                            topology_nodes[0].requirements.append(new_requirement)
                            return_nodes.append(topology_nodes[0])
                            break
        return return_nodes

    def set_node_specifications(self):
        """Apply default properties to every node template.

        NOTE(review): this calls self.set_default_node_properties, which
        appears commented out later in this module — unless it is defined on
        the base class this raises AttributeError at runtime; confirm.
        """
        nodes = []
        for node_template in self.tosca_template.nodetemplates:
            nodes.append(self.set_default_node_properties(node_template))
        return nodes
# def set_default_node_properties(self, node):
# logging.info('Setting properties for: ' + str(node.type))
# ancestors_properties = tosca_helper.get_all_ancestors_properties(node, self.all_node_types,
# self.all_custom_def)
# default_properties = {}
# for ancestors_property in ancestors_properties:
# default_property = get_default_value(ancestors_property)
# if default_property:
# default_properties[next(iter(default_property))] = default_property[next(iter(default_property))]
#
# if default_properties:
# for default_property in default_properties:
# node.get_properties_objects().append(default_property)
# node_name = next(iter(node.templates))
# if 'properties' in node.templates[node_name]:
# for prop_name in node.templates[node_name]['properties']:
# if isinstance(node.templates[node_name]['properties'][prop_name], dict) or \
# isinstance(node.templates[node_name]['properties'][prop_name], str):
# if 'required' in node.templates[node_name]['properties'][prop_name] and \
# node.templates[node_name]['properties'][prop_name]['required'] and \
# 'default' in node.templates[node_name]['properties'][prop_name] and \
# prop_name not in default_properties:
# default_properties[prop_name] = node.templates[node_name]['properties'][prop_name][
# 'default']
#
# logging.info(
# 'Adding to : ' + str(node.templates[node_name]) + ' properties: ' + str(default_properties))
# node.templates[node_name]['properties'] = default_properties
# return node
class SpecService:
    """Lookup service for deployment specification values, backed by a
    configuration object supplied at construction time.
    """

    def __init__(self, conf):
        # Retain the raw configuration for later lookups.
        self.configuration = conf

    def get_property(self, prop_key):
        """Return the value for ``prop_key``.

        No resolution is implemented; always returns None so callers fall
        back to their defaults.
        """
        return None
from abc import abstractmethod, ABCMeta
from toscaparser.tosca_template import ToscaTemplate
import networkx as nx
# import matplotlib.pyplot as plt
class SpecificationAnalyzer(metaclass=ABCMeta):
    """Abstract base for TOSCA specification analyzers.

    Builds a directed dependency graph from the template's node
    requirements and records root nodes (no incoming edges) and leaf
    nodes (no outgoing edges) for subclasses to use.
    """

    def __init__(self, tosca_template):
        self.tosca_template = tosca_template
        # Merge the built-in TOSCA type definitions with any custom
        # definitions carried by the template.
        self.tosca_node_types = self.tosca_template.nodetemplates[0].type_definition.TOSCA_DEF
        self.all_custom_def = self.tosca_template.nodetemplates[0].custom_def
        self.all_node_types = {}
        self.all_node_types.update(self.tosca_node_types.items())
        self.all_node_types.update(self.all_custom_def.items())
        self.required_nodes = []
        self.g = self.build_graph(self.tosca_template.nodetemplates)
        # Roots: in-degree 0. Leaves: out-degree 0.
        self.root_nodes = [name for name, degree in self.g.in_degree() if degree == 0]
        self.leaf_nodes = [name for name, degree in self.g.out_degree() if degree == 0]

    def build_graph(self, node_templates):
        """Build a DiGraph with one node per template and one edge per
        requirement, annotated with the relationship type (defaulting to
        tosca.relationships.DependsOn when none is given).
        """
        graph = nx.DiGraph()
        for template in node_templates:
            graph.add_node(template.name, attr_dict=template.entity_tpl)
            for requirement in template.requirements:
                requirement_name = next(iter(requirement))
                spec = requirement[requirement_name]
                if 'relationship' not in spec:
                    relationship = 'tosca.relationships.DependsOn'
                elif 'type' in spec['relationship']:
                    # Relationship given as a mapping with an explicit type.
                    relationship = spec['relationship']['type']
                else:
                    # Relationship given directly (e.g. as a type name).
                    relationship = spec['relationship']
                graph.add_edge(template.name, spec['node'], relationship=relationship)
        # nx.draw(graph, with_labels=True)
        # plt.savefig("/tmp/graph.png")
        # plt.show()
        return graph

    @abstractmethod
    def set_node_specifications(self):
        raise NotImplementedError('Must implement upload in subclasses')

    @abstractmethod
    def set_relationship_occurrences(self):
        raise NotImplementedError('Must implement upload in subclasses')
import copy
import json
import logging
import os
import os.path
import tempfile
import time
import unittest
import requests
import yaml
from toscaparser.tosca_template import ToscaTemplate
from planner.planner import Planner
from service.spec_service import SpecService
from util import tosca_helper
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class MyTestCase(unittest.TestCase):
    """Integration tests for the Planner.

    Each test downloads a sample TOSCA file from the sdia-tosca repository
    (network access required) and runs it through the planner pipeline.
    """

    # def test_tic_gluster_fs(self):
    #     url = 'https://raw.githubusercontent.com/qcdis-sdia/sdia-tosca/master/examples/glusterFS.yaml'
    #     input_tosca_file_path = self.get_remote_tosca_file(url)
    #     self.run_test(input_tosca_file_path)

    def test_tic(self):
        url = 'https://raw.githubusercontent.com/qcdis-sdia/sdia-tosca/master/examples/TIC.yaml'
        input_tosca_file_path = self.get_remote_tosca_file(url)
        self.run_test(input_tosca_file_path)

    def test_docker(self):
        url = 'https://raw.githubusercontent.com/qcdis-sdia/sdia-tosca/master/examples/application_example_updated.yaml'
        input_tosca_file_path = self.get_remote_tosca_file(url)
        self.run_test(input_tosca_file_path)
        url = 'https://raw.githubusercontent.com/qcdis-sdia/sdia-tosca/master/examples/lifeWatch_vre1.yaml'
        input_tosca_file_path = self.get_remote_tosca_file(url)
        self.run_test(input_tosca_file_path)

    def test_kubernetes(self):
        url = 'https://raw.githubusercontent.com/qcdis-sdia/sdia-tosca/master/examples/kubernetes.yaml'
        input_tosca_file_path = self.get_remote_tosca_file(url)
        self.run_test(input_tosca_file_path)

    def test_topology(self):
        url = 'https://raw.githubusercontent.com/qcdis-sdia/sdia-tosca/master/examples/topology.yaml'
        input_tosca_file_path = self.get_remote_tosca_file(url)
        self.run_test(input_tosca_file_path)

    def test_compute(self):
        url = 'https://raw.githubusercontent.com/qcdis-sdia/sdia-tosca/master/examples/compute.yaml'
        input_tosca_file_path = self.get_remote_tosca_file(url)
        self.run_test(input_tosca_file_path)

    def test_lifeWatch(self):
        url = 'https://raw.githubusercontent.com/qcdis-sdia/sdia-tosca/master/examples/lifeWatch_vre1.yaml'
        tic_tosca = requests.get(url)
        input_tosca_file_path = os.path.join(tempfile.gettempdir(),'TIC.yaml')
        open( input_tosca_file_path, 'wb').write(tic_tosca.content)
        self.run_test(input_tosca_file_path)

    def get_input_tosca_file_path(self, file_name):
        """Locate a local TOSCA sample, trying two candidate directories."""
        tosca_path = "../../TOSCA/"
        input_tosca_file_path = tosca_path + file_name
        if not os.path.exists(input_tosca_file_path):
            tosca_path = "../TOSCA/"
            input_tosca_file_path = tosca_path + file_name
        self.assertEqual(True, os.path.exists(input_tosca_file_path),
                         "Input TOSCA file: " + input_tosca_file_path + " not found")
        return input_tosca_file_path

    def run_test(self, input_tosca_file_path):
        """Run the full planner pipeline on a TOSCA file and re-parse the
        resulting template to check it is still valid.
        """
        conf = {'url': "http://host"}
        spec_service = SpecService(conf)
        test_planner = Planner(input_tosca_file_path, spec_service)
        test_tosca_template = test_planner.resolve_requirements()
        test_tosca_template = test_planner.set_node_templates_properties()
        # The dictionary is built once, after property resolution; the
        # original also built (and discarded) it before resolution.
        template_dict = tosca_helper.get_tosca_template_2_topology_template_dictionary(test_tosca_template)
        logger.info("template ----: \n" + yaml.dump(template_dict))
        print(yaml.dump(template_dict))
        # Round-trip through the parser: raises if the planned template is invalid.
        ToscaTemplate(yaml_dict_tpl=copy.deepcopy(template_dict))
        response = {'toscaTemplate': template_dict}
        output_current_milli_time = int(round(time.time() * 1000))
        response["creationDate"] = output_current_milli_time
        response["parameters"] = []
        # print("Output message:" + json.dumps(response))
        self.assertEqual(True, True)

    def get_remote_tosca_file(self, url):
        """Download a TOSCA file to the temp dir and return its path."""
        tosca = requests.get(url)
        input_tosca_file_path = os.path.join(tempfile.gettempdir(),'test_tosca_file.yaml')
        open( input_tosca_file_path, 'wb').write(tosca.content)
        return input_tosca_file_path
This diff is collapsed.
Metadata-Version: 1.0
Name: drip-planner2
Version: 0.1
Summary: UNKNOWN
Home-page: UNKNOWN
Author: S. Koulouzis
Author-email: UNKNOWN
License: UNKNOWN
Description: Long description of the package
Platform: UNKNOWN
setup.py
drip_planner2.egg-info/PKG-INFO
drip_planner2.egg-info/SOURCES.txt
drip_planner2.egg-info/dependency_links.txt
drip_planner2.egg-info/requires.txt
drip_planner2.egg-info/top_level.txt
planner/__init__.py
planner/planner.py
service/__init__.py
service/simple_spec_alayzer.py
service/spec_service.py
service/specification_analyzer.py
test/__init__.py
test/test_planner.py
util/__init__.py
util/tosca_helper.py
\ No newline at end of file
matplotlib==3.1.1
matplotlib==3.1.1
names==0.3.0
networkx==2.4
pika==1.1.0
tosca-parser==1.6.0
This diff is collapsed.
......@@ -3,5 +3,5 @@ pika==1.1.0
names==0.3.0
networkx==2.4
pyyaml==5.3.1
tosca-parser==2.0.0
tosca-parser==2.1.1
matplotlib==3.2.1
\ No newline at end of file
......@@ -20,46 +20,51 @@ logger.setLevel(logging.DEBUG)
class MyTestCase(unittest.TestCase):
def test_open_stack(self):
url = 'https://raw.githubusercontent.com/qcdis-sdia/sdia-tosca/master/examples/openstack.yaml'
input_tosca_file_path = self.get_remote_tosca_file(url)
self.run_test(input_tosca_file_path)
def test_tic_gluster_fs(self):
url = 'https://raw.githubusercontent.com/QCDIS/sdia-tosca/master/examples/glusterFS.yaml'
url = 'https://raw.githubusercontent.com/qcdis-sdia/sdia-tosca/master/examples/glusterFS.yaml'
input_tosca_file_path = self.get_remote_tosca_file(url)
self.run_test(input_tosca_file_path)
def test_tic(self):
url = 'https://raw.githubusercontent.com/QCDIS/sdia-tosca/master/examples/TIC.yaml'
url = 'https://raw.githubusercontent.com/qcdis-sdia/sdia-tosca/master/examples/TIC.yaml'
input_tosca_file_path = self.get_remote_tosca_file(url)
self.run_test(input_tosca_file_path)
def test_docker(self):
url = 'https://raw.githubusercontent.com/QCDIS/sdia-tosca/master/examples/application_example_updated.yaml'
url = 'https://raw.githubusercontent.com/qcdis-sdia/sdia-tosca/master/examples/application_example_updated.yaml'
input_tosca_file_path = self.get_remote_tosca_file(url)
self.run_test(input_tosca_file_path)
url = 'https://raw.githubusercontent.com/QCDIS/sdia-tosca/master/examples/lifeWatch_vre1.yaml'
url = 'https://raw.githubusercontent.com/qcdis-sdia/sdia-tosca/master/examples/lifeWatch_vre1.yaml'
input_tosca_file_path = self.get_remote_tosca_file(url)
self.run_test(input_tosca_file_path)
def test_kubernetes(self):
url = 'https://raw.githubusercontent.com/QCDIS/sdia-tosca/master/examples/kubernetes.yaml'
url = 'https://raw.githubusercontent.com/qcdis-sdia/sdia-tosca/master/examples/kubernetes.yaml'
input_tosca_file_path = self.get_remote_tosca_file(url)
self.run_test(input_tosca_file_path)
def test_topology(self):
url = 'https://raw.githubusercontent.com/QCDIS/sdia-tosca/master/examples/topology.yaml'
url = 'https://raw.githubusercontent.com/qcdis-sdia/sdia-tosca/master/examples/topology.yaml'
input_tosca_file_path = self.get_remote_tosca_file(url)
self.run_test(input_tosca_file_path)
def test_compute(self):
url = 'https://raw.githubusercontent.com/QCDIS/sdia-tosca/master/examples/compute.yaml'
url = 'https://raw.githubusercontent.com/qcdis-sdia/sdia-tosca/master/examples/compute.yaml'
input_tosca_file_path = self.get_remote_tosca_file(url)
self.run_test(input_tosca_file_path)
def test_lifeWatch(self):
url = 'https://raw.githubusercontent.com/QCDIS/sdia-tosca/master/examples/lifeWatch_vre1.yaml'
url = 'https://raw.githubusercontent.com/qcdis-sdia/sdia-tosca/master/examples/lifeWatch_vre1.yaml'
tic_tosca = requests.get(url)
input_tosca_file_path = os.path.join(tempfile.gettempdir(),'TIC.yaml')
input_tosca_file_path = os.path.join(tempfile.gettempdir(),'lifeWatch_vre1.yaml')
open( input_tosca_file_path, 'wb').write(tic_tosca.content)
self.run_test(input_tosca_file_path)
......
......@@ -9,7 +9,7 @@
<parent>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-parent</artifactId>
<version>2.2.1.RELEASE</version>
<version>2.2.2.RELEASE</version>
</parent>
<properties>
<project.build.sourceEncoding>UTF-8</project.build.sourceEncoding>
......
This diff is collapsed.
......@@ -3,6 +3,7 @@ COPY target/provisioner-3.0.0-jar-with-dependencies.jar provisioner-3.0.0-jar-wi
COPY etc/ etc
CMD jar -xf provisioner-3.0.0-jar-with-dependencies.jar application.properties && \
echo "----------------------------" && \
cat application.properties && \
sed -ie "s#^message.broker.host=.*#message.broker.host=$RABBITMQ_HOST#" application.properties && \
sed -ie "s#^sure-tosca.base.path=.*#sure-tosca.base.path=$SURE_TOSCA_BASE_PATH#" application.properties && \
......@@ -12,6 +13,7 @@ CMD jar -xf provisioner-3.0.0-jar-with-dependencies.jar application.properties &
echo "" >> application.properties && \
echo "cloud.storm.db.path=/etc/UD" >> application.properties && \
cat application.properties && \
echo "----------------------------" && \
jar -uf provisioner-3.0.0-jar-with-dependencies.jar application.properties && \
sleep 5 && \
java -jar provisioner-3.0.0-jar-with-dependencies.jar
......
......@@ -164,6 +164,7 @@ DCMetaInfo:
AMI: "ami-2581aa40"
- domain: "Frankfurt"
endpoint: "ec2.eu-central-1.amazonaws.com"
country: Germany
......@@ -219,3 +220,60 @@ DCMetaInfo:
DefaultSSHAccount: "ubuntu"
extraInfo:
AMI: "ami-0b418580298265d5c"
- domain: "Ireland"
endpoint: "ec2.eu-west-1.amazonaws.com"
country: Ireland
longitude: "-7.77832031"
latitude: "53.2734"
availability: null
VMMetaInfo:
- OS: "Ubuntu 18.04"
CPU: 1
MEM: 0.5
VMType: "t2.nano"
Price: 0.0058
DefaultSSHAccount: "ubuntu"
extraInfo:
AMI: "ami-04137ed1a354f54c4"
- OS: "Ubuntu 18.04"
CPU: 1
MEM: 1
VMType: "t2.micro"
Price: 0.0116
DefaultSSHAccount: "ubuntu"
extraInfo:
AMI: "ami-04137ed1a354f54c4"
- OS: "Ubuntu 18.04"
CPU: 1
MEM: 2
VMType: "t2.small"
Price: 0.0116
DefaultSSHAccount: "ubuntu"
extraInfo:
AMI: "ami-04137ed1a354f54c4"
- OS: "Ubuntu 18.04"
CPU: 2
MEM: 4
VMType: "t2.medium"
Price: 0.0464
DefaultSSHAccount: "ubuntu"
extraInfo:
AMI: "ami-04137ed1a354f54c4"
- OS: "Ubuntu 18.04"
CPU: 2
MEM: 8
VMType: "t2.large"
Price: 0.0464
DefaultSSHAccount: "ubuntu"
extraInfo:
AMI: "ami-04137ed1a354f54c4"
- OS: "Ubuntu 18.04"
CPU: 4
MEM: 16
VMType: "t2.xlarge"
Price: 0.0464
DefaultSSHAccount: "ubuntu"
extraInfo:
AMI: "ami-04137ed1a354f54c4"
......@@ -71,9 +71,10 @@ class SemaphoreHelper:
repository = self.find_repository(project_id, name, git_url)
return repository.id
def create_template(self, project_id, key_id, inventory_id, repository_id, playbook_name, arguments=None):
    """Create a Semaphore template for the given playbook and return its id.

    Args:
        project_id: id of the Semaphore project the template belongs to.
        key_id: id of the SSH key the template will use.
        inventory_id: id of the inventory the playbook runs against.
        repository_id: id of the repository containing the playbook.
        playbook_name: playbook file name; also used as the template alias.
        arguments: optional extra arguments forwarded to the template.
            Defaults to None, preserving the previous behavior for
            existing callers.

    Returns:
        The id of the newly created template.
    """
    template_request = TemplateRequest(ssh_key_id=key_id, project_id=project_id,
                                       inventory_id=inventory_id,
                                       repository_id=repository_id,
                                       alias=playbook_name, playbook=playbook_name,
                                       arguments=arguments)
    self.project_api.project_project_id_templates_post(template_request, project_id)
    # Re-fetch the project's templates (sorted ascending) and locate the one
    # we just created to obtain its server-assigned id.
    templates = self.project_api.project_project_id_templates_get(project_id, playbook_name, 'asc')
    return self.find_template(templates, playbook_name).id
......
......@@ -2,7 +2,12 @@
<project version="4">
<component name="ChangeListManager">
<list default="true" id="3f84153d-6ed1-4691-94d6-53105266f15e" name="Default Changelist" comment="">
<change beforePath="$PROJECT_DIR$/../k8s/CONF/manager-deployment.yaml" beforeDir="false" afterPath="$PROJECT_DIR$/../k8s/CONF/manager-deployment.yaml" afterDir="false" />
<change beforePath="$PROJECT_DIR$/../k8s/CONF/nginx-configmap.yaml" beforeDir="false" afterPath="$PROJECT_DIR$/../k8s/CONF/nginx-configmap.yaml" afterDir="false" />
<change beforePath="$PROJECT_DIR$/../k8s/CONF/provisioner-deployment.yaml" beforeDir="false" afterPath="$PROJECT_DIR$/../k8s/CONF/provisioner-deployment.yaml" afterDir="false" />
<change beforePath="$PROJECT_DIR$/.idea/workspace.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/workspace.xml" afterDir="false" />
<change beforePath="$PROJECT_DIR$/../sure_tosca-flask-server/.idea/misc.xml" beforeDir="false" afterPath="$PROJECT_DIR$/../sure_tosca-flask-server/.idea/misc.xml" afterDir="false" />
<change beforePath="$PROJECT_DIR$/../sure_tosca-flask-server/.idea/sure_tosca-flask-server.iml" beforeDir="false" afterPath="$PROJECT_DIR$/../sure_tosca-flask-server/.idea/sure_tosca-flask-server.iml" afterDir="false" />
</list>
<option name="SHOW_DIALOG" value="false" />
<option name="HIGHLIGHT_CONFLICTS" value="true" />
......@@ -20,9 +25,14 @@
</component>
<component name="PropertiesComponent">
<property name="RunOnceActivity.ShowReadmeOnStart" value="true" />
<property name="last_opened_file_path" value="$PROJECT_DIR$/../../playbooks/install_nfs.yaml" />
<property name="last_opened_file_path" value="$PROJECT_DIR$/../../sdia-tosca" />
<property name="settings.editor.selected.configurable" value="com.jetbrains.python.configuration.PyActiveSdkModuleConfigurable" />
</component>
<component name="RecentsManager">
<key name="CopyFile.RECENT_KEYS">
<recent name="$PROJECT_DIR$/../../sdia-tosca/examples" />
</key>
</component>
<component name="RunManager">
<configuration name="Unittests in test_default_api.py" type="tests" factoryName="Unittests" temporary="true" nameIsGenerated="true">
<module name="sure_tosca-client_python_stubs" />
......@@ -58,49 +68,60 @@
<servers />
</component>
<component name="WindowStateProjectService">
<state x="723" y="257" width="770" height="700" key="FileChooserDialogImpl" timestamp="1587904825236">
<state x="723" y="257" width="770" height="700" key="FileChooserDialogImpl" timestamp="1594637033414">
<screen x="67" y="34" width="1853" height="1046" />
</state>
<state x="723" y="257" width="770" height="700" key="FileChooserDialogImpl/67.34.1853.1046@67.34.1853.1046" timestamp="1587904825236" />
<state width="1825" height="255" key="GridCell.Tab.0.bottom" timestamp="1587047773485">
<state x="723" y="257" width="770" height="700" key="FileChooserDialogImpl/67.34.1853.1046@67.34.1853.1046" timestamp="1594637033414" />
<state width="1825" height="255" key="GridCell.Tab.0.bottom" timestamp="1592388772726">
<screen x="67" y="34" width="1853" height="1046" />
</state>
<state width="1825" height="255" key="GridCell.Tab.0.bottom/67.34.1853.1046@67.34.1853.1046" timestamp="1587047773485" />
<state width="1825" height="255" key="GridCell.Tab.0.center" timestamp="1587047773484">
<state width="1825" height="255" key="GridCell.Tab.0.bottom/67.34.1853.1046@67.34.1853.1046" timestamp="1592388772726" />
<state width="1825" height="255" key="GridCell.Tab.0.center" timestamp="1592388772725">
<screen x="67" y="34" width="1853" height="1046" />
</state>
<state width="1825" height="255" key="GridCell.Tab.0.center/67.34.1853.1046@67.34.1853.1046" timestamp="1587047773484" />
<state width="1825" height="255" key="GridCell.Tab.0.left" timestamp="1587047773484">
<state width="1825" height="255" key="GridCell.Tab.0.center/67.34.1853.1046@67.34.1853.1046" timestamp="1592388772725" />
<state width="1825" height="255" key="GridCell.Tab.0.left" timestamp="1592388772725">
<screen x="67" y="34" width="1853" height="1046" />
</state>
<state width="1825" height="255" key="GridCell.Tab.0.left/67.34.1853.1046@67.34.1853.1046" timestamp="1587047773484" />
<state width="1825" height="255" key="GridCell.Tab.0.right" timestamp="1587047773485">
<state width="1825" height="255" key="GridCell.Tab.0.left/67.34.1853.1046@67.34.1853.1046" timestamp="1592388772725" />
<state width="1825" height="255" key="GridCell.Tab.0.right" timestamp="1592388772725">
<screen x="67" y="34" width="1853" height="1046" />
</state>
<state width="1825" height="255" key="GridCell.Tab.0.right/67.34.1853.1046@67.34.1853.1046" timestamp="1587047773485" />
<state width="1825" height="383" key="GridCell.Tab.1.bottom" timestamp="1585306552969">
<state width="1825" height="255" key="GridCell.Tab.0.right/67.34.1853.1046@67.34.1853.1046" timestamp="1592388772725" />
<state width="1825" height="264" key="GridCell.Tab.1.bottom" timestamp="1592388772719">
<screen x="67" y="34" width="1853" height="1046" />
</state>
<state width="1825" height="383" key="GridCell.Tab.1.bottom/67.34.1853.1046@67.34.1853.1046" timestamp="1585306552969" />
<state width="1825" height="383" key="GridCell.Tab.1.center" timestamp="1585306552966">
<state width="1825" height="264" key="GridCell.Tab.1.bottom/67.34.1853.1046@67.34.1853.1046" timestamp="1592388772719" />
<state width="1825" height="264" key="GridCell.Tab.1.center" timestamp="1592388772719">
<screen x="67" y="34" width="1853" height="1046" />
</state>
<state width="1825" height="383" key="GridCell.Tab.1.center/67.34.1853.1046@67.34.1853.1046" timestamp="1585306552966" />
<state width="1825" height="383" key="GridCell.Tab.1.left" timestamp="1585306552965">
<state width="1825" height="264" key="GridCell.Tab.1.center/67.34.1853.1046@67.34.1853.1046" timestamp="1592388772719" />
<state width="1825" height="264" key="GridCell.Tab.1.left" timestamp="1592388772719">
<screen x="67" y="34" width="1853" height="1046" />
</state>
<state width="1825" height="383" key="GridCell.Tab.1.left/67.34.1853.1046@67.34.1853.1046" timestamp="1585306552965" />
<state width="1825" height="383" key="GridCell.Tab.1.right" timestamp="1585306552968">
<state width="1825" height="264" key="GridCell.Tab.1.left/67.34.1853.1046@67.34.1853.1046" timestamp="1592388772719" />
<state width="1825" height="264" key="GridCell.Tab.1.right" timestamp="1592388772719">
<screen x="67" y="34" width="1853" height="1046" />
</state>
<state width="1825" height="383" key="GridCell.Tab.1.right/67.34.1853.1046@67.34.1853.1046" timestamp="1585306552968" />
<state width="1825" height="264" key="GridCell.Tab.1.right/67.34.1853.1046@67.34.1853.1046" timestamp="1592388772719" />
<state x="359" y="103" key="SettingsEditor" timestamp="1587036749389">
<screen x="67" y="34" width="1853" height="1046" />
</state>
<state x="359" y="103" key="SettingsEditor/67.34.1853.1046@67.34.1853.1046" timestamp="1587036749389" />
<state x="563" y="235" width="1053" height="732" key="find.popup" timestamp="1584900160970">
<state x="563" y="235" width="1053" height="732" key="find.popup" timestamp="1590670239704">
<screen x="67" y="34" width="1853" height="1046" />
</state>
<state x="563" y="235" width="1053" height="732" key="find.popup/67.34.1853.1046@67.34.1853.1046" timestamp="1584900160970" />
<state x="563" y="235" width="1053" height="732" key="find.popup/67.34.1853.1046@67.34.1853.1046" timestamp="1590670239704" />
</component>
<component name="XDebuggerManager">
<breakpoint-manager>
<breakpoints>
<line-breakpoint enabled="true" suspend="THREAD" type="python-line">
<url>file://$PROJECT_DIR$/test/test_default_api.py</url>
<line>33</line>
<option name="timeStamp" value="7" />
</line-breakpoint>
</breakpoints>
</breakpoint-manager>
</component>
</project>
\ No newline at end of file
---
# TOSCA template deploying a single compute node with fixed host and
# guest-OS properties.
tosca_definitions_version: tosca_simple_yaml_1_0
description: Template for deploying a single server with predefined properties.
topology_template:
  node_templates:
    my_server:
      type: tosca.nodes.Compute
      capabilities:
        # Host container properties
        host:
          properties:
            num_cpus: 1
            disk_size: 10 GB
            mem_size: 4096 MB
        # Guest Operating System properties
        os:
          properties:
            # host Operating System image properties
            architecture: x86_64
            type: linux
            distribution: rhel
            # Quoted: an unquoted 6.5 is parsed as a YAML float, not the
            # version string "6.5" (e.g. 6.50 would silently become 6.5).
            version: "6.5"
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.7 (sure_tosca-flask-server)" project-jdk-type="Python SDK" />
<component name="ProjectRootManager" version="2" project-jdk-name="Python 3.6 (sure_tosca-flask-server)" project-jdk-type="Python SDK" />
<component name="PyCharmProfessionalAdvertiser">
<option name="shown" value="true" />
</component>
......
......@@ -5,7 +5,7 @@
<excludeFolder url="file://$MODULE_DIR$/venv" />
<excludeFolder url="file://$MODULE_DIR$/venv3-7" />
</content>
<orderEntry type="jdk" jdkName="Python 3.7 (sure_tosca-flask-server)" jdkType="Python SDK" />
<orderEntry type="jdk" jdkName="Python 3.6 (sure_tosca-flask-server)" jdkType="Python SDK" />
<orderEntry type="sourceFolder" forTests="false" />
</component>
</module>
\ No newline at end of file
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment