Commit fd197056 authored by Spiros Koulouzis

added planner

parent 5edac92e
...@@ -19,4 +19,5 @@
 /drip_planner2 (copy)/nbproject/private/
 /drip_util/nbproject/private/
 /drip_transformer/nbproject/private/
 /drip_parser/nbproject/private/
+/drip_parser (copy)/nbproject/private/
\ No newline at end of file
...@@ -100,6 +100,7 @@ public class PlannerController {
        return null;
    }
    @RequestMapping(value = "/plan/", method = RequestMethod.POST)
+   @RolesAllowed({UserService.USER, UserService.ADMIN})
    public @ResponseBody
......
<?xml version="1.0" encoding="UTF-8"?>
<project-shared-configuration>
    <!--
    This file contains additional configuration written by modules in the NetBeans IDE.
    The configuration is intended to be shared among all the users of project and
    therefore it is assumed to be part of version control checkout.
    Without this configuration present, some functionality in the IDE may be limited or fail altogether.
    -->
    <python-data xmlns="http://nbpython.dev.java.net/ns/php-project/1">
        <main.file>rpc_server.py</main.file>
        <platform.active>Python_2.7.12</platform.active>
        <application.args>127.0.0.1 parser_queue</application.args>
    </python-data>
</project-shared-configuration>
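The application.args above are the two positional arguments that rpc_server.py (added later in this commit) reads in init_chanel: a RabbitMQ host followed by a queue name. A sketch of the equivalent invocation outside the IDE, assuming the script is started from its source directory:

# Assumed command line, mirroring the NetBeans run configuration above:
#   python rpc_server.py 127.0.0.1 parser_queue
# init_chanel() takes sys.argv[1] as the RabbitMQ host and sys.argv[2] as the queue name.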
from setuptools import setup, find_packages
setup (
-    name='drip_planner2',
+    name='parser',
    version='0.1',
    packages=find_packages(),
......
<?xml version="1.0" encoding="UTF-8"?>
<projectDescription>
    <name>drip_planner2</name>
    <comment></comment>
    <projects>
    </projects>
    <buildSpec>
        <buildCommand>
            <name>org.python.pydev.PyDevBuilder</name>
            <arguments>
            </arguments>
        </buildCommand>
    </buildSpec>
    <natures>
        <nature>org.python.pydev.pythonNature</nature>
    </natures>
</projectDescription>
<?xml version="1.0" encoding="UTF-8" standalone="no"?>
<?eclipse-pydev version="1.0"?><pydev_project>
    <pydev_pathproperty name="org.python.pydev.PROJECT_SOURCE_PATH">
        <path>/${PROJECT_DIR_NAME}</path>
    </pydev_pathproperty>
    <pydev_property name="org.python.pydev.PYTHON_PROJECT_VERSION">python 2.7</pydev_property>
    <pydev_property name="org.python.pydev.PYTHON_PROJECT_INTERPRETER">Default</pydev_property>
</pydev_project>
java.lib.path=
main.file=rpc_server.py
platform.active=Python_2.7.12
python.lib.path=
src.dir=src
<?xml version="1.0" encoding="UTF-8"?>
<project xmlns="http://www.netbeans.org/ns/project/1">
    <type>org.netbeans.modules.python.project</type>
    <configuration>
        <data xmlns="http://nbpython.dev.java.net/ns/php-project/1">
            <name>drip_planner2</name>
            <sources>
                <root id="src.dir"/>
            </sources>
            <tests/>
        </data>
    </configuration>
</project>
from toscaparser import *
from toscaparser.tosca_template import ToscaTemplate
import toscaparser.utils.yamlparser
class DumpPlanner:
    """Naive planner: collects the hosted nodes (Docker containers and compute
    nodes) of a SWITCH TOSCA template and maps them onto a list of VM descriptions."""

    def __init__(self, tosca_file_path):
        self.yaml_dict_tpl = toscaparser.utils.yamlparser.load_yaml(tosca_file_path)
        self.errors = []
        self.warnings = []
        self.tt = None
        try:
            self.tt = ToscaTemplate(path=None, yaml_dict_tpl=self.yaml_dict_tpl)
        except Exception:
            self.warnings.append("Not a valid tosca file")
        self.DOCKER_TYPE = 'Switch.nodes.Application.Container.Docker'
        self.COMPUTE_TYPE = 'Switch.nodes.Compute'
        self.HW_HOST_TYPE = 'Switch.datatypes.hw.host'
        self.HOSTED_NODE_TYPE = [self.DOCKER_TYPE, self.COMPUTE_TYPE]
    def get_docker_types(self):
        # node types that derive from the Docker base type
        docker_types = set([])
        node_types = self.get_node_types()
        for node_type_key in node_types:
            if node_types[node_type_key] and 'derived_from' in node_types[node_type_key].keys():
                if node_types[node_type_key]['derived_from'] == self.DOCKER_TYPE:
                    docker_types.add(node_type_key)
        return docker_types

    def get_node_types(self):
        return self.yaml_dict_tpl['node_types']

    def get_node_templates(self):
        return self.yaml_dict_tpl['topology_template']['node_templates']

    def get_artifacts(self, node):
        if 'artifacts' in node:
            return node['artifacts']

    def get_properties(self, node):
        if 'properties' in node:
            return node['properties']

    def get_enviroment_vars(self, properties):
        environment = []
        for prop in properties:
            if properties[prop] and not isinstance(properties[prop], dict):
                environment.append(prop + "=" + str(properties[prop]))
        return environment

    def get_port_map(self, properties):
        # ports may be given literally or as '${property}' references that are
        # resolved against the node's other properties
        if 'ports_mapping' in properties:
            ports_mappings = properties['ports_mapping']
            port_maps = []
            for port_map_key in ports_mappings:
                host_port = ports_mappings[port_map_key]['host_port']
                if not isinstance(host_port, (int, long, float, complex)):
                    host_port_var = host_port.replace('${', '').replace('}', '')
                    host_port = properties[host_port_var]
                container_port = ports_mappings[port_map_key]['container_port']
                if not isinstance(container_port, (int, long, float, complex)):
                    container_port_var = container_port.replace('${', '').replace('}', '')
                    container_port = properties[container_port_var]
                port_maps.append(str(host_port) + ':' + str(container_port))
            return port_maps

    def get_hosted_nodes(self, node_templates):
        docker_types = self.get_docker_types()
        self.HOSTED_NODE_TYPE = self.HOSTED_NODE_TYPE + list(docker_types)
        hosted_nodes = []
        for node_key in node_templates:
            for hosted_type in self.HOSTED_NODE_TYPE:
                if node_templates[node_key]['type'] == hosted_type:
                    hosted_node = node_templates[node_key]
                    hosted_node['id'] = node_key
                    hosted_nodes.append(hosted_node)
                    break
        return hosted_nodes

    def plan(self):
        node_templates = self.get_node_templates()
        hosted_nodes = self.get_hosted_nodes(node_templates)
        vms = []
        for node in hosted_nodes:
            vm = {}
            vm['name'] = node['id']
            vm['type'] = self.COMPUTE_TYPE
            for req in node['requirements']:
                vm['host'] = req['host']['node_filter']['capabilities']['host']
                vm['OStype'] = req['host']['node_filter']['capabilities']['os']['distribution'] + " " + str(req['host']['node_filter']['capabilities']['os']['os_version'])
                # vm['nodeType'] = 'medium'
                # vm['dockers'] =
                # artifacts = self.get_artifacts(node)
                # if artifacts:
                #     key = next(iter(artifacts))
                #     docker_file = artifacts[key]['file']
                #     vm['dockers'] = docker_file
                # vm['cloudProvider']
            vms.append(vm)
        print len(vms)
        return vms
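A minimal usage sketch of DumpPlanner, assuming a hypothetical TOSCA file path (any SWITCH TOSCA YAML document will do); it mirrors the __main__ block of rpc_server.py further down:

# Hypothetical example; the file path is a placeholder, Python 2.7 as per the project config.
from planner.dump_planner import DumpPlanner

planner = DumpPlanner("input_tosca_files/MOG_cardif.yml")
vms = planner.plan()
for vm in vms:
    # each entry carries the node id, the compute type and, when present,
    # the requested host capabilities and OS string
    print vm['name'], vm.get('OStype')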
# To change this license header, choose License Headers in Project Properties.
# To change this template file, choose Tools | Templates
# and open the template in the editor.
import os
import os.path
import pika
import sys
import tempfile
import time
import json
from planner.dump_planner import *
from os.path import expanduser
def init_chanel(args):
    if len(args) > 2:
        rabbitmq_host = args[1]
        queue_name = args[2]  # e.g. tosca_2_docker_compose_queue
    else:
        rabbitmq_host = '127.0.0.1'
        queue_name = 'parser_queue'  # fallback matching the NetBeans run arguments
    connection = pika.BlockingConnection(pika.ConnectionParameters(host=rabbitmq_host))
    channel = connection.channel()
    channel.queue_declare(queue=queue_name)
    return channel


def start(channel):
    # queue_name is expected to be set at module level (see __main__ below)
    channel.basic_qos(prefetch_count=1)
    channel.basic_consume(on_request, queue=queue_name)
    print(" [x] Awaiting RPC requests")
    channel.start_consuming()
def on_request(ch, method, props, body):
    response = handle_delivery(body)
    ch.basic_publish(exchange='',
                     routing_key=props.reply_to,
                     properties=pika.BasicProperties(correlation_id=props.correlation_id),
                     body=str(response))
    ch.basic_ack(delivery_tag=method.delivery_tag)
def handle_delivery(message):
    parsed_json_message = json.loads(message)
    params = parsed_json_message["parameters"]
    param = params[0]
    value = param["value"]            # the TOSCA document itself
    tosca_file_name = param["name"]
    current_milli_time = lambda: int(round(time.time() * 1000))
    try:
        tosca_file_path = tempfile.gettempdir() + "/transformer_files/" + str(current_milli_time()) + "/"
    except NameError:
        import sys
        tosca_file_path = os.path.dirname(os.path.abspath(sys.argv[0])) + "/transformer_files/" + str(current_milli_time()) + "/"
    if not os.path.exists(tosca_file_path):
        os.makedirs(tosca_file_path)
    with open(tosca_file_path + "/" + tosca_file_name + ".yml", 'w') as outfile:
        outfile.write(str(value))
    if queue_name == "tosca_2_docker_compose_queue":
        planner = DumpPlanner(tosca_file_path + "/" + tosca_file_name + ".yml")
        planner.plan()
    return "response"  # placeholder reply; the computed plan is not returned yet
if __name__ == "__main__":
    home = expanduser("~")
    planner = DumpPlanner(home + "/workspace/DRIP/docs/input_tosca_files/MOG_cardif.yml")
    planner.plan()
    # print sys.argv
    # channel = init_chanel(sys.argv)
    # global queue_name
    # queue_name = sys.argv[2]
    # start(channel)
# try:
## for node in tosca.nodetemplates:
## print "Name %s Type: %s " %(node.name,node.type)
##
## for input in tosca.inputs:
## print input.name
#
## for node in tosca.nodetemplates:
## for relationship, trgt in node.relationships.items():
## rel_template = trgt.get_relationship_template()
## for rel in rel_template:
## print "source %s Relationship: %s target: %s" %(rel.source.type,rel.type,rel.target.type)
## print dir(rel)
# response = {}
# current_milli_time = lambda: int(round(time.time() * 1000))
# response["creationDate"] = current_milli_time()
# response["parameters"] = []
# vm_nodes = []
#
# for node in tosca.nodetemplates:
# if not node.relationships.items() and 'docker' in node.type.lower():
# print "1Adding: %s , %s" %(node.name,node.type)
# vm_nodes.append(node)
## else:
# for relationship, trgt in node.relationships.items():
# if relationship.type == EntityType.HOSTEDON:
# rel_template = trgt.get_relationship_template()
# for rel in rel_template:
# print "2Adding: %s , %s" %(rel.target.name,rel.target.type)
## print "Name: %s Type: %s " %(node.name, node.type)
# vm_nodes.append(rel.target)
#
#
## if not compute_nodes:
## for node in tosca.nodetemplates:
### print dir(node)
## print "Name: %s Type: %s props: %s"%(node.name,node.type,node.get_properties().keys())
#
# for vm in vm_nodes:
# result = {}
# parameter = {}
# result['name'] = vm.name
# result['size'] = 'Medium'
# if 'dockers' in vm.get_properties():
# result['docker'] = vm.get_properties()['dockers'].value
# elif 'artifacts' in vm.templates[next(iter(vm.templates))]:
# artifacts = vm.templates[next(iter(vm.templates))]['artifacts']
# result['docker'] = artifacts['docker_image']['file']
## print "1st Key: %s" %next(iter(vm.templates))
## print vm.templates[next(iter(vm.templates))]
#
## print dir(compute_node.get_properties()['dockers'] )
## print "dockers. Name: %s Type: %s Value: %s" % (compute_node.get_properties()['dockers'].name, compute_node.get_properties()['dockers'].type, compute_node.get_properties()['dockers'].value )
# parameter['value'] = str(json.dumps(result))
# parameter['attributes'] = 'null'
# parameter["url"] = "null"
# parameter["encoding"] = "UTF-8"
# response["parameters"].append(parameter)
## print "Name: %s Type: %s properties: %s" %(vm.name,vm.type,vm.get_properties().keys())
##
# print ("Output message: %s" % json.dumps(response))
#
# except AttributeError as e:
# z = e
# print z
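For completeness, a client-side sketch that exercises the RPC server above; it is not part of the commit. It assumes a local RabbitMQ broker, the same pika 0.x basic_consume signature used by rpc_server.py, and the message shape handle_delivery() expects: a JSON body whose "parameters" list carries the TOSCA file name and its YAML content.

# Hypothetical RPC client for the planner queue (Python 2.7, pika 0.x assumed).
import json
import uuid
import pika

class PlannerClient(object):
    def __init__(self, host='127.0.0.1', queue='parser_queue'):
        self.queue = queue
        self.connection = pika.BlockingConnection(pika.ConnectionParameters(host=host))
        self.channel = self.connection.channel()
        result = self.channel.queue_declare(queue='', exclusive=True)  # broker-named reply queue
        self.callback_queue = result.method.queue
        self.channel.basic_consume(self.on_response, queue=self.callback_queue, no_ack=True)
        self.response = None
        self.corr_id = None

    def on_response(self, ch, method, props, body):
        # keep only the reply that matches our correlation id
        if props.correlation_id == self.corr_id:
            self.response = body

    def call(self, tosca_name, tosca_yaml):
        # handle_delivery() reads parameters[0]["name"] as the file name and
        # parameters[0]["value"] as the TOSCA document itself
        request = {"parameters": [{"name": tosca_name, "value": tosca_yaml}]}
        self.corr_id = str(uuid.uuid4())
        self.channel.basic_publish(exchange='',
                                   routing_key=self.queue,
                                   properties=pika.BasicProperties(
                                       reply_to=self.callback_queue,
                                       correlation_id=self.corr_id),
                                   body=json.dumps(request))
        while self.response is None:
            self.connection.process_data_events()
        return self.response

if __name__ == "__main__":
    client = PlannerClient()
    with open("MOG_cardif.yml") as f:   # hypothetical input file
        print client.call("MOG_cardif", f.read())

The reply body is whatever handle_delivery() returns, currently the placeholder string "response".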
from setuptools import setup, find_packages

setup(
    name='drip_planner2',
    version='0.1',
    packages=find_packages(),

    # Declare your packages' dependencies here, for eg:
    install_requires=['foo>=3'],

    # Fill in these to make your Egg ready for upload to
    # PyPI
    author='S. Koulouzis',
    author_email='',

    #summary = 'Just another Python package for the cheese shop',
    url='',
    license='',
    long_description='Long description of the package',

    # could also include long_description, download_url, classifiers, etc.
)
\ No newline at end of file