UvA / CONF · Commit a538b5a0
Authored Nov 07, 2019 by Spiros Koulouzis

    refactor planner

Parent: 4ed61d65
Showing 13 changed files with 1002 additions and 0 deletions (+1002, -0)
drip-planner/.idea/drip-planner.iml             +13   -0
drip-planner/__main__.py                        +146  -0
drip-planner/planner/__init__.py                +0    -0
drip-planner/planner/planner.py                 +241  -0
drip-planner/service/__init__.py                +0    -0
drip-planner/service/simple_spec_alayzer.py     +182  -0
drip-planner/service/spec_service.py            +7    -0
drip-planner/service/specification_analyzer.py  +58   -0
drip-planner/setup.py                           +24   -0
drip-planner/test/__init__.py                   +0    -0
drip-planner/test/test_planner.py               +63   -0
drip-planner/util/__init__.py                   +0    -0
drip-planner/util/tosca_helper.py               +268  -0
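Taken together, the new files add a standalone TOSCA planner package: planner.Planner resolves node requirements and fills in infrastructure specifications, the service package analyzes the specification, and util.tosca_helper converts between node types, node templates, and plain dictionaries. A minimal sketch of how the pieces are exercised, mirroring the "test_local" branch of __main__.py and test_planner.py below (the example TOSCA file path is taken from the diff):

    from planner.planner import Planner
    from service.spec_service import SpecService

    # Stub configuration, as used throughout this diff
    spec_service = SpecService({'url': "http://host"})
    planner = Planner("../TOSCA/application_example_2_topologies.yaml", spec_service)
    template = planner.resolve_requirements()                # add nodes that satisfy open requirements
    template = planner.set_infrastructure_specifications()   # set specs and VM occurrences on the template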
drip-planner/.idea/drip-planner.iml  (new file, mode 100644)

<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
  <component name="NewModuleRootManager">
    <content url="file://$MODULE_DIR$">
      <excludeFolder url="file://$MODULE_DIR$/venv" />
    </content>
    <orderEntry type="inheritedJdk" />
    <orderEntry type="sourceFolder" forTests="false" />
  </component>
  <component name="TestRunnerService">
    <option name="PROJECT_TEST_RUNNER" value="Unittests" />
  </component>
</module>
\ No newline at end of file
drip-planner/__main__.py  (new file, mode 100644)

# To change this license header, choose License Headers in Project Properties.
# To change this template file, choose Tools | Templates
# and open the template in the editor.
import json
import os
import os.path
import tempfile
import time
import logging
import pika
import yaml
import sys

from planner.planner import Planner
from service.spec_service import SpecService

logger = logging.getLogger(__name__)
# if not getattr(logger, 'handler_set', None):
#     logger.setLevel(logging.INFO)
#     h = logging.StreamHandler()
#     formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
#     h.setFormatter(formatter)
#     logger.addHandler(h)
#     logger.handler_set = True


def init_chanel(args):
    global rabbitmq_host
    if len(args) > 1:
        rabbitmq_host = args[1]
        queue_name = args[2]  # planner
    else:
        rabbitmq_host = '127.0.0.1'
    connection = pika.BlockingConnection(pika.ConnectionParameters(host=rabbitmq_host))
    channel = connection.channel()
    channel.queue_declare(queue=queue_name)
    return channel


def start(this_channel):
    this_channel.basic_qos(prefetch_count=1)
    this_channel.basic_consume(queue=queue_name, on_message_callback=on_request)
    logger.info(" [x] Awaiting RPC requests")
    this_channel.start_consuming()


def on_request(ch, method, props, body):
    response = handle_delivery(body)
    ch.basic_publish(exchange='',
                     routing_key=props.reply_to,
                     properties=pika.BasicProperties(correlation_id=props.correlation_id),
                     body=str(response))
    ch.basic_ack(delivery_tag=method.delivery_tag)


def handle_delivery(message):
    logger.info("Got: " + str(message))
    try:
        message = message.decode()
    except (UnicodeDecodeError, AttributeError):
        pass
    parsed_json_message = json.loads(message)
    owner = parsed_json_message['owner']
    tosca_file_name = 'tosca_template'
    tosca_template_json = parsed_json_message['toscaTemplate']

    input_current_milli_time = lambda: int(round(time.time() * 1000))
    # rabbit = DRIPLoggingHandler(host=rabbitmq_host, port=5672, user=owner)
    # logger.addHandler(rabbit)
    try:
        tosca_folder_path = os.path.join(tempfile.gettempdir(), "planner_files", str(input_current_milli_time()))
    except NameError:
        import sys
        millis = int(round(time.time() * 1000))
        tosca_folder_path = os.path.dirname(os.path.abspath(sys.argv[0])) + os.path.join(tempfile.gettempdir(),
                                                                                         "planner_files", str(millis))
    if not os.path.exists(tosca_folder_path):
        os.makedirs(tosca_folder_path)
    input_tosca_file_path = os.path.join(tosca_folder_path, tosca_file_name + ".yml")
    with open(input_tosca_file_path, 'w') as outfile:
        outfile.write(yaml.dump(tosca_template_json))

    conf = {'url': "http://host"}
    spec_service = SpecService(conf)
    test_planner = Planner(input_tosca_file_path, spec_service)
    tosca_template = test_planner.resolve_requirements()
    tosca_template = test_planner.set_infrastructure_specifications()
    template_dict = tosca_util.get_tosca_template_2_topology_template_dictionary(tosca_template)
    logger.info("template ----: \n" + yaml.dump(template_dict))

    response = {'toscaTemplate': template_dict}
    output_current_milli_time = int(round(time.time() * 1000))
    response["creationDate"] = output_current_milli_time
    response["parameters"] = []
    if queue_name == "planner_queue":
        logger.info("Planning")
    logger.info("Returning plan")
    logger.info("Output message:" + json.dumps(response))
    return json.dumps(response)


if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    if sys.argv[1] == "test_local":
        tosca_path = "../TOSCA/"
        input_tosca_file_path = tosca_path + '/application_example_2_topologies.yaml'
        conf = {'url': "http://host"}
        spec_service = SpecService(conf)
        test_planner = Planner(input_tosca_file_path, spec_service)
        test_tosca_template = test_planner.resolve_requirements()
        test_tosca_template = test_planner.set_infrastructure_specifications()
        template_dict = tosca_util.get_tosca_template_2_topology_template_dictionary(test_tosca_template)
        logger.info("template ----: \n" + yaml.dump(template_dict))
        try:
            tosca_folder_path = os.path.join(tempfile.gettempdir(), tosca_path)
        except NameError:
            import sys
            tosca_folder_path = os.path.dirname(os.path.abspath(sys.argv[0])) + os.path.join(tempfile.gettempdir(),
                                                                                             tosca_path)
        tosca_file_name = 'tosca_template'
        input_tosca_file_path = tosca_path + '/application_example_2_topologies.yaml'
        with open(input_tosca_file_path, 'w') as outfile:
            outfile.write(yaml.dump(template_dict))
        ToscaTemplate(input_tosca_file_path)
        test_response = {'toscaTemplate': template_dict}
        logger.info("Output message:" + json.dumps(test_response))
    else:
        logger.info("Input args: " + sys.argv[0] + ' ' + sys.argv[1] + ' ' + sys.argv[2])
        channel = init_chanel(sys.argv)
        global queue_name
        queue_name = sys.argv[2]
        start(channel)
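For reference, handle_delivery() above expects the RPC request body to be a JSON document with 'owner' and 'toscaTemplate' fields and replies with the planned template plus bookkeeping fields. A sketch of the shapes (field names are taken from the code above; the values are purely illustrative):

    # Request body consumed by handle_delivery()
    request = {
        "owner": "some_user",   # illustrative value
        "toscaTemplate": {}     # parsed TOSCA document as a dictionary
    }
    # Response published back to props.reply_to:
    # {"toscaTemplate": {...}, "creationDate": <epoch millis>, "parameters": []}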
drip-planner/planner/__init__.py  (new file, mode 100644, empty)
drip-planner/planner/planner.py  (new file, mode 100644)

import logging

from toscaparser.nodetemplate import NodeTemplate
from toscaparser.tosca_template import ToscaTemplate
from toscaparser.topology_template import TopologyTemplate
import operator
import matplotlib.pyplot as plt

from service.simple_spec_alayzer import SimpleAnalyzer
from util import tosca_helper


class Planner:

    def __init__(self, path, spec_service):
        self.path = path
        self.tosca_template = ToscaTemplate(path)
        self.tosca_node_types = self.tosca_template.nodetemplates[0].type_definition.TOSCA_DEF
        self.all_custom_def = self.tosca_template.nodetemplates[0].custom_def
        self.all_node_types = {}
        self.all_node_types.update(self.tosca_node_types.items())
        self.all_node_types.update(self.all_custom_def.items())
        self.required_nodes = []
        self.spec_service = spec_service

    def add_required_nodes_to_template(self, required_nodes):
        for req_node in required_nodes:
            node_template = tosca_helper.node_type_2_node_template(req_node, self.all_custom_def)
            self.tosca_template.nodetemplates.append(node_template)
        return self.tosca_template

    def set_infrastructure_specifications(self):
        # Start bottom up and (node without requirements leaf) and find the root of the graph.
        # Get root performance, version requirements and set specs to required node
        specification_analyzer = SimpleAnalyzer(self.tosca_template)
        nodes_with_new_specifications = specification_analyzer.set_node_specifications()
        for new_spec_node in nodes_with_new_specifications:
            for index, node_in_temple in enumerate(self.tosca_template.nodetemplates):
                if new_spec_node.name == node_in_temple.name:
                    self.tosca_template.nodetemplates[index] = new_spec_node
                    break

        specification_analyzer = SimpleAnalyzer(self.tosca_template)
        nodes_with_new_relationship_occurrences = specification_analyzer.set_relationship_occurrences()
        added_node_names = []
        for new_spec_occurrences in nodes_with_new_relationship_occurrences:
            for index, node_in_temple in enumerate(self.tosca_template.nodetemplates):
                if new_spec_occurrences.name == node_in_temple.name:
                    added_node_names.append(new_spec_occurrences.name)
                    self.tosca_template.nodetemplates[index] = new_spec_occurrences
                    break
        for new_spec_occurrences in nodes_with_new_relationship_occurrences:
            if new_spec_occurrences.name not in added_node_names:
                self.tosca_template.nodetemplates.append(new_spec_occurrences)
        return self.tosca_template

    def get_node_template_property(self, prop_key, node_prop_dict):
        prop_value = self.spec_service.get_property(prop_key)
        if prop_value:
            return {prop_key: node_prop_dict}
        else:
            if 'required' in node_prop_dict and 'default' in node_prop_dict:
                return node_prop_dict['default']
            if 'constraints' in node_prop_dict:
                constraints = node_prop_dict['constraints']
                for constraint in constraints:
                    if next(iter(constraint)) == 'greater_or_equal':
                        return constraint[next(iter(constraint))]
        return None

    def resolve_requirements(self):
        """ Resolve requirements. Go over all nodes and recursively resolve requirements till node has no
        requirements e.g. docker -> k8s -> cluster -> vm """
        for node in self.tosca_template.nodetemplates:
            self.add_required_nodes(node)
        return self.add_required_nodes_to_template(self.required_nodes)

    def add_required_nodes(self, node):
        """Adds the required nodes in self.required_nodes for an input node."""
        if isinstance(node, NodeTemplate):
            logging.info('Resolving requirements for: ' + node.name)
        elif isinstance(node, dict):
            logging.info('Resolving requirements for: ' + str(next(iter(node))))

        # Get all requirements for node.
        all_requirements = self.get_all_requirements(node)
        if not all_requirements:
            logging.debug('Node: ' + tosca_helper.get_node_type_name(node) + ' has no requirements')
            return
        matching_node = self.find_best_node_for_requirements(all_requirements)

        # Only add node that is not in node_templates
        matching_node_type_name = next(iter(matching_node))
        matching_node_template = tosca_helper.node_type_2_node_template(matching_node, self.all_custom_def)
        # Add the requirements to the node we analyzed. e.g. docker needed host now we added the type and name of host
        node = self.add_requirements(node, all_requirements, matching_node_template.name)
        if not tosca_helper.contains_node_type(self.required_nodes, matching_node_type_name):
            logging.info('  Adding: ' + str(matching_node_template.name))
            self.required_nodes.append(matching_node)
            # Find matching nodes for the new node's requirements
            self.add_required_nodes(matching_node)

    def get_all_requirements(self, node):
        """Returns all requirements for an input node including all parents requirements"""
        node_type_name = tosca_helper.get_node_type_name(node)
        logging.info('  Looking for requirements for node: ' + node_type_name)

        # Get the requirements for this node from its definition e.g. docker: hostedOn k8s
        def_type = self.all_node_types[node_type_name]
        all_requirements = []
        if 'requirements' in def_type.keys():
            all_requirements = def_type['requirements']
            logging.info('    Found requirements: ' + str(all_requirements) + ' for node: ' + node_type_name)

        # Get the requirements for this node from the template. e.g. wordpress: connectsTo mysql
        # node_requirements = tosca_helper.get_node_requirements(node)
        # if node_requirements:
        #     all_requirements += node_requirements

        # Put them all together
        parent_requirements = tosca_helper.get_ancestors_requirements(node, self.all_node_types, self.all_custom_def)
        parent_type = tosca_helper.get_node_type_name(node)
        if parent_type and parent_requirements:
            logging.info('    Adding to : ' + str(node_type_name) + ' parent requirements from: ' + str(parent_type))
            if not all_requirements:
                all_requirements += parent_requirements
            else:
                for all_requirement in all_requirements:
                    for parent_requirement in parent_requirements:
                        all_requirement_key = next(iter(all_requirement))
                        parent_requirement_key = next(iter(parent_requirement))
                        if all_requirement_key != parent_requirement_key and \
                                all_requirement[all_requirement_key]['capability'] != \
                                parent_requirement[parent_requirement_key]['capability']:
                            all_requirements.append(parent_requirement)
        logging.debug('    all_requirements: ' + str(all_requirements))
        return all_requirements

    def get_node_types_by_capability(self, cap):
        """Returns all nodes that have the capability: cap and have interfaces. This way we distinguish between
        'abstract' and 'concrete' """
        candidate_nodes = {}
        for tosca_node_type in self.all_node_types:
            if tosca_node_type.startswith('tosca.nodes') and 'capabilities' in self.all_node_types[tosca_node_type]:
                logging.debug('     Node: ' + str(tosca_node_type))
                for caps in self.all_node_types[tosca_node_type]['capabilities']:
                    logging.debug('      ' + str(self.all_node_types[tosca_node_type]['capabilities'][caps]['type']) +
                                  ' == ' + cap)
                    if self.all_node_types[tosca_node_type]['capabilities'][caps]['type'] == cap:
                        candidate_nodes[tosca_node_type] = self.all_node_types[tosca_node_type]
                        logging.debug('      candidate_node: ' + str(tosca_node_type))

        candidate_child_nodes = {}
        for node in candidate_nodes:
            candidate_child_nodes.update(self.get_child_nodes(node))
        candidate_nodes.update(candidate_child_nodes)

        capable_nodes = {}
        # Only return the nodes that have interfaces. This means that they are not "abstract"
        nodes_type_names_with_interface = tosca_helper.get_node_types_with_interface(candidate_nodes)
        for type_name in nodes_type_names_with_interface:
            capable_nodes[type_name] = candidate_nodes[type_name]
        return capable_nodes

    def find_best_node_for_requirements(self, all_requirements):
        """Returns the 'best' node for a set of requirements. Here we count the number of requiremets that the node
        can cover and return the one which covers the most """
        matching_nodes = {}
        number_of_matching_requirement = {}
        # Loop requirements to find nodes per requirement
        for req in all_requirements:
            if 'capability' in req[next(iter(req))]:
                capability = req[next(iter(req))]['capability']
                logging.info('  Looking for nodes with capability: ' + capability)
                # Find all nodes in the definitions that have the capability: capability
                capable_nodes = self.get_node_types_by_capability(capability)
                if capable_nodes:
                    # Add number of matching capabilities for each node.
                    # Try to score matching_nodes to return one. The more requirements a node meets the better
                    for node_type in capable_nodes:
                        matching_requirement_count = 1
                        if node_type not in number_of_matching_requirement:
                            number_of_matching_requirement[node_type] = matching_requirement_count
                        else:
                            matching_requirement_count = number_of_matching_requirement[node_type]
                            matching_requirement_count += 1
                            number_of_matching_requirement[node_type] = matching_requirement_count
                        logging.info('  Found: ' + str(node_type))
                    matching_nodes.update(capable_nodes)
                else:
                    logging.error('Did not find any node with required capability: ' + str(capability))
                    raise Exception('Did not find any node with required capability: ' + str(capability))

        # if we only found 1 return it
        if len(matching_nodes) == 1:
            return matching_nodes
        sorted_number_of_matching_requirement = sorted(number_of_matching_requirement.items(),
                                                       key=operator.itemgetter(1))
        index = len(sorted_number_of_matching_requirement) - 1
        winner_type = next(iter(sorted_number_of_matching_requirement[index]))
        return {winner_type: matching_nodes[winner_type]}

    def get_child_nodes(self, parent_node_type_name):
        child_nodes = {}
        for tosca_node_type in self.all_node_types:
            if tosca_node_type.startswith('tosca.nodes') and 'derived_from' in self.all_node_types[tosca_node_type]:
                if parent_node_type_name == self.all_node_types[tosca_node_type]['derived_from']:
                    child_nodes[tosca_node_type] = self.all_node_types[tosca_node_type]
        return child_nodes

    def add_requirements(self, node, missing_requirements, capable_node_name):
        """Add the requirements to the node """
        for req in missing_requirements:
            req[next(iter(req))]['node'] = capable_node_name
            if isinstance(node, NodeTemplate):
                contains_requirement = False
                for node_requirement in node.requirements:
                    if node_requirement == req:
                        contains_requirement = True
                        break
                if not contains_requirement:
                    node.requirements.append(req)
            elif isinstance(node, dict):
                type_name = next(iter(node))
                if 'requirements' not in node[type_name]:
                    node[type_name]['requirements'] = []
                node_requirements = node[type_name]['requirements']
                contains_requirement = False
                for node_requirement in node_requirements:
                    if node_requirement == req:
                        contains_requirement = True
                        break
                if not contains_requirement:
                    node[type_name]['requirements'].append(req)
        return node
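The requirement entries that find_best_node_for_requirements() and add_requirements() manipulate follow the usual single-key TOSCA shape; a sketch of one entry as the planner sees it (the requirement name and type are illustrative, only the 'capability' and 'node' keys are read or written by the code above):

    requirement = {
        'host': {                                        # requirement name (first and only key)
            'capability': 'tosca.capabilities.Compute',  # matched against node type capabilities
            'node': 'vm',                                # filled in by add_requirements()
        }
    }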
drip-planner/service/__init__.py  (new file, mode 100644, empty)
drip-planner/service/simple_spec_alayzer.py  (new file, mode 100644)

import copy

from toscaparser.nodetemplate import NodeTemplate
from toscaparser.properties import Property
import networkx as nx
import logging

from service.specification_analyzer import SpecificationAnalyzer
from util import tosca_helper


class SimpleAnalyzer(SpecificationAnalyzer):

    def __init__(self, tosca_template):
        super(SimpleAnalyzer, self).__init__(tosca_template)

    def set_relationship_occurrences(self):
        return_nodes = []
        # nodes_with_occurrences_in_requirements = tosca_util.get_nodes_with_occurrences_in_requirements(
        #     self.tosca_template.nodetemplates)
        orchestrator_nodes = tosca_helper.get_nodes_by_type('tosca.nodes.ARTICONF.Orchestrator',
                                                            self.tosca_template.nodetemplates,
                                                            self.all_node_types, self.all_custom_def)
        if 'properties' in orchestrator_nodes[0].entity_tpl:
            if 'masters_num' in orchestrator_nodes[0].entity_tpl['properties']:
                masters_num = orchestrator_nodes[0].entity_tpl['properties']['masters_num']
            if 'workers_num' in orchestrator_nodes[0].entity_tpl['properties']:
                workers_num = orchestrator_nodes[0].entity_tpl['properties']['workers_num']
        else:
            masters_num = orchestrator_nodes[0].get_property_value('masters_num')
            workers_num = orchestrator_nodes[0].get_property_value('workers_num')

        topology_nodes = tosca_helper.get_nodes_by_type('tosca.nodes.ARTICONF.VM.topology',
                                                        self.tosca_template.nodetemplates,
                                                        self.all_node_types, self.all_custom_def)
        # for requirement in topology_nodes[0].requirements:
        #     requirement_dict = requirement[next(iter(requirement))]
        #     if requirement_dict['capability'] == 'tosca.capabilities.ARTICONF.VM':
        #         requirement_dict['occurrences'] = min_num_of_vm

        vm_nodes = tosca_helper.get_nodes_by_type('tosca.nodes.ARTICONF.VM.Compute',
                                                  self.tosca_template.nodetemplates,
                                                  self.all_node_types, self.all_custom_def)
        if vm_nodes:
            for i in range(len(vm_nodes), masters_num):
                old_vm_name = vm_nodes[0].name
                new_vm = copy.deepcopy(vm_nodes[0])
                new_vm_name = new_vm.name + '_' + str(i)
                new_vm.name = new_vm_name
                templates = new_vm.templates.pop(old_vm_name)
                new_vm.templates[new_vm_name] = templates
                return_nodes.append(new_vm)
                for requirement in topology_nodes[0].requirements:
                    requirement_key = next(iter(requirement))
                    requirement_value = requirement[requirement_key]
                    if requirement_value['capability'] == 'tosca.capabilities.ARTICONF.VM':
                        new_requirement = copy.deepcopy(requirement)
                        new_requirement[requirement_key]['node'] = new_vm.name
                        topology_nodes[0].requirements.append(new_requirement)
                        return_nodes.append(topology_nodes[0])
                        break

            for i in range(len(vm_nodes), workers_num + 1):
                old_vm_name = vm_nodes[0].name
                new_vm = copy.deepcopy(vm_nodes[0])
                new_vm_name = new_vm.name + '_' + str(i)
                new_vm.name = new_vm_name
                templates = new_vm.templates.pop(old_vm_name)
                new_vm.templates[new_vm_name] = templates
                new_vm.templates[next(iter(new_vm.templates))]['properties']['role'] = "worker"
                return_nodes.append(new_vm)
                for requirement in topology_nodes[0].requirements:
                    requirement_key = next(iter(requirement))
                    requirement_value = requirement[requirement_key]
                    if requirement_value['capability'] == 'tosca.capabilities.ARTICONF.VM':
                        new_requirement = copy.deepcopy(requirement)
                        new_requirement[requirement_key]['node'] = new_vm.name
                        topology_nodes[0].requirements.append(new_requirement)
                        return_nodes.append(topology_nodes[0])
                        break

        return return_nodes

    def set_node_specifications(self):
        nodes_to_implement_policies = self.get_nodes_to_implement_policy()
        affected_nodes = []
        for node_name in nodes_to_implement_policies:
            policies = nodes_to_implement_policies[node_name]
            affected_node = self.set_specs(node_name, policies, self.tosca_template.nodetemplates)
            if affected_node:
                affected_nodes.append(affected_node)
        return affected_nodes

    def get_nodes_to_implement_policy(self):
        nodes_to_implement_policies = {}
        for policy in self.tosca_template.policies:
            for target in policy.targets:
                for leaf in self.leaf_nodes:
                    for affected_node_name in (nx.shortest_path(self.g, source=target, target=leaf)):
                        if affected_node_name not in nodes_to_implement_policies:
                            policy_list = []
                            nodes_to_implement_policies[affected_node_name] = policy_list
                        policy_list = nodes_to_implement_policies[affected_node_name]
                        policy_list.append(policy.type)
                        nodes_to_implement_policies[affected_node_name] = policy_list
        return nodes_to_implement_policies

    def set_node_properties_for_policy(self, affected_node, policies):
        logging.info('Setting properties for: ' + str(affected_node.type))
        ancestors_types = tosca_helper.get_all_ancestors_types(affected_node, self.all_node_types,
                                                               self.all_custom_def)
        # if 'tosca.nodes.ARTICONF.Orchestrator' in ancestors_types:
        #     logging.info('Do Something')
        properties = tosca_helper.get_all_ancestors_properties(affected_node, self.all_node_types,
                                                               self.all_custom_def)
        default_properties = {}
        for node_property in properties:
            default_property = self.get_defult_value(node_property)
            if default_property:
                default_properties[next(iter(default_property))] = default_property[next(iter(default_property))]
        if default_properties:
            for default_property in default_properties:
                affected_node.get_properties_objects().append(default_property)
            if 'properties' in affected_node.templates[next(iter(affected_node.templates))]:
                for prop_name in affected_node.templates[next(iter(affected_node.templates))]['properties']:
                    if 'required' not in affected_node.templates[next(iter(affected_node.templates))]['properties'][prop_name] and 'type' not in \
                            affected_node.templates[next(iter(affected_node.templates))]['properties'][prop_name]:
                        default_properties[prop_name] = \
                            affected_node.templates[next(iter(affected_node.templates))]['properties'][prop_name]
            affected_node.templates[next(iter(affected_node.templates))]['properties'] = default_properties
            return affected_node
        else:
            return None

    def set_specs(self, node_name, policies, nodes_in_template):
        logging.info('node_name: ' + str(node_name) + ' will implement policies: ' + str(len(policies)))
        for node in nodes_in_template:
            if node.name == node_name:
                affected_node = node
                break
        logging.info('node: ' + str(affected_node.type) + ' will implement policies: ' + str(len(policies)))
        affected_node = self.set_node_properties_for_policy(affected_node, policies)
        return affected_node

    def get_defult_value(self, node_property):
        if isinstance(node_property.value, dict) and 'required' in node_property.value and 'type' in node_property.value:
            if node_property.value['required']:
                default_prop = {}
                if 'default' in node_property.value:
                    if node_property.value['type'] == 'integer':
                        default_prop = int(node_property.value['default'])
                    else:
                        default_prop = str(node_property.value['default'])
                elif 'constraints' in node_property.value:
                    constraints = node_property.value['constraints']
                    for constraint in constraints:
                        for constraint_key in constraint:
                            if 'equal' in constraint_key:
                                if node_property.value['type'] == 'integer':
                                    default_prop = int(constraint[constraint_key])
                                else:
                                    default_prop = str(constraint[constraint_key])
                name = node_property.name
                node_property = {name: default_prop}
                return node_property
        return None
drip-planner/service/spec_service.py  (new file, mode 100644)

class SpecService:

    def __init__(self, conf):
        self.configuration = conf

    def get_property(self, prop_key):
        return None
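Note that SpecService is a stub here: get_property() always returns None, so Planner.get_node_template_property() falls back to the property definition itself, preferring a declared default and otherwise a greater_or_equal constraint. A small illustration (the property dictionary below is a hypothetical TOSCA-style definition, not taken from the diff):

    node_prop_dict = {'type': 'integer', 'required': True, 'default': 2,
                      'constraints': [{'greater_or_equal': 1}]}
    # With the stub SpecService this resolves to 2 (the default);
    # without a 'default' key it would resolve to 1 (the greater_or_equal bound).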
drip-planner/service/specification_analyzer.py  (new file, mode 100644)

from abc import abstractmethod, ABCMeta

from toscaparser.tosca_template import ToscaTemplate
import networkx as nx
import matplotlib.pyplot as plt


class SpecificationAnalyzer(metaclass=ABCMeta):

    def __init__(self, tosca_template):
        self.tosca_template = tosca_template
        self.tosca_node_types = self.tosca_template.nodetemplates[0].type_definition.TOSCA_DEF
        self.all_custom_def = self.tosca_template.nodetemplates[0].custom_def
        self.all_node_types = {}
        self.all_node_types.update(self.tosca_node_types.items())
        self.all_node_types.update(self.all_custom_def.items())
        self.required_nodes = []

        self.g = self.build_graph(self.tosca_template.nodetemplates)
        self.root_nodes = []
        self.leaf_nodes = []
        for node_name, degree in self.g.in_degree():
            if degree == 0:
                self.root_nodes.append(node_name)
        for node_name, degree in self.g.out_degree():
            if degree == 0:
                self.leaf_nodes.append(node_name)

    def build_graph(self, node_templates):
        graph = nx.DiGraph()
        for node in node_templates:
            graph.add_node(node.name, attr_dict=node.entity_tpl)
            for req in node.requirements:
                req_name = next(iter(req))
                req_node_name = req[req_name]['node']
                if 'relationship' in req[req_name] and 'type' in req[req_name]['relationship']:
                    relationship_type = req[req_name]['relationship']['type']
                else:
                    if 'relationship' not in req[req_name]:
                        relationship_type = 'tosca.relationships.DependsOn'
                    else:
                        relationship_type = req[req_name]['relationship']
                graph.add_edge(node.name, req_node_name, relationship=relationship_type)
        nx.draw(graph, with_labels=True)
        plt.savefig("/tmp/graph.png")
        # plt.show()
        return graph

    @abstractmethod
    def set_node_specifications(self):
        raise NotImplementedError('Must implement upload in subclasses')

    @abstractmethod
    def set_relationship_occurrences(self):
        raise NotImplementedError('Must implement upload in subclasses')
drip-planner/setup.py  (new file, mode 100644)

from setuptools import setup, find_packages

setup(
    name='drip_planner2',
    version='0.1',
    packages=find_packages(),
    # Declare your packages' dependencies here, for eg:
    install_requires=['matplotlib==3.1.1', 'pika==1.1.0', 'tosca-parser==1.6.0', 'names==0.3.0', 'networkx==2.4',
                      'matplotlib==3.1.1'],
    # Fill in these to make your Egg ready for upload to
    # PyPI
    author='S. Koulouzis',
    author_email='',
    # summary = 'Just another Python package for the cheese shop',
    url='',
    license='',
    long_description='Long description of the package',
    # could also include long_description, download_url, classifiers, etc.
)
drip-planner/test/__init__.py  (new file, mode 100644, empty)
drip-planner/test/test_planner.py  (new file, mode 100644)

import json
import logging
import os
import os.path
import tempfile
import time
import unittest

import yaml
from toscaparser.tosca_template import ToscaTemplate

from planner.planner import Planner
from service.spec_service import SpecService


class MyTestCase(unittest.TestCase):

    def test_something(self):
        logger = logging.getLogger(__name__)
        tosca_path = "../../TOSCA/"
        input_tosca_file_path = tosca_path + '/application_example_2_topologies.yaml'
        dir_path = os.path.dirname(os.path.realpath(__file__))
        print(dir_path)
        self.assertEqual(True, os.path.exists(input_tosca_file_path),
                         "Input TOSCA file: " + input_tosca_file_path + " not found")

        conf = {'url': "http://host"}
        spec_service = SpecService(conf)
        test_planner = Planner(input_tosca_file_path, spec_service)
        test_tosca_template = test_planner.resolve_requirements()
        test_tosca_template = test_planner.set_infrastructure_specifications()
        template_dict = tosca_util.get_tosca_template_2_topology_template_dictionary(test_tosca_template)
        logger.info("template ----: \n" + yaml.dump(template_dict))

        try:
            tosca_folder_path = os.path.join(tempfile.gettempdir(), tosca_path)
        except NameError:
            import sys
            tosca_folder_path = os.path.dirname(os.path.abspath(sys.argv[0])) + os.path.join(tempfile.gettempdir(),
                                                                                             tosca_path)
        tosca_file_name = 'tosca_template'
        input_tosca_file_path = tosca_path + '/application_example_output.yaml'
        with open(input_tosca_file_path, 'w') as outfile:
            outfile.write(yaml.dump(template_dict))
        ToscaTemplate(input_tosca_file_path)
        test_response = {'toscaTemplate': template_dict}

        response = {'toscaTemplate': template_dict}
        output_current_milli_time = int(round(time.time() * 1000))
        response["creationDate"] = output_current_milli_time
        response["parameters"] = []
        print("Output message:" + json.dumps(response))
        self.assertEqual(True, True)


if __name__ == '__main__':
    unittest.main()
drip-planner/util/__init__.py  (new file, mode 100644, empty)
drip-planner/util/tosca_helper.py  (new file, mode 100644)

import copy
from itertools import chain

from toscaparser import tosca_template
from toscaparser.elements.nodetype import NodeType
from toscaparser.nodetemplate import NodeTemplate
import yaml
import logging

# TOSCA template key names
SECTIONS = (DEFINITION_VERSION, DEFAULT_NAMESPACE, TEMPLATE_NAME, TOPOLOGY_TEMPLATE, TEMPLATE_AUTHOR,
            TEMPLATE_VERSION, DESCRIPTION, IMPORTS, DSL_DEFINITIONS, NODE_TYPES, RELATIONSHIP_TYPES,
            RELATIONSHIP_TEMPLATES, CAPABILITY_TYPES, ARTIFACT_TYPES, DATA_TYPES, INTERFACE_TYPES,
            POLICY_TYPES, GROUP_TYPES, REPOSITORIES, INPUTS, NODE_TEMPLATES, OUTPUTS, GROUPS,
            SUBSTITUION_MAPPINGS, POLICIES, TYPE, REQUIREMENTS, ARTIFACTS, PROPERTIES, INTERFACES) = \
           ('tosca_definitions_version', 'tosca_default_namespace', 'template_name', 'tosca_template',
            'template_author', 'template_version', 'description', 'imports', 'dsl_definitions', 'node_types',
            'relationship_types', 'relationship_templates', 'capability_types', 'artifact_types', 'data_types',
            'interface_types', 'policy_types', 'group_types', 'repositories', 'inputs', 'node_templates',
            'outputs', 'groups', 'substitution_mappings', 'policies', 'type', 'requirements', 'artifacts',
            'properties', 'interfaces')

node_type_key_names_to_remove = ['capabilities', 'derived_from']


def get_node_type_name(node):
    """Returns the node's type name as string"""
    if isinstance(node, NodeTemplate):
        if node.type:
            if node.type and isinstance(node.type, str):
                node_type = node.type
            elif isinstance(node.type, NodeTemplate):
                node_type = node.type.type
        else:
            node_type = None
    elif isinstance(node, dict):
        node_type = next(iter(node))
    return node_type


def get_node_requirements(node):
    if isinstance(node, NodeTemplate):
        node_requirements = node.requirements
    elif isinstance(node, dict):
        node_type_name = get_node_type_name(node)
        if 'requirements' not in node[node_type_name]:
            node[node_type_name]['requirements'] = {}
        node_requirements = node[node_type_name]['requirements']
    return node_requirements


def get_parent_type(node):
    if isinstance(node, NodeTemplate):
        if node.parent_type:
            parent_type = node.parent_type.type
        else:
            parent_type = None
    elif isinstance(node, dict):
        parent_type = node[next(iter(node))]['derived_from']
    return parent_type


def get_node_type_requirements(type_name, all_nodes):
    """Returns the requirements for an input node as described in the template not in the node's definition """
    def_type = all_nodes[type_name]
    if 'requirements' in def_type.keys():
        return def_type['requirements']
    return None


def get_ancestors_requirements(node, all_nodes, all_custom_def, parent_requirements=None):
    """Recursively get all requirements all the way to the ROOT including the input node's"""
    if not parent_requirements:
        parent_requirements = []
    if isinstance(node, NodeTemplate):
        # If node has parent and parent has requirements add them
        if node.parent_type and node.parent_type.requirements:
            if isinstance(node.parent_type.requirements, dict):
                parent_requirements.append(node.parent_type.requirements)
            elif isinstance(node.parent_type.requirements, list):
                parent_requirements.extend(node.parent_type.requirements)
        # Make parent type to NodeTemplate to continue
        if node.parent_type.type:
            parent_template = node_type_2_node_template({'name': all_nodes[node.parent_type.type]}, all_custom_def)
            if parent_template:
                get_ancestors_requirements(parent_template, all_nodes, parent_requirements)
    elif isinstance(node, dict):
        node_type_name = get_node_type_name(node)
        node_template = node_type_2_node_template({'name': all_nodes[node_type_name]}, all_custom_def)
        return get_ancestors_requirements(node_template, all_nodes, all_custom_def, parent_requirements)
    return parent_requirements


def get_node_types_with_interface(nodes):
    node_types_with_interface = []
    for node_name in nodes:
        if 'interfaces' in nodes[node_name].keys() and 'tosca.nodes.Root' != node_name:
            node_types_with_interface.append(node_name)
    return node_types_with_interface


def node_type_2_node_template(node_type, all_custom_def):
    node_template_dict = {}
    type_name = next(iter(node_type))
    node_type_array = type_name.split(".")
    name = node_type_array[len(node_type_array) - 1].lower()
    node_template_dict[name] = node_type[next(iter(node_type))].copy()
    node_template_dict[name]['type'] = type_name
    for name_to_remove in node_type_key_names_to_remove:
        if name_to_remove in node_template_dict[name]:
            node_template_dict[name].pop(name_to_remove)
    if 'type' in node_type[next(iter(node_type))]:
        node_type[next(iter(node_type))].pop('type')

    node_template = NodeTemplate(name, node_template_dict, node_type)
    # For some reason the tosca.nodes.ARTICONF.Orchestrator doesn't have all definitions so we need to add them
    # manually. We get 'toscaparser.common.exception.InvalidTypeError: Type "tosca.nodes.ARTICONF.Orchestrator"
    # is not a valid type.'
    if len(node_template.custom_def) < len(all_custom_def):
        for def_key in all_custom_def:
            if isinstance(def_key, dict):
                node_template.custom_def.update(def_key)
            else:
                node_template.custom_def[def_key] = all_custom_def[def_key]
    return node_template


def get_tosca_template_2_topology_template_dictionary(template):
    yaml_str = tosca_template2_yaml(template)
    tosca_template_dict = yaml.load(yaml_str, Loader=yaml.FullLoader)
    this_tosca_template = tosca_template_dict['tosca_template']
    tosca_template_dict.pop('tosca_template')
    tosca_template_dict['topology_template'] = this_tosca_template
    if template.policies and 'policies' not in tosca_template_dict['topology_template']:
        policies_list = []
        for policy in template.policies:
            policy_dict = {policy.name: policy.entity_tpl}
            policies_list.append(policy_dict)
        tosca_template_dict['topology_template']['policies'] = policies_list
    return tosca_template_dict


def contains_node_type(node_types_list, node_type_name):
    if node_types_list is None:
        return False
    for node_type in node_types_list:
        if isinstance(node_type, NodeTemplate):
            type_name = node_type.type
        elif isinstance(node_type, dict):
            type_name = next(iter(node_type))
        if type_name == node_type_name:
            return True
    return False


def get_node_properties(node):
    node_type_name = get_node_type_name(node)
    return node[node_type_name]['properties']


def set_node_properties(node, properties):
    node_type_name = get_node_type_name(node)
    node[node_type_name]['properties'] = properties
    return node


def get_nodes_by_type(node_type, nodes, all_node_types, all_custom_def):
    nodes_by_type = []
    for node in nodes:
        if node.type == node_type:
            nodes_by_type.append(node)
            break
        elif node_type in get_all_ancestors_types(node, all_node_types, all_custom_def):
            nodes_by_type.append(node)
    return nodes_by_type


def get_all_ancestors_types(child_node, all_node_types, all_custom_def, ancestors_types=None):
    if not ancestors_types:
        ancestors_types = [get_node_type_name(child_node)]
    parent_type = get_parent_type(child_node)
    if parent_type:
        ancestors_types.append(parent_type)
        parent_type = node_type_2_node_template({'name': all_node_types[parent_type]}, all_custom_def)
        get_all_ancestors_types(parent_type, all_node_types, all_custom_def, ancestors_types)
    return ancestors_types


def get_all_ancestors_properties(node, all_nodes, all_custom_def, ancestors_properties=None, ancestors_types=None):
    if not ancestors_properties:
        ancestors_properties = []
    ancestors_properties_names = []
    node_prop_names = []
    if node.get_properties_objects():
        for node_prop in node.get_properties_objects():
            node_prop_names.append(node_prop.name)
            ancestors_properties.append(node_prop)
    if not ancestors_types:
        ancestors_types = get_all_ancestors_types(node, all_nodes, all_custom_def)
    for ancestors_type in ancestors_types:
        ancestor = node_type_2_node_template({'name': all_nodes[ancestors_type]}, all_custom_def)
        if ancestor.get_properties_objects():
            for ancestor_prop in ancestor.get_properties_objects():
                if ancestor_prop.name not in ancestors_properties_names and ancestor_prop.name not in node_prop_names:
                    ancestors_properties_names.append(ancestor_prop.name)
                    ancestors_properties.append(ancestor_prop)
    return ancestors_properties


def get_nodes_with_occurrences_in_requirements(topology_nodes):
    nodes_with_occurrences_in_requirement = []
    for node in topology_nodes:
        for requirement in node.requirements:
            requirement_dict = requirement[next(iter(requirement))]
            if 'occurrences' in requirement_dict:
                nodes_with_occurrences_in_requirement.append(node)
                break
    return nodes_with_occurrences_in_requirement


def tosca_template2_yaml(tosca_template):
    topology_dict = {DEFINITION_VERSION: tosca_template.version,
                     IMPORTS: tosca_template._tpl_imports(),
                     DESCRIPTION: tosca_template.description,
                     TOPOLOGY_TEMPLATE: {}}
    topology_dict[TOPOLOGY_TEMPLATE][NODE_TEMPLATES] = {}
    node_templates = tosca_template.nodetemplates
    for node_template in node_templates:
        node_template_dict = get_node_template_dict(node_template)
        topology_dict[TOPOLOGY_TEMPLATE][NODE_TEMPLATES][node_template.name] = node_template_dict
    # If we don't add this then dump uses references for the same dictionary entries i.e. '&id001'
    yaml.Dumper.ignore_aliases = lambda *args: True
    return yaml.dump(topology_dict, default_flow_style=False)


def get_node_template_dict(node_template):
    node_template_dict = {TYPE: node_template.type}
    # node_template_dict[REQUIREMENTS] = {}
    if node_template.requirements:
        node_template_dict[REQUIREMENTS] = node_template.requirements
    # if node_template.interfaces:
    #     interfaces = {}
    #     for interface in node_template.interfaces:
    #         interfaces[interface.type] = {}
    #         interfaces[interface.type][interface.name] = interface.implementation
    # print( node_template.templates[node_template.name] )
    if ARTIFACTS in node_template.templates[node_template.name].keys():
        node_template_dict[ARTIFACTS] = node_template.templates[node_template.name][ARTIFACTS]
    if PROPERTIES in node_template.templates[node_template.name].keys():
        node_template_dict[PROPERTIES] = node_template.templates[node_template.name][PROPERTIES]
    if INTERFACES in node_template.templates[node_template.name].keys():
        node_template_dict[INTERFACES] = node_template.templates[node_template.name][INTERFACES]
    # print(dir(node_template))
    # print(node_template.templates)
    return node_template_dict