UvA / CONF · Commits

Commit 0f38c7e5
Authored Oct 26, 2017 by Spiros Koulouzis

Commit message:
throw exception if deployment values are not there
Parent: 036f5ec2
Showing 6 changed files with 98 additions and 52 deletions (+98 −52)
Files changed:

  docs/composer_files/stack_monitoring.yml                                    +14   -0
  docs/input_tosca_files/BEIA.yml                                             +12   -0
  drip-api/src/main/java/nl/uva/sne/drip/api/service/ProvisionService.java     +4   -3
  drip_parser/src/rpc_server.py                                                +1   -1
  drip_parser/src/transformer/docker_compose_transformer.py                   +67  -48
  drip_parser/src/transformer/docker_compose_transformer.pyc                   +0   -0
docs/composer_files/stack_monitoring.yml (new file, mode 100644)
version: '3'
services:
  server:
    image: salmant/ul_monitoring_server_container_image
    ports:
      - "8080:8080/tcp"
  agent:
    image: salmant/monitoring_agent
    deploy:
      replicas: 20
    depends_on:
      - server
    command: --monitoringServerIP=server
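For reference, the new stack file can be inspected with PyYAML. The following is a minimal sketch, not part of the commit; it assumes PyYAML is installed and that the path is resolved relative to the repository root:

import yaml

# Load the new monitoring stack definition and print a short per-service summary.
with open('docs/composer_files/stack_monitoring.yml') as f:
    stack = yaml.safe_load(f)

for name, svc in stack['services'].items():
    replicas = svc.get('deploy', {}).get('replicas', 1)
    print('%s: image=%s replicas=%s' % (name, svc.get('image'), replicas))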
docs/input_tosca_files/BEIA.yml

@@ -505,6 +505,7 @@ node_types:
      repository: SWITCH_docker_hub
      file: "/???"
    derived_from: "tosca.nodes.Compute"
  "Switch.nodes.Application.Container.Docker.VLAD_AlertChecker":
    artifacts:
      sipnotifier_image:

@@ -512,6 +513,17 @@ node_types:
      repository: SWITCH_docker_hub
      file: "beia/sip_notifier"
    derived_from: "Switch.nodes.Application.Container.Docker"
  "Switch.nodes.Application.Container.Docker.VLAD_ConfigManager":
    artifacts:
      sipnotifier_image:
        type: "tosca.artifacts.Deployment.Image.Container.Docker"
        repository: SWITCH_docker_hub
        file: "beia/config_manager"
    derived_from: "Switch.nodes.Application.Container.Docker"
  "switch.Component.Component.Docker":
    derived_from: "Switch.nodes.Application.Container.Docker"
  "Switch.nodes.Network":
drip-api/src/main/java/nl/uva/sne/drip/api/service/ProvisionService.java
@@ -93,7 +93,7 @@ public class ProvisionService {
        User user = (User) SecurityContextHolder.getContext().getAuthentication().getPrincipal();
        String owner = user.getUsername();
        ownedObject.setOwner(owner);
        return provisionDao.save(ownedObject);
    }

@@ -658,13 +658,14 @@ public class ProvisionService {
                case "deploy_parameters":
                    String value = p.getValue();
                    String[] lines = value.split("\n");
                    if (value == null && value.length() < 2) {
                        throw new Exception("Provision failed");
                    }
                    for (String line : lines) {
                        DeployParameter deployParam = new DeployParameter();
                        String[] parts = line.split(" ");
                        String deployIP = parts[0];
                        String deployUser = parts[1];
                        // String deployCertPath = parts[2];
                        // String cloudCertificateName = FilenameUtils.removeExtension(FilenameUtils.getBaseName(deployCertPath));
                        String deployRole = parts[2];
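Note that, as committed, the guard runs after value.split("\n") and joins its conditions with &&, so a missing value would raise a NullPointerException before the intended Exception("Provision failed") could be thrown. Below is a minimal sketch of the check the commit message describes, written in Python purely for illustration; the function name and the returned field names are hypothetical and mirror the Java hunk above, they are not code from the repository:

def parse_deploy_parameters(value):
    # Reject missing or obviously empty deployment values before parsing.
    if value is None or len(value.strip()) < 2:
        raise Exception('Provision failed: deployment values are not there')
    deploy_params = []
    for line in value.split('\n'):
        parts = line.split(' ')
        # Expected layout per line, as in the Java hunk: <ip> <user> <role>
        deploy_params.append({'ip': parts[0], 'user': parts[1], 'role': parts[2]})
    return deploy_params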
drip_parser/src/rpc_server.py
@@ -84,7 +84,7 @@ def test_local():
    home = expanduser("~")
    transformer = DockerComposeTransformer(home + "/workspace/DRIP/docs/input_tosca_files/BEIA.yml")
    compose = transformer.getnerate_compose()
    # print yaml.dump(compose)
    print yaml.dump(compose)
    response = {}
    current_milli_time = lambda: int(round(time.time() * 1000))
    response["creationDate"] = current_milli_time()
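The generated compose dictionary can also be written straight to a docker-compose file. The sketch below is illustrative only: the import path is assumed from the drip_parser/src layout, the output filename is arbitrary, and the method name getnerate_compose is kept exactly as it is spelled in the source:

import yaml
from transformer.docker_compose_transformer import DockerComposeTransformer

transformer = DockerComposeTransformer('docs/input_tosca_files/BEIA.yml')
compose = transformer.getnerate_compose()

# Serialize the dict produced by the transformer into a docker-compose file.
with open('docker-compose.yml', 'w') as f:
    yaml.dump(compose, f, default_flow_style=False)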
drip_parser/src/transformer/docker_compose_transformer.py
@@ -34,7 +34,6 @@ class DockerComposeTransformer:
            if node_types[node_type_key] and isinstance(node_types[node_type_key], dict) and 'derived_from' in node_types[node_type_key].keys():
                if node_types[node_type_key]['derived_from'] == self.DOCKER_TYPE:
                    docker_types.add(node_type_key)
                    print node_type_key  # debug print, removed by this commit
        return docker_types

    def get_node_templates(self):
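To make concrete what get_docker_types collects: from a node_types mapping such as the BEIA.yml excerpt above, it keeps the names of types whose derived_from equals DOCKER_TYPE. A minimal standalone sketch; the second node type's derived_from value is illustrative, not taken from the repository:

DOCKER_TYPE = 'Switch.nodes.Application.Container.Docker'

node_types = {
    'Switch.nodes.Application.Container.Docker.VLAD_ConfigManager': {
        'derived_from': 'Switch.nodes.Application.Container.Docker',
    },
    'Switch.nodes.Network': {
        'derived_from': 'tosca.nodes.network.Network',  # illustrative value
    },
}

# Mirror of the filter in get_docker_types: keep only Docker-derived node types.
docker_types = set()
for key, definition in node_types.items():
    if isinstance(definition, dict) and definition.get('derived_from') == DOCKER_TYPE:
        docker_types.add(key)

print(docker_types)  # only the VLAD_ConfigManager type survives the filter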
@@ -49,17 +48,25 @@ class DockerComposeTransformer:
        return node['properties']

    def get_enviroment_vars(self, properties):
        # pre-commit body, replaced in this commit:
        #   environment = []
        #   for prop in properties:
        #       if properties[prop] and not isinstance(properties[prop], dict):
        #           environment.append(prop + "=" + str(properties[prop]))
        #   return environment
        environments = []
        for prop in properties:
            if prop == 'Environment_variables' or prop == 'Live_variables':
                for var in properties[prop]:
                    environment = {}
                    environment[var] = properties[prop][var]
                    environments.append(environment)
            # if properties[prop] and not isinstance(properties[prop],dict):
            #     environment.append(prop+"="+str(properties[prop]))
        return environments

    def get_port_map(self, properties):
        port_maps = []
        if 'ports_mapping' in properties:
            ports_mappings = properties['ports_mapping']
            for port_map_key in ports_mappings:
                port_map = {}
                host_port = ports_mappings[port_map_key]['host_port']
                if not isinstance(host_port, (int, long, float, complex)):
                    host_port_var = host_port.replace('${', '').replace('}', '')
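To make the new behaviour of get_enviroment_vars concrete: it now returns a list of single-entry dicts taken from the Environment_variables / Live_variables properties instead of flat "KEY=value" strings. A small sketch with illustrative property values, not taken from BEIA.yml:

properties = {
    'Environment_variables': {'MONITORING_SERVER': 'server', 'LOG_LEVEL': 'info'},
    'scaling_mode': 'single',  # ignored: not an environment property
}

environments = []
for prop in properties:
    if prop == 'Environment_variables' or prop == 'Live_variables':
        for var in properties[prop]:
            environments.append({var: properties[prop][var]})

print(environments)  # e.g. [{'MONITORING_SERVER': 'server'}, {'LOG_LEVEL': 'info'}]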
@@ -69,9 +76,49 @@ class DockerComposeTransformer:
                if not isinstance(container_port, (int, long, float, complex)):
                    container_port_var = container_port.replace('${', '').replace('}', '')
                    container_port = properties[container_port_var]
                # port_maps.append(str(host_port) + ':' + str(container_port))  # pre-commit "host:container" string form, replaced below
                port_map[host_port] = container_port
                port_maps.append(port_map)
        if 'in_ports' in properties:
            ports_mappings = properties['in_ports']
            for port_map_key in ports_mappings:
                port_map = {}
                host_port = ports_mappings[port_map_key]['host_port']
                container_port = ports_mappings[port_map_key]['container_port']
                if 'protocol' in ports_mappings[port_map_key]:
                    protocol = ports_mappings[port_map_key]['protocol']
                    if protocol:
                        container_port = container_port + '/' + protocol
                port_map[host_port] = container_port
                port_maps.append(port_map)
        if 'out_ports' in properties:
            ports_mappings = properties['out_ports']
            for port_map_key in ports_mappings:
                port_map = {}
                if 'host_port' in ports_mappings[port_map_key] and 'container_port' in ports_mappings[port_map_key]:
                    host_port = ports_mappings[port_map_key]['host_port']
                    container_port = ports_mappings[port_map_key]['container_port']
                    if 'protocol' in ports_mappings[port_map_key]:
                        protocol = ports_mappings[port_map_key]['protocol']
                        if protocol:
                            container_port = container_port + '/' + protocol
                    port_map[host_port] = container_port
                    port_maps.append(port_map)
        return port_maps

    def get_requirements(self, node):
        if 'requirements' in node:
            return node['requirements']

    def get_volumes(self, requirements):
        volumes = []
        for req in requirements:
            if 'volume' in req:
                vol = {}
                name = req['volume']['name']
                path = req['volume']['link']
                vol[name] = path
                volumes.append(vol)
        return volumes

    def analyze_yaml(self):
        docker_types = self.get_docker_types()
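A quick illustration of what get_port_map now produces for in_ports style properties: each entry becomes a {host_port: container_port} dict, with the protocol suffixed to the container port when present. The port names and values below are illustrative, not from the repository:

properties = {
    'in_ports': {
        'http': {'host_port': '8080', 'container_port': '8080', 'protocol': 'tcp'},
        'metrics': {'host_port': '9100', 'container_port': '9100'},
    },
}

port_maps = []
for key, mapping in properties['in_ports'].items():
    container_port = mapping['container_port']
    if mapping.get('protocol'):
        container_port = container_port + '/' + mapping['protocol']
    port_maps.append({mapping['host_port']: container_port})

print(port_maps)  # e.g. [{'8080': '8080/tcp'}, {'9100': '9100'}]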
@@ -79,6 +126,7 @@ class DockerComposeTransformer:
        services = {}
        services['version'] = '2'
        services['services'] = {}
        all_volumes = []
        for node_template_key in node_templates:
            for docker_type in docker_types:
                if isinstance(node_templates[node_template_key], dict) and 'type' in node_templates[node_template_key] and docker_type in node_templates[node_template_key]['type']:
@@ -102,46 +150,17 @@ class DockerComposeTransformer:
                    port_maps = self.get_port_map(properties)
                    if port_maps:
                        service['ports'] = port_maps
                    requirements = self.get_requirements(node_templates[node_template_key])
                    volumes = self.get_volumes(requirements)
                    if volumes:
                        service['volumes'] = volumes
                        for vol in volumes:
                            volume_id = {}
                            volume_id[next(iter(vol))] = None
                            all_volumes.append(volume_id)
                    services['services'][node_template_key] = service
                    break
                break
        if all_volumes:
            services['volumes'] = all_volumes
        return services

# def analize_tosca():
# dockers = []
# print dir(self.tt.topology_template)
# print dir(self.tt.outputs)
# print dir(self.tt.nested_tosca_tpls_with_topology)
# print dir(self.tt.nested_tosca_templates_with_topology)
# print dir(self.tt.inputs)
# print dir(self.tt.input_path)
# print dir(self.tt.graph)
# for node in self.tt.nodetemplates:
# if node.parent_type.type == self.DOCKER_TYPE:
# dockers.append(node)
# print dir(node)
# print "Name %s Type: %s" %(node.name,node.type)
# service = {}
# service['name'] = node.type
# print dir(node.get_properties_objects())
# for prop_obj in node.get_properties_objects():
# print dir(prop_obj)
# print "Name %s Type: %s Val: %s" %(prop_obj.name,prop_obj.type,prop_obj.value)
# print (node.templates.keys())
# docker_file = ""
# for temp in node.templates:
# print "\t template: %s" %(temp)
# if 'artifacts' in node.templates[temp]:
# key = next(iter(node.templates[temp]['artifacts']))
# if 'file' in node.templates[temp]['artifacts'][key]:
# docker_file = node.templates[temp]['artifacts'][key]['file']
# print "\t\tdocker_file: %s"%(docker_file)
#
# if docker_file:
# container_name = docker_file.split("/")[1]
# if ':' in container_name:
# container_name = container_name.split(':')[0]
# print container_name
# service ['container_name'] = container_name
# print "Name %s Type: %s Val: %s" %(prop_obj.name,prop_obj.type,prop_obj.value)
# service ['container_name'] =
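Putting the pieces together, analyze_yaml builds a plain dict in docker-compose v2 shape: a 'version' key, a 'services' mapping keyed by node template name (with 'ports' and 'volumes' filled from the helpers shown above), and a top-level 'volumes' list of {name: None} entries. The sketch below is only a rough illustration of that shape; the service name and values are invented, not actual transformer output:

services = {
    'version': '2',
    'services': {
        'VLAD_ConfigManager': {                      # illustrative service name
            'ports': [{'8080': '8080/tcp'}],         # from get_port_map
            'volumes': [{'config_data': '/etc/config'}],  # from get_volumes
        },
    },
    'volumes': [{'config_data': None}],              # collected in all_volumes
}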
drip_parser/src/transformer/docker_compose_transformer.pyc

No preview for this file type (binary file).