Skip to content
Projects
Groups
Snippets
Help
Loading...
Help
Submit feedback
Contribute to GitLab
Sign in
Toggle navigation
C
CONF
Project
Project
Details
Activity
Releases
Cycle Analytics
Repository
Repository
Files
Commits
Branches
Tags
Contributors
Graph
Compare
Charts
Issues
0
Issues
0
List
Board
Labels
Milestones
Merge Requests
0
Merge Requests
0
CI / CD
CI / CD
Pipelines
Jobs
Schedules
Charts
Registry
Registry
Wiki
Wiki
Snippets
Snippets
Members
Members
Collapse sidebar
Close sidebar
Activity
Graph
Charts
Create a new issue
Jobs
Commits
Issue Boards
Open sidebar
UvA
CONF
Commits
0032c1bc
Commit
0032c1bc
authored
Apr 17, 2019
by
Spiros Koulouzis
Browse files
Options
Browse Files
Download
Email Patches
Plain Diff
experimental planner with winery
parent
055ac648
Changes
13
Show whitespace changes
Inline
Side-by-side
Showing
13 changed files
with
185 additions
and
233 deletions
+185
-233
PlannerController.java
...in/java/nl/uva/sne/drip/controller/PlannerController.java
+42
-0
ProvisionController.java
.../java/nl/uva/sne/drip/controller/ProvisionController.java
+1
-1
PlannerService.java
...src/main/java/nl/uva/sne/drip/service/PlannerService.java
+31
-0
swagger.yaml
drip-api/swagger.yaml
+38
-0
project.properties
drip_planner2/nbproject/project.properties
+1
-1
__init__.py
drip_planner2/src/drip_logging/__init__.py
+0
-0
__init__.pyc
drip_planner2/src/drip_logging/__init__.pyc
+0
-0
drip_logging_handler.py
drip_planner2/src/drip_logging/drip_logging_handler.py
+0
-77
drip_logging_handler.pyc
drip_planner2/src/drip_logging/drip_logging_handler.pyc
+0
-0
__init__.cpython-36.pyc
..._planner2/src/planner/__pycache__/__init__.cpython-36.pyc
+0
-0
dum_planner.cpython-36.pyc
...anner2/src/planner/__pycache__/dum_planner.cpython-36.pyc
+0
-0
dum_planner.py
drip_planner2/src/planner/dum_planner.py
+71
-150
rpc_server.py
drip_planner2/src/rpc_server.py
+1
-4
No files found.
drip-api/src/main/java/nl/uva/sne/drip/controller/PlannerController.java
0 → 100644
View file @
0032c1bc
/*
 * Copyright 2019 S. Koulouzis, Huan Zhou, Yang Hu
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package nl.uva.sne.drip.controller;

import com.webcohesion.enunciate.metadata.rs.ResponseCode;
import com.webcohesion.enunciate.metadata.rs.StatusCodes;
// NOTE(review): CloudStormService is imported but not referenced in this class —
// presumably intended for a later endpoint; confirm before removing.
import nl.uva.sne.drip.service.CloudStormService;
import nl.uva.sne.drip.service.PlannerService;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

/**
 * This controller is responsible for obtaining resources from cloud providers.
 *
 * <p>No request-handling methods are defined yet; the class currently only
 * wires in the {@link PlannerService} dependency.
 *
 * @author S. Koulouzis
 */
@RestController
@RequestMapping("/user/v3.0/planner")
@StatusCodes({
    @ResponseCode(code = 401, condition = "Bad credentials")
})
public class PlannerController {

    /** Service that performs the actual planning work (injected by Spring). */
    @Autowired
    private PlannerService plannerService;
}
drip-api/src/main/java/nl/uva/sne/drip/controller/ProvisionController.java
View file @
0032c1bc
...
@@ -29,7 +29,7 @@ import org.springframework.beans.factory.annotation.Autowired;
...
@@ -29,7 +29,7 @@ import org.springframework.beans.factory.annotation.Autowired;
* @author S. Koulouzis
* @author S. Koulouzis
*/
*/
@RestController
@RestController
@RequestMapping
(
"/user/v
1
.0/provisioner"
)
@RequestMapping
(
"/user/v
3
.0/provisioner"
)
@StatusCodes
({
@StatusCodes
({
@ResponseCode
(
code
=
401
,
condition
=
"Bad credentials"
)
@ResponseCode
(
code
=
401
,
condition
=
"Bad credentials"
)
})
})
...
...
drip-api/src/main/java/nl/uva/sne/drip/service/PlannerService.java
0 → 100644
View file @
0032c1bc
/*
 * Copyright 2019 S. Koulouzis, Huan Zhou, Yang Hu
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package nl.uva.sne.drip.service;

import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Service;

/**
 * Planning service skeleton. Currently only binds the message-queue prefix
 * used to address the external planner component.
 *
 * @author S. Koulouzis
 */
@Service
public class PlannerService {

    /** Queue-name prefix for the planner, resolved from application properties. */
    @Value("${planner.queue.prefix}")
    private String plannerQueuePrefix;
}
drip-api/swagger.yaml
0 → 100644
View file @
0032c1bc
# DRIP manager API definition (Swagger/OpenAPI 2.0).
# Reconstructed formatting; string contents preserved from the original.
swagger: "2.0"
info:
  description: "The Dynamic Real-time infrastructure planner (DRIP) allows application developers to seamlessly plan a customized virtual infrastructure based on application level constraints on QoS and resource budgets, provisioning the virtual infrastructure using standardized interfaces (e.g., TOSCA and OCCI), deploy application components onto the virtual infrastructure, and start execution on demand."
  version: "3.0.0"
  title: "DRIP manager"
  license:
    name: "Apache 2.0"
    url: "http://www.apache.org/licenses/LICENSE-2.0.html"
host: "localhost:8080"
basePath: "/v3"
schemes:
- "https"
- "http"
paths:
  /planner:
    post:
      description: "Uploads a TOSCA definition"
      consumes:
      - "multipart/form-data"
      produces:
      - "application/xml"
      - "application/json"
      parameters:
      - in: formData
        name: file
        required: true
        type: file
        description: The TOSCA file
        # Vendor extension listing the accepted upload MIME types.
        x-mimetype:
        - application/x-yaml
        - text/yaml
      responses:
        405:
          description: "Invalid TOSCA file"
        200:
          description: "Upload Successful"
drip_planner2/nbproject/project.properties
View file @
0032c1bc
# NetBeans Python project configuration (post-commit state: platform bumped
# from Python_2.7.12 to Python_3.6.6).
java.lib.path=
main.file=rpc_server.py
platform.active=Python_3.6.6
python.lib.path=
src.dir=src
drip_planner2/src/drip_logging/__init__.py
deleted
100644 → 0
View file @
055ac648
drip_planner2/src/drip_logging/__init__.pyc
deleted
100644 → 0
View file @
055ac648
File deleted
drip_planner2/src/drip_logging/drip_logging_handler.py
deleted
100644 → 0
View file @
055ac648
import
json
import
logging
import
pika
from
python_logging_rabbitmq
import
RabbitMQHandler
class DRIPLoggingHandler(RabbitMQHandler):
    """RabbitMQ log handler that publishes JSON-encoded records to a
    per-user queue named ``'log_qeue_' + user``.

    NOTE(review): the queue-name spelling 'log_qeue_' is preserved exactly —
    consumers elsewhere presumably subscribe to that literal name; confirm
    before correcting the typo.
    """

    def __init__(self, host='localhost', port=5672, username=None,
                 password=None, user=None):
        # Delegate connection parameters to the third-party base class;
        # `user` is only used locally to build the queue name.
        super(DRIPLoggingHandler, self).__init__(
            host=host, port=port, username=username, password=password)
        self.user = user

    def open_connection(self):
        """Connect to RabbitMQ and declare the per-user durable queue."""
        # Message counter used by format(); reset on every (re)connect.
        self.sequenceNumber = 0
        # Route pika's own logging through a temporary stream handler so
        # connection problems are visible, without letting them propagate.
        handler = logging.StreamHandler()
        handler.setFormatter(self.formatter)
        rabbitmq_logger = logging.getLogger('pika')
        rabbitmq_logger.addHandler(handler)
        rabbitmq_logger.propagate = False
        rabbitmq_logger.setLevel(logging.WARNING)
        if not self.connection or self.connection.is_closed:
            self.connection = pika.BlockingConnection(
                pika.ConnectionParameters(**self.connection_params))
        if not self.channel or self.channel.is_closed:
            self.channel = self.connection.channel()
        self.channel.queue_declare(queue='log_qeue_' + self.user,
                                   durable=True)
        # Manually remove logger to avoid shutdown message.
        rabbitmq_logger.removeHandler(handler)

    def emit(self, record):
        """Publish one formatted record; reconnect lazily if needed."""
        self.acquire()
        try:
            if (not self.connection or self.connection.is_closed
                    or not self.channel or self.channel.is_closed):
                self.open_connection()
            queue = 'log_qeue_' + self.user
            self.channel.basic_publish(
                exchange='',
                routing_key=queue,
                body=self.format(record),
                # delivery_mode=2 marks the message persistent.
                properties=pika.BasicProperties(delivery_mode=2))
        except Exception:
            # Drop the (possibly broken) connection so the next emit
            # re-opens it, then defer to the standard error hook.
            self.channel, self.connection = None, None
            self.handleError(record)
        finally:
            if self.close_after_emit:
                self.close_connection()
            self.release()

    def format(self, record):
        """Serialize a LogRecord into the DRIP JSON envelope."""
        drip_record = {}
        drip_record['timestamp'] = record.created
        # NOTE(review): owner is the literal string 'user', not self.user —
        # looks suspicious but is what the original code did; confirm intent.
        drip_record['owner'] = 'user'
        drip_record['level'] = record.levelname
        drip_record['loggerName'] = record.module
        drip_record['message'] = record.message
        # NOTE(review): 'millis' carries record.created (epoch seconds,
        # not milliseconds) — verify against the consumer's expectation.
        drip_record['millis'] = record.created
        self.sequenceNumber += 1
        drip_record['sequenceNumber'] = self.sequenceNumber
        drip_record['sourceClassName'] = record.module
        drip_record['sourceMethodName'] = record.funcName
        return json.dumps(drip_record)
\ No newline at end of file
drip_planner2/src/drip_logging/drip_logging_handler.pyc
deleted
100644 → 0
View file @
055ac648
File deleted
drip_planner2/src/planner/__pycache__/__init__.cpython-36.pyc
0 → 100644
View file @
0032c1bc
File added
drip_planner2/src/planner/__pycache__/dum_planner.cpython-36.pyc
0 → 100644
View file @
0032c1bc
File added
drip_planner2/src/planner/dum_planner.py
View file @
0032c1bc
from
toscaparser
import
*
from
toscaparser.tosca_template
import
ToscaTemplate
from
toscaparser.tosca_template
import
ToscaTemplate
import
toscaparser.utils.yamlparser
import
toscaparser.utils.yamlparser
import
re
import
re
import
operator
import
operator
import
json
class
DumpPlanner
:
class
DumpPlanner
:
service_template_names
=
[
'serviceTemplateOrNodeTypeOrNodeTypeImplementation'
]
topology_template_names
=
[
'topologyTemplate'
]
node_template_names
=
[
'nodeTemplates'
]
requirement_names
=
[
'requirements'
]
def
__init__
(
self
,
service_templaete_file_path
):
dict_tpl
=
self
.
load_file
(
service_templaete_file_path
)
requirements
=
self
.
get_all_requirements
(
dict_tpl
)
unmet_requirements
=
self
.
get_unmet_requirements
(
requirements
)
print
(
requirements
)
# yaml_dict_tpl = self.trnsform_to_tosca(yaml_dict_tpl)
def
load_file
(
self
,
path
):
is_json
=
True
with
open
(
path
)
as
f
:
try
:
dict_tpl
=
json
.
load
(
f
)
except
Exception
as
e
:
if
(
not
isinstance
(
e
,
json
.
decoder
.
JSONDecodeError
)):
raise
e
else
:
is_json
=
False
if
is_json
:
return
dict_tpl
else
:
return
toscaparser
.
utils
.
yamlparser
.
load_yaml
(
path
)
def
__init__
(
self
,
tosca_file_path
):
self
.
yaml_dict_tpl
=
toscaparser
.
utils
.
yamlparser
.
load_yaml
(
tosca_file_path
)
def
trnsform_to_tosca
(
self
,
yaml_dict_tpl
):
self
.
errors
=
[]
self
.
warnings
=
[]
self
.
tt
=
None
try
:
try
:
self
.
tt
=
ToscaTemplate
(
path
=
None
,
yaml_dict_tpl
=
self
.
yaml_dict_tpl
)
self
.
tt
=
ToscaTemplate
(
path
=
None
,
yaml_dict_tpl
=
yaml_dict_tpl
)
except
:
except
Exception
as
e
:
self
.
warnings
.
append
(
"Not a valid tosca file"
)
self
.
handle_tosca_exeption
(
e
,
yaml_dict_tpl
)
self
.
DOCKER_TYPE
=
'Switch.nodes.Application.Container.Docker'
def
handle_tosca_exeption
(
self
,
exception
,
yaml_dict_tpl
):
self
.
COMPUTE_TYPE
=
'Switch.nodes.Compute'
print
(
exception
)
self
.
HW_HOST_TYPE
=
'Switch.datatypes.hw.host'
self
.
HOSTED_NODE_TYPE
=
[
self
.
DOCKER_TYPE
,
self
.
COMPUTE_TYPE
]
def
get_docker_types
(
self
):
def
get_all_requirements
(
self
,
dict_tpl
):
docker_types
=
set
([])
all_requirements
=
[]
node_types
=
self
.
get_node_types
()
service_templates
=
self
.
get_service_template
(
dict_tpl
)
for
node_type_key
in
node_types
:
for
service
in
service_templates
:
if
node_types
[
node_type_key
]
and
'derived_from'
in
node_types
[
node_type_key
]
.
keys
():
topology_template
=
self
.
get_topology_template
(
service
)
if
node_types
[
node_type_key
][
'derived_from'
]
==
self
.
DOCKER_TYPE
:
node_templates
=
self
.
get_node_templates
(
topology_template
)
docker_types
.
add
(
node_type_key
)
for
node_template
in
node_templates
:
return
docker_types
requirements
=
self
.
get_requirements
(
node_template
)
if
requirements
:
for
requirement
in
requirements
[
'requirement'
]:
all_requirements
.
append
(
requirement
)
return
all_requirements
def
get_node_types
(
self
):
return
self
.
yaml_dict_tpl
[
'node_types'
]
def
get_service_template
(
self
,
dict_tpl
):
return
self
.
find
(
dict_tpl
,
self
.
service_template_names
)
def
get_
node_templates
(
self
):
def
get_
topology_template
(
self
,
dict_tpl
):
return
self
.
yaml_dict_tpl
[
'topology_template'
][
'node_templates'
]
return
self
.
find
(
dict_tpl
,
self
.
topology_template_names
)
def
get_network_templates
(
self
):
def
get_node_templates
(
self
,
dict_tpl
):
if
'network_templates'
in
self
.
yaml_dict_tpl
[
'topology_template'
]:
return
self
.
find
(
dict_tpl
,
self
.
node_template_names
)
return
self
.
yaml_dict_tpl
[
'topology_template'
][
'network_templates'
]
else
:
def
get_requirements
(
self
,
dict_tpl
):
return
None
return
self
.
find
(
dict_tpl
,
self
.
requirement_names
)
def
get_artifacts
(
self
,
node
):
if
'artifacts'
in
node
:
def
find
(
self
,
dict_tpl
,
names
):
return
node
[
'artifacts'
]
if
dict_tpl
:
for
name
in
names
:
if
(
name
in
dict_tpl
):
return
dict_tpl
[
name
]
def get_hosted_nodes(self, node_templates):
    """Return the node templates whose 'type' is one of the hosted types
    (Docker/Compute plus any user-defined Docker-derived types), tagging
    each returned template with its template key under 'id'.

    NOTE(review): this method mutates self.HOSTED_NODE_TYPE (appends the
    docker types) and the matched templates (adds 'id') in place, exactly
    as the original did.
    """
    docker_types = self.get_docker_types()
    self.HOSTED_NODE_TYPE = self.HOSTED_NODE_TYPE + list(docker_types)
    hosted_nodes = []
    for node_key in node_templates:
        template = node_templates[node_key]
        for hosted_type in self.HOSTED_NODE_TYPE:
            if template['type'] == hosted_type:
                # Record which template key this node came from.
                template['id'] = node_key
                hosted_nodes.append(template)
                break
    return hosted_nodes
def get_properties(self, node):
    """Return the node's 'properties' mapping, or None when absent.

    Implicit None return (no else branch) matches the original behavior.
    """
    if 'properties' in node:
        return node['properties']
def cast_to_int(self, value):
    """Coerce a capability value to int.

    ints pass through; strings yield their FIRST digit run (so '32GB' -> 32,
    but '2.6GHz' -> 2 — NOTE(review): fractional magnitudes are truncated at
    the decimal point, which skews scoring; confirm whether intended).
    Any other type falls through and returns None, as in the original.
    """
    if isinstance(value, int):
        return value
    if isinstance(value, str):
        # First run of digits in the string, e.g. '10GB' -> 10.
        return int(re.findall(r"\d+", value)[0])
def sort_vms(self, vms, max_vms):
    """Rank VMs by a crude capacity score (sum of cpu_frequency, mem_size,
    num_cpus, disk_size, each cast to int) and return at most `max_vms`
    of them, highest score first.

    NOTE(review): scores are keyed by vm['name'], so VMs sharing a name
    collapse to one dictionary entry — preserved from the original.
    """
    picked = []
    scores = {}
    for vm in vms:
        capacity = vm['host']
        total = 0
        total += self.cast_to_int(capacity['cpu_frequency'])
        total += self.cast_to_int(capacity['mem_size'])
        total += self.cast_to_int(capacity['num_cpus'])
        total += self.cast_to_int(capacity['disk_size'])
        scores[vm['name']] = total
    # Highest-scoring names first.
    ranked = sorted(scores.items(), key=operator.itemgetter(1), reverse=True)
    taken = 0
    for name_score in ranked:
        if taken >= max_vms:
            break
        for vm in vms:
            if name_score[0] == vm['name']:
                taken += 1
                picked.append(vm)
                break
    return picked
def plan(self, max_vms):
    """Produce a list of VM descriptions for the topology.

    Two paths:
      * multicast networks get a single fixed large VM (16 cpus / 32GB),
        returned immediately;
      * otherwise every hosted node becomes a VM, taking host/os specs from
        its 'node_filter' requirement when present and falling back to small
        defaults (1 cpu / 4GB / ubuntu 16.04) when not.
    When max_vms <= -1 it is recomputed as len(vms) // 3; the list is then
    trimmed via sort_vms when it exceeds max_vms.
    """
    network_templates = self.get_network_templates()
    vms = []
    if (network_templates and 'network' in network_templates
            and network_templates['network']['multicast'] == True):
        # Multicast requested: plan one beefy machine and stop.
        vm = {}
        vm['name'] = 'id'
        vm['type'] = self.COMPUTE_TYPE
        host = {}
        host['cpu_frequency'] = '2.6GHz'
        host['mem_size'] = '32GB'
        host['num_cpus'] = '16'
        host['disk_size'] = '10GB'
        vm['host'] = host
        os = {}
        os['os_version'] = 16.04
        os['distribution'] = 'ubuntu'
        os['type'] = 'linux'
        os['architecture'] = 'x86_64'
        vm['os'] = os
        vm['scaling_mode'] = 'multiple'
        vms.append(vm)
        return vms
    node_templates = self.get_node_templates()
    hosted_nodes = self.get_hosted_nodes(node_templates)
    for node in hosted_nodes:
        vm = {}
        vm['name'] = node['id']
        vm['type'] = self.COMPUTE_TYPE
        if 'requirements' in node and node['requirements']:
            for req in node['requirements']:
                if 'host' in req and 'node_filter' in req['host']:
                    # Copy explicit capability requirements verbatim.
                    vm['host'] = req['host']['node_filter']['capabilities']['host']
                    vm['os'] = req['host']['node_filter']['capabilities']['os']
        if 'host' not in vm:
            # Default (small) hardware profile.
            host = {}
            host['cpu_frequency'] = '2GHz'
            host['mem_size'] = '4GB'
            host['num_cpus'] = '1'
            host['disk_size'] = '10GB'
            vm['host'] = host
        if 'os' not in vm:
            # Default operating system profile.
            os = {}
            os['os_version'] = 16.04
            os['distribution'] = 'ubuntu'
            os['type'] = 'linux'
            os['architecture'] = 'x86_64'
            vm['os'] = os
        properties = self.get_properties(node)
        if properties and 'scaling_mode' in properties:
            vm['scaling_mode'] = properties['scaling_mode']
        else:
            vm['scaling_mode'] = 'single'
        vms.append(vm)
    if max_vms <= -1:
        max_vms = len(vms) // 3
    if max_vms > -1 and len(vms) > max_vms:
        vms = self.sort_vms(vms, max_vms)
    return vms
def get_unmet_requirements(self, requirements):
    """Print each requirement (placeholder for real unmet-requirement
    detection). Returns None.

    BUG FIX: the original iterated `for requirement in requirement:` —
    looping over its own not-yet-bound loop variable instead of the
    `requirements` parameter, which raised NameError on every call.
    """
    for requirement in requirements or []:
        print(requirement)
\ No newline at end of file
drip_planner2/src/rpc_server.py
View file @
0032c1bc
...
@@ -12,7 +12,6 @@ from planner.dum_planner import *
...
@@ -12,7 +12,6 @@ from planner.dum_planner import *
import
sys
import
sys
import
tempfile
import
tempfile
import
time
import
time
from
drip_logging.drip_logging_handler
import
*
logger
=
logging
.
getLogger
(
__name__
)
logger
=
logging
.
getLogger
(
__name__
)
if
not
getattr
(
logger
,
'handler_set'
,
None
):
if
not
getattr
(
logger
,
'handler_set'
,
None
):
...
@@ -112,9 +111,7 @@ def handle_delivery(message):
...
@@ -112,9 +111,7 @@ def handle_delivery(message):
if
__name__
==
"__main__"
:
if
__name__
==
"__main__"
:
if
(
sys
.
argv
[
1
]
==
"test_local"
):
if
(
sys
.
argv
[
1
]
==
"test_local"
):
home
=
expanduser
(
"~"
)
home
=
expanduser
(
"~"
)
planner
=
DumpPlanner
(
home
+
"/Downloads/tosca.yml"
)
planner
=
DumpPlanner
(
home
+
"/Downloads/topology.json"
)
max_vms
=
-
1
print
planner
.
plan
(
max_vms
)
else
:
else
:
logger
.
info
(
"Input args: "
+
sys
.
argv
[
0
]
+
' '
+
sys
.
argv
[
1
]
+
' '
+
sys
.
argv
[
2
])
logger
.
info
(
"Input args: "
+
sys
.
argv
[
0
]
+
' '
+
sys
.
argv
[
1
]
+
' '
+
sys
.
argv
[
2
])
channel
=
init_chanel
(
sys
.
argv
)
channel
=
init_chanel
(
sys
.
argv
)
...
...
Write
Preview
Markdown
is supported
0%
Try again
or
attach a new file
Attach a file
Cancel
You are about to add
0
people
to the discussion. Proceed with caution.
Finish editing this message first!
Cancel
Please
register
or
sign in
to comment