UvA / CONF / Commits / 62addd15

Commit 62addd15 authored Jul 31, 2020 by Spiros Koulouzis

    fixed hyperledger inventory

parent 11370fde

Showing 6 changed files with 168 additions and 58 deletions (+168 / -58):

    deployer/__main__.py                  +3    -3
    deployer/requirements.txt             +6    -6
    deployer/service/ansible_service.py   +111  -23
    deployer/service/deploy_service.py    +25   -13
    deployer/service/tosca_helper.py      +22   -12
    deployer/test/test_deployer.py        +1    -1

deployer/__main__.py  (view file @ 62addd15)

@@ -84,13 +84,13 @@ def handle_delivery(message):
     tosca_helper = ToscaHelper(sure_tosca_base_url, tosca_template_path)
     # nodes_to_deploy = tosca_helper.get_application_nodes()
-    nodes_pairs = tosca_helper.get_deployment_node_pairs()
+    nodes = tosca_helper.get_deployment_node_pipeline()
     deployService = DeployService(semaphore_base_url=semaphore_base_url, semaphore_username=semaphore_username,
                                   semaphore_password=semaphore_password, vms=tosca_helper.get_vms())
     try:
-        for node_pair in nodes_pairs:
-            updated_node = deployService.deploy(node_pair)
+        for node in nodes:
+            updated_node = deployService.deploy(node)
             if isinstance(updated_node, list):
                 for node in updated_node:
                     tosca_template_dict = tosca_helper.set_node(node, tosca_template_dict)
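
The loop rewrite above goes hand in hand with the DeployService.deploy() change later in this commit: deploy() now receives one application node at a time from a topologically sorted pipeline instead of a (dependency, application) pair. A minimal sketch of the two calling conventions (object names are illustrative only):

    # before: nodes came as (dependency, application) pairs
    # updated_node = deployService.deploy((vms, application_node))
    # after: nodes come pre-sorted, one at a time
    for node in tosca_helper.get_deployment_node_pipeline():
        updated_node = deployService.deploy(node)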

deployer/requirements.txt  (view file @ 62addd15)

 pika==1.1.0
 names==0.3.0
 networkx==2.4
-requests==2.23.0
+# requests==2.23.0
 wheel==0.34.2
 pyyaml==5.3.1
-matplotlib==3.2.1
-# ansible==2.9.6
-certifi==2020.4.5.1
+# matplotlib==3.2.1
+ansible==2.9.11
+# certifi==2020.4.5.1
 six==1.14.0
 python_dateutil==2.8.1
-setuptools==46.1.3
-urllib3==1.25.8
+# setuptools==46.1.3
+# urllib3==1.25.8
 kubernetes==11.0.0
\ No newline at end of file

deployer/service/ansible_service.py  (view file @ 62addd15)

 import base64
 import configparser
 import logging
 from time import sleep
+import datetime
@@ -29,8 +30,7 @@ class AnsibleService:
         self.repository_id = None
         self.template_id = None

-    def execute(self, nodes_pair, interface_type, vms, env_vars=None):
-        application = nodes_pair[1]
+    def execute(self, application, interface_type, vms, env_vars=None):
         name = application.name
         desired_state = None
         tasks_outputs = {}
@@ -43,7 +43,8 @@ class AnsibleService:
         if desired_state:
             now = datetime.datetime.now()
             project_id = self.semaphore_helper.create_project(application.name + '_' + str(now))
-            inventory_contents = yaml.dump(self.build_yml_inventory(vms), default_flow_style=False)
+            inventory_dict = self.build_inventory(vms, application_name=application.name)
+            inventory_contents = yaml.dump(inventory_dict, default_flow_style=False)
             private_key = self.get_private_key(vms)
             key_id = self.semaphore_helper.create_ssh_key(application.name, project_id, private_key)
             inventory_id = self.semaphore_helper.create_inventory(application.name, project_id, key_id,
@@ -59,17 +60,28 @@ class AnsibleService:
             environment_id = None
             if env_vars:
                 environment_id = self.semaphore_helper.create_environment(project_id, name, env_vars)
-            task_id = self.run_task(name, project_id, key_id, git_url, inventory_id, playbook_name,
-                                    environment_id=environment_id)
+            arguments = None
+            if application.name == 'gluster_fs' or application.name == 'glusterfs' or application.name == 'tic':
+                if playbook_name == '003.setup_glusterfs_cluster.yml':
+                    arguments = '["-u","vm_user"]'
+                if playbook_name == '013.mount_fs.yml':
+                    master_ip = next(iter(inventory_dict['all']['children']['swarm_manager_prime']['hosts']))
+                    # outputs 'foo'
+                    arguments = '["-u","vm_user","--extra-vars","gluster_cluster_host0=\'' + master_ip + '\' gluster_cluster_volume=\'gfs0\'"]'
+            task_id = self.run_task(name, project_id, key_id, git_url, inventory_id, playbook_name,
+                                    environment_id=environment_id, arguments=arguments)
             if self.semaphore_helper.get_task(project_id, task_id).status != 'success':
                 msg = ' '
                 for out in self.semaphore_helper.get_task_outputs(project_id, task_id):
                     msg = msg + ' ' + out.output
                 raise Exception('Task: ' + playbook_name + ' failed. ' +
                                 self.semaphore_helper.get_task(project_id, task_id).status + ' Output: ' + msg)
             tasks_outputs[task_id] = self.semaphore_helper.get_task_outputs(project_id, task_id)
             if 'configure' in interface and self.semaphore_helper.get_task(project_id, task_id).status == 'success' and 'resources' in inputs:
                 configure = interface['configure']
                 inputs = configure['inputs']
                 git_url = inputs['repository']
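
The arguments string built above is handed through run_task() to the Semaphore task template; judging by its format it is a JSON-encoded list of extra ansible-playbook flags. A quick sanity check that the mount string parses as such a list (master_ip is a placeholder address, not from the repository):

    import json

    master_ip = '192.0.2.10'  # placeholder; normally read from the generated inventory
    arguments = ('["-u","vm_user","--extra-vars","gluster_cluster_host0=\''
                 + master_ip + '\' gluster_cluster_volume=\'gfs0\'"]')
    print(json.loads(arguments))
    # ['-u', 'vm_user', '--extra-vars',
    #  "gluster_cluster_host0='192.0.2.10' gluster_cluster_volume='gfs0'"]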

@@ -91,29 +103,40 @@ class AnsibleService:
                 tasks_outputs[task_id] = self.semaphore_helper.get_task_outputs(project_id, task_id)
         return tasks_outputs

-    def build_yml_inventory(self, vms):
+    def build_inventory(self, vms, application_name=None):
         # loader = DataLoader()
         # inventory = InventoryManager(loader=loader)
         # variable_manager = VariableManager()
+        vars = {}
+        # vars['ansible_ssh_common_args'] = '"-o StrictHostKeyChecking=no"'
+        vars['ansible_ssh_user'] = vms[0].node_template.properties['user_name']
+        vars['ansible_python_interpreter'] = '/usr/bin/python3'
+        if application_name == 'gluster_fs' or application_name == 'glusterfs':
+            return self.build_glusterfs_inventory(vms, vars)
+        if application_name == 'tic':
+            return self.build_tic_inventory(vms, vars)
         inventory = {}
         all = {}
-        vars = {'ansible_ssh_common_args': '-o StrictHostKeyChecking=no'}
-        vars['ansible_ssh_user'] = vms[0].node_template.properties['user_name']
         children = {}
         for vm in vms:
             attributes = vm.node_template.attributes
-            role = attributes['role']
+            roles = []
+            roles.append(attributes['role'])
             public_ip = attributes['public_ip']
-            if role not in children:
-                hosts = {}
-            else:
-                hosts = children[role]
-            host = {}
-            host[public_ip] = vars
-            hosts['hosts'] = host
-            children[role] = hosts
+            for role in roles:
+                if role not in children:
+                    hosts = {}
+                else:
+                    hosts = children[role]
+                host = {}
+                host[public_ip] = vars
+                if 'hosts' in hosts:
+                    hosts['hosts'][public_ip] = vars
+                else:
+                    hosts['hosts'] = host
+                children[role] = hosts
         # inventory.add_group(role)
         # inventory.add_host(public_ip,group=role)
         all['children'] = children
         inventory['all'] = all
         return inventory
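
For reference, a sketch of the YAML that yaml.dump(inventory_dict, default_flow_style=False) emits for the generic branch of build_inventory(), using a placeholder address and user name (a single host is shown; reusing the same vars dict across several hosts would make PyYAML emit anchors/aliases):

    import yaml

    vars = {'ansible_ssh_user': 'vm_user',  # from vms[0].node_template.properties['user_name']
            'ansible_python_interpreter': '/usr/bin/python3'}
    inventory = {'all': {'children': {'master': {'hosts': {'192.0.2.10': vars}}}}}
    print(yaml.dump(inventory, default_flow_style=False))
    # all:
    #   children:
    #     master:
    #       hosts:
    #         192.0.2.10:
    #           ansible_python_interpreter: /usr/bin/python3
    #           ansible_ssh_user: vm_user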

@@ -122,12 +145,13 @@ class AnsibleService:
         private_key = vms[0].node_template.attributes['user_key_pair']['keys']['private_key']
         return base64.b64decode(private_key).decode('utf-8').replace(r'\n', '\n')

-    def run_task(self, name, project_id, key_id, git_url, inventory_id, playbook_name, environment_id=None):
+    def run_task(self, name, project_id, key_id, git_url, inventory_id, playbook_name, environment_id=None,
+                 arguments=None):
         logger.info('project_id: ' + str(project_id) + ' task name: ' + str(name) + ' git url: ' + git_url +
                     ' playbook: ' + playbook_name)
         self.repository_id = self.semaphore_helper.create_repository(name, project_id, key_id, git_url)
-        template_id = self.semaphore_helper.create_template(project_id, key_id, inventory_id, self.repository_id,
-                                                            playbook_name)
+        template_id = self.semaphore_helper.create_template(project_id, key_id, inventory_id, self.repository_id,
+                                                            playbook_name, arguments)
         task_id = self.semaphore_helper.execute_task(project_id, template_id, playbook_name, environment_id=environment_id)
         task = self.semaphore_helper.get_task(project_id, task_id)
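
get_private_key() (context above) assumes the stored key is base64-encoded text whose newlines were flattened to literal backslash-n sequences; a self-contained round-trip sketch (the key body is a dummy):

    import base64

    # store: real newlines already flattened to literal \n before base64-encoding
    stored = base64.b64encode(rb'-----BEGIN RSA PRIVATE KEY-----\nMIIE...\n-----END RSA PRIVATE KEY-----')
    # load, as the deployer does: base64-decode, then restore real newlines
    pem = base64.b64decode(stored).decode('utf-8').replace(r'\n', '\n')
    print(pem.splitlines()[0])  # -----BEGIN RSA PRIVATE KEY-----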
@@ -140,3 +164,67 @@ class AnsibleService:
                 last_status = this_status
             sleep(6)
         return task_id
+
+    def build_glusterfs_inventory(self, vms, vars):
+        inventory = {}
+        all = {}
+        children = {}
+        for vm in vms:
+            attributes = vm.node_template.attributes
+            roles = []
+            roles.append('gfscluster')
+            public_ip = attributes['public_ip']
+            for role in roles:
+                if role not in children:
+                    hosts = {}
+                else:
+                    hosts = children[role]
+                if 'hosts' in hosts:
+                    hosts['hosts'][public_ip] = vars
+                else:
+                    host = {}
+                    host[public_ip] = vars
+                    hosts['hosts'] = host
+                children[role] = hosts
+        all['children'] = children
+        inventory['all'] = all
+        return inventory
+
+    def build_tic_inventory(self, vms, vars):
+        inventory = {}
+        all = {}
+        children = {}
+        for vm in vms:
+            attributes = vm.node_template.attributes
+            roles = []
+            if attributes['role'] == 'master':
+                roles.append('swarm_manager_prime')
+                roles.append('swarm_managers')
+            elif attributes['role'] == 'worker':
+                roles.append('swarm_workers')
+            public_ip = attributes['public_ip']
+            for role in roles:
+                if role not in children:
+                    hosts = {}
+                else:
+                    hosts = children[role]
+                if 'hosts' in hosts:
+                    # if attributes['role'] == 'master':
+                    hosts['hosts'] = {public_ip: vars}
+                    # else:
+                    #     hosts['hosts'] = {public_ip: vars}
+                else:
+                    host = {}
+                    host[public_ip] = vars
+                    # if role == 'swarm_manager_prime':
+                    #     host['fabric-manager'] = vars
+                    # elif role == 'swarm_managers':
+                    #     host['fabric-manager'] = {}
+                    # else:
+                    #     host['fabric-worker'] = vars
+                    hosts['hosts'] = host
+                children[role] = hosts
+        all['children'] = children
+        inventory['all'] = all
+        return inventory
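
The swarm_manager_prime group written by build_tic_inventory() is exactly what execute() reads the master address back out of for 013.mount_fs.yml. A minimal sketch with placeholder addresses:

    inventory_dict = {'all': {'children': {
        'swarm_manager_prime': {'hosts': {'192.0.2.10': {}}},
        'swarm_managers': {'hosts': {'192.0.2.10': {}}},
        'swarm_workers': {'hosts': {'192.0.2.11': {}}}}}}
    master_ip = next(iter(inventory_dict['all']['children']['swarm_manager_prime']['hosts']))
    print(master_ip)  # 192.0.2.10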

deployer/service/deploy_service.py  (view file @ 62addd15)

@@ -29,24 +29,24 @@ class DeployService:
                 self.master_ip = vm.node_template.attributes['public_ip']
                 break

-    def deploy(self, nodes_pair):
-        target = nodes_pair[0]
-        source = nodes_pair[1]
-        interface_types = tosca_helper.get_interface_types(source)
+    def deploy(self, application):
+        # target = nodes_pair[0]
+        # source = nodes_pair[1]
+        interface_types = tosca_helper.get_interface_types(application)
         if interface_types:
             ansible_service = AnsibleService(self.semaphore_base_url, self.semaphore_username, self.semaphore_password)
-            env_vars = self.get_env_vars(nodes_pair)
+            env_vars = self.get_env_vars(application)
             if 'Standard' in interface_types:
-                task_outputs = ansible_service.execute(nodes_pair, 'Standard', self.vms, env_vars=env_vars)
-                source = self.set_attributes(task_outputs, source)
+                task_outputs = ansible_service.execute(application, 'Standard', self.vms, env_vars=env_vars)
+                application = self.set_attributes(task_outputs, application)
             if 'Kubernetes' in interface_types:
-                task_outputs = ansible_service.execute(nodes_pair, 'Kubernetes', self.vms, env_vars=env_vars)
-                source = self.set_attributes(task_outputs, source)
-        return source
+                task_outputs = ansible_service.execute(application, 'Kubernetes', self.vms, env_vars=env_vars)
+                application = self.set_attributes(task_outputs, application)
+        return application

-    def get_env_vars(self, nodes_pair):
-        target = nodes_pair[0]
-        source = nodes_pair[1]
+    def get_env_vars(self, source):
+        # target = nodes_pair[0]
+        # source = nodes_pair[1]
         env_vars = {'K8s_NAMESPACE': 'default'}
         if source.node_template.type == 'tosca.nodes.QC.Container.Application.Docker':
             env_vars['DOCKER_IMAGE'] = source.node_template.artifacts['image']['file']
@@ -65,6 +65,8 @@ class DeployService:
             source = self.set_kubernetes_attributes(source=source, task_outputs=task_outputs)
         if source.node_template.type == 'tosca.nodes.QC.Container.Application.Docker':
             source = self.set_docker_attributes(source=source, task_outputs=task_outputs)
+        if source.node_template.type == 'tosca.nodes.QC.Application.TIC':
+            source = self.set_tic_attributes(source=source, task_outputs=task_outputs)
         # lst = list(nodes_pair)
         # lst[1] = source
         # nodes_pair = tuple(lst)
@@ -175,3 +177,13 @@ class DeployService:
             attributes['service_url'] = service_url
             logger.info('source.node_template.attributes: ' + str(attributes))
         return source
+
+    def set_tic_attributes(self, source, task_outputs):
+        attributes = source.node_template.attributes
+        if 'service_urls' not in source.node_template.attributes:
+            service_urls = []
+            attributes['service_urls'] = service_urls
+        for port in ['8090', '9000', '9090']:
+            service_urls.append('http://' + self.master_ip + ':' + str(port))
+        attributes['service_urls'] = service_urls
+        return source
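
set_tic_attributes() derives the TIC service endpoints from the Swarm master's public IP on three fixed ports; with a placeholder address the result is:

    master_ip = '192.0.2.10'  # placeholder for self.master_ip
    service_urls = ['http://' + master_ip + ':' + str(port) for port in ['8090', '9000', '9090']]
    print(service_urls)
    # ['http://192.0.2.10:8090', 'http://192.0.2.10:9000', 'http://192.0.2.10:9090']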

deployer/service/tosca_helper.py  (view file @ 62addd15)

@@ -5,6 +5,8 @@ import urllib.request
 from sure_tosca_client import Configuration, ApiClient, NodeTemplate
 from sure_tosca_client.api import default_api
+import networkx as nx
+import matplotlib.pyplot as plt


 class ToscaHelper:

@@ -38,22 +40,30 @@ class ToscaHelper:
     def get_application_nodes(self):
         return self.tosca_client.get_node_templates(self.doc_id, type_name='tosca.nodes.QC.Application')

-    def get_deployment_node_pairs(self):
+    def get_deployment_node_pipeline(self):
         nodes_to_deploy = self.get_application_nodes()
-        nodes_pairs = []
+        G = nx.DiGraph()
+        sorted_nodes = []
         for node in nodes_to_deploy:
             related_nodes = self.tosca_client.get_related_nodes(self.doc_id, node.name)
             for related_node in related_nodes:
-                # We need to deploy the docker orchestrator on the VMs not the topology.
-                # But the topology is directly connected to the orchestrator not the VMs.
-                # So we explicitly get the VMs
-                # I don't like this solution but I can't think of something better.
-                if related_node.node_template.type == 'tosca.nodes.QC.VM.topology':
-                    vms = self.get_vms()
-                    related_node = vms
-                pair = (related_node, node)
-                nodes_pairs.append(pair)
-        return nodes_pairs
+                G.add_edge(node.name, related_node.name)
+                # # We need to deploy the docker orchestrator on the VMs not the topology.
+                # # But the topology is directly connected to the orchestrator not the VMs.
+                # # So we explicitly get the VMs
+                # # I don't like this solution but I can't think of something better.
+                # if related_node.node_template.type == 'tosca.nodes.QC.VM.topology':
+                #     vms = self.get_vms()
+                #     related_node = vms
+                # pair = (related_node, node)
+                # nodes_pairs.append(pair)
+        sorted_graph = sorted(G.in_degree, key=lambda x: x[1], reverse=True)
+        for node_tuple in sorted_graph:
+            node_name = node_tuple[0]
+            for node in nodes_to_deploy:
+                if node.name == node_name:
+                    sorted_nodes.append(node)
+        return sorted_nodes

     @classmethod
     def service_is_up(cls, url):
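
The new pipeline orders application nodes by in-degree of the dependency graph: G.add_edge(node.name, related_node.name) points each node at what it relates to, so nodes that many others point at (shared dependencies) come out first and are deployed before their dependents. A small self-contained sketch (node names hypothetical):

    import networkx as nx

    G = nx.DiGraph()
    G.add_edge('topology', 'docker_orchestrator')
    G.add_edge('mysql', 'docker_orchestrator')
    G.add_edge('wordpress', 'mysql')
    print(sorted(G.in_degree, key=lambda x: x[1], reverse=True))
    # [('docker_orchestrator', 2), ('mysql', 1), ('topology', 0), ('wordpress', 0)]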

deployer/test/test_deployer.py  (view file @ 62addd15)

@@ -48,7 +48,7 @@ class TestDeployer(unittest.TestCase):
         self.assertIsNotNone(tosca_helper.doc_id)
         nodes_to_deploy = tosca_helper.get_application_nodes()
         self.assertIsNotNone(nodes_to_deploy)
-        nodes_pairs = tosca_helper.get_deployment_node_pairs()
+        nodes_pairs = tosca_helper.get_deployment_node_pipeline()
         self.assertIsNotNone(nodes_pairs)
         username = 'admin'