UvA / CONF · Commits · 17dceead

Commit 17dceead
authored Mar 27, 2020 by Spiros Koulouzis

changed interfaces

parent bb8326aa

Showing 4 changed files with 70 additions and 197 deletions (+70 / -197):

    deployer/service/ansible_service.py    +4    -164
    deployer/service/deploy_service.py     +5    -11
    deployer/service/tosca_helper.py       +53   -0
    deployer/test/test_deployer.py         +8    -22
deployer/service/ansible_service.py  (view file @ 17dceead)

@@ -7,12 +7,6 @@ from collections import namedtuple
 from stat import S_IREAD
 from subprocess import Popen, PIPE
-import ansible
-import requests
-from ansible.executor.playbook_executor import PlaybookExecutor
-from ansible.parsing.dataloader import DataLoader
-from ansible.vars.manager import VariableManager
 import re

 logger = logging.getLogger(__name__)

@@ -25,161 +19,7 @@ if not getattr(logger, 'handler_set', None):
     logger.handler_set = True


-def write_inventory_file(tmp_path, vms):
-    workers = []
-    k8_master = None
-    ansible_ssh_private_key_file_path = None
-    ansible_ssh_user = None
-    for vm_name in vms:
-        attributes = vms[vm_name]['attributes']
-        role = attributes['role']
-        if 'public_ip' not in attributes:
-            raise ValueError('VM: ' + vm_name + ' has no public_ip attribute')
-        if role == 'master':
-            k8_master = attributes['public_ip']
-        else:
-            workers.append(attributes['public_ip'])
-        if ansible_ssh_private_key_file_path is None:
-            ansible_ssh_private_key_encoded = attributes['user_key_pair']['keys']['private_key']
-            ansible_ssh_private_key = base64.b64decode(ansible_ssh_private_key_encoded).decode('utf-8')
-            ansible_ssh_private_key_file_path = tmp_path + "/id_rsa"
-            with open(ansible_ssh_private_key_file_path, "w") as ansible_ssh_private_key_file:
-                print(ansible_ssh_private_key, file=ansible_ssh_private_key_file)
-            os.chmod(ansible_ssh_private_key_file_path, S_IREAD)
-        if ansible_ssh_user is None:
-            ansible_ssh_user = vms[vm_name]['properties']['user_name']
-    k8s_hosts_path = tmp_path + "/k8s_hosts"
-    with open(k8s_hosts_path, "w") as k8s_hosts_file:
-        print('[k8-master]', file=k8s_hosts_file)
-        print(k8_master, file=k8s_hosts_file)
-        print('\n', file=k8s_hosts_file)
-        print('[worker]', file=k8s_hosts_file)
-        for worker in workers:
-            print(worker, file=k8s_hosts_file)
-        print('\n', file=k8s_hosts_file)
-        print('[cluster:children]', file=k8s_hosts_file)
-        print('k8-master', file=k8s_hosts_file)
-        print('worker', file=k8s_hosts_file)
-        print('\n', file=k8s_hosts_file)
-        print('[cluster:vars]', file=k8s_hosts_file)
-        print('ansible_ssh_private_key_file=' + ansible_ssh_private_key_file_path, file=k8s_hosts_file)
-        print('ansible_ssh_common_args=\'-o StrictHostKeyChecking=no\'', file=k8s_hosts_file)
-        print('ansible_ssh_user=' + ansible_ssh_user, file=k8s_hosts_file)
-    logger.info("Returning inventory file at: " + str(k8s_hosts_path))
-    return k8s_hosts_path
-
-
-def write_playbooks(tmp_path, interface):
-    playbook_paths = []
-    interface_stage_list = ['install', 'create']
-    # for interface_stage in interface:
-    for interface_stage in interface_stage_list:
-        playbook_url = interface[interface_stage]['inputs']['playbook']
-        r = requests.get(playbook_url)
-        playbook_path = tmp_path + "/" + interface_stage + '.yaml'
-        with open(playbook_path, 'wb') as f:
-            f.write(r.content)
-        playbook_paths.append(playbook_path)
-    return playbook_paths
-
-
-def write_playbooks_from_tosca_interface(interfaces, tmp_path):
-    playbook_paths = []
-    for interface_name in interfaces:
-        playbook_paths = playbook_paths + write_playbooks(tmp_path, interfaces[interface_name])
-    logger.info("Returning playbook paths file at: " + str(playbook_paths))
-    return playbook_paths
-
-
-def run(inventory_path, playbook_path):
-    logger.info("Executing playbook: " + str(playbook_path))
-    p = Popen(["ansible-playbook", "-i", inventory_path, playbook_path,
-               '--ssh-common-args=\'-o StrictHostKeyChecking=no\''],
-              stdin=PIPE, stdout=PIPE, stderr=PIPE)
-    output, err = p.communicate()
-    # print(output.decode('utf-8'))
-    # print(err.decode('utf-8'))
-    logger.info("Playbook output: " + str(output.decode('utf-8')))
-    logger.info("Playbook err: " + str(err.decode('utf-8')))
-    rc = p.returncode
-    return output, err
-
-
-def parse_dashboard_tokens(out):
-    token = None
-    if 'admin-user-token' in out:
-        m = re.search('\"token: (.+?)\"', out)
-        if m:
-            token = m.group(1).strip()
-    return token
-
-
-def parse_api_tokens(out):
-    api_key = None
-    join_token = None
-    discovery_token_ca_cert_hash = None
-    if 'Join command is kubeadm join' in out:
-        api_key = re.search("^msg.*", out)
-        join_token = re.search("^\"stdout\": \"$", out)
-        m = re.search('Join command is kubeadm join(.+?)\"', out)
-        if m:
-            found = m.group(1)
-            m = re.search('--token (.+?) --discovery-token-ca-cert-hash', found)
-            if m:
-                join_token = m.group(1)
-            m = re.search('--discovery-token-ca-cert-hash (.+?) "}', out)
-            if m:
-                discovery_token_ca_cert_hash = m.group(1)
-            m = re.search('--token (.+?) --discovery-token-ca-cert-hash', found)
-            if m:
-                join_token = m.group(1)
-            m = re.search('"stdout": " (.+?)",', out)
-            if m:
-                api_key = m.group(1)
-    return api_key, join_token, discovery_token_ca_cert_hash
-
-
-def execute_playbook(hosts, playbook_path, user, ssh_key_file, extra_vars, passwords):
-    if not os.path.exists(playbook_path):
-        logger.error('The playbook does not exist')
-        return '[ERROR] The playbook does not exist'
-    os.environ['ANSIBLE_HOST_KEY_CHECKING'] = 'false'
-    ansible.constants.HOST_KEY_CHECKING = False
-    os.environ['ANSIBLE_SSH_RETRIES'] = 'retry_count'
-    ansible.constants.ANSIBLE_SSH_RETRIES = 3
-    variable_manager = VariableManager()
-    loader = DataLoader()
-    # inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=hosts)
-    Options = namedtuple('Options', ['listtags', 'listtasks', 'listhosts', 'syntax', 'connection',
-                                     'module_path', 'forks', 'remote_user', 'private_key_file',
-                                     'ssh_common_args', 'ssh_extra_args', 'sftp_extra_args',
-                                     'scp_extra_args', 'become', 'become_method', 'become_user',
-                                     'verbosity', 'check', 'host_key_checking', 'retries'])
-    options = Options(listtags=False, listtasks=False, listhosts=False, syntax=False,
-                      connection='smart', module_path=None, forks=None, remote_user=user,
-                      private_key_file=ssh_key_file, ssh_common_args='', ssh_extra_args='',
-                      sftp_extra_args=None, scp_extra_args=None, become=True,
-                      become_method='sudo', become_user='root', verbosity=None, check=False,
-                      host_key_checking=False, retries=retry_count)
-    variable_manager.extra_vars = extra_vars
-    # pbex = PlaybookExecutor(playbooks=[playbook_path],
-    #                         inventory=inventory,
-    #                         variable_manager=variable_manager,
-    #                         loader=loader,
-    #                         options=options,
-    #                         passwords=passwords,
-    #                         )
+def execute(nodes_pair):
+    target = nodes_pair[0]
+    source = nodes_pair[1]
+    pass
\ No newline at end of file
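Note: this commit reduces ansible_service.py to the execute(nodes_pair) stub above; inventory generation, playbook download and the ansible-playbook invocation are all removed. For reference, a minimal standalone sketch of the subprocess pattern the removed run() helper used might look as follows. The function name run_playbook and the paths are hypothetical, and it assumes ansible-playbook is installed and on PATH:

import logging
from subprocess import Popen, PIPE

logger = logging.getLogger(__name__)


def run_playbook(inventory_path, playbook_path):
    # Shell out to ansible-playbook with host key checking disabled,
    # mirroring the removed run() helper.
    p = Popen(["ansible-playbook", "-i", inventory_path, playbook_path,
               "--ssh-common-args=-o StrictHostKeyChecking=no"],
              stdin=PIPE, stdout=PIPE, stderr=PIPE)
    output, err = p.communicate()
    logger.info("Playbook output: %s", output.decode('utf-8'))
    logger.info("Playbook err: %s", err.decode('utf-8'))
    return output, err, p.returncode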
deployer/service/deploy_service.py  (view file @ 17dceead)

-def get_interface_types(target):
-    print(target.node_template.interfaces)
-    interface_types = []
-    for interface in target.node_template.interfaces:
-        interface_types.append(interface)
-    return interface_types
+from service import tosca_helper
+from service.ansible_service import execute


 def deploy(nodes_pair):
     target = nodes_pair[0]
     source = nodes_pair[1]
-    interface_types = get_interface_types(source)
-    if 'Kubernetes' in interface_types:
-        kubernetes_service.deploy(nodes_pair)
-    elif 'Kubernetes' in interface_types:
+    interface_types = tosca_helper.get_interface_types(source)
+    if 'Standard' in interface_types:
+        execute(nodes_pair)
     print(source)
     print(target)
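The new branch is a plain membership test: tosca_helper.get_interface_types() collects the interface names declared on the source node's template, and pairs whose source exposes a 'Standard' interface are handed to ansible_service.execute() (still a stub in this commit). A toy sketch of that check; the interfaces dictionary below is hypothetical, modelled on the playbook inputs used by the removed write_playbooks() helper:

# Hypothetical node interface definition, for illustration only.
interfaces = {'Standard': {'create': {'inputs': {'playbook': 'https://example.org/playbook.yaml'}}}}

interface_types = [name for name in interfaces]  # roughly what get_interface_types yields: ['Standard']
if 'Standard' in interface_types:
    print("deploy() would hand this nodes_pair to ansible_service.execute()")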
deployer/service/tosca_helper.py  0 → 100644  (new file, view file @ 17dceead)

+from sure_tosca_client import Configuration, ApiClient
+from sure_tosca_client.api import default_api
+
+
+class ToscaHelper:
+
+    def __init__(self, sure_tosca_base_url, tosca_template_path):
+        self.sure_tosca_base_url = sure_tosca_base_url
+        self.tosca_template_path = tosca_template_path
+        self.tosca_client = self.init_sure_tosca_client(sure_tosca_base_url)
+        self.doc_id = self.upload_tosca_template(tosca_template_path)
+
+    def upload_tosca_template(self, file_path):
+        file_id = self.tosca_client.upload_tosca_template(file_path)
+        return file_id
+
+    def init_sure_tosca_client(self, sure_tosca_base_path):
+        configuration = Configuration()
+        configuration.host = sure_tosca_base_path
+        api_client = ApiClient(configuration=configuration)
+        api = default_api.DefaultApi(api_client=api_client)
+        return api
+
+    def get_interface_types(target):
+        print(target.node_template.interfaces)
+        interface_types = []
+        for interface in target.node_template.interfaces:
+            interface_types.append(interface)
+        return interface_types
+
+    def get_application_nodes(self):
+        return self.tosca_client.get_node_templates(self.doc_id, type_name='tosca.nodes.ARTICONF.Application')
+
+    def get_deployment_node_pairs(self):
+        nodes_to_deploy = self.get_application_nodes()
+        nodes_pairs = []
+        for node in nodes_to_deploy:
+            related_nodes = self.tosca_client.get_related_nodes(self.doc_id, node.name)
+            for related_node in related_nodes:
+                pair = (related_node, node)
+                nodes_pairs.append(pair)
+        return nodes_pairs
+
+
+def get_interface_types(node):
+    interface_type_names = []
+    for interface in node.node_template.interfaces:
+        interface_type_names.append(interface)
+    return interface_type_names
\ No newline at end of file
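The updated test below drives this class end to end. A condensed usage sketch, assuming the sure-tosca REST service is reachable at http://localhost:8081/tosca-sure/1.0.0 and that application_example.yaml is a hypothetical TOSCA template on disk:

from service.tosca_helper import ToscaHelper

# Uploads the template on construction and keeps the returned document id.
helper = ToscaHelper('http://localhost:8081/tosca-sure/1.0.0', 'application_example.yaml')
print(helper.doc_id)

# All tosca.nodes.ARTICONF.Application node templates in the uploaded document.
app_nodes = helper.get_application_nodes()

# (related_node, application_node) pairs that deploy_service.deploy() consumes;
# deploy() unpacks each pair as target = pair[0], source = pair[1].
for pair in helper.get_deployment_node_pairs():
    print(pair)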
deployer/test/test_deployer.py  (view file @ 17dceead)

@@ -14,6 +14,7 @@ from sure_tosca_client import Configuration, ApiClient
 from sure_tosca_client.api import default_api
 from service.deploy_service import deploy
+from service.tosca_helper import ToscaHelper


 class TestDeployer(unittest.TestCase):

@@ -50,24 +51,16 @@ class TestDeployer(unittest.TestCase):
         with open(tosca_template_path, 'w') as outfile:
             yaml.dump(tosca_template_dict, outfile, default_flow_style=False)

-        tosca_client = self.init_sure_tosca_client('http://localhost:8081/tosca-sure/1.0.0')
-        doc_id = self.upload_tosca_template(tosca_template_path, tosca_client)
-        self.assertIsNotNone(doc_id)
-        nodes_to_deploy = tosca_client.get_node_templates(doc_id, type_name='tosca.nodes.ARTICONF.Application')
+        tosca_helper = ToscaHelper('http://localhost:8081/tosca-sure/1.0.0', tosca_template_path)
+        self.assertIsNotNone(tosca_helper.doc_id)
+        nodes_to_deploy = tosca_helper.get_application_nodes()
         self.assertIsNotNone(nodes_to_deploy)
-        nodes_pairs = []
-        infrastructure_nodes = []
-        for node in nodes_to_deploy:
-            related_nodes = tosca_client.get_related_nodes(doc_id, node.name)
-            for related_node in related_nodes:
-                # if related_node in nodes_to_deploy:
-                pair = (related_node, node)
-                nodes_pairs.append(pair)
+        nodes_pairs = tosca_helper.get_deployment_node_pairs()
+        self.assertIsNotNone(nodes_pairs)
         for node_pair in nodes_pairs:
             deploy(node_pair)

@@ -106,9 +99,7 @@ class TestDeployer(unittest.TestCase):
         # print(json.dumps(response))

-    def upload_tosca_template(self, file_path, api):
-        file_id = api.upload_tosca_template(file_path)
-        return file_id
-
     def get_tosca_file(self, file_name):

@@ -123,12 +114,7 @@ class TestDeployer(unittest.TestCase):
             'Starting from: ' + dir_path + ' Input TOSCA file: ' + input_tosca_file_path + ' not found')
         return input_tosca_file_path

-    def init_sure_tosca_client(self, sure_tosca_base_path):
-        configuration = Configuration()
-        configuration.host = sure_tosca_base_path
-        api_client = ApiClient(configuration=configuration)
-        api = default_api.DefaultApi(api_client=api_client)  # noqa: E501
-        return api
-
 if __name__ == '__main__':
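Running the updated test requires a live sure-tosca service; one possible local invocation (hypothetical, assuming the service answers at http://localhost:8081/tosca-sure/1.0.0 and the deployer package is importable) is:

import unittest

# Discover and run the deployer tests with the standard unittest runner.
suite = unittest.defaultTestLoader.discover('deployer/test', pattern='test_deployer.py')
unittest.TextTestRunner(verbosity=2).run(suite)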