UvA / CONF / Commits / cb7e459e
Commit cb7e459e authored Sep 17, 2019 by Spiros Koulouzis
add ubuntu 16.04
Parent: bab2b8a9
Showing 6 changed files with 157 additions and 140 deletions (+157 -140)
admin.conf            drip-deployer/admin.conf                      +0    -0
docker_kubernetes.py  drip-deployer/docker_kubernetes.py            +116  -114
docker_kubernetes.sh  drip-deployer/docker_kubernetes.sh            +1    -1
rpc_server.py         drip-deployer/rpc_server.py                   +16   -13
workspace.xml         drip_planner2/.idea/workspace.xml             +5    -2
basic_planner.py      drip_planner2/src/planner/basic_planner.py    +19   -10
drip-deployer/admin.conf (new file, 0 → 100644; view file @ cb7e459e)
drip-deployer/docker_kubernetes.py (view file @ cb7e459e)
#! /usr/bin/env python
# Copyright 2017 --Yang Hu--
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

__author__ = 'Yang Hu'

import paramiko, os
...
@@ -21,6 +21,7 @@ from vm_info import VmInfo
import linecache
import sys
import logging
# from drip_logging.drip_logging_handler import *
...
@@ -32,9 +33,9 @@ if not getattr(logger, 'handler_set', None):
    h.setFormatter(formatter)
    logger.addHandler(h)
    logger.handler_set = True

retry = 0


def PrintException():
    exc_type, exc_obj, tb = sys.exc_info()
...
@@ -44,105 +45,106 @@ def PrintException():
    linecache.checkcache(filename)
    line = linecache.getline(filename, lineno, f.f_globals)
    print 'EXCEPTION IN ({}, LINE {} "{}"): {}'.format(filename, lineno, line.strip(), exc_obj)


def install_manager(vm):
    try:
        logger.info("Starting kubernetes master installation on: " + (vm.ip))
        parentDir = os.path.dirname(os.path.abspath(vm.key))
        os.chmod(parentDir, 0o700)
        os.chmod(vm.key, 0o600)
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(vm.ip, username=vm.user, key_filename=vm.key)
        sftp = ssh.open_sftp()
        file_path = os.path.dirname(os.path.abspath(__file__))
        sftp.chdir('/tmp/')
        install_script = file_path + "/" + "docker_kubernetes.sh"
        sftp.put(install_script, "kubernetes_setup.sh")
-        #stdin, stdout, stderr = ssh.exec_command("sudo hostname ip-%s" % (vm.ip.replace('.','-')))
-        stdout.read()
+        # stdin, stdout, stderr = ssh.exec_command("sudo hostname ip-%s" % (vm.ip.replace('.','-')))
+        # stdout.read()
        stdin, stdout, stderr = ssh.exec_command("sudo sh /tmp/kubernetes_setup.sh")
        stdout.read()
        stdin, stdout, stderr = ssh.exec_command("sudo kubeadm reset")
        stdout.read()
        stdin, stdout, stderr = ssh.exec_command("sudo kubeadm init --apiserver-advertise-address=%s" % (vm.ip))
        retstr = stdout.readlines()
        stdin, stdout, stderr = ssh.exec_command("sudo cp /etc/kubernetes/admin.conf /tmp/")
        stdout.read()
        stdin, stdout, stderr = ssh.exec_command("sudo chown %s /tmp/admin.conf" % (vm.user))
        stdout.read()
        stdin, stdout, stderr = ssh.exec_command("sudo chgrp %s /tmp/admin.conf" % (vm.user))
        stdout.read()
        sftp.get("/tmp/admin.conf", file_path + "/admin.conf")
        logger.info("Finished kubernetes master installation on: " + (vm.ip))
    except Exception as e:
        global retry
        # print '%s: %s' % (vm.ip, e)
        logger.error(vm.ip + " " + str(e))
        PrintException()
        return "ERROR:" + vm.ip + " " + str(e)
    ssh.close()
    return retstr[-1]
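install_manager returns the last line printed by kubeadm init, and run() below passes that line to install_worker, which executes it with sudo. A minimal sketch of a guard that could sit between the two, assuming the usual single-line "kubeadm join <ip>:<port> --token ..." output; the helper and its use are illustrative and not part of this commit:

# Illustrative helper: sanity-check the line returned by install_manager
# before handing it to install_worker.
def looks_like_join_command(line):
    # kubeadm normally prints something like:
    #   kubeadm join 10.0.0.1:6443 --token <token> --discovery-token-ca-cert-hash sha256:<hash>
    line = line.strip()
    return line.startswith("kubeadm join") and "--token" in line

# Example (hypothetical values):
# join_cmd = install_manager(master_vm)
# if not looks_like_join_command(join_cmd):
#     raise RuntimeError("unexpected kubeadm init output: %r" % join_cmd)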
def install_worker(join_cmd, vm):
    try:
        logger.info("Starting kubernetes slave installation on: " + (vm.ip))
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(vm.ip, username=vm.user, key_filename=vm.key, timeout=30)
        parentDir = os.path.dirname(os.path.abspath(vm.key))
        os.chmod(parentDir, 0o700)
        os.chmod(vm.key, 0o600)
        sftp = ssh.open_sftp()
        sftp.chdir('/tmp/')
        file_path = os.path.dirname(os.path.abspath(__file__))
        install_script = file_path + "/" + "docker_kubernetes.sh"
        sftp.put(install_script, "kubernetes_setup.sh")
        stdin, stdout, stderr = ssh.exec_command("sudo hostname ip-%s" % (vm.ip.replace('.', '-')))
        stdout.read()
        stdin, stdout, stderr = ssh.exec_command("sudo sh /tmp/kubernetes_setup.sh")
        stdout.read()
        stdin, stdout, stderr = ssh.exec_command("sudo kubeadm reset")
        stdout.read()
        stdin, stdout, stderr = ssh.exec_command("sudo %s" % (join_cmd))
        stdout.read()
        logger.info("Finished kubernetes slave installation on: " + (vm.ip))
    except Exception as e:
        # print '%s: %s' % (vm.ip, e)
        logger.error(vm.ip + " " + str(e))
        return "ERROR:" + vm.ip + " " + str(e)
    ssh.close()
    return "SUCCESS"


def run(vm_list, rabbitmq_host, owner):
    # rabbit = DRIPLoggingHandler(host=rabbitmq_host, port=5672, user=owner)
    # logger.addHandler(rabbit)
    for i in vm_list:
        if i.role == "master":
            join_cmd = install_manager(i)
            if "ERROR" in join_cmd:
                return join_cmd
            else:
                join_cmd = join_cmd.encode()
                join_cmd = join_cmd.strip()
            break
    for i in vm_list:
        if i.role == "slave":
            worker_cmd = install_worker(join_cmd, i)
            if "ERROR" in worker_cmd:
                return worker_cmd
    file_path = os.path.dirname(os.path.abspath(__file__))
    kuber_file = open(file_path + "/admin.conf", "r")
    kuber_string = kuber_file.read()
    kuber_file.close()
    return kuber_string
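A minimal usage sketch of run(). The real VmInfo objects are built in rpc_server.py; the SimpleVm stand-in and the field values below (IPs, user, key path) are assumptions based only on the attributes the functions above read (ip, user, key, role):

# Illustrative stand-in with the attributes read above; the real VmInfo
# class lives in vm_info.py and may differ.
class SimpleVm:
    def __init__(self, ip, user, key, role):
        self.ip = ip
        self.user = user
        self.key = key
        self.role = role

vms = [
    SimpleVm('10.0.0.10', 'ubuntu', '/tmp/cluster.key', 'master'),  # hypothetical values
    SimpleVm('10.0.0.11', 'ubuntu', '/tmp/cluster.key', 'slave'),
]
# rabbitmq_host and owner are only referenced by the commented-out DRIP logging handler.
kube_config = run(vms, 'localhost', 'some_user')
print kube_config  # contents of the fetched admin.conf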
drip-deployer/docker_kubernetes.sh (view file @ cb7e459e)
#! /bin/bash
-sudo apt-get update && sudo apt-get install -y apt-transport-https
+sudo apt-get update && sudo apt-get upgrade -y && sudo apt-get install -y apt-transport-https
sudo curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
echo "deb http://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
sudo apt-get update
...
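install_manager and install_worker run this script with ssh.exec_command("sudo sh /tmp/kubernetes_setup.sh") and only drain stdout, so a failing apt step is silently ignored. A small sketch of an exit-status check using standard paramiko calls; the ssh and logger names are the ones used in the deployer above:

# Sketch: run the uploaded script and fail fast on a non-zero exit status.
stdin, stdout, stderr = ssh.exec_command("sudo sh /tmp/kubernetes_setup.sh")
exit_status = stdout.channel.recv_exit_status()  # blocks until the remote command finishes
if exit_status != 0:
    logger.error("kubernetes_setup.sh failed (%d): %s" % (exit_status, stderr.read()))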
drip-deployer/rpc_server.py (view file @ cb7e459e)
#!/usr/bin/env python

import pika
import base64
import json
import logging
import os
import time
from vm_info import VmInfo
import docker_kubernetes
import docker_engine
import docker_swarm
import docker_compose
import docker_service
import docker_check
import control_agent
import os.path
# import ansible_playbook
import sys, argparse
import sys
import time
from threading import Thread
from time import sleep
import os.path
import logging
import pika
import docker_check
import docker_compose
import docker_engine
import docker_kubernetes
import docker_service
import docker_swarm
from vm_info import VmInfo

global rabbitmq_host

if len(sys.argv) > 1:
...
@@ -69,6 +71,7 @@ def handleDelivery(message):
            manager_type = param["value"]
        elif name == "credential":
            value = param["value"]
            value = base64.b64decode(value)
            ip = param["attributes"]["IP"]
            user = param["attributes"]["user"]
            role = param["attributes"]["role"]
...
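These decoded fields eventually become the VmInfo objects handed to docker_kubernetes.run(). A rough sketch of that assembly, assuming the base64-decoded credential is a private key that has to be written to disk for paramiko; the VmInfo constructor and argument order shown here are assumptions, not the real vm_info.py API:

import os
import tempfile
from vm_info import VmInfo

def build_vm(ip, user, role, decoded_key):
    # Write the decoded private key to a temp file so paramiko can use key_filename.
    fd, key_path = tempfile.mkstemp(suffix='.key')
    with os.fdopen(fd, 'w') as key_file:
        key_file.write(decoded_key)
    os.chmod(key_path, 0o600)  # same permissions the deployer enforces before connecting
    return VmInfo(ip, user, key_path, role)  # argument order is an assumption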
drip_planner2/.idea/workspace.xml (view file @ cb7e459e)
...
@@ -2,8 +2,11 @@
<project version="4">
  <component name="ChangeListManager">
    <list default="true" id="462ede19-adfe-472b-975e-fefefa973fe0" name="Default Changelist" comment="slolved cap error">
      <change beforePath="$PROJECT_DIR$/../drip-api/src/main/java/nl/uva/sne/drip/api/service/DeployService.java" beforeDir="false" afterPath="$PROJECT_DIR$/../drip-api/src/main/java/nl/uva/sne/drip/api/service/DeployService.java" afterDir="false" />
      <change beforePath="$PROJECT_DIR$/../drip-commons/src/main/java/nl/uva/sne/drip/commons/utils/TOSCAUtils.java" beforeDir="false" afterPath="$PROJECT_DIR$/../drip-commons/src/main/java/nl/uva/sne/drip/commons/utils/TOSCAUtils.java" afterDir="false" />
      <change beforePath="$PROJECT_DIR$/../drip-deployer/docker_kubernetes.py" beforeDir="false" afterPath="$PROJECT_DIR$/../drip-deployer/docker_kubernetes.py" afterDir="false" />
      <change beforePath="$PROJECT_DIR$/../drip-deployer/docker_kubernetes.sh" beforeDir="false" afterPath="$PROJECT_DIR$/../drip-deployer/docker_kubernetes.sh" afterDir="false" />
      <change beforePath="$PROJECT_DIR$/../drip-deployer/rpc_server.py" beforeDir="false" afterPath="$PROJECT_DIR$/../drip-deployer/rpc_server.py" afterDir="false" />
      <change beforePath="$PROJECT_DIR$/.idea/workspace.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/workspace.xml" afterDir="false" />
      <change beforePath="$PROJECT_DIR$/src/planner/basic_planner.py" beforeDir="false" afterPath="$PROJECT_DIR$/src/planner/basic_planner.py" afterDir="false" />
      <change beforePath="$PROJECT_DIR$/venv/lib/python3.6/site-packages/easy-install.pth" beforeDir="false" />
      <change beforePath="$PROJECT_DIR$/venv/lib/python3.6/site-packages/prettytable.py" beforeDir="false" />
      <change beforePath="$PROJECT_DIR$/venv/lib/python3.6/site-packages/pyparsing.py" beforeDir="false" />
...
drip_planner2/src/planner/basic_planner.py (view file @ cb7e459e)
...
@@ -20,28 +20,32 @@ import logging
def get_cpu_frequency():
    return '2.9 GHz'

def get_num_cpus():
    return 1

def get_cloud_domain():
    return 'UvA (Amsterdam, The Netherlands) XO Rack'

def get_cloud_provider():
    return 'ExoGeni'

def get_disk_size():
    return '25000 MB'

def get_mem_size():
    return '3000 MB'

def get_os_distribution():
-    return 'Ubuntu 14.04'
+    return 'Ubuntu 16.04'

-def set_VM_properties(node_template_dict):
+def set_vm_properties(node_template_dict):
    node_template_dict['properties'].pop('host_name')
    node_template_dict['properties']['host_name'] = 'vm'
    node_template_dict['properties'].pop('num_cpus')
...
@@ -56,6 +60,7 @@ def set_VM_properties(node_template_dict):
    node_template_dict['properties']['os'] = get_os_distribution()
    return node_template_dict
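Only part of set_vm_properties is visible in this diff, but the lines shown overwrite host_name with 'vm' and set os from get_os_distribution(). A small illustrative call on a made-up node_template dict; the full set of properties the real function rewrites is not shown here:

# Hypothetical input; a real TOSCA node_template carries more properties.
node_template = {
    'properties': {
        'host_name': 'my-node',
        'num_cpus': 2,
        'mem_size': '1000 MB',
        'disk_size': '10000 MB',
        'os': 'Ubuntu 14.04',
    }
}
node_template = set_vm_properties(node_template)
print(node_template['properties']['host_name'])  # 'vm', per the lines shown above
print(node_template['properties']['os'])         # 'Ubuntu 16.04' after this commit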
def set_topology_properties(node_template_dict):
    node_template_dict['properties'].pop('provider')
    node_template_dict['properties']['provider'] = get_cloud_provider()
...
@@ -71,9 +76,10 @@ def fill_in_properties(nodetemplate_dict):
    if 'type' in nodetemplate_dict and nodetemplate_dict['type'] == 'tosca.nodes.ARTICONF.VM.topology':
        nodetemplate_dict = set_topology_properties(nodetemplate_dict)
    if 'type' in nodetemplate_dict and nodetemplate_dict['type'] == 'tosca.nodes.ARTICONF.VM.Compute':
-        nodetemplate_dict = set_VM_properties(nodetemplate_dict)
+        nodetemplate_dict = set_vm_properties(nodetemplate_dict)
    return nodetemplate_dict


def fix_occurrences(node_templates):
    # Replace 'occurrences': [1, 'UNBOUNDED'] with 'occurrences': 1
    for node in node_templates:
...
@@ -82,6 +88,7 @@ def fix_occurrences(node_templates):
            req[next(iter(req))].pop('occurrences')
    return node_templates
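The comment in fix_occurrences states the intent: requirements carrying 'occurrences': [1, 'UNBOUNDED'] are stripped down so the planner can handle them. A tiny before/after on a made-up requirement entry; the exact node_templates shape comes from the TOSCA template, so treat the dict below as an assumption:

# Hypothetical requirement entry as it might appear in a node template.
req = {'vm': {'node': 'compute_1', 'occurrences': [1, 'UNBOUNDED']}}

# The loop body shown above, applied to a single requirement:
req[next(iter(req))].pop('occurrences')

print(req)  # {'vm': {'node': 'compute_1'}}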
def node_type_2_node_template(node_type):
    nodetemplate_dict = {}
    type_name = next(iter(node_type))
...
@@ -121,6 +128,8 @@ def get_requirement_occurrences(req):
    if 'occurrences' in req:
        return req['occurrences']
    return None


def fix_duplicate_vm_names(yaml_str):
    topology_dict = yaml.load(yaml_str)
    node_templates = topology_dict['tosca_template']['node_templates']
...
@@ -131,10 +140,10 @@ def fix_duplicate_vm_names(yaml_str):
    for node_name in node_templates:
        if node_templates[node_name]['type'] == 'tosca.nodes.ARTICONF.VM.topology':
            i = 0
            for req in node_templates[node_name]['requirements']:
                req['vm']['node'] = vm_names[i]
                i += 1
    return yaml.dump(topology_dict)
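A side note on the yaml.load(yaml_str) call in fix_duplicate_vm_names: PyYAML 5.1+ warns when no Loader is given, and full loading is unnecessary for plain TOSCA YAML. A hedged alternative, not part of this commit:

import yaml

def load_topology(yaml_str):
    # safe_load builds only standard mappings/sequences/scalars,
    # which is all a plain TOSCA template needs.
    return yaml.safe_load(yaml_str)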
...
@@ -163,11 +172,11 @@ class BasicPlanner:
        yaml_str = yaml_str.replace('tosca_definitions_version: tosca_simple_yaml_1_0', '')
        yaml_str = yaml_str.replace('description: TOSCA example', '')
        yaml_str = yaml_str.replace('tosca_template', 'topology_template')
        self.formatted_yaml_str = 'tosca_definitions_version: tosca_simple_yaml_1_0\nrepositories:\n  docker_hub: https://hub.docker.com/\n' + yaml_str
        logging.info('TOSCA template:\n' + self.formatted_yaml_str)

    def get_plan(self):
        return self.formatted_yaml_str

    def get_missing_requirements(self, node):
        logging.info('Looking for requirements for node: ' + node.name)
...
@@ -224,7 +233,8 @@ class BasicPlanner:
        # Only return the nodes that have interfaces. This means that they are not "abstract"
        for candidate_node_name in candidate_nodes:
            if 'interfaces' in candidate_nodes[candidate_node_name].keys() and 'tosca.nodes.Root' != candidate_node_name:
                capable_nodes[candidate_node_name] = candidate_nodes[candidate_node_name]
        return capable_nodes
...
@@ -308,4 +318,3 @@ class BasicPlanner:
            node_templates.append(node)
        return node_templates