Commit c4891ab0 authored by Spiros Koulouzis

added new planner and Kubernetes interface

parent 02c7f99e
@@ -58,4 +58,21 @@ interface_types:
       description: Recover some failed sub-topologies. ObjectType can be SubTopology or REQ.
     provision:
       description: Provision the defined objects (resources). ObjectType can be SubTopology or VM.
+  tosca.interfaces.ARTICONF.Kubernetes:
+    derived_from: tosca.interfaces.node.lifecycle.Standard
+    inputs:
+      inventory_file:
+        type: string
+        required: true
+        default: k8s_hosts
+      playbook:
+        type: string
+        required: true
+        default: https://raw.githubusercontent.com/skoulouzis/CONF/DRIP_3.0/ansible_playbooks/install_k8s.yml
+    install:
+      description: install Kubernetes
@@ -70,11 +70,13 @@ node_types:
     derived_from: tosca.nodes.ARTICONF.Orchestrator
     description: Kubernetes orchestrator
     interfaces:
-      Standard:
-        create:
-          inputs:
-            inventory-file: k8s_hosts
-            playbook: https://raw.githubusercontent.com/skoulouzis/CONF/DRIP_3.0/ansible_playbooks/install_k8s.yml
+      Kubernetes:
+        type: tosca.interfaces.ARTICONF.Kubernetes
+        install:
+          inputs:
+            inventory_file: k8s_hosts
+            playbook: https://raw.githubusercontent.com/skoulouzis/CONF/DRIP_3.0/ansible_playbooks/install_k8s.yml
     #tosca.nodes.ARTICONF.Orchestrator.Swarm:
     #derived_from: tosca.nodes.ARTICONF.Orchestrator
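For orientation, a minimal sketch (illustrative, not part of the commit) of how a deployer could read the install inputs of the new Kubernetes interface out of a parsed template. The template shape mirrors the node_types diff above; the helper service.tosca.get_interfaces used later in this commit presumably does something similar, but its exact return shape is an assumption here.

import yaml

with open('tosca_template.yml') as f:  # hypothetical template file
    template = yaml.safe_load(f)

for name, node in template.get('topology_template', {}).get('node_templates', {}).items():
    k8s = node.get('interfaces', {}).get('Kubernetes')
    if not k8s:
        continue
    inputs = k8s.get('install', {}).get('inputs', {})
    # defaults mirror tosca.interfaces.ARTICONF.Kubernetes above
    inventory_file = inputs.get('inventory_file', 'k8s_hosts')
    playbook = inputs.get('playbook')
    print(name, inventory_file, playbook)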
<?xml version="1.0" encoding="UTF-8"?> <?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4"> <module type="PYTHON_MODULE" version="4">
<component name="NewModuleRootManager"> <component name="NewModuleRootManager">
<content url="file://$MODULE_DIR$"> <content url="file://$MODULE_DIR$" />
<excludeFolder url="file://$MODULE_DIR$/venv" /> <orderEntry type="inheritedJdk" />
</content>
<orderEntry type="jdk" jdkName="Python 2.7 (drip-deployer)" jdkType="Python SDK" />
<orderEntry type="sourceFolder" forTests="false" /> <orderEntry type="sourceFolder" forTests="false" />
</component> </component>
<component name="TestRunnerService"> <component name="PyDocumentationSettings">
<option name="PROJECT_TEST_RUNNER" value="Unittests" /> <option name="format" value="PLAIN" />
<option name="myDocStringFormat" value="Plain" />
</component> </component>
</module> </module>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?> <?xml version="1.0" encoding="UTF-8"?>
<project version="4"> <project version="4">
<component name="ProjectRootManager" version="2" project-jdk-name="Python 2.7 (drip-deployer)" project-jdk-type="Python SDK" /> <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.8 (drip-deployer)" project-jdk-type="Python SDK" />
</project> </project>
\ No newline at end of file
FROM python:3.7-buster
RUN mkdir -p /usr/src/app
WORKDIR /usr/src/app
COPY requirements.txt /usr/src/app/
RUN pip3 install --no-cache-dir -r requirements.txt
COPY . /usr/src/app
EXPOSE 8081
ENTRYPOINT ["python3 __main__.py $RABBITMQ_HOST deployer_queue"]
# To change this license header, choose License Headers in Project Properties.
# To change this template file, choose Tools | Templates
# and open the template in the editor.
import json
import logging
import sys
import time

import pika
import yaml

from service import tosca
logger = logging.getLogger(__name__)
# if not getattr(logger, 'handler_set', None):
# logger.setLevel(logging.INFO)
# h = logging.StreamHandler()
# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# h.setFormatter(formatter)
# logger.addHandler(h)
# logger.handler_set = True
def init_channel(args):
    global rabbitmq_host, queue_name
    if len(args) > 2:
        rabbitmq_host = args[1]
        queue_name = args[2]  # e.g. deployer_queue
    else:
        rabbitmq_host = '127.0.0.1'
        queue_name = 'deployer_queue'
    connection = pika.BlockingConnection(pika.ConnectionParameters(host=rabbitmq_host))
    channel = connection.channel()
    channel.queue_declare(queue=queue_name)
    return channel
def start(this_channel):
    this_channel.basic_qos(prefetch_count=1)
    this_channel.basic_consume(queue=queue_name, on_message_callback=on_request)
    logger.info(" [x] Awaiting RPC requests")
    this_channel.start_consuming()
def on_request(ch, method, props, body):
    response = handle_delivery(body)
    ch.basic_publish(exchange='',
                     routing_key=props.reply_to,
                     properties=pika.BasicProperties(correlation_id=props.correlation_id),
                     body=str(response))
    ch.basic_ack(delivery_tag=method.delivery_tag)
def handle_delivery(message):
    logger.info("Got: " + str(message))
    try:
        message = message.decode()
    except (UnicodeDecodeError, AttributeError):
        pass
    parsed_json_message = json.loads(message)
    owner = parsed_json_message['owner']
    template_dict = parsed_json_message['toscaTemplate']
    interfaces = tosca.get_interfaces(template_dict)
    logger.info("template ----: \n" + yaml.dump(template_dict))
    # for now the planner echoes the template back, stamped with a creation date
    response = {'toscaTemplate': template_dict}
    response["creationDate"] = int(round(time.time() * 1000))
    response["parameters"] = []
    logger.info("Returning plan")
    logger.info("Output message:" + json.dumps(response))
    return json.dumps(response)
if __name__ == "__main__":
    logging.basicConfig(level=logging.INFO)
    logger.info("Input args: " + " ".join(sys.argv))
    channel = init_channel(sys.argv)
    start(channel)
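For completeness, a minimal RPC client for this consumer might look like the sketch below. The queue name and the {'owner', 'toscaTemplate'} message shape come from the code above; the host and payload values are illustrative.

import json
import uuid

import pika

connection = pika.BlockingConnection(pika.ConnectionParameters(host='127.0.0.1'))
channel = connection.channel()
# exclusive, auto-named queue to receive the reply on
callback_queue = channel.queue_declare(queue='', exclusive=True).method.queue
corr_id = str(uuid.uuid4())

request = {'owner': 'alice', 'toscaTemplate': {}}  # illustrative payload
channel.basic_publish(exchange='',
                      routing_key='deployer_queue',
                      properties=pika.BasicProperties(reply_to=callback_queue,
                                                      correlation_id=corr_id),
                      body=json.dumps(request))

def on_reply(ch, method, props, body):
    # match the reply to our request via the correlation id
    if props.correlation_id == corr_id:
        print(json.loads(body))
        ch.stop_consuming()

channel.basic_consume(queue=callback_queue, on_message_callback=on_reply, auto_ack=True)
channel.start_consuming()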
#!/usr/bin/env python
import json
import os
import sys
from collections import namedtuple

from ansible.parsing.dataloader import DataLoader
from ansible.vars import VariableManager
from ansible.inventory import Inventory
from ansible.executor.playbook_executor import PlaybookExecutor

from results_collector import ResultsCollector

hosts = '172.17.0.2,'
playbook_path = 'playbook.yml'
if not os.path.exists(playbook_path):
    print('[ERROR] The playbook does not exist')
    sys.exit(1)
user = 'vm_user'
ssh_key_file = 'id_rsa'
extra_vars = {} #{'resultslocation':'/tmp/res','ansible_sudo_pass':'123'}
variable_manager = VariableManager()
loader = DataLoader()
inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=hosts)
Options = namedtuple('Options', ['listtags', 'listtasks', 'listhosts', 'syntax', 'connection','module_path', 'forks', 'remote_user', 'private_key_file', 'ssh_common_args', 'ssh_extra_args', 'sftp_extra_args', 'scp_extra_args', 'become', 'become_method', 'become_user', 'verbosity', 'check'])
options = Options(listtags=False, listtasks=False, listhosts=False, syntax=False, connection='smart', module_path=None, forks=None, remote_user=user, private_key_file=ssh_key_file, ssh_common_args=None, ssh_extra_args=None, sftp_extra_args=None, scp_extra_args=None, become=True, become_method='sudo', become_user='root', verbosity=None, check=False)
variable_manager.extra_vars = extra_vars
passwords = {}
pbex = PlaybookExecutor(playbooks=[playbook_path],
inventory=inventory,
variable_manager=variable_manager,
loader=loader,
options=options,
passwords=passwords,
)
results_callback = ResultsCollector()
pbex._tqm._stdout_callback = results_callback
results = pbex.run()
answer = []
for res in results_callback.host_ok:
    answer.append({"host": res['ip'], "result": res['result']._result})
for res in results_callback.host_unreachable:
    answer.append({"host": res['ip'], "result": res['result']._result})
for res in results_callback.host_failed:
    answer.append({"host": res['ip'], "result": res['result']._result})
print(json.dumps(answer, indent=4))
\ No newline at end of file
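The ResultsCollector imported above is not part of this diff. A minimal sketch of the callback plugin it implies, against the same pre-2.4 Ansible API, bucketing results into host_ok, host_unreachable and host_failed with the 'ip'/'result'/'task' keys the loops above read:

from ansible.plugins.callback import CallbackBase

class ResultsCollector(CallbackBase):
    """Collects per-host task results instead of printing them."""

    def __init__(self, *args, **kwargs):
        super(ResultsCollector, self).__init__(*args, **kwargs)
        self.host_ok = []
        self.host_unreachable = []
        self.host_failed = []

    def _record(self, bucket, result):
        # same keys the consumers above expect: res['ip'], res['result'], res['task']
        bucket.append({'ip': result._host.get_name(),
                       'result': result,
                       'task': result._task.get_name()})

    def v2_runner_on_ok(self, result, **kwargs):
        self._record(self.host_ok, result)

    def v2_runner_on_unreachable(self, result, **kwargs):
        self._record(self.host_unreachable, result)

    def v2_runner_on_failed(self, result, ignore_errors=False, **kwargs):
        self._record(self.host_failed, result)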
#! /usr/bin/env python
# Copyright 2017 S. Koulouzis
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'S. Koulouzis'
import json
import logging
import multiprocessing
import os
import yaml
from collections import namedtuple

import paramiko
import ansible.constants
import ansible.parsing.yaml.objects
from ansible.parsing.dataloader import DataLoader
from ansible.vars import VariableManager
from ansible.inventory import Inventory
from ansible.executor.playbook_executor import PlaybookExecutor

from results_collector import ResultsCollector
from drip_logging.drip_logging_handler import *
logger = logging.getLogger(__name__)
if not getattr(logger, 'handler_set', None):
    logger.setLevel(logging.INFO)
    h = logging.StreamHandler()
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    h.setFormatter(formatter)
    logger.addHandler(h)
    logger.handler_set = True
retry = 0
retry_count = 20
tasks_done = {}
#cwd = os.getcwd()
failed_playbook_path = '/tmp/failed_playbook.yml'
def install_prerequisites(vm, return_dict):
    global retry
    try:
        logger.info("Installing ansible prerequisites on: " + vm.ip)
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(vm.ip, username=vm.user, key_filename=vm.key, timeout=30)
        sftp = ssh.open_sftp()
        file_path = os.path.dirname(os.path.abspath(__file__))
        sftp.chdir('/tmp/')
        install_script = file_path + "/" + "ansible_setup.sh"
        sftp.put(install_script, "ansible_setup.sh")
        stdin, stdout, stderr = ssh.exec_command("sudo hostname ip-%s" % (vm.ip.replace('.', '-')))
        logger.info("stdout: " + stdout.read().decode())
        logger.info("stderr: " + stderr.read().decode())
        stdin, stdout, stderr = ssh.exec_command("sudo sh /tmp/ansible_setup.sh")
        logger.info("stdout: " + stdout.read().decode())
        logger.info("stderr: " + stderr.read().decode())
        logger.info("Ansible prerequisites installed on: " + vm.ip)
    except Exception as e:
        if retry < 10:
            logger.warning(vm.ip + " " + str(e) + ". Retrying")
            retry += 1
            return install_prerequisites(vm, return_dict)
        logger.error(vm.ip + " " + str(e))
        return_dict[vm.ip] = "ERROR:" + vm.ip + " " + str(e)
        return "ERROR:" + vm.ip + " " + str(e)
    ssh.close()
    return_dict[vm.ip] = "SUCCESS"
    retry = 0
    return "SUCCESS"
def create_failed_playbooks(failed_tasks, playbook_path):
    plays = []
    while not plays:
        logger.info("Loading: '" + playbook_path + "'")
        with open(playbook_path) as stream:
            plays = yaml.safe_load(stream)
    logger.info("Number of plays loaded: '" + str(len(plays)) + "'")
    logger.info("Number of failed tasks: '" + str(len(failed_tasks)) + "'")
    failed_playbooks = []
    failed = failed_tasks[0]
    host = failed['ip']
    failed_task = failed['task']
    failed_plays = []
    failed_play = {}
    retry_task = []
    if isinstance(failed_task, (ansible.parsing.yaml.objects.AnsibleUnicode, str)):
        task_name = str(failed_task)
    else:
        task_name = str(failed_task._task.get_name())
    hosts = host + ","
    # the implicit 'setup' (fact gathering) task means the play failed at its very start
    if task_name == 'setup':
        found_first_failed_task = True
    else:
        found_first_failed_task = False
    for play in plays:
        for task in play['tasks']:
            if found_first_failed_task:
                retry_task.append(task)
            else:
                if task_name in tasks_done:
                    host_done = tasks_done[task_name]
                    if host_done == host or host_done == 'all':
                        logger.info("Task: '" + task_name + "' on host: " + host + " already done. Skipping")
                        continue
                if 'name' in task and task['name'] == task_name:
                    retry_task.append(task)
                    logger.info("First failed task: '" + task_name + "'. Host: " + host)
                    found_first_failed_task = True
                elif task_name in task:
                    retry_task.append(task)
                    logger.info("First failed task: '" + task_name + "'. Host: " + host)
                    found_first_failed_task = True
    failed_play['hosts'] = hosts
    failed_play['tasks'] = retry_task
    failed_plays.append(failed_play)
    failed_playbooks.append(failed_plays)
    logger.info("Number of returned playbooks: '" + str(len(failed_playbooks)) + "'")
    return failed_playbooks
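A hypothetical call, to make the data flow concrete; the failed_tasks entries mirror what execute_playbook below collects, and all values are illustrative:

failed_tasks = [{'ip': '10.0.0.5', 'task': 'install docker', 'result': None}]  # illustrative
playbooks = create_failed_playbooks(failed_tasks, 'install_k8s.yml')
# playbooks[0] holds a single play targeting '10.0.0.5,' with the tasks from the
# first failed one onwards; run() below dumps it to /tmp/failed_playbook.yml and re-runs it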
def execute_playbook(hosts, playbook_path, user, ssh_key_file, extra_vars, passwords):
    if not os.path.exists(playbook_path):
        logger.error('The playbook does not exist')
        return '[ERROR] The playbook does not exist'
    os.environ['ANSIBLE_HOST_KEY_CHECKING'] = 'false'
    ansible.constants.HOST_KEY_CHECKING = False
    os.environ['ANSIBLE_SSH_RETRIES'] = str(retry_count)
    ansible.constants.ANSIBLE_SSH_RETRIES = retry_count
    variable_manager = VariableManager()
    loader = DataLoader()
    inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=hosts)
    Options = namedtuple('Options', ['listtags', 'listtasks', 'listhosts', 'syntax', 'connection', 'module_path', 'forks', 'remote_user', 'private_key_file', 'ssh_common_args', 'ssh_extra_args', 'sftp_extra_args', 'scp_extra_args', 'become', 'become_method', 'become_user', 'verbosity', 'check', 'host_key_checking', 'retries'])
    options = Options(listtags=False, listtasks=False, listhosts=False, syntax=False, connection='smart', module_path=None, forks=None, remote_user=user, private_key_file=ssh_key_file, ssh_common_args='', ssh_extra_args='', sftp_extra_args=None, scp_extra_args=None, become=True, become_method='sudo', become_user='root', verbosity=None, check=False, host_key_checking=False, retries=retry_count)
    variable_manager.extra_vars = extra_vars
    pbex = PlaybookExecutor(playbooks=[playbook_path],
                            inventory=inventory,
                            variable_manager=variable_manager,
                            loader=loader,
                            options=options,
                            passwords=passwords)
    results_callback = ResultsCollector()
    pbex._tqm._stdout_callback = results_callback
    results = pbex.run()
    answer = []
    failed_tasks = []
    global tasks_done
    for res in results_callback.host_ok:
        #failed_tasks.append(res)
        logger.info("Task: " + res['task'] + ". Host: " + res['ip'] + ". State: ok")
        tasks_done[res['task']] = res['ip']
        answer.append({"host": res['ip'], "result": res['result']._result, "task": res['task']})
    for res in results_callback.host_unreachable:
        failed_tasks.append(res)
        logger.warning("Task: " + res['task'] + ". Host: " + res['ip'] + ". State: unreachable")
        answer.append({"host": res['ip'], "result": res['result']._result, "task": res['task']})
    for res in results_callback.host_failed:
        #failed_tasks.append(res['result'])
        resp = json.dumps({"host": res['ip'], "result": res['result']._result, "task": res['task']})
        logger.error("Task: " + res['task'] + ". Host: " + res['ip'] + ". State: host_failed")
        logger.error(resp)
        answer.append({"host": res['ip'], "result": res['result']._result, "task": res['task']})
    return answer, failed_tasks
def run(vm_list, playbook_path, rabbitmq_host, owner):
    hosts = ""
    ssh_key_file = ""
    rabbit = DRIPLoggingHandler(host=rabbitmq_host, port=5672, user=owner)
    logger.addHandler(rabbit)
    logger.info("DRIPLogging host: '" + str(rabbitmq_host) + "' logging message owner: '" + owner + "'")
    manager = multiprocessing.Manager()
    return_dict = manager.dict()
    jobs = []
    if os.path.exists(failed_playbook_path):
        os.remove(failed_playbook_path)
    for vm in vm_list:
        #ret = install_prerequisites(vm,return_dict)
        p = multiprocessing.Process(target=install_prerequisites, args=(vm, return_dict,))
        jobs.append(p)
        p.start()
        hosts += vm.ip + ","
        ssh_key_file = vm.key
        user = vm.user
    for proc in jobs:
        proc.join()
    # install_prerequisites stores "ERROR:<ip> ..." per host, so match on the prefix
    if any(str(v).startswith("ERROR") for v in return_dict.values()):
        return "ERROR"
    extra_vars = {}
    passwords = {}
    logger.info("Executing playbook: " + playbook_path)
    answer, failed_tasks = execute_playbook(hosts, playbook_path, user, ssh_key_file, extra_vars, passwords)
    failed_playbooks = []
    if failed_tasks:
        failed = failed_tasks[0]
        failed_task = failed['task']
        if isinstance(failed_task, (ansible.parsing.yaml.objects.AnsibleUnicode, str)):
            task_name = str(failed_task)
        else:
            task_name = str(failed_task._task.get_name())
        retry_setup = 0
        # a failed 'setup' means fact gathering itself failed, so re-run the whole playbook
        while task_name and task_name == 'setup' and retry_setup < retry_count:
            retry_setup += 1
            answer, failed_tasks = execute_playbook(hosts, playbook_path, user, ssh_key_file, extra_vars, passwords)
            if failed_tasks:
                failed = failed_tasks[0]
                failed_task = failed['task']
                if isinstance(failed_task, (ansible.parsing.yaml.objects.AnsibleUnicode, str)):
                    task_name = str(failed_task)
                else:
                    task_name = str(failed_task._task.get_name())
            else:
                task_name = None
        while not failed_playbooks:
            failed_playbooks = create_failed_playbooks(failed_tasks, playbook_path)
        for failed_playbook in failed_playbooks:
            hosts = failed_playbook[0]['hosts']
            logger.info("Writing new playbook at: '" + failed_playbook_path + "'")
            with open(failed_playbook_path, 'w') as outfile:
                yaml.dump(failed_playbook, outfile)
            retry_failed_tasks = 0
            failed_tasks = None
            done = False
            while not done:
                logger.info("Executing playbook: " + failed_playbook_path + " on hosts: " + hosts + ". Retries: " + str(retry_failed_tasks))
                answer, failed_tasks = execute_playbook(hosts, failed_playbook_path, user, ssh_key_file, extra_vars, passwords)
                retry_failed_tasks += 1
                if retry_failed_tasks > retry_count or not failed_tasks:
                    retry_failed_tasks = 0
                    done = True
                    break
            if os.path.exists(failed_playbook_path):
                os.remove(failed_playbook_path)
    if os.path.exists(failed_playbook_path):
        os.remove(failed_playbook_path)
    return json.dumps(answer)
\ No newline at end of file
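For orientation, a hypothetical driver for run(); the VM objects only need the ip, user, key and role attributes this code touches, and all names and values here are illustrative:

from collections import namedtuple

VM = namedtuple('VM', ['ip', 'user', 'key', 'role'])  # hypothetical VM shape
vms = [VM(ip='10.0.0.5', user='ubuntu', key='/tmp/keys/1.txt', role='master'),
       VM(ip='10.0.0.6', user='ubuntu', key='/tmp/keys/2.txt', role='worker')]
# installs the Ansible prerequisites on every VM in parallel, runs the playbook,
# then retries from the first failed task if anything went wrong
result = run(vms, 'install_k8s.yml', rabbitmq_host='127.0.0.1', owner='alice')
print(result)  # JSON list of per-host, per-task results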
#! /bin/bash
killall apt
rm /var/lib/dpkg/lock
dpkg --configure -a
apt-get update
export DEBIAN_FRONTEND=noninteractive
apt-get -o Dpkg::Options::="--force-confold" upgrade -q -y --force-yes
apt-get -y install software-properties-common python openssh-server sudo
service ssh restart
\ No newline at end of file
#! /usr/bin/env python
# Copyright 2017 --Yang Hu--
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Yang Hu'
import paramiko, os
import logging
# from drip_logging.drip_logging_handler import *
logger = logging.getLogger(__name__)
if not getattr(logger, 'handler_set', None):
    logger.setLevel(logging.INFO)
    h = logging.StreamHandler()
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    h.setFormatter(formatter)
    logger.addHandler(h)
    logger.handler_set = True
retry = 0
def install_agent(vm, vm_list):
    global retry
    try:
        logger.info("Starting control agent installation on: " + vm.ip)
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(vm.ip, username=vm.user, key_filename=vm.key, timeout=30)
        sftp = ssh.open_sftp()
        sftp.chdir('/tmp/')
        vm_cnt = 0
        file_path = os.path.dirname(os.path.abspath(__file__))
        fo = open(file_path + "/cluster_file", "w")
        for i in vm_list:
            vm_cnt += 1
            sftp.put(i.key, "%d" % (vm_cnt))
            fo.write("%s %s /tmp/%d %s\n" % (i.ip, i.user, vm_cnt, i.role))
        fo.close()
        sftp.put(file_path + "/cluster_file", "cluster_file")
        sftp.put(file_path + "/control_agent.sh", "control_agent.sh")
        stdin, stdout, stderr = ssh.exec_command("sudo sh /tmp/control_agent.sh")
        stdout.read()
        stdin, stdout, stderr = ssh.exec_command("nohup sudo python /root/Swarm-Agent/run.py>/dev/null 2>&1 &")
        stdout.read()
        logger.info("Finished control agent installation on: " + vm.ip)
    except Exception as e:
        if retry < 10:
            logger.warning(vm.ip + " " + str(e) + ". Retrying")
            retry += 1
            return install_agent(vm, vm_list)
        logger.error(vm.ip + " " + str(e))
        print('%s: %s' % (vm.ip, e))
        return "ERROR:" + vm.ip + " " + str(e)
    ssh.close()
    return "SUCCESS"
def run(vm_list):
    for i in vm_list:
        parentDir = os.path.dirname(os.path.abspath(i.key))
        os.chmod(parentDir, 0o700)
        os.chmod(i.key, 0o600)
        if i.role == "master":
            ret = install_agent(i, vm_list)
            if "ERROR" in ret: return ret
    return "SUCCESS"
#! /bin/bash
sudo apt-get update
sudo apt-get -y install git
sudo apt-get -y install wget
sudo apt-get -y install build-essential libssl-dev libffi-dev python-dev
cd /root/
sudo wget https://bootstrap.pypa.io/get-pip.py
sudo python get-pip.py
sudo pip install flask
sudo pip install paramiko
sudo git clone https://github.com/oceanshy/Swarm-Agent.git
DEB [20180615-17:11:41.096] thr=1 paramiko.transport: starting thread (client mode): 0xf7674e90L
DEB [20180615-17:11:41.097] thr=1 paramiko.transport: Local version/idstring: SSH-2.0-paramiko_2.1.2
DEB [20180615-17:11:41.126] thr=1 paramiko.transport: Remote version/idstring: SSH-2.0-OpenSSH_7.2p2 Ubuntu-4ubuntu2.4
INF [20180615-17:11:41.126] thr=1 paramiko.transport: Connected (version 2.0, client OpenSSH_7.2p2)
DEB [20180615-17:11:41.145] thr=1 paramiko.transport: kex algos:[u'curve25519-sha256@libssh.org', u'ecdh-sha2-nistp256', u'ecdh-sha2-nistp384', u'ecdh-sha2-nistp521', u'diffie-hellman-group-exchange-sha256', u'diffie-hellman-group14-sha1'] server key:[u'ssh-rsa', u'rsa-sha2-512', u'rsa-sha2-256', u'ecdsa-sha2-nistp256', u'ssh-ed25519'] client encrypt:[u'chacha20-poly1305@openssh.com', u'aes128-ctr', u'aes192-ctr', u'aes256-ctr', u'aes128-gcm@openssh.com', u'aes256-gcm@openssh.com'] server encrypt:[u'chacha20-poly1305@openssh.com', u'aes128-ctr', u'aes192-ctr', u'aes256-ctr', u'aes128-gcm@openssh.com', u'aes256-gcm@openssh.com'] client mac:[u'umac-64-etm@openssh.com', u'umac-128-etm@openssh.com', u'hmac-sha2-256-etm@openssh.com', u'hmac-sha2-512-etm@openssh.com', u'hmac-sha1-etm@openssh.com', u'umac-64@openssh.com', u'umac-128@openssh.com', u'hmac-sha2-256', u'hmac-sha2-512', u'hmac-sha1'] server mac:[u'umac-64-etm@openssh.com', u'umac-128-etm@openssh.com', u'hmac-sha2-256-etm@openssh.com', u'hmac-sha2-512-etm@openssh.com', u'hmac-sha1-etm@openssh.com', u'umac-64@openssh.com', u'umac-128@openssh.com', u'hmac-sha2-256', u'hmac-sha2-512', u'hmac-sha1'] client compress:[u'none', u'zlib@openssh.com'] server compress:[u'none', u'zlib@openssh.com'] client lang:[u''] server lang:[u''] kex follows?False
DEB [20180615-17:11:41.145] thr=1 paramiko.transport: Kex agreed: diffie-hellman-group14-sha1
DEB [20180615-17:11:41.145] thr=1 paramiko.transport: Cipher agreed: aes128-ctr
DEB [20180615-17:11:41.146] thr=1 paramiko.transport: MAC agreed: hmac-sha2-256
DEB [20180615-17:11:41.146] thr=1 paramiko.transport: Compression agreed: none
DEB [20180615-17:11:41.331] thr=1 paramiko.transport: kex engine KexGroup14 specified hash_algo <built-in function openssl_sha1>
DEB [20180615-17:11:41.331] thr=1 paramiko.transport: Switch to new keys ...
DEB [20180615-17:11:41.347] thr=2 paramiko.transport: Adding ssh-rsa host key for 147.228.242.108: aa458249495c431a65c21c5cfb1f70fa
DEB [20180615-17:11:41.348] thr=2 paramiko.transport: Trying key 3de4f984cbfc1ae64c5f8e1a2c6bf076 from /home/alogo/workspace/DRIP/drip-deployer/deployer_files/1529075500355/1.txt
DEB [20180615-17:11:41.410] thr=1 paramiko.transport: userauth is OK
INF [20180615-17:11:41.472] thr=1 paramiko.transport: Authentication (publickey) successful!
DEB [20180615-17:11:41.480] thr=2 paramiko.transport: [chan 0] Max packet in: 32768 bytes
DEB [20180615-17:11:41.501] thr=1 paramiko.transport: Received global request "hostkeys-00@openssh.com"
DEB [20180615-17:11:41.501] thr=1 paramiko.transport: Rejecting "hostkeys-00@openssh.com" global request from server.
DEB [20180615-17:11:41.559] thr=1 paramiko.transport: [chan 0] Max packet out: 32768 bytes
DEB [20180615-17:11:41.559] thr=1 paramiko.transport: Secsh channel 0 opened.
DEB [20180615-17:11:41.599] thr=1 paramiko.transport: [chan 0] Sesch channel 0 request ok
DEB [20180615-17:11:41.762] thr=1 paramiko.transport: [chan 0] EOF received (0)
DEB [20180615-17:11:41.763] thr=2 paramiko.transport: [chan 1] Max packet in: 32768 bytes
DEB [20180615-17:11:41.764] thr=1 paramiko.transport: [chan 0] EOF sent (0)
DEB [20180615-17:11:41.782] thr=1 paramiko.transport: [chan 1] Max packet out: 32768 bytes
DEB [20180615-17:11:41.782] thr=1 paramiko.transport: Secsh channel 1 opened.
DEB [20180615-17:11:41.855] thr=1 paramiko.transport: [chan 1] Sesch channel 1 request ok
DEB [20180615-17:11:42.008] thr=1 paramiko.transport: [chan 1] EOF received (1)
DEB [20180615-17:11:42.009] thr=2 paramiko.transport: [chan 2] Max packet in: 32768 bytes
DEB [20180615-17:11:42.009] thr=1 paramiko.transport: [chan 1] EOF sent (1)
DEB [20180615-17:11:42.041] thr=1 paramiko.transport: [chan 2] Max packet out: 32768 bytes
DEB [20180615-17:11:42.041] thr=1 paramiko.transport: Secsh channel 2 opened.
DEB [20180615-17:11:42.119] thr=1 paramiko.transport: [chan 2] Sesch channel 2 request ok
DEB [20180615-17:11:42.266] thr=1 paramiko.transport: [chan 2] EOF received (2)
DEB [20180615-17:11:42.266] thr=1 paramiko.transport: [chan 2] EOF sent (2)
[... roughly 260 further paramiko.transport DEBUG/INFO lines elided: the same connect / key-exchange / publickey-auth / channel open-close sequence repeats for hosts 147.228.242.108, .109 and .110, interleaved with sftp uploads of /tmp/docker-compose.yml and /tmp/engine_setup.sh ...]
DEB [20180615-17:17:40.015] thr=2 paramiko.transport.sftp: [chan 1] open('/tmp/engine_setup.sh', 'wb') -> 00000000
DEB [20180615-17:17:40.015] thr=2 paramiko.transport.sftp: [chan 1] open('/tmp/engine_setup.sh', 'wb')
DEB [20180615-17:17:40.016] thr=2 paramiko.transport.sftp: [chan 1] close(00000000)
DEB [20180615-17:17:40.076] thr=2 paramiko.transport.sftp: [chan 1] open('/tmp/engine_setup.sh', 'wb') -> 00000000
DEB [20180615-17:17:40.077] thr=2 paramiko.transport.sftp: [chan 1] close(00000000)
DEB [20180615-17:17:40.094] thr=2 paramiko.transport.sftp: [chan 1] stat('/tmp/engine_setup.sh')
DEB [20180615-17:17:40.114] thr=2 paramiko.transport: [chan 2] Max packet in: 32768 bytes
DEB [20180615-17:17:40.114] thr=2 paramiko.transport.sftp: [chan 1] stat('/tmp/engine_setup.sh')
DEB [20180615-17:17:40.132] thr=7 paramiko.transport: [chan 2] Max packet out: 32768 bytes
DEB [20180615-17:17:40.132] thr=7 paramiko.transport: Secsh channel 2 opened.
DEB [20180615-17:17:40.133] thr=2 paramiko.transport: [chan 2] Max packet in: 32768 bytes
DEB [20180615-17:17:40.151] thr=7 paramiko.transport: [chan 2] Max packet out: 32768 bytes
DEB [20180615-17:17:40.151] thr=7 paramiko.transport: Secsh channel 2 opened.
DEB [20180615-17:17:40.164] thr=7 paramiko.transport: [chan 2] Sesch channel 2 request ok
DEB [20180615-17:17:40.185] thr=7 paramiko.transport: [chan 2] Sesch channel 2 request ok
version: '3.1'
services:
zookeeper:
image: zookeeper
deploy:
restart_policy:
condition: on-failure
networks:
- storm
nimbus:
image: alogo53/docker-storm
environment:
TOPOLOGY_URL: https://github.com/skoulouzis/lightning/releases/download/v0.1-beta/lightning-0.0.1-SNAPSHOT-jar-with-dependencies.jar
TOPOLOGY_NAME: lightning
TOPOLOGY_MAIN: de.pangaea.lightning.ENVRI_NRTQualityCheck
TOPOLOGY_ARGS: arg1 arg2
depends_on:
- zookeeper
deploy:
placement:
constraints: [node.role == manager]
ports:
- 6627:6627
networks:
- storm
#worker
supervisor:
image: storm
command: storm supervisor
depends_on:
- nimbus
- zookeeper
deploy:
restart_policy:
condition: on-failure
ports:
- 8000:8000
networks:
- storm
ui:
image: storm
command: storm ui
depends_on:
- nimbus
- zookeeper
- supervisor
deploy:
restart_policy:
condition: on-failure
ports:
- 8081:8080
networks:
#- monitoring
- storm
############### monitoring#####################
#influx:
#image: influxdb
#environment:
#INFLUXDB_DB: cadvisor
##volumes:
##- influx:/var/lib/influxdb
#deploy:
#replicas: 1
#placement:
#constraints:
#- node.role == manager
#resources:
#limits:
#cpus: "0.10"
#memory: "128M"
#reservations:
#cpus: "0.05"
#memory: "64M"
#networks:
#- monitoring
#grafana:
#image: alogo53/grafana-docker
#environment:
#DS_NAME: InfluxDB
#DS_TYPE: InfluxDB
#DS_ACCESS: proxy
#DS_URL: http://influx:8086
#DS_DB: cadvisor
#ports:
#- 0.0.0.0:3000:3000
##volumes:
##- grafana:/var/lib/grafana
#depends_on:
#- influx
#deploy:
#replicas: 1
#placement:
#constraints:
#- node.role == manager
#resources:
#limits:
#cpus: "0.10"
#memory: "128M"
#reservations:
#cpus: "0.05"
#memory: "64M"
##networks:
##- monitoring
#cadvisor:
#image: google/cadvisor
#ports:
#- 0.0.0.0:8080:8080
#hostname: '{{.Node.ID}}'
#command: -logtostderr -docker_only -storage_driver=influxdb -storage_driver_db=cadvisor -storage_driver_host=influx:8086
#volumes:
#- /:/rootfs:ro
#- /var/run:/var/run:rw
#- /sys:/sys:ro
#- /var/lib/docker/:/var/lib/docker:ro
#depends_on:
#- influx
#deploy:
#mode: global
#resources:
#limits:
#cpus: "0.10"
#memory: "128M"
#reservations:
#cpus: "0.05"
#memory: "64M"
#networks:
#- monitoring
logspout:
image: gliderlabs/logspout:latest
volumes:
- /etc/hostname:/etc/host_hostname:ro
- /var/run/docker.sock:/var/run/docker.sock
ports:
- "8001:80"
deploy:
mode: global
resources:
limits:
cpus: "0.10"
memory: "128M"
reservations:
cpus: "0.05"
memory: "64M"
networks:
- monitoring
#volumes:
#influx:
#driver: local
#grafana:
#driver: local
networks:
monitoring:
storm:
\ No newline at end of file
#! /usr/bin/env python
# Copyright 2017 --Yang Hu--
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Yang Hu'
import paramiko, os
from vm_info import VmInfo
import json
import logging
import linecache
import sys
import ast
import re
from drip_logging.drip_logging_handler import DRIPLoggingHandler
logger = logging.getLogger(__name__)
if not getattr(logger, 'handler_set', None):
#logger.setLevel(logging.INFO)
h = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
h.setFormatter(formatter)
logger.addHandler(h)
logger.handler_set = True
retry=0
def get_resp_line(line):
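    """Normalize and escape a single line of remote command output, returning it as a JSON string."""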
    line = line.strip('\n').encode('unicode_escape').decode('ascii')
    return json.dumps(line)
def docker_check(vm, compose_name):
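    """SSH into the Swarm master and gather cluster state (node list, stack tasks, stack
    services, node and container inspect output), returned as a dict, or an "ERROR:..."
    string on failure."""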
try:
logger.info("Starting docker info services on: "+vm.ip)
paramiko.util.log_to_file("deployment.log")
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(vm.ip, username=vm.user, key_filename=vm.key,timeout=30)
node_format = '\'{\"ID\":\"{{.ID}}\",\"hostname\":\"{{.Hostname}}\",\"status\":\"{{.Status}}\",\"availability\":\"{{.Availability}}\",\"status\":\"{{.Status}}\"}\''
cmd = 'sudo docker node ls --format ' + (node_format)
logger.info("Sending :"+cmd)
json_response = {}
cluster_node_info = []
stdin, stdout, stderr = ssh.exec_command(cmd)
logger.info("Got response running \"docker node ls\"")
node_ls_resp = stdout.readlines()
for i in node_ls_resp:
line = get_resp_line(i)
if line:
node_info = json.loads(line)
if not isinstance(node_info, dict):
node_info = ast.literal_eval(node_info)
cluster_node_info.append(node_info)
        json_response['cluster_node_info'] = cluster_node_info
services_format = '\'{\"ID\":\"{{.ID}}\",\"name\":\"{{.Name}}\",\"image\":\"{{.Image}}\",\"node\":\"{{.Node}}\",\"desired_state\":\"{{.DesiredState}}\",\"current_state\":\"{{.CurrentState}}\",\"error\":\"{{.Error}}\",\"ports\":\"{{.Ports}}\"}\''
cmd = 'sudo docker stack ps '+ compose_name +' --format ' + services_format
logger.info("Sending :"+cmd)
stdin, stdout, stderr = ssh.exec_command(cmd)
stack_ps_resp = stdout.readlines()
services_info = []
services_ids = []
nodes_hostname = set()
for i in stack_ps_resp:
line = get_resp_line(i)
if line:
json_dict = {}
json_dict = json.loads(line)
json_dict = json.loads(json.dumps(json_dict))
if not isinstance(json_dict, dict):
try:
json_dict = json.loads(json_dict)
except Exception as e:
json_dict = json_dict.replace('\"ports\":\"\"', '\"ports\":null').replace('\"\"', '\"')
json_dict = json_dict.replace('\"node\":\",\"', '\"node\":null,\"').replace('\"\"', '\"')
json_dict = json_dict.replace("\\", "").replace("Noxe2x80xa6", "No...")
json_dict = json.loads(json_dict)
nodes_hostname.add(json_dict['node'])
services_info.append(json_dict)
services_ids.append(json_dict['ID'])
        json_response['services_info'] = services_info
stack_format = '\'{"ID":"{{.ID}}","name":"{{.Name}}","mode":"{{.Mode}}","replicas":"{{.Replicas}}","image":"{{.Image}}"}\''
cmd = 'sudo docker stack services '+ compose_name +' --format ' + (stack_format)
logger.info("Sending :"+cmd)
stdin, stdout, stderr = ssh.exec_command(cmd)
logger.info("Got response running \"docker stack services\"")
stack_resp = stdout.readlines()
stack_info = []
for i in stack_resp:
line = get_resp_line(i)
if line:
json_dict = {}
json_dict = json.loads(line)
if not isinstance(json_dict, dict):
json_dict = json.loads(json_dict)
stack_info.append(json_dict)
        json_response['stack_info'] = stack_info
cmd = 'sudo docker node inspect '
for hostname in nodes_hostname:
if hostname:
cmd += ' '+hostname
logger.info("Sending :"+cmd)
stdin, stdout, stderr = ssh.exec_command(cmd)
inspect_resp = stdout.readlines()
response_str = ""
        for i in inspect_resp:
            line = i.rstrip("\n\r")
            if line:
                response_str += line
        response_str = response_str.strip()
        json_dict = json.loads(response_str)
json_response['nodes_info'] = json_dict
#"{{.Status.ContainerStatus.ContainerID}}"
cmd = 'sudo docker inspect '
for id in services_ids:
cmd += ' '+id
logger.info("Sending :"+cmd)
stdin, stdout, stderr = ssh.exec_command(cmd)
logger.info("Got response running \"docker inspect\"")
inspect_resp = stdout.readlines()
response_str = ""
        for i in inspect_resp:
            line = i.rstrip("\n\r")
            if line:
                response_str += line
        response_str = response_str.strip()
        json_dict = json.loads(response_str)
json_response['inspect_info'] = json_dict
logger.info("Finished docker info services on: "+vm.ip)
except Exception as e:
global retry
if retry < 10 and 'timed out' in str(e):
logger.warning(vm.ip + " " + str(e)+". Retrying")
retry+=1
return docker_check(vm, compose_name)
exc_type, exc_obj, tb = sys.exc_info()
f = tb.tb_frame
lineno = tb.tb_lineno
filename = f.f_code.co_filename
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
        print('EXCEPTION IN ({}, LINE {} "{}"): {}'.format(filename, lineno, line.strip(), exc_obj))
#logger.error(vm.ip + " " + str(e)+ " line:" +lineno)
return "ERROR:" + vm.ip + " " + str(e)
ssh.close()
retry = 0
return json_response
def run(vm_list, compose_name, rabbitmq_host, owner):
    rabbit = DRIPLoggingHandler(host=rabbitmq_host, port=5672, user=owner)
    logger.addHandler(rabbit)
    ret = None
    for i in vm_list:
        if i.role == "master":
            ret = docker_check(i, compose_name)
            if isinstance(ret, str) and "ERROR:" in ret:
                return ret
    return ret
\ No newline at end of file
#! /usr/bin/env python
# Copyright 2017 --Yang Hu--
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Yang Hu'
import paramiko, os
from vm_info import VmInfo
import logging
from drip_logging.drip_logging_handler import DRIPLoggingHandler
logger = logging.getLogger(__name__)
if not getattr(logger, 'handler_set', None):
logger.setLevel(logging.INFO)
h = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
h.setFormatter(formatter)
logger.addHandler(h)
logger.handler_set = True
retry = 0
def deploy_compose(vm, compose_file, compose_name, docker_login):
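    """Upload the compose file to the master over SFTP and deploy it with
    'docker stack deploy' (after 'docker login' if credentials were given)."""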
try:
logger.info("Starting docker compose deployment on: " + vm.ip)
paramiko.util.log_to_file("deployment.log")
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(vm.ip, username=vm.user, key_filename=vm.key, timeout=30)
sftp = ssh.open_sftp()
sftp.chdir('/tmp/')
sftp.put(compose_file, "docker-compose.yml")
if (docker_login):
# stdin, stdout, stderr = ssh.exec_command("sudo docker stack rm %s" % (compose_name))
# stdout.read()
# err = stderr.read()
# sleep 5
stdin, stdout, stderr = ssh.exec_command(
"sudo docker login -u " + docker_login['username'] + " -p " + docker_login['password'] + " " +
docker_login[
'registry'] + " && sudo docker stack deploy --with-registry-auth --compose-file /tmp/docker-compose.yml %s" % (
compose_name))
else:
# stdin, stdout, stderr = ssh.exec_command("sudo docker stack rm %s" % (compose_name))
# stdout.read()
# err = stderr.read()
cmd = "sudo docker stack deploy --with-registry-auth --compose-file /tmp/docker-compose.yml " + compose_name
logger.info("Sendding : " + cmd)
stdin, stdout, stderr = ssh.exec_command(cmd)
        out = stdout.read().decode()
        err = stderr.read().decode()
        logger.info("stderr from: " + vm.ip + " " + err)
        logger.info("stdout from: " + vm.ip + " " + out)
logger.info("Finished docker compose deployment on: " + vm.ip)
except Exception as e:
global retry
if retry < 10:
logger.warning(vm.ip + " " + str(e) + ". Retrying")
retry += 1
return deploy_compose(vm, compose_file, compose_name, docker_login)
logger.error(vm.ip + " " + str(e))
return "ERROR:" + vm.ip + " " + str(e)
ssh.close()
retry = 0
return "SUCCESS"
def run(vm_list, compose_file, compose_name, rabbitmq_host, owner, docker_login):
rabbit = DRIPLoggingHandler(host=rabbitmq_host, port=5672, user=owner)
logger.addHandler(rabbit)
for i in vm_list:
if i.role == "master":
ret = deploy_compose(i, compose_file, compose_name, docker_login)
if "ERROR" in ret:
return ret
else:
swarm_file = open(i.key)
ret = swarm_file.read()
swarm_file.close()
break
return ret
# ! /usr/bin/env python
# Copyright 2017 --Yang Hu--
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Yang Hu'
import paramiko, os
import threading
import logging
from drip_logging.drip_logging_handler import DRIPLoggingHandler
import multiprocessing
logger = logging.getLogger(__name__)
if not getattr(logger, 'handler_set', None):
logger.setLevel(logging.INFO)
h = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
h.setFormatter(formatter)
logger.addHandler(h)
logger.handler_set = True
retry = 0
def install_engine(vm, return_dict):
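    """Install the Docker engine on a VM over SSH, skipping hosts where Docker is
    already present; the outcome is written to return_dict under the VM's IP."""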
try:
logger.info("Starting docker engine installation on: " + (vm.ip))
paramiko.util.log_to_file("deployment.log")
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(vm.ip, username=vm.user, key_filename=vm.key, timeout=30)
stdin, stdout, stderr = ssh.exec_command("sudo dpkg --get-selections | grep docker")
temp_list = stdout.readlines()
temp_str = ""
for i in temp_list: temp_str += i
if temp_str.find("docker") != -1:
logger.info("Docker engine arleady installated on: " + (vm.ip) + " Skiping")
return_dict[vm.ip] = "SUCCESS"
return "SUCCESS"
sftp = ssh.open_sftp()
sftp.chdir('/tmp/')
file_path = os.path.dirname(os.path.abspath(__file__))
install_script = file_path + "/" + "docker_engine.sh"
sftp.put(install_script, "engine_setup.sh")
stdin, stdout, stderr = ssh.exec_command("sudo sh /tmp/engine_setup.sh")
        output = stdout.read().decode()
        logger.info("stdout: " + output)
        output = stderr.read().decode()
        logger.info("stderr: " + output)
        logger.info("Finished docker engine installation on: " + vm.ip)
except Exception as e:
global retry
if retry < 10:
logger.warning(vm.ip + " " + str(e) + ". Retrying")
retry += 1
return install_engine(vm, return_dict)
logger.error(vm.ip + " " + str(e))
return_dict[vm.ip] = "ERROR:" + vm.ip + " " + str(e)
return "ERROR:" + vm.ip + " " + str(e)
ssh.close()
retry = 0
return_dict[vm.ip] = "SUCCESS"
return "SUCCESS"
def run(vm_list, rabbitmq_host, owner):
rabbit = DRIPLoggingHandler(host=rabbitmq_host, port=5672, user=owner)
logger.addHandler(rabbit)
manager = multiprocessing.Manager()
return_dict = manager.dict()
jobs = []
for i in vm_list:
# ret = install_engine(i)
p = multiprocessing.Process(target=install_engine, args=(i, return_dict,))
jobs.append(p)
p.start()
for proc in jobs:
proc.join()
if "ERROR" in return_dict.values(): return "ERROR"
# if "ERROR" in ret: return ret
return "SUCCESS"
#! /bin/bash
sudo apt-get update
sudo apt-get -y install linux-image-extra-$(uname -r) linux-image-extra-virtual
sudo apt-get -y install apt-transport-https ca-certificates curl software-properties-common
sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo apt-key fingerprint 0EBFCD88
sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
sudo apt-get -y update
sudo apt-get -y install docker-ce
#sudo echo "{ \"insecure-registries\":[\"129.7.98.3:5000\"] }" > /etc/docker/daemon.json
#sudo service docker restart
sudo docker plugin install --grant-all-permissions weaveworks/net-plugin:latest_release
sudo docker plugin disable weaveworks/net-plugin:latest_release
sudo docker plugin set weaveworks/net-plugin:latest_release WEAVE_MULTICAST=1
sudo docker plugin enable weaveworks/net-plugin:latest_release
sudo wget -O /usr/local/bin/weave https://github.com/weaveworks/weave/releases/download/latest_release/weave
sudo chmod a+x /usr/local/bin/weave
sudo weave status
sudo curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add -
sudo add-apt-repository "deb http://apt.kubernetes.io/ kubernetes-xenial main"
sudo apt-get update
sudo apt-get install -y kubelet kubeadm kubectl kubernetes-cni
curl -L https://github.com/kubernetes/kompose/releases/download/v1.7.0/kompose-linux-amd64 -o kompose
chmod +x kompose
sudo mv ./kompose /usr/local/bin/kompose
# ! /usr/bin/env python
# Copyright 2017 --Yang Hu--
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Yang Hu'
import paramiko, os
from vm_info import VmInfo
import linecache
import sys
import logging
import time
# from drip_logging.drip_logging_handler import *
from os import listdir
from os.path import isfile, join
logger = logging.getLogger(__name__)
if not getattr(logger, 'handler_set', None):
logger.setLevel(logging.INFO)
h = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
h.setFormatter(formatter)
logger.addHandler(h)
logger.handler_set = True
retry = 0
def print_exception():
exc_type, exc_obj, tb = sys.exc_info()
f = tb.tb_frame
lineno = tb.tb_lineno
filename = f.f_code.co_filename
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
    print('EXCEPTION IN ({}, LINE {} "{}"): {}'.format(filename, lineno, line.strip(), exc_obj))
def install_manager(vm):
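    """Bootstrap a Kubernetes master with kubeadm over SSH; returns the last line of the
    'kubeadm init' output, which is expected to be the worker join command."""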
try:
logger.info("Starting kubernetes master installation on: " + (vm.ip))
parentDir = os.path.dirname(os.path.abspath(vm.key))
os.chmod(parentDir, 0o700)
os.chmod(vm.key, 0o600)
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(vm.ip, username=vm.user, key_filename=vm.key)
sftp = ssh.open_sftp()
file_path = os.path.dirname(os.path.abspath(__file__))
sftp.chdir('/tmp/')
install_script = file_path + "/" + "docker_kubernetes.sh"
sftp.put(install_script, "kubernetes_setup.sh")
# stdin, stdout, stderr = ssh.exec_command("sudo hostname ip-%s" % (vm.ip.replace('.','-')))
# stdout.read()
stdin, stdout, stderr = ssh.exec_command("sudo sh /tmp/kubernetes_setup.sh > log 2>&1")
out = stdout.read()
out = stderr.read()
# stdin, stdout, stderr = ssh.exec_command("sudo kubeadm kubernetes-xenialreset --force")
# stdout.read()
stdin, stdout, stderr = ssh.exec_command("sudo kubeadm reset --force >> log 2>&1")
stdout.read()
# stdin, stdout, stderr = ssh.exec_command("sudo kubeadm init --apiserver-advertise-address=%s" % (vm.ip))
stdin, stdout, stderr = ssh.exec_command("sudo kubeadm init")
retstr = stdout.readlines()
stdin, stdout, stderr = ssh.exec_command("mkdir -p $HOME/.kube")
stdout.read()
stdin, stdout, stderr = ssh.exec_command("sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config")
stdout.read()
stdin, stdout, stderr = ssh.exec_command("sudo chown $(id -u):$(id -g) $HOME/.kube/config")
stdout.read()
stdin, stdout, stderr = ssh.exec_command("sudo sysctl net.bridge.bridge-nf-call-iptables=1")
retstr = stdout.readlines()
stdin, stdout, stderr = ssh.exec_command(
"kubectl apply -f \"https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')\"")
retstr = stdout.readlines()
stdin, stdout, stderr = ssh.exec_command(
"kubectl taint nodes --all node-role.kubernetes.io/master-")
retstr = stdout.readlines()
stdin, stdout, stderr = ssh.exec_command("sudo chown %s /tmp/admin.conf" % (vm.user))
stdout.read()
stdin, stdout, stderr = ssh.exec_command("sudo chgrp %s /tmp/admin.conf" % (vm.user))
stdout.read()
# sftp.get("/tmp/admin.conf", file_path + "/admin.conf")
logger.info("Finished kubernetes master installation on: " + (vm.ip))
except Exception as e:
global retry
# print '%s: %s' % (vm.ip, e)
logger.error(vm.ip + " " + str(e))
print_exception()
return "ERROR:" + vm.ip + " " + str(e)
ssh.close()
return retstr[-1]
def install_worker(join_cmd, vm):
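    """Run the Kubernetes setup script on a worker VM and join it to the cluster
    with the supplied 'kubeadm join' command."""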
try:
logger.info("Starting kubernetes slave installation on: " + (vm.ip))
logger.info("User: " + vm.user + " key file: " + vm.key)
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(vm.ip, username=vm.user, key_filename=vm.key, timeout=30)
parentDir = os.path.dirname(os.path.abspath(vm.key))
os.chmod(parentDir, 0o700)
os.chmod(vm.key, 0o600)
sftp = ssh.open_sftp()
sftp.chdir('/tmp/')
file_path = os.path.dirname(os.path.abspath(__file__))
install_script = file_path + "/" + "docker_kubernetes.sh"
sftp.put(install_script, "kubernetes_setup.sh")
# stdin, stdout, stderr = ssh.exec_command("sudo hostname ip-%s" % (vm.ip.replace('.', '-')))
# stdout.read()
stdin, stdout, stderr = ssh.exec_command("sudo sh /tmp/kubernetes_setup.sh")
stdout.read()
stdin, stdout, stderr = ssh.exec_command("sudo kubeadm reset")
stdout.read()
stdin, stdout, stderr = ssh.exec_command("sudo %s" % (join_cmd))
stdout.read()
logger.info("Finished kubernetes slave installation on: " + (vm.ip))
except Exception as e:
# print '%s: %s' % (vm.ip, e)
logger.error(vm.ip + " " + str(e))
return "ERROR:" + vm.ip + " " + str(e)
ssh.close()
return "SUCCESS"
def deploy_on_master(deployment_file, vm):
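    """Upload every file from the deployment folder to /tmp/k8s on the master, create the
    resources with kubectl, and return the 'kubectl get svc' output as JSON."""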
try:
k8s_files = [f for f in listdir(deployment_file) if isfile(join(deployment_file, f))]
logger.info("Starting deployment on: " + (vm.ip))
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(vm.ip, username=vm.user, key_filename=vm.key, timeout=30)
parentDir = os.path.dirname(os.path.abspath(vm.key))
os.chmod(parentDir, 0o700)
os.chmod(vm.key, 0o600)
stdin, stdout, stderr = ssh.exec_command("mkdir /tmp/k8s")
stdout.read()
sftp = ssh.open_sftp()
sftp.chdir('/tmp/k8s')
file_path = os.path.dirname(os.path.abspath(__file__))
for f in k8s_files:
k8s_file = deployment_file + "/" + f
sftp.put(k8s_file, f)
stdin, stdout, stderr = ssh.exec_command("kubectl create -f /tmp/k8s/ >> log 2>&1")
        s_out = stdout.read().decode()
        e_out = stderr.read().decode()
time.sleep(2)
# cmd = 'kubectl get svc --all-namespaces -o go-template=\'{{range .items}}{{range.spec.ports}}{{if .nodePort}}{{.nodePort}}{{"\\n"}}{{end}}{{end}}{{end}}\''
cmd = 'kubectl get svc --output json'
stdin, stdout, stderr = ssh.exec_command(cmd)
        e_out = stderr.read().decode()
        json_output = stdout.read().decode()
# exposed_ports_str = ''
# for port in exposed_ports:
# exposed_ports_str += port + ','
# exposed_ports_str = exposed_ports_str[:-1]
except Exception as e:
# print '%s: %s' % (vm.ip, e)
logger.error(vm.ip + " " + str(e))
return "ERROR:" + vm.ip + " " + str(e)
ssh.close()
return json_output
def deploy(vm_list, deployment_file):
for i in vm_list:
if i.role == "master":
return deploy_on_master(deployment_file, i)
def run(vm_list, rabbitmq_host, owner):
# rabbit = DRIPLoggingHandler(host=rabbitmq_host, port=5672,user=owner)
# logger.addHandler(rabbit)
for i in vm_list:
if i.role == "master":
join_cmd = install_manager(i)
if "ERROR" in join_cmd:
return join_cmd
            else:
                join_cmd = join_cmd.strip()
                break
for i in vm_list:
if i.role == "slave":
worker_cmd = install_worker(join_cmd, i)
if "ERROR" in worker_cmd:
return worker_cmd
file_path = os.path.dirname(os.path.abspath(__file__))
kuber_file = open(file_path + "/admin.conf", "r")
kuber_string = kuber_file.read()
kuber_file.close()
return kuber_string
#! /bin/bash
sudo kubeadm reset --force
sudo sed -i -re 's/([a-z]{2}\.)?archive.ubuntu.com|security.ubuntu.com/old-releases.ubuntu.com/g' /etc/apt/sources.list
sudo apt-get update && apt-get install -y apt-transport-https ca-certificates curl software-properties-common
sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo apt-get update && apt-get install -y --allow-unauthenticated docker-ce=18.06.2~ce~3-0~ubuntu
echo "{\n \"exec-opts\": [\"native.cgroupdriver=systemd\"], \n \"log-driver\": \"json-file\", \n \"log-opts\": {\"max-size\": \"100m\"}, \n \"storage-driver\": \"overlay2\" \n}" | sudo tee /etc/docker/daemon.json
sudo mkdir -p /etc/systemd/system/docker.service.d
sudo systemctl daemon-reload
sudo systemctl restart docker
sudo apt-get update && apt-get install -y apt-transport-https curl
sudo curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
sudo apt-get update && apt-get install -y kubelet kubeadm kubectl
sudo apt-mark hold kubelet kubeadm kubectl
#! /usr/bin/env python
# Copyright 2017 --Yang Hu--
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Yang Hu'
import paramiko, os
from vm_info import VmInfo
import logging
from drip_logging.drip_logging_handler import DRIPLoggingHandler
import multiprocessing
logger = logging.getLogger(__name__)
if not getattr(logger, 'handler_set', None):
logger.setLevel(logging.INFO)
h = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
h.setFormatter(formatter)
logger.addHandler(h)
logger.handler_set = True
retry=0
def scale_service(vm, application_name, service_name, service_num):
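    """Scale one service of a deployed stack to the requested number of replicas
    with 'docker service scale'."""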
try:
logger.info("Starting docker service scaling on: "+vm.ip)
paramiko.util.log_to_file("deployment.log")
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(vm.ip, username=vm.user, key_filename=vm.key,timeout=30)
stdin, stdout, stderr = ssh.exec_command("sudo docker service scale %s_%s=%s" % (application_name, service_name, service_num))
stdout.read()
logger.info("Finished docker service scaling on: "+vm.ip)
except Exception as e:
global retry
if retry < 10:
logger.warning(vm.ip + " " + str(e)+". Retrying")
retry+=1
return scale_service(vm, application_name, service_name, service_num)
logger.error(vm.ip + " " + str(e))
return "ERROR:" + vm.ip + " " + str(e)
ssh.close()
return "SUCCESS"
def run(vm_list, application_name, service_name, service_num,rabbitmq_host,owner):
rabbit = DRIPLoggingHandler(host=rabbitmq_host, port=5672,user=owner)
logger.addHandler(rabbit)
for i in vm_list:
if i.role == "master":
ret = scale_service(i, application_name, service_name, service_num)
if "ERROR" in ret:
return ret
break
return ret
\ No newline at end of file
#! /usr/bin/env python
# Copyright 2017 --Yang Hu--
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Yang Hu'
import paramiko, os
from vm_info import VmInfo
import logging
from drip_logging.drip_logging_handler import DRIPLoggingHandler
import multiprocessing
logger = logging.getLogger(__name__)
if not getattr(logger, 'handler_set', None):
logger.setLevel(logging.INFO)
h = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
h.setFormatter(formatter)
logger.addHandler(h)
logger.handler_set = True
retry=0
def install_manager(vm):
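    """Initialize a Swarm manager on the VM, or reuse an already-active one, and return
    the worker join command printed by 'docker swarm join-token worker'."""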
try:
logger.info("Starting swarm manager installation on: "+(vm.ip))
paramiko.util.log_to_file("deployment.log")
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(vm.ip, username=vm.user, key_filename=vm.key,timeout=30)
stdin, stdout, stderr = ssh.exec_command("sudo docker info | grep 'Swarm'")
temp_list1 = stdout.readlines()
stdin, stdout, stderr = ssh.exec_command("sudo docker info | grep 'Is Manager'")
temp_list2 = stdout.readlines()
if temp_list1[0].find("Swarm: active") != -1:
if temp_list2[0].find("Is Manager: true") != -1:
stdin, stdout, stderr = ssh.exec_command("sudo docker swarm join-token worker")
retstr = stdout.readlines()
                return retstr[2]
stdin, stdout, stderr = ssh.exec_command("sudo docker swarm leave --force")
stdout.read()
stdin, stdout, stderr = ssh.exec_command("sudo docker swarm init --advertise-addr %s" % (vm.ip))
stdout.read()
stdin, stdout, stderr = ssh.exec_command("sudo docker swarm join-token worker")
retstr = stdout.readlines()
        ret = retstr[2]
logger.info("Finished swarm manager installation on: "+(vm.ip))
except Exception as e:
global retry
if retry < 10:
logger.warning(vm.ip + " " + str(e)+". Retrying")
retry+=1
return install_manager(vm)
logger.error(vm.ip + " " + str(e))
return "ERROR:" + vm.ip + " " + str(e)
ssh.close()
retry=0
return ret
def install_worker(join_cmd, vm,return_dict):
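    """Join a VM to the Swarm as a worker using the manager's join command; the outcome
    is written to return_dict under the VM's IP."""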
try:
logger.info("Starting swarm worker installation on: "+(vm.ip))
paramiko.util.log_to_file("deployment.log")
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(vm.ip, username=vm.user, key_filename=vm.key,timeout=30)
stdin, stdout, stderr = ssh.exec_command("sudo docker info | grep 'Swarm'")
temp_list1 = stdout.readlines()
if temp_list1[0].find("Swarm: active") != -1:
logger.info("Swarm worker arleady installated on: "+(vm.ip)+" Skiping")
return_dict[vm.ip] = "SUCCESS"
return "SUCCESS"
stdin, stdout, stderr = ssh.exec_command("sudo docker swarm leave --force")
stdout.read()
stdin, stdout, stderr = ssh.exec_command("sudo %s" % (join_cmd))
stdout.read()
logger.info("Finished swarm worker installation on: "+(vm.ip))
except Exception as e:
global retry
if retry < 10:
logger.warning(vm.ip + " " + str(e)+". Retrying")
retry+=1
return install_worker(join_cmd, vm,return_dict)
logger.error(vm.ip + " " + str(e))
return_dict[vm.ip] = "ERROR:"+vm.ip+" "+str(e)
return "ERROR:" + vm.ip + " " + str(e)
ssh.close()
retry=0
return_dict[vm.ip] = "SUCCESS"
return "SUCCESS"
def connect_wave(vm_list,master):
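    """Connect the weave networks of the master and every slave in both directions
    with 'weave connect'."""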
try:
logger.info("Starting wave connection on: "+(master.ip))
paramiko.util.log_to_file("deployment.log")
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
for i in vm_list:
if i.role == "slave":
ssh.connect(master.ip, username=master.user, key_filename=master.key)
logger.info("weave connect "+(i.ip))
stdin, stdout, stderr = ssh.exec_command("sudo weave connect "+i.ip)
                out = stdout.read().decode()
                err = stderr.read().decode()
                logger.info("stdout: " + out)
                logger.info("stderr: " + err)
ssh.connect(i.ip, username=master.user, key_filename=i.key)
stdin, stdout, stderr = ssh.exec_command("sudo weave connect "+master.ip)
                out = stdout.read().decode()
                err = stderr.read().decode()
                logger.info("stdout: " + out)
                logger.info("stderr: " + err)
logger.info("Finished wave connection on: "+(master.ip))
except Exception as e:
global retry
if retry < 10:
logger.warning(master.ip + " " + str(e)+". Retrying")
retry+=1
return connect_wave(vm_list,master)
logger.error(master.ip + " " + str(e))
return "ERROR:" + master.ip + " " + str(e)
ssh.close()
retry=0
return "SUCCESS"
def run(vm_list,rabbitmq_host,owner):
rabbit = DRIPLoggingHandler(host=rabbitmq_host, port=5672,user=owner)
logger.addHandler(rabbit)
master = None
for i in vm_list:
if i.role == "master":
master = i
join_cmd = install_manager(i)
if "ERROR:" in join_cmd:
return join_cmd
parentDir = os.path.dirname(os.path.abspath(i.key))
os.chmod(parentDir, 0o700)
os.chmod(i.key, 0o600)
swarm_file = open(i.key)
swarm_string = swarm_file.read()
swarm_file.close()
break
manager = multiprocessing.Manager()
return_dict = manager.dict()
jobs = []
for i in vm_list:
if i.role == "slave":
p = multiprocessing.Process(target=install_worker, args=(join_cmd, i,return_dict,))
jobs.append(p)
p.start()
for proc in jobs:
proc.join()
connect_wave(vm_list,master)
if "ERROR" in return_dict.values(): return "ERROR"
return swarm_string
\ No newline at end of file
import json
import logging
import pika
from python_logging_rabbitmq import RabbitMQHandler
class DRIPLoggingHandler(RabbitMQHandler):
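    """Logging handler that publishes DRIP-formatted JSON log records to a durable
    per-user RabbitMQ queue."""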
def __init__(self, host='localhost', port=5672, username=None, password=None, user=None):
super(DRIPLoggingHandler, self).__init__(host=host, port=port, username=username, password=password)
self.user = user
    def open_connection(self):
        """Connect to RabbitMQ."""
        self.sequenceNumber = 0
# Set logger for pika.
# See if something went wrong connecting to RabbitMQ.
handler = logging.StreamHandler()
handler.setFormatter(self.formatter)
rabbitmq_logger = logging.getLogger('pika')
rabbitmq_logger.addHandler(handler)
rabbitmq_logger.propagate = False
rabbitmq_logger.setLevel(logging.WARNING)
if not self.connection or self.connection.is_closed:
self.connection = pika.BlockingConnection(pika.ConnectionParameters(** self.connection_params))
if not self.channel or self.channel.is_closed:
self.channel = self.connection.channel()
self.channel.queue_declare(queue='log_qeue_' + self.user, durable=True)
# Manually remove logger to avoid shutdown message.
rabbitmq_logger.removeHandler(handler)
def emit(self, record):
self.acquire()
try:
if not self.connection or self.connection.is_closed or not self.channel or self.channel.is_closed:
self.open_connection()
queue='log_qeue_' + self.user
self.channel.basic_publish(
exchange='',
routing_key=queue,
body=self.format(record),
properties=pika.BasicProperties(
delivery_mode=2)
)
except Exception:
self.channel, self.connection = None, None
self.handleError(record)
finally:
if self.close_after_emit:
self.close_connection()
self.release()
def format(self, record):
drip_record = {}
drip_record['timestamp'] = record.created
        drip_record['owner'] = self.user
drip_record['level'] = record.levelname
drip_record['loggerName'] = record.module
drip_record['message'] = record.message
drip_record['millis'] = record.created
self.sequenceNumber += 1
drip_record['sequenceNumber'] = self.sequenceNumber
drip_record['sourceClassName'] = record.module
drip_record['sourceMethodName'] = record.funcName
return json.dumps(drip_record)
\ No newline at end of file
pika==1.1.0
names==0.3.0
networkx==2.4
ansible==2.9.2
paramiko
python-logging-rabbitmq
#!/usr/bin/env python
# Copyright 2017 S. Koulouzis
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ansible.plugins.callback import CallbackBase
__author__ = 'S. Koulouzis'
class ResultsCollector(CallbackBase):
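    """Ansible callback that collects per-host task results into the host_ok,
    host_unreachable and host_failed lists."""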
def __init__(self, *args, **kwargs):
super(ResultsCollector, self).__init__(*args, **kwargs)
self.host_ok = []
self.host_unreachable = []
self.host_failed = []
def v2_runner_on_unreachable(self, result, ignore_errors=False):
name = result._host.get_name()
task = result._task.get_name()
#self.host_unreachable[result._host.get_name()] = result
self.host_unreachable.append(dict(ip=name, task=task, result=result))
def v2_runner_on_ok(self, result, *args, **kwargs):
name = result._host.get_name()
task = result._task.get_name()
if task == "setup":
pass
elif "Info" in task:
self.host_ok.append(dict(ip=name, task=task, result=result))
else:
self.host_ok.append(dict(ip=name, task=task, result=result))
def v2_runner_on_failed(self, result, *args, **kwargs):
name = result._host.get_name()
task = result._task.get_name()
self.host_failed.append(dict(ip=name, task=task, result=result))
\ No newline at end of file
#!/usr/bin/env python
import base64
import json
import logging
import os
import os.path
import ansible_playbook
import sys
import time
from threading import Thread
from time import sleep
import pika
import docker_check
import docker_compose
import docker_engine
import docker_kubernetes
import docker_service
import docker_swarm
from vm_info import VmInfo
global rabbitmq_host
if len(sys.argv) > 1:
rabbitmq_host = sys.argv[1]
else:
rabbitmq_host = '127.0.0.1'
logger = logging.getLogger(__name__)
if not getattr(logger, 'handler_set', None):
logger.setLevel(logging.INFO)
h = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
h.setFormatter(formatter)
logger.addHandler(h)
logger.handler_set = True
connection = pika.BlockingConnection(pika.ConnectionParameters(host=rabbitmq_host))
channel = connection.channel()
channel.queue_declare(queue='deployer_queue')
done = False
def threaded_function(args):
while not done:
connection.process_data_events()
sleep(5)
def handle_delivery(message):
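    """Parse a deployment request, write the credentials and deployment artifacts under
    deployer_files/, and dispatch to the requested cluster manager."""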
parsed_json = json.loads(message)
owner = parsed_json['owner']
params = parsed_json["parameters"]
node_num = 0
vm_list = set()
current_milli_time = lambda: int(round(time.time() * 1000))
try:
path = os.path.dirname(os.path.abspath(__file__)) + "/deployer_files/" + str(current_milli_time()) + "/"
except NameError:
import sys
path = os.path.dirname(os.path.abspath(sys.argv[0])) + "/deployer_files/" + str(current_milli_time()) + "/"
if not os.path.exists(path):
os.makedirs(path)
for param in params:
name = param["name"]
if name == "cluster":
manager_type = param["value"]
elif name == "credential":
value = param["value"]
value = base64.b64decode(value)
ip = param["attributes"]["IP"]
user = param["attributes"]["user"]
role = param["attributes"]["role"]
node_num += 1
key = path + "%d.txt" % node_num
fo = open(key, "w")
fo.write(value)
fo.close()
parentDir = os.path.dirname(os.path.abspath(key))
os.chmod(parentDir, 0o700)
os.chmod(key, 0o600)
vm = VmInfo(ip, user, key, role)
vm_list.add(vm)
elif name.startswith('k8s_'):
value = param["value"]
value = base64.b64decode(value)
k8s_folder = path + "/k8s/"
if not os.path.exists(k8s_folder):
os.makedirs(k8s_folder)
deployment_file = k8s_folder + name+".yml"
            fo = open(deployment_file, "wb")
fo.write(value)
fo.close()
elif name == "playbook":
value = param["value"]
playbook = path + "playbook.yml"
fo = open(playbook, "w")
fo.write(value)
fo.close()
elif name == "composer":
value = param["value"]
compose_file = path + "docker-compose.yml"
if not param["attributes"] is None and not param["attributes"]["name"] is None:
compose_name = param["attributes"]["name"]
docker_login = {}
if 'docker_login' in param["attributes"]:
docker_login['username'] = param["attributes"]["docker_login_username"]
docker_login['password'] = param["attributes"]["docker_login_password"]
docker_login['registry'] = param["attributes"]["docker_login_registry"]
docker_login = param["attributes"]["docker_login"]
else:
current_milli_time = lambda: int(round(time.time() * 1000))
compose_name = "service_" + str(current_milli_time())
fo = open(compose_file, "w")
fo.write(value)
fo.close()
elif name == "scale":
name_of_deployment = param["value"]
name_of_service = param["attributes"]["service"]
number_of_containers = param["attributes"]["number_of_containers"]
elif name == "swarm_info":
compose_name = param["attributes"]["name"]
if manager_type == "kubernetes":
ret = docker_kubernetes.run(vm_list, rabbitmq_host, owner)
ret = docker_kubernetes.deploy(vm_list, k8s_folder)
return ret
elif manager_type == "swarm":
ret = docker_engine.run(vm_list, rabbitmq_host, owner)
if "ERROR" in ret: return ret
ret = docker_swarm.run(vm_list, rabbitmq_host, owner)
if "ERROR" in ret: return ret
ret = docker_compose.run(vm_list, compose_file, compose_name, rabbitmq_host, owner, docker_login)
return ret
elif manager_type == "ansible":
ret = ansible_playbook.run(vm_list, playbook, rabbitmq_host, owner)
return ret
elif manager_type == "scale":
ret = docker_service.run(vm_list, name_of_deployment, name_of_service, number_of_containers, rabbitmq_host,
owner)
return ret
elif manager_type == "swarm_info":
ret = docker_check.run(vm_list, compose_name, rabbitmq_host, owner)
ret = '"' + json.dumps(ret) + '"'
return ret
else:
return "ERROR: invalid cluster"
def on_request(ch, method, props, body):
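    """RPC callback: handle the request, pick the response parameter name from the cluster
    type, and publish the base64-encoded result to the reply queue."""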
ret = handle_delivery(body)
parsed_json = json.loads(body)
params = parsed_json["parameters"]
for param in params:
name = param["name"]
if name == "cluster":
manager_type = param["value"]
break
    if not ret or "ERROR" in str(ret):
res_name = "error"
elif manager_type == "ansible":
res_name = "ansible_output"
elif manager_type == "scale":
res_name = "scale_status"
elif manager_type == "swarm_info":
res_name = "swarm_info"
elif manager_type == "kubernetes":
res_name = "kubectl_get"
else:
res_name = "credential"
response = {}
outcontent = {}
current_milli_time = lambda: int(round(time.time() * 1000))
response["creationDate"] = current_milli_time()
response["parameters"] = []
par = {}
par["url"] = "null"
par["encoding"] = "UTF-8"
par["name"] = res_name
par["value"] = base64.b64encode(ret)
par["attributes"] = "null"
response["parameters"].append(par)
response = json.dumps(response)
logger.info("Response: " + response)
ch.basic_publish(exchange='',
routing_key=props.reply_to,
properties=pika.BasicProperties(correlation_id= \
props.correlation_id),
body=str(response))
ch.basic_ack(delivery_tag=method.delivery_tag)
channel.basic_qos(prefetch_count=1)
channel.basic_consume(queue='deployer_queue', on_message_callback=on_request)
thread = Thread(target=threaded_function, args=(1,))
thread.start()
logger.info("Awaiting RPC requests")
try:
channel.start_consuming()
except KeyboardInterrupt:
# thread.stop()
done = True
thread.join()
logger.info("Threads successfully closed")
def get_interfaces(tosca_template_json):
return None
\ No newline at end of file
from setuptools import setup, find_packages
setup(
name='drip_deployer',
version='0.1',
packages=find_packages(),
# Fill in these to make your Egg ready for upload to
# PyPI
author='S. Koulouzis',
author_email='',
# summary = 'Just another Python package for the cheese shop',
url='',
license='',
long_description='Long description of the package',
# could also include long_description, download_url, classifiers, etc.
)
#!/usr/bin/env python
import pika
import json
import os
import time
from vm_info import VmInfo
import docker_kubernetes
import docker_engine
import docker_swarm
import docker_compose
import docker_service
import docker_check
import control_agent
import ansible_playbook
import sys, argparse
from threading import Thread
from time import sleep
import os.path
import logging
from os.path import expanduser
home = expanduser("~")
playbook_path = sys.argv[1]  # e.g. home + "/Downloads/playbook.yml"
ip = sys.argv[2]  # e.g. "147.228.242.97"
user = "vm_user"
role = "master"
ssh_key_file = sys.argv[3]  # e.g. home + "/Downloads/id_rsa"
vm_list = set()
vm = VmInfo(ip, user, ssh_key_file, role)
vm_list.add(vm)
rabbit_mq_host = sys.argv[4] #rabbit_mq_host
print(sys.argv)
print("playbook_path: " + playbook_path)
print("ip: " + ip)
print("ssh_key_file: " + ssh_key_file)
print("rabbit_mq_host: " + rabbit_mq_host)
ret = ansible_playbook.run(vm_list,playbook_path,rabbit_mq_host,"owner")
\ No newline at end of file
#! /usr/bin/env python
# Copyright 2017 --Yang Hu--
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class VmInfo:
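    """Value object describing a VM: IP address, SSH user, private key path and
    cluster role ("master" or "slave")."""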
def __init__(self, ip = "", user = "", key = "", role = ""):
self.ip = ip
self.user = user
self.key = key
self.role = role
def displayVm(self):
print "IP:", self.ip, " USER:", self.user