Commit c4891ab0 authored by Spiros Koulouzis

added new planner and Kubernetes interface

parent 02c7f99e
...@@ -59,3 +59,20 @@ interface_types:
    provision:
      description: Provision the defined objects (resources). ObjectType can be SubTopology or VM.
tosca.interfaces.ARTICONF.Kubernetes:
derived_from: tosca.interfaces.node.lifecycle.Standard
inputs:
inventory_file:
type: string
required: true
default: k8s_hosts
playbook:
type: string
required: true
default: https://raw.githubusercontent.com/skoulouzis/CONF/DRIP_3.0/ansible_playbooks/install_k8s.yml
install:
description: install Kubernetes
...@@ -70,12 +70,14 @@ node_types:
    derived_from: tosca.nodes.ARTICONF.Orchestrator
    description: Kubernetes orchestrator
    interfaces:
-     Standard:
-       create:
+     Kubernetes:
+       type: tosca.interfaces.ARTICONF.Kubernetes
+       install:
          inputs:
-           inventory-file: k8s_hosts
+           inventory_file: k8s_hosts
            playbook: https://raw.githubusercontent.com/skoulouzis/CONF/DRIP_3.0/ansible_playbooks/install_k8s.yml
  #tosca.nodes.ARTICONF.Orchestrator.Swarm:
    #derived_from: tosca.nodes.ARTICONF.Orchestrator
    #description: swarm orchestrator
......
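The interface and node type above only declare the install operation and its inputs. A minimal, illustrative Python sketch (not part of this commit; it assumes the template has been parsed into a dict with yaml.safe_load and that node templates carry the interface as shown) of how a planner could read those inputs back out:

# Illustrative sketch: read the Kubernetes install inputs from a parsed TOSCA template dict.
def get_kubernetes_install_inputs(tosca_template):
    inputs_per_node = {}
    node_templates = tosca_template.get('topology_template', {}).get('node_templates', {})
    for name, node in node_templates.items():
        k8s_interface = node.get('interfaces', {}).get('Kubernetes', {})
        install = k8s_interface.get('install', {})
        if install:
            # e.g. {'inventory_file': 'k8s_hosts', 'playbook': 'https://raw.githubusercontent.com/...'}
            inputs_per_node[name] = install.get('inputs', {})
    return inputs_per_node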
<?xml version="1.0" encoding="UTF-8"?>
<module type="PYTHON_MODULE" version="4">
  <component name="NewModuleRootManager">
-    <content url="file://$MODULE_DIR$">
-      <excludeFolder url="file://$MODULE_DIR$/venv" />
-    </content>
-    <orderEntry type="jdk" jdkName="Python 2.7 (drip-deployer)" jdkType="Python SDK" />
+    <content url="file://$MODULE_DIR$" />
+    <orderEntry type="inheritedJdk" />
    <orderEntry type="sourceFolder" forTests="false" />
  </component>
-  <component name="TestRunnerService">
-    <option name="PROJECT_TEST_RUNNER" value="Unittests" />
+  <component name="PyDocumentationSettings">
+    <option name="format" value="PLAIN" />
+    <option name="myDocStringFormat" value="Plain" />
  </component>
</module>
\ No newline at end of file
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
-  <component name="ProjectRootManager" version="2" project-jdk-name="Python 2.7 (drip-deployer)" project-jdk-type="Python SDK" />
+  <component name="ProjectRootManager" version="2" project-jdk-name="Python 3.8 (drip-deployer)" project-jdk-type="Python SDK" />
</project>
\ No newline at end of file
FROM python:3.7-buster
RUN mkdir -p /usr/src/app
WORKDIR /usr/src/app
COPY requirements.txt /usr/src/app/
RUN pip3 install --no-cache-dir -r requirements.txt
COPY . /usr/src/app
EXPOSE 8081
# Shell form so that $RABBITMQ_HOST is expanded and the arguments are split correctly.
ENTRYPOINT python3 __main__.py $RABBITMQ_HOST deployer_queue
# To change this license header, choose License Headers in Project Properties.
# To change this template file, choose Tools | Templates
# and open the template in the editor.
import json
import os
import os.path
import tempfile
import time
import logging
import pika
import yaml
import sys
from service import tosca
logger = logging.getLogger(__name__)
# if not getattr(logger, 'handler_set', None):
# logger.setLevel(logging.INFO)
# h = logging.StreamHandler()
# formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# h.setFormatter(formatter)
# logger.addHandler(h)
# logger.handler_set = True
def init_chanel(args):
    global rabbitmq_host
    global queue_name
    if len(args) > 1:
        rabbitmq_host = args[1]
        queue_name = args[2]  # e.g. deployer_queue
    else:
        rabbitmq_host = '127.0.0.1'
        queue_name = 'deployer_queue'
    connection = pika.BlockingConnection(pika.ConnectionParameters(host=rabbitmq_host))
    channel = connection.channel()
    channel.queue_declare(queue=queue_name)
    return channel
def start(this_channel):
this_channel.basic_qos(prefetch_count=1)
this_channel.basic_consume(queue=queue_name, on_message_callback=on_request)
logger.info(" [x] Awaiting RPC requests")
this_channel.start_consuming()
def on_request(ch, method, props, body):
response = handle_delivery(body)
ch.basic_publish(exchange='',
routing_key=props.reply_to,
properties=pika.BasicProperties(correlation_id=
props.correlation_id),
body=str(response))
ch.basic_ack(delivery_tag=method.delivery_tag)
def handle_delivery(message):
logger.info("Got: " + str(message))
try:
message = message.decode()
except (UnicodeDecodeError, AttributeError):
pass
parsed_json_message = json.loads(message)
owner = parsed_json_message['owner']
tosca_file_name = 'tosca_template'
tosca_template_json = parsed_json_message['toscaTemplate']
input_current_milli_time = lambda: int(round(time.time() * 1000))
    interfaces = tosca.get_interfaces(tosca_template_json)
    # Planner stub: for now the service echoes the incoming template back to the caller.
    template_dict = tosca_template_json
    logger.info("template ----: \n" + yaml.dump(template_dict))
    response = {'toscaTemplate': template_dict}
output_current_milli_time = int(round(time.time() * 1000))
response["creationDate"] = output_current_milli_time
response["parameters"] = []
logger.info("Returning plan")
logger.info("Output message:" + json.dumps(response))
return json.dumps(response)
if __name__ == "__main__":
logging.basicConfig(level=logging.INFO)
logger.info("Input args: " + sys.argv[0] + ' ' + sys.argv[1] + ' ' + sys.argv[2])
channel = init_chanel(sys.argv)
global queue_name
queue_name = sys.argv[2]
start(channel)
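For local testing, a minimal RPC client sketch (assumptions: RabbitMQ reachable on 127.0.0.1, the queue is named deployer_queue, and the request body carries the owner and toscaTemplate fields that handle_delivery reads):

import json
import uuid
import pika

connection = pika.BlockingConnection(pika.ConnectionParameters(host='127.0.0.1'))
channel = connection.channel()
# Temporary, exclusive queue for the RPC reply.
reply_queue = channel.queue_declare(queue='', exclusive=True).method.queue
request = {'owner': 'test_user',
           'toscaTemplate': {'topology_template': {'node_templates': {}}}}
channel.basic_publish(exchange='',
                      routing_key='deployer_queue',
                      properties=pika.BasicProperties(reply_to=reply_queue,
                                                      correlation_id=str(uuid.uuid4())),
                      body=json.dumps(request))
# The planner's reply (a JSON string with toscaTemplate, creationDate and parameters)
# arrives on reply_queue.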
#!/usr/bin/env python
import json
from collections import namedtuple
from ansible.parsing.dataloader import DataLoader
from ansible.vars import VariableManager
from ansible.inventory import Inventory
from ansible.playbook.play import Play
from ansible.executor.task_queue_manager import TaskQueueManager
import ansible.executor.task_queue_manager
import ansible.inventory
import ansible.parsing.dataloader
import ansible.playbook.play
import ansible.plugins.callback
import ansible.vars
from ansible.executor.playbook_executor import PlaybookExecutor
from ansible.plugins import callback_loader
import os
import logging
import yaml
import sys
from results_collector import ResultsCollector
hosts='172.17.0.2,'
playbook_path = 'playbook.yml'
if not os.path.exists(playbook_path):
    print('[ERROR] The playbook does not exist')
sys.exit()
user='vm_user'
ssh_key_file='id_ras'
extra_vars = {} #{'resultslocation':'/tmp/res','ansible_sudo_pass':'123'}
variable_manager = VariableManager()
loader = DataLoader()
inventory = Inventory(loader=loader, variable_manager=variable_manager, host_list=hosts)
Options = namedtuple('Options', ['listtags', 'listtasks', 'listhosts', 'syntax', 'connection','module_path', 'forks', 'remote_user', 'private_key_file', 'ssh_common_args', 'ssh_extra_args', 'sftp_extra_args', 'scp_extra_args', 'become', 'become_method', 'become_user', 'verbosity', 'check'])
options = Options(listtags=False, listtasks=False, listhosts=False, syntax=False, connection='smart', module_path=None, forks=None, remote_user=user, private_key_file=ssh_key_file, ssh_common_args=None, ssh_extra_args=None, sftp_extra_args=None, scp_extra_args=None, become=True, become_method='sudo', become_user='root', verbosity=None, check=False)
variable_manager.extra_vars = extra_vars
passwords = {}
pbex = PlaybookExecutor(playbooks=[playbook_path],
inventory=inventory,
variable_manager=variable_manager,
loader=loader,
options=options,
passwords=passwords,
)
results_callback = ResultsCollector()
pbex._tqm._stdout_callback = results_callback
results = pbex.run()
ok = results_callback.host_ok
answer = []
for res in ok:
resp = json.dumps({"host":res['ip'], "result":res['result']._result})
answer.append({"host":res['ip'], "result":res['result']._result})
unreachable = results_callback.host_unreachable
for res in unreachable:
resp = json.dumps({"host":res['ip'], "result":res['result']._result})
answer.append({"host":res['ip'], "result":res['result']._result})
host_failed = results_callback.host_failed
for res in host_failed:
resp = json.dumps({"host":res['ip'], "result":res['result']._result})
answer.append({"host":res['ip'], "result":res['result']._result})
print(json.dumps(answer, indent=4))
\ No newline at end of file
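Note: requirements.txt pins ansible==2.9.2, while the script above uses the pre-2.4 Python API (ansible.vars.VariableManager, ansible.inventory.Inventory, the Options namedtuple). An untested sketch of the equivalent entry points under the 2.8+/2.9 internal API (the exact set of fields needed in CLIARGS may differ):

# Untested sketch: module paths and option handling for the Ansible 2.8+/2.9 internal API.
from ansible import context
from ansible.module_utils.common.collections import ImmutableDict
from ansible.parsing.dataloader import DataLoader
from ansible.vars.manager import VariableManager
from ansible.inventory.manager import InventoryManager
from ansible.executor.playbook_executor import PlaybookExecutor

# CLI-style options now live in context.CLIARGS instead of an Options namedtuple.
context.CLIARGS = ImmutableDict(connection='smart', module_path=None, forks=10,
                                become=True, become_method='sudo', become_user='root',
                                check=False, diff=False, verbosity=0, syntax=False,
                                start_at_task=None, remote_user='vm_user',
                                private_key_file='id_rsa')
loader = DataLoader()
inventory = InventoryManager(loader=loader, sources='172.17.0.2,')
variable_manager = VariableManager(loader=loader, inventory=inventory)
pbex = PlaybookExecutor(playbooks=['playbook.yml'], inventory=inventory,
                        variable_manager=variable_manager, loader=loader, passwords={})
# results = pbex.run()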
#! /bin/bash
killall apt
rm /var/lib/dpkg/lock
dpkg --configure -a
apt-get update
export DEBIAN_FRONTEND=noninteractive
apt-get -o Dpkg::Options::="--force-confold" upgrade -q -y --force-yes
apt-get -y install software-properties-common python openssh-server sudo
service ssh restart
\ No newline at end of file
#! /usr/bin/env python
# Copyright 2017 --Yang Hu--
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Yang Hu'
import paramiko, os
import logging
# from drip_logging.drip_logging_handler import *
logger = logging.getLogger(__name__)
if not getattr(logger, 'handler_set', None):
logger.setLevel(logging.INFO)
h = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
h.setFormatter(formatter)
logger.addHandler(h)
logger.handler_set = True
retry=0
def install_agent(vm, vm_list):
try:
logger.info("Starting control agent installation on: "+(vm.ip))
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(vm.ip, username=vm.user, key_filename=vm.key,timeout=30)
sftp = ssh.open_sftp()
sftp.chdir('/tmp/')
vm_cnt = 0
file_path = os.path.dirname(os.path.abspath(__file__))
fo = open(file_path + "/cluster_file", "w")
for i in vm_list:
vm_cnt += 1
sftp.put(i.key, "%d" % (vm_cnt))
fo.write("%s %s /tmp/%d %s\n" % (i.ip, i.user, vm_cnt, i.role))
fo.close()
sftp.put(file_path + "/cluster_file", "cluster_file")
sftp.put(file_path + "/control_agent.sh", "control_agent.sh")
stdin, stdout, stderr = ssh.exec_command("sudo sh /tmp/control_agent.sh")
stdout.read()
stdin, stdout, stderr = ssh.exec_command("nohup sudo python /root/Swarm-Agent/run.py>/dev/null 2>&1 &")
stdout.read()
logger.info("Finished control agent installation on: "+(vm.ip))
except Exception as e:
global retry
if retry < 10:
logger.warning(vm.ip + " " + str(e)+". Retrying")
retry+=1
return install_agent(vm, vm_list)
logger.error(vm.ip + " " + str(e))
        print('%s: %s' % (vm.ip, e))
return "ERROR:" + vm.ip + " " + str(e)
ssh.close()
return "SUCCESS"
def run(vm_list):
for i in vm_list:
parentDir = os.path.dirname(os.path.abspath(i.key))
os.chmod(parentDir, 0o700)
os.chmod(i.key, 0o600)
if i.role == "master":
ret = install_agent(i, vm_list)
if "ERROR" in ret: return ret
return "SUCCESS"
#! /bin/bash
sudo apt-get update
sudo apt-get -y install git
sudo apt-get -y install wget
sudo apt-get -y install build-essential libssl-dev libffi-dev python-dev
cd /root/
sudo wget https://bootstrap.pypa.io/get-pip.py
sudo python get-pip.py
sudo pip install flask
sudo pip install paramiko
sudo git clone https://github.com/oceanshy/Swarm-Agent.git
version: '3.1'
services:
zookeeper:
image: zookeeper
deploy:
restart_policy:
condition: on-failure
networks:
- storm
nimbus:
image: alogo53/docker-storm
environment:
TOPOLOGY_URL: https://github.com/skoulouzis/lightning/releases/download/v0.1-beta/lightning-0.0.1-SNAPSHOT-jar-with-dependencies.jar
TOPOLOGY_NAME: lightning
TOPOLOGY_MAIN: de.pangaea.lightning.ENVRI_NRTQualityCheck
TOPOLOGY_ARGS: arg1 arg2
depends_on:
- zookeeper
deploy:
placement:
constraints: [node.role == manager]
ports:
- 6627:6627
networks:
- storm
#worker
supervisor:
image: storm
command: storm supervisor
depends_on:
- nimbus
- zookeeper
deploy:
restart_policy:
condition: on-failure
ports:
- 8000:8000
networks:
- storm
ui:
image: storm
command: storm ui
depends_on:
- nimbus
- zookeeper
- supervisor
deploy:
restart_policy:
condition: on-failure
ports:
- 8081:8080
networks:
#- monitoring
- storm
############### monitoring#####################
#influx:
#image: influxdb
#environment:
#INFLUXDB_DB: cadvisor
##volumes:
##- influx:/var/lib/influxdb
#deploy:
#replicas: 1
#placement:
#constraints:
#- node.role == manager
#resources:
#limits:
#cpus: "0.10"
#memory: "128M"
#reservations:
#cpus: "0.05"
#memory: "64M"
#networks:
#- monitoring
#grafana:
#image: alogo53/grafana-docker
#environment:
#DS_NAME: InfluxDB
#DS_TYPE: InfluxDB
#DS_ACCESS: proxy
#DS_URL: http://influx:8086
#DS_DB: cadvisor
#ports:
#- 0.0.0.0:3000:3000
##volumes:
##- grafana:/var/lib/grafana
#depends_on:
#- influx
#deploy:
#replicas: 1
#placement:
#constraints:
#- node.role == manager
#resources:
#limits:
#cpus: "0.10"
#memory: "128M"
#reservations:
#cpus: "0.05"
#memory: "64M"
##networks:
##- monitoring
#cadvisor:
#image: google/cadvisor
#ports:
#- 0.0.0.0:8080:8080
#hostname: '{{.Node.ID}}'
#command: -logtostderr -docker_only -storage_driver=influxdb -storage_driver_db=cadvisor -storage_driver_host=influx:8086
#volumes:
#- /:/rootfs:ro
#- /var/run:/var/run:rw
#- /sys:/sys:ro
#- /var/lib/docker/:/var/lib/docker:ro
#depends_on:
#- influx
#deploy:
#mode: global
#resources:
#limits:
#cpus: "0.10"
#memory: "128M"
#reservations:
#cpus: "0.05"
#memory: "64M"
#networks:
#- monitoring
logspout:
image: gliderlabs/logspout:latest
networks:
- logging
volumes:
- /etc/hostname:/etc/host_hostname:ro
- /var/run/docker.sock:/var/run/docker.sock
ports:
- "8001:80"
deploy:
mode: global
resources:
limits:
cpus: "0.10"
memory: "128M"
reservations:
cpus: "0.05"
memory: "64M"
networks:
- monitoring
#volumes:
#influx:
#driver: local
#grafana:
#driver: local
networks:
  monitoring:
  # declared here because the logspout service above attaches to it
  logging:
  storm:
\ No newline at end of file
#! /usr/bin/env python
# Copyright 2017 --Yang Hu--
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Yang Hu'
import paramiko, os
from vm_info import VmInfo
import json
import logging
import linecache
import sys
import ast
import re
# from drip_logging.drip_logging_handler import *
logger = logging.getLogger(__name__)
if not getattr(logger, 'handler_set', None):
#logger.setLevel(logging.INFO)
h = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
h.setFormatter(formatter)
logger.addHandler(h)
logger.handler_set = True
retry=0
def get_resp_line(line):
line = line.encode('utf-8').strip('\n').encode('string_escape')
return json.dumps(line)
def docker_check(vm, compose_name):
try:
logger.info("Starting docker info services on: "+vm.ip)
paramiko.util.log_to_file("deployment.log")
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(vm.ip, username=vm.user, key_filename=vm.key,timeout=30)
node_format = '\'{\"ID\":\"{{.ID}}\",\"hostname\":\"{{.Hostname}}\",\"status\":\"{{.Status}}\",\"availability\":\"{{.Availability}}\",\"status\":\"{{.Status}}\"}\''
cmd = 'sudo docker node ls --format ' + (node_format)
logger.info("Sending :"+cmd)
json_response = {}
cluster_node_info = []
stdin, stdout, stderr = ssh.exec_command(cmd)
logger.info("Got response running \"docker node ls\"")
node_ls_resp = stdout.readlines()
for i in node_ls_resp:
line = get_resp_line(i)
if line:
node_info = json.loads(line)
if not isinstance(node_info, dict):
node_info = ast.literal_eval(node_info)
cluster_node_info.append(node_info)
json_response ['cluster_node_info'] = cluster_node_info
services_format = '\'{\"ID\":\"{{.ID}}\",\"name\":\"{{.Name}}\",\"image\":\"{{.Image}}\",\"node\":\"{{.Node}}\",\"desired_state\":\"{{.DesiredState}}\",\"current_state\":\"{{.CurrentState}}\",\"error\":\"{{.Error}}\",\"ports\":\"{{.Ports}}\"}\''
cmd = 'sudo docker stack ps '+ compose_name +' --format ' + services_format
logger.info("Sending :"+cmd)
stdin, stdout, stderr = ssh.exec_command(cmd)
stack_ps_resp = stdout.readlines()
services_info = []
services_ids = []
nodes_hostname = set()
for i in stack_ps_resp:
line = get_resp_line(i)
if line:
json_dict = {}
json_dict = json.loads(line)
json_dict = json.loads(json.dumps(json_dict))
if not isinstance(json_dict, dict):
try:
json_dict = json.loads(json_dict)
except Exception as e:
json_dict = json_dict.replace('\"ports\":\"\"', '\"ports\":null').replace('\"\"', '\"')
json_dict = json_dict.replace('\"node\":\",\"', '\"node\":null,\"').replace('\"\"', '\"')
json_dict = json_dict.replace("\\", "").replace("Noxe2x80xa6", "No...")
json_dict = json.loads(json_dict)
nodes_hostname.add(json_dict['node'])
services_info.append(json_dict)
services_ids.append(json_dict['ID'])
json_response ['services_info'] = services_info
stack_format = '\'{"ID":"{{.ID}}","name":"{{.Name}}","mode":"{{.Mode}}","replicas":"{{.Replicas}}","image":"{{.Image}}"}\''
cmd = 'sudo docker stack services '+ compose_name +' --format ' + (stack_format)
logger.info("Sending :"+cmd)
stdin, stdout, stderr = ssh.exec_command(cmd)
logger.info("Got response running \"docker stack services\"")
stack_resp = stdout.readlines()
stack_info = []
for i in stack_resp:
line = get_resp_line(i)
if line:
json_dict = {}
json_dict = json.loads(line)
if not isinstance(json_dict, dict):
json_dict = json.loads(json_dict)
stack_info.append(json_dict)
json_response ['stack_info'] = stack_info
cmd = 'sudo docker node inspect '
for hostname in nodes_hostname:
if hostname:
cmd += ' '+hostname
logger.info("Sending :"+cmd)
stdin, stdout, stderr = ssh.exec_command(cmd)
inspect_resp = stdout.readlines()
response_str = ""
for i in inspect_resp:
line = i.rstrip("\n\r").encode()
if line:
response_str+=line
json_dict = {}
response_str = response_str.rstrip("\n\r").strip(' \t\n\r').strip().encode('string_escape')
json_dict = json.loads(response_str)
json_response['nodes_info'] = json_dict
#"{{.Status.ContainerStatus.ContainerID}}"
cmd = 'sudo docker inspect '
for id in services_ids:
cmd += ' '+id
logger.info("Sending :"+cmd)
stdin, stdout, stderr = ssh.exec_command(cmd)
logger.info("Got response running \"docker inspect\"")
inspect_resp = stdout.readlines()
response_str = ""
for i in inspect_resp:
line = i.rstrip("\n\r").encode()
if line:
response_str+=line
json_dict = {}
response_str = response_str.rstrip("\n\r").strip(' \t\n\r').strip().encode('string_escape')
json_dict = json.loads(response_str)
json_response['inspect_info'] = json_dict
logger.info("Finished docker info services on: "+vm.ip)
except Exception as e:
global retry
if retry < 10 and 'timed out' in str(e):
logger.warning(vm.ip + " " + str(e)+". Retrying")
retry+=1
return docker_check(vm, compose_name)
exc_type, exc_obj, tb = sys.exc_info()
f = tb.tb_frame
lineno = tb.tb_lineno
filename = f.f_code.co_filename
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
        print('EXCEPTION IN ({}, LINE {} "{}"): {}'.format(filename, lineno, line.strip(), exc_obj))
#logger.error(vm.ip + " " + str(e)+ " line:" +lineno)
return "ERROR:" + vm.ip + " " + str(e)
ssh.close()
retry = 0
return json_response
def run(vm_list, compose_name, rabbitmq_host, owner):
    # rabbit = DRIPLoggingHandler(host=rabbitmq_host, port=5672, user=owner)
    # logger.addHandler(rabbit)
for i in vm_list:
if i.role == "master":
ret = docker_check(i, compose_name)
if "ERROR:" in ret:
return ret
return ret
\ No newline at end of file
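For reference, the json_response assembled by docker_check above has roughly this shape (a sketch inferred from the commands it runs; field values here are made up):

# Sketch of the structure returned by docker_check (illustrative values only).
example_response = {
    'cluster_node_info': [
        {'ID': 'abc123', 'hostname': 'node-1', 'status': 'Ready', 'availability': 'Active'},
    ],
    'services_info': [
        {'ID': 'svc1', 'name': 'stack_web.1', 'image': 'nginx:latest', 'node': 'node-1',
         'desired_state': 'Running', 'current_state': 'Running 5 minutes ago',
         'error': '', 'ports': None},
    ],
    'stack_info': [
        {'ID': 'svc1', 'name': 'stack_web', 'mode': 'replicated', 'replicas': '1/1',
         'image': 'nginx:latest'},
    ],
    'nodes_info': [],    # parsed output of `docker node inspect <hostnames>`
    'inspect_info': [],  # parsed output of `docker inspect <task ids>`
}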
#! /usr/bin/env python
# Copyright 2017 --Yang Hu--
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Yang Hu'
import paramiko, os
from vm_info import VmInfo
import logging
# from drip_logging.drip_logging_handler import *
logger = logging.getLogger(__name__)
if not getattr(logger, 'handler_set', None):
logger.setLevel(logging.INFO)
h = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
h.setFormatter(formatter)
logger.addHandler(h)
logger.handler_set = True
retry = 0
def deploy_compose(vm, compose_file, compose_name, docker_login):
try:
logger.info("Starting docker compose deployment on: " + vm.ip)
paramiko.util.log_to_file("deployment.log")
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(vm.ip, username=vm.user, key_filename=vm.key, timeout=30)
sftp = ssh.open_sftp()
sftp.chdir('/tmp/')
sftp.put(compose_file, "docker-compose.yml")
if (docker_login):
# stdin, stdout, stderr = ssh.exec_command("sudo docker stack rm %s" % (compose_name))
# stdout.read()
# err = stderr.read()
# sleep 5
stdin, stdout, stderr = ssh.exec_command(
"sudo docker login -u " + docker_login['username'] + " -p " + docker_login['password'] + " " +
docker_login[
'registry'] + " && sudo docker stack deploy --with-registry-auth --compose-file /tmp/docker-compose.yml %s" % (
compose_name))
else:
# stdin, stdout, stderr = ssh.exec_command("sudo docker stack rm %s" % (compose_name))
# stdout.read()
# err = stderr.read()
cmd = "sudo docker stack deploy --with-registry-auth --compose-file /tmp/docker-compose.yml " + compose_name
logger.info("Sendding : " + cmd)
stdin, stdout, stderr = ssh.exec_command(cmd)
out = stdout.read()
err = stderr.read()
logger.info("stderr from: " + vm.ip + " " + err)
logger.info("stdout from: " + vm.ip + " " + out)
logger.info("Finished docker compose deployment on: " + vm.ip)
except Exception as e:
global retry
if retry < 10:
logger.warning(vm.ip + " " + str(e) + ". Retrying")
retry += 1
return deploy_compose(vm, compose_file, compose_name, docker_login)
logger.error(vm.ip + " " + str(e))
return "ERROR:" + vm.ip + " " + str(e)
ssh.close()
retry = 0
return "SUCCESS"
def run(vm_list, compose_file, compose_name, rabbitmq_host, owner, docker_login):
    # rabbit = DRIPLoggingHandler(host=rabbitmq_host, port=5672, user=owner)
    # logger.addHandler(rabbit)
for i in vm_list:
if i.role == "master":
ret = deploy_compose(i, compose_file, compose_name, docker_login)
if "ERROR" in ret:
return ret
else:
swarm_file = open(i.key)
ret = swarm_file.read()
swarm_file.close()
break
return ret
# ! /usr/bin/env python
# Copyright 2017 --Yang Hu--
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Yang Hu'
import paramiko, os
import threading
import logging
# from drip_logging.drip_logging_handler import *
import multiprocessing
logger = logging.getLogger(__name__)
if not getattr(logger, 'handler_set', None):
logger.setLevel(logging.INFO)
h = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
h.setFormatter(formatter)
logger.addHandler(h)
logger.handler_set = True
retry = 0
def install_engine(vm, return_dict):
try:
logger.info("Starting docker engine installation on: " + (vm.ip))
paramiko.util.log_to_file("deployment.log")
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(vm.ip, username=vm.user, key_filename=vm.key, timeout=30)
stdin, stdout, stderr = ssh.exec_command("sudo dpkg --get-selections | grep docker")
temp_list = stdout.readlines()
temp_str = ""
for i in temp_list: temp_str += i
if temp_str.find("docker") != -1:
logger.info("Docker engine arleady installated on: " + (vm.ip) + " Skiping")
return_dict[vm.ip] = "SUCCESS"
return "SUCCESS"
sftp = ssh.open_sftp()
sftp.chdir('/tmp/')
file_path = os.path.dirname(os.path.abspath(__file__))
install_script = file_path + "/" + "docker_engine.sh"
sftp.put(install_script, "engine_setup.sh")
stdin, stdout, stderr = ssh.exec_command("sudo sh /tmp/engine_setup.sh")
output = stdout.read()
logger.info("stdout: " + (output))
output = stderr.read()
logger.info("stderr: " + (output))
logger.info("Finised docker engine installation on: " + (vm.ip))
except Exception as e:
global retry
if retry < 10:
logger.warning(vm.ip + " " + str(e) + ". Retrying")
retry += 1
return install_engine(vm, return_dict)
logger.error(vm.ip + " " + str(e))
return_dict[vm.ip] = "ERROR:" + vm.ip + " " + str(e)
return "ERROR:" + vm.ip + " " + str(e)
ssh.close()
retry = 0
return_dict[vm.ip] = "SUCCESS"
return "SUCCESS"
def run(vm_list, rabbitmq_host, owner):
    # rabbit = DRIPLoggingHandler(host=rabbitmq_host, port=5672, user=owner)
    # logger.addHandler(rabbit)
manager = multiprocessing.Manager()
return_dict = manager.dict()
jobs = []
for i in vm_list:
# ret = install_engine(i)
p = multiprocessing.Process(target=install_engine, args=(i, return_dict,))
jobs.append(p)
p.start()
for proc in jobs:
proc.join()
if "ERROR" in return_dict.values(): return "ERROR"
# if "ERROR" in ret: return ret
return "SUCCESS"
#! /bin/bash
sudo apt-get update
sudo apt-get -y install linux-image-extra-$(uname -r) linux-image-extra-virtual
sudo apt-get -y install apt-transport-https ca-certificates curl software-properties-common
sudo curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo apt-key fingerprint 0EBFCD88
sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
sudo apt-get -y update
sudo apt-get -y install docker-ce
#sudo echo "{ \"insecure-registries\":[\"129.7.98.3:5000\"] }" > /etc/docker/daemon.json
#sudo service docker restart
sudo docker plugin install --grant-all-permissions weaveworks/net-plugin:latest_release
sudo docker plugin disable weaveworks/net-plugin:latest_release
sudo docker plugin set weaveworks/net-plugin:latest_release WEAVE_MULTICAST=1
sudo docker plugin enable weaveworks/net-plugin:latest_release
sudo wget -O /usr/local/bin/weave https://github.com/weaveworks/weave/releases/download/latest_release/weave
sudo chmod a+x /usr/local/bin/weave
sudo weave status
sudo curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | sudo apt-key add
sudo add-apt-repository "deb http://apt.kubernetes.io/ kubernetes-xenial main"
sudo apt-get update
sudo apt-get install -y kubelet kubeadm kubectl kubernetes-cni
curl -L https://github.com/kubernetes/kompose/releases/download/v1.7.0/kompose-linux-amd64 -o kompose
chmod +x kompose
sudo mv ./kompose /usr/local/bin/kompose
# ! /usr/bin/env python
# Copyright 2017 --Yang Hu--
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Yang Hu'
import paramiko, os
from vm_info import VmInfo
import linecache
import sys
import logging
import time
# from drip_logging.drip_logging_handler import *
from os import listdir
from os.path import isfile, join
logger = logging.getLogger(__name__)
if not getattr(logger, 'handler_set', None):
logger.setLevel(logging.INFO)
h = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
h.setFormatter(formatter)
logger.addHandler(h)
logger.handler_set = True
retry = 0
def print_exception():
exc_type, exc_obj, tb = sys.exc_info()
f = tb.tb_frame
lineno = tb.tb_lineno
filename = f.f_code.co_filename
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
    print('EXCEPTION IN ({}, LINE {} "{}"): {}'.format(filename, lineno, line.strip(), exc_obj))
def install_manager(vm):
try:
logger.info("Starting kubernetes master installation on: " + (vm.ip))
parentDir = os.path.dirname(os.path.abspath(vm.key))
os.chmod(parentDir, 0o700)
os.chmod(vm.key, 0o600)
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(vm.ip, username=vm.user, key_filename=vm.key)
sftp = ssh.open_sftp()
file_path = os.path.dirname(os.path.abspath(__file__))
sftp.chdir('/tmp/')
install_script = file_path + "/" + "docker_kubernetes.sh"
sftp.put(install_script, "kubernetes_setup.sh")
# stdin, stdout, stderr = ssh.exec_command("sudo hostname ip-%s" % (vm.ip.replace('.','-')))
# stdout.read()
stdin, stdout, stderr = ssh.exec_command("sudo sh /tmp/kubernetes_setup.sh > log 2>&1")
out = stdout.read()
out = stderr.read()
# stdin, stdout, stderr = ssh.exec_command("sudo kubeadm kubernetes-xenialreset --force")
# stdout.read()
stdin, stdout, stderr = ssh.exec_command("sudo kubeadm reset --force >> log 2>&1")
stdout.read()
# stdin, stdout, stderr = ssh.exec_command("sudo kubeadm init --apiserver-advertise-address=%s" % (vm.ip))
stdin, stdout, stderr = ssh.exec_command("sudo kubeadm init")
retstr = stdout.readlines()
stdin, stdout, stderr = ssh.exec_command("mkdir -p $HOME/.kube")
stdout.read()
stdin, stdout, stderr = ssh.exec_command("sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config")
stdout.read()
stdin, stdout, stderr = ssh.exec_command("sudo chown $(id -u):$(id -g) $HOME/.kube/config")
stdout.read()
stdin, stdout, stderr = ssh.exec_command("sudo sysctl net.bridge.bridge-nf-call-iptables=1")
retstr = stdout.readlines()
stdin, stdout, stderr = ssh.exec_command(
"kubectl apply -f \"https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')\"")
retstr = stdout.readlines()
stdin, stdout, stderr = ssh.exec_command(
"kubectl taint nodes --all node-role.kubernetes.io/master-")
retstr = stdout.readlines()
stdin, stdout, stderr = ssh.exec_command("sudo chown %s /tmp/admin.conf" % (vm.user))
stdout.read()
stdin, stdout, stderr = ssh.exec_command("sudo chgrp %s /tmp/admin.conf" % (vm.user))
stdout.read()
# sftp.get("/tmp/admin.conf", file_path + "/admin.conf")
logger.info("Finished kubernetes master installation on: " + (vm.ip))
except Exception as e:
global retry
# print '%s: %s' % (vm.ip, e)
logger.error(vm.ip + " " + str(e))
print_exception()
return "ERROR:" + vm.ip + " " + str(e)
ssh.close()
return retstr[-1]
def install_worker(join_cmd, vm):
try:
logger.info("Starting kubernetes slave installation on: " + (vm.ip))
logger.info("User: " + vm.user + " key file: " + vm.key)
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(vm.ip, username=vm.user, key_filename=vm.key, timeout=30)
parentDir = os.path.dirname(os.path.abspath(vm.key))
os.chmod(parentDir, 0o700)
os.chmod(vm.key, 0o600)
sftp = ssh.open_sftp()
sftp.chdir('/tmp/')
file_path = os.path.dirname(os.path.abspath(__file__))
install_script = file_path + "/" + "docker_kubernetes.sh"
sftp.put(install_script, "kubernetes_setup.sh")
# stdin, stdout, stderr = ssh.exec_command("sudo hostname ip-%s" % (vm.ip.replace('.', '-')))
# stdout.read()
stdin, stdout, stderr = ssh.exec_command("sudo sh /tmp/kubernetes_setup.sh")
stdout.read()
stdin, stdout, stderr = ssh.exec_command("sudo kubeadm reset")
stdout.read()
stdin, stdout, stderr = ssh.exec_command("sudo %s" % (join_cmd))
stdout.read()
logger.info("Finished kubernetes slave installation on: " + (vm.ip))
except Exception as e:
# print '%s: %s' % (vm.ip, e)
logger.error(vm.ip + " " + str(e))
return "ERROR:" + vm.ip + " " + str(e)
ssh.close()
return "SUCCESS"
def deploy_on_master(deployment_file, vm):
try:
k8s_files = [f for f in listdir(deployment_file) if isfile(join(deployment_file, f))]
logger.info("Starting deployment on: " + (vm.ip))
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(vm.ip, username=vm.user, key_filename=vm.key, timeout=30)
parentDir = os.path.dirname(os.path.abspath(vm.key))
os.chmod(parentDir, 0o700)
os.chmod(vm.key, 0o600)
stdin, stdout, stderr = ssh.exec_command("mkdir /tmp/k8s")
stdout.read()
sftp = ssh.open_sftp()
sftp.chdir('/tmp/k8s')
file_path = os.path.dirname(os.path.abspath(__file__))
for f in k8s_files:
k8s_file = deployment_file + "/" + f
sftp.put(k8s_file, f)
stdin, stdout, stderr = ssh.exec_command("kubectl create -f /tmp/k8s/ >> log 2>&1")
s_out = stdout.read()
e_out = stderr.read()
time.sleep(2)
# cmd = 'kubectl get svc --all-namespaces -o go-template=\'{{range .items}}{{range.spec.ports}}{{if .nodePort}}{{.nodePort}}{{"\\n"}}{{end}}{{end}}{{end}}\''
cmd = 'kubectl get svc --output json'
stdin, stdout, stderr = ssh.exec_command(cmd)
e_out = stderr.read()
json_output = stdout.read()
# exposed_ports_str = ''
# for port in exposed_ports:
# exposed_ports_str += port + ','
# exposed_ports_str = exposed_ports_str[:-1]
except Exception as e:
# print '%s: %s' % (vm.ip, e)
logger.error(vm.ip + " " + str(e))
return "ERROR:" + vm.ip + " " + str(e)
ssh.close()
return json_output
def deploy(vm_list, deployment_file):
for i in vm_list:
if i.role == "master":
return deploy_on_master(deployment_file, i)
def run(vm_list, rabbitmq_host, owner):
# rabbit = DRIPLoggingHandler(host=rabbitmq_host, port=5672,user=owner)
# logger.addHandler(rabbit)
for i in vm_list:
if i.role == "master":
join_cmd = install_manager(i)
if "ERROR" in join_cmd:
return join_cmd
else:
join_cmd = join_cmd.encode()
join_cmd = join_cmd.strip()
break
for i in vm_list:
if i.role == "slave":
worker_cmd = install_worker(join_cmd, i)
if "ERROR" in worker_cmd:
return worker_cmd
file_path = os.path.dirname(os.path.abspath(__file__))
kuber_file = open(file_path + "/admin.conf", "r")
kuber_string = kuber_file.read()
kuber_file.close()
return kuber_string
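deploy_on_master above returns the raw string produced by `kubectl get svc --output json`. A small sketch (assumptions: the output parses as a standard Kubernetes ServiceList) of how a caller could pull the exposed NodePorts out of it, in the spirit of the commented-out go-template query:

import json

def list_node_ports(kubectl_json_output):
    # Parse the ServiceList returned by deploy()/deploy_on_master and collect NodePorts.
    services = json.loads(kubectl_json_output)
    node_ports = []
    for item in services.get('items', []):
        for port in item.get('spec', {}).get('ports', []):
            if 'nodePort' in port:
                node_ports.append((item['metadata']['name'], port['nodePort']))
    return node_ports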
#! /bin/bash
sudo kubeadm reset --force
sudo sed -i -re 's/([a-z]{2}\.)?archive.ubuntu.com|security.ubuntu.com/old-releases.ubuntu.com/g' /etc/apt/sources.list
sudo apt-get update && apt-get install -y apt-transport-https ca-certificates curl software-properties-common
sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo apt-get update && apt-get install -y --allow-unauthenticated docker-ce=18.06.2~ce~3-0~ubuntu
echo "{\n \"exec-opts\": [\"native.cgroupdriver=systemd\"], \n \"log-driver\": \"json-file\", \n \"log-opts\": {\"max-size\": \"100m\"}, \n \"storage-driver\": \"overlay2\" \n}" | sudo tee /etc/docker/daemon.json
sudo mkdir -p /etc/systemd/system/docker.service.d
sudo systemctl daemon-reload
sudo systemctl restart docker
sudo apt-get update && apt-get install -y apt-transport-https curl
sudo curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
sudo apt-get update && apt-get install -y kubelet kubeadm kubectl
sudo apt-mark hold kubelet kubeadm kubectl
#! /usr/bin/env python
# Copyright 2017 --Yang Hu--
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Yang Hu'
import paramiko, os
from vm_info import VmInfo
import logging
# from drip_logging.drip_logging_handler import *
import multiprocessing
logger = logging.getLogger(__name__)
if not getattr(logger, 'handler_set', None):
logger.setLevel(logging.INFO)
h = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
h.setFormatter(formatter)
logger.addHandler(h)
logger.handler_set = True
retry=0
def scale_service(vm, application_name, service_name, service_num):
try:
logger.info("Starting docker service scaling on: "+vm.ip)
paramiko.util.log_to_file("deployment.log")
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(vm.ip, username=vm.user, key_filename=vm.key,timeout=30)
stdin, stdout, stderr = ssh.exec_command("sudo docker service scale %s_%s=%s" % (application_name, service_name, service_num))
stdout.read()
logger.info("Finished docker service scaling on: "+vm.ip)
except Exception as e:
global retry
if retry < 10:
logger.warning(vm.ip + " " + str(e)+". Retrying")
retry+=1
return scale_service(vm, application_name, service_name, service_num)
logger.error(vm.ip + " " + str(e))
return "ERROR:" + vm.ip + " " + str(e)
ssh.close()
return "SUCCESS"
def run(vm_list, application_name, service_name, service_num, rabbitmq_host, owner):
    # rabbit = DRIPLoggingHandler(host=rabbitmq_host, port=5672, user=owner)
    # logger.addHandler(rabbit)
for i in vm_list:
if i.role == "master":
ret = scale_service(i, application_name, service_name, service_num)
if "ERROR" in ret:
return ret
break
return ret
\ No newline at end of file
#! /usr/bin/env python
# Copyright 2017 --Yang Hu--
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__author__ = 'Yang Hu'
import paramiko, os
from vm_info import VmInfo
import logging
# from drip_logging.drip_logging_handler import *
import multiprocessing
logger = logging.getLogger(__name__)
if not getattr(logger, 'handler_set', None):
logger.setLevel(logging.INFO)
h = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
h.setFormatter(formatter)
logger.addHandler(h)
logger.handler_set = True
retry=0
def install_manager(vm):
try:
logger.info("Starting swarm manager installation on: "+(vm.ip))
paramiko.util.log_to_file("deployment.log")
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(vm.ip, username=vm.user, key_filename=vm.key,timeout=30)
stdin, stdout, stderr = ssh.exec_command("sudo docker info | grep 'Swarm'")
temp_list1 = stdout.readlines()
stdin, stdout, stderr = ssh.exec_command("sudo docker info | grep 'Is Manager'")
temp_list2 = stdout.readlines()
if temp_list1[0].find("Swarm: active") != -1:
if temp_list2[0].find("Is Manager: true") != -1:
stdin, stdout, stderr = ssh.exec_command("sudo docker swarm join-token worker")
retstr = stdout.readlines()
return retstr[2].encode()
stdin, stdout, stderr = ssh.exec_command("sudo docker swarm leave --force")
stdout.read()
stdin, stdout, stderr = ssh.exec_command("sudo docker swarm init --advertise-addr %s" % (vm.ip))
stdout.read()
stdin, stdout, stderr = ssh.exec_command("sudo docker swarm join-token worker")
retstr = stdout.readlines()
ret = retstr[2].encode()
logger.info("Finished swarm manager installation on: "+(vm.ip))
except Exception as e:
global retry
if retry < 10:
logger.warning(vm.ip + " " + str(e)+". Retrying")
retry+=1
return install_manager(vm)
logger.error(vm.ip + " " + str(e))
return "ERROR:" + vm.ip + " " + str(e)
ssh.close()
retry=0
return ret
def install_worker(join_cmd, vm,return_dict):
try:
logger.info("Starting swarm worker installation on: "+(vm.ip))
paramiko.util.log_to_file("deployment.log")
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(vm.ip, username=vm.user, key_filename=vm.key,timeout=30)
stdin, stdout, stderr = ssh.exec_command("sudo docker info | grep 'Swarm'")
temp_list1 = stdout.readlines()
if temp_list1[0].find("Swarm: active") != -1:
logger.info("Swarm worker arleady installated on: "+(vm.ip)+" Skiping")
return_dict[vm.ip] = "SUCCESS"
return "SUCCESS"
stdin, stdout, stderr = ssh.exec_command("sudo docker swarm leave --force")
stdout.read()
stdin, stdout, stderr = ssh.exec_command("sudo %s" % (join_cmd))
stdout.read()
logger.info("Finished swarm worker installation on: "+(vm.ip))
except Exception as e:
global retry
if retry < 10:
logger.warning(vm.ip + " " + str(e)+". Retrying")
retry+=1
return install_worker(join_cmd, vm,return_dict)
logger.error(vm.ip + " " + str(e))
return_dict[vm.ip] = "ERROR:"+vm.ip+" "+str(e)
return "ERROR:" + vm.ip + " " + str(e)
ssh.close()
retry=0
return_dict[vm.ip] = "SUCCESS"
return "SUCCESS"
def connect_wave(vm_list,master):
try:
logger.info("Starting wave connection on: "+(master.ip))
paramiko.util.log_to_file("deployment.log")
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
for i in vm_list:
if i.role == "slave":
ssh.connect(master.ip, username=master.user, key_filename=master.key)
logger.info("weave connect "+(i.ip))
stdin, stdout, stderr = ssh.exec_command("sudo weave connect "+i.ip)
out = stdout.read()
err = stderr.read()
logger.info("stdout: "+(out))
logger.info("stderr: "+(err))
ssh.connect(i.ip, username=master.user, key_filename=i.key)
stdin, stdout, stderr = ssh.exec_command("sudo weave connect "+master.ip)
out = stdout.read()
err = stderr.read()
logger.info("stdout: "+(out))
logger.info("stderr: "+(err))
logger.info("Finished wave connection on: "+(master.ip))
except Exception as e:
global retry
if retry < 10:
logger.warning(master.ip + " " + str(e)+". Retrying")
retry+=1
return connect_wave(vm_list,master)
logger.error(master.ip + " " + str(e))
return "ERROR:" + master.ip + " " + str(e)
ssh.close()
retry=0
return "SUCCESS"
def run(vm_list, rabbitmq_host, owner):
    # rabbit = DRIPLoggingHandler(host=rabbitmq_host, port=5672, user=owner)
    # logger.addHandler(rabbit)
master = None
for i in vm_list:
if i.role == "master":
master = i
join_cmd = install_manager(i)
if "ERROR:" in join_cmd:
return join_cmd
parentDir = os.path.dirname(os.path.abspath(i.key))
os.chmod(parentDir, 0o700)
os.chmod(i.key, 0o600)
swarm_file = open(i.key)
swarm_string = swarm_file.read()
swarm_file.close()
break
manager = multiprocessing.Manager()
return_dict = manager.dict()
jobs = []
for i in vm_list:
if i.role == "slave":
p = multiprocessing.Process(target=install_worker, args=(join_cmd, i,return_dict,))
jobs.append(p)
p.start()
for proc in jobs:
proc.join()
connect_wave(vm_list,master)
if "ERROR" in return_dict.values(): return "ERROR"
return swarm_string
\ No newline at end of file
import json
import logging
import pika
from python_logging_rabbitmq import RabbitMQHandler
class DRIPLoggingHandler(RabbitMQHandler):
def __init__(self, host='localhost', port=5672, username=None, password=None, user=None):
super(DRIPLoggingHandler, self).__init__(host=host, port=port, username=username, password=password)
self.user = user
def open_connection(self):
self.sequenceNumber = 0
"""
Connect to RabbitMQ.
"""
# Set logger for pika.
# See if something went wrong connecting to RabbitMQ.
handler = logging.StreamHandler()
handler.setFormatter(self.formatter)
rabbitmq_logger = logging.getLogger('pika')
rabbitmq_logger.addHandler(handler)
rabbitmq_logger.propagate = False
rabbitmq_logger.setLevel(logging.WARNING)
if not self.connection or self.connection.is_closed:
self.connection = pika.BlockingConnection(pika.ConnectionParameters(** self.connection_params))
if not self.channel or self.channel.is_closed:
self.channel = self.connection.channel()
self.channel.queue_declare(queue='log_qeue_' + self.user, durable=True)
# Manually remove logger to avoid shutdown message.
rabbitmq_logger.removeHandler(handler)
def emit(self, record):
self.acquire()
try:
if not self.connection or self.connection.is_closed or not self.channel or self.channel.is_closed:
self.open_connection()
queue='log_qeue_' + self.user
self.channel.basic_publish(
exchange='',
routing_key=queue,
body=self.format(record),
properties=pika.BasicProperties(
delivery_mode=2)
)
except Exception:
self.channel, self.connection = None, None
self.handleError(record)
finally:
if self.close_after_emit:
self.close_connection()
self.release()
def format(self, record):
drip_record = {}
drip_record['timestamp'] = record.created
drip_record['owner'] = 'user'
drip_record['level'] = record.levelname
drip_record['loggerName'] = record.module
drip_record['message'] = record.message
drip_record['millis'] = record.created
self.sequenceNumber += 1
drip_record['sequenceNumber'] = self.sequenceNumber
drip_record['sourceClassName'] = record.module
drip_record['sourceMethodName'] = record.funcName
return json.dumps(drip_record)
\ No newline at end of file
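A minimal usage sketch for the handler above (assumptions: a reachable RabbitMQ broker and a consumer on the 'log_qeue_<user>' queue it declares; the import path is taken from the commented-out imports elsewhere in this commit):

import logging
from drip_logging.drip_logging_handler import DRIPLoggingHandler  # path as in the commented imports

logger = logging.getLogger('deployer')
logger.setLevel(logging.INFO)
# Every record emitted below is published as a JSON message to RabbitMQ.
logger.addHandler(DRIPLoggingHandler(host='127.0.0.1', port=5672, user='test_user'))
logger.info('docker engine installation finished')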
pika==1.1.0
names==0.3.0
networkx==2.4
ansible==2.9.2
\ No newline at end of file
#!/usr/bin/env python
# Copyright 2017 S. Koulouzis
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ansible.plugins.callback import CallbackBase
__author__ = 'S. Koulouzis'
class ResultsCollector(CallbackBase):
def __init__(self, *args, **kwargs):
super(ResultsCollector, self).__init__(*args, **kwargs)
self.host_ok = []
self.host_unreachable = []
self.host_failed = []
def v2_runner_on_unreachable(self, result, ignore_errors=False):
name = result._host.get_name()
task = result._task.get_name()
#self.host_unreachable[result._host.get_name()] = result
self.host_unreachable.append(dict(ip=name, task=task, result=result))
def v2_runner_on_ok(self, result, *args, **kwargs):
name = result._host.get_name()
task = result._task.get_name()
if task == "setup":
pass
elif "Info" in task:
self.host_ok.append(dict(ip=name, task=task, result=result))
else:
self.host_ok.append(dict(ip=name, task=task, result=result))
def v2_runner_on_failed(self, result, *args, **kwargs):
name = result._host.get_name()
task = result._task.get_name()
self.host_failed.append(dict(ip=name, task=task, result=result))
\ No newline at end of file
#!/usr/bin/env python
import base64
import json
import logging
import os
import os.path
# import ansible_playbook
import sys
import time
from threading import Thread
from time import sleep
import pika
# import ansible_playbook
import docker_check
import docker_compose
import docker_engine
import docker_kubernetes
import docker_service
import docker_swarm
from vm_info import VmInfo
global rabbitmq_host
if len(sys.argv) > 1:
rabbitmq_host = sys.argv[1]
else:
rabbitmq_host = '127.0.0.1'
logger = logging.getLogger(__name__)
if not getattr(logger, 'handler_set', None):
logger.setLevel(logging.INFO)
h = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
h.setFormatter(formatter)
logger.addHandler(h)
logger.handler_set = True
connection = pika.BlockingConnection(pika.ConnectionParameters(host=rabbitmq_host))
channel = connection.channel()
channel.queue_declare(queue='deployer_queue')
done = False
def threaded_function(args):
while not done:
connection.process_data_events()
sleep(5)
def handle_delivery(message):
parsed_json = json.loads(message)
owner = parsed_json['owner']
params = parsed_json["parameters"]
node_num = 0
vm_list = set()
current_milli_time = lambda: int(round(time.time() * 1000))
try:
path = os.path.dirname(os.path.abspath(__file__)) + "/deployer_files/" + str(current_milli_time()) + "/"
except NameError:
import sys
path = os.path.dirname(os.path.abspath(sys.argv[0])) + "/deployer_files/" + str(current_milli_time()) + "/"
if not os.path.exists(path):
os.makedirs(path)
for param in params:
name = param["name"]
if name == "cluster":
manager_type = param["value"]
elif name == "credential":
value = param["value"]
value = base64.b64decode(value)
ip = param["attributes"]["IP"]
user = param["attributes"]["user"]
role = param["attributes"]["role"]
node_num += 1
key = path + "%d.txt" % node_num
fo = open(key, "w")
fo.write(value)
fo.close()
parentDir = os.path.dirname(os.path.abspath(key))
os.chmod(parentDir, 0o700)
os.chmod(key, 0o600)
vm = VmInfo(ip, user, key, role)
vm_list.add(vm)
elif name.startswith('k8s_'):
value = param["value"]
value = base64.b64decode(value)
k8s_folder = path + "/k8s/"
if not os.path.exists(k8s_folder):
os.makedirs(k8s_folder)
deployment_file = k8s_folder + name+".yml"
fo = open(deployment_file, "w")
fo.write(value)
fo.close()
elif name == "playbook":
value = param["value"]
playbook = path + "playbook.yml"
fo = open(playbook, "w")
fo.write(value)
fo.close()
elif name == "composer":
value = param["value"]
compose_file = path + "docker-compose.yml"
if not param["attributes"] is None and not param["attributes"]["name"] is None:
compose_name = param["attributes"]["name"]
docker_login = {}
if 'docker_login' in param["attributes"]:
docker_login['username'] = param["attributes"]["docker_login_username"]
docker_login['password'] = param["attributes"]["docker_login_password"]
docker_login['registry'] = param["attributes"]["docker_login_registry"]
docker_login = param["attributes"]["docker_login"]
else:
current_milli_time = lambda: int(round(time.time() * 1000))
compose_name = "service_" + str(current_milli_time())
fo = open(compose_file, "w")
fo.write(value)
fo.close()
elif name == "scale":
name_of_deployment = param["value"]
name_of_service = param["attributes"]["service"]
number_of_containers = param["attributes"]["number_of_containers"]
elif name == "swarm_info":
compose_name = param["attributes"]["name"]
if manager_type == "kubernetes":
ret = docker_kubernetes.run(vm_list, rabbitmq_host, owner)
ret = docker_kubernetes.deploy(vm_list, k8s_folder)
return ret
elif manager_type == "swarm":
ret = docker_engine.run(vm_list, rabbitmq_host, owner)
if "ERROR" in ret: return ret
ret = docker_swarm.run(vm_list, rabbitmq_host, owner)
if "ERROR" in ret: return ret
ret = docker_compose.run(vm_list, compose_file, compose_name, rabbitmq_host, owner, docker_login)
return ret
elif manager_type == "ansible":
ret = ansible_playbook.run(vm_list, playbook, rabbitmq_host, owner)
return ret
elif manager_type == "scale":
ret = docker_service.run(vm_list, name_of_deployment, name_of_service, number_of_containers, rabbitmq_host,
owner)
return ret
elif manager_type == "swarm_info":
ret = docker_check.run(vm_list, compose_name, rabbitmq_host, owner)
ret = '"' + json.dumps(ret) + '"'
return ret
else:
return "ERROR: invalid cluster"
def on_request(ch, method, props, body):
ret = handle_delivery(body)
parsed_json = json.loads(body)
params = parsed_json["parameters"]
for param in params:
name = param["name"]
if name == "cluster":
manager_type = param["value"]
break
    if not ret or "ERROR" in str(ret):
res_name = "error"
elif manager_type == "ansible":
res_name = "ansible_output"
elif manager_type == "scale":
res_name = "scale_status"
elif manager_type == "swarm_info":
res_name = "swarm_info"
elif manager_type == "kubernetes":
res_name = "kubectl_get"
else:
res_name = "credential"
response = {}
outcontent = {}
current_milli_time = lambda: int(round(time.time() * 1000))
response["creationDate"] = current_milli_time()
response["parameters"] = []
par = {}
par["url"] = "null"
par["encoding"] = "UTF-8"
par["name"] = res_name
par["value"] = base64.b64encode(ret)
par["attributes"] = "null"
response["parameters"].append(par)
response = json.dumps(response)
logger.info("Response: " + response)
ch.basic_publish(exchange='',
routing_key=props.reply_to,
properties=pika.BasicProperties(correlation_id= \
props.correlation_id),
body=str(response))
ch.basic_ack(delivery_tag=method.delivery_tag)
channel.basic_qos(prefetch_count=1)
channel.basic_consume(queue='deployer_queue', on_message_callback=on_request)
thread = Thread(target=threaded_function, args=(1,))
thread.start()
logger.info("Awaiting RPC requests")
try:
channel.start_consuming()
except KeyboardInterrupt:
# thread.stop()
done = True
thread.join()
logger.info("Threads successfully closed")
def get_interfaces(tosca_template_json):
return None
\ No newline at end of file
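get_interfaces above is still a stub that returns None (and handle_delivery does not yet use its result). A hedged sketch, assuming the toscaTemplate JSON follows the usual topology_template/node_templates layout, of what it might eventually collect (illustrative only, not the project's final implementation):

def get_interfaces_sketch(tosca_template_json):
    # Collect the 'interfaces' section of every node template, keyed by node name.
    interfaces = {}
    node_templates = (tosca_template_json or {}).get(
        'topology_template', {}).get('node_templates', {})
    for node_name, node in node_templates.items():
        if 'interfaces' in node:
            interfaces[node_name] = node['interfaces']
    return interfaces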
from setuptools import setup, find_packages
setup(
name='drip_deployer',
version='0.1',
packages=find_packages(),
# Fill in these to make your Egg ready for upload to
# PyPI
author='S. Koulouzis',
author_email='',
# summary = 'Just another Python package for the cheese shop',
url='',
license='',
long_description='Long description of the package',
# could also include long_description, download_url, classifiers, etc.
)
#!/usr/bin/env python
import pika
import json
import os
import time
from vm_info import VmInfo
import docker_kubernetes
import docker_engine
import docker_swarm
import docker_compose
import docker_service
import docker_check
import control_agent
import ansible_playbook
import sys, argparse
from threading import Thread
from time import sleep
import os.path
import logging
from os.path import expanduser
home = expanduser("~")
playbook_path=home+"/Downloads/playbook.yml"
playbook_path=sys.argv[1] #home+"/Downloads/playbook.yml"
ip = "147.228.242.97"
ip = sys.argv[2] #"147.228.242.97"
user="vm_user"
role = "master"
ssh_key_file=home+"/Downloads/id_rsa"
ssh_key_file = sys.argv[3] #home+"/Downloads/id_rsa"
vm_list = set()
vm = VmInfo(ip, user, ssh_key_file, role)
vm_list.add(vm)
rabbit_mq_host = sys.argv[4] #rabbit_mq_host
print(sys.argv)
print("playbook_path: " + playbook_path)
print("ip: " + ip)
print("ssh_key_file: " + ssh_key_file)
print("rabbit_mq_host: " + rabbit_mq_host)
ret = ansible_playbook.run(vm_list,playbook_path,rabbit_mq_host,"owner")
\ No newline at end of file
#! /usr/bin/env python
# Copyright 2017 --Yang Hu--
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
class VmInfo:
def __init__(self, ip = "", user = "", key = "", role = ""):
self.ip = ip
self.user = user
self.key = key
self.role = role
def displayVm(self):
print "IP:", self.ip, " USER:", self.user
<?xml version="1.0" encoding="UTF-8"?>
<project version="4">
  <component name="ChangeListManager">
-    <list default="true" id="e478ccae-5352-4e8e-9efb-3f5cda44e877" name="Default Changelist" comment="">
-      <change beforePath="$PROJECT_DIR$/.idea/workspace.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/workspace.xml" afterDir="false" />
-    </list>
+    <list default="true" id="e478ccae-5352-4e8e-9efb-3f5cda44e877" name="Default Changelist" comment="" />
    <option name="SHOW_DIALOG" value="false" />
    <option name="HIGHLIGHT_CONFLICTS" value="true" />
    <option name="HIGHLIGHT_NON_ACTIVE_CHANGELIST" value="false" />
...@@ -269,26 +267,26 @@
    </state>
    <state x="792" y="334" width="827" height="663" key="FileChooserDialogImpl/67.34.1853.1046@67.34.1853.1046" timestamp="1578326180157" />
    <state x="1043" y="437" width="530" height="598" key="FileChooserDialogImpl/67.34.2493.1406@67.34.2493.1406" timestamp="1575907769017" />
-    <state width="1825" height="263" key="GridCell.Tab.0.bottom" timestamp="1578584059031">
-      <screen x="67" y="34" width="1853" height="1046" />
+    <state width="2465" height="381" key="GridCell.Tab.0.bottom" timestamp="1578935126552">
+      <screen x="67" y="34" width="2493" height="1406" />
    </state>
    <state width="1825" height="263" key="GridCell.Tab.0.bottom/67.34.1853.1046@67.34.1853.1046" timestamp="1578584059031" />
-    <state width="2465" height="382" key="GridCell.Tab.0.bottom/67.34.2493.1406@67.34.2493.1406" timestamp="1578250751968" />
+    <state width="2465" height="381" key="GridCell.Tab.0.bottom/67.34.2493.1406@67.34.2493.1406" timestamp="1578935126552" />
-    <state width="1825" height="263" key="GridCell.Tab.0.center" timestamp="1578584059030">
-      <screen x="67" y="34" width="1853" height="1046" />
+    <state width="2465" height="381" key="GridCell.Tab.0.center" timestamp="1578935126549">
+      <screen x="67" y="34" width="2493" height="1406" />
    </state>
    <state width="1825" height="263" key="GridCell.Tab.0.center/67.34.1853.1046@67.34.1853.1046" timestamp="1578584059030" />
-    <state width="2465" height="382" key="GridCell.Tab.0.center/67.34.2493.1406@67.34.2493.1406" timestamp="1578250751968" />
+    <state width="2465" height="381" key="GridCell.Tab.0.center/67.34.2493.1406@67.34.2493.1406" timestamp="1578935126549" />
-    <state width="1825" height="263" key="GridCell.Tab.0.left" timestamp="1578584059030">
-      <screen x="67" y="34" width="1853" height="1046" />
+    <state width="2465" height="381" key="GridCell.Tab.0.left" timestamp="1578935126545">
+      <screen x="67" y="34" width="2493" height="1406" />
    </state>
    <state width="1825" height="263" key="GridCell.Tab.0.left/67.34.1853.1046@67.34.1853.1046" timestamp="1578584059030" />
-    <state width="2465" height="382" key="GridCell.Tab.0.left/67.34.2493.1406@67.34.2493.1406" timestamp="1578250751968" />
+    <state width="2465" height="381" key="GridCell.Tab.0.left/67.34.2493.1406@67.34.2493.1406" timestamp="1578935126545" />
-    <state width="1825" height="263" key="GridCell.Tab.0.right" timestamp="1578584059030">
-      <screen x="67" y="34" width="1853" height="1046" />
+    <state width="2465" height="381" key="GridCell.Tab.0.right" timestamp="1578935126550">
+      <screen x="67" y="34" width="2493" height="1406" />
    </state>
    <state width="1825" height="263" key="GridCell.Tab.0.right/67.34.1853.1046@67.34.1853.1046" timestamp="1578584059030" />
-    <state width="2465" height="382" key="GridCell.Tab.0.right/67.34.2493.1406@67.34.2493.1406" timestamp="1578250751968" />
+    <state width="2465" height="381" key="GridCell.Tab.0.right/67.34.2493.1406@67.34.2493.1406" timestamp="1578935126550" />
    <state width="2465" height="413" key="GridCell.Tab.1.bottom" timestamp="1577720249209">
      <screen x="67" y="34" width="2493" height="1406" />
    </state>
......