Commit 30e1928d authored by Spiros Koulouzis's avatar Spiros Koulouzis

get all info of nodes

parent d2898192
......@@ -38,9 +38,10 @@ topology_template:
# Just in case we need it later on.
MONITORING_PROXY2: { get_property: [SELF, "Monitoring Proxy", "monitoring/proxy/api/endpoint" ] }
"Notifier_output":
type: "JSON"
Side_type: "JSON"
host_port: SET_ITS_VALUE
container_port: SET_ITS_VALUE
protocol: "tcp"
Environment_variables:
ENV1: ...
#So this is given for all containers?
......@@ -52,6 +53,7 @@ topology_template:
scaling_mode: single
requirements:
- host:
# Future work...
node: "VM_1"
node_filter:
capabilities:
......@@ -71,12 +73,12 @@ topology_template:
mount_type: r/rw
- monitoring_proxy: "Monitoring Proxy"
- sip_notifier: "SIPNotifier"
# So this(below) should just map to
# volumes:
# graphite_data:
# inside docker compose.
"Volume_ID":
# So I do not know what information is needed for this, as it is my first time hearing about it
# I suggest we look at docker compose for it and extrapolate from there.
requirements:
- host:
node: "VM_1"
type: "Switch.Volume"
"SIPNotifier":
artifacts:
sipnotifier_image:
......@@ -394,7 +396,6 @@ topology_template:
type: linux
distribution: rhel
version: 6.5
"VM_5":
# Stub: I think this should be generated by the DRIP-planner, but we could have a diamond so SIDE can make it.
type: Switch.node.Compute
......
No preview for this file type
This diff is collapsed.
......@@ -24,18 +24,14 @@ import logging
import linecache
import sys
# create logger
logger = logging.getLogger('docker_swarm')
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
# Module-level logger, named after this module (replaces the shared
# 'docker_swarm' logger name so log records identify their origin).
# The 'handler_set' attribute is used as a one-shot flag: getLogger()
# returns the same logger object on every import, so without the guard
# each re-import would attach another StreamHandler and every message
# would be emitted multiple times.
logger = logging.getLogger(__name__)
if not getattr(logger, 'handler_set', None):
	logger.setLevel(logging.INFO)
	h = logging.StreamHandler()
	formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
	h.setFormatter(formatter)
	logger.addHandler(h)
	# Mark the logger as configured so repeated imports are no-ops.
	logger.handler_set = True
......@@ -80,23 +76,22 @@ def docker_check(vm, compose_name):
if i.encode():
json_str = json.loads(i.encode().strip('\n'))
stack_info.append(json_str)
json_response ['stack_info'] = stack_info
json_response ['stack_info'] = stack_info
cmd = 'sudo docker node inspect '
for hostname in nodes_hostname:
cmd += hostname
logger.info(cmd)
stdin, stdout, stderr = ssh.exec_command(cmd)
inspect_resp = stdout.readlines()
nodes_info = []
#for i in inspect_resp:
#if i.encode():
#json_str = json.loads(i.encode().strip('\n'))
#nodes_info.append(json_str)
#json_response ['nodes_info'] = nodes_info
response_str = ""
for i in inspect_resp:
if i.encode():
response_str+=i.encode().strip('\n')
json_str = json.loads(response_str.rstrip("\n\r").strip().encode('string_escape'))
json_response['nodes_info'] = json_str
logger.info("Finished docker info services on: "+vm.ip)
except Exception as e:
......
......@@ -21,18 +21,14 @@ import paramiko, os
from vm_info import VmInfo
import logging
# create logger
logger = logging.getLogger('docker_swarm')
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
logger = logging.getLogger(__name__)
if not getattr(logger, 'handler_set', None):
logger.setLevel(logging.INFO)
h = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
h.setFormatter(formatter)
logger.addHandler(h)
logger.handler_set = True
def deploy_compose(vm, compose_file, compose_name):
......
......@@ -19,9 +19,22 @@ __author__ = 'Yang Hu'
import paramiko, os
import threading
import logging
# Module-level logger for this module. getLogger(__name__) always returns
# the same logger instance, so the 'handler_set' attribute acts as a
# guard against attaching a duplicate StreamHandler (and thus duplicated
# log lines) when the module is imported more than once.
logger = logging.getLogger(__name__)
if not getattr(logger, 'handler_set', None):
	logger.setLevel(logging.INFO)
	h = logging.StreamHandler()
	formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
	h.setFormatter(formatter)
	logger.addHandler(h)
	# Flag the logger as configured; subsequent imports skip this block.
	logger.handler_set = True
def install_engine(vm):
try:
print "%s: ====== Start Docker Engine Installing ======" % (vm.ip)
logger.info("Starting docker engine installation on: "+(vm.ip))
paramiko.util.log_to_file("deployment.log")
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
......@@ -38,9 +51,9 @@ def install_engine(vm):
sftp.put(install_script, "engine_setup.sh")
stdin, stdout, stderr = ssh.exec_command("sudo sh /tmp/engine_setup.sh")
stdout.read()
print "%s: ========= Docker Engine Installed =========" % (vm.ip)
logger.info("Finished docker engine installation on: "+(vm.ip))
except Exception as e:
print '%s: %s' % (vm.ip, e)
logger.error(vm.ip + " " + str(e))
return "ERROR:"+vm.ip+" "+str(e)
ssh.close()
return "SUCCESS"
......
......@@ -21,18 +21,14 @@ import paramiko, os
from vm_info import VmInfo
import logging
# create logger
logger = logging.getLogger('docker_swarm')
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
logger = logging.getLogger(__name__)
if not getattr(logger, 'handler_set', None):
logger.setLevel(logging.INFO)
h = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
h.setFormatter(formatter)
logger.addHandler(h)
logger.handler_set = True
def scale_service(vm, application_name, service_name, service_num):
......
......@@ -21,18 +21,15 @@ import paramiko, os
from vm_info import VmInfo
import logging
# create logger
logger = logging.getLogger('docker_swarm')
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
logger = logging.getLogger(__name__)
if not getattr(logger, 'handler_set', None):
logger.setLevel(logging.INFO)
h = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
h.setFormatter(formatter)
logger.addHandler(h)
logger.handler_set = True
def install_manager(vm):
......
......@@ -24,18 +24,14 @@ if len(sys.argv) > 1:
else:
rabbitmq_host = '127.0.0.1'
logger = logging.getLogger('rpc_server')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
fh = logging.FileHandler('deployer.log')
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
logger = logging.getLogger(__name__)
if not getattr(logger, 'handler_set', None):
logger.setLevel(logging.INFO)
h = logging.StreamHandler()
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
h.setFormatter(formatter)
logger.addHandler(h)
logger.handler_set = True
connection = pika.BlockingConnection(pika.ConnectionParameters(host=rabbitmq_host))
......@@ -55,7 +51,7 @@ def handleDelivery(message):
parsed_json = json.loads(message)
params = parsed_json["parameters"]
node_num = 0
vm_list = []
vm_list = set()
current_milli_time = lambda: int(round(time.time() * 1000))
try:
path = os.path.dirname(os.path.abspath(__file__)) + "/deployer_files/"+str(current_milli_time()) + "/"
......@@ -86,7 +82,7 @@ def handleDelivery(message):
os.chmod(key, 0o600)
vm = VmInfo(ip, user, key, role)
vm_list.append(vm)
vm_list.add(vm)
elif name == "playbook":
value = param["value"]
playbook = path + "playbook.yml"
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment