# Commit d2898192 authored by Spiros Koulouzis
#
# get IP of containers
#
# parent c214b464
version: '3'
services:
# Platform - SWITCH
switch_monitoring_server:
image: salmant/ul_monitoring_server_container_image
ports:
- "8080:8080" # Optional, for debugging - WEB Interface
- "4242:4242"
- "4245:4245"
- "7199:7199"
- "7000:7000"
- "7001:7001"
- "9160:9160"
- "9042:9042"
- "8012:8012"
- "61621:61621"
switch_monitoring_agent:
image: beia/monitoring_agent
environment:
- MONITORING_SERVER=switch_monitoring_server
- MONITORING_PREFIX=eu.switch.beia # USER CONFIGURABLE!!!
depends_on:
- switch_monitoring_server
switch_alarm_trigger:
image: salmant/ul_alarm_trigger_container_image
environment:
- AlarmTriggerYMLURL=http://194.249.1.72:5000/AlarmTrigger.yml # SIDE GENERATED VALUE!!!
- MONITORING_SERVER=switch_monitoring_server
- JSONAlertURL=https://gurujsonrpc.appspot.com/guru # SIDE GENERATED VALUE!!! - POINTS to another component of ASAP
- JSONAlertURLSIDEGUI=https://gurujsonrpc.appspot.com/guru # SIDE GENERATED VALUE!!!
ports:
- "18080:8080" # Optional, for debugging - Web interface to alarm trigger
depends_on:
- switch_monitoring_server
# APPLICATION - BEIA, MOG, WT ...
topology_template:
node_templates:
"AlertChecker":
artifacts:
alertchecker_image:
type: "tosca.artifacts.Deployment.Image.Container.Docker"
file: "beia/alerter"
repository: SWITCH_docker_hub
type: "Switch.nodes.Application.Container.Docker.VLAD_AlertChecker"
properties:
in_ports:
# So this might be a bit awkward.
# I am uncertain whether it is OK for the names not to be unique.
# The type is just used for validation. If so, ignore it, but let me know that I have wiggle room as far as the format goes.
# If you want a different format I am flexible, as the ports need to be redone anyway.
# Also, the ports were previously defined differently. If that form is preferable, let me know.
"Port_1":
type: "JSON"
# One would imagine Swarm assigns the host ports? Or not? Is there a special value if we do not care?
host_port: SET_ITS_VALUE
container_port: SET_ITS_VALUE
"Port_2":
type: "JSON"
host_port: SET_ITS_VALUE
container_port: SET_ITS_VALUE
"Config_Port":
type: "Control"
host_port: SET_ITS_VALUE
container_port: SET_ITS_VALUE
out_ports:
"Monitoring_proxy_port":
type: "JCatascopiaMagic"
# Are these 2 needed in output ports? I really do not know how it works.
host_port: SET_ITS_VALUE
container_port: SET_ITS_VALUE
# Might be interesting to add something like this, but I do not know how to add it to containers:
# As I was looking through TOSCA I found this bit of "code"; it should return the same as above but with a bit more flexibility.
# Just in case we need it later on.
MONITORING_PROXY2: { get_property: [SELF, "Monitoring Proxy", "monitoring/proxy/api/endpoint" ] }
"Notifier_output":
type: "JSON"
host_port: SET_ITS_VALUE
container_port: SET_ITS_VALUE
Environment_variables:
ENV1: ...
#So this is given for all containers?
TOSCA: side.url:8000/api/234/tosca
MONITORING_PROXY: "Monitoring Proxy"
Live_variables:
#These are the variables that would be dynamic
LiveVar1: ...
scaling_mode: single
requirements:
- host:
node: "VM_1"
node_filter:
capabilities:
host:
cpu_frequency: 1GHz
mem_size: 1GB
num_cpus: 1
disk_size: 1GB
os:
os_version: 16.04
distribution: ubuntu
type: linux
architecture: x86_64
- volume:
name: volume_ID
link: "/var/pool/bla"
mount_type: r/rw
- monitoring_proxy: "Monitoring Proxy"
- sip_notifier: "SIPNotifyer"
"Volume_ID":
# So I do not know what information is needed for this, as it is my first time hearing about it
# I suggest we look at docker compose for it and extrapolate from there.
requirements:
- host:
node: "VM_1"
"SIPNotifier":
artifacts:
sipnotifier_image:
type: "tosca.artifacts.Deployment.Image.Container.Docker"
file: "beia/sipnotifier"
repository: SWITCH_docker_hub
# Is this right, as the information above is in the type(below).
type: "Switch.nodes.Application.Container.Docker.VLAD_AlertChecker"
properties:
in_ports:
"Port_1":
type: "JSON"
host_port: SET_ITS_VALUE
container_port: SET_ITS_VALUE
"SIPN_Control_Port":
type: "Control"
host_port: SET_ITS_VALUE
container_port: SET_ITS_VALUE
out_ports:
"Monitoring_proxy_port":
type: "JCatascopiaMagic"
host_port: SET_ITS_VALUE
container_port: SET_ITS_VALUE
"sip_notifier_output":
type: "JSON"
host_port: SET_ITS_VALUE
container_port: SET_ITS_VALUE
"5cc1a274-ae17-479f-807f-625c6aa6e0f0":
type: out
port: "redis"
Environment_variables:
ENV1: ...
#So this is given for all containers?
TOSCA: side.url:8000/api/234/tosca
MONITORING_PROXY: "Monitoring Proxy"
Live_variables:
#These are the variables that would be dynamic
LiveVar1: ...
# Scaling mode needs to be changeable for scalable components, right?
scaling_mode: single
requirements:
- host:
node: "VM_2"
"Graphite":
artifacts:
graphite_image:
type: "tosca.artifacts.Deployment.Image.Container.Docker"
file: "vladwing/graphite"
repository: SWITCH_docker_hub
type: "Switch.nodes.Application.Container.Docker.VLAD_Graphite"
properties:
in_ports:
"Port_1":
type: "JSON"
host_port: SET_ITS_VALUE
container_port: SET_ITS_VALUE
"Config_Port":
type: "Control"
host_port: SET_ITS_VALUE
container_port: SET_ITS_VALUE
out_ports:
"Monitoring_proxy_port":
type: "JCatascopiaMagic"
host_port: SET_ITS_VALUE
container_port: SET_ITS_VALUE
Environment_variables:
ENV1: ...
#So this is given for all containers?
TOSCA: side.url:8000/api/234/tosca
MONITORING_PROXY: "Monitoring Proxy"
Live_variables:
#These are the variables that would be dynamic
LiveVar1: ...
scaling_mode: single
requirements:
- host:
node: "VM_3"
"monitoring_server":
artifacts:
monitoring_server_image:
type: "tosca.artifacts.Deployment.Image.Container.Docker"
file: "salmant/salman_monitoring_server_container_image"
repository: SWITCH_docker_hub
type: "Switch.nodes.Application.Container.Docker.MonitoringServer"
requirements:
- host:
node_filter:
capabilities:
host:
cpu_frequency: 1.2
mem_size: 4
num_cpus: 1
disk_size: 20
os:
os_version: 16.04
distribution: ubuntu
type: linux
architecture: x86_64
properties:
# Should this be inside properties? I moved it here as it makes more sense
QoS:
response_time: 50
in_ports:
"Monitoring_data_collection":
type: "JCatascopiaMagic"
host_port: SET_ITS_VALUE
container_port: SET_ITS_VALUE
out_ports:
"Monitoring_presentation":
type: "JCatascopiaAPI"
host_port: SET_ITS_VALUE
container_port: SET_ITS_VALUE
# OK, so port mapping is a separate thing? I do not know.
# Why does it need all those ports? Do all containers need ports that are
# not part of the schematic? I wish I knew. Or should these be on the schematic?
ports_mapping:
port_mapping_1:
host_port: 4242
container_port: 4242
port_mapping_0:
host_port: 8080
container_port: 8080
port_mapping_3:
host_port: 7199
container_port: 7199
port_mapping_2:
host_port: 4245
container_port: 4245
port_mapping_5:
host_port: 7001
container_port: 7001
port_mapping_4:
host_port: 7000
container_port: 7000
port_mapping_7:
host_port: 9042
container_port: 9042
port_mapping_6:
host_port: 9160
container_port: 9160
port_mapping_9:
host_port: 61621
container_port: 61621
port_mapping_8:
host_port: 8012
container_port: 8012
scaling_mode: single
requirements:
- host:
node: "VM_4"
"Monitoring Proxy":
monitoring_proxy_image:
type: "tosca.artifacts.Deployment.Image.Container.Docker"
file: "ASAP/monitoring_proxy"
repository: SWITCH_docker_hub
type: "Switch.nodes.Application.Container.Docker.Monitoring_Proxy"
properties:
in_ports:
"Monitoring_proxy_port":
type: "JCatascopiaMagic"
host_port: SET_ITS_VALUE
container_port: SET_ITS_VALUE
out_ports:
"Monitoring_data_collection":
type: "JCatascopiaMagic"
host_port: SET_ITS_VALUE
container_port: SET_ITS_VALUE
Environment_variables:
ENV1: ...
#So this is given for all containers?
TOSCA: side.url:8000/api/234/tosca
MONITORING_SERVER: monitoring_server
scaling_mode: single
requirements:
- host:
node: "VM_5"
"Data Collector":
artifacts:
data_collector_image:
type: "tosca.artifacts.Deployment.Image.Container.Docker"
file: "beia/data_collector"
repository: SWITCH_docker_hub
type: "Switch.nodes.Application.Container.Docker.VLAD_DataCollector"
properties:
in_ports:
"Sensors_in":
type: "Sensor_data"
host_port: SET_ITS_VALUE
container_port: SET_ITS_VALUE
"Config_Port":
type: "Control"
host_port: SET_ITS_VALUE
container_port: SET_ITS_VALUE
out_ports:
"Monitoring_proxy_port":
type: "JCatascopiaMagic"
host_port: SET_ITS_VALUE
container_port: SET_ITS_VALUE
"Data_output":
type: "JSON"
host_port: SET_ITS_VALUE
container_port: SET_ITS_VALUE
Environment_variables:
ENV1: ...
#So this is given for all containers?
TOSCA: side.url:8000/api/234/tosca
MONITORING_PROXY: "Monitoring Proxy"
Live_variables:
#These are the variables that would be dynamic
LiveVar1: ...
scaling_mode: single
requirements:
- host:
node: "VM_6"
"Config Manager":
type: "Switch.nodes.Application.Container.Docker.VLAD_ConfigManager"
artifacts:
sipnotifier_image:
type: "tosca.artifacts.Deployment.Image.Container.Docker"
file: "beia/config_manager"
repository: SWITCH_docker_hub
properties:
out_ports:
"BEIA_Container_control":
type: "Control"
host_port: SET_ITS_VALUE
container_port: SET_ITS_VALUE
"Side_endpoint":
type: "TOSCA"
host_port: SET_ITS_VALUE
container_port: SET_ITS_VALUE
in_ports:
"UI":
type: "Config_file"
host_port: SET_ITS_VALUE
container_port: SET_ITS_VALUE
Environment_variables:
ENV1: ...
#So this is given for all containers?
TOSCA: side.url:8000/api/234/tosca
MONITORING_PROXY: "Monitoring Proxy"
scaling_mode: single
requirements:
- host:
node: "VM_7"
"VM_1":
# Stub. I think this should be generated by the DRIP-planner. But we could have a diamond so SIDE can make it.
type: Switch.node.Compute
capabilities:
# Host container properties
host:
properties:
num_cpus: 1
disk_size: 10 GB
mem_size: 4096 MB
# Guest Operating System properties
os:
properties:
# host Operating System image properties
architecture: x86_64
type: linux
distribution: rhel
version: 6.5
"VM_2":
# Stub. I think this should be generated by the DRIP-planner. But we could have a diamond so SIDE can make it.
type: Switch.node.Compute
capabilities:
# Host container properties
host:
properties:
num_cpus: 1
disk_size: 10 GB
mem_size: 4096 MB
# Guest Operating System properties
os:
properties:
# host Operating System image properties
architecture: x86_64
type: linux
distribution: rhel
version: 6.5
"VM_3":
# Stub. I think this should be generated by the DRIP-planner. But we could have a diamond so SIDE can make it.
type: Switch.node.Compute
capabilities:
# Host container properties
host:
properties:
num_cpus: 1
disk_size: 10 GB
mem_size: 4096 MB
# Guest Operating System properties
os:
properties:
# host Operating System image properties
architecture: x86_64
type: linux
distribution: rhel
version: 6.5
"VM_4":
# Stub. I think this should be generated by the DRIP-planner. But we could have a diamond so SIDE can make it.
type: Switch.node.Compute
capabilities:
# Host container properties
host:
properties:
num_cpus: 1
disk_size: 10 GB
mem_size: 4096 MB
# Guest Operating System properties
os:
properties:
# host Operating System image properties
architecture: x86_64
type: linux
distribution: rhel
version: 6.5
"VM_5":
# Stub. I think this should be generated by the DRIP-planner. But we could have a diamond so SIDE can make it.
type: Switch.node.Compute
capabilities:
# Host container properties
host:
properties:
num_cpus: 1
disk_size: 10 GB
mem_size: 4096 MB
# Guest Operating System properties
os:
properties:
# host Operating System image properties
architecture: x86_64
type: linux
distribution: rhel
version: 6.5
"VM_6":
# Stub. I think this should be generated by the DRIP-planner. But we could have a diamond so SIDE can make it.
type: Switch.node.Compute
capabilities:
# Host container properties
host:
properties:
num_cpus: 1
disk_size: 10 GB
mem_size: 4096 MB
# Guest Operating System properties
os:
properties:
# host Operating System image properties
architecture: x86_64
type: linux
distribution: rhel
version: 6.5
"VM_7":
# Stub. I think this should be generated by the DRIP-planner. But we could have a diamond so SIDE can make it.
type: Switch.node.Compute
capabilities:
# Host container properties
host:
properties:
num_cpus: 1
disk_size: 10 GB
mem_size: 4096 MB
# Guest Operating System properties
os:
properties:
# host Operating System image properties
architecture: x86_64
type: linux
distribution: rhel
version: 6.5
artifact_types:
"tosca.artifacts.Deployment.Image.Container.Docker":
derived_from: "tosca.artifacts.Deployment.Image"
description: "Try of an app"
# So the node_types are not 100% OK (they are about 10% OK).
# But I am not sure what they are, so I need to investigate a bit further.
node_types:
"Switch.nodes.Application.Container.Docker.VLAD_THE_IMPALER_RTUSensorDataAcquisition":
properties:
name:
required: false
type: string
derived_from: "Switch.nodes.Application.Container.Docker"
"tosca.groups.Root":
"Switch.nodes.Application.Container.Docker":
properties:
in_ports:
entry_schema:
type: "Switch.datatypes.port"
required: false
type: map
dockers:
required: false
type: string
QoS:
required: false
type: "Switch.datatypes.QoS.AppComponent"
name:
required: false
type: string
out_ports:
entry_schema:
type: "Switch.datatypes.port"
required: false
type: map
ports_mapping:
entry_schema:
type: "Switch.datatypes.port_mapping"
type: map
scaling_mode:
required: false
type: string
ethernet_port:
entry_schema:
type: "Switch.datatypes.ethernet_port"
required: false
type: list
derived_from: "tosca.nodes.Container.Application"
"Switch.nodes.Compute":
artifacts:
gateway_image:
type: "tosca.artifacts.Deployment.Image.Container.Docker"
repository: SWITCH_docker_hub
file: "/???"
derived_from: "tosca.nodes.Compute"
"Switch.nodes.Application.Container.Docker.VLAD_AlertChecker":
artifacts:
sipnotifier_image:
type: "tosca.artifacts.Deployment.Image.Container.Docker"
repository: SWITCH_docker_hub
file: "beia/sip_notifier"
derived_from: "Switch.nodes.Application.Container.Docker"
"switch.Component.Component.Docker":
derived_from: "Switch.nodes.Application.Container.Docker"
"Switch.nodes.Network":
derived_from: "tosca.nodes.network.Network"
"Switch.nodes.EventListener":
derived_from: "tosca.nodes.Root"
"Switch.nodes.VirtualNetwork":
artifacts:
"switcher.cardiff_image":
type: "tosca.artifacts.Deployment.Image.Container.Docker"
repository: SWITCH_docker_hub
file: null
properties:
subnet:
default: "192.168.10.0"
type: string
netmask:
default: "255.255.255.0"
type: string
name:
type: string
derived_from: "tosca.nodes.Root"
"Switch.nodes.Application.Container.Docker.VLAD_BEIAComponent":
artifacts:
beiacomponent_image:
type: "tosca.artifacts.Deployment.Image.Container.Docker"
repository: SWITCH_docker_hub
file: "beia/hang"
properties:
Environment:
type: string
derived_from: "Switch.nodes.Application.Container.Docker"
"Switch.nodes.MonitoringAgent":
properties:
agent_id:
default: null
type: string
probes:
entry_schema:
type: "Switch.datatypes.monitoring.probe"
type: map
derived_from: "tosca.nodes.Root"
"Switch.nodes.AdaptationPolicy":
derived_from: "tosca.nodes.Root"
"Switch.nodes.Application.Container.Docker.VLAD_Graphite":
artifacts:
webui_image:
type: "tosca.artifacts.Deployment.Image.Container.Docker"
repository: SWITCH_docker_hub
file: "beia/nginx"
properties:
Environment:
entry_schema:
type: string
type: map
derived_from: "Switch.nodes.Application.Container.Docker"
"Switch.nodes.Application.Container.Docker.VLAD_Alerter":
properties:
Environment:
entry_schema:
type: string
type: map
derived_from: "Switch.nodes.Application.Container.Docker"
"Switch.nodes.Component":
derived_from: "tosca.nodes.Root"
"Switch.nodes.Constraint":
requirements:
- monitor_server_endpoint:
node: "Switch.nodes.Application.Container.Docker.MonitoringServer"
capability: "tosca.capabilities.Node"
relationship: "tosca.relationships.DependsOn"
properties:
QoS:
type: "Switch.datatypes.QoS.AppComponent"
derived_from: "tosca.nodes.Root"
"Switch.nodes.Application.Container.Docker.VLAD_SIPNotifier":
derived_from: "Switch.nodes.Application.Container.Docker"
"Switch.nodes.Application.Connection":
properties:
source:
type: "Switch.datatypes.Application.Connection.EndPoint"
bandwidth:
type: integer
multicast:
type: "Switch.datatypes.Network.Multicast"
jitter:
required: false
type: integer
target:
type: "Switch.datatypes.Application.Connection.EndPoint"
latency:
required: false
type: integer
QoS:
type: "Switch.datatypes.QoS.AppComponent"
derived_from: "tosca.nodes.Root"
"Switch.nodes.Requirement":
properties:
host:
type: "Switch.datatypes.hw.host"
os:
type: "Switch.datatypes.hw.os"
derived_from: "tosca.nodes.Root"
"Switch.nodes.ExternalComponent":
derived_from: "tosca.nodes.Root"
"Switch.nodes.DST":
properties:
dave:
type: string
derived_from: "tosca.nodes.Root"
"Switch.nodes.Application.Container.Docker.MonitoringServer":
properties:
ports_mapping:
entry_schema:
type: "Switch.datatypes.port_mapping"
type: map
derived_from: "Switch.nodes.Application.Container.Docker"
"Switch.nodes.Application.Container.Docker.VLAD_DataCollector":
artifacts:
data_collector_image:
type: "tosca.artifacts.Deployment.Image.Container.Docker"
repository: SWITCH_docker_hub
file: "beia/data_collector"
properties:
Environment:
entry_schema:
type: string
type: map
derived_from: "Switch.nodes.Application.Container.Docker"
"Switch.nodes.Application.Container.Docker.VLAD_WebUI":
derived_from: "Switch.nodes.Application.Container.Docker"
"Switch.nodes.Application.Container.Docker.VLAD_Dashboard":
derived_from: "Switch.nodes.Application.Container.Docker"
"Switch.nodes.MessagePasser":
derived_from: "tosca.nodes.Root"
repositories:
SWITCH_docker_hub:
url: "https://hub.docker.com/"
credential:
token_type: "X-Auth-Token"
# SECURITY(review): do not commit real credentials to VCS; inject this token at deploy time instead.
token: 604bbe45ac7143a79e14f3158df67091
protocol: xauth
description: "switch repository in Docker Hub"
data_types:
"Switch.datatypes.monitoring.metric.threshold":
properties:
operator:
type: string
value:
type: integer
derived_from: "tosca.datatypes.Root"
"Switch.datatypes.enviorment.variables":
properties:
variable_name:
type: string
variable_value:
type: string
derived_from: "tosca.datatypes.Root"
"Switch.datatypes.port":
properties:
type:
type: string
port:
type: string
derived_from: "tosca.datatypes.Root"
"Switch.datatypes.Application.Connection.EndPoint":
properties:
netmask:
type: string
component_name:
type: string
port_name:
type: string
address:
type: string
derived_from: "tosca.datatypes.Root"
"Switch.datatypes.monitoring.probe":
properties:
active:
type: boolean
path:
required: false
type: string
static:
type: boolean
name:
type: string
metrics:
entry_schema:
type: "Switch.datatypes.monitoring.metric"
type: map
derived_from: "tosca.datatypes.Root"
"Switch.datatypes.hw.host":
properties:
cpu_frequency:
type: float
mem_size:
type: integer
num_cpus:
type: integer
disk_size:
type: integer
derived_from: "tosca.datatypes.Root"
"Switch.datatypes.ethernet_port":
properties:
subnet_name:
type: string
name:
type: string
address:
type: string
derived_from: "tosca.datatypes.Root"
"Switch.datatypes.hw.os":
properties:
os_version:
type: string
distribution:
type: string
type:
type: string
architecture:
type: string
derived_from: "tosca.datatypes.Root"
"Switch.datatypes.QoS.AppComponent":
properties:
response_time:
type: integer
derived_from: "tosca.datatypes.Root"
"Switch.datatypes.Application.Connection.Multicast":
properties:
multicastAddrPort:
type: string
multicastAddrIP:
type: string
derived_from: "tosca.datatypes.Root"
"Switch.datatypes.Network.Multicast":
properties:
multicastAddrPort:
type: string
multicastAddrIP:
type: string
derived_from: "tosca.datatypes.Root"
"Switch.datatypes.port_mapping":
properties:
host_port:
type: integer
container_port:
type: integer
derived_from: "tosca.datatypes.Root"
"Switch.datatypes.monitoring.metric":
properties:
thresholds:
entry_schema:
type: "Switch.datatypes.monitoring.metric.threshold"
required: false
type: map
type:
type: string
name:
type: string
unit:
required: false
type: string
derived_from: "tosca.datatypes.Root"
---
tosca_definitions_version: tosca_simple_yaml_1_0
topology_template:
node_templates:
"AlertChecker":
artifacts:
"alertchecker_image":
type: "tosca.artifacts.Deployment.Image.Container.Docker"
file: "beia/alerter"
repository: SWITCH_docker_hub
requirements:
- host:
node_filter:
capabilities:
host:
cpu_frequency: 1GHz
mem_size: 1GB
num_cpus: 1
disk_size: 1GB
os:
os_version: 16.04
distribution: ubuntu
type: linux
architecture: x86_64
type: "Switch.nodes.Application.Container.Docker.PEDRO.SANTOS_InputDistributor.Cardiff"
properties:
scaling_mode: single
QoS:
packet_loss: 0
response_time: 30ms
out_ports:
"24ba657c-8a18-4c7f-aed9-9498d88a85d2":
type: out
port: " 1 "
multicastAddrPort: 3000
inPort: 2000
in_ports:
"92aa59bd-2c48-4f25-a27f-1412dd2cc849":
type: in
port: " inPort "
waitingTime: 5
multicastAddrIP: "225.2.2.0"
ports_mapping:
port_mapping_0:
host_port: "${inPort}"
container_port: "${inPort}"
"573924ff-c075-4dfc-95c4-489e0a189040":
artifacts:
"inputdistributor.cardiff_image":
type: "tosca.artifacts.Deployment.Image.Container.Docker"
file: "mogswitch/InputDistributor:1.0"
repository: SWITCH_docker_hub
requirements:
- host:
node_filter:
capabilities:
host:
cpu_frequency: 1GHz
mem_size: 1GB
num_cpus: 1
disk_size: 1GB
os:
os_version: 16.04
distribution: ubuntu
type: linux
architecture: x86_64
type: "Switch.nodes.Application.Container.Docker.PEDRO.SANTOS_InputDistributor.Cardiff"
properties:
scaling_mode: single
QoS:
packet_loss: 0
response_time: 30ms
out_ports:
"030029d5-63ef-472f-9697-df1380505281":
type: out
port: " 1 "
multicastAddrPort: 3000
inPort: 2000
in_ports:
"40974a48-8bcb-48ba-80a6-4d3c5f26f502":
type: in
port: " inPort "
waitingTime: 5
multicastAddrIP: "225.2.2.2"
ports_mapping:
port_mapping_0:
host_port: "${multicastAddrPort}"
container_port: "${multicastAddrPort}"
"8bc5abcb-ae55-4f82-adc6-21d4a2475381":
type: "Switch.nodes.Application.Connection"
properties:
latency: 20ms
bandwidth: 1Gb
target:
netmask: ""
component_name: "8b5621b5-fbe9-4f4a-b68a-57b00cb07b6a"
port_name: "52d0aa2c-0775-44b1-857e-006817a3d637"
address: ""
source:
netmask: ""
component_name: "573924ff-c075-4dfc-95c4-489e0a189040"
port_name: "030029d5-63ef-472f-9697-df1380505281"
address: ""
"f9a17d09-d8bd-4f95-962a-3d86d71d1332":
type: "Switch.nodes.Application.Connection"
properties:
latency: 20ms
bandwidth: 1Gb
target:
netmask: ""
component_name: "8bffa661-cfa4-49df-ad83-7eba8498b405"
port_name: "2e50ce74-9396-4058-8402-997d196c9fd7"
address: ""
source:
netmask: ""
component_name: "573924ff-c075-4dfc-95c4-489e0a189040"
port_name: "030029d5-63ef-472f-9697-df1380505281"
address: ""
"a52c6766-6805-4c62-ab71-e2353388d4d5":
type: "Switch.nodes.Application.Connection"
properties:
latency: 20ms
bandwidth: 1Gb
target:
netmask: ""
component_name: "432dc3f6-4d1c-4092-a365-7837ef92279c"
port_name: "7c36b4c1-58e1-4011-b00f-7641e09b2e5a"
address: ""
source:
netmask: ""
component_name: "8b5621b5-fbe9-4f4a-b68a-57b00cb07b6a"
port_name: "88d8db9a-7621-440c-8c80-370a2020b810"
address: ""
"bce936d3-fee5-4217-9c44-710e78c94a70":
type: "Switch.nodes.Application.Connection"
properties:
latency: 20ms
bandwidth: 1Gb
target:
netmask: ""
component_name: "8b5621b5-fbe9-4f4a-b68a-57b00cb07b6a"
port_name: "a75bc9ef-b485-4db7-8226-9c3d76ae8a9e"
address: ""
source:
netmask: ""
component_name: "6be727ce-5fa1-4e6d-9a37-438d0ec94ff0"
port_name: "24ba657c-8a18-4c7f-aed9-9498d88a85d2"
address: ""
"8b5621b5-fbe9-4f4a-b68a-57b00cb07b6a":
requirements:
- host:
node_filter:
capabilities:
host:
cpu_frequency: 3GHz
mem_size: 1GB
num_cpus: 1
disk_size: 1GB
os:
os_version: 16.04
distribution: ubuntu
type: linux
architecture: x86_64
type: "Switch.nodes.Application.Container.Docker.PEDRO.SANTOS_Switcher.Cardiff"
properties:
scaling_mode: single
QoS:
packet_loss: 0
response_time: 30ms
switcherOutAddrIP: "226.2.2.2"
multicastAddrPort: 3000
multicastAddrPort2: 3002
in_ports:
"a75bc9ef-b485-4db7-8226-9c3d76ae8a9e":
type: in
port: " switcherREST "
"62bdc6c5-b179-4661-bda7-a5f94ee17247":
type: in
port: " 3 "
"52d0aa2c-0775-44b1-857e-006817a3d637":
type: in
port: " 2 "
waitingTime: 5
multicastAddrIP: "225.2.2.0"
switcherOutAddrPort: 6000
multicastAddrIP2: "225.2.2.2"
switcherREST: 8008
out_ports:
"88d8db9a-7621-440c-8c80-370a2020b810":
type: out
port: " 1 "
ports_mapping:
port_mapping_0:
host_port: "${switcherREST}"
container_port: "${switcherREST}"
videoWidth: 176
videoHeight: 100
"5a7c790f-e0cd-4eae-9145-85d2d9aadc66":
type: "Switch.nodes.Application.Connection"
properties:
latency: 20ms
bandwidth: 1Gb
target:
netmask: ""
component_name: "7855f235-7cd8-4091-8acc-f1df70bae6e2"
port_name: "4e52e9f4-19d2-4b32-96ee-67bd5775ab93"
address: ""
source:
netmask: ""
component_name: "8b5621b5-fbe9-4f4a-b68a-57b00cb07b6a"
port_name: "88d8db9a-7621-440c-8c80-370a2020b810"
address: ""
"7855f235-7cd8-4091-8acc-f1df70bae6e2":
requirements:
- host:
node_filter:
capabilities:
host:
cpu_frequency: 3GHz
mem_size: 1GB
num_cpus: 2
disk_size: 1GB
os:
os_version: 16.04
distribution: ubuntu
type: linux
architecture: x86_64
type: "Switch.nodes.Application.Container.Docker.PEDRO.SANTOS_ProxyTranscoder"
properties:
scaling_mode: single
QoS:
packet_loss: 0
response_time: 30ms
out_ports:
"60eea86c-64ed-48da-b991-6bf7c49c861c":
type: out
port: " outPort "
multicastAddrPort: 3000
in_ports:
"4e52e9f4-19d2-4b32-96ee-67bd5775ab93":
type: in
port: " 1 "
multicastAddrIP: "225.2.2.0"
ports_mapping:
port_mapping_0:
host_port: 8085
container_port: 80
port_mapping_1:
host_port: 8086
container_port: 81
"849ea5ba-9938-46aa-8d3d-d525f5d87102":
type: "Switch.nodes.Application.Connection"
properties:
latency: 20ms
bandwidth: 1Gb
target:
netmask: ""
component_name: "58ed77c3-8397-4e27-a4ab-0be0fee0569c"
port_name: "6f14898d-007d-419c-a41b-02099611f77f"
address: ""
source:
netmask: ""
component_name: "6be727ce-5fa1-4e6d-9a37-438d0ec94ff0"
port_name: "24ba657c-8a18-4c7f-aed9-9498d88a85d2"
address: ""
"8bffa661-cfa4-49df-ad83-7eba8498b405":
requirements:
- host:
node_filter:
capabilities:
host:
cpu_frequency: 3GHz
mem_size: 1GB
num_cpus: 2
disk_size: 1GB
os:
os_version: 16.04
distribution: ubuntu
type: linux
architecture: x86_64
type: "Switch.nodes.Application.Container.Docker.PEDRO.SANTOS_ProxyTranscoder"
properties:
scaling_mode: single
QoS:
packet_loss: 0
response_time: 30ms
out_ports:
"4bd7ba2b-6f49-4648-9c9c-cdd118e011f3":
type: out
port: " outPort "
multicastAddrPort: 3000
in_ports:
"2e50ce74-9396-4058-8402-997d196c9fd7":
type: in
port: " 1 "
multicastAddrIP: "225.2.2.0"
ports_mapping:
port_mapping_0:
host_port: 8081
container_port: 80
"58ed77c3-8397-4e27-a4ab-0be0fee0569c":
requirements:
- host:
node_filter:
capabilities:
host:
cpu_frequency: 3GHz
mem_size: 1GB
num_cpus: 2
disk_size: 1GB
os:
os_version: 16.04
distribution: ubuntu
type: linux
architecture: x86_64
type: "Switch.nodes.Application.Container.Docker.PEDRO.SANTOS_ProxyTranscoder"
properties:
scaling_mode: single
QoS:
packet_loss: 0
response_time: 30ms
out_ports:
"1e8c4827-5dcc-40c6-840b-f14f09f9ae28":
type: out
port: " outPort "
multicastAddrPort: 3000
in_ports:
"6f14898d-007d-419c-a41b-02099611f77f":
type: in
port: " 1 "
multicastAddrIP: "225.2.2.2"
ports_mapping:
port_mapping_0:
host_port: 8082
container_port: 80
"432dc3f6-4d1c-4092-a365-7837ef92279c":
requirements:
- host:
node_filter:
capabilities:
host:
cpu_frequency: 3GHz
mem_size: 1GB
num_cpus: 2
disk_size: 1GB
os:
os_version: 16.04
distribution: ubuntu
type: linux
architecture: x86_64
type: "Switch.nodes.Application.Container.Docker.PEDRO.SANTOS_OutputTranscoder.Cardiff"
properties:
OutIP: "192.168.1.194"
QoS:
packet_loss: 0
response_time: 30ms
out_ports:
"78fec7a2-e803-4e4a-b5e2-b0afcdb7d3f1":
type: out
port: " outPort "
scaling_mode: single
OutPort: 4000
in_ports:
"7c36b4c1-58e1-4011-b00f-7641e09b2e5a":
type: in
port: " 1 "
ports_mapping:
port_mapping_0:
host_port: "${OutPort}"
container_port: "${OutPort}"
videoWidth: 176
videoHeight: 100
artifact_types:
"tosca.artifacts.Deployment.Image.Container.Docker":
derived_from: "tosca.artifacts.Deployment.Image"
description: "MOG use case"
node_types:
"Switch.nodes.Application.Container.Docker.PEDRO.SANTOS.MOG_Input_Distributor":
properties:
Input_RTP_TS_Port:
default: 2000
type: string
Waiting_Time:
default: 5
type: string
Output_Uncompressed_Video_Multicast_Address:
default: "225.2.2.0"
type: string
Output_Uncompressed_Video_Multicast_Port:
default: 3000
type: string
derived_from: "Switch.nodes.Application.Container.Docker"
"Switch.nodes.Application.Container.Docker.PEDRO.SANTOS_ProxyTranscoder":
properties:
multicastAddrPort:
default: 3000
type: "Switch.datatypes.port"
multicastAddrIP:
default: "225.2.2.0"
type: "Switch.datatypes.Network.Multicast"
derived_from: "Switch.nodes.Application.Container.Docker"
"Switch.nodes.Application.Container.Docker.VLAD_THE_IMPALER_RTUSensorDataAcquisition":
properties:
name:
required: false
type: string
derived_from: "Switch.nodes.Application.Container.Docker"
"Switch.nodes.Constraint":
requirements:
- monitor_server_endpoint:
node: "Switch.nodes.Application.Container.Docker.MonitoringServer"
capability: "tosca.capabilities.Node"
relationship: "tosca.relationships.DependsOn"
properties:
QoS:
type: "Switch.datatypes.QoS.AppComponent"
derived_from: "tosca.nodes.Root"
"Switch.nodes.Compute":
artifacts:
gateway_image:
type: "tosca.artifacts.Deployment.Image.Container.Docker"
repository: SWITCH_docker_hub
file: "/???"
derived_from: "tosca.nodes.Compute"
"Switch.nodes.Application.Container.Docker.PEDRO.SANTOS_InputDistributor":
properties:
inPort:
default: 2000
type: "Switch.datatypes.port"
derived_from: "Switch.nodes.Application.Container.Docker"
"Switch.nodes.Application.Container.Docker.BEIA_V1_RTUSensorDataAcquisitions":
derived_from: "Switch.nodes.Application.Container.Docker"
"tosca.groups.Root":
"Switch.nodes.Application.Container.Docker.PEDRO.SANTOS_InputDistributor.Cardiff":
properties:
multicastAddrPort:
default: 3000
type: "Switch.datatypes.port"
multicastAddrIP:
default: "225.2.2.0"
type: "Switch.datatypes.Network.Multicast"
inPort:
default: 2000
type: "Switch.datatypes.port"
waitingTime:
default: 5
type: integer
derived_from: "Switch.nodes.Application.Container.Docker"
"Switch.nodes.Application.Container.Docker.BEIA_Gateway":
properties:
Name:
default: BEIA_Gateway
type: string
derived_from: "Switch.nodes.Application.Container.Docker"
"Switch.nodes.Application.Container.Docker.BEIA_RTUSensorDataAcquisition":
derived_from: "Switch.nodes.Application.Container.Docker"
"Switch.nodes.Requirement":
properties:
host:
type: "Switch.datatypes.hw.host"
os:
type: "Switch.datatypes.hw.os"
derived_from: "tosca.nodes.Root"
"Switch.nodes.Application.Container.Docker.BEIA_RTUSensorData":
derived_from: "Switch.nodes.Application.Container.Docker"
"Switch.nodes.Application.Container.Docker.MOG_InputDistributor":
properties:
waitingTime:
default: 5
type: integer
multicastAddrIP:
default: "225.2.2.0"
type: string
multicastAddrPort:
default: 3000
type: integer
videoWidth:
default: 170
type: integer
inPort:
default: 2000
type: integer
videoHeight:
default: 100
type: integer
derived_from: "Switch.nodes.Application.Container.Docker"
"Switch.nodes.Application.Connection":
properties:
source:
type: "Switch.datatypes.Application.Connection.EndPoint"
bandwidth:
type: integer
multicast:
type: "Switch.datatypes.Network.Multicast"
jitter:
required: false
type: integer
target:
type: "Switch.datatypes.Application.Connection.EndPoint"
latency:
required: false
type: integer
QoS:
type: "Switch.datatypes.QoS.AppComponent"
derived_from: "tosca.nodes.Root"
"Switch.nodes.Application.Container.Docker":
  # Base type for every SWITCH Docker container component in this file;
  # concrete components derive from this and add their own properties.
  properties:
    in_ports:
      entry_schema:
        type: "Switch.datatypes.port"
      required: false
      type: map
    dockers:
      # presumably the Docker image reference — not demonstrated here; verify.
      required: false
      type: string
    QoS:
      required: false
      type: "Switch.datatypes.QoS.AppComponent"
    name:
      required: false
      type: string
    out_ports:
      entry_schema:
        type: "Switch.datatypes.port"
      required: false
      type: map
    ports_mapping:
      # host-port/container-port pairs. NOTE(review): unlike every other
      # collection here this one is not marked `required: false` — confirm
      # it is really mandatory for all derived components.
      entry_schema:
        type: "Switch.datatypes.port_mapping"
      type: map
    scaling_mode:
      required: false
      type: string
    ethernet_port:
      entry_schema:
        type: "Switch.datatypes.ethernet_port"
      required: false
      type: list
  derived_from: "tosca.nodes.Container.Application"
"Switch.nodes.Application.Container.Docker.BEIA_V1_RTUSensorDataAcquisition":
  derived_from: "Switch.nodes.Application.Container.Docker"
"Switch.nodes.ExternalComponent":
  derived_from: "tosca.nodes.Root"
"Switch.nodes.Application.Container.Docker.PEDRO.SANTOS.MOG_Switcher":
  # MOG video switcher: two uncompressed multicast inputs (A/B), one
  # multicast output plus a REST control port.
  # FIX: most properties here are declared `type: string`, but their
  # defaults were bare numbers (176, 3002, 8008, ...), which YAML parses as
  # integers — a value/type mismatch. The numeric defaults of string-typed
  # properties are now quoted; `port` is integer-typed and stays bare.
  properties:
    Input_A_Uncompressed_Video_Multicast_Address:
      default: "225.2.2.0"
      type: string
    Input_Video_Width:
      default: "176"
      type: string
    Input_B_Uncompressed_Video_Multicast_Port:
      default: "3002"
      type: string
    Input_B_Uncompressed_Video_Multicast_Address:
      default: "225.2.2.1"
      type: string
    port:
      default: 23
      type: integer
    Output_Uncompressed_Video_Multicast_Address:
      default: "226.2.2.2"
      type: string
    Output_REST_PORT:
      default: "8008"
      type: string
    Output_Uncompressed_Video_Multicast_Port:
      default: "6000"
      type: string
    Input_Video_Height:
      default: "100"
      type: string
    Input_A_Uncompressed_Video_Multicast_Port:
      default: "3000"
      type: string
    Waiting_Time:
      default: "5"
      type: string
  derived_from: "Switch.nodes.Application.Container.Docker"
"Switch.nodes.DST":
  properties:
    dave:
      # NOTE(review): placeholder-looking property name — confirm it is real.
      type: string
  derived_from: "tosca.nodes.Root"
"Switch.nodes.Application.Container.Docker.BEIA_V1_NotificationServer":
  derived_from: "Switch.nodes.Application.Container.Docker"
"Switch.nodes.Application.Container.Docker.LOKSORR_Bb":
  properties:
    bb:
      # NOTE(review): placeholder-looking property name — confirm it is real.
      type: string
  derived_from: "Switch.nodes.Application.Container.Docker"
"Switch.nodes.Application.Container.Docker.BEIA_RTUSensorDataManagement":
  derived_from: "Switch.nodes.Application.Container.Docker"
"Switch.nodes.Application.Container.Docker.BEIA_NotificationServer":
  derived_from: "Switch.nodes.Application.Container.Docker"
"Switch.nodes.Component":
  derived_from: "tosca.nodes.Root"
"Switch.nodes.Network":
  derived_from: "tosca.nodes.network.Network"
"Switch.nodes.Application.Container.Docker.PEDRO.SANTOS_Input":
  properties:
    port2:
      default: 24
      type: integer
  derived_from: "Switch.nodes.Application.Container.Docker"
"Switch.nodes.Application.Container.Docker.BEIA_V1_Monitoring":
  derived_from: "Switch.nodes.Application.Container.Docker"
"Switch.nodes.Application.Container.Docker.UL_JitsiMeet_docker":
  properties:
    ips:
      type: string
    deploy:
      type: string
  derived_from: "Switch.nodes.Application.Container.Docker"
"Switch.nodes.Application.Container.Docker.BEIA_Acquisition":
  derived_from: "Switch.nodes.Application.Container.Docker"
"Switch.nodes.Application.Container.Docker.BEIA_DB":
  derived_from: "Switch.nodes.Application.Container.Docker"
"Switch.nodes.Application.Container.Docker.BEIA_V1_DatabaseServer":
  derived_from: "Switch.nodes.Application.Container.Docker"
"Switch.nodes.VirtualNetwork":
  artifacts:
    # NOTE(review): a Docker image artifact named "switcher.cardiff_image"
    # on a *network* node type looks misplaced, and its `file` is null —
    # confirm this artifact belongs here at all.
    "switcher.cardiff_image":
      type: "tosca.artifacts.Deployment.Image.Container.Docker"
      repository: SWITCH_docker_hub
      file: null
  properties:
    subnet:
      default: "192.168.10.0"
      type: string
    netmask:
      default: "255.255.255.0"
      type: string
    name:
      type: string
  derived_from: "tosca.nodes.Root"
"Switch.nodes.Application.Container.Docker.PEDRO.SANTOS_Switcher.Cardiff":
  # Cardiff variant of the MOG switcher.
  # NOTE(review): several properties below declare a complex datatype
  # (Switch.datatypes.port has {type, port}; Switch.datatypes.Network.Multicast
  # has {multicastAddrIP, multicastAddrPort}) but supply a scalar default
  # (e.g. 6000, "225.2.2.0"). A strict TOSCA validator would reject these —
  # confirm whether scalar defaults are intended and fix the declared types
  # (or the defaults) accordingly.
  properties:
    waitingTime:
      default: 5
      type: integer
    multicastAddrIP:
      default: "225.2.2.0"
      type: "Switch.datatypes.Network.Multicast"
    switcherREST:
      default: switcherREST
      type: "Switch.datatypes.port"
    switcherOutAddrPort:
      default: 6000
      type: "Switch.datatypes.port"
    multicastAddrIP2:
      default: "225.2.2.2"
      type: "Switch.datatypes.Network.Multicast"
    switcherOutAddrIP:
      default: "226.2.2.2"
      type: "Switch.datatypes.Network.Multicast"
    multicastAddrPort:
      default: 3000
      type: "Switch.datatypes.port"
    videoWidth:
      default: 176
      type: integer
    multicastAddrPort2:
      default: 3002
      type: "Switch.datatypes.port"
    videoHeight:
      default: 100
      type: integer
  derived_from: "Switch.nodes.Application.Container.Docker"
"Switch.nodes.EventListener":
  derived_from: "tosca.nodes.Root"
"Switch.nodes.MonitoringAgent":
  # Monitoring agent carrying a map of named probes.
  properties:
    agent_id:
      # null default on a string property: effectively "unset until assigned".
      default: null
      type: string
    probes:
      entry_schema:
        type: "Switch.datatypes.monitoring.probe"
      type: map
  derived_from: "tosca.nodes.Root"
"Switch.nodes.Application.Container.Docker.BEIA_V1_TelemetryGateway":
  derived_from: "Switch.nodes.Application.Container.Docker"
"Switch.nodes.AdaptationPolicy":
  derived_from: "tosca.nodes.Root"
"Switch.nodes.Application.Container.Docker.MonitoringServer":
  # Monitoring server container. Re-declares ports_mapping even though the
  # base Docker type already has an identical declaration — presumably
  # redundant; confirm before removing.
  properties:
    ports_mapping:
      entry_schema:
        type: "Switch.datatypes.port_mapping"
      type: map
  derived_from: "Switch.nodes.Application.Container.Docker"
"Switch.nodes.MessagePasser":
  derived_from: "tosca.nodes.Root"
"Switch.nodes.Application.Container.Docker.PEDRO.SANTOS_OutputTranscoder.Cardiff":
  # NOTE(review): OutIP/OutPort declare complex datatypes but supply scalar
  # defaults (same mismatch as the Switcher.Cardiff type) — confirm intended.
  properties:
    OutIP:
      default: "192.168.1.194"
      type: "Switch.datatypes.Network.Multicast"
    videoWidth:
      default: 176
      type: integer
    OutPort:
      default: 4000
      type: "Switch.datatypes.port"
    videoHeight:
      default: 100
      type: integer
  derived_from: "Switch.nodes.Application.Container.Docker"
repositories:
  SWITCH_docker_hub:
    # NOTE(review): the repository is named "docker_hub" but the URL points
    # at GitHub — confirm which registry this actually is.
    url: "https://github.com/switch-project"
    credential:
      token_type: "X-Auth-Token"
      # SECURITY: a live-looking auth token is committed in plain text.
      # Move it to a secret store / environment injection and rotate it.
      token: 604bbe45ac7143a79e14f3158df67091
      protocol: xauth
    description: "switch repository in GitHub"
# Custom TOSCA data types referenced by the node types above.
data_types:
  "Switch.datatypes.monitoring.metric.threshold":
    properties:
      operator:
        type: string
      value:
        type: integer
    derived_from: "tosca.datatypes.Root"
  "Switch.datatypes.port":
    properties:
      type:
        type: string
      # NOTE(review): port is typed string, while Switch.datatypes.port_mapping
      # uses integers for its ports — consider unifying.
      port:
        type: string
    derived_from: "tosca.datatypes.Root"
  "Switch.datatypes.Application.Connection.EndPoint":
    properties:
      netmask:
        type: string
      component_name:
        type: string
      port_name:
        type: string
      address:
        type: string
    derived_from: "tosca.datatypes.Root"
  "Switch.datatypes.monitoring.probe":
    properties:
      active:
        type: boolean
      path:
        required: false
        type: string
      static:
        type: boolean
      name:
        type: string
      metrics:
        entry_schema:
          type: "Switch.datatypes.monitoring.metric"
        type: map
    derived_from: "tosca.datatypes.Root"
  "Switch.datatypes.hw.host":
    properties:
      cpu_frequency:
        type: float
      mem_size:
        type: integer
      num_cpus:
        type: integer
      disk_size:
        type: integer
    derived_from: "tosca.datatypes.Root"
  "Switch.datatypes.ethernet_port":
    properties:
      subnet_name:
        type: string
      name:
        type: string
      address:
        type: string
    derived_from: "tosca.datatypes.Root"
  "Switch.datatypes.hw.os":
    properties:
      os_version:
        type: string
      distribution:
        type: string
      type:
        type: string
      architecture:
        type: string
    derived_from: "tosca.datatypes.Root"
  "Switch.datatypes.QoS.AppComponent":
    properties:
      response_time:
        type: integer
    derived_from: "tosca.datatypes.Root"
  # NOTE(review): the next two data types are field-for-field identical;
  # Application.Connection.Multicast looks like a duplicate of
  # Network.Multicast — consider consolidating on one of them.
  "Switch.datatypes.Application.Connection.Multicast":
    properties:
      multicastAddrPort:
        type: string
      multicastAddrIP:
        type: string
    derived_from: "tosca.datatypes.Root"
  "Switch.datatypes.Network.Multicast":
    properties:
      multicastAddrPort:
        type: string
      multicastAddrIP:
        type: string
    derived_from: "tosca.datatypes.Root"
  "Switch.datatypes.port_mapping":
    properties:
      host_port:
        type: integer
      container_port:
        type: integer
    derived_from: "tosca.datatypes.Root"
  "Switch.datatypes.monitoring.metric":
    properties:
      thresholds:
        entry_schema:
          type: "Switch.datatypes.monitoring.metric.threshold"
        required: false
        type: map
      type:
        type: string
      name:
        type: string
      unit:
        required: false
        type: string
    derived_from: "tosca.datatypes.Root"
# NOTE(review): TOSCA convention places tosca_definitions_version as the
# first line of the file; it sits at the very bottom here. YAML mappings are
# order-insensitive, but strict TOSCA parsers may reject this — confirm.
tosca_definitions_version: tosca_simple_yaml_1_0
\ No newline at end of file
No preview for this file type
......@@ -39,6 +39,7 @@ import yaml
import sys
from results_collector import ResultsCollector
def install_prerequisites(vm):
try:
......
2017-10-24 17:02:29,283 - rpc_server - INFO - Awaiting RPC requests
2017-10-24 17:02:39,292 - rpc_server - INFO - Threads successfully closed
......@@ -20,11 +20,28 @@ __author__ = 'Yang Hu'
import paramiko, os
from vm_info import VmInfo
import json
import logging
import linecache
import sys
# create logger
logger = logging.getLogger('docker_swarm')
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
def docker_check(vm, compose_name):
try:
print "%s: ====== Start Check Docker Services ======" % (vm.ip)
logger.info("Starting docker info services on: "+vm.ip)
paramiko.util.log_to_file("deployment.log")
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
......@@ -46,9 +63,12 @@ def docker_check(vm, compose_name):
stdin, stdout, stderr = ssh.exec_command(cmd)
stack_ps_resp = stdout.readlines()
services_info = []
nodes_hostname = set()
for i in stack_ps_resp:
if i.encode():
json_str = json.loads(i.encode().strip('\n'))
if json_str['node'] not in nodes_hostname:
nodes_hostname.add(json_str['node'])
services_info.append(json_str)
json_response ['services_info'] = services_info
stack_format = '\'{"ID":"{{.ID}}","name":"{{.Name}}","mode":"{{.Mode}}","replicas":"{{.Replicas}}","image":"{{.Image}}"}\''
......@@ -62,9 +82,34 @@ def docker_check(vm, compose_name):
stack_info.append(json_str)
json_response ['stack_info'] = stack_info
print "%s: =========== Check Finished ==============" % (vm.ip)
cmd = 'sudo docker node inspect '
for hostname in nodes_hostname:
cmd += hostname
logger.info(cmd)
stdin, stdout, stderr = ssh.exec_command(cmd)
inspect_resp = stdout.readlines()
nodes_info = []
#for i in inspect_resp:
#if i.encode():
#json_str = json.loads(i.encode().strip('\n'))
#nodes_info.append(json_str)
#json_response ['nodes_info'] = nodes_info
logger.info("Finished docker info services on: "+vm.ip)
except Exception as e:
print '%s: %s' % (vm.ip, e)
exc_type, exc_obj, tb = sys.exc_info()
f = tb.tb_frame
lineno = tb.tb_lineno
filename = f.f_code.co_filename
linecache.checkcache(filename)
line = linecache.getline(filename, lineno, f.f_globals)
print 'EXCEPTION IN ({}, LINE {} "{}"): {}'.format(filename, lineno, line.strip(), exc_obj)
logger.error(vm.ip + " " + str(e)+ " line:" +lineno)
return "ERROR:" + vm.ip + " " + str(e)
ssh.close()
return json_response
......
......@@ -19,11 +19,25 @@ __author__ = 'Yang Hu'
import paramiko, os
from vm_info import VmInfo
import logging
# create logger
logger = logging.getLogger('docker_swarm')
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
def deploy_compose(vm, compose_file, compose_name):
try:
print "%s: ====== Start Docker Compose Deploying ======" % (vm.ip)
logger.info("Starting docker compose deployment on: "+vm.ip)
paramiko.util.log_to_file("deployment.log")
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
......@@ -33,9 +47,9 @@ def deploy_compose(vm, compose_file, compose_name):
sftp.put(compose_file, "docker-compose.yml")
stdin, stdout, stderr = ssh.exec_command("sudo docker stack deploy --compose-file /tmp/docker-compose.yml %s" % (compose_name))
stdout.read()
print "%s: ======= Deployment of Compose Finished =========" % (vm.ip)
logger.info("Finished docker compose deployment on: "+vm.ip)
except Exception as e:
print '%s: %s' % (vm.ip, e)
logger.error(vm.ip + " " + str(e))
return "ERROR:" + vm.ip + " " + str(e)
ssh.close()
return "SUCCESS"
......
......@@ -19,20 +19,34 @@ __author__ = 'Yang Hu'
import paramiko, os
from vm_info import VmInfo
import logging
# create logger
logger = logging.getLogger('docker_swarm')
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
def scale_service(vm, application_name, service_name, service_num):
try:
print "%s: ====== Start Docker Service Scaling ======" % (vm.ip)
logger.info("Starting docker service scaling on: "+vm.ip)
paramiko.util.log_to_file("deployment.log")
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(vm.ip, username=vm.user, key_filename=vm.key)
stdin, stdout, stderr = ssh.exec_command("sudo docker service scale %s_%s=%s" % (application_name, service_name, service_num))
stdout.read()
print "%s: ======= Service Scaling Finished =========" % (vm.ip)
logger.info("Finished docker service scaling on: "+vm.ip)
except Exception as e:
print '%s: %s' % (vm.ip, e)
logger.error(vm.ip + " " + str(e))
return "ERROR:" + vm.ip + " " + str(e)
ssh.close()
return "SUCCESS"
......
......@@ -19,9 +19,25 @@ __author__ = 'Yang Hu'
import paramiko, os
from vm_info import VmInfo
import logging
# create logger
logger = logging.getLogger('docker_swarm')
logger.setLevel(logging.DEBUG)
# create console handler and set level to debug
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# create formatter
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
logger.addHandler(ch)
def install_manager(vm):
try:
print "%s: ====== Start Swarm Manager Installing ======" % (vm.ip)
logger.info("Starting swarm manager installation on: "+(vm.ip))
paramiko.util.log_to_file("deployment.log")
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
......@@ -42,16 +58,16 @@ def install_manager(vm):
stdin, stdout, stderr = ssh.exec_command("sudo docker swarm join-token worker")
retstr = stdout.readlines()
ret = retstr[2].encode()
print "%s: ========= Swarm Manager Installed =========" % (vm.ip)
logger.info("Finished swarm manager installation on: "+(vm.ip))
except Exception as e:
print '%s: %s' % (vm.ip, e)
logger.error(vm.ip + " " + str(e))
return "ERROR:" + vm.ip + " " + str(e)
ssh.close()
return ret
def install_worker(join_cmd, vm):
try:
print "%s: ====== Start Swarm Worker Installing ======" % (vm.ip)
logger.info("Starting swarm worker installation on: "+(vm.ip))
paramiko.util.log_to_file("deployment.log")
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
......@@ -63,9 +79,9 @@ def install_worker(join_cmd, vm):
stdout.read()
stdin, stdout, stderr = ssh.exec_command("sudo %s" % (join_cmd))
stdout.read()
print "%s: ========= Swarm Worker Installed =========" % (vm.ip)
logger.info("Finished swarm worker installation on: "+(vm.ip))
except Exception as e:
print '%s: %s' % (vm.ip, e)
logger.error(vm.ip + " " + str(e))
return "ERROR:" + vm.ip + " " + str(e)
ssh.close()
return "SUCCESS"
......
......@@ -16,6 +16,7 @@ import sys, argparse
from threading import Thread
from time import sleep
import os.path
import logging
if len(sys.argv) > 1:
......@@ -23,6 +24,19 @@ if len(sys.argv) > 1:
else:
rabbitmq_host = '127.0.0.1'
logger = logging.getLogger('rpc_server')
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
fh = logging.FileHandler('deployer.log')
fh.setLevel(logging.DEBUG)
fh.setFormatter(formatter)
logger.addHandler(fh)
connection = pika.BlockingConnection(pika.ConnectionParameters(host=rabbitmq_host))
channel = connection.channel()
......@@ -160,7 +174,8 @@ def on_request(ch, method, props, body):
response["parameters"].append(par)
response = json.dumps(response)
print "Response: %s " % response
logger.info("Response: " + response)
ch.basic_publish(exchange='',
routing_key=props.reply_to,
......@@ -176,7 +191,7 @@ channel.basic_consume(on_request, queue='deployer_queue')
thread = Thread(target = threaded_function, args = (1, ))
thread.start()
print(" [x] Awaiting RPC requests")
logger.info("Awaiting RPC requests")
......@@ -186,4 +201,4 @@ except KeyboardInterrupt:
#thread.stop()
done = True
thread.join()
print "threads successfully closed"
logger.info("Threads successfully closed")
......@@ -81,10 +81,10 @@ def handle_delivery(message):
return response
def test_local():
test_local()
home = expanduser("~")
transformer = DockerComposeTransformer(home+"/workspace/DRIP/docs/input_tosca_files/MOG_cardif.yml")
transformer = DockerComposeTransformer(home+"/workspace/DRIP/docs/input_tosca_files/BEIA_cardif.yml")
compose = transformer.getnerate_compose()
print yaml.dump(compose)
response = {}
current_milli_time = lambda: int(round(time.time() * 1000))
response["creationDate"] = current_milli_time()
......@@ -98,12 +98,12 @@ def test_local():
print response
if __name__ == "__main__":
# test_local()
print sys.argv
channel = init_chanel(sys.argv)
global queue_name
queue_name = sys.argv[2]
start(channel)
test_local()
# print sys.argv
# channel = init_chanel(sys.argv)
# global queue_name
# queue_name = sys.argv[2]
# start(channel)
# try:
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment