Commit 007e411c authored by Spiros Koulouzis

Fixed service file creation

parent 923425a9
@@ -39,6 +39,8 @@ topology_template:
mysql:
type: tosca.nodes.ARTICONF.Container.Application.Docker
properties:
ports:
- "3306:3306"
volumes:
- db_data:/var/lib/mysql
environment:
......
tosca_definitions_version: tosca_simple_yaml_1_0
repositories: {docker_hub: 'https://hub.docker.com/'}
imports:
- {nodes: 'https://raw.githubusercontent.com/skoulouzis/DRIP/DRIP_3.0/TOSCA/types/nodes.yaml'}
- {capabilities: 'https://raw.githubusercontent.com/skoulouzis/DRIP/DRIP_3.0/TOSCA/types/capabilities.yaml'}
- {policies: 'https://raw.githubusercontent.com/skoulouzis/DRIP/DRIP_3.0/TOSCA/types/policies.yaml'}
topology_template:
node_templates:
concepcion_topology:
interfaces:
Standard: {create: dumy.yaml}
properties:
credential: {required: false, type: tosca.datatypes.Credential}
domain: UvA (Amsterdam, The Netherlands) XO Rack
name: name
provider: ExoGeni
credentials:
- properties:
accessKeyId: geni
cloud_provider_name: exogeni
attributes: {keystore: XXXX}
token_type: secretKey
token: XXXX
requirements:
- vm: {capability: tosca.capabilities.ARTICONF.VM, node: delfina_compute, relationship: tosca.relationships.DependsOn}
type: tosca.nodes.ARTICONF.VM.topology
delfina_compute:
interfaces:
Standard: {create: dumy.yaml}
properties: {cpu_frequency: 2.9 GHz, disk_size: 25000 MB, host_name: vm, mem_size: 3000 MB,
num_cpus: 1, os: Ubuntu 17.10, user_name: vm_user}
type: tosca.nodes.ARTICONF.VM.Compute
lawrence_kubernetes:
interfaces:
Standard: {create: interfaces/playbooks/kubernetes_install.yaml}
requirements:
- host: {capability: tosca.capabilities.Scalable, node: concepcion_topology,
relationship: tosca.relationships.HostedOn}
- host: {capability: tosca.capabilities.Compute, node: concepcion_topology,
relationship: tosca.relationships.HostedOn}
type: tosca.nodes.ARTICONF.Orchestrator.Kubernetes
mysql:
artifacts:
image: {file: 'mysql:5.7', repository: docker_hub, type: tosca.artifacts.Deployment.Image.Container.Docker}
properties:
environment: {MYSQL_DATABASE: wordpress, MYSQL_PASSWORD: wordpress, MYSQL_ROOT_PASSWORD: somewordpress,
MYSQL_USER: wordpress}
volumes: ['db_data:/var/lib/mysql']
ports: ['3306:3306']
requirements:
- host: {capability: tosca.capabilities.ARTICONF.Orchestrator, node: lawrence_kubernetes,
relationship: tosca.relationships.HostedOn}
type: tosca.nodes.ARTICONF.Container.Application.Docker
tammie_kubernetes:
interfaces:
Standard: {create: interfaces/playbooks/kubernetes_install.yaml}
requirements:
- host: {capability: tosca.capabilities.Scalable, node: concepcion_topology,
relationship: tosca.relationships.HostedOn}
- host: {capability: tosca.capabilities.Compute, node: concepcion_topology,
relationship: tosca.relationships.HostedOn}
type: tosca.nodes.ARTICONF.Orchestrator.Kubernetes
wordpress:
artifacts:
my_image: {file: 'wordpress:latest', repository: docker_hub, type: tosca.artifacts.Deployment.Image.Container.Docker}
properties:
environment: {WORDPRESS_DB_HOST: 'mysql:3306', WORDPRESS_DB_NAME: wordpress,
WORDPRESS_DB_PASSWORD: wordpress, WORDPRESS_DB_USER: wordpress}
ports: ['8000:80']
requirements:
- service:
node: mysql
relationship: {type: tosca.relationships.DependsOn}
- host: {capability: tosca.capabilities.ARTICONF.Orchestrator, node: lawrence_kubernetes,
relationship: tosca.relationships.HostedOn}
type: tosca.nodes.ARTICONF.Container.Application.Docker
outputs:
role:
- {delfina_compute: master}
user_name:
- {delfina_compute: vm_user}
ip:
- {delfina_compute: 145.100.XXXX}
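The YAML above describes the full planned topology for the wordpress/mysql example: two Docker application nodes hosted on Kubernetes orchestrators, which in turn sit on the ExoGeni VM topology. A minimal sketch of how such a document can be inspected from Python, assuming PyYAML is installed and the plan is saved locally as plan.yaml (both assumptions, not part of this commit):

import yaml

# Hypothetical local copy of the plan shown above.
with open("plan.yaml") as f:
    plan = yaml.safe_load(f)

node_templates = plan["topology_template"]["node_templates"]
for name, node in node_templates.items():
    if node.get("type") == "tosca.nodes.ARTICONF.Container.Application.Docker":
        ports = node.get("properties", {}).get("ports", [])
        print(name, ports)   # mysql ['3306:3306'], wordpress ['8000:80']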
@@ -175,16 +175,19 @@ public class TOSCAUtils {
Map<String, Object> dockerValues = (Map<String, Object>) docker.getValue();
Map<String, Object> spec = new HashMap();
spec.put("type", "NodePort");
Map<String, Object> properties = (Map<String, Object>) dockerValues.get("properties");
List<String> toscaPortsList = (List<String>) properties.get("ports");
List< Map<String, Object>> portList = new ArrayList<>();
if (toscaPortsList != null) {
for (String portEntry : toscaPortsList) {
String[] portsArray = portEntry.split(":");
Map<String, Object> portMap = new HashMap();
portMap.put("port", portsArray[1]);
spec.put("ports", portMap);
portMap.put("port", Integer.valueOf(portsArray[1]));
portList.add(portMap);
}
spec.put("ports", portList);
}
Map<String, Object> selector = new HashMap();
@@ -247,7 +250,7 @@ public class TOSCAUtils {
for (String portEntry : toscaPortsList) {
String[] portsArray = portEntry.split(":");
Map<String, Object> portMap = new HashMap();
portMap.put("containerPort", portsArray[0]);
portMap.put("containerPort", Integer.valueOf(portsArray[0]));
container.put("ports", portMap);
}
}
......
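The TOSCAUtils change above fixes how the TOSCA "ports" property is turned into the Kubernetes Service definition: the port value is now stored as an integer, and every mapping is collected into a list that is assigned to the spec once, instead of a single map being overwritten on each loop iteration. A rough Python equivalent of the corrected transformation, for illustration only (the actual implementation is the Java code above):

def tosca_ports_to_k8s(tosca_ports):
    # tosca_ports is a list such as ['8000:80'], as in the plan above
    service_ports = []    # ends up in the Service spec under "ports"
    container_ports = []  # ends up in the container spec under "ports"
    for entry in tosca_ports:
        left, right = entry.split(":")
        service_ports.append({"port": int(right)})            # second field, as in the first hunk
        container_ports.append({"containerPort": int(left)})  # first field, as in the second hunk
    return service_ports, container_ports

# tosca_ports_to_k8s(['3306:3306']) -> ([{'port': 3306}], [{'containerPort': 3306}])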
apiVersion: v1
clusters:
- cluster:
certificate-authority-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRFNU1Ea3lOVEUwTWpjME9Wb1hEVEk1TURreU1qRTBNamMwT1Zvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBSmU0CnNnc2I2ZEkvNk56WU5jbVZHY3FjamwxYVJTY3dQVG9YREhxWGRDZzZsUHlJWkRpcmNhRWlxS3dkcTE0RUF3c0UKQ0hZUjBUOWNoWWVkMFdmcHV4cGVzQWJ2RVBjRHNBbHFia21FaTN5VWZjYm5vb3JDUk56SWtNZWJhbWhNOGFIYwp6bFZpaTY1VHdoRXlKZGptd1MxRU5odTBJbktSNktOZHpIeTdhSk45dk5VY2JwZW5ON05KQkRCMmFwMTVNUmdUClVDUklQOHlneFU1V2V5NnA1SnJ1VHV1S1pLUVpxZm11cnBHb2tmQzJqOVdUTDZqQjdJcCthdEpWZFFNT2JJU04KRmM4Z0p5Q0huMUVPR0dwQXRIK1lsc3NpWnk2VFV6NEpLcXpSYmVwaVpUSVZNMUI5anh4alloSDVGNFR5UmllMgp6L0dwbCtDOFpzSXdSU2M3Z0wwQ0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFBeWlqMzVDcTA1cWg5bjROYS8yNGJTYkh1aUEKdnMzUmVTSThISWZManhPNXVFMnJ6R3Y2bjBZMlVab1ZFemcrbldTdjVjNG81Qm1uTVQ2MkxNM1JYRDV0WW0yWAo0ZVI3QTk5d1RSYTcyY1lTYSs0emtPYVI3cisySUpDZndLZ0hEUzU2SFY2dkhwNWRZWHlUenlNK09MVVlMNnNECkx5eHZJSVlUTHZKQzIweE1aakFCQWR3UXM4eExKTE1zbEV1WDJUTXYwempxOWpFWHNDN2xXOXhJLzYzSU5nU2sKU2dDc0pONnY3ZUN0WUlHZWsrbndTengwL09vVXp2STFUd1ZGenFxS2Jwcm0rd2dQd1hITGtKSXBZeXg5SlM4Ywo0b2ZkMFBadE5kVUNDRXQ1R2w3b05PaUVkbU5JVmVqTVdDRWQxdlFLQ2VZejlzRk05WVQwUXlEakpTcz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
server: https://10.103.0.7:6443
name: kubernetes
contexts:
- context:
cluster: kubernetes
user: kubernetes-admin
name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes
kind: Config
preferences: {}
users:
- name: kubernetes-admin
user:
client-certificate-data: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM4akNDQWRxZ0F3SUJBZ0lJRUgrWWpSQzJSUFF3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB4T1RBNU1qVXhOREkzTkRsYUZ3MHlNREE1TWpReE5ESTNOVFJhTURReApGekFWQmdOVkJBb1REbk41YzNSbGJUcHRZWE4wWlhKek1Sa3dGd1lEVlFRREV4QnJkV0psY201bGRHVnpMV0ZrCmJXbHVNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQTFlYnovM2dVQWQvai9uay8KWTlZUmNRS3VQMEFZT28zNlRsT2RiRlR5cHE1bXo4UGpOMlV2b1pZMkNlazhCSCtLVXhTbzRWdWphelNxM0N5VwpiOS9ESERXNnpvM1JlS285NDNnWFpyVmZYQnpLUkcrUjE4WkRQRmNPSS9sU0Vkd0NlUUlFRnY3MnRIWjQxbHpQClBraTRVMFRvMkkrNE5hNjVwazJHMGxoSEVCQ1NYMHJNbFczc3ltTW9ONSthN21COSttVElLaThKZlZ0T0NydWkKMi9rclJJSzVkSlJ5eDZkWE5KMUp2ZlR0VFE3REgwZklrS0VaMWRRbGU4QmdMbzZhTi9UZDN6SDUwZmxuOTRsaApNdXR6RDM4Yks0a1o0Wnp3THFCRVQwSzlNbGhuZkJOREFjakpUVjVuVmU2bFZRVTdYcVhIL2pxNS9RbHFWclJuClJhV2JzUUlEQVFBQm95Y3dKVEFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFJeVpncU5zN3pWMkVsZHRSekFuYTZkUmFWUFhlMW1LNFVRRQo5OFhyZTR0WFdRYm4zZUJ6UkY5ZmpKZ0pSTFppTDlXM2lBbFl6c0I1YWxqMlRzQ3JwdHdIaHFRd2x6M2ZhZ21UCnJhWWNmV0FWOG9XVVFuanNBVStOZHZFU0hJMGQwQjQ4SG5VTWQ4Y3psWlFwOTZuRWNuLzZrMy8vRktrS01pQ0QKb2xlWG9VblB4Wm1CU3AxN3lWeUlQaE1qRFhFMFFoNTFxNnVieTdsbVk5WUkzQkhVQUQ3TTdQQkVvWm9QaEpyNgorSXZsVkZWRVhXR3QrNmFSSXBKZ3N6enVaaWVubTh6YTlXZDMzOWdJOXZhQk9Edjg3OHNBOFpJT0h5TVNlbTJOCmVzbXlxMnN6T1RiNXZlV0RYalQyYTdtMDlOOGZ4S3RQSk9jeTlROWxBUTZHd2NsUmZ2ND0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
client-key-data: LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb2dJQkFBS0NBUUVBMWViei8zZ1VBZC9qL25rL1k5WVJjUUt1UDBBWU9vMzZUbE9kYkZUeXBxNW16OFBqCk4yVXZvWlkyQ2VrOEJIK0tVeFNvNFZ1amF6U3EzQ3lXYjkvREhEVzZ6bzNSZUtvOTQzZ1haclZmWEJ6S1JHK1IKMThaRFBGY09JL2xTRWR3Q2VRSUVGdjcydEhaNDFselBQa2k0VTBUbzJJKzROYTY1cGsyRzBsaEhFQkNTWDByTQpsVzNzeW1Nb041K2E3bUI5K21USUtpOEpmVnRPQ3J1aTIva3JSSUs1ZEpSeXg2ZFhOSjFKdmZUdFRRN0RIMGZJCmtLRVoxZFFsZThCZ0xvNmFOL1RkM3pINTBmbG45NGxoTXV0ekQzOGJLNGtaNFp6d0xxQkVUMEs5TWxobmZCTkQKQWNqSlRWNW5WZTZsVlFVN1hxWEgvanE1L1FscVZyUm5SYVdic1FJREFRQUJBb0lCQUR0WmdZbjR2MVJsMHRUZwp2MzNyLzVyanE0VlJPMmZEelJlK2k0ZHJhb2hsQzVIS1FGazJjaWpiak5MakxBdnpkMlhsN1pYWjMxWDNueERJCkxsV01PSTZ6T2NubC82RURXM2lwOFpSRjd0ZVlCV2RIcmFlNUV4N0M2T0dDWkFzZ2lHOGE2QmVaVnNwcnRNdUYKcE5zYlFrbVliU0xwZmFzbmQ4dDA1MXVsc3RINXhSV0U0WWRCZ2VZMFlWYlcrQ2FSeXN4T2lHVEE4cnRlek81UQpFdDN0WEZQUldWMmZyYzk0TkhkTUc0NXpqU29GVDMzN1l1R2pVSmN2VG1IQ2VHMUt0a3F3TjBpeHNsVmpOOENFCkFhdHNBaFBJT04rQ0VSQ3VLUVZMbkZtMUpUYzZRRHZlbzVadWh5VG04TkJXZjBSZURwc1RyazVVY2FOUXdPU2MKcHdKYjI0RUNnWUVBM1g3b2JmL1pvcng0WVZhbGw2ejVwQUV2cFFPdkNOajluRlhiQzU2Y0lhNGZmN2hRbGZ3LwpFMWxMa3NmNzBKNi95a283MWs2L0d2WEwya0hxZnRwczN4RXFBcDhIZUpTaERYcVJqa1BDeHk0S2F4TS9UK0ZhCmh6bTV5dFFCK2E5ajFwckZDa3B4WmtaMllJRE5xeDIxcUl1Vm15T0dNL0loRXZPMktiM3BmVlVDZ1lFQTl6azQKQ1Fya3NEWHNlUUl5YytxbTZxdEFWWEs2c0NwN2lUT2YwRjZWeFBzNGxHb0lkY0RaTEl6UGVuSERIUEM1S0ZCeAozSE1nUHlaUUVqalBVU3VLd3RqRFpUU05QRk02cVF5RGtBckkrVjdqckF0TGVHdTNTdFl6WUJETXhncFhBbEpKCktUY0hybkE5TkZnQ3I3ckltdXJvWmdwT3RhL2ZLTXZpQ0dKcVJPMENnWUI5MWlqVXZhTjJtaDdHSmtUanZBa0UKRFF4MWNuZFJ6bjJmQVFQMlFRRXcvVXkxOGhBT2RnV2J4NEp3L0o3cXNoWUNKbFNDZDdDSTc1WUdCS2JsdE5CZgpscy9JTjNNMkpUS3VockVGSXlnWW91YVdXSlFDbk9RaXFVQU5wSThPdkg4N0lDakxwT2x5RXB3VVRYa0xPMURHClZhOEVPWlY0RTVxSXV0OEdMZmZtRlFLQmdIc3g1anlMVXg2RUlJekVqWU82QU9lYjR2a3hyTm93c1ZMVGlPWDYKM0VOR3RSRXdMWHNRV0tpY21wOTNwVFQ4dUNmZ3ZueU9XaGxkN0RUSVhuY2liWWxmSHkrRm1vUGZMYklqN0VPUgpQRWtZQWZndndMSUhhMVU5bkdoWXR0SlJRTDZGWnJQRkdtelF2WThoOUdUQmZVbkZtWDJQMFYwMGdNNEJtMmQyCk0yS1pBb0dBVkFQenVYc1NFTUVKQzViVVozQWpVVG02WHFUS3R5dEtZcUdoTjVrcHlHZkMvRGtiSFJVeFhIUkoKMmFKeDM4cGF1b1IvYVVqUXJyQjdFbXQyYUh0Wkw2QTJFQUtWUDkyNWpUZFZPUDB1L0ZJSnV1S1RVNFpMWDd0VwpXMk5pWHVaR0xWQzhPekFwNHdLU290aHh1cFhveWhkV1FGRkNQRUJBY21KSStWK0ZZejA9Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg==
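The admin.conf above is a kubeconfig of the kind the deployer copies off the master (see the kubectl call with --kubeconfig /tmp/admin.conf below). As a sketch, the same credentials could also be consumed from Python with the official kubernetes client; the client package is an assumption here, not a dependency of this commit:

from kubernetes import client, config

# Point the client at the admin.conf fetched from the master.
config.load_kube_config(config_file="/tmp/admin.conf")

v1 = client.CoreV1Api()
for pod in v1.list_pod_for_all_namespaces().items:
    print(pod.metadata.namespace, pod.metadata.name)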
@@ -23,6 +23,8 @@ import sys
import logging
# from drip_logging.drip_logging_handler import *
from os import listdir
from os.path import isfile, join
logger = logging.getLogger(__name__)
@@ -64,14 +66,16 @@ def install_manager(vm):
sftp.put(install_script, "kubernetes_setup.sh")
# stdin, stdout, stderr = ssh.exec_command("sudo hostname ip-%s" % (vm.ip.replace('.','-')))
# stdout.read()
stdin, stdout, stderr = ssh.exec_command("sudo sh /tmp/kubernetes_setup.sh")
stdin, stdout, stderr = ssh.exec_command("sudo sh /tmp/kubernetes_setup.sh > log 2>&1")
out = stdout.read()
out = stderr.read()
stdin, stdout, stderr = ssh.exec_command("sudo kubeadm kubernetes-xenialreset --force")
# stdin, stdout, stderr = ssh.exec_command("sudo kubeadm kubernetes-xenialreset --force")
# stdout.read()
stdin, stdout, stderr = ssh.exec_command("sudo kubeadm reset --force >> log 2>&1")
stdout.read()
# stdin, stdout, stderr = ssh.exec_command("sudo kubeadm init --apiserver-advertise-address=%s" % (vm.ip))
stdin, stdout, stderr = ssh.exec_command("sudo kubeadm init")
stdin, stdout, stderr = ssh.exec_command("sudo kubeadm init >> log 2>&1")
retstr = stdout.readlines()
stdin, stdout, stderr = ssh.exec_command("sudo cp /etc/kubernetes/admin.conf /tmp/")
@@ -126,9 +130,45 @@ def install_worker(join_cmd, vm):
return "SUCCESS"
def deploy_on_master(deployment_file, vm):
try:
k8s_files = [f for f in listdir(deployment_file) if isfile(join(deployment_file, f))]
logger.info("Starting deployment on: " + (vm.ip))
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect(vm.ip, username=vm.user, key_filename=vm.key, timeout=30)
parentDir = os.path.dirname(os.path.abspath(vm.key))
os.chmod(parentDir, 0o700)
os.chmod(vm.key, 0o600)
stdin, stdout, stderr = ssh.exec_command("mkdir /tmp/k8s")
stdout.read()
sftp = ssh.open_sftp()
sftp.chdir('/tmp/k8s')
file_path = os.path.dirname(os.path.abspath(__file__))
for f in k8s_files:
k8s_file = deployment_file + "/" + f
sftp.put(k8s_file, f)
stdin, stdout, stderr = ssh.exec_command("kubectl create -f /tmp/k8s/ --kubeconfig /tmp/admin.conf >> log 2>&1")
s_out = stdout.read()
e_out = stderr.read()
print s_out
print e_out
except Exception as e:
# print '%s: %s' % (vm.ip, e)
logger.error(vm.ip + " " + str(e))
return "ERROR:" + vm.ip + " " + str(e)
ssh.close()
return "SUCCESS"
def deploy(vm_list, deployment_file):
for i in vm_list:
if i.role == "master":
deploy_on_master(deployment_file, i)
return None
......
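deploy() above only acts on VMs whose role is "master", and deploy_on_master() expects objects exposing ip, user and key, plus a directory of manifest files to upload. A minimal, hypothetical invocation (all names and paths below are invented for illustration):

class VM(object):
    def __init__(self, ip, user, key, role):
        self.ip, self.user, self.key, self.role = ip, user, key, role

vms = [
    VM("145.100.0.10", "vm_user", "/tmp/keys/master_key", "master"),   # hypothetical values
    VM("145.100.0.11", "vm_user", "/tmp/keys/worker_key", "worker"),
]
deploy(vms, "/tmp/k8s_manifests")   # directory holding the generated .yaml files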
#! /bin/bash
sed -i -re 's/([a-z]{2}\.)?archive.ubuntu.com|security.ubuntu.com/old-releases.ubuntu.com/g' /etc/apt/sources.list
apt-get update && apt-get install -y apt-transport-https ca-certificates curl software-properties-common
add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | apt-key add -
apt-get update && apt-get install -y --allow-unauthenticated docker-ce=18.06.2~ce~3-0~ubuntu
echo -e "{\n \"exec-opts\": [\"native.cgroupdriver=systemd\"], \n \"log-driver\": \"json-file\", \n \"log-opts\": {\"max-size\": \"100m\"}, \n \"storage-driver\": \"overlay2\" \n}" > /etc/docker/daemon.json
mkdir -p /etc/systemd/system/docker.service.d
systemctl daemon-reload
systemctl restart docker
sudo kubeadm reset --force
sudo sed -i -re 's/([a-z]{2}\.)?archive.ubuntu.com|security.ubuntu.com/old-releases.ubuntu.com/g' /etc/apt/sources.list
sudo apt-get update && apt-get install -y apt-transport-https ca-certificates curl software-properties-common
sudo add-apt-repository "deb [arch=amd64] https://download.docker.com/linux/ubuntu $(lsb_release -cs) stable"
curl -fsSL https://download.docker.com/linux/ubuntu/gpg | sudo apt-key add -
sudo apt-get update && apt-get install -y --allow-unauthenticated docker-ce=18.06.2~ce~3-0~ubuntu
echo "{\n \"exec-opts\": [\"native.cgroupdriver=systemd\"], \n \"log-driver\": \"json-file\", \n \"log-opts\": {\"max-size\": \"100m\"}, \n \"storage-driver\": \"overlay2\" \n}" | sudo tee /etc/docker/daemon.json
sudo mkdir -p /etc/systemd/system/docker.service.d
sudo systemctl daemon-reload
sudo systemctl restart docker
sudo apt-get update && apt-get install -y apt-transport-https curl
sudo curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" | sudo tee /etc/apt/sources.list.d/kubernetes.list
sudo apt-get update && apt-get install -y kubelet kubeadm kubectl
sudo apt-mark hold kubelet kubeadm kubectl
apt-get update && apt-get install -y apt-transport-https curl
curl -s https://packages.cloud.google.com/apt/doc/apt-key.gpg | apt-key add -
echo "deb https://apt.kubernetes.io/ kubernetes-xenial main" | tee /etc/apt/sources.list.d/kubernetes.list
apt-get update && apt-get install -y kubelet kubeadm kubectl
apt-mark hold kubelet kubeadm kubectl
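Both variants of the setup script write /etc/docker/daemon.json by echoing an escaped string, which is fragile because the result depends on the shell's echo behaviour (bash needs -e to expand the \n sequences). A sketch of producing the same file from Python with the json module instead; run as root, purely illustrative:

import json

daemon_config = {
    "exec-opts": ["native.cgroupdriver=systemd"],
    "log-driver": "json-file",
    "log-opts": {"max-size": "100m"},
    "storage-driver": "overlay2",
}

# Same content as the echo/tee line in the script, but serialised by json.dump.
with open("/etc/docker/daemon.json", "w") as f:
    json.dump(daemon_config, f, indent=2)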
@@ -129,8 +129,8 @@ def handle_delivery(message):
compose_name = param["attributes"]["name"]
if manager_type == "kubernetes":
ret = docker_kubernetes.run(vm_list, rabbitmq_host, owner)
# docker_kubernetes.deploy(vm_list, deployment_file)
# ret = docker_kubernetes.run(vm_list, rabbitmq_host, owner)
docker_kubernetes.deploy(vm_list, k8s_folder)
return ret
elif manager_type == "swarm":
ret = docker_engine.run(vm_list, rabbitmq_host, owner)
......
@@ -2,6 +2,9 @@
<project version="4">
<component name="ChangeListManager">
<list default="true" id="462ede19-adfe-472b-975e-fefefa973fe0" name="Default Changelist" comment="slolved cap error">
<change beforePath="$PROJECT_DIR$/../drip-deployer/docker_kubernetes.sh" beforeDir="false" afterPath="$PROJECT_DIR$/../drip-deployer/docker_kubernetes.sh" afterDir="false" />
<change beforePath="$PROJECT_DIR$/.idea/workspace.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/workspace.xml" afterDir="false" />
<change beforePath="$PROJECT_DIR$/src/planner/basic_planner.py" beforeDir="false" afterPath="$PROJECT_DIR$/src/planner/basic_planner.py" afterDir="false" />
<change beforePath="$PROJECT_DIR$/venv/lib/python3.6/site-packages/easy-install.pth" beforeDir="false" />
<change beforePath="$PROJECT_DIR$/venv/lib/python3.6/site-packages/prettytable.py" beforeDir="false" />
<change beforePath="$PROJECT_DIR$/venv/lib/python3.6/site-packages/pyparsing.py" beforeDir="false" />
@@ -81,7 +84,7 @@
<option name="ADD_CONTENT_ROOTS" value="true" />
<option name="ADD_SOURCE_ROOTS" value="true" />
<option name="SCRIPT_NAME" value="$PROJECT_DIR$/src/rpc_server.py" />
<option name="PARAMETERS" value="localhost planner_queue" />
<option name="PARAMETERS" value="test_local" />
<option name="SHOW_COMMAND_LINE" value="false" />
<option name="EMULATE_TERMINAL" value="false" />
<option name="MODULE_MODE" value="false" />
@@ -161,8 +164,8 @@
</line-breakpoint>
<line-breakpoint enabled="true" suspend="THREAD" type="python-line">
<url>file://$PROJECT_DIR$/src/planner/basic_planner.py</url>
<line>322</line>
<option name="timeStamp" value="3" />
<line>152</line>
<option name="timeStamp" value="4" />
</line-breakpoint>
</breakpoints>
<default-breakpoints>
......
@@ -24,7 +24,7 @@ def get_cpu_frequency():
def get_num_cpus():
return 1
return 2
def get_cloud_domain():
......
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: swagger-spring-example
name: swagger-spring-example
spec:
selector:
matchLabels:
app: swagger-spring-example
replicas: 1
strategy: {}
template:
metadata:
labels:
app: swagger-spring-example
spec:
containers:
- image: alogo53/swagger-spring-example:1.0.0
name: swagger-spring-example
ports:
- containerPort: 8080
resources: {}
restartPolicy: Always
status: {}
apiVersion: v1
kind: Service
metadata:
labels:
app: swagger-spring-example
name: swagger-spring-example
spec:
type: NodePort
ports:
- port: 8080
nodePort: 30000
selector:
app: swagger-spring-example
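The Deployment and Service above are the kind of files deploy_on_master() uploads to /tmp/k8s and applies with kubectl create -f. As a sketch, the same manifests could be applied with the kubernetes Python client (the client package, the combined file name, and its location are assumptions):

from kubernetes import client, config, utils

config.load_kube_config(config_file="/tmp/admin.conf")
k8s_client = client.ApiClient()

# swagger-spring-example.yaml would contain the Deployment and Service documents
# shown above, separated by a '---' line so they parse as separate YAML documents.
utils.create_from_yaml(k8s_client, "swagger-spring-example.yaml")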