Commit 513a8129 authored by Spiros Koulouzis

added simple compose

parent 655ee3e4
......@@ -14,14 +14,14 @@ spec:
env:
- name: VAR
value: VAL
#ports:
#- containerPort: 27017
#name: mongo
volumeMounts:
- name: mongo-persistent-storage
mountPath: /var/lib/mongo
volumes:
- name: mongo-persistent-storage
gcePersistentDisk:
pdName: mongo-disk
fsType: ext4
##ports:
##- containerPort: 27017
##name: mongo
#volumeMounts:
#- name: mongo-persistent-storage
#mountPath: /var/lib/mongo
#volumes:
#- name: mongo-persistent-storage
#gcePersistentDisk:
#pdName: mongo-disk
#fsType: ext4
......@@ -9,7 +9,7 @@ spec:
- resources:
limits :
cpu: 0.5
image: rabbitmq:3-management
image: rabbitmq:3.8-management
name: rabbitmq
env:
- name: VAR
......
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app: swagger-spring-example
name: swagger-spring-example
spec:
selector:
matchLabels:
app: swagger-spring-example
replicas: 1
strategy: {}
template:
metadata:
labels:
app: swagger-spring-example
spec:
containers:
- image: alogo53/swagger-spring-example:1.0.0
name: swagger-spring-example
ports:
- containerPort: 8080
resources: {}
restartPolicy: Always
status: {}
apiVersion: v1
kind: Service
metadata:
labels:
app: swagger-spring-example
name: swagger-spring-example
spec:
type: NodePort
ports:
- port: 8080
nodePort: 30000
selector:
app: swagger-spring-example
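Once the Deployment and Service above are applied, the swagger-spring-example container is reachable through NodePort 30000. A minimal check sketch, assuming kubectl is already configured for the target cluster and the manifests are saved to files (the file names here are illustrative):
kubectl apply -f swagger-spring-example-deployment.yaml
kubectl apply -f swagger-spring-example-service.yaml
kubectl get svc swagger-spring-example        # should show PORT(S) 8080:30000/TCP
curl http://<node-ip>:30000/                  # <node-ip> is any cluster node address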
......@@ -25,7 +25,7 @@ topology_template:
volumes:
- db-data:/data/db
environment:
ROOT_PASSWORD: somewordpress
ROOT_PASSWORD: root_passwd
DATABASE: db
USER: user
PASSWORD: passwd
......
......@@ -73,7 +73,7 @@ interface_types:
required: false
default: https://raw.githubusercontent.com/skoulouzis/CONF/DRIP_3.0/ansible_playbooks/install_k8s.yml
install:
description: install Kubernetes
description: install Kubernetes
k8s_scale:
description: Set the number of replicas for a Deployment, ReplicaSet, or Replication Controller, or the parallelism attribute of a Job.
k8s_create:
......
......@@ -78,7 +78,7 @@ node_types:
dashboard_url:
type: string
required: false
description: the dashboard access url
description: the dashboard access url
interfaces:
Kubernetes:
type: tosca.interfaces.ARTICONF.Kubernetes
......@@ -90,7 +90,7 @@ node_types:
playbook: https://raw.githubusercontent.com/skoulouzis/CONF/DRIP_3.0/ansible_playbooks/create_k8s.yml
configure:
inputs:
playbook: https://raw.githubusercontent.com/skoulouzis/CONF/DRIP_3.0/ansible_playbooks/create_k8s.yml
playbook: https://raw.githubusercontent.com/skoulouzis/CONF/DRIP_3.0/ansible_playbooks/dashboard.yaml
#tosca.nodes.ARTICONF.Orchestrator.Swarm:
......
......@@ -302,3 +302,25 @@ spec:
volumes:
- name: tmp-volume
emptyDir: {}
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: admin-user
#namespace: kubernetes-dashboard
namespace: default
#kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: admin-user
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: cluster-admin
subjects:
- kind: ServiceAccount
name: admin-user
#namespace: kubernetes-dashboard
namespace: default
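The ServiceAccount and ClusterRoleBinding above bind admin-user to cluster-admin, the account the dashboard can log in with. A sketch for retrieving its bearer token, assuming a cluster that still auto-creates ServiceAccount token secrets:
SECRET=$(kubectl -n default get serviceaccount admin-user -o jsonpath='{.secrets[0].name}')
kubectl -n default get secret "$SECRET" -o jsonpath='{.data.token}' | base64 --decode
# on kubectl/Kubernetes 1.24+ the equivalent is: kubectl -n default create token admin-user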
......@@ -2,7 +2,8 @@
mvn -Dmaven.test.skip=true install
cd drip-planner && rm -rf venv && python3 -m venv venv && venv/bin/pip3 install -r requirements.txt
cd planner && rm -rf venv && python3 -m venv venv && venv/bin/pip3 install -r requirements.txt
cd ../
cd sure_tosca-flask-server && rm -rf venv && python3 -m venv venv && venv/bin/pip3 install -r requirements.txt && venv/bin/pip3 install -r test-requirements.txt
cd ../
cd deployer && rm -rf venv && python3 -m venv venv && venv/bin/pip3 install -r requirements.txt
......@@ -72,6 +72,7 @@ public class ToscaHelper {
private void init(String sureToscaBasePath) {
Configuration.getDefaultApiClient().setBasePath(sureToscaBasePath);
Configuration.getDefaultApiClient().setConnectTimeout(1200000);
Logger.getLogger(ToscaHelper.class.getName()).log(Level.FINE, "sureToscaBasePath: {0}", Configuration.getDefaultApiClient().getBasePath());
api = new DefaultApi(Configuration.getDefaultApiClient());
this.objectMapper = new ObjectMapper(new YAMLFactory().disable(YAMLGenerator.Feature.WRITE_DOC_START_MARKER));
objectMapper.disable(SerializationFeature.WRITE_DATES_AS_TIMESTAMPS);
......@@ -186,7 +187,7 @@ public class ToscaHelper {
}
}
public NodeTemplateMap setCredentialsInVMTopology(NodeTemplateMap vmTopologyMap, Credential credential) throws TypeExeption {
public NodeTemplateMap setCredentialsInVMTopology(NodeTemplateMap vmTopologyMap, Credential credential) throws TypeExeption {
NodeTemplate vmTopology = vmTopologyMap.getNodeTemplate();
if (vmTopology.getType().equals(VM_TOPOLOGY)) {
Map<String, Object> att = vmTopology.getAttributes();
......
FROM python:3.7-buster
RUN apt-get update && apt-get install -y ansible
RUN mkdir -p /usr/src/app
WORKDIR /usr/src/app
......
......@@ -98,8 +98,7 @@ def handle_delivery(message):
response = {'toscaTemplate': tosca_template_dict}
output_current_milli_time = int(round(time.time() * 1000))
response["creationDate"] = output_current_milli_time
response["parameters"] = []
logger.info("Returning plan")
logger.info("Returning Deployment")
logger.info("Output message:" + json.dumps(response))
return json.dumps(response)
......
pika==1.1.0
names==0.3.0
networkx==2.4
requests=2.22.0
requests==2.22.0
ansible==2.9.2
\ No newline at end of file
......@@ -210,11 +210,11 @@ def write_ansible_k8s_files(tosca_template_json, tmp_path):
def get_dashboard_url(vms):
dashboard_tasks_path = get_templates_directory_path('dashboard.yaml')
with open(dashboard_tasks_path, 'r') as stream:
tasks = yaml.load_all(stream)
tasks = list(yaml.load_all(stream))
for task in tasks:
if task['kind'] == 'Service' and 'name' in task['metadata'] and task['metadata']['name'] and task['metadata'][
'name'] == 'kubernetes-dashboard':
dashboard_port = task['port']['ports'][0]['nodePort']
dashboard_port = task['spec']['ports'][0]['nodePort']
for vm_name in vms:
attributes = vms[vm_name]['attributes']
role = attributes['role']
......
version: '3'
services:
mongo:
image: mongo:4
ports:
- "27017:27017"
rabbit:
image: rabbitmq:3.8-management
ports:
- "5671-5672:5671-5672"
- "15672:15672"
- "4369:4369"
- "15671:15671"
planner:
depends_on:
- rabbit
- sure-tosca
image: alogo53/planner:3.0.0
environment:
RABBITMQ_HOST: rabbit
provisioner:
depends_on:
- rabbit
- sure-tosca
image: alogo53/provisioner:3.0.0
environment:
RABBITMQ_HOST: rabbit
SURE_TOSCA_BASE_PATH: http://sure-tosca:8081/tosca-sure/1.0.0
deployer:
depends_on:
- rabbit
- sure-tosca
image: alogo53/deployer:3.0.0
environment:
RABBITMQ_HOST: rabbit
manager:
depends_on:
- rabbit
- mongo
- sure-tosca
image: alogo53/manager:3.0.0
environment:
RABBITMQ_HOST: rabbit
MONGO_HOST: mongo
SURE_TOSCA_BASE_PATH: http://sure-tosca:8081/tosca-sure/1.0.0
ports:
- "30001:8080"
sure-tosca:
image: alogo53/sure-tosca:3.0.0
ports:
- "8081:8081"
logspout:
ports:
- "30002:80"
volumes:
- /etc/hostname:/etc/host_hostname:ro
- /var/run/docker.sock:/var/run/docker.sock
environment:
publish: "127.0.0.1:30002:80"
image: gliderlabs/logspout:latest
#docker-compose build
#docker-compose up -d
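The two commented commands bring the stack up. A quick verification sketch, using the port mappings from the compose file above (the exact sure-tosca endpoints are not shown in this commit, so the curl is only a connectivity check):
docker-compose up -d
docker-compose ps                                  # every service should report "Up"
curl -i http://localhost:8081/tosca-sure/1.0.0/    # base path taken from SURE_TOSCA_BASE_PATH
docker-compose logs -f manager                     # manager API is published on localhost:30001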
#!/bin/bash
docker push alogo53/drip-manager:3.0.0
docker push alogo53/drip-provisioner:3.0.0
docker push alogo53/manager:3.0.0
docker push alogo53/provisioner:3.0.0
docker push alogo53/sure-tosca:3.0.0
docker push alogo53/drip-planner:3.0.0
docker push alogo53/planner:3.0.0
docker push alogo53/deployer:3.0.0
#!/bin/bash
echo "----------Building drip-manager Docker--------------"
cd drip-manager && mvn -Dmaven.test.skip=true dockerfile:build
echo "----------Building manager Docker--------------"
cd manager && mvn -Dmaven.test.skip=true dockerfile:build
cd ../
echo "----------Building drip-provisioner Docker--------------"
cd drip-provisioner && mvn -Dmaven.test.skip=true dockerfile:build
echo "----------Building provisioner Docker--------------"
cd provisioner && mvn -Dmaven.test.skip=true dockerfile:build
cd ../
echo "----------Building sure_tosca-flask-server Docker--------------"
cd sure_tosca-flask-server && docker build -t alogo53/sure-tosca:3.0.0 .
echo "----------Building drip-planner Docker--------------"
echo "----------Building planner Docker--------------"
cd ../
cd drip-planner && docker build -t alogo53/drip-planner:3.0.0 .
cd planner && docker build -t alogo53/planner:3.0.0 .
echo "----------Building deployer Docker--------------"
cd ../
cd deployer && docker build -t alogo53/deployer:3.0.0 .
FROM openjdk:11
COPY target/drip-manager-3.0.0.jar drip-manager-3.0.0.jar
COPY target/manager-3.0.0.jar manager-3.0.0.jar
CMD jar -xf drip-manager-3.0.0.jar BOOT-INF/classes/application.properties && \
sed -ie "s/^message.broker.host=.*/message.broker.host=$RABBITMQ_HOST/" BOOT-INF/classes/application.properties && \
sed -ie "s/^db.host=.*/db.host=$MONGO_HOST/" BOOT-INF/classes/application.properties && \
sed -ie "s/^sure_tosca.base.path=.*/sure_tosca.base.path=$SURE_TOSCA_BASE_PATH/" BOOT-INF/classes/application.properties && \
CMD jar -xf manager-3.0.0.jar BOOT-INF/classes/application.properties && \
sed -ie "s#^message.broker.host=.*#message.broker.host=$RABBITMQ_HOST#" BOOT-INF/classes/application.properties && \
sed -ie "s#^db.host=.*#db.host=$MONGO_HOST#" BOOT-INF/classes/application.properties && \
sed -ie "s#^sure_tosca.base.path=.*#sure_tosca.base.path=$SURE_TOSCA_BASE_PATH#" BOOT-INF/classes/application.properties && \
cat BOOT-INF/classes/application.properties && \
jar -uf drip-manager-3.0.0.jar BOOT-INF/classes/application.properties && \
java -jar drip-manager-3.0.0.jar
jar -uf manager-3.0.0.jar BOOT-INF/classes/application.properties && \
java -jar manager-3.0.0.jar
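A run sketch for the resulting image, using the environment variables this CMD substitutes and the values the compose file in this commit supplies (the host names only resolve on a Docker network shared with rabbit, mongo and sure-tosca):
docker run -e RABBITMQ_HOST=rabbit \
           -e MONGO_HOST=mongo \
           -e SURE_TOSCA_BASE_PATH=http://sure-tosca:8081/tosca-sure/1.0.0 \
           -p 30001:8080 \
           alogo53/manager:3.0.0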
......@@ -78,7 +78,6 @@
<dependency>
<groupId>javax.xml.bind</groupId>
<artifactId>jaxb-api</artifactId>
<version>2.3.1</version>
</dependency>
<dependency>
<groupId>com.sun.xml.bind</groupId>
......
......@@ -155,7 +155,6 @@ public class DRIPService {
Logger.getLogger(DRIPService.class.getName()).log(Level.FINE, "Found ToscaTemplate with id: {0}", id);
ToscaTemplate toscaTemplate = toscaTemplateService.getYaml2ToscaTemplate(ymlToscaTemplate);
helper.uploadToscaTemplate(toscaTemplate);
return toscaTemplate;
}
......
......@@ -17,4 +17,4 @@ db.name=drip
db.username=drip-user
db.password=drip-pass
sure_tosca.base.path=http://localhost:8081/tosca-sure/1.0.0
sure_tosca.base.path=http://localhost:8081/tosca-sure/1.0.0
\ No newline at end of file
FROM openjdk:11
COPY target/drip-provisioner-3.0.0-jar-with-dependencies.jar drip-provisioner-3.0.0-jar-with-dependencies.jar
COPY target/provisioner-3.0.0-jar-with-dependencies.jar provisioner-3.0.0-jar-with-dependencies.jar
COPY etc/ etc
CMD jar -xf drip-provisioner-3.0.0-jar-with-dependencies.jar application.properties && \
sed -ie "s/^message.broker.host=.*/message.broker.host=$RABBITMQ_HOST/" application.properties && \
sed -ie "s/^sure_tosca.base.path=.*/sure-tosca.base.path=$SURE_TOSCA_BASE_PATH/" application.properties && \
CMD jar -xf provisioner-3.0.0-jar-with-dependencies.jar application.properties && \
sed -ie "s#^message.broker.host=.*#message.broker.host=$RABBITMQ_HOST#" application.properties && \
sed -ie "s#^sure_tosca.base.path=.*#sure-tosca.base.path=$SURE_TOSCA_BASE_PATH#" application.properties && \
cat application.properties && \
jar -uf drip-provisioner-3.0.0-jar-with-dependencies.jar application.properties && \
java -jar drip-provisioner-3.0.0-jar-with-dependencies.jar
jar -uf provisioner-3.0.0-jar-with-dependencies.jar application.properties && \
java -jar provisioner-3.0.0-jar-with-dependencies.jar
#!/bin/bash
sudo docker stop $(sudo docker ps -q)
sudo docker start mongo-inst
sleep 2
sudo docker start some-rabbit
sleep 2
sudo docker run -d alogo53/sure-tosca:3.0.0
sleep 6
sudo docker run -e MONGO_HOST=172.17.0.2 -e RABBITMQ_HOST=172.17.0.3 -e SURE_TOSCA_BASE_PATH='http\:\/\/172.17.0.4\/8081\/tosca-sure\/1.0.0\/' -p 8085:8080 -d alogo53/drip-manager:3.0.0
sudo docker run -e RABBITMQ_HOST=172.17.0.3 -e SURE_TOSCA_BASE_PATH='http\:\/\/172.17.0.4\/8081\/tosca-sure\/1.0.0\/' -d alogo53/drip-planner:3.0.0
sudo docker run -e RABBITMQ_HOST=172.17.0.3 -e SURE_TOSCA_BASE_PATH='http\:\/\/172.17.0.4\/8081\/tosca-sure\/1.0.0\/' -d alogo53/drip-provisioner:3.0.0
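The script above assumes the default-bridge addresses 172.17.0.2-172.17.0.4 for mongo, RabbitMQ and sure-tosca. A sketch for confirming them before running it (container names follow the script; the sure-tosca container gets a random name, so it is looked up by image):
sudo docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' mongo-inst
sudo docker inspect -f '{{range .NetworkSettings.Networks}}{{.IPAddress}}{{end}}' some-rabbit
sudo docker ps --filter ancestor=alogo53/sure-tosca:3.0.0 --format '{{.Names}}'   # then inspect that name the same way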