UvA / CONF / Commits / 917597f8

Commit 917597f8, authored Sep 30, 2019 by Spiros Koulouzis
Parent: 007e411c

    add k8s output to TOSCA

Showing 7 changed files with 160 additions and 133 deletions:
  drip-api/src/main/java/nl/uva/sne/drip/api/service/DeployService.java     +115  -87
  drip-api/src/main/java/nl/uva/sne/drip/api/service/ProvisionService.java    +0   -9
  drip-commons/src/main/java/nl/uva/sne/drip/commons/utils/TOSCAUtils.java    +3   -1
  drip-deployer/admin.conf                                                    +0  -19
  drip-deployer/docker_kubernetes.py                                         +35  -10
  drip-deployer/rpc_server.py                                                 +6   -4
  drip_planner2/.idea/workspace.xml                                           +1   -3
drip-api/src/main/java/nl/uva/sne/drip/api/service/DeployService.java

@@ -24,6 +24,7 @@ import java.util.Base64;
 import java.util.HashMap;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 import java.util.concurrent.TimeoutException;
 import java.util.logging.Level;
 import java.util.logging.Logger;

@@ -372,84 +373,31 @@ public class DeployService {
                    return deployResponse;
                }
                if (name.equals("ansible_output")) {
-                    String value = p.getValue();
-                    ObjectMapper mapper = new ObjectMapper();
-                    mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
-                    value = parseValue(value);
-                    List<AnsibleOutput> outputList = mapper.readValue(value, new TypeReference<List<AnsibleOutput>>() {
-                    });
-                    List<String> outputListIds = new ArrayList<>();
-                    Map<String, String> nodeTypeCache = new HashMap<>();
-                    Map<String, String> domainCache = new HashMap<>();
-                    Map<String, String> osTypeCache = new HashMap<>();
-                    Map<String, String> cloudProviderCache = new HashMap<>();
-                    for (AnsibleOutput ansOut : outputList) {
-                        Map<String, Object> map = provisionService.findOne(deployInfo.getProvisionID()).getKeyValue();
-                        String nodeType = nodeTypeCache.get(ansOut.getHost());
-                        String domain = domainCache.get(ansOut.getHost());
-                        String os = osTypeCache.get(ansOut.getHost());
-                        if (nodeType == null) {
-                            List<Map<String, Object>> components = null;
-                            List<Map<String, Object>> topologies = null;
-                            if (map.containsKey("components")) {
-                                components = (List<Map<String, Object>>) map.get("components");
-//                                topologies = (List<Map<String, Object>>) map.get("topologies");
-                            } else {
-                                for (String key : map.keySet()) {
-                                    Map<String, Object> subMap = (Map<String, Object>) map.get(key);
-                                    if (subMap.containsKey("components") && components == null) {
-                                        components = (List<Map<String, Object>>) subMap.get("components");
-                                    }
-                                    if (subMap.containsKey("topologies") && topologies == null) {
-                                        topologies = (List<Map<String, Object>>) subMap.get("topologies");
-                                    }
-                                    if (components != null && topologies != null) {
-                                        break;
-                                    }
-                                }
-                            }
-                            for (Map<String, Object> component : components) {
-                                String publicAddress = null;
-                                for (String addressName : PUBLIC_ADRESS_NAMES) {
-                                    if (component.containsKey(addressName)) {
-                                        publicAddress = (String) component.get(addressName);
-                                        break;
-                                    }
-                                }
-                                if (publicAddress != null && publicAddress.equals(ansOut.getHost())) {
-                                    nodeType = (String) component.get("nodeType");
-                                    for (String siteName : CLOUD_SITE_NAMES) {
-                                        if (component.containsKey(siteName)) {
-                                            domain = (String) component.get(siteName);
-                                            break;
-                                        }
-                                    }
-                                    os = (String) component.get("OStype");
-                                    nodeTypeCache.put(ansOut.getHost(), nodeType);
-                                    domainCache.put(ansOut.getHost(), domain);
-                                    osTypeCache.put(ansOut.getHost(), os);
-                                    break;
-                                }
-                            }
-                        }
-                        ansOut.setVmType(nodeType);
-                        ansOut.setCloudDeploymentDomain(domain);
-                        ansOut.setProvisionID(deployInfo.getProvisionID());
-//                        ansOut.setCloudProvider(provider);
-                        ansOut = ansibleOutputService.save(ansOut);
-                        BenchmarkResult benchmarkResult = parseSaveBenchmarkResult(ansOut);
-                        outputListIds.add(ansOut.getId());
-                    }
-//                    deployResponse.setAnsibleOutputList(outputListIds);
+                    setAnsibleOutput(p, deployInfo);
                }
+                if (name.equals("kubectl_get")) {
+                    String decoded = new String(Base64.getDecoder().decode(p.getValue().getBytes()));
+                    Map<String, Object> kubectlGetOutput = Converter.jsonString2Map(decoded);
+                    Map<String, Object> outputs = new HashMap<>();
+                    List<Map<String, Object>> items = (List<Map<String, Object>>) kubectlGetOutput.get("items");
+                    for (Map<String, Object> entry : items) {
+                        String serviceName = (String) ((Map<String, Object>) entry.get("metadata")).get("name");
+                        List<Map<String, Object>> ports = (List<Map<String, Object>>) ((Map<String, Object>) entry.get("spec")).get("ports");
+                        for (Map<String, Object> port : ports) {
+                            Set<String> keys = port.keySet();
+                            for (String key : keys) {
+                                outputs = TOSCAUtils.buildTOSCAOutput(outputs, serviceName, (String) port.get(key), key, false);
+                            }
+                        }
+                    }
+                    Map<String, Object> tosca = provisionService.findOne(deployInfo.getProvisionID()).getKeyValue();
+                    Map<String, Object> topologyTemplate = (Map<String, Object>) ((Map<String, Object>) tosca.get("topology_template"));
+                    topologyTemplate.put("outputs", outputs);
+                    deployResponse.setKvMap(tosca);
+                }
            }
        }
        return deployResponse;
    }

@@ -568,20 +516,19 @@ public class DeployService {
        return resp;
    }

-    private Map<String, String> getDockerLogin(ProvisionResponse pro) {
-        String planID = pro.getPlanID();
-        PlanResponse plan = plannerService.findOne(planID);
-        String toscaID = plan.getToscaID();
-        if (toscaID != null) {
-            ToscaRepresentation tosca = toscaService.findOne(plan.getToscaID());
-            Map<String, Object> map = tosca.getKeyValue();
-            map.get("repositories");
-            HashMap dockerLogin = new HashMap();
-            return dockerLogin;
-        }
-        return null;
-    }
+//    private Map<String, String> getDockerLogin(ProvisionResponse pro) {
+//        String planID = pro.getPlanID();
+//        PlanResponse plan = plannerService.findOne(planID);
+//        String toscaID = plan.getToscaID();
+//        if (toscaID != null) {
+//            ToscaRepresentation tosca = toscaService.findOne(plan.getToscaID());
+//            Map<String, Object> map = tosca.getKeyValue();
+//            map.get("repositories");
+//            HashMap dockerLogin = new HashMap();
+//            return dockerLogin;
+//        }
+//        return null;
+//    }

    public String get(String id, String format) throws JSONException, IOException, TimeoutException, InterruptedException {
        DeployResponse deploy = findOne(id);
        Map<String, Object> map = deploy.getKeyValue();

@@ -601,4 +548,85 @@ public class DeployService {
    }

+    private void setAnsibleOutput(MessageParameter p, DeployRequest deployInfo) throws JSONException, IOException {
+        String value = p.getValue();
+        ObjectMapper mapper = new ObjectMapper();
+        mapper.configure(DeserializationFeature.FAIL_ON_UNKNOWN_PROPERTIES, false);
+        value = parseValue(value);
+        List<AnsibleOutput> outputList = mapper.readValue(value, new TypeReference<List<AnsibleOutput>>() {
+        });
+        List<String> outputListIds = new ArrayList<>();
+        Map<String, String> nodeTypeCache = new HashMap<>();
+        Map<String, String> domainCache = new HashMap<>();
+        Map<String, String> osTypeCache = new HashMap<>();
+        Map<String, String> cloudProviderCache = new HashMap<>();
+        for (AnsibleOutput ansOut : outputList) {
+            Map<String, Object> map = provisionService.findOne(deployInfo.getProvisionID()).getKeyValue();
+            String nodeType = nodeTypeCache.get(ansOut.getHost());
+            String domain = domainCache.get(ansOut.getHost());
+            String os = osTypeCache.get(ansOut.getHost());
+            if (nodeType == null) {
+                List<Map<String, Object>> components = null;
+                List<Map<String, Object>> topologies = null;
+                if (map.containsKey("components")) {
+                    components = (List<Map<String, Object>>) map.get("components");
+//                    topologies = (List<Map<String, Object>>) map.get("topologies");
+                } else {
+                    for (String key : map.keySet()) {
+                        Map<String, Object> subMap = (Map<String, Object>) map.get(key);
+                        if (subMap.containsKey("components") && components == null) {
+                            components = (List<Map<String, Object>>) subMap.get("components");
+                        }
+                        if (subMap.containsKey("topologies") && topologies == null) {
+                            topologies = (List<Map<String, Object>>) subMap.get("topologies");
+                        }
+                        if (components != null && topologies != null) {
+                            break;
+                        }
+                    }
+                }
+                for (Map<String, Object> component : components) {
+                    String publicAddress = null;
+                    for (String addressName : PUBLIC_ADRESS_NAMES) {
+                        if (component.containsKey(addressName)) {
+                            publicAddress = (String) component.get(addressName);
+                            break;
+                        }
+                    }
+                    if (publicAddress != null && publicAddress.equals(ansOut.getHost())) {
+                        nodeType = (String) component.get("nodeType");
+                        for (String siteName : CLOUD_SITE_NAMES) {
+                            if (component.containsKey(siteName)) {
+                                domain = (String) component.get(siteName);
+                                break;
+                            }
+                        }
+                        os = (String) component.get("OStype");
+                        nodeTypeCache.put(ansOut.getHost(), nodeType);
+                        domainCache.put(ansOut.getHost(), domain);
+                        osTypeCache.put(ansOut.getHost(), os);
+                        break;
+                    }
+                }
+            }
+            ansOut.setVmType(nodeType);
+            ansOut.setCloudDeploymentDomain(domain);
+            ansOut.setProvisionID(deployInfo.getProvisionID());
+//            ansOut.setCloudProvider(provider);
+            ansOut = ansibleOutputService.save(ansOut);
+            BenchmarkResult benchmarkResult = parseSaveBenchmarkResult(ansOut);
+            outputListIds.add(ansOut.getId());
+        }
+//        deployResponse.setAnsibleOutputList(outputListIds);
+    }
 }
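
The new kubectl_get branch is the heart of this commit: the deployer now reports the base64-encoded JSON of `kubectl get svc`, and DeployService walks items[].metadata.name and items[].spec.ports[] to publish every port attribute as a TOSCA output (via TOSCAUtils.buildTOSCAOutput) under topology_template.outputs. Below is a minimal Python sketch of the same transformation, assuming the standard `kubectl get svc -o json` ServiceList shape; the flat "service.attribute" key naming is an illustrative assumption, the real layout is whatever buildTOSCAOutput produces.

    import base64
    import json

    def kubectl_get_to_outputs(encoded_value):
        """Decode the deployer's base64 payload of `kubectl get svc -o json` and
        flatten every port attribute of every Service into an outputs map,
        mirroring the new kubectl_get branch in DeployService."""
        kubectl_get_output = json.loads(base64.b64decode(encoded_value))
        outputs = {}
        for item in kubectl_get_output.get("items", []):
            service_name = item["metadata"]["name"]
            for port in item["spec"].get("ports", []):
                for key, value in port.items():
                    # e.g. outputs["my-service.nodePort"] = "30080"
                    outputs["%s.%s" % (service_name, key)] = str(value)
        return outputs

    # DeployService then attaches the map to the provisioned TOSCA document:
    # tosca["topology_template"]["outputs"] = outputs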
drip-api/src/main/java/nl/uva/sne/drip/api/service/ProvisionService.java

@@ -15,10 +15,6 @@
  */
 package nl.uva.sne.drip.api.service;

-import com.fasterxml.jackson.core.JsonParser;
-import com.fasterxml.jackson.core.JsonProcessingException;
-import com.fasterxml.jackson.databind.ObjectMapper;
-import java.io.ByteArrayOutputStream;
 import java.io.FileNotFoundException;
 import java.io.IOException;
 import java.util.ArrayList;

@@ -26,8 +22,6 @@ import java.util.HashMap;
 import java.util.Iterator;
 import java.util.List;
 import java.util.Map;
-import java.util.Properties;
-import java.util.Set;
 import java.util.concurrent.TimeoutException;
 import java.util.logging.Level;
 import java.util.logging.Logger;

@@ -42,7 +36,6 @@ import nl.uva.sne.drip.api.rpc.DRIPCaller;
 import nl.uva.sne.drip.api.v1.rest.ProvisionController;
 import nl.uva.sne.drip.commons.utils.Converter;
 import nl.uva.sne.drip.drip.commons.data.v1.external.CloudCredentials;
-import nl.uva.sne.drip.drip.commons.data.v1.external.DeployParameter;
 import nl.uva.sne.drip.drip.commons.data.internal.Message;
 import nl.uva.sne.drip.drip.commons.data.internal.MessageParameter;
 import nl.uva.sne.drip.drip.commons.data.v1.external.PlanResponse;

@@ -61,8 +54,6 @@ import nl.uva.sne.drip.commons.utils.DRIPLogHandler;
 import nl.uva.sne.drip.commons.utils.TOSCAUtils;
 import static nl.uva.sne.drip.commons.utils.TOSCAUtils.getVMsFromTopology;
 import static nl.uva.sne.drip.commons.utils.TOSCAUtils.getVMsNodeNamesFromTopology;
-import nl.uva.sne.drip.drip.commons.data.v1.external.Key;
-import nl.uva.sne.drip.drip.commons.data.v1.external.KeyPair;
 import nl.uva.sne.drip.drip.commons.data.v1.external.ScaleRequest;
 import org.apache.commons.codec.binary.Base64;
drip-commons/src/main/java/nl/uva/sne/drip/commons/utils/TOSCAUtils.java

@@ -247,12 +247,14 @@ public class TOSCAUtils {
         List<String> toscaPortsList = (List<String>) properties.get("ports");
         if (toscaPortsList != null) {
+            List<Map<String, Object>> portList = new ArrayList<>();
             for (String portEntry : toscaPortsList) {
                 String[] portsArray = portEntry.split(":");
                 Map<String, Object> portMap = new HashMap();
                 portMap.put("containerPort", Integer.valueOf(portsArray[0]));
-                container.put("ports", portMap);
+                portList.add(portMap);
             }
+            container.put("ports", portList);
         }
         List<Map<String, Object>> containersList = new ArrayList<>();
         containersList.add(container);
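
This fix matters because a Kubernetes container spec expects `ports` to be a list of objects; the old loop overwrote container["ports"] with a single map on every iteration, so only the last TOSCA port survived. A small Python stand-in for the corrected loop, assuming colon-separated port strings like the ones the Java code splits on ':':

    def build_container_ports(tosca_ports):
        """Collect one {"containerPort": n} entry per TOSCA 'ports' string and
        assign the whole list once, as the fixed TOSCAUtils loop now does."""
        port_list = []
        for port_entry in tosca_ports:
            ports_array = port_entry.split(":")
            # Only the first field is used, matching the Java code's portsArray[0].
            port_list.append({"containerPort": int(ports_array[0])})
        return {"ports": port_list}

    print(build_container_ports(["8080:30000", "9090:30001"]))
    # {'ports': [{'containerPort': 8080}, {'containerPort': 9090}]}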
drip-deployer/admin.conf
removed in this commit (+0 / -19 lines): a kubeadm-generated kubeconfig that had been checked into the repository. Reconstructed content follows; the long certificate and key values are wrapped for display.

apiVersion: v1
clusters:
- cluster:
    certificate-authority-data:
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUN5RENDQWJDZ0F3SUJBZ0lCQURBTkJna3Foa2lHOXcwQkFRc0ZBREFWTVJNd0VRWURWUVFERXdwcmRXSmwKY201bGRHVnpNQjRYRFRFNU1Ea3lOVEUwTWpjME9Wb1hEVEk1TURreU1qRTBNamMwT1Zvd0ZURVRNQkVHQTFVRQpBeE1LYTNWaVpYSnVaWFJsY3pDQ0FTSXdEUVlKS29aSWh2Y05BUUVCQlFBRGdnRVBBRENDQVFvQ2dnRUJBSmU0CnNnc2I2ZEkvNk56WU5jbVZHY3FjamwxYVJTY3dQVG9YREhxWGRDZzZsUHlJWkRpcmNhRWlxS3dkcTE0RUF3c0UKQ0hZUjBUOWNoWWVkMFdmcHV4cGVzQWJ2RVBjRHNBbHFia21FaTN5VWZjYm5vb3JDUk56SWtNZWJhbWhNOGFIYwp6bFZpaTY1VHdoRXlKZGptd1MxRU5odTBJbktSNktOZHpIeTdhSk45dk5VY2JwZW5ON05KQkRCMmFwMTVNUmdUClVDUklQOHlneFU1V2V5NnA1SnJ1VHV1S1pLUVpxZm11cnBHb2tmQzJqOVdUTDZqQjdJcCthdEpWZFFNT2JJU04KRmM4Z0p5Q0huMUVPR0dwQXRIK1lsc3NpWnk2VFV6NEpLcXpSYmVwaVpUSVZNMUI5anh4alloSDVGNFR5UmllMgp6L0dwbCtDOFpzSXdSU2M3Z0wwQ0F3RUFBYU1qTUNFd0RnWURWUjBQQVFIL0JBUURBZ0trTUE4R0ExVWRFd0VCCi93UUZNQU1CQWY4d0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFBeWlqMzVDcTA1cWg5bjROYS8yNGJTYkh1aUEKdnMzUmVTSThISWZManhPNXVFMnJ6R3Y2bjBZMlVab1ZFemcrbldTdjVjNG81Qm1uTVQ2MkxNM1JYRDV0WW0yWAo0ZVI3QTk5d1RSYTcyY1lTYSs0emtPYVI3cisySUpDZndLZ0hEUzU2SFY2dkhwNWRZWHlUenlNK09MVVlMNnNECkx5eHZJSVlUTHZKQzIweE1aakFCQWR3UXM4eExKTE1zbEV1WDJUTXYwempxOWpFWHNDN2xXOXhJLzYzSU5nU2sKU2dDc0pONnY3ZUN0WUlHZWsrbndTengwL09vVXp2STFUd1ZGenFxS2Jwcm0rd2dQd1hITGtKSXBZeXg5SlM4Ywo0b2ZkMFBadE5kVUNDRXQ1R2w3b05PaUVkbU5JVmVqTVdDRWQxdlFLQ2VZejlzRk05WVQwUXlEakpTcz0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo
=
    server: https://10.103.0.7:6443
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: kubernetes-admin
  name: kubernetes-admin@kubernetes
current-context: kubernetes-admin@kubernetes
kind: Config
preferences: {}
users:
- name: kubernetes-admin
  user:
    client-certificate-data:
LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSUM4akNDQWRxZ0F3SUJBZ0lJRUgrWWpSQzJSUFF3RFFZSktvWklodmNOQVFFTEJRQXdGVEVUTUJFR0ExVUUKQXhNS2EzVmlaWEp1WlhSbGN6QWVGdzB4T1RBNU1qVXhOREkzTkRsYUZ3MHlNREE1TWpReE5ESTNOVFJhTURReApGekFWQmdOVkJBb1REbk41YzNSbGJUcHRZWE4wWlhKek1Sa3dGd1lEVlFRREV4QnJkV0psY201bGRHVnpMV0ZrCmJXbHVNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUJDZ0tDQVFFQTFlYnovM2dVQWQvai9uay8KWTlZUmNRS3VQMEFZT28zNlRsT2RiRlR5cHE1bXo4UGpOMlV2b1pZMkNlazhCSCtLVXhTbzRWdWphelNxM0N5VwpiOS9ESERXNnpvM1JlS285NDNnWFpyVmZYQnpLUkcrUjE4WkRQRmNPSS9sU0Vkd0NlUUlFRnY3MnRIWjQxbHpQClBraTRVMFRvMkkrNE5hNjVwazJHMGxoSEVCQ1NYMHJNbFczc3ltTW9ONSthN21COSttVElLaThKZlZ0T0NydWkKMi9rclJJSzVkSlJ5eDZkWE5KMUp2ZlR0VFE3REgwZklrS0VaMWRRbGU4QmdMbzZhTi9UZDN6SDUwZmxuOTRsaApNdXR6RDM4Yks0a1o0Wnp3THFCRVQwSzlNbGhuZkJOREFjakpUVjVuVmU2bFZRVTdYcVhIL2pxNS9RbHFWclJuClJhV2JzUUlEQVFBQm95Y3dKVEFPQmdOVkhROEJBZjhFQkFNQ0JhQXdFd1lEVlIwbEJBd3dDZ1lJS3dZQkJRVUgKQXdJd0RRWUpLb1pJaHZjTkFRRUxCUUFEZ2dFQkFJeVpncU5zN3pWMkVsZHRSekFuYTZkUmFWUFhlMW1LNFVRRQo5OFhyZTR0WFdRYm4zZUJ6UkY5ZmpKZ0pSTFppTDlXM2lBbFl6c0I1YWxqMlRzQ3JwdHdIaHFRd2x6M2ZhZ21UCnJhWWNmV0FWOG9XVVFuanNBVStOZHZFU0hJMGQwQjQ4SG5VTWQ4Y3psWlFwOTZuRWNuLzZrMy8vRktrS01pQ0QKb2xlWG9VblB4Wm1CU3AxN3lWeUlQaE1qRFhFMFFoNTFxNnVieTdsbVk5WUkzQkhVQUQ3TTdQQkVvWm9QaEpyNgorSXZsVkZWRVhXR3QrNmFSSXBKZ3N6enVaaWVubTh6YTlXZDMzOWdJOXZhQk9Edjg3OHNBOFpJT0h5TVNlbTJOCmVzbXlxMnN6T1RiNXZlV0RYalQyYTdtMDlOOGZ4S3RQSk9jeTlROWxBUTZHd2NsUmZ2ND0KLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo
=
    client-key-data:
LS0tLS1CRUdJTiBSU0EgUFJJVkFURSBLRVktLS0tLQpNSUlFb2dJQkFBS0NBUUVBMWViei8zZ1VBZC9qL25rL1k5WVJjUUt1UDBBWU9vMzZUbE9kYkZUeXBxNW16OFBqCk4yVXZvWlkyQ2VrOEJIK0tVeFNvNFZ1amF6U3EzQ3lXYjkvREhEVzZ6bzNSZUtvOTQzZ1haclZmWEJ6S1JHK1IKMThaRFBGY09JL2xTRWR3Q2VRSUVGdjcydEhaNDFselBQa2k0VTBUbzJJKzROYTY1cGsyRzBsaEhFQkNTWDByTQpsVzNzeW1Nb041K2E3bUI5K21USUtpOEpmVnRPQ3J1aTIva3JSSUs1ZEpSeXg2ZFhOSjFKdmZUdFRRN0RIMGZJCmtLRVoxZFFsZThCZ0xvNmFOL1RkM3pINTBmbG45NGxoTXV0ekQzOGJLNGtaNFp6d0xxQkVUMEs5TWxobmZCTkQKQWNqSlRWNW5WZTZsVlFVN1hxWEgvanE1L1FscVZyUm5SYVdic1FJREFRQUJBb0lCQUR0WmdZbjR2MVJsMHRUZwp2MzNyLzVyanE0VlJPMmZEelJlK2k0ZHJhb2hsQzVIS1FGazJjaWpiak5MakxBdnpkMlhsN1pYWjMxWDNueERJCkxsV01PSTZ6T2NubC82RURXM2lwOFpSRjd0ZVlCV2RIcmFlNUV4N0M2T0dDWkFzZ2lHOGE2QmVaVnNwcnRNdUYKcE5zYlFrbVliU0xwZmFzbmQ4dDA1MXVsc3RINXhSV0U0WWRCZ2VZMFlWYlcrQ2FSeXN4T2lHVEE4cnRlek81UQpFdDN0WEZQUldWMmZyYzk0TkhkTUc0NXpqU29GVDMzN1l1R2pVSmN2VG1IQ2VHMUt0a3F3TjBpeHNsVmpOOENFCkFhdHNBaFBJT04rQ0VSQ3VLUVZMbkZtMUpUYzZRRHZlbzVadWh5VG04TkJXZjBSZURwc1RyazVVY2FOUXdPU2MKcHdKYjI0RUNnWUVBM1g3b2JmL1pvcng0WVZhbGw2ejVwQUV2cFFPdkNOajluRlhiQzU2Y0lhNGZmN2hRbGZ3LwpFMWxMa3NmNzBKNi95a283MWs2L0d2WEwya0hxZnRwczN4RXFBcDhIZUpTaERYcVJqa1BDeHk0S2F4TS9UK0ZhCmh6bTV5dFFCK2E5ajFwckZDa3B4WmtaMllJRE5xeDIxcUl1Vm15T0dNL0loRXZPMktiM3BmVlVDZ1lFQTl6azQKQ1Fya3NEWHNlUUl5YytxbTZxdEFWWEs2c0NwN2lUT2YwRjZWeFBzNGxHb0lkY0RaTEl6UGVuSERIUEM1S0ZCeAozSE1nUHlaUUVqalBVU3VLd3RqRFpUU05QRk02cVF5RGtBckkrVjdqckF0TGVHdTNTdFl6WUJETXhncFhBbEpKCktUY0hybkE5TkZnQ3I3ckltdXJvWmdwT3RhL2ZLTXZpQ0dKcVJPMENnWUI5MWlqVXZhTjJtaDdHSmtUanZBa0UKRFF4MWNuZFJ6bjJmQVFQMlFRRXcvVXkxOGhBT2RnV2J4NEp3L0o3cXNoWUNKbFNDZDdDSTc1WUdCS2JsdE5CZgpscy9JTjNNMkpUS3VockVGSXlnWW91YVdXSlFDbk9RaXFVQU5wSThPdkg4N0lDakxwT2x5RXB3VVRYa0xPMURHClZhOEVPWlY0RTVxSXV0OEdMZmZtRlFLQmdIc3g1anlMVXg2RUlJekVqWU82QU9lYjR2a3hyTm93c1ZMVGlPWDYKM0VOR3RSRXdMWHNRV0tpY21wOTNwVFQ4dUNmZ3ZueU9XaGxkN0RUSVhuY2liWWxmSHkrRm1vUGZMYklqN0VPUgpQRWtZQWZndndMSUhhMVU5bkdoWXR0SlJRTDZGWnJQRkdtelF2WThoOUdUQmZVbkZtWDJQMFYwMGdNNEJtMmQyCk0yS1pBb0dBVkFQenVYc1NFTUVKQzViVVozQWpVVG02WHFUS3R5dEtZcUdoTjVrcHlHZkMvRGtiSFJVeFhIUkoKMmFKeDM4cGF1b1IvYVVqUXJyQjdFbXQyYUh0Wkw2QTJFQUtWUDkyNWpUZFZPUDB1L0ZJSnV1S1RVNFpMWDd0VwpXMk5pWHVaR0xWQzhPekFwNHdLU290aHh1cFhveWhkV1FGRkNQRUJBY21KSStWK0ZZejA9Ci0tLS0tRU5EIFJTQSBQUklWQVRFIEtFWS0tLS0tCg
==
drip-deployer/docker_kubernetes.py

@@ -21,12 +21,12 @@ from vm_info import VmInfo
 import linecache
 import sys
 import logging
+import time
 # from drip_logging.drip_logging_handler import *
 from os import listdir
 from os.path import isfile, join

 logger = logging.getLogger(__name__)
 if not getattr(logger, 'handler_set', None):
     logger.setLevel(logging.INFO)

@@ -75,16 +75,34 @@ def install_manager(vm):
         stdout.read()
 #        stdin, stdout, stderr = ssh.exec_command("sudo kubeadm init --apiserver-advertise-address=%s" % (vm.ip))
-        stdin, stdout, stderr = ssh.exec_command("sudo kubeadm init >> log 2>&1")
+        stdin, stdout, stderr = ssh.exec_command("sudo kubeadm init")
         retstr = stdout.readlines()
         stdin, stdout, stderr = ssh.exec_command("sudo cp /etc/kubernetes/admin.conf /tmp/")
         stdout.read()
+        stdin, stdout, stderr = ssh.exec_command("mkdir -p $HOME/.kube")
+        stdout.read()
+        stdin, stdout, stderr = ssh.exec_command("sudo cp /etc/kubernetes/admin.conf $HOME/.kube/config")
+        stdout.read()
+        stdin, stdout, stderr = ssh.exec_command("sudo chown $(id -u):$(id -g) $HOME/.kube/config")
+        stdout.read()
+        stdin, stdout, stderr = ssh.exec_command("sudo sysctl net.bridge.bridge-nf-call-iptables=1")
+        retstr = stdout.readlines()
+        stdin, stdout, stderr = ssh.exec_command("kubectl apply -f \"https://cloud.weave.works/k8s/net?k8s-version=$(kubectl version | base64 | tr -d '\n')\"")
+        retstr = stdout.readlines()
+        stdin, stdout, stderr = ssh.exec_command("kubectl taint nodes --all node-role.kubernetes.io/master-")
+        retstr = stdout.readlines()
         stdin, stdout, stderr = ssh.exec_command("sudo chown %s /tmp/admin.conf" % (vm.user))
         stdout.read()
         stdin, stdout, stderr = ssh.exec_command("sudo chgrp %s /tmp/admin.conf" % (vm.user))
         stdout.read()
-        sftp.get("/tmp/admin.conf", file_path + "/admin.conf")
+#        sftp.get("/tmp/admin.conf", file_path + "/admin.conf")
         logger.info("Finished kubernetes master installation on: " + (vm.ip))
     except Exception as e:
         global retry

@@ -151,25 +169,32 @@ def deploy_on_master(deployment_file, vm):
             k8s_file = deployment_file + "/" + f
             sftp.put(k8s_file, f)
-        stdin, stdout, stderr = ssh.exec_command("kubectl create -f /tmp/k8s/ --kubeconfig /tmp/admin.conf >> log 2>&1")
+        stdin, stdout, stderr = ssh.exec_command("kubectl create -f /tmp/k8s/ >> log 2>&1")
         s_out = stdout.read()
         e_out = stderr.read()
-        print s_out
-        print e_out
+        time.sleep(2)
+        # cmd = 'kubectl get svc --all-namespaces -o go-template=\'{{range .items}}{{range.spec.ports}}{{if .nodePort}}{{.nodePort}}{{"\\n"}}{{end}}{{end}}{{end}}\''
+        cmd = 'kubectl get svc --output json'
+        stdin, stdout, stderr = ssh.exec_command(cmd)
+        e_out = stderr.read()
+        json_output = stdout.read().splitlines()
+        # exposed_ports_str = ''
+        # for port in exposed_ports:
+        #     exposed_ports_str += port + ','
+        # exposed_ports_str = exposed_ports_str[:-1]
     except Exception as e:
         # print '%s: %s' % (vm.ip, e)
         logger.error(vm.ip + " " + str(e))
         return "ERROR:" + vm.ip + " " + str(e)
     ssh.close()
-    return "SUCCESS"
+    return json_output

 def deploy(vm_list, deployment_file):
     for i in vm_list:
         if i.role == "master":
-            deploy_on_master(deployment_file, i)
+            return deploy_on_master(deployment_file, i)
+    return None

 def run(vm_list, rabbitmq_host, owner):
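
deploy_on_master now pauses briefly after `kubectl create`, runs `kubectl get svc --output json` over the same SSH session, and returns that output instead of a bare "SUCCESS" string, and deploy() passes it straight back to the RPC server. A hedged paramiko sketch of that capture step; host, user and key_file are placeholder parameters, whereas the real code reuses the ssh client it already opened from the VmInfo object:

    import json
    import time
    import paramiko

    def get_services_json(host, user, key_file):
        """SSH to the Kubernetes master and return the parsed output of
        `kubectl get svc --output json`, the way deploy_on_master now captures
        the cluster state after `kubectl create`."""
        ssh = paramiko.SSHClient()
        ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh.connect(host, username=user, key_filename=key_file)
        try:
            time.sleep(2)  # give the freshly created services a moment to register
            stdin, stdout, stderr = ssh.exec_command("kubectl get svc --output json")
            raw = stdout.read()
        finally:
            ssh.close()
        return json.loads(raw)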
drip-deployer/rpc_server.py

@@ -129,8 +129,8 @@ def handle_delivery(message):
             compose_name = param["attributes"]["name"]
     if manager_type == "kubernetes":
-#        ret = docker_kubernetes.run(vm_list, rabbitmq_host, owner)
-        docker_kubernetes.deploy(vm_list, k8s_folder)
+        ret = docker_kubernetes.run(vm_list, rabbitmq_host, owner)
+        ret = docker_kubernetes.deploy(vm_list, k8s_folder)
         return ret
     elif manager_type == "swarm":
         ret = docker_engine.run(vm_list, rabbitmq_host, owner)

@@ -166,7 +166,7 @@ def on_request(ch, method, props, body):
             manager_type = param["value"]
             break
-    if "ERROR" in ret:
+    if not ret and "ERROR" in ret:
         res_name = "error"
     elif manager_type == "ansible":
         res_name = "ansible_output"

@@ -174,6 +174,8 @@ def on_request(ch, method, props, body):
         res_name = "scale_status"
     elif manager_type == "swarm_info":
         res_name = "swarm_info"
+    elif manager_type == "kubernetes":
+        res_name = "kubectl_get"
     else:
         res_name = "credential"

@@ -186,7 +188,7 @@ def on_request(ch, method, props, body):
     par["url"] = "null"
     par["encoding"] = "UTF-8"
     par["name"] = res_name
-    par["value"] = ret
+    par["value"] = base64.b64encode(ret)
     par["attributes"] = "null"
     response["parameters"].append(par)
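
On the deployer's RPC side the kubectl output is labelled kubectl_get and base64-encoded before being appended to the response, which is exactly what the new DeployService branch decodes. A minimal sketch of building that response parameter; the dict layout copies the existing rpc_server fields, and the .decode("ascii") is only needed on Python 3 (the original deployer is Python 2, where b64encode returns a str):

    import base64
    import json

    def build_kubectl_get_parameter(kubectl_json_bytes):
        """Wrap raw `kubectl get svc -o json` output in the message parameter that
        rpc_server appends to its response: name it "kubectl_get" and base64-encode
        the value so it survives the JSON/AMQP round trip."""
        return {
            "url": "null",
            "encoding": "UTF-8",
            "name": "kubectl_get",
            "value": base64.b64encode(kubectl_json_bytes).decode("ascii"),
            "attributes": "null",
        }

    if __name__ == "__main__":
        sample = json.dumps({"items": []}).encode("utf-8")
        print(build_kubectl_get_parameter(sample))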
drip_planner2/.idea/workspace.xml

@@ -2,9 +2,7 @@
 <project version="4">
   <component name="ChangeListManager">
     <list default="true" id="462ede19-adfe-472b-975e-fefefa973fe0" name="Default Changelist" comment="slolved cap error">
-      <change beforePath="$PROJECT_DIR$/../drip-deployer/docker_kubernetes.sh" beforeDir="false" afterPath="$PROJECT_DIR$/../drip-deployer/docker_kubernetes.sh" afterDir="false" />
       <change beforePath="$PROJECT_DIR$/.idea/workspace.xml" beforeDir="false" afterPath="$PROJECT_DIR$/.idea/workspace.xml" afterDir="false" />
-      <change beforePath="$PROJECT_DIR$/src/planner/basic_planner.py" beforeDir="false" afterPath="$PROJECT_DIR$/src/planner/basic_planner.py" afterDir="false" />
       <change beforePath="$PROJECT_DIR$/venv/lib/python3.6/site-packages/easy-install.pth" beforeDir="false" />
       <change beforePath="$PROJECT_DIR$/venv/lib/python3.6/site-packages/prettytable.py" beforeDir="false" />
       <change beforePath="$PROJECT_DIR$/venv/lib/python3.6/site-packages/pyparsing.py" beforeDir="false" />

@@ -84,7 +82,7 @@
       <option name="ADD_CONTENT_ROOTS" value="true" />
       <option name="ADD_SOURCE_ROOTS" value="true" />
       <option name="SCRIPT_NAME" value="$PROJECT_DIR$/src/rpc_server.py" />
-      <option name="PARAMETERS" value="test_local" />
+      <option name="PARAMETERS" value="localhost planner_queue" />
       <option name="SHOW_COMMAND_LINE" value="false" />
       <option name="EMULATE_TERMINAL" value="false" />
       <option name="MODULE_MODE" value="false" />