Cleaned up pre deploy

parent 4f908524
......@@ -27,13 +27,17 @@ def upload_and_train(use_case: str, developer_id: int):
#FORWARD TO GPU SERVER WITH IP AND PORT
#data = {'use_case' : use_case,
# 'developer_id' : developer_id}
url = f'https://{network_constants.FEDERATED_TRAINING_HOSTNAME}:{network_constants.FEDERATED_TRAINING_REST_PORT}/api/Developers/use_cases/{use_case}/developer_id/{developer_id}/upload_and_train:'
#url= 'gpu3.itec.aau.at/home/itec/bogdan/Articonf/smart/tools/federated-training/app/routes/developers'
response = requests.post(
url,
verify = False,
proxies = { "http":None, "https":None },
files= request.files
files= request.files,
#data = data
)
return response
......
swagger: "2.0"
info:
title: Federated Learning microservice
description: This is the documentation for the federated learning microservice.
title: Federated Training microservice
description: This is the documentation for the federated Training microservice.
version: "1.0.0"
consumes:
......
swagger: "2.0"
info:
title: Federated Learning microservice
title: Federated Training microservice
description: This is the documentation for the federated learning microservice.
version: "1.0.0"
......
......@@ -34,7 +34,6 @@ def upload_and_train(use_case: str, developer_id: int):
#file_dict = request.files
#db_File_True = file_dict["dataset_file1"]
#db_File_False = file_dict["dataset_file2"]
#TODO IMPLEMENT HERE
#THEN start processing
last_train_metrics = main_proc.start_processing(use_case,developer_id)
......
......@@ -12,7 +12,7 @@ def last(use_case: str):
bottom = df.tail(1)
bottom = bottom.to_json(orient ="records")
print(bottom[1:-1])
metricsJson = bottom[1:-1] #remove the list [ { a: "1"} ... ]
metricsJson = bottom[1:-1] #remove the enclosing brackets of the list to make a dict [ { a: "1" , ... } ]
return Response(status=200, response=metricsJson)
except Exception as e:
print(e)
......@@ -22,6 +22,7 @@ def last(use_case: str):
def upload_and_train(use_case: str):
use_case_path = './processing/'+use_case+'/'
default_path = 'processing/default/'
#Remove old files
try:
if os.path.exists(use_case_path):
......@@ -31,20 +32,23 @@ def upload_and_train(use_case: str):
except OSError as error:
print(error)
return Response(status=400, response="Error occured when deleteing the old use_case directory")
#Start a new implementation of the model.
#TODO: get the python files and create them locally in the {use_case} folder
try:
os.mkdir(use_case_path)
#COPY DEFAULT FILES
default_path = 'processing/default/'
use_case_path +='/'
#COPY DEFAULT FILES
shutil.copyfile(default_path+'main_proc.py',use_case_path+'main_proc.py')
shutil.copyfile(default_path+'__init__.py',use_case_path+'__init__.py')
shutil.copyfile(default_path+'checkpoint_manager.py',use_case_path+'checkpoint_manager.py')
shutil.copyfile(default_path+'federated_algorithm.py',use_case_path+'federated_algorithm.py')
#TODO: get the python files and create them locally in the {use_case} folder
# copy dataset_file1 into use_case_path+'db/'
# copy dataset_file2 into use_case_path+'db/'
# copy preprocessing into use_case_path
# copy global_hyperparams into use_case_path
# copy model into use_case_path
#COPY flask files
#use flask? request.files.getlist('filename')[0] ???
......@@ -53,9 +57,9 @@ def upload_and_train(use_case: str):
return Response(status=400, response="Error occured when creating/copying the use_case files")
#TODO: after the files are copied, start training
#after the files are copied, start training
#TODO: check if training is working
# should write to ledger.csv the last train metrics after 2-3 mins
use_case_path = 'processing/'+use_case+'/'
sys.path.append(use_case_path)
import main_proc
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or sign in to comment