Implemented JSON responses

parent 18dde0e8
@@ -73,11 +73,11 @@ paths:
           description: "Successful Request"
         '404':
           description: "Use case train session data does not exist"
-  /Owners/use_cases/{use_case}/upload:
+  /Owners/use_cases/{use_case}/upload_and_train:
     post:
       security:
       - JwtAdmin: []
-      operationId: "routes.owners.upload"
+      operationId: "routes.owners.upload_and_train"
       tags:
       - "Owners"
       summary: "Upload the files required for the federated training"
@@ -202,7 +202,7 @@ paths:
     post:
       security:
       - JwtAdmin: []
-      operationId: "routes.user.check_article"
+      operationId: "routes.users.check_article"
       tags:
       - "Users"
       summary: "Use the trained model to evaluate an input"
...
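The renamed endpoint can be exercised with a plain HTTP client once the API is running. A minimal sketch, not part of this commit: the base URL, port, API prefix, token value, and upload payload are all assumptions here; only the path segment upload_and_train and the JwtAdmin bearer scheme come from the spec above.

import requests

BASE_URL = "https://localhost:5000/api"   # assumed host/port/prefix
TOKEN = "<JwtAdmin token>"                # assumed admin JWT

resp = requests.post(
    f"{BASE_URL}/Owners/use_cases/text_processing/upload_and_train",
    headers={"Authorization": f"Bearer {TOKEN}"},
    files={"file": open("training_files.zip", "rb")},  # assumed upload payload
    verify=False,
)
print(resp.status_code, resp.text)  # the training metrics now come back as JSON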
@@ -10,8 +10,8 @@ print(os.getcwd())
 import global_hyperparams as globals
 from preprocessing import get_preprocessed_train_test_data
-from federated_algorithm import federated_computation_new, federated_computation_continue, save_state_to_file, load_state_from_file
-from checkpoint_manager import save_to_file_CSV#,save_state_to_file, load_state_from_file
+from federated_algorithm import federated_computation_new, federated_computation_continue#, save_state_to_file, load_state_from_file
+from checkpoint_manager import save_to_file_CSV,save_state_to_file, load_state_from_file
@@ -34,8 +34,10 @@
 # print(type(metrics))
 # print("DONE2")
-def start_processing(developer_id:int = 0):
-    globals.initialize()
+def start_processing(use_case, developer_id:int = 0):
+    globals.initialize(use_case,developer_id)
+    globals.TRAINER_ID = developer_id
     train_dataset, test_dataset= get_preprocessed_train_test_data()
@@ -43,7 +45,6 @@ def start_processing(developer_id:int = 0):
     trained_metrics= metrics['train']
     timestamp = save_state_to_file(state)
-    globals.TRAINER_ID = developer_id
     globals.DATASET_ID = timestamp
     written_row = save_to_file_CSV(globals.TRAINER_ID,timestamp,globals.DATASET_ID,trained_metrics['sparse_categorical_accuracy'],trained_metrics['loss'])
...
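With this change the caller passes the use case straight into start_processing, which forwards it (together with the developer id) to globals.initialize before any data is loaded. A minimal sketch of the new calling convention; the module name main_proc is taken from the route handlers below, and the return value follows the CSV columns shown later in this commit.

import main_proc  # the routes import the processing entry point under this name

# use_case selects the processing folder; developer_id ends up in globals.TRAINER_ID
written_row = main_proc.start_processing("text_processing", developer_id=1)
# written_row holds [Trainer_id, Model_id, Dataset_id, Accuracy, Loss]
print(written_row)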
import os
#from processing.text_processing.federated_algorithm import federated_computation_continue
#from processing.text_processing.version_handler import save_state_to_file
print(os.getcwd())
#import processing.text_processing.global_hyperparams as globals
#from processing.text_processing.preprocessing import get_preprocessed_train_test_data
import global_hyperparams as globals
from preprocessing import get_preprocessed_train_test_data
from federated_algorithm import federated_computation_new, federated_computation_continue, save_state_to_file, load_state_from_file
from checkpoint_manager import save_to_file_CSV#,save_state_to_file, load_state_from_file
# globals.initialize()
# train_dataset, test_dataset= get_preprocessed_train_test_data()
# state,metrics = federated_computation_new(train_dataset,test_dataset)
# last_model_id = save_state_to_file(state)
# #model_filename = "ckpt_1622721644"
# #restored_state = load_state_from_file(model_filename)
# #state,metrics = federated_computation_continue(train_dataset, test_dataset, restored_state)
# #last_model_id = save_state_to_file(state)
# trained_metrics= metrics['train']
# save_to_file_CSV(globals.TRAINER_ID,last_model_id,globals.DATASET_ID,trained_metrics['sparse_categorical_accuracy'],trained_metrics['loss'])
# print("DONE")
# print(type(state))
# print(type(metrics))
# print("DONE2")
def start_processing(developer_id:int = 0):
globals.initialize()
train_dataset, test_dataset= get_preprocessed_train_test_data()
state,metrics = federated_computation_new(train_dataset,test_dataset)
trained_metrics= metrics['train']
timestamp = save_state_to_file(state)
globals.TRAINER_ID = developer_id
globals.DATASET_ID = timestamp
written_row = save_to_file_CSV(globals.TRAINER_ID,timestamp,globals.DATASET_ID,trained_metrics['sparse_categorical_accuracy'],trained_metrics['loss'])
return written_row
\ No newline at end of file
-def initialize():
+def initialize(use_case,trainer_id = 0,dataset_id = 0):
     global MAX_LENGTH #Length of sentences to be fed into the NN. Similar to image size, i.e. 100pixels x 100pixels, but it's 1D.
     MAX_LENGTH = 40
@@ -20,8 +20,8 @@ def initialize():
     global EPOCHS #number of epochs the model will be trained
     EPOCHS = 5
     global TRAINER_ID # ID of the trainer entity.
-    TRAINER_ID = 0 #0 = Owner of the use_case
+    TRAINER_ID = trainer_id #0 = Owner of the use_case
     global DATASET_ID # ID of the dataset used
-    DATASET_ID = 0 #0 = "Main"/Original dataset
+    DATASET_ID = dataset_id #0 = "Main"/Original dataset
     global USE_CASE #Use_case name
-    USE_CASE = None
+    USE_CASE = use_case
\ No newline at end of file
...
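initialize() now receives the run context instead of hard-coding it, so the module-level globals reflect the current trainer and dataset. A short usage sketch, assuming the module is importable as global_hyperparams as in the imports above:

import global_hyperparams as globals

globals.initialize("text_processing", trainer_id=1)
assert globals.USE_CASE == "text_processing"
assert globals.TRAINER_ID == 1   # 0 would mean the owner of the use_case
assert globals.DATASET_ID == 0   # 0 = "Main"/original dataset until a trained state is saved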
 Trainer_id,Model_id,Dataset_id,Accuracy,Loss
-0,1623160388,0,0.25,nan
-0,1623160474,0,0.5,nan
-1,1623333361,0,0.5,nan
-1,1623406445,0,0.5,nan
-0,1623419415,1623419415,0.0,nan
+0,1623766462,1623766462,0.5,nan
...
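The metrics file now reuses the save-state timestamp as both Model_id and Dataset_id. For orientation, a hedged sketch of what appending such a row could look like; the real checkpoint_manager.save_to_file_CSV implementation and the CSV path are not shown in this diff, so both are assumptions here.

import csv

def save_to_file_CSV(trainer_id, model_id, dataset_id, accuracy, loss,
                     path="last_train.csv"):
    # Hypothetical illustration only; appends one row per training run.
    row = [trainer_id, model_id, dataset_id, accuracy, loss]
    with open(path, "a", newline="") as f:
        csv.writer(f).writerow(row)
    return row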
@@ -2,14 +2,16 @@ import os
 #from processing.text_processing.federated_algorithm import federated_computation_continue
 #from processing.text_processing.version_handler import save_state_to_file
+print(os.getcwd())
 #import processing.text_processing.global_hyperparams as globals
 #from processing.text_processing.preprocessing import get_preprocessed_train_test_data
 import global_hyperparams as globals
 from preprocessing import get_preprocessed_train_test_data
-from federated_algorithm import federated_computation_new, federated_computation_continue
-from checkpoint_manager import save_to_file_CSV, save_state_to_file, load_state_from_file
+from federated_algorithm import federated_computation_new, federated_computation_continue#, save_state_to_file, load_state_from_file
+from checkpoint_manager import save_to_file_CSV,save_state_to_file, load_state_from_file
@@ -32,9 +34,10 @@
 # print(type(metrics))
 # print("DONE2")
-def start_processing(use_case:str,developer_id:int = 0):
-    globals.initialize()
-    globals.USE_CASE = use_case
+def start_processing(use_case, developer_id:int = 0):
+    globals.initialize(use_case,developer_id)
+    globals.TRAINER_ID = developer_id
     train_dataset, test_dataset= get_preprocessed_train_test_data()
@@ -42,10 +45,7 @@ def start_processing(use_case:str,developer_id:int = 0):
     trained_metrics= metrics['train']
     timestamp = save_state_to_file(state)
-    globals.TRAINER_ID = developer_id
     globals.DATASET_ID = timestamp
     written_row = save_to_file_CSV(globals.TRAINER_ID,timestamp,globals.DATASET_ID,trained_metrics['sparse_categorical_accuracy'],trained_metrics['loss'])
     return written_row
-
-start_processing("text_processing")
\ No newline at end of file
...
# import pandas as pd
# train_df = pd.read_csv('processing/fake-news/train.csv', header=0)
# test_df = pd.read_csv('processing/fake-news/test.csv', header=0)
# train_df = train_df.fillna(' ')
# test_df = test_df.fillna(' ')
# train_df['all_info'] = train_df['text'] + train_df['title'] + train_df['author']
# test_df['all_info'] = test_df['text'] + test_df['title'] + test_df['author']
# target = train_df['label'].values
import pandas as pd
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Dense, Dropout, Embedding
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing.sequence import pad_sequences
import collections
import numpy as np
import tensorflow as tf
import tensorflow_federated as tff
real = pd.read_csv("processing/fake_news/prototype_db_fake_real/True.csv")
fake = pd.read_csv("processing/fake_news/prototype_db_fake_real/Fake.csv")
# dropping rows that have urls as text and date, real's dates look fine, also dropping ones that have no text
fake_drop = fake.drop(index=[9358,15507,15508,18933])
fake_drop = fake_drop.drop(fake_drop.loc[fake_drop.text == ' '].index)
real_drop = real.drop(real.loc[real.text == ' '].index)
# Give labels to data before combining
fake['label'] = 1
real['label'] = 0
combined = pd.concat([fake, real])
no_reuters = combined.copy()
no_reuters.text = no_reuters.text.str.replace('Reuters', '')
combined = no_reuters.copy()
## train/test split the text data and labels
df_text = combined['text'] #features is now
labels = combined['label'] #or maybe use target?
target = combined['label'].values
print("##################label")
print(type(labels))
print(labels)
print("###########")
print(type(combined['label'].values))
print(combined['label'].values)
print("df_text_type:")
print(type(df_text))
############################ ^ORIGINAL DB
# train_df = pd.read_csv('processing/text_processing/prototype_db_fake_real/train.csv', header=0)
# test_df = pd.read_csv('processing/text_processing/prototype_db_fake_real/test.csv', header=0)
# train_df = train_df.fillna(' ')
# test_df = test_df.fillna(' ')
# train_df['all_info'] = train_df['text'] + train_df['title'] + train_df['author']
# test_df['all_info'] = test_df['text'] + test_df['title'] + test_df['author']
# target = train_df['label'].values
# print(type(train_df['label'].values))
# print(train_df['label'].values)
# df_text = train_df['all_info']
######################################################################################
tokenizer = Tokenizer(oov_token = "<OOV>", num_words=6000)
tokenizer.fit_on_texts(df_text)
MAX_LENGTH = 40
VOCAB_SIZE = 6000
sequences_train = tokenizer.texts_to_sequences(df_text)
padded_train = pad_sequences(sequences_train, padding = 'post', maxlen=MAX_LENGTH)
#Data_train, data_text, label_train, label_test
X_train, X_test, y_train, y_test = train_test_split(padded_train, target, test_size=0.2)
X_train = tf.convert_to_tensor(X_train)
X_test = tf.convert_to_tensor(X_test)
y_train = tf.convert_to_tensor(y_train)
y_test = tf.convert_to_tensor(y_test)
print(X_train.shape)
print(y_train.shape)
print("Type of X_train, X_test, y_train, y_test")
print(type(X_train))
print(type(X_test))
print(type(y_train))
print(type(y_test))
###################################################################################\
#FED PREPROCESSING
NUM_CLIENTS = 4
SHUFFLE_BUFFER = 5000
BATCH_SIZE = 512
def preprocess(dataset):
def element_fn(x, y):
return collections.OrderedDict([
('x', x),
('y', y)#tf.cast(tf.reshape(y, [1]), tf.float32))
])
return dataset.map(element_fn).shuffle(
SHUFFLE_BUFFER).batch(BATCH_SIZE)
def generate_clients_datasets(n, source_x, source_y):
clients_dataset=[]
for i in range(n):
dataset=tf.data.Dataset.from_tensor_slices(([source_x[i]], [source_y[i]]))
dataset=preprocess(dataset)
clients_dataset.append(dataset)
return clients_dataset
train_dataset=generate_clients_datasets(NUM_CLIENTS, X_train, y_train)
test_dataset=generate_clients_datasets(NUM_CLIENTS, X_test, y_test)
# Grab a single batch of data so that TFF knows what data looks like.
# sample_batch = tf.nest.map_structure(
# lambda x: x.numpy(), iter(train_dataset[0]).next())
INPUT_SPEC = train_dataset[0].element_spec
print("DONE PREPROCESSING")
#################################################################################
EMBED_DIM = 10
def get_simple_LSTM_model():
model = Sequential()
model.add(Embedding(VOCAB_SIZE, EMBED_DIM, input_length=MAX_LENGTH))
model.add(Dropout(0.3))
model.add(LSTM(100))
model.add(Dropout(0.3))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(1, activation='sigmoid'))
# model.compile(loss='binary_crossentropy',
# optimizer='adam',
# metrics=[tf.keras.metrics.Precision(), tf.keras.metrics.Recall()])
return model
def model_fn():
keras_model = get_simple_LSTM_model()
#return tff.learning.from_compiled_keras_model(keras_model, sample_batch) original
return tff.learning.from_keras_model(
keras_model,
input_spec=INPUT_SPEC,
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
# Training and evaluating the model
iterative_process = tff.learning.build_federated_averaging_process(model_fn,client_optimizer_fn=lambda: tf.keras.optimizers.SGD(lr=0.5))
state = iterative_process.initialize()
EPOCHS = 5
for n in range(EPOCHS):
state, metrics = iterative_process.next(state, train_dataset)
print('round {}, training metrics={}'.format(n+1, metrics))
evaluation = tff.learning.build_federated_evaluation(model_fn)
eval_metrics = evaluation(state.model, train_dataset)
print('Training evaluation metrics={}'.format(eval_metrics))
test_metrics = evaluation(state.model, test_dataset)
print('Test evaluation metrics={}'.format(test_metrics))
# model = get_simple_LSTM_model()
# print(model.summary())
# best_model_file_name = "processing/text_processing/models/best_model_LSTM.hdf5"
# callbacks=[
# tf.keras.callbacks.EarlyStopping(monitor="val_loss", patience=15,
# verbose=1, mode="min", restore_best_weights=True),
# tf.keras.callbacks.ModelCheckpoint(filepath=best_model_file_name, verbose=1, save_best_only=True)
# ]
# history = model.fit(X_train,
# y_train,
# epochs=EPOCHS,
# validation_data=(X_test, y_test),
# callbacks=callbacks)
# model.save(best_model_file_name)
# model = tf.keras.models.load_model(best_model_file_name)
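The commented-out block near the top of this script hints at the continue-training path: a saved server state can be reloaded and fed back into the federated loop instead of starting from scratch. A hedged sketch of that flow (not part of this commit), reusing the helpers that main_proc imports from checkpoint_manager and federated_algorithm; the checkpoint name is only an example taken from the comments above.

# Sketch: resume federated training from a previously saved state.
from checkpoint_manager import save_state_to_file, load_state_from_file
from federated_algorithm import federated_computation_continue

restored_state = load_state_from_file("ckpt_1622721644")   # example checkpoint id
state, metrics = federated_computation_continue(train_dataset, test_dataset, restored_state)
new_model_id = save_state_to_file(state)                   # timestamp reused as Model_id/Dataset_id
print(metrics['train'])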
# import pandas as pd
# train_df = pd.read_csv('processing/fake-news/train.csv', header=0)
# test_df = pd.read_csv('processing/fake-news/test.csv', header=0)
# train_df = train_df.fillna(' ')
# test_df = test_df.fillna(' ')
# train_df['all_info'] = train_df['text'] + train_df['title'] + train_df['author']
# test_df['all_info'] = test_df['text'] + test_df['title'] + test_df['author']
# target = train_df['label'].values
import pandas as pd
import re
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from tensorflow.keras.models import Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, Activation, Dense, Dropout, Input, Embedding
from tensorflow.keras.optimizers import RMSprop
from tensorflow.keras.preprocessing.text import Tokenizer
from tensorflow.keras.preprocessing import sequence
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.preprocessing.sequence import pad_sequences
from nltk import word_tokenize
import collections
import numpy as np
import tensorflow as tf
import tensorflow_federated as tff
real = pd.read_csv("processing/fake_news/prototype_db_fake_real/True.csv")
fake = pd.read_csv("processing/fake_news/prototype_db_fake_real/Fake.csv")
# dropping rows that have urls as text and date, real's dates look fine, also dropping ones that have no text
fake_drop = fake.drop(index=[9358,15507,15508,18933])
fake_drop = fake_drop.drop(fake_drop.loc[fake_drop.text == ' '].index)
real_drop = real.drop(real.loc[real.text == ' '].index)
# Give labels to data before combining
fake['label'] = 1
real['label'] = 0
combined = pd.concat([fake, real])
no_reuters = combined.copy()
no_reuters.text = no_reuters.text.str.replace('Reuters', '')
combined = no_reuters.copy()
## train/test split the text data and labels
train_df_text = combined['text'] #features is now
labels = combined['label'] #or maybe use target?
target = combined['label'].values
print("##################label")
print(type(labels))
print(labels)
print("###########")
print(type(combined['label'].values))
print(combined['label'].values)
print("train_df_type:")
print(type(train_df_text))
############################ ^ORIGINAL DB
# train_df = pd.read_csv('processing/text_processing/prototype_db_fake_real/train.csv', header=0)
# test_df = pd.read_csv('processing/text_processing/prototype_db_fake_real/test.csv', header=0)
# train_df = train_df.fillna(' ')
# test_df = test_df.fillna(' ')
# train_df['all_info'] = train_df['text'] + train_df['title'] + train_df['author']
# test_df['all_info'] = test_df['text'] + test_df['title'] + test_df['author']
# target = train_df['label'].values
# print(type(train_df['label'].values))
# print(train_df['label'].values)
# train_df_text = train_df['all_info']
######################################################################################
tokenizer = Tokenizer(oov_token = "<OOV>", num_words=6000)
tokenizer.fit_on_texts(train_df_text)
max_length = 40
vocab_size = 6000
sequences_train = tokenizer.texts_to_sequences(train_df_text)
padded_train = pad_sequences(sequences_train, padding = 'post', maxlen=max_length)
X_train, X_test, y_train, y_test = train_test_split(padded_train, target, test_size=0.2)
print(X_train.shape)
print(y_train.shape)
#################################################################################
embed_dim = 10
def get_simple_LSTM_model():
model = Sequential()
model.add(Embedding(vocab_size, embed_dim, input_length=max_length))
model.add(Dropout(0.3))
model.add(LSTM(100))
model.add(Dropout(0.3))
model.add(Dense(64, activation='relu'))
model.add(Dropout(0.3))
model.add(Dense(1, activation='sigmoid'))
return model
model = get_simple_LSTM_model()
print(model.summary())
best_model_file_name = "processing/text_processing/models/best_model_LSTM.hdf5"
callbacks=[
tf.keras.callbacks.EarlyStopping(monitor="val_loss", patience=15,
verbose=1, mode="min", restore_best_weights=True),
tf.keras.callbacks.ModelCheckpoint(filepath=best_model_file_name, verbose=1, save_best_only=True)
]
model.compile(loss='binary_crossentropy',
optimizer='adam',
metrics=[tf.keras.metrics.Precision(), tf.keras.metrics.Recall()])
history = model.fit(X_train,
y_train,
epochs=5,
validation_data=(X_test, y_test),
callbacks=callbacks)
model.save(best_model_file_name)
model = tf.keras.models.load_model(best_model_file_name)
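Once best_model_LSTM.hdf5 has been written, scoring a new article follows the same tokenizer and padding pipeline used for training. A minimal inference sketch, not part of this commit; the sample text is made up, and the tokenizer must be the one fitted above.

# Sketch: score a new article with the saved baseline model.
sample_text = ["The central bank announced a new interest rate decision today."]  # made-up example
seq = tokenizer.texts_to_sequences(sample_text)
padded = pad_sequences(seq, padding='post', maxlen=max_length)
prediction = model.predict(padded)   # sigmoid output: closer to 1 = fake, closer to 0 = real
print(float(prediction[0][0]))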
@@ -15,7 +15,8 @@ def last(use_case: str):
         bottom = df.tail(1)
         bottom = str(bottom)
         print(bottom)
-        return Response(status=200, response=bottom)
+        metricsJson = trainMetricsToJSON(bottom)
+        return Response(status=200, response=metricsJson)
     except Exception as e:
         print(e)
         return Response(status=400, response="Trained model data doesn't exist")
@@ -35,6 +36,21 @@ def upload_and_train(use_case: str, developer_id: int):
     #THEN start processing
     last_train_metrics = main_proc.start_processing(use_case,developer_id)
     print (last_train_metrics)
-    return Response(status=200, response=last_train_metrics)
+    #Trainer_id,Model_id,Dataset_id,Accuracy,Loss
+    #0,1623160388,0,0.25,nan
+    metricsJson = trainMetricsToJSON(last_train_metrics)
+    return Response(status=200, response=metricsJson)
+
+def trainMetricsToJSON(last_train_metrics : list):
+    metricsDict = dict()
+    metricsDict["Trainer_id"] = last_train_metrics[0]
+    metricsDict["Model_id"] = last_train_metrics[1]
+    metricsDict["Dataset_id"] = last_train_metrics[2]
+    metricsDict["Accuracy"] = last_train_metrics[3]
+    metricsDict["Loss"] = last_train_metrics[4]
+    return json.dumps(metricsDict)
+
+#upload_and_train("text_processing",1)
+upload_and_train("text_processing",1)
\ No newline at end of file
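trainMetricsToJSON turns the CSV-style row returned by start_processing into the JSON body that the routes now send back. A quick sketch of the expected shape, using the row from the updated last_train.csv above:

import json

# Example row: Trainer_id, Model_id, Dataset_id, Accuracy, Loss
last_train_metrics = [0, 1623766462, 1623766462, 0.5, float('nan')]

metricsDict = dict()
metricsDict["Trainer_id"] = last_train_metrics[0]
metricsDict["Model_id"] = last_train_metrics[1]
metricsDict["Dataset_id"] = last_train_metrics[2]
metricsDict["Accuracy"] = last_train_metrics[3]
metricsDict["Loss"] = last_train_metrics[4]
print(json.dumps(metricsDict))
# -> {"Trainer_id": 0, "Model_id": 1623766462, "Dataset_id": 1623766462, "Accuracy": 0.5, "Loss": NaN}

Note that json.dumps serializes a NaN loss as the bare token NaN, which strict JSON parsers reject, so clients of these endpoints may need to tolerate that value.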
@@ -13,7 +13,8 @@ def last(use_case: str):
         bottom = df.tail(1)
         bottom = str(bottom)
         print(bottom)
-        return Response(status=200, response=bottom)
+        metricsJson = trainMetricsToJSON(bottom)
+        return Response(status=200, response=metricsJson)
     except Exception as e:
         print(e)
         return Response(status=400, response="Trained model data doesn't exist")
@@ -21,13 +22,13 @@ def last(use_case: str):
 def upload_and_train(use_case: str):
-    use_case_path = './processing/'+use_case
+    use_case_path = './processing/'+use_case+'/'
     #Remove old files
     try:
         if os.path.exists(use_case_path):
             print("Use_case path")
             print(use_case_path)
-            shutil.rmtree(use_case_path)
+            shutil.rmtree(use_case_path) #Deletes the old folder with all its files
     except OSError as error:
         print(error)
         return Response(status=400, response="Error occurred when deleting the old use_case directory")
@@ -62,8 +63,17 @@ def upload_and_train(use_case: str):
     last_train_metrics = main_proc.start_processing(use_case,0)
     print (last_train_metrics)
-    return Response(status=200, response=last_train_metrics)
+    metricsJson = trainMetricsToJSON(last_train_metrics)
+    return Response(status=200, response=metricsJson)
 
-last("text_processing")
-upload_and_train("test")
+def trainMetricsToJSON(last_train_metrics : list):
+    metricsDict = dict()
+    metricsDict["Trainer_id"] = last_train_metrics[0]
+    metricsDict["Model_id"] = last_train_metrics[1]
+    metricsDict["Dataset_id"] = last_train_metrics[2]
+    metricsDict["Accuracy"] = last_train_metrics[3]
+    metricsDict["Loss"] = last_train_metrics[4]
+    return json.dumps(metricsDict)
+
+#last("text_processing")
+#upload_and_train("text_processing") #warning: it deletes the files
\ No newline at end of file
...
@@ -5,7 +5,7 @@ from flask import Response, request
 
 def check_article(use_case: str):
     #body = request.STRING
-
+    #TODO Working on it
     #FOR USE_CASE {use_case}
     #insert body into the trained model
...