From dce16507a0e8219f1bb04a2ec9a0afad521fac47 Mon Sep 17 00:00:00 2001
From: TAMARA JERINIC
Date: Thu, 19 May 2022 17:14:46 +0200
Subject: Modified the predictor and enabled saving of metrics.
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 backend/api/api/Models/Predictor.cs      | 26 ++++++++++++++------
 backend/api/api/Program.cs               |  2 +-
 backend/api/api/appsettings.json         |  7 +++---
 backend/microservice/api/controller.py   | 33 +++++++++++++++++++------
 backend/microservice/api/newmlservice.py | 41 +++++++++++++-------------------
 5 files changed, 66 insertions(+), 43 deletions(-)

(limited to 'backend')

diff --git a/backend/api/api/Models/Predictor.cs b/backend/api/api/Models/Predictor.cs
index bfe95a0f..95dd3a23 100644
--- a/backend/api/api/Models/Predictor.cs
+++ b/backend/api/api/Models/Predictor.cs
@@ -14,19 +14,31 @@ namespace api.Models
         public string output { get; set; }
         public bool isPublic { get; set; }
         public bool accessibleByLink { get; set; }
-        public DateTime dateCreated { get; set; }
+        //public DateTime dateCreated { get; set; }
         public string experimentId { get; set; }
         public string modelId { get; set; }
         public string h5FileId { get; set; }
-        public Metric[] metrics { get; set; }
-        public Metric[] finalMetrics { get; set; }
-    }
-    public class Metric
+        //public Metric[] metrics { get; set; }
+
+        public float[] metricsLoss { get; set; }
+        public float[] metricsValLoss { get; set; }
+        public float[] metricsAcc { get; set; }
+        public float[] metricsValAcc { get; set; }
+        public float[] metricsMae { get; set; }
+
+        public float[] metricsValMae { get; set; }
+
+        public float[] metricsMse { get; set; }
+        public float[] metricsValMse { get; set; }
+        //public Metric[] finalMetrics { get; set; }
+    }
+
+    /*public class Metric
     {
         string Name { get; set; }
         string JsonValue { get; set; }
-    }
-
+    }*/
+
 }
\ No newline at end of file
diff --git a/backend/api/api/Program.cs b/backend/api/api/Program.cs
index cf64d58d..5977e843 100644
--- a/backend/api/api/Program.cs
+++ b/backend/api/api/Program.cs
@@ -37,7 +37,7 @@ builder.Services.AddScoped();
 builder.Services.AddScoped();
 builder.Services.AddScoped();
 builder.Services.AddHostedService();
-builder.Services.AddHostedService();
+//builder.Services.AddHostedService();
 
 //Ml Api Ip Filter
 builder.Services.AddScoped(container =>
diff --git a/backend/api/api/appsettings.json b/backend/api/api/appsettings.json
index 44d63ac3..77708086 100644
--- a/backend/api/api/appsettings.json
+++ b/backend/api/api/appsettings.json
@@ -16,15 +16,16 @@
   "UserStoreDatabaseSettings": {
 
     /* LocalHost*/
-    /*"ConnectionString": "mongodb://127.0.0.1:27017/",
+    "ConnectionString": "mongodb://127.0.0.1:27017/",
     "DatabaseName": "si_project",
     "CollectionName": "users",
     "DatasetCollectionName": "Dataset",
     "ModelCollectionName": "Model",
     "PredictorCollectionName": "Predictor",
     "FilesCollectionName": "Files",
-    "ExperimentCollectionName": "Experiment"*/
+    "ExperimentCollectionName": "Experiment"
+    /*
     "ConnectionString": "mongodb+srv://si_user:si_user@sidatabase.twtfm.mongodb.net/myFirstDatabase?retryWrites=true&w=majority",
     "DatabaseName": "si_db",
     "CollectionName": "users",
@@ -32,6 +33,6 @@
     "ModelCollectionName": "Model",
     "PredictorCollectionName": "Predictor",
     "FilesCollectionName": "Files",
-    "ExperimentCollectionName": "Experiment"
+    "ExperimentCollectionName": "Experiment" */
   }
 }
\ No newline at end of file
diff --git a/backend/microservice/api/controller.py b/backend/microservice/api/controller.py
index 6f483008..0eee571c 100644
--- a/backend/microservice/api/controller.py
+++ b/backend/microservice/api/controller.py
@@ -69,15 +69,26 @@ def train():
     #dataset, paramsModel, paramsExperiment, callback)
-    filepath,result,finalMetrics= newmlservice.train(data, paramsModel, paramsExperiment,paramsDataset, train_callback)
+    filepath,histMetrics= newmlservice.train(data, paramsModel, paramsExperiment,paramsDataset, train_callback)
     """
     f = request.json['filepath']
     dataset = pd.read_csv(f)
     filepath,result=newmlservice.train(dataset,request.json['model'],train_callback)
     print(result)
     """
-
-
+    #m = []
+    #for attribute, value in result.items():
+        #m.append(histMetrics(attribute,str(value)).__dict__)
+    '''
+    m = []
+    for attribute, value in result.items():
+        m.append({"Name" : attribute, "JsonValue" : value}))
+
+    print("**************************************************************")
+    print(m)
+
+    print("**************************************************************")
+    '''
     url = config.api_url + "/file/h5"
     files = {'file': open(filepath, 'rb')}
     r=requests.post(url, files=files,data={"uploaderId":paramsExperiment['uploaderId']})
@@ -92,15 +103,23 @@ def train():
         "experimentId" : paramsExperiment["_id"],
         "modelId" : paramsModel["_id"],
         "h5FileId" : fileId,
-        "metrics" : result,
-        "finalMetrics":finalMetrics
+        "metricsLoss":histMetrics[0],
+        "metricsValLoss":histMetrics[1],
+        "metricsAcc":histMetrics[2],
+        "metricsValAcc":histMetrics[3],
+        "metricsMae":histMetrics[4],
+        "metricsValMae":histMetrics[5],
+        "metricsMse":histMetrics[6],
+        "metricsValMse":histMetrics[7]
+
+
     }
     #print(predictor)
-
+
     url = config.api_url + "/Predictor/add"
     r = requests.post(url, json=predictor).text
-    print(r)
+    #print(r)
     return r
 
 @app.route('/predict', methods = ['POST'])
diff --git a/backend/microservice/api/newmlservice.py b/backend/microservice/api/newmlservice.py
index bcff5a33..85be0c2f 100644
--- a/backend/microservice/api/newmlservice.py
+++ b/backend/microservice/api/newmlservice.py
@@ -303,7 +303,7 @@ def train(dataset, paramsModel,paramsExperiment,paramsDataset,callback):
     ###OPTIMIZATORI
     print(paramsModel['optimizer'])
     if(paramsModel['optimizer']=='Adam'):
-        opt=tf.keras.optimizers.Adam(learning_rate=3)
+        opt=tf.keras.optimizers.Adam(learning_rate=float(paramsModel['learningRate']))
 
     elif(paramsModel['optimizer']=='Adadelta'):
         opt=tf.keras.optimizers.Adadelta(learning_rate=float(paramsModel['learningRate']))
@@ -370,7 +370,7 @@ def train(dataset, paramsModel,paramsExperiment,paramsDataset,callback):
 
-        classifier.compile(loss =paramsModel["lossFunction"] , optimizer =opt, metrics = ['mae','mse'])
+        classifier.compile(loss =paramsModel["lossFunction"] , optimizer =opt, metrics = ['accuracy','mae','mse'])
 
         history=classifier.fit( x=x_train, y=y_train, epochs = paramsModel['epochs'],batch_size=int(paramsModel['batchSize']),callbacks=callback(x_test, y_test,paramsModel['_id']),validation_data=(x_val, y_val))
 
@@ -383,9 +383,9 @@ def train(dataset, paramsModel,paramsExperiment,paramsDataset,callback):
 
         scores = classifier.evaluate(x_test, y_test)
         #print("\n%s: %.2f%%" % (classifier.metrics_names[1], scores[1]*100))
-
+        '''
         classifier.save(filepath, save_format='h5')
-        metrics={}
+
         macro_averaged_precision=sm.precision_score(y_test, y_pred, average = 'macro')
         micro_averaged_precision=sm.precision_score(y_test, y_pred, average = 'micro')
         macro_averaged_recall=sm.recall_score(y_test, y_pred, average = 'macro')
@@ -401,12 +401,12 @@ def train(dataset, paramsModel,paramsExperiment,paramsDataset,callback):
             {"Name":"macro_averaged_f1","JsonValue": str(macro_averaged_f1)},
             {"Name":"micro_averaged_f1", "JsonValue": str(micro_averaged_f1)}
         ]
-
+        '''
        #vizuelizacija u python-u
        #from ann_visualizer.visualize import ann_viz;
        #ann_viz(classifier, title="My neural network")
-        return filepath,hist,metrics
+        return filepath,[hist['loss'],hist['val_loss'],hist['accuracy'],hist['val_accuracy'],hist['mae'],hist['val_mae'],hist['mse'],hist['val_mse']]
 
     elif(problem_type=='binarni-klasifikacioni'):
         #print('*************************************************************************binarni')
@@ -444,6 +444,7 @@ def train(dataset, paramsModel,paramsExperiment,paramsDataset,callback):
 
         history=classifier.fit( x=x_train, y=y_train, epochs = paramsModel['epochs'],batch_size=int(paramsModel['batchSize']),callbacks=callback(x_test, y_test,paramsModel['_id']),validation_data=(x_val, y_val))
         hist=history.history
+
         y_pred=classifier.predict(x_test)
         y_pred=(y_pred>=0.5).astype('int')
@@ -452,7 +453,7 @@ def train(dataset, paramsModel,paramsExperiment,paramsDataset,callback):
        # ann_viz(classifier, title="My neural network")
 
         classifier.save(filepath, save_format='h5')
-
+        """
         accuracy = float(sm.accuracy_score(y_test,y_pred))
         precision = float(sm.precision_score(y_test,y_pred))
         recall = float(sm.recall_score(y_test,y_pred))
@@ -461,22 +462,9 @@ def train(dataset, paramsModel,paramsExperiment,paramsDataset,callback):
         f1 = float(sm.f1_score(y_test,y_pred))
         fpr, tpr, _ = sm.roc_curve(y_test,y_pred)
         logloss = float(sm.log_loss(y_test, y_pred))
-        metrics= [
-            {"Name":"accuracy" ,"JsonValue": str(accuracy)},
-            {"Name":"precision","JsonValue": str(precision)},
-            {"Name":"recall" , "JsonValue":str(recall)},
-            {"Name":"specificity" ,"JsonValue":str(specificity)},
-            {"Name":"f1" ,"JsonValue": str(f1)},
-            {"Name":"tn" , "JsonValue":str(tn)},
-            {"Name":"fp" , "JsonValue":str(fp)},
-            {"Name":"fn" , "JsonValue":str(fn)},
-            {"Name":"tp" , "JsonValue":str(tp)},
-            {"Name":"fpr" ,"JsonValue": str(fpr.tolist())},
-            {"Name":"tpr" , "JsonValue":str(tpr.tolist())},
-            {"Name":"logloss" , "JsonValue":str(logloss)}
-        ]
+        """
-        return filepath,hist,metrics
+        return filepath,[hist['loss'],hist['val_loss'],hist['accuracy'],hist['val_accuracy'],hist['mae'],hist['val_mae'],hist['mse'],hist['val_mse']]
 
     elif(problem_type=='regresioni'):
         reg=paramsModel['layers'][0]['regularisation']
@@ -519,8 +507,10 @@ def train(dataset, paramsModel,paramsExperiment,paramsDataset,callback):
         #print(classifier.evaluate(x_test, y_test))
 
         classifier.save(filepath, save_format='h5')
-
+        '''
+
         mse = float(sm.mean_squared_error(y_test,y_pred))
+
         mae = float(sm.mean_absolute_error(y_test,y_pred))
         mape = float(sm.mean_absolute_percentage_error(y_test,y_pred))
         rmse = float(np.sqrt(sm.mean_squared_error(y_test,y_pred)))
@@ -531,6 +521,7 @@ def train(dataset, paramsModel,paramsExperiment,paramsDataset,callback):
         n = 40
         k = 2
         adj_r2 = float(1 - ((1-r2)*(n-1)/(n-k-1)))
+
         metrics= [
             {"Name":"mse","JsonValue":str(mse)},
             {"Name":"mae","JsonValue":str( mae)},
             {"Name":"mape","JsonValue":str(mape)},
             {"Name":"rmse","JsonValue": str(rmse)},
             {"Name":"r2","JsonValue":str( r2)},
             {"Name":"adj_r2","JsonValue":str(adj_r2)}
         ]
-
-        return filepath,hist,metrics
+        '''
+        return filepath,[hist['loss'],hist['val_loss'],[],[],hist['mae'],hist['val_mae'],hist['mse'],hist['val_mse']]
 
 
 def roc_auc_score_multiclass(actual_class, pred_class, average = "macro"):
--
cgit v1.2.3
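
For reference, a minimal Python sketch of the metrics contract this patch introduces: newmlservice.train() now returns the per-epoch Keras history lists in a fixed order, and controller.py maps them positionally onto the new float[] fields of the Predictor model (the regression branch passes empty lists for the accuracy slots). The helper build_predictor_payload and the sample epoch values below are illustrative assumptions, not code from the repository.

# Illustrative sketch: how the ordered history lists returned by newmlservice.train()
# line up with the Predictor float[] fields added in this commit.
# Order: [loss, val_loss, accuracy, val_accuracy, mae, val_mae, mse, val_mse]
METRIC_FIELDS = [
    "metricsLoss", "metricsValLoss",
    "metricsAcc", "metricsValAcc",
    "metricsMae", "metricsValMae",
    "metricsMse", "metricsValMse",
]

def build_predictor_payload(hist_metrics, base):
    # hist_metrics: list of 8 per-epoch lists in the order above
    # base: dict with the non-metric Predictor fields (experimentId, modelId, h5FileId, ...)
    payload = dict(base)
    payload.update(zip(METRIC_FIELDS, hist_metrics))
    return payload

if __name__ == "__main__":
    # Made-up two-epoch values, for illustration only.
    hist = [[0.90, 0.50], [1.00, 0.60], [0.60, 0.80], [0.55, 0.75],
            [0.40, 0.30], [0.45, 0.35], [0.20, 0.10], [0.25, 0.15]]
    print(build_predictor_payload(hist, {"experimentId": "exp-1", "modelId": "m-1"}))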