path: root/backend/microservice/api/newmlservice.py
author    Danijel Anđelković <adanijel99@gmail.com>  2022-05-12 12:49:28 +0200
committer Danijel Anđelković <adanijel99@gmail.com>  2022-05-12 12:49:28 +0200
commit  2ce380a489f0c7acda75b7fa659a8148935c7462 (patch)
tree    8fc0d163a609602f5ea51507dd9c33ce6dfc78e9 /backend/microservice/api/newmlservice.py
parent  2477f1796ba88ab1ae7d8aa869a55a8b37d1d8bb (diff)
parent  166bceb500bd3475c196b7aa9ad9f68c4f83cefc (diff)
Merge branch 'redesign' of http://gitlab.pmf.kg.ac.rs/igrannonica/neuronstellar into redesign
Diffstat (limited to 'backend/microservice/api/newmlservice.py')
-rw-r--r--  backend/microservice/api/newmlservice.py | 16
1 file changed, 7 insertions, 9 deletions
diff --git a/backend/microservice/api/newmlservice.py b/backend/microservice/api/newmlservice.py
index c401a3e6..6a863013 100644
--- a/backend/microservice/api/newmlservice.py
+++ b/backend/microservice/api/newmlservice.py
@@ -384,17 +384,15 @@ def train(dataset, paramsModel,paramsExperiment,paramsDataset,callback):
classifier.save(filepath, save_format='h5')
-
- accuracy=metrics.accuracy_score(y_test, y_pred)
- macro_averaged_precision=metrics.precision_score(y_test, y_pred, average = 'macro')
- micro_averaged_precision=metrics.precision_score(y_test, y_pred, average = 'micro')
- macro_averaged_recall=metrics.recall_score(y_test, y_pred, average = 'macro')
- micro_averaged_recall=metrics.recall_score(y_test, y_pred, average = 'micro')
- macro_averaged_f1=metrics.f1_score(y_test, y_pred, average = 'macro')
- micro_averaged_f1=metrics.f1_score(y_test, y_pred, average = 'micro')
+ metrics={}
+ macro_averaged_precision=sm.precision_score(y_test, y_pred, average = 'macro')
+ micro_averaged_precision=sm.precision_score(y_test, y_pred, average = 'micro')
+ macro_averaged_recall=sm.recall_score(y_test, y_pred, average = 'macro')
+ micro_averaged_recall=sm.recall_score(y_test, y_pred, average = 'micro')
+ macro_averaged_f1=sm.f1_score(y_test, y_pred, average = 'macro')
+ micro_averaged_f1=sm.f1_score(y_test, y_pred, average = 'micro')
metrics= {
- "accuracy" : float(accuracy),
"macro_averaged_precision" :float(macro_averaged_precision),
"micro_averaged_precision" : float(micro_averaged_precision),
"macro_averaged_recall" : float(macro_averaged_recall),