author     Nevena Bojovic <nenabojov@gmail.com>  2022-03-29 19:17:11 +0200
committer  Nevena Bojovic <nenabojov@gmail.com>  2022-03-29 19:17:11 +0200
commit     e8790a5b30170a99fc6ef61e24614456543febd7 (patch)
tree       e5a5768a8489764e5e3a9f1f91761ad4c6eb9bcc /backend/microservice/api/controller.py
parent     82030306a115d06a33bed48a5a0e15a053f0ae7e (diff)
parent     f3d5f6fa046912c996e2581d9082087ccf7919d6 (diff)
Merge branch 'dev' of http://gitlab.pmf.kg.ac.rs/igrannonica/neuronstellar into dev
Diffstat (limited to 'backend/microservice/api/controller.py')
-rw-r--r--  backend/microservice/api/controller.py  42
1 file changed, 42 insertions(+), 0 deletions(-)
diff --git a/backend/microservice/api/controller.py b/backend/microservice/api/controller.py
new file mode 100644
index 00000000..ceed02ad
--- /dev/null
+++ b/backend/microservice/api/controller.py
@@ -0,0 +1,42 @@
+import flask
+from flask import request, jsonify
+import ml_socket
+import ml_service
+import tensorflow as tf
+import pandas as pd
+
+app = flask.Flask(__name__)
+app.config["DEBUG"] = True
+app.config["SERVER_NAME"] = "127.0.0.1:5543"
+
+class train_callback(tf.keras.callbacks.Callback):
+    def __init__(self, x_test, y_test):
+        self.x_test = x_test
+        self.y_test = y_test
+    #
+    def on_epoch_end(self, epoch, logs=None):
+        print(epoch)
+        #print('Evaluation: ', self.model.evaluate(self.x_test, self.y_test), "\n")  # the number of returned values depends on the chosen metrics; loss is the default
+
+@app.route('/train', methods = ['POST'])
+def train():
+ print("******************************TRAIN*************************************************")
+ f = request.json["dataset"]
+ dataset = pd.read_csv(f)
+ #
+ result = ml_service.train(dataset, request.json["model"], train_callback)
+ print(result)
+ return jsonify(result)
+
+@app.route('/predict', methods = ['POST'])
+def predict():
+    f = request.json['filepath']
+    dataset = pd.read_csv(f)
+    m = request.json['modelpath']
+    model = tf.keras.models.load_model(m)
+    predictions = model.predict(dataset)  # run inference on the uploaded rows
+    return jsonify(predictions.tolist())
+
+print("App loaded.")
+ml_socket.start()
+app.run()
\ No newline at end of file
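
Note: ml_service and ml_socket are not part of this diff, so how train_callback is wired into training is not visible here. Below is a minimal, self-contained sketch (with toy data and a placeholder Sequential model, not the project's real pipeline) of how a tf.keras.callbacks.Callback subclass like this one is typically instantiated and passed to fit().

import numpy as np
import tensorflow as tf

# Same callback shape as in controller.py, repeated so this snippet runs on its own.
class train_callback(tf.keras.callbacks.Callback):
    def __init__(self, x_test, y_test):
        self.x_test = x_test
        self.y_test = y_test
    def on_epoch_end(self, epoch, logs=None):
        print(epoch)

# Toy data and model standing in for whatever ml_service.train actually builds.
x_train, y_train = np.random.rand(100, 4), np.random.rand(100)
x_test, y_test = np.random.rand(20, 4), np.random.rand(20)

model = tf.keras.Sequential([
    tf.keras.layers.Dense(8, activation="relu", input_shape=(4,)),
    tf.keras.layers.Dense(1),
])
model.compile(optimizer="adam", loss="mse")

# The callback is constructed with a held-out split and handed to fit(),
# so on_epoch_end fires once per epoch.
model.fit(x_train, y_train, epochs=3, callbacks=[train_callback(x_test, y_test)])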
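
Likewise, an illustrative client for the two endpoints once the service is running; the JSON keys ("dataset", "model", "filepath", "modelpath") mirror the ones read in controller.py, while the concrete paths and the model payload below are made-up placeholders.

import requests

BASE_URL = "http://127.0.0.1:5543"  # matches the SERVER_NAME configured in controller.py

# Kick off training; the "model" payload is whatever structure ml_service.train
# expects (a made-up placeholder here, not the service's actual schema).
train_response = requests.post(
    f"{BASE_URL}/train",
    json={"dataset": "data/train.csv", "model": {"epochs": 10}},
)
print(train_response.json())

# Request predictions from a previously saved Keras model.
predict_response = requests.post(
    f"{BASE_URL}/predict",
    json={"filepath": "data/new_rows.csv", "modelpath": "models/my_model.h5"},
)
print(predict_response.json())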