path: root/backend/microservice/api/controller.py
blob: d2b8ed2cf0fb5dc3023d0fe45c652ebcfc7d4c3c
import flask
from flask import request, jsonify
import newmlservice
import tensorflow as tf
import pandas as pd
import json
import tempfile
import requests
import config

app = flask.Flask(__name__)
app.config["DEBUG"] = True
app.config["SERVER_NAME"] = config.hostIP
  
class train_callback(tf.keras.callbacks.Callback):
    """Keras callback that reports training progress to the main API after every epoch."""

    def __init__(self, x_test, y_test):
        super().__init__()
        self.x_test = x_test
        self.y_test = y_test

    def on_epoch_end(self, epoch, logs=None):
        print(epoch)
        # Notify the main API which epoch has just finished.
        url = config.api_url + "/Model/epoch"
        requests.post(url, data=str(epoch))
        #print('Evaluation: ', self.model.evaluate(self.x_test, self.y_test), "\n")  # the number of returned values depends on the chosen metrics; loss is the default
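
# Note that the routes below pass the train_callback *class* (not an instance) to
# newmlservice.train, so newmlservice is expected to instantiate and register it itself.
# A minimal sketch of that wiring, assuming newmlservice builds a tf.keras model and a
# train/test split (these names are illustrative, not the actual newmlservice code):
#
#   callback = train_callback(x_test, y_test)
#   model.fit(x_train, y_train, epochs=epochs, callbacks=[callback])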

@app.route('/train', methods=['POST'])
def train():
    print("****************************** TRAIN ******************************")

    # The client sends the CSV dataset as a file upload plus three JSON-encoded form fields.
    f = request.files.get("file")
    data = pd.read_csv(f)
    paramsModel = json.loads(request.form["model"])
    paramsExperiment = json.loads(request.form["experiment"])
    paramsDataset = json.loads(request.form["dataset"])

    filepath, result = newmlservice.train(data, paramsModel, paramsExperiment, paramsDataset, train_callback)

    # Previous variant that read the dataset from a local file path instead of an upload:
    # f = request.json['filepath']
    # dataset = pd.read_csv(f)
    # filepath, result = newmlservice.train(dataset, request.json['model'], train_callback)
    # print(result)

    # Upload the trained .h5 model to the main API; the returned file id is not used yet.
    url = config.api_url + "/file/h5"
    with open(filepath, 'rb') as h5file:
        r = requests.post(url, files={'file': h5file})
    fileId = r.text
    return jsonify(result)
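
# Illustrative client call for /train (a sketch, not part of the service; the form field
# names match the handler above, the URL and the *Params payloads are placeholders):
#
#   import requests, json
#   with open("dataset.csv", "rb") as csvfile:
#       resp = requests.post(
#           "http://localhost:5000/train",
#           files={"file": csvfile},
#           data={
#               "model": json.dumps(modelParams),
#               "experiment": json.dumps(experimentParams),
#               "dataset": json.dumps(datasetParams),
#           },
#       )
#   print(resp.json())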

@app.route('/predict', methods=['POST'])
def predict():
    # load_model expects a file path (or an h5py.File), so persist the uploaded
    # .h5 model to a temporary file before loading it.
    h5 = request.files.get("h5file")
    with tempfile.NamedTemporaryFile(suffix=".h5", delete=False) as tmp:
        modelpath = tmp.name
    h5.save(modelpath)
    model = tf.keras.models.load_model(modelpath)
    paramsExperiment = json.loads(request.form["experiment"])
    paramsPredictor = json.loads(request.form["predictor"])
    print("******************************** model loaded ********************************")
    result = newmlservice.predict(paramsExperiment, paramsPredictor, model)
    return result
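
# Illustrative client call for /predict (sketch; field names match the handler above,
# the URL and the *Params payloads are placeholders):
#
#   with open("model.h5", "rb") as modelfile:
#       resp = requests.post(
#           "http://localhost:5000/predict",
#           files={"h5file": modelfile},
#           data={
#               "experiment": json.dumps(experimentParams),
#               "predictor": json.dumps(predictorParams),
#           },
#       )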

@app.route('/preprocess', methods=['POST'])
def returnColumnsInfo():
    print("******************************** PREPROCESS ********************************")
    dataset = json.loads(request.form["dataset"])
    file = request.files.get("file")
    data = pd.read_csv(file)

    preprocess = newmlservice.returnColumnsInfo(data)
    # Keep only 10 unique values per column since there can be too many; it would be
    # better to change this so that the 10 most frequent values are returned instead.
    for col in preprocess["columnInfo"]:
        col["uniqueValues"] = col["uniqueValues"][0:10]
        col["uniqueValuesCount"] = col["uniqueValuesCount"][0:10]
    dataset["columnInfo"] = preprocess["columnInfo"]
    dataset["nullCols"] = preprocess["allNullColl"]
    dataset["nullRows"] = preprocess["allNullRows"]
    dataset["colCount"] = preprocess["colCount"]
    dataset["rowCount"] = preprocess["rowCount"]
    dataset["isPreProcess"] = True
    print(dataset)
    return jsonify(dataset)
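
# Illustrative client call for /preprocess (sketch; field names match the handler above,
# the URL and the datasetMeta payload are placeholders):
#
#   with open("dataset.csv", "rb") as csvfile:
#       resp = requests.post(
#           "http://localhost:5000/preprocess",
#           files={"file": csvfile},
#           data={"dataset": json.dumps(datasetMeta)},
#       )
#   print(resp.json())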
    
print("App loaded.")
app.run()