aboutsummaryrefslogtreecommitdiff
path: root/backend
diff options
context:
space:
mode:
Diffstat (limited to 'backend')
-rw-r--r--backend/api/api/Controllers/ExperimentController.cs23
-rw-r--r--backend/api/api/Controllers/FileController.cs9
-rw-r--r--backend/api/api/Controllers/ModelController.cs3
-rw-r--r--backend/api/api/Interfaces/IAuthService.cs (renamed from backend/api/api/Services/IAuthService.cs)0
-rw-r--r--backend/api/api/Interfaces/IDatasetService.cs (renamed from backend/api/api/Services/IDatasetService.cs)2
-rw-r--r--backend/api/api/Interfaces/IExperimentService.cs (renamed from backend/api/api/Services/IExperimentService.cs)3
-rw-r--r--backend/api/api/Interfaces/IFileService.cs (renamed from backend/api/api/Services/IFileService.cs)0
-rw-r--r--backend/api/api/Interfaces/IJwtToken.cs (renamed from backend/api/api/Models/IJwtToken.cs)0
-rw-r--r--backend/api/api/Interfaces/IMLWebSocketService.cs (renamed from backend/api/api/Services/IMLWebSocketService.cs)0
-rw-r--r--backend/api/api/Interfaces/IMlConnectionService.cs (renamed from backend/api/api/Services/IMlConnectionService.cs)0
-rw-r--r--backend/api/api/Interfaces/IModelService.cs (renamed from backend/api/api/Services/IModelService.cs)0
-rw-r--r--backend/api/api/Interfaces/IPredictorService.cs (renamed from backend/api/api/Services/IPredictorService.cs)0
-rw-r--r--backend/api/api/Interfaces/IUserService.cs (renamed from backend/api/api/Services/IUserService.cs)0
-rw-r--r--backend/api/api/Models/ColumnInfo.cs14
-rw-r--r--backend/api/api/Models/Dataset.cs6
-rw-r--r--backend/api/api/Models/Experiment.cs6
-rw-r--r--backend/api/api/Models/Model.cs17
-rw-r--r--backend/api/api/Services/DatasetService.cs4
-rw-r--r--backend/api/api/Services/ExperimentService.cs10
-rw-r--r--backend/api/api/Services/FillAnEmptyDb.cs82
-rw-r--r--backend/api/api/Services/JwtToken.cs (renamed from backend/api/api/Models/JwtToken.cs)0
-rw-r--r--backend/api/api/Services/PasswordCrypt.cs (renamed from backend/api/api/Models/PasswordCrypt.cs)0
-rw-r--r--backend/microservice/api/controller.py18
-rw-r--r--backend/microservice/api/newmlservice.py231
24 files changed, 286 insertions, 142 deletions
diff --git a/backend/api/api/Controllers/ExperimentController.cs b/backend/api/api/Controllers/ExperimentController.cs
index 3fa02943..eecbe756 100644
--- a/backend/api/api/Controllers/ExperimentController.cs
+++ b/backend/api/api/Controllers/ExperimentController.cs
@@ -88,5 +88,28 @@ namespace api.Controllers
var experiments=_experimentService.GetMyExperiments(uploaderId);
return Ok(experiments);
}
+
+ // PUT api/<ExperimentController>/{id}
+ [HttpPut("{id}")]
+ [Authorize(Roles = "User")]
+ public ActionResult Put(string id, [FromBody] Experiment experiment)
+ {
+ string uploaderId = getUserId();
+
+ if (uploaderId == null)
+ return BadRequest();
+
+ var existingExperiment = _experimentService.GetOneExperiment(uploaderId, id);
+
+ // this check is optional
+ if (existingExperiment == null)
+ return NotFound($"Experiment with ID = {id} or user with ID = {uploaderId} not found");
+
+ experiment.lastUpdated = DateTime.UtcNow;
+
+ _experimentService.Update(uploaderId, id, experiment);
+
+ return Ok($"Experiment with ID = {id} updated");
+ }
}
}
diff --git a/backend/api/api/Controllers/FileController.cs b/backend/api/api/Controllers/FileController.cs
index 9baf6294..99d98a78 100644
--- a/backend/api/api/Controllers/FileController.cs
+++ b/backend/api/api/Controllers/FileController.cs
@@ -94,9 +94,9 @@ namespace api.Controllers
return Ok(fileModel._id);
}
- [HttpGet("csvread/{hasHeader}/{fileId}/{skipRows}/{takeRows}")]
+ [HttpGet("csvread/{fileId}/{skipRows}/{takeRows}")]
[Authorize(Roles = "User,Guest")]
- public ActionResult<string> CsvRead(bool hasHeader, string fileId, int skipRows = 0, int takeRows = 10)
+ public ActionResult<string> CsvRead(string fileId, int skipRows = 0, int takeRows = 10)
{
string uploaderId = getUserId();
@@ -109,10 +109,7 @@ namespace api.Controllers
- if (hasHeader)
- return String.Join("\n", System.IO.File.ReadLines(filePath).Skip(skipRows+1).Take(takeRows));
- else
- return String.Join("\n", System.IO.File.ReadLines(filePath).Skip(skipRows).Take(takeRows));
+ return String.Join("\n", System.IO.File.ReadLines(filePath).Skip(skipRows+1).Take(takeRows));
}
diff --git a/backend/api/api/Controllers/ModelController.cs b/backend/api/api/Controllers/ModelController.cs
index f279bf7a..1c1ea364 100644
--- a/backend/api/api/Controllers/ModelController.cs
+++ b/backend/api/api/Controllers/ModelController.cs
@@ -187,8 +187,11 @@ namespace api.Controllers
/*if (_modelService.CheckHyperparameters(1, model.hiddenLayerNeurons, model.hiddenLayers, model.outputNeurons) == false)
return BadRequest("Bad parameters!");*/
+ model.uploaderId = getUserId();
+
var existingModel = _modelService.GetOneModel(model.uploaderId, model.name);
+
if (existingModel != null && !overwrite)
return NotFound($"Model with name = {model.name} exisits");
else
diff --git a/backend/api/api/Services/IAuthService.cs b/backend/api/api/Interfaces/IAuthService.cs
index 9a109208..9a109208 100644
--- a/backend/api/api/Services/IAuthService.cs
+++ b/backend/api/api/Interfaces/IAuthService.cs
diff --git a/backend/api/api/Services/IDatasetService.cs b/backend/api/api/Interfaces/IDatasetService.cs
index bb06208d..f493a2ec 100644
--- a/backend/api/api/Services/IDatasetService.cs
+++ b/backend/api/api/Interfaces/IDatasetService.cs
@@ -5,7 +5,7 @@ namespace api.Services
{
public interface IDatasetService
{
- Dataset GetOneDataset(string userId, string id);
+ Dataset GetOneDataset(string userId, string name);
Dataset GetOneDataset(string id);
List<Dataset> SearchDatasets(string name);
List<Dataset> GetMyDatasets(string userId);
diff --git a/backend/api/api/Services/IExperimentService.cs b/backend/api/api/Interfaces/IExperimentService.cs
index 47c86046..2a69cff9 100644
--- a/backend/api/api/Services/IExperimentService.cs
+++ b/backend/api/api/Interfaces/IExperimentService.cs
@@ -8,5 +8,8 @@ namespace api.Services
public Experiment Get(string id);
public List<Experiment> GetMyExperiments(string id);
public Experiment Get(string uploaderId, string name);
+ Experiment GetOneExperiment(string userId, string name);
+ void Update(string userId, string id, Experiment experiment);
+
}
} \ No newline at end of file
diff --git a/backend/api/api/Services/IFileService.cs b/backend/api/api/Interfaces/IFileService.cs
index e061dfdb..e061dfdb 100644
--- a/backend/api/api/Services/IFileService.cs
+++ b/backend/api/api/Interfaces/IFileService.cs
diff --git a/backend/api/api/Models/IJwtToken.cs b/backend/api/api/Interfaces/IJwtToken.cs
index 5c54e4e3..5c54e4e3 100644
--- a/backend/api/api/Models/IJwtToken.cs
+++ b/backend/api/api/Interfaces/IJwtToken.cs
diff --git a/backend/api/api/Services/IMLWebSocketService.cs b/backend/api/api/Interfaces/IMLWebSocketService.cs
index 52efb7fc..52efb7fc 100644
--- a/backend/api/api/Services/IMLWebSocketService.cs
+++ b/backend/api/api/Interfaces/IMLWebSocketService.cs
diff --git a/backend/api/api/Services/IMlConnectionService.cs b/backend/api/api/Interfaces/IMlConnectionService.cs
index d5dda9f2..d5dda9f2 100644
--- a/backend/api/api/Services/IMlConnectionService.cs
+++ b/backend/api/api/Interfaces/IMlConnectionService.cs
diff --git a/backend/api/api/Services/IModelService.cs b/backend/api/api/Interfaces/IModelService.cs
index 00299979..00299979 100644
--- a/backend/api/api/Services/IModelService.cs
+++ b/backend/api/api/Interfaces/IModelService.cs
diff --git a/backend/api/api/Services/IPredictorService.cs b/backend/api/api/Interfaces/IPredictorService.cs
index 16f0432a..16f0432a 100644
--- a/backend/api/api/Services/IPredictorService.cs
+++ b/backend/api/api/Interfaces/IPredictorService.cs
diff --git a/backend/api/api/Services/IUserService.cs b/backend/api/api/Interfaces/IUserService.cs
index d34d410a..d34d410a 100644
--- a/backend/api/api/Services/IUserService.cs
+++ b/backend/api/api/Interfaces/IUserService.cs
diff --git a/backend/api/api/Models/ColumnInfo.cs b/backend/api/api/Models/ColumnInfo.cs
index 04450fef..dcf5171c 100644
--- a/backend/api/api/Models/ColumnInfo.cs
+++ b/backend/api/api/Models/ColumnInfo.cs
@@ -4,9 +4,10 @@
{
public ColumnInfo() { }
- public ColumnInfo(string columnName, bool isNumber, int numNulls, float mean, float min, float max, float median, string[] uniqueValues)
+ public ColumnInfo(string columnName, string columnType, bool isNumber, int numNulls, float mean, float min, float max, float median, string[] uniqueValues, int[]uniqueValuesCount, float[] uniqueValuesPercent, float q1, float q3)
{
this.columnName = columnName;
+ this.columnType = columnType;
this.isNumber = isNumber;
this.numNulls = numNulls;
this.mean = mean;
@@ -14,9 +15,14 @@
this.max = max;
this.median = median;
this.uniqueValues = uniqueValues;
+ this.uniqueValuesPercent = uniqueValuesPercent;
+ this.uniqueValuesCount = uniqueValuesCount;
+ this.q1 = q1;
+ this.q3 = q3;
}
public string columnName { get; set; }
+ public string columnType { get; set; }
public bool isNumber { get; set; }
public int numNulls { get; set; }
public float mean { get; set; }
@@ -24,6 +30,12 @@
public float max { get; set; }
public float median { get; set; }
public string[] uniqueValues { get; set; }
+ public int[] uniqueValuesCount { get; set; }
+ public float[] uniqueValuesPercent { get; set; }
+
+
+ public float q1 { get; set; }
+ public float q3 { get; set; }
}
}
diff --git a/backend/api/api/Models/Dataset.cs b/backend/api/api/Models/Dataset.cs
index cc7185f0..7acd4382 100644
--- a/backend/api/api/Models/Dataset.cs
+++ b/backend/api/api/Models/Dataset.cs
@@ -12,9 +12,9 @@ namespace api.Models
[BsonId]
[BsonRepresentation(BsonType.ObjectId)]//mongo data type to .net
public string _id { get; set; }
- public string name { get; set; }
+
public string description { get; set; }
- public string[] header { get; set; }
+ public string name { get; set; }
public string fileId { get; set; }
public string extension { get; set; }
public bool isPublic { get; set; }
@@ -22,8 +22,6 @@ namespace api.Models
public DateTime dateCreated { get; set; }
public DateTime lastUpdated { get; set; }
public string delimiter { get; set; }
- public bool hasHeader { get; set; }
-
public ColumnInfo[] columnInfo { get; set; }
public int rowCount { get; set; }
public int nullCols { get; set; }
diff --git a/backend/api/api/Models/Experiment.cs b/backend/api/api/Models/Experiment.cs
index 6f665c52..3af063be 100644
--- a/backend/api/api/Models/Experiment.cs
+++ b/backend/api/api/Models/Experiment.cs
@@ -10,15 +10,15 @@ namespace api.Models
public string _id { get; set; }
public string name { get; set; }
public string description { get; set; }
+ public string type { get; set; }
public List<string> ModelIds { get; set; }
public string datasetId { get; set; }
public string uploaderId { get; set; }
public string[] inputColumns { get; set; }
public string outputColumn { get; set; }
- public bool randomOrder { get; set; }
- public bool randomTestSet { get; set; }
- public float randomTestSetDistribution { get; set; }
public string nullValues { get; set; }
+ public DateTime dateCreated { get; set; }
+ public DateTime lastUpdated { get; set; }
public NullValues[] nullValuesReplacers { get; set; }
public ColumnEncoding[] encodings { get; set; }
diff --git a/backend/api/api/Models/Model.cs b/backend/api/api/Models/Model.cs
index a9dbfbdd..d8921713 100644
--- a/backend/api/api/Models/Model.cs
+++ b/backend/api/api/Models/Model.cs
@@ -26,17 +26,30 @@ namespace api.Models
public string optimizer { get; set; }
public string lossFunction { get; set; }
//public int inputNeurons { get; set; }
- public int hiddenLayerNeurons { get; set; }
public int hiddenLayers { get; set; }
public int batchSize { get; set; }
// na izlazu je moguce da bude vise neurona (klasifikacioni problem sa vise od 2 klase)
public int outputNeurons { get; set; }
- public string[] hiddenLayerActivationFunctions { get; set; }
+ public Layer[] layers { get; set; }
public string outputLayerActivationFunction { get; set; }
public string[] metrics { get; set; }
public int epochs { get; set; }
//public bool isTrained { get; set; }
//public NullValues[] nullValues { get; set; }
+ public bool randomOrder { get; set; }
+ public bool randomTestSet { get; set; }
+ public float randomTestSetDistribution { get; set; }
+ }
+
+ public class Layer
+ {
+ public int layerNumber { get; set; }
+ public string activationFunction { get; set; }
+ public int neurons { get; set; }
+ public string regularisation { get; set; }
+ public float regularisationRate { get; set; }
}
}
+
+
diff --git a/backend/api/api/Services/DatasetService.cs b/backend/api/api/Services/DatasetService.cs
index fe177d77..f39cac29 100644
--- a/backend/api/api/Services/DatasetService.cs
+++ b/backend/api/api/Services/DatasetService.cs
@@ -62,9 +62,9 @@ namespace api.Services
return _dataset.Find(dataset => dataset.isPublic == true && dataset.isPreProcess).ToList();
}
- public Dataset GetOneDataset(string userId, string id)
+ public Dataset GetOneDataset(string userId, string name)
{
- return _dataset.Find(dataset => dataset.uploaderId == userId && dataset._id == id && dataset.isPreProcess).FirstOrDefault();
+ return _dataset.Find(dataset => dataset.uploaderId == userId && dataset.name == name && dataset.isPreProcess).FirstOrDefault();
}
//odraditi za pretragu getOne
diff --git a/backend/api/api/Services/ExperimentService.cs b/backend/api/api/Services/ExperimentService.cs
index 7bdf9a6e..539e4c08 100644
--- a/backend/api/api/Services/ExperimentService.cs
+++ b/backend/api/api/Services/ExperimentService.cs
@@ -36,5 +36,15 @@ namespace api.Services
return _experiment.Find(e=>e.uploaderId==id).ToList();
}
+
+ public Experiment GetOneExperiment(string userId, string name)
+ {
+ return _experiment.Find(experiment => experiment.uploaderId == userId && experiment.name == name).FirstOrDefault();
+ }
+
+ public void Update(string userId, string id, Experiment experiment)
+ {
+ _experiment.ReplaceOne(e => e.uploaderId == userId && e._id == id, experiment);
+ }
}
}
diff --git a/backend/api/api/Services/FillAnEmptyDb.cs b/backend/api/api/Services/FillAnEmptyDb.cs
index 216e1221..99bbb91f 100644
--- a/backend/api/api/Services/FillAnEmptyDb.cs
+++ b/backend/api/api/Services/FillAnEmptyDb.cs
@@ -55,7 +55,6 @@ namespace api.Services
dataset.uploaderId = "000000000000000000000000";
dataset.name = "Titanik dataset";
dataset.description = "Titanik dataset";
- dataset.header = new string[] { "PassengerId", "Survived", "Pclass", "Name", "Sex", "Age", "SibSp", "Parch", "Ticket", "Fare", "Cabin", "Embarked" };
dataset.fileId = _fileService.GetFileId(fullPath);
dataset.extension = ".csv";
dataset.isPublic = true;
@@ -63,22 +62,22 @@ namespace api.Services
dataset.dateCreated = DateTime.Now;
dataset.lastUpdated = DateTime.Now;
dataset.delimiter = "";
- dataset.hasHeader = true;
dataset.columnInfo = new ColumnInfo[] { };
dataset.columnInfo = new[]
{
- new ColumnInfo( "PassengerId", true, 0, 446, 1, 891, 446, new string[]{ }),
- new ColumnInfo( "Survived", true, 0, 0.38383838534355164f, 0, 1, 0, new string[]{ }),
- new ColumnInfo( "Pclass", true, 0, 2.3086419105529785f, 1, 3, 3, new string[]{ }),
- new ColumnInfo( "Name", false, 0, 0, 0, 0, 0, new string[]{"Braund, Mr. Owen Harris", "Boulos, Mr. Hanna", "Frolicher-Stehli, Mr. Maxmillian", "Gilinski, Mr. Eliezer", "Murdlin, Mr. Joseph", "Rintamaki, Mr. Matti", "Stephenson, Mrs. Walter Bertram (Martha Eustis)", "Elsbury, Mr. William James", "Bourke, Miss. Mary", "Chapman, Mr. John Henry"}),
- new ColumnInfo( "Sex", false, 0, 0, 0, 0, 0, new string[]{ "male", "female" }),
- new ColumnInfo( "Age", true, 177, 29.69911766052246f, 0.41999998688697815f, 80, 28, new string[]{ }),
- new ColumnInfo( "SibSp", true, 0, 0.523007869720459f, 0, 8, 0, new string[]{ }),
- new ColumnInfo( "Parch", true, 0, 0.3815937042236328f, 0, 6, 0, new string[]{ }),
- new ColumnInfo( "Ticket", false, 0, 0, 0, 0, 0, new string[]{ "347082", "CA. 2343", "1601", "3101295", "CA 2144", "347088", "S.O.C. 14879", "382652", "LINE", "PC 17757" }),
- new ColumnInfo( "Fare", true, 0, 32.20420837402344f, 0, 512.3292236328125f, 14.45419979095459f, new string[]{ }),
- new ColumnInfo( "Cabin", false, 687, 0, 0, 0, 0, new string[]{ "B96 B98", "G6", "C23 C25 C27", "C22 C26", "F33", "F2", "E101", "D", "C78", "C93" }),
- new ColumnInfo( "Embarked", false, 2, 0.3815937042236328f, 0, 6, 0, new string[]{ "S", "C", "Q" }),
+
+ new ColumnInfo( "PassengerId", "columnType", true, 0, 446, 1, 891, 446, new string[]{ }, new int[] {}, new float[] {}, 0.01f,0.1f ),
+ new ColumnInfo( "Survived", "columnType", true, 0, 0.38383838534355164f, 0, 1, 0, new string[]{ }, new int[] {}, new float[] {}, 0.01f,0.1f ),
+ new ColumnInfo( "Pclass", "columnType", true, 0, 2.3086419105529785f, 1, 3, 3, new string[]{ }, new int[] {}, new float[] {}, 0.01f,0.1f ),
+ new ColumnInfo( "Name", "columnType", false, 0, 0, 0, 0, 0, new string[]{"Braund, Mr. Owen Harris", "Boulos, Mr. Hanna", "Frolicher-Stehli, Mr. Maxmillian", "Gilinski, Mr. Eliezer", "Murdlin, Mr. Joseph", "Rintamaki, Mr. Matti", "Stephenson, Mrs. Walter Bertram (Martha Eustis)", "Elsbury, Mr. William James", "Bourke, Miss. Mary", "Chapman, Mr. John Henry"}, new int[] {}, new float[] {}, 0.01f,0.1f ),
+ new ColumnInfo( "Sex", "columnType", false, 0, 0, 0, 0, 0, new string[]{ "male", "female" }, new int[] {}, new float[] {}, 0.01f,0.1f ),
+ new ColumnInfo( "Age", "columnType", true, 177, 29.69911766052246f, 0.41999998688697815f, 80, 28, new string[]{ }, new int[] {}, new float[] {}, 0.01f,0.1f ),
+ new ColumnInfo( "SibSp", "columnType", true, 0, 0.523007869720459f, 0, 8, 0, new string[]{ }, new int[] {}, new float[] {}, 0.01f,0.1f ),
+ new ColumnInfo( "Parch", "columnType", true, 0, 0.3815937042236328f, 0, 6, 0, new string[]{ }, new int[] {}, new float[] {}, 0.01f,0.1f ),
+ new ColumnInfo( "Ticket", "columnType", false, 0, 0, 0, 0, 0, new string[]{ "347082", "CA. 2343", "1601", "3101295", "CA 2144", "347088", "S.O.C. 14879", "382652", "LINE", "PC 17757" }, new int[] {}, new float[] {}, 0.01f,0.1f ),
+ new ColumnInfo( "Fare", "columnType", true, 0, 32.20420837402344f, 0, 512.3292236328125f, 14.45419979095459f, new string[]{ }, new int[] {}, new float[] {}, 0.01f,0.1f ),
+ new ColumnInfo( "Cabin", "columnType", false, 687, 0, 0, 0, 0, new string[]{ "B96 B98", "G6", "C23 C25 C27", "C22 C26", "F33", "F2", "E101", "D", "C78", "C93" }, new int[] {}, new float[] {}, 0.01f,0.1f ),
+ new ColumnInfo( "Embarked", "columnType", false, 2, 0.3815937042236328f, 0, 6, 0, new string[]{ "S", "C", "Q" }, new int[] {}, new float[] {}, 0.01f,0.1f ),
};
dataset.rowCount = 891;
dataset.nullCols = 3;
@@ -99,11 +98,9 @@ namespace api.Services
model.type = "binarni-klasifikacioni";
model.optimizer = "Adam";
model.lossFunction = "mean_squared_error";
- model.hiddenLayerNeurons = 3;
model.hiddenLayers = 5;
model.batchSize = 8;
model.outputNeurons = 0;
- model.hiddenLayerActivationFunctions = new string[] { "relu", "relu", "relu", "relu", "relu" };
model.outputLayerActivationFunction = "sigmoid";
model.metrics = new string[] { };
model.epochs = 5;
@@ -121,9 +118,8 @@ namespace api.Services
experiment.uploaderId = "000000000000000000000000";
experiment.inputColumns = new string[] { "Embarked" };
experiment.outputColumn = "Survived";
- experiment.randomOrder = true;
- experiment.randomTestSet = true;
- experiment.randomTestSetDistribution = 0.30000001192092896f;
+ experiment.dateCreated = DateTime.Now;
+ experiment.lastUpdated = DateTime.Now;
experiment.nullValues = "delete_rows";
experiment.nullValuesReplacers = new NullValues[] { };
experiment.encodings = new[]
@@ -179,20 +175,19 @@ namespace api.Services
dataset.dateCreated = DateTime.Now;
dataset.lastUpdated = DateTime.Now;
dataset.delimiter = "";
- dataset.hasHeader = true;
dataset.columnInfo = new[]
{
- new ColumnInfo( "Unnamed: 0", true, 0, 26969.5f, 0, 53939, 26969.5f, new string[]{ }),
- new ColumnInfo( "carat", true, 0, 0.7979397773742676f, 0.20000000298023224f, 5.010000228881836f, 0.699999988079071f, new string[]{ }),
- new ColumnInfo( "cut", false, 0, 0, 0, 0, 0, new string[]{ "Ideal", "Premium", "Very Good", "Good", "Fair" }),
- new ColumnInfo( "color", false, 0, 0, 0, 0, 0, new string[]{"G", "E", "F", "H", "D", "I", "I", "J"}),
- new ColumnInfo( "clarity", false, 0, 0, 0, 0, 0, new string[]{ "SI1", "VS2","SI2", "VS1", "VVS2", "VVS1", "IF", "I1" }),
- new ColumnInfo( "depth", true, 0, 61.74940490722656f, 43, 79, 61.79999923706055f, new string[]{ }),
- new ColumnInfo( "table", true, 0, 57.457183837890625f, 43, 95, 57, new string[]{ }),
- new ColumnInfo( "price", true, 0, 3932.7998046875f, 326, 18823, 2401, new string[]{ }),
- new ColumnInfo( "x", true, 0, 5.731157302856445f, 0, 10.739999771118164f, 5.699999809265137f, new string[]{ }),
- new ColumnInfo( "y", true, 0, 5.73452615737915f, 0, 58.900001525878906f, 5.710000038146973f, new string[]{ }),
- new ColumnInfo( "z", true, 0, 3.538733720779419f, 0, 31.799999237060547f, 3.5299999713897705f, new string[]{ })
+ new ColumnInfo( "Unnamed: 0", "columnType", true, 0, 26969.5f, 0, 53939, 26969.5f, new string[]{ }, new int[] {}, new float[] {}, 0.01f,0.1f ),
+ new ColumnInfo( "carat", "columnType", true, 0, 0.7979397773742676f, 0.20000000298023224f, 5.010000228881836f, 0.699999988079071f, new string[]{ }, new int[] {}, new float[] {}, 0.01f,0.1f ),
+ new ColumnInfo( "cut", "columnType", false, 0, 0, 0, 0, 0, new string[]{ "Ideal", "Premium", "Very Good", "Good", "Fair" }, new int[] {}, new float[] {}, 0.01f,0.1f ),
+ new ColumnInfo( "color", "columnType", false, 0, 0, 0, 0, 0, new string[]{"G", "E", "F", "H", "D", "I", "I", "J"}, new int[] {}, new float[] {}, 0.01f,0.1f ),
+ new ColumnInfo( "clarity", "columnType", false, 0, 0, 0, 0, 0, new string[]{ "SI1", "VS2","SI2", "VS1", "VVS2", "VVS1", "IF", "I1" }, new int[] {}, new float[] {}, 0.01f,0.1f ),
+ new ColumnInfo( "depth", "columnType", true, 0, 61.74940490722656f, 43, 79, 61.79999923706055f, new string[]{ }, new int[] {}, new float[] {}, 0.01f,0.1f ),
+ new ColumnInfo( "table", "columnType", true, 0, 57.457183837890625f, 43, 95, 57, new string[]{ }, new int[] {}, new float[] {}, 0.01f,0.1f ),
+ new ColumnInfo( "price", "columnType", true, 0, 3932.7998046875f, 326, 18823, 2401, new string[]{ }, new int[] {}, new float[] {}, 0.01f,0.1f ),
+ new ColumnInfo( "x", "columnType", true, 0, 5.731157302856445f, 0, 10.739999771118164f, 5.699999809265137f, new string[]{ }, new int[] {}, new float[] {}, 0.01f,0.1f ),
+ new ColumnInfo( "y", "columnType", true, 0, 5.73452615737915f, 0, 58.900001525878906f, 5.710000038146973f, new string[]{ }, new int[] {}, new float[] {}, 0.01f,0.1f ),
+ new ColumnInfo( "z", "columnType", true, 0, 3.538733720779419f, 0, 31.799999237060547f, 3.5299999713897705f, new string[]{ }, new int[] {}, new float[] {}, 0.01f,0.1f )
};
dataset.rowCount = 53940;
dataset.nullCols = 0;
@@ -214,11 +209,9 @@ namespace api.Services
model.type = "regresioni";
model.optimizer = "Adam";
model.lossFunction = "mean_absolute_error";
- model.hiddenLayerNeurons = 2;
model.hiddenLayers = 4;
model.batchSize = 5;
model.outputNeurons = 0;
- model.hiddenLayerActivationFunctions = new string[] { "relu", "relu", "relu", "relu" };
model.outputLayerActivationFunction = "relu";
model.metrics = new string[] { };
model.epochs = 5;
@@ -236,9 +229,8 @@ namespace api.Services
experiment.uploaderId = "000000000000000000000000";
experiment.inputColumns = new string[] { "Unnamed: 0", "carat", "cut", "color", "clarity", "depth", "table", "x", "y", "z" };
experiment.outputColumn = "price";
- experiment.randomOrder = true;
- experiment.randomTestSet = true;
- experiment.randomTestSetDistribution = 0.30000001192092896f;
+ experiment.dateCreated = DateTime.Now;
+ experiment.lastUpdated = DateTime.Now;
experiment.nullValues = "delete_rows";
experiment.nullValuesReplacers = new NullValues[] { };
experiment.encodings = new[]
@@ -297,14 +289,13 @@ namespace api.Services
dataset.dateCreated = DateTime.Now;
dataset.lastUpdated = DateTime.Now;
dataset.delimiter = "";
- dataset.hasHeader = true;
dataset.columnInfo = new[]
{
- new ColumnInfo( "sepal_length", true, 0, 5.8433332443237305f, 4.300000190734863f, 7.900000095367432f, 5.800000190734863f, new string[]{ }),
- new ColumnInfo( "sepal_width", true, 0, 3.053999900817871f, 2, 4.400000095367432f, 3, new string[]{ }),
- new ColumnInfo( "petal_length", true, 0, 3.758666753768921f, 1, 6.900000095367432f, 4.349999904632568f, new string[]{ }),
- new ColumnInfo( "petal_width", true, 0, 1.1986666917800903f, 0.10000000149011612f, 2.5f, 1.2999999523162842f, new string[]{}),
- new ColumnInfo( "class", false, 0, 0, 0, 0, 0, new string[]{ "Iris-setosa", "Iris-versicolor", "Iris-virginica" }),
+ new ColumnInfo( "sepal_length", "columnType", true, 0, 5.8433332443237305f, 4.300000190734863f, 7.900000095367432f, 5.800000190734863f, new string[]{ }, new int[] {}, new float[] {}, 0.01f, 0.1f ),
+ new ColumnInfo( "sepal_width", "columnType", true, 0, 3.053999900817871f, 2, 4.400000095367432f, 3, new string[]{ }, new int[] {}, new float[] {}, 0.01f,0.1f ),
+ new ColumnInfo( "petal_length", "columnType", true, 0, 3.758666753768921f, 1, 6.900000095367432f, 4.349999904632568f, new string[]{ }, new int[] {}, new float[] {}, 0.01f,0.1f ),
+ new ColumnInfo( "petal_width", "columnType", true, 0, 1.1986666917800903f, 0.10000000149011612f, 2.5f, 1.2999999523162842f, new string[]{}, new int[] {}, new float[] {}, 0.01f,0.1f ),
+ new ColumnInfo( "class", "columnType", false, 0, 0, 0, 0, 0, new string[]{ "Iris-setosa", "Iris-versicolor", "Iris-virginica" }, new int[] {}, new float[] {}, 0.01f,0.1f ),
};
dataset.nullCols = 150;
dataset.nullRows = 0;
@@ -324,11 +315,9 @@ namespace api.Services
model.type = "multi-klasifikacioni";
model.optimizer = "Adam";
model.lossFunction = "sparse_categorical_crossentropy";
- model.hiddenLayerNeurons = 3;
model.hiddenLayers = 3;
model.batchSize = 4;
model.outputNeurons = 0;
- model.hiddenLayerActivationFunctions = new string[] { "relu", "relu", "softmax" };
model.outputLayerActivationFunction = "softmax";
model.metrics = new string[] { };
model.epochs = 1;
@@ -346,9 +335,8 @@ namespace api.Services
experiment.uploaderId = "000000000000000000000000";
experiment.inputColumns = new string[] { "sepal_length", "sepal_width", "petal_length", "petal_width" };
experiment.outputColumn = "class";
- experiment.randomOrder = true;
- experiment.randomTestSet = true;
- experiment.randomTestSetDistribution = 0.20000000298023224f;
+ experiment.dateCreated = DateTime.Now;
+ experiment.lastUpdated = DateTime.Now;
experiment.nullValues = "delete_rows";
experiment.nullValuesReplacers = new NullValues[] { };
experiment.encodings = new[]
diff --git a/backend/api/api/Models/JwtToken.cs b/backend/api/api/Services/JwtToken.cs
index 20b0bc73..20b0bc73 100644
--- a/backend/api/api/Models/JwtToken.cs
+++ b/backend/api/api/Services/JwtToken.cs
diff --git a/backend/api/api/Models/PasswordCrypt.cs b/backend/api/api/Services/PasswordCrypt.cs
index 016fde51..016fde51 100644
--- a/backend/api/api/Models/PasswordCrypt.cs
+++ b/backend/api/api/Services/PasswordCrypt.cs
diff --git a/backend/microservice/api/controller.py b/backend/microservice/api/controller.py
index 9b83b8e7..41035cc7 100644
--- a/backend/microservice/api/controller.py
+++ b/backend/microservice/api/controller.py
@@ -107,26 +107,32 @@ def predict():
@app.route('/preprocess',methods=['POST'])
def returnColumnsInfo():
print("********************************PREPROCESS*******************************")
+
dataset = json.loads(request.form["dataset"])
file = request.files.get("file")
data=pd.read_csv(file)
-
- #dataset={}
+ '''
#f = request.json['filepath']
#data=pd.read_csv(f)
-
+ dataset={}
+ '''
preprocess = newmlservice.returnColumnsInfo(data)
#samo 10 jedinstvenih posto ih ima previse, bilo bi dobro da promenimo ovo da to budu 10 najzastupljenijih vrednosti
+
for col in preprocess["columnInfo"]:
- col["uniqueValues"] = col["uniqueValues"][0:10]
- col["uniqueValuesCount"] = col["uniqueValuesCount"][0:10]
+ col["uniqueValues"] = col["uniqueValues"][0:6]
+ col["uniqueValuesCount"] = col["uniqueValuesCount"][0:6]
+ col['uniqueValuesPercent']=col['uniqueValuesPercent'][0:6]
dataset["columnInfo"] = preprocess["columnInfo"]
dataset["nullCols"] = preprocess["allNullColl"]
dataset["nullRows"] = preprocess["allNullRows"]
dataset["colCount"] = preprocess["colCount"]
dataset["rowCount"] = preprocess["rowCount"]
dataset["isPreProcess"] = True
- print(dataset)
+ #print(dataset)
+
+
+
return jsonify(dataset)
print("App loaded.")
diff --git a/backend/microservice/api/newmlservice.py b/backend/microservice/api/newmlservice.py
index 604e4d3c..9e26b03a 100644
--- a/backend/microservice/api/newmlservice.py
+++ b/backend/microservice/api/newmlservice.py
@@ -1,6 +1,7 @@
from enum import unique
from itertools import count
import os
+from sys import breakpointhook
import pandas as pd
from sklearn import datasets, multiclass
import tensorflow as tf
@@ -38,27 +39,38 @@ def returnColumnsInfo(dataset):
unique=datafront[kolona].value_counts()
uniquevalues=[]
uniquevaluescount=[]
+ uniquevaluespercent=[]
for val, count in unique.iteritems():
- uniquevalues.append(val)
- uniquevaluescount.append(count)
+ if(val):
+ uniquevalues.append(val)
+ uniquevaluescount.append(count)
+ percent=count/rowCount
+ uniquevaluespercent.append(percent)
#print(uniquevalues)
#print(uniquevaluescount)
mean=0
median=0
minimum=0
maximum=0
+ q1=0
+ q3=0
nullCount=datafront[kolona].isnull().sum()
if(nullCount>0):
allNullCols=allNullCols+1
- frontreturn={'columnName':kolona,
+ frontreturn={
+ 'columnName':kolona,
'isNumber':False,
'uniqueValues':uniquevalues,
'uniqueValuesCount':uniquevaluescount,
- 'median':float(mean),
- 'mean':float(median),
+ 'uniqueValuesPercent':uniquevaluespercent,
+ 'mean':float(mean),
+ 'median':float(median),
'numNulls':int(nullCount),
'min':float(minimum),
'max':float(maximum),
+ 'q1':float(q1),
+ 'q3':float(q3),
+
}
dict.append(frontreturn)
else:
@@ -66,18 +78,39 @@ def returnColumnsInfo(dataset):
maximum=max(datafront[kolona])
mean=datafront[kolona].mean()
median=s.median(datafront[kolona].copy().dropna())
+ q1= np.percentile(datafront[kolona].copy().dropna(), 25)
+ q3= np.percentile(datafront[kolona].copy().dropna(), 75)
nullCount=datafront[kolona].isnull().sum()
if(nullCount>0):
allNullCols=allNullCols+1
- frontreturn={'columnName':kolona,
+
+ #pretvaranje u kategorijsku
+ datafront = datafront.astype({kolona: str})
+ print(datafront.dtypes)
+ unique=datafront[kolona].value_counts()
+ uniquevaluesn=[]
+ uniquevaluescountn=[]
+ uniquevaluespercentn=[]
+ for val, count in unique.iteritems():
+ if(val):
+ uniquevaluesn.append(val)
+ uniquevaluescountn.append(count)
+ percent=count/rowCount
+ uniquevaluespercentn.append(percent)
+ frontreturn={
+ 'columnName':kolona,
'isNumber':1,
- 'uniqueValues':[],
- 'uniqueValuesCount':[],
+ 'uniqueValues':uniquevaluesn,
+ 'uniqueValuesCount':uniquevaluescountn,
+ 'uniqueValuesPercent':uniquevaluespercentn,
'mean':float(mean),
'median':float(median),
'numNulls':int(nullCount),
'min':float(minimum),
'max':float(maximum),
+ 'q1':float(q1),
+ 'q3':float(q3),
+
}
dict.append(frontreturn)
NullRows = datafront[datafront.isnull().any(axis=1)]
@@ -115,17 +148,28 @@ class TrainingResult:
'''
def train(dataset, paramsModel,paramsExperiment,paramsDataset,callback):
+ ###UCITAVANJE SETA
problem_type = paramsModel["type"]
#print(problem_type)
data = pd.DataFrame()
#print(data)
for col in paramsExperiment["inputColumns"]:
#print(col)
- data[col]=dataset[col]
+ if(col!=paramsExperiment["outputColumn"]):
+ data[col]=dataset[col]
output_column = paramsExperiment["outputColumn"]
data[output_column] = dataset[output_column]
#print(data)
+ ###KATEGORIJSKE KOLONE
+ kategorijskekolone=[]
+ ###PRETVARANJE NUMERICKIH U KATREGORIJSKE AKO JE KORISNIK TAKO OZNACIO
+ columnInfo=paramsDataset['columnInfo']
+ for col in columnInfo:
+ if(col['columnType']=="Kategorijski"):
+ data[col['columnName']]=data[col['columnName']].apply(str)
+ kategorijskekolone.append(col['columnName'])
+
###NULL
null_value_options = paramsExperiment["nullValues"]
null_values_replacers = paramsExperiment["nullValuesReplacers"]
@@ -149,16 +193,18 @@ def train(dataset, paramsModel,paramsExperiment,paramsDataset,callback):
#
# Brisanje kolona koje ne uticu na rezultat
#
+ '''
num_rows=data.shape[0]
for col in data.columns:
if((data[col].nunique()==(num_rows)) and (data[col].dtype==np.object_)):
data.pop(col)
#
+ '''
### Enkodiranje
encodings=paramsExperiment["encodings"]
- datafront=dataset.copy()
- svekolone=datafront.columns
- kategorijskekolone=datafront.select_dtypes(include=['object']).columns
+ #datafront=dataset.copy()
+ #svekolone=datafront.columns
+ #kategorijskekolone=datafront.select_dtypes(include=['object']).columns
for kolonaEncoding in encodings:
kolona = kolonaEncoding["columnName"]
@@ -235,75 +281,75 @@ def train(dataset, paramsModel,paramsExperiment,paramsDataset,callback):
#
#
###OPTIMIZATORI
- """
- if(params['optimizer']=='adam'):
- opt=tf.keras.optimizers.Adam(learning_rate=params['learningRate'])
+
+ if(paramsModel['optimizer']=='Adam'):
+ opt=tf.keras.optimizers.Adam(learning_rate=float(paramsModel['learningRate']))
- elif(params['optimizer']=='adadelta'):
- opt=tf.keras.optimizers.Adadelta(learning_rate=params['learningRate'])
+ elif(paramsModel['optimizer']=='Adadelta'):
+ opt=tf.keras.optimizers.Adadelta(learning_rate=float(paramsModel['learningRate']))
- elif(params['optimizer']=='adagrad'):
- opt=tf.keras.optimizers.Adagrad(learning_rate=params['learningRate'])
+ elif(paramsModel['optimizer']=='Adagrad'):
+ opt=tf.keras.optimizers.Adagrad(learning_rate=float(paramsModel['learningRate']))
- elif(params['optimizer']=='adamax'):
- opt=tf.keras.optimizers.Adamax(learning_rate=params['learningRate'])
+ elif(paramsModel['optimizer']=='Adamax'):
+ opt=tf.keras.optimizers.Adamax(learning_rate=float(paramsModel['learningRate']))
- elif(params['optimizer']=='nadam'):
- opt=tf.keras.optimizers.Nadam(learning_rate=params['learningRate'])
+ elif(paramsModel['optimizer']=='Nadam'):
+ opt=tf.keras.optimizers.Nadam(learning_rate=float(paramsModel['learningRate']))
- elif(params['optimizer']=='sgd'):
- opt=tf.keras.optimizers.SGD(learning_rate=params['learningRate'])
+ elif(paramsModel['optimizer']=='Sgd'):
+ opt=tf.keras.optimizers.SGD(learning_rate=float(paramsModel['learningRate']))
- elif(params['optimizer']=='ftrl'):
- opt=tf.keras.optimizers.Ftrl(learning_rate=params['learningRate'])
+ elif(paramsModel['optimizer']=='Ftrl'):
+ opt=tf.keras.optimizers.Ftrl(learning_rate=float(paramsModel['learningRate']))
- elif(params['optimizer']=='rmsprop'):
- opt=tf.keras.optimizers.RMSprop(learning_rate=params['learningRate'])
+ elif(paramsModel['optimizer']=='Rmsprop'):
+ opt=tf.keras.optimizers.RMSprop(learning_rate=float(paramsModel['learningRate']))
###REGULARIZACIJA
#regularisation={'kernelType':'l1 ili l2 ili l1_l2','kernelRate':default=0.01 ili jedna od vrednosti(0.0001,0.001,0.1,1,2,3) ili neka koju je korisnik zadao,'biasType':'','biasRate':'','activityType','activityRate'}
- reg=params['regularisation']
-
- ###Kernel
- if(reg['kernelType']=='l1'):
- kernelreg=tf.keras.regularizers.l1(reg['kernelRate'])
- elif(reg['kernelType']=='l2'):
- kernelreg=tf.keras.regularizers.l2(reg['kernelRate'])
- elif(reg['kernelType']=='l1l2'):
- kernelreg=tf.keras.regularizers.l1_l2(l1=reg['kernelRate'][0],l2=reg['kernelRate'][1])
-
- ###Bias
- if(reg['biasType']=='l1'):
- biasreg=tf.keras.regularizers.l1(reg['biasRate'])
- elif(reg['biasType']=='l2'):
- biasreg=tf.keras.regularizers.l2(reg['biasRate'])
- elif(reg['biasType']=='l1l2'):
- biasreg=tf.keras.regularizers.l1_l2(l1=reg['biasRate'][0],l2=reg['biasRate'][1])
-
- ###Activity
- if(reg['kernelType']=='l1'):
- activityreg=tf.keras.regularizers.l1(reg['activityRate'])
- elif(reg['kernelType']=='l2'):
- activityreg=tf.keras.regularizers.l2(reg['activityRate'])
- elif(reg['kernelType']=='l1l2'):
- activityreg=tf.keras.regularizers.l1_l2(l1=reg['activityRate'][0],l2=reg['activityRate'][1])
- """
+
+
filepath=os.path.join("temp/",paramsExperiment['_id']+"_"+paramsModel['_id']+".h5")
if(problem_type=='multi-klasifikacioni'):
#print('multi')
+
+ reg=paramsModel['regularisation'][0]
+ regRate=float(paramsModel['regularisationRate'][0])
+ if(reg=='l1'):
+ kernelreg=tf.keras.regularizers.l1(regRate)
+ biasreg=tf.keras.regularizers.l1(regRate)
+ activityreg=tf.keras.regularizers.l1(regRate)
+ elif(reg=='l2'):
+ kernelreg=tf.keras.regularizers.l2(regRate)
+ biasreg=tf.keras.regularizers.l2(regRate)
+ activityreg=tf.keras.regularizers.l2(regRate)
+
classifier=tf.keras.Sequential()
-
- classifier.add(tf.keras.layers.Dense(units=paramsModel['hiddenLayerNeurons'], activation=paramsModel['hiddenLayerActivationFunctions'][0],input_dim=x_train.shape[1]))#prvi skriveni + definisanje prethodnog-ulaznog
+ classifier.add(tf.keras.layers.Dense(units=paramsModel['hiddenLayerNeurons'], activation=paramsModel['hiddenLayerActivationFunctions'][0],input_dim=x_train.shape[1], kernel_regularizer=kernelreg, bias_regularizer=biasreg, activity_regularizer=activityreg))#prvi skriveni + definisanje prethodnog-ulaznog
+
for i in range(paramsModel['hiddenLayers']-1):#ako postoji vise od jednog skrivenog sloja
- #print(i)
- classifier.add(tf.keras.layers.Dense(units=paramsModel['hiddenLayerNeurons'], activation=paramsModel['hiddenLayerActivationFunctions'][i+1]))#i-ti skriveni sloj
+ ###Kernel
+ reg=paramsModel['regularisation'][i+1]
+ regRate=float(paramsModel['regularisationRate'][i+1])
+ if(reg=='l1'):
+ kernelreg=tf.keras.regularizers.l1(regRate)
+ biasreg=tf.keras.regularizers.l1(regRate)
+ activityreg=tf.keras.regularizers.l1(regRate)
+ elif(reg=='l2'):
+ kernelreg=tf.keras.regularizers.l2(regRate)
+ biasreg=tf.keras.regularizers.l2(regRate)
+ activityreg=tf.keras.regularizers.l2(regRate)
+
+ classifier.add(tf.keras.layers.Dense(units=paramsModel['hiddenLayerNeurons'], activation=paramsModel['hiddenLayerActivationFunctions'][i+1],kernel_regularizer=kernelreg, bias_regularizer=biasreg, activity_regularizer=activityreg))#i-ti skriveni sloj
+
classifier.add(tf.keras.layers.Dense(units=5, activation=paramsModel['outputLayerActivationFunction']))#izlazni sloj
- classifier.compile(loss =paramsModel["lossFunction"] , optimizer = paramsModel['optimizer'] , metrics =paramsModel['metrics'])
+ classifier.compile(loss =paramsModel["lossFunction"] , optimizer = opt, metrics =paramsModel['metrics'])
- history=classifier.fit(x_train, y_train, epochs = paramsModel['epochs'],batch_size=paramsModel['batchSize'],callbacks=callback(x_test, y_test,paramsModel['_id']))
+        history=classifier.fit(x_train, y_train, epochs = paramsModel['epochs'],batch_size=int(paramsModel['batchSize']),callbacks=callback(x_test, y_test,paramsModel['_id']))
hist=history.history
#plt.plot(hist['accuracy'])
@@ -325,17 +371,39 @@ def train(dataset, paramsModel,paramsExperiment,paramsDataset,callback):
elif(problem_type=='binarni-klasifikacioni'):
#print('*************************************************************************binarni')
+ reg=paramsModel['regularisation'][0]
+ regRate=float(paramsModel['regularisationRate'][0])
+ if(reg=='l1'):
+ kernelreg=tf.keras.regularizers.l1(regRate)
+ biasreg=tf.keras.regularizers.l1(regRate)
+ activityreg=tf.keras.regularizers.l1(regRate)
+ elif(reg=='l2'):
+ kernelreg=tf.keras.regularizers.l2(regRate)
+ biasreg=tf.keras.regularizers.l2(regRate)
+ activityreg=tf.keras.regularizers.l2(regRate)
classifier=tf.keras.Sequential()
- classifier.add(tf.keras.layers.Dense(units=paramsModel['hiddenLayerNeurons'], activation=paramsModel['hiddenLayerActivationFunctions'][0],input_dim=x_train.shape[1]))#prvi skriveni + definisanje prethodnog-ulaznog
+ classifier.add(tf.keras.layers.Dense(units=paramsModel['hiddenLayerNeurons'], activation=paramsModel['hiddenLayerActivationFunctions'][0],input_dim=x_train.shape[1],kernel_regularizer=kernelreg, bias_regularizer=biasreg, activity_regularizer=activityreg))#prvi skriveni + definisanje prethodnog-ulaznog
+
for i in range(paramsModel['hiddenLayers']-1):#ako postoji vise od jednog skrivenog sloja
#print(i)
- classifier.add(tf.keras.layers.Dense(units=paramsModel['hiddenLayerNeurons'], activation=paramsModel['hiddenLayerActivationFunctions'][i+1]))#i-ti skriveni sloj
+ reg=paramsModel['regularisation'][i+1]
+ regRate=float(paramsModel['regularisationRate'][i+1])
+ if(reg=='l1'):
+ kernelreg=tf.keras.regularizers.l1(regRate)
+ biasreg=tf.keras.regularizers.l1(regRate)
+ activityreg=tf.keras.regularizers.l1(regRate)
+ elif(reg=='l2'):
+ kernelreg=tf.keras.regularizers.l2(regRate)
+ biasreg=tf.keras.regularizers.l2(regRate)
+ activityreg=tf.keras.regularizers.l2(regRate)
+ classifier.add(tf.keras.layers.Dense(units=paramsModel['hiddenLayerNeurons'], activation=paramsModel['hiddenLayerActivationFunctions'][i+1],kernel_regularizer=kernelreg, bias_regularizer=biasreg, activity_regularizer=activityreg))#i-ti skriveni sloj
+
classifier.add(tf.keras.layers.Dense(units=1, activation=paramsModel['outputLayerActivationFunction']))#izlazni sloj
- classifier.compile(loss =paramsModel["lossFunction"] , optimizer = paramsModel['optimizer'] , metrics =paramsModel['metrics'])
+ classifier.compile(loss =paramsModel["lossFunction"] , optimizer = opt , metrics =paramsModel['metrics'])
- history=classifier.fit(x_train, y_train, epochs = paramsModel['epochs'],batch_size=paramsModel['batchSize'],callbacks=callback(x_test, y_test,paramsModel['_id']))
+        history=classifier.fit(x_train, y_train, epochs = paramsModel['epochs'],batch_size=int(paramsModel['batchSize']),callbacks=callback(x_test, y_test,paramsModel['_id']))
hist=history.history
y_pred=classifier.predict(x_test)
y_pred=(y_pred>=0.5).astype('int')
@@ -351,17 +419,40 @@ def train(dataset, paramsModel,paramsExperiment,paramsDataset,callback):
return filepath,hist
elif(problem_type=='regresioni'):
+ reg=paramsModel['regularisation'][0]
+ regRate=float(paramsModel['regularisationRate'][0])
+ if(reg=='l1'):
+ kernelreg=tf.keras.regularizers.l1(regRate)
+ biasreg=tf.keras.regularizers.l1(regRate)
+ activityreg=tf.keras.regularizers.l1(regRate)
+ elif(reg=='l2'):
+ kernelreg=tf.keras.regularizers.l2(regRate)
+ biasreg=tf.keras.regularizers.l2(regRate)
+ activityreg=tf.keras.regularizers.l2(regRate)
classifier=tf.keras.Sequential()
- classifier.add(tf.keras.layers.Dense(units=paramsModel['hiddenLayerNeurons'], activation=paramsModel['hiddenLayerActivationFunctions'][0],input_dim=x_train.shape[1]))#prvi skriveni + definisanje prethodnog-ulaznog
+ classifier.add(tf.keras.layers.Dense(units=paramsModel['hiddenLayerNeurons'], activation=paramsModel['hiddenLayerActivationFunctions'][0],input_dim=x_train.shape[1],kernel_regularizer=kernelreg, bias_regularizer=biasreg, activity_regularizer=activityreg))#prvi skriveni + definisanje prethodnog-ulaznog
+
for i in range(paramsModel['hiddenLayers']-1):#ako postoji vise od jednog skrivenog sloja
#print(i)
- classifier.add(tf.keras.layers.Dense(units=paramsModel['hiddenLayerNeurons'], activation=paramsModel['hiddenLayerActivationFunctions'][i+1]))#i-ti skriveni sloj
- classifier.add(tf.keras.layers.Dense(units=1))
+ reg=paramsModel['regularisation'][i+1]
+ regRate=float(paramsModel['regularisationRate'][i+1])
+ if(reg=='l1'):
+ kernelreg=tf.keras.regularizers.l1(regRate)
+ biasreg=tf.keras.regularizers.l1(regRate)
+ activityreg=tf.keras.regularizers.l1(regRate)
+ elif(reg=='l2'):
+ kernelreg=tf.keras.regularizers.l2(regRate)
+ biasreg=tf.keras.regularizers.l2(regRate)
+ activityreg=tf.keras.regularizers.l2(regRate)
+
+ classifier.add(tf.keras.layers.Dense(units=paramsModel['hiddenLayerNeurons'], activation=paramsModel['hiddenLayerActivationFunctions'][i+1],kernel_regularizer=kernelreg, bias_regularizer=biasreg, activity_regularizer=activityreg))#i-ti skriveni sloj
+
+        classifier.add(tf.keras.layers.Dense(units=1, activation=paramsModel['outputLayerActivationFunction']))
- classifier.compile(loss =paramsModel["lossFunction"] , optimizer = paramsModel['optimizer'] , metrics =paramsModel['metrics'])
+ classifier.compile(loss =paramsModel["lossFunction"] , optimizer = opt , metrics =paramsModel['metrics'])
- history=classifier.fit(x_train, y_train, epochs = paramsModel['epochs'],batch_size=paramsModel['batchSize'],callbacks=callback(x_test, y_test,paramsModel['_id']))
+        history=classifier.fit(x_train, y_train, epochs = paramsModel['epochs'],batch_size=int(paramsModel['batchSize']),callbacks=callback(x_test, y_test,paramsModel['_id']))
hist=history.history
y_pred=classifier.predict(x_test)
#print(classifier.evaluate(x_test, y_test))