easy_df_adam.cpp
#include <iostream>

#include "micrograd.hpp"
#include "value.hpp"
#include "mlp.hpp"

using namespace microgradCpp;

int main()
{
    // Load the Iris dataset, normalize the numeric features,
    // and label-encode the class column.
    DataFrame df;
    df.from_csv("./data/iris.csv");
    df.normalize();
    df.encode_column("variety");
    df.print();

    // Shuffle the rows before the train/test split.
    df.shuffle();
    df.print();

    // Fraction of rows used for training; the rest is held out for evaluation.
    double TRAIN_SIZE{0.8};

    // Create the MLP model.
    // Input: 4 features, hidden layers: [7, 7], output: 3 classes.
    MLP model(4, {7, 7, 3});
    auto params = model.parameters();

    // Hyperparameters: ask the user for the number of epochs.
    double learning_rate = 0.01;
    int epochs;
    std::cout << "Epochs: ";
    std::cin >> epochs;

    // Initialize the Adam optimizer over the model parameters.
    AdamOptimizer optimizer(params, learning_rate);

    // Train and evaluate the model.
    train_eval(df, TRAIN_SIZE, model, optimizer, epochs);

    return 0;
}
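/*
Note: what encode_column() presumably does
-----------
df.encode_column("variety") is assumed to replace the string class labels
with integer codes (label encoding). A minimal standalone sketch of that
idea under this assumption; it is not this DataFrame's actual implementation:

#include <string>
#include <unordered_map>
#include <vector>

std::vector<int> label_encode(const std::vector<std::string>& labels)
{
    std::unordered_map<std::string, int> codes; // label -> integer code
    std::vector<int> out;
    out.reserve(labels.size());
    for (const auto& s : labels)
    {
        // Assign the next unused code the first time a label is seen.
        auto it = codes.find(s);
        if (it == codes.end())
            it = codes.emplace(s, static_cast<int>(codes.size())).first;
        out.push_back(it->second);
    }
    return out;
}

// e.g. {"Setosa", "Setosa", "Versicolor", "Virginica"} -> {0, 0, 1, 2}
*/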
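/*
Note: what one Adam update presumably computes
-----------
AdamOptimizer is assumed to implement the standard Adam update rule
(Kingma & Ba, 2015). A self-contained sketch, independent of microgradCpp,
minimizing f(x) = (x - 3)^2 for a single scalar parameter; beta1, beta2,
and eps are the usual Adam defaults, not values taken from this library:

#include <cmath>
#include <iostream>

int main()
{
    double x = 0.0;          // parameter to optimize
    double m = 0.0, v = 0.0; // first and second moment estimates
    const double lr = 0.01, beta1 = 0.9, beta2 = 0.999, eps = 1e-8;

    for (int t = 1; t <= 2000; ++t)
    {
        double g = 2.0 * (x - 3.0); // gradient of (x - 3)^2
        m = beta1 * m + (1.0 - beta1) * g;
        v = beta2 * v + (1.0 - beta2) * g * g;
        double m_hat = m / (1.0 - std::pow(beta1, t)); // bias correction
        double v_hat = v / (1.0 - std::pow(beta2, t));
        x -= lr * m_hat / (std::sqrt(v_hat) + eps);
    }
    std::cout << "x ~= " << x << " (expected ~3)\n";
    return 0;
}
*/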
/*
Notes
-----------
Build and run:
    g++ -std=c++17 -Iinclude -O2 -o main easy_df_adam.cpp
or:
    make run
*/