Example #1
    def train(self):

        logging.info('Starting the training...')

        train_loss = []
        val_loss = []
        hidden_start = torch.zeros(self.batch_size, self.model.rnn_size)
        for e in tqdm(range(self.epochs), ascii=True, desc='Epochs'):
            t_loss = self.train_epoch(e)
            v_loss = self.val_epoch(e, hidden_start)

            # keep the losses from each epoch
            train_loss.append(t_loss)
            val_loss.append(v_loss)

        plot_loss(train_loss, val_loss)
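The plot_loss call above comes from a plotting helper that isn't shown in this example. A minimal sketch of a function compatible with that call, assuming it simply draws both per-epoch loss curves with matplotlib:

import matplotlib.pyplot as plt

def plot_loss(train_loss, val_loss):
    # Plot per-epoch training and validation losses on a single figure.
    epochs = range(1, len(train_loss) + 1)
    plt.plot(epochs, train_loss, label='train loss')
    plt.plot(epochs, val_loss, label='validation loss')
    plt.xlabel('epoch')
    plt.ylabel('loss')
    plt.legend()
    plt.show()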
Example #2
from data.datasets import StatsDatasetRegression
from models.linear_regression import LinearRegression
from trainer.regression_trainer import RegressionTrainer
import pandas as pd
import visualizer
import torch

# ----------------------------------------------------------------------------------------------
# This file trains and tests performance of the linear regression model on the optimized dataset
# ----------------------------------------------------------------------------------------------

# MODEL VARIABLES
MODEL = LinearRegression(4, 2)
TRAINING_SET = StatsDatasetRegression(pd.read_csv("../../data/datasets/processed/opt_reg_train.csv"))
TESTING_SET = StatsDatasetRegression(pd.read_csv("../../data/datasets/processed/opt_reg_test.csv"))
EPOCHS = 500
LEARNING_RATE = 0.007
OPTIMIZER = torch.optim.SGD(MODEL.parameters(), lr=LEARNING_RATE)
LOSS = torch.nn.MSELoss()

if __name__ == '__main__':
    trainer = RegressionTrainer(MODEL, TRAINING_SET, TESTING_SET, EPOCHS, OPTIMIZER, LOSS)
    trainer.train()
    trainer.print_best_results()
    visualizer.plot_accuracy(trainer.epochs, trainer.val_accuracy, "../../results/graphs/accuracy/opt_reg_acc.png")
    visualizer.plot_loss(trainer.epochs, trainer.val_loss, "../../results/graphs/loss/opt_reg_loss.png")
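visualizer.plot_accuracy and visualizer.plot_loss are called with the total epoch count, a list of per-epoch validation values, and an output path. A minimal sketch consistent with that call signature (the actual visualizer module is not shown here, so the implementation below is an assumption):

import matplotlib.pyplot as plt

def _plot_metric(epochs, values, path, label):
    # Plot one per-epoch metric and save the figure to the given file path.
    plt.figure()
    plt.plot(range(1, epochs + 1), values, label=label)
    plt.xlabel('epoch')
    plt.ylabel(label)
    plt.legend()
    plt.savefig(path)
    plt.close()

def plot_accuracy(epochs, accuracies, path):
    _plot_metric(epochs, accuracies, path, 'validation accuracy')

def plot_loss(epochs, losses, path):
    _plot_metric(epochs, losses, path, 'validation loss')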




Example #3
# NOTE: the StatsDatasetClassification and MLPNet imports below are assumptions,
# mirroring the module layout of the regression example above.
from data.datasets import StatsDatasetClassification
from models.mlp_net import MLPNet
from trainer.classification_trainer import ClassificationTrainer
import visualizer
import pandas as pd
import torch

# -------------------------------------------------------------------------------
# This file trains and tests performance of the MLP network on the simple dataset
# -------------------------------------------------------------------------------

# MODEL VARIABLES
MODEL = MLPNet(10, 6, 3)
TRAINING_SET = StatsDatasetClassification(
    pd.read_csv("../../data/datasets/processed/simple_train_data.csv"))
TESTING_SET = StatsDatasetClassification(
    pd.read_csv("../../data/datasets/processed/simple_test_data.csv"))
EPOCHS = 300
LEARNING_RATE = 0.005
OPTIMIZER = torch.optim.SGD(MODEL.parameters(), lr=LEARNING_RATE)
LOSS = torch.nn.CrossEntropyLoss()

if __name__ == '__main__':
    trainer = ClassificationTrainer(MODEL, TRAINING_SET, TESTING_SET, EPOCHS,
                                    OPTIMIZER, LOSS)
    trainer.train()
    trainer.print_best_results()
    visualizer.plot_accuracy(
        trainer.epochs, trainer.val_accuracy,
        "../../results/graphs/accuracy/simple_mlp_acc.png")
    visualizer.plot_loss(trainer.epochs, trainer.val_loss,
                         "../../results/graphs/loss/simple_mlp_loss.png")
Example #4
import dataset
import neutal_nets
import visualizer

# HYPERPARAMETERS
batch_size = 32
number_of_epochs = 5

train_samples, validation_samples = dataset.get_data()

# compile and train the model using the generator function
train_generator = dataset.generate(train_samples, batch_size)
validation_generator = dataset.generate(validation_samples, batch_size)

model = neutal_nets.le_net(0.5)

model.compile(loss='mse', optimizer='adam')
# NOTE: samples_per_epoch, nb_val_samples and nb_epoch are the legacy Keras 1
# argument names; Keras 2 renamed them to steps_per_epoch, validation_steps and
# epochs (which count batches rather than samples).
history_object = model.fit_generator(train_generator,
                                     samples_per_epoch=len(train_samples),
                                     validation_data=validation_generator,
                                     nb_val_samples=len(validation_samples),
                                     nb_epoch=number_of_epochs,
                                     verbose=1)
# save model
model.save('dg_model_lenet.h5')

print(history_object.history.keys())

visualizer.plot_loss(history_object)
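Here visualizer.plot_loss receives the Keras History object returned by fit_generator rather than raw loss lists. A minimal sketch of a compatible helper, assuming it plots the 'loss' and 'val_loss' series stored in history.history:

import matplotlib.pyplot as plt

def plot_loss(history_object):
    # Plot the training and validation MSE recorded by Keras for each epoch.
    plt.plot(history_object.history['loss'], label='training loss')
    plt.plot(history_object.history['val_loss'], label='validation loss')
    plt.xlabel('epoch')
    plt.ylabel('mean squared error')
    plt.legend()
    plt.show()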