# ---- Example 1 ----
def main(args):
    """Train a binary classifier on the Seoul bike data and save its weights.

    Args:
        args: command-line arguments (currently unused).
    """
    # Load training data (the second return value is the test loader).
    train_DataLoader, _  = scripts.get_data(data_path="../data/SeoulBikeData.csv",testData = True)

    model = NeuralNetworkClassification(input_dim=9).float()

    lr = 1e-1
    n_epochs = 100
    loss_fn = nn.BCELoss(reduction='mean')
    optimizer = optim.SGD(model.parameters(), lr=lr)

    size = len(train_DataLoader.dataset)
    model.train()  # set training mode once; no need to re-set it every batch
    for epoch in range(n_epochs):
        epoch_loss = 0.0
        for X, y in train_DataLoader:
            ypred = model(X)  # forward pass
            loss = loss_fn(ypred, y.float())  # mean BCE over the batch
            loss.backward()

            optimizer.step()
            optimizer.zero_grad()

            # .item() detaches the scalar from the graph; accumulating the raw
            # tensor (as before) kept every batch's autograd graph alive.
            epoch_loss += loss.item() / size

        if (epoch+1) % 10 == 0:
            print("Epoch : {}, loss :  {:.5f}".format(epoch+1, epoch_loss))
        # NOTE(review): `writer` is not defined in this snippet — presumably a
        # module-level SummaryWriter; confirm against the full file.
        writer.add_scalar("train_loss", epoch_loss, epoch)
        for tag, parm in model.named_parameters():
            writer.add_histogram(tag, parm.grad.data.cpu().numpy(), epoch)

    torch.save(model.state_dict(), "./trained_model.pth")
# ---- Example 2 ----
def main(args):
    """Train the LSTM classifier on the English data set and save it.

    Args:
        args: command-line arguments (currently unused).
    """
    # Load the training split; the test loader is discarded here.
    train_DataLoader, _, embedding_size = scripts.get_data(
        data_path="../data/Data1/train_eng.csv", testData=True)

    model = LSTM(input_size=embedding_size)
    loss_function = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

    writer = SummaryWriter()

    # (TensorBoard was originally launched via IPython magics:
    #  %load_ext tensorboard / %tensorboard --logdir runs)

    epochs = 50
    start_time = time.time()

    for epoch in range(epochs):
        correct, total = 0, 0
        epoch_start = time.time()
        for item in train_DataLoader:
            seq, label = item[0], item[1]
            optimizer.zero_grad()
            # Reset the recurrent state before every batch.
            model.hidden_cell = (torch.zeros(1, 1, model.hidden_layer_size),
                                 torch.zeros(1, 1, model.hidden_layer_size))

            y_pred = model(seq)
            # Per-sample accuracy bookkeeping (argmax of outputs vs. labels).
            for row in range(y_pred.shape[0]):
                guess = np.argmax(F.softmax(y_pred[row].data))
                target = np.argmax(label[row].data)
                correct += 1 if guess == target else 0
                total += 1
            single_loss = loss_function(y_pred, label.float())
            single_loss.backward()
            optimizer.step()

        time_per_epoch = time.time() - epoch_start
        f_measure = f1_loss(label, y_pred)
        for scalar_tag, scalar_value in (("train_loss", single_loss.item()),
                                         ("train_acc", 100 * correct / total),
                                         ("train_measure", f_measure),
                                         ("train_time", time_per_epoch)):
            writer.add_scalar(scalar_tag, scalar_value, epoch)
        for tag, parm in model.named_parameters():
            writer.add_histogram(tag, parm.grad.data.cpu().numpy(), epoch)
        print(
            f'epoch: {epoch:3} loss: {single_loss.item():10.8f}, accuracy: {(100 * correct / total)}, f-measure: {f_measure}, time = {time_per_epoch}'
        )

    print("MODEL TIME EXECUTION--- %s seconds ---" %
          (time.time() - start_time))
    print(f'epoch: {epoch:3} loss: {single_loss.item():10.10f}')
    torch.save(model.state_dict(), 'model.pt')
# ---- Example 3 ----
        return x


# evaluate the model
def evaluate_model(test_dl, model):
    """Compute classification accuracy of `model` over `test_dl`.

    Args:
        test_dl: iterable of (inputs, targets) tensor batches.
        model: callable mapping inputs to per-sample probabilities.

    Returns:
        Fraction of samples whose rounded prediction equals the target.
    """
    predictions, actuals = list(), list()
    # Inference only: skip autograd bookkeeping for speed and memory.
    with torch.no_grad():
        for inputs, targets in test_dl:
            yhat = model(inputs)
            yhat = yhat.detach().numpy()
            actual = targets.numpy()
            actual = actual.reshape((len(actual), 1))
            # round probabilities to hard class values {0, 1}
            yhat = yhat.round()
            predictions.append(yhat)
            actuals.append(actual)
    predictions, actuals = np.vstack(predictions), np.vstack(actuals)
    # Accuracy computed directly with numpy; equivalent to
    # sklearn.metrics.accuracy_score on (N, 1) arrays, without the dependency.
    acc = (predictions == actuals).all(axis=1).mean()
    return acc


if __name__ == '__main__':
    # Fetch only the test split; the training loader is unused here.
    _, testDataLoader = scripts.get_data(data_path="../data/SeoulBikeData.csv",
                                         testData=True)

    # Rebuild the network and restore the trained weights from disk.
    model = NeuralNetworkClassification(input_dim=9)
    state = torch.load("./trained_model.pth")
    model.load_state_dict(state)
    model.eval()  # evaluation mode (disables dropout/batch-norm updates)

    accuracy = evaluate_model(testDataLoader, model)
    print("Model Accuracy : {:.1f}%".format(accuracy * 100))
# ---- Example 4 ----
def main(args):
    """Grid-search training of the LSTM classifier.

    Trains one model per combination of (epochs, learning rate, loss
    function, hidden layer size), logs per-epoch metrics to TensorBoard,
    and saves the last trained model's weights to 'model.pt'.

    Args:
        args: command-line arguments (currently unused).
    """
    train_DataLoader, _, embedding_size = scripts.get_data(
        data_path="../data/Data1/train_eng.csv", testData=True)

    import time
    import torch.nn.functional as F
    from torch.utils.tensorboard import SummaryWriter

    # FIX: `writer` was used below but never created in this function,
    # which raised a NameError at the end of the first epoch.
    writer = SummaryWriter()

    start_time = time.time()

    # Hyper-parameter grid; single-element lists keep the search scaffold.
    lrs = [0.001]
    max_epochs = [50]
    loss_functions = [nn.BCEWithLogitsLoss()]
    n_layers = [200]

    for epochs in max_epochs:
        for lr in lrs:
            for loss_function in loss_functions:
                for hidden_layer_size in n_layers:
                    model = LSTM(input_size=embedding_size,
                                 hidden_layer_size=hidden_layer_size,
                                 embedding_dim=embedding_size)
                    optimizers = [torch.optim.Adam(model.parameters(), lr)]
                    for optimizer in optimizers:
                        print("MODEL")
                        for i in range(epochs):
                            correct = 0
                            total = 0
                            start_epoch = time.time()
                            for item in train_DataLoader:
                                seq = item[0]
                                label = item[1]
                                optimizer.zero_grad()
                                # Reset the recurrent state for each batch.
                                model.hidden_cell = (
                                    torch.zeros(1, 1, model.hidden_layer_size),
                                    torch.zeros(1, 1, model.hidden_layer_size))

                                y_pred = model(seq)
                                # Per-sample accuracy; assumes one-hot style
                                # labels — TODO confirm against the data.
                                for j in range(y_pred.shape[0]):
                                    predicted = np.argmax(
                                        F.softmax(y_pred[j].data))
                                    true = np.argmax(label[j].data)
                                    if (predicted == true):
                                        correct += 1
                                    total += 1
                                single_loss = loss_function(
                                    y_pred, label.float())
                                single_loss.backward()
                                optimizer.step()

                            time_per_epoch = time.time() - start_epoch
                            f_measure = f1_loss(label, y_pred)
                            writer.add_scalar("train_loss", single_loss.item(),
                                              i)
                            writer.add_scalar("train_acc",
                                              (100 * correct / total), i)
                            writer.add_scalar("train_measure", f_measure, i)
                            writer.add_scalar("train_time", time_per_epoch, i)
                            for tag, parm in model.named_parameters():
                                writer.add_histogram(
                                    tag,
                                    parm.grad.data.cpu().numpy(), i)
                            print(
                                f'epoch: {i:3} loss: {single_loss.item():10.8f}, accuracy: {(100 * correct / total)}, f-measure: {f_measure}, time = {time_per_epoch}'
                            )

                        print("MODEL TIME EXECUTION--- %s seconds ---" %
                              (time.time() - start_time))
                        print(
                            f'epoch: {i:3} loss: {single_loss.item():10.10f}')
                        print()
                        print()
    torch.save(model.state_dict(), 'model.pt')
# ---- Example 5 ----
from context import scripts
import scripts
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch
import pandas as pd

# Run on the first GPU when available, otherwise fall back to CPU.
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')

# Mini-batch size used by the data loaders.
batch_sz = 128

# Load the data splits; per the variable names these are movie features plus
# train/test rating frames — confirm against scripts.get_data.
df_movie_features, _, df_ratings, df_ratings_test = scripts.get_data(
    data_path="../data/Data1/train.csv", testData=True)


class RecommenderNet(nn.Module):
    def __init__(self,
                 n_users,
                 n_movies,
                 n_factors=50,
                 embedding_dropout=0.02,
                 dropout_rate=0.2):
        super().__init__()

        self.u = nn.Embedding(n_users, n_factors)
        self.m = nn.Embedding(n_movies, n_factors)
        self.drop = nn.Dropout(embedding_dropout)
        self.hidden1 = nn.Sequential(nn.Linear(100, 128), nn.ReLU(),
                                     nn.Dropout(dropout_rate))
        self.hidden2 = nn.Sequential(nn.Linear(128, 256), nn.ReLU(),
# ---- Example 6 ----
    eval_loss = 0
    eval_acc = 0
    model.eval()
    with torch.no_grad():
        for batch in data_batches:
            correct = 0
            total = 0
            predictions = model(batch[0]).squeeze(1)
            for j in range(predictions.shape[0]):
                predicted = np.argmax(F.softmax(predictions[j].data))
                true = np.argmax(batch[1][j].data)
                if (predicted == true):
                    correct += 1
                total += 1
            loss = loss_function(predictions, batch[1])
            eval_loss += loss.item()
            eval_acc += (correct / total)
    return eval_loss / len(data_batches), eval_acc / len(data_batches)


if __name__ == '__main__':
    # Load only the test split of the English data set.
    _, testDataLoader, embedding_size = scripts.get_data(
        data_path="../data/Data1/train_eng.csv", testData=True)

    # Restore the trained network from disk.
    model = noLSTM(input_size=embedding_size)
    model.load_state_dict(torch.load('./model.pt'))

    loss_function = nn.MSELoss()
    # NOTE(review): this optimizer is constructed but never used below.
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

    test_loss, test_acc = evaluate_model(model, testDataLoader, loss_function)
    print(f'Accuracy on test data : {test_acc * 100:.2f}%')
# ---- Example 7 ----
def main(args):
    """Train the noLSTM model, log per-epoch metrics, and save the weights.

    Args:
        args: command-line arguments (currently unused).
    """
    # load data (the second return value is the test loader)
    train_DataLoader, _ = scripts.get_data(
        data_path="../data/Data1/train_data.csv", testData=True)

    model = noLSTM()
    loss_function = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

    import time
    import torch.nn.functional as F
    from torch.utils.tensorboard import SummaryWriter

    # FIX: `writer` was used below but never created (NameError).
    writer = SummaryWriter()

    epochs = 50
    start_time = time.time()

    # Per-epoch history kept for post-hoc inspection/plotting.
    losses = []
    acc = []
    measures = []
    times = []
    i_array = []

    for i in range(epochs):
        correct = 0
        total = 0
        start_epoch = time.time()
        # FIX: the original iterated over an undefined name `loader`;
        # the loader created above is `train_DataLoader`.
        for item in train_DataLoader:
            seq = item[0]
            label = item[1]
            optimizer.zero_grad()
            # Reset the recurrent state before every batch.
            model.hidden_cell = (torch.zeros(1, 1, model.hidden_layer_size),
                                 torch.zeros(1, 1, model.hidden_layer_size))

            y_pred = model(seq)
            # Per-sample accuracy; assumes one-hot style labels — TODO confirm.
            for j in range(y_pred.shape[0]):
                predicted = np.argmax(F.softmax(y_pred[j].data))
                true = np.argmax(label[j].data)
                if (predicted == true):
                    correct += 1
                total += 1
            single_loss = loss_function(y_pred, label.float())
            single_loss.backward()
            optimizer.step()

        time_per_epoch = time.time() - start_epoch
        f_measure = f1_loss(label, y_pred)
        i_array.append(i)
        writer.add_scalar("train_loss", single_loss.item(), i)
        losses.append(single_loss.item())
        writer.add_scalar("train_acc", (100 * correct / total), i)
        acc.append((100 * correct / total))
        writer.add_scalar("train_measure", f_measure, i)
        measures.append(f_measure)
        writer.add_scalar("train_time", time_per_epoch, i)
        times.append(time_per_epoch)
        for tag, parm in model.named_parameters():
            writer.add_histogram(tag, parm.grad.data.cpu().numpy(), i)
        print(
            f'epoch: {i:3} loss: {single_loss.item():10.8f}, accuracy: {(100 * correct / total)}, f-measure: {f_measure}, time = {time_per_epoch}'
        )

    print("MODEL TIME EXECUTION--- %s seconds ---" %
          (time.time() - start_time))
    print(f'epoch: {i:3} loss: {single_loss.item():10.10f}')
    torch.save(model.state_dict(), 'model.pt')