# Example 1
def baseline_inactive(rate=LEARNING_RATE, l2_reg=0.0, activation='sigmoid'):
    """Run one baseline training pass in "inactive" mode.

    Loads the train/test split, builds a network from the module-level
    architecture constants, and returns whatever ``net.train`` produces
    when called with ``inactive=True``.
    """
    train_x, train_y, test_x, test_y = load_data()
    config = dict(
        n_hidden=len(L),
        layers=L,
        input_size=INPUT_SIZE,
        n_classes=NUM_CLASSES,
        batch_size=BATCH_SIZE,
        activation=activation,
        lr=rate,
        reg=l2_reg,
    )
    net = NeuralNet(**config)
    initial_params = net.initialize().copy()
    return net.train(train_x, train_y, test_x, test_y, initial_params,
                     inactive=True)
# Example 2
def hog_model(n=5,
              rate=LEARNING_RATE,
              l2_reg=0.0,
              activation='relu',
              aug=False,
              hog=True):
    """Train ``n`` networks on HOG features and plot the mean loss curves.

    Args:
        n: number of independent training runs to average over.
        rate: learning rate passed to ``NeuralNet``.
        l2_reg: L2 regularization strength.
        activation: hidden-layer activation name.
        aug: whether ``load_data`` should apply data augmentation
            (augmented runs record twice as many loss samples).
        hog: forwarded to ``load_data``. BUG FIX: this was previously
            hard-coded to ``True``, silently ignoring ``hog=False``.
    """
    n_samples = N_ITERATIONS * (10 if aug else 5)
    TrainLoss = np.zeros((n, n_samples))
    TestLoss = np.zeros_like(TrainLoss)
    Accuracy = np.zeros_like(TrainLoss)

    for i in range(n):
        # Honor the caller's `hog` flag instead of the hard-coded True.
        X, y, Xt, yt = load_data(aug=aug, hog=hog)
        # HOG features come back sample-major; the net expects feature-major.
        X = X.T
        Xt = Xt.T
        net = NeuralNet(n_hidden=len(L_HOG),
                        layers=L_HOG,
                        input_size=INPUT_SIZE_HOG,
                        n_classes=NUM_CLASSES,
                        batch_size=BATCH_SIZE,
                        activation=activation,
                        lr=rate,
                        reg=l2_reg)
        params = net.initialize().copy()
        y, TrainLoss[i], TestLoss[i], Accuracy[i] = net.train(
            X, y, Xt, yt, params)

    train_loss = np.mean(TrainLoss, axis=0)
    test_loss = np.mean(TestLoss, axis=0)

    plt.figure(figsize=(18, 9))
    plt.grid()
    plt.xlabel('Number of iterations')
    plt.ylabel('Loss')
    # Losses appear to be sampled every 200 iterations — TODO confirm
    # against NeuralNet.train's logging interval.
    X = np.arange(train_loss.shape[0]) * 200
    plt.plot(X, train_loss, label='train_loss')
    plt.plot(X, test_loss, label='test_loss')
    plt.legend()

    plt.show()
# Example 3
def baseline_accuracy(n=5,
                      rate=LEARNING_RATE,
                      l2_reg=0.0,
                      activation='sigmoid',
                      aug=False):
    """Estimate baseline test accuracy over ``n`` independent trainings.

    Reloads the data and retrains a fresh network each trial, then prints
    the mean accuracy and its standard deviation.

    Args:
        n: number of independent training runs to average over.
        rate: learning rate passed to ``NeuralNet``.
        l2_reg: L2 regularization strength.
        activation: hidden-layer activation name.
        aug: whether ``load_data`` should apply data augmentation.
    """
    accuracy_list = []
    for _ in range(n):
        X, y, Xt, yt = load_data(aug=aug)
        net = NeuralNet(n_hidden=len(L),
                        layers=L,
                        input_size=INPUT_SIZE,
                        n_classes=NUM_CLASSES,
                        batch_size=BATCH_SIZE,
                        activation=activation,
                        lr=rate,
                        reg=l2_reg)  # BUG FIX: l2_reg was accepted but never forwarded
        params = net.initialize().copy()
        accuracy_list.append(net.train(X, y, Xt, yt, params, metrics=True))

    accuracy_list = np.asarray(accuracy_list)
    print('Accuracy:', np.mean(accuracy_list))
    print('Standard Deviation:', np.std(accuracy_list))
# Example 4
def learning_rates():
    """Sweep learning rates 1e-4 .. 1e1 and plot the training-loss curves.

    Trains one network per rate, saves the per-rate loss/accuracy arrays
    to ``lr_*.npy``, then plots the training loss reloaded from disk.
    """
    rates = np.logspace(-4, 1, 6)
    TrainLoss = np.zeros((6, N_ITERATIONS * 5))
    TestLoss = np.zeros_like(TrainLoss)
    Accuracy = np.zeros_like(TrainLoss)

    for i, rate in enumerate(rates):
        print('Learning Rate:', rate)
        X, y, Xt, yt = load_data()
        net = NeuralNet(n_hidden=len(L),
                        layers=L,
                        input_size=INPUT_SIZE,
                        n_classes=NUM_CLASSES,
                        batch_size=BATCH_SIZE,
                        activation='sigmoid',
                        lr=rate)
        params = net.initialize().copy()
        y, TrainLoss[i], TestLoss[i], Accuracy[i] = net.train(
            X, y, Xt, yt, params)

    np.save('lr_train_loss', TrainLoss)
    np.save('lr_test_loss', TestLoss)
    np.save('lr_accuracy', Accuracy)

    arr = np.load('lr_train_loss.npy')
    plt.figure(figsize=(18, 9))
    plt.grid()
    plt.xlabel('Number of iterations')
    plt.ylabel('Training Loss')
    X = np.arange(arr.shape[1]) * 200
    for i in range(arr.shape[0]):
        # 10**(i - 4) reproduces the logspace(-4, 1, 6) value for curve i.
        plt.plot(X, arr[i], label=f'lr = {10 ** (i - 4)}')
        plt.legend()

    plt.show()
# Example 5
def _noisy_trials(n, rate, l2_reg, activation, fwd_std, bkd_std):
    """Run ``n`` training trials at the given noise levels.

    Returns the mean (train_loss, test_loss) curves across trials.
    """
    TrainLoss = np.zeros((n, N_ITERATIONS * 5))
    TestLoss = np.zeros_like(TrainLoss)
    Accuracy = np.zeros_like(TrainLoss)
    for i in range(n):
        X, y, Xt, yt = load_data()
        net = NeuralNet(n_hidden=len(L),
                        layers=L,
                        input_size=INPUT_SIZE,
                        n_classes=NUM_CLASSES,
                        batch_size=BATCH_SIZE,
                        activation=activation,
                        lr=rate,
                        reg=l2_reg,
                        fwd_std=fwd_std,
                        bkd_std=bkd_std)
        params = net.initialize().copy()
        y, TrainLoss[i], TestLoss[i], Accuracy[i] = net.train(
            X, y, Xt, yt, params)
    return np.mean(TrainLoss, axis=0), np.mean(TestLoss, axis=0)


def baseline_noisy(n=5,
                   rate=LEARNING_RATE,
                   l2_reg=0.0,
                   activation='sigmoid',
                   fwd_std=0.0,
                   bkd_std=0.0):
    """Compare noise-free training against forward- and backward-noise runs.

    Runs three batches of ``n`` trials — no noise, forward noise only,
    backward noise only — and plots the mean test-loss and train-loss
    curves for all three conditions.

    Args:
        n: trials per condition.
        rate: learning rate.
        l2_reg: L2 regularization strength.
        activation: hidden-layer activation name.
        fwd_std: forward-pass noise std for the second condition.
        bkd_std: backward-pass noise std for the third condition.
    """
    # The three conditions previously duplicated ~25 lines each; the only
    # difference between them is the (fwd_std, bkd_std) pair.
    train_loss, test_loss = _noisy_trials(
        n, rate, l2_reg, activation, 0.0, 0.0)
    train_loss_fwd, test_loss_fwd = _noisy_trials(
        n, rate, l2_reg, activation, fwd_std, 0.0)
    train_loss_bkd, test_loss_bkd = _noisy_trials(
        n, rate, l2_reg, activation, 0.0, bkd_std)

    plt.figure(figsize=(18, 9))
    plt.grid()
    plt.xlabel('Number of iterations')
    plt.ylabel('Test Loss')
    X = np.arange(train_loss.shape[0]) * 200
    plt.plot(X, test_loss, label='test_loss')
    plt.plot(X, test_loss_fwd, label='test_loss_fwd')
    plt.plot(X, test_loss_bkd, label='test_loss_bkd')
    plt.legend()

    plt.show()

    plt.figure(figsize=(18, 9))
    plt.grid()
    plt.xlabel('Number of iterations')
    plt.ylabel('Train Loss')
    X = np.arange(train_loss.shape[0]) * 200
    plt.plot(X, train_loss, label='train_loss')
    plt.plot(X, train_loss_fwd, label='train_loss_fwd')
    plt.plot(X, train_loss_bkd, label='train_loss_bkd')
    plt.legend()

    plt.show()
# Example 6
        return self.x_data[index], self.y_data[index]

    # Dataset size, so len(dataset) works (required by DataLoader).
    def __len__(self):
        # n_samples is presumably set in __init__ — not visible in this chunk.
        return self.n_samples


# Wrap the training pairs so PyTorch can batch and shuffle them.
dataset = ChatDataset()
train_loader = DataLoader(dataset=dataset,
                          batch_size=batch_size,
                          shuffle=True,
                          num_workers=0)  # 0: load batches in the main process

# Run on GPU when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

model = NeuralNet(input_size, hidden_size, output_size).to(device)

# Multi-class classification loss and Adam optimizer.
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(model.parameters(), lr=learning_rate)

# Train the model
for epoch in range(num_epochs):
    for (words, labels) in train_loader:
        # Move the batch to the same device as the model.
        words = words.to(device)
        # CrossEntropyLoss requires integer (long) class targets.
        labels = labels.to(dtype=torch.long).to(device)

        outputs = model(words)  # forward pass

        loss = criterion(outputs, labels)

        # Clear gradients accumulated from the previous step.
        # NOTE(review): loss.backward() / optimizer.step() presumably
        # follow beyond this chunk — the loop body is cut off here.
        optimizer.zero_grad()