Example #1
def main():
    # Load data
    kwargs = {'num_workers': 1, 'pin_memory': True}
    train_loader = torch.utils.data.DataLoader(datasets.MNIST(
        './mnist', train=True, download=True, transform=transforms.ToTensor()),
                                               batch_size=batch_size,
                                               shuffle=True,
                                               **kwargs)
    test_loader = torch.utils.data.DataLoader(datasets.MNIST(
        './mnist', train=False, transform=transforms.ToTensor()),
                                              batch_size=batch_size,
                                              shuffle=True,
                                              **kwargs)

    # Model and optimizers
    model = Model(hidden_size, args.permute).to(device)
    model.train()

    if args.mode in ("lstm", "mlstm", "nlstm", "alstm", "plstm"):
        optim = torch.optim.RMSprop(model.parameters(), lr=args.lr, alpha=0.9)
        optim_orth = None
    else:
        non_orth_params, log_orth_params = get_parameters(model)
        optim = torch.optim.RMSprop(non_orth_params, args.lr)
        optim_orth = torch.optim.RMSprop(log_orth_params, lr=args.lr_orth)

    best_test_acc = 0.
    iters = 0
    for epoch in range(epochs):
        processed = 0
        for batch_idx, (batch_x, batch_y) in enumerate(train_loader):
            batch_x, batch_y = batch_x.to(device).view(-1, 784), batch_y.to(device)

            logits = model(batch_x)
            loss = model.loss(logits, batch_y)

            optim.zero_grad()
            if optim_orth:
                optim_orth.zero_grad()

            loss.backward()

            torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)

            optim.step()
            if optim_orth:
                optim_orth.step()

            with torch.no_grad():
                correct = model.correct(logits, batch_y)

            processed += len(batch_x)
            msg = ('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\t'
                   'Accuracy: {:.2f}%\tBest: {:.2f}%'.format(
                       epoch, processed, len(train_loader.dataset),
                       100. * batch_idx / len(train_loader), loss.item(),
                       100 * correct / len(batch_x), best_test_acc))
            print(msg)
            logger.file.write(msg)
            writer.add_scalars('train_loss', {args.mode: loss.item()}, iters)
            writer.add_scalars('train_acc',
                               {args.mode: 100 * correct / len(batch_x)},
                               iters)
            iters += 1

        model.eval()
        with torch.no_grad():
            test_loss = 0.
            correct = 0.
            for batch_x, batch_y in test_loader:
                batch_x, batch_y = batch_x.to(device).view(-1, 784), batch_y.to(device)
                logits = model(batch_x)
                test_loss += model.loss(logits, batch_y).float()
                correct += model.correct(logits, batch_y).float()

        test_loss /= len(test_loader)
        test_acc = 100 * correct / len(test_loader.dataset)
        best_test_acc = max(test_acc, best_test_acc)
        msg = ("Test set: Average loss: {:.4f}, Accuracy: {:.2f}%, "
               "Best Accuracy: {:.2f}%".format(test_loss, test_acc, best_test_acc))
        print()
        print(msg)
        print()

        logger.file.write(msg)
        writer.add_scalars('test_loss', {args.mode: test_loss}, epoch)
        writer.add_scalars('test_acc', {args.mode: test_acc}, epoch)

        model.train()

    logger.close()

    print('Best acc:')
    print(best_test_acc)

    with open("./all_results.txt", "a") as f:
        fcntl.flock(f, fcntl.LOCK_EX)
        f.write("%s\n" % args.checkpoint)
        f.write("best_acc %f\n\n" % best_test_acc)
        fcntl.flock(f, fcntl.LOCK_UN)
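
These snippets reference names defined at module level in the original files (batch_size, epochs, device, args, logger, writer) and a Model class exposing loss and correct helpers. As a minimal sketch of the interface the loop above expects, a plain-LSTM stand-in (an assumption, not the repository's actual model) could look like this:

import torch
import torch.nn as nn

class Model(nn.Module):
    # Plain-LSTM stand-in for the pixel-by-pixel (permuted) MNIST task.
    def __init__(self, hidden_size, permute=False):
        super().__init__()
        self.permute = permute
        # Fixed random pixel order for the "permuted MNIST" variant
        self.register_buffer('order', torch.randperm(784))
        self.rnn = nn.LSTM(input_size=1, hidden_size=hidden_size, batch_first=True)
        self.lin = nn.Linear(hidden_size, 10)
        self.loss_func = nn.CrossEntropyLoss()

    def forward(self, inputs):                  # inputs: (batch, 784)
        if self.permute:
            inputs = inputs[:, self.order]
        out, _ = self.rnn(inputs.unsqueeze(2))  # one pixel per timestep
        return self.lin(out[:, -1])             # classify from the last state

    def loss(self, logits, y):
        return self.loss_func(logits, y)

    def correct(self, logits, y):
        # Number of correct predictions in the batch (a 0-dim tensor)
        return logits.argmax(dim=1).eq(y).float().sum()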
Example #2
def main():
    # Load data
    kwargs = {'num_workers': 1, 'pin_memory': True}
    train_loader = torch.utils.data.DataLoader(
        TIMIT('./timit_data', mode="train"),
        batch_size=batch_size, shuffle=True, **kwargs)
    # Load test and val in one big batch
    test_loader = torch.utils.data.DataLoader(
        TIMIT('./timit_data', mode="test"),
        batch_size=400, shuffle=True, **kwargs)
    val_loader = torch.utils.data.DataLoader(
        TIMIT('./timit_data', mode="val"),
        batch_size=192, shuffle=True, **kwargs)

    # Model and optimizers
    model = Model(hidden_size).to(device)
    model.train()

    if args.mode == "lstm":
        optim = torch.optim.RMSprop(model.parameters(), lr=args.lr)
        optim_orth = None
    else:
        non_orth_params, log_orth_params = get_parameters(model)
        optim = torch.optim.Adam(non_orth_params, args.lr)
        optim_orth = torch.optim.RMSprop(log_orth_params, lr=args.lr_orth)

    best_test = 1e7
    best_validation = 1e7

    for epoch in range(epochs):
        init_time = datetime.datetime.now()
        processed = 0
        step = 1
        for batch_idx, (batch_x, batch_y, len_batch) in enumerate(train_loader):
            batch_x, batch_y, len_batch = batch_x.to(device), batch_y.to(device), len_batch.to(device)

            logits = model(batch_x)
            loss = model.loss(logits, batch_y, len_batch)

            optim.zero_grad()
            if optim_orth:
                optim_orth.zero_grad()

            loss.backward()

            optim.step()
            if optim_orth:
                optim_orth.step()

            processed += len(batch_x)
            step += 1

            print("Epoch {} [{}/{} ({:.0f}%)]\tLoss: {:.2f} "
                  .format(epoch, processed, len(train_loader.dataset),
                      100. * processed / len(train_loader.dataset), loss))

        model.eval()
        with torch.no_grad():
            # There's just one batch for test and validation
            for batch_x, batch_y, len_batch in test_loader:
                batch_x, batch_y, len_batch = batch_x.to(device), batch_y.to(device), len_batch.to(device)
                logits = model(batch_x)
                loss_test = model.loss(logits, batch_y, len_batch)

            for batch_x, batch_y, len_batch in val_loader:
                batch_x, batch_y, len_batch = batch_x.to(device), batch_y.to(device), len_batch.to(device)
                logits = model(batch_x)
                loss_val = model.loss(logits, batch_y, len_batch)

            if loss_val < best_validation:
                best_validation = loss_val
                best_test = loss_test

        print()
        print("Val:  Loss: {:.2f}\tBest: {:.2f}".format(loss_val, best_validation))
        print("Test: Loss: {:.2f}\tBest: {:.2f}".format(loss_test, best_test))
        print()

        model.train()
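
Both TIMIT loops pass len_batch, the true length of each zero-padded utterance, into model.loss so that padded frames do not contribute to the objective. A minimal sketch of such a masked loss, assuming a frame-level MSE over (batch, time, features) tensors (the repository's version may differ):

import torch

def masked_mse_loss(logits, targets, lengths):
    # logits, targets: (batch, max_len, features); lengths: (batch,)
    max_len = logits.size(1)
    # mask[i, t] is True for the first lengths[i] timesteps of sample i
    mask = torch.arange(max_len, device=lengths.device)[None, :] < lengths[:, None]
    per_frame = ((logits - targets) ** 2).sum(dim=2)  # squared error per frame
    # Average over valid frames only, ignoring the padding
    return (per_frame * mask.float()).sum() / mask.float().sum()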
Example #3
def main():
    # --- Set data params ----------------
    n_classes = 9                  # output classes: 8 data symbols + blank
    n_characters = n_classes + 1   # inputs additionally use a recall marker
    K = 10                         # number of symbols to memorize
    n_train = iterations * batch_size
    n_len = L + 2 * K              # total sequence length of the copying task

    train_x, train_y = copying_data(L, K, n_train)

    model = Model(n_classes, hidden_size).to(device)
    model.train()

    if args.mode == "lstm":
        optim = torch.optim.RMSprop(model.parameters(), lr=args.lr)
        optim_orth = None
    else:
        non_orth_params, log_orth_params = get_parameters(model)
        optim = torch.optim.RMSprop(non_orth_params, args.lr)
        optim_orth = torch.optim.RMSprop(log_orth_params, lr=args.lr_orth)

    x_onehot = torch.FloatTensor(batch_size, n_len, n_characters).to(device)

    for step in range(iterations):
        batch_x = train_x[step * batch_size:(step + 1) * batch_size].to(device)
        onehot(x_onehot, batch_x)
        batch_y = train_y[step * batch_size:(step + 1) * batch_size].to(device)

        logits = model(x_onehot)
        loss = model.loss(logits, batch_y)

        optim.zero_grad()
        if optim_orth:
            optim_orth.zero_grad()

        loss.backward()

        optim.step()
        if optim_orth:
            optim_orth.step()

        with torch.no_grad():
            accuracy = model.accuracy(logits, batch_y)

        print("Iter {}: Loss= {:.6f}, Accuracy= {:.5f}".format(
            step, loss, accuracy))

    print("Optimization Finished!")

    model.eval()
    with torch.no_grad():
        test_x, test_y = copying_data(L, K, batch_size)
        test_x, test_y = test_x.to(device), test_y.to(device)
        onehot(x_onehot, test_x)
        logits = model(x_onehot)
        loss = model.loss(logits, test_y)
        accuracy = model.accuracy(logits, test_y)
        print("Test result: Loss= {:.6f}, Accuracy= {:.5f}".format(
            loss, accuracy))
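
Example #3 is the standard copying-memory task: the network sees K symbols, waits through L blank steps, and must reproduce the symbols after a recall marker. A sketch of the two helpers it calls, under the usual construction with 8 data symbols, blank 0, and marker 9 (an assumption; the original generator may differ in details):

import torch

def copying_data(L, K, n_samples):
    # Input:  K symbols from {1..8}, L blanks, the marker 9, then K-1 blanks.
    # Target: blanks everywhere except the last K steps, which copy the symbols.
    seq = torch.randint(1, 9, (n_samples, K), dtype=torch.long)
    blanks_L = torch.zeros(n_samples, L, dtype=torch.long)
    marker = torch.full((n_samples, 1), 9, dtype=torch.long)
    blanks_K1 = torch.zeros(n_samples, K - 1, dtype=torch.long)
    x = torch.cat([seq, blanks_L, marker, blanks_K1], dim=1)  # length L + 2K
    y = torch.cat([torch.zeros(n_samples, L + K, dtype=torch.long), seq], dim=1)
    return x, y

def onehot(out, x):
    # Fill the preallocated (batch, n_len, n_characters) buffer in place.
    out.zero_()
    out.scatter_(2, x.unsqueeze(2), 1)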
Example #4
def main():
    # Load data
    kwargs = {'num_workers': 1, 'pin_memory': True}
    train_loader = torch.utils.data.DataLoader(TIMIT(args.datadir,
                                                     mode="train"),
                                               batch_size=batch_size,
                                               shuffle=True,
                                               **kwargs)
    # Load test and val in one big batch
    test_loader = torch.utils.data.DataLoader(TIMIT(args.datadir, mode="test"),
                                              batch_size=400,
                                              shuffle=True,
                                              **kwargs)
    val_loader = torch.utils.data.DataLoader(TIMIT(args.datadir, mode="val"),
                                             batch_size=192,
                                             shuffle=True,
                                             **kwargs)

    # Model and optimizers
    model = Model(hidden_size).to(device)
    model.train()

    if args.mode in ("lstm", "mlstm", "nlstm", "alstm"):
        #optim = torch.optim.RMSprop(model.parameters(), lr=args.lr, alpha=0.1, momentum=0.9)
        optim = torch.optim.Adam(model.parameters(), args.lr)
        optim_orth = None
    else:
        non_orth_params, log_orth_params = get_parameters(model)
        optim = torch.optim.Adam(non_orth_params, args.lr)
        optim_orth = torch.optim.RMSprop(log_orth_params, lr=args.lr_orth)

    best_test = 1e7
    best_validation = 1e7
    iters = 0

    for epoch in range(epochs):
        init_time = datetime.datetime.now()
        processed = 0
        step = 1
        for batch_idx, (batch_x, batch_y, len_batch) in enumerate(train_loader):
            batch_x, batch_y, len_batch = batch_x.to(device), batch_y.to(device), len_batch.to(device)

            logits = model(batch_x)
            loss = model.loss(logits, batch_y, len_batch)

            optim.zero_grad()
            if optim_orth:
                optim_orth.zero_grad()

            loss.backward()

            # torch.nn.utils.clip_grad_norm_(model.parameters(), 1.0)

            optim.step()
            if optim_orth:
                optim_orth.step()

            processed += len(batch_x)
            step += 1

            print("Epoch {} [{}/{} ({:.0f}%)]\tLoss: {:.2f} ".format(
                epoch, processed, len(train_loader.dataset),
                100. * processed / len(train_loader.dataset), loss))

            logger.file.write(
                "Epoch {} [{}/{} ({:.0f}%)]\tLoss: {:.2f} ".format(
                    epoch, processed, len(train_loader.dataset),
                    100. * processed / len(train_loader.dataset), loss))

            writer.add_scalars('train_loss', {args.mode: loss.item()}, iters)
            iters += 1

        model.eval()
        with torch.no_grad():
            # There's just one batch for test and validation
            for batch_x, batch_y, len_batch in test_loader:
                batch_x, batch_y, len_batch = batch_x.to(device), batch_y.to(device), len_batch.to(device)
                logits = model(batch_x)
                loss_test = model.loss(logits, batch_y, len_batch)

            for batch_x, batch_y, len_batch in val_loader:
                batch_x, batch_y, len_batch = batch_x.to(device), batch_y.to(device), len_batch.to(device)
                logits = model(batch_x)
                loss_val = model.loss(logits, batch_y, len_batch)

            if loss_val < best_validation:
                best_validation = loss_val
                bestval_test = loss_test
                state = {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'loss': best_validation,
                    'optimizer': optim.state_dict(),
                }
                filepath = os.path.join(args.checkpoint,
                                        'model_best_val.pth.tar')
                torch.save(state, filepath)

            if loss_test < best_test:
                best_test = loss_test
                besttest_val = loss_val
                state = {
                    'epoch': epoch + 1,
                    'state_dict': model.state_dict(),
                    'loss': best_test,
                    'optimizer': optim.state_dict(),
                }
                filepath = os.path.join(args.checkpoint,
                                        'model_best_test.pth.tar')
                torch.save(state, filepath)

        msg_val = "Val:  Loss: {:.2f}\tBest: {:.2f}\tBestVal_Test: {:.2f}".format(
            loss_val, best_validation, bestval_test)
        msg_test = "Test: Loss: {:.2f}\tBest: {:.2f}\tBestTest_Val: {:.2f}".format(
            loss_test, best_test, besttest_val)
        print()
        print(msg_val)
        print(msg_test)

        logger.file.write(msg_val)
        logger.file.write(msg_test)

        writer.add_scalars('val_loss', {args.mode: loss_val}, epoch)
        writer.add_scalars('test_loss', {args.mode: loss_test}, epoch)

        print()

        model.train()

    logger.close()

    print("BestVal: {:.2f} \tBestVal_Test: {:.2f}".format(
        best_validation, bestval_test))
    print("BestTest: {:.2f}\tBestTest_Val: {:.2f}".format(
        best_test, besttest_val))

    with open("./all_results_timit.txt", "a") as f:
        fcntl.flock(f, fcntl.LOCK_EX)
        f.write("%s\n" % args.checkpoint)
        f.write("BestVal: {:.2f} \tBestVal_Test: {:.2f}\n".format(
            best_validation, bestval_test))
        f.write("BestTest: {:.2f}\tBestTest_Val: {:.2f}\n\n".format(
            best_test, besttest_val))
        fcntl.flock(f, fcntl.LOCK_UN)
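
Example #4 saves the best-validation and best-test models as plain dictionaries. Restoring one later (for evaluation or to resume training) would look roughly like this, using the keys saved above:

import os
import torch

checkpoint = torch.load(os.path.join(args.checkpoint, 'model_best_val.pth.tar'),
                        map_location=device)
model.load_state_dict(checkpoint['state_dict'])
optim.load_state_dict(checkpoint['optimizer'])
start_epoch = checkpoint['epoch']  # the epoch after the checkpointed one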
Example #5
File: 2_mnist.py  Project: KKDeng/expRNN
def main():
    # Load data
    kwargs = {'num_workers': 1, 'pin_memory': True}
    train_loader = torch.utils.data.DataLoader(
        datasets.MNIST('./mnist', train=True, download=True, transform=transforms.ToTensor()),
        batch_size=batch_size, shuffle=True, **kwargs)
    test_loader = torch.utils.data.DataLoader(
        datasets.MNIST('./mnist', train=False, transform=transforms.ToTensor()),
        batch_size=batch_size, shuffle=True, **kwargs)

    # Model and optimizers
    model = Model(hidden_size, args.permute).to(device)
    model.train()

    if args.mode == "lstm":
        optim = torch.optim.RMSprop(model.parameters(), lr=args.lr)
        optim_orth = None
    else:
        non_orth_params, log_orth_params = get_parameters(model)
        optim = torch.optim.RMSprop(non_orth_params, args.lr)
        optim_orth = torch.optim.RMSprop(log_orth_params, lr=args.lr_orth)

    best_test_acc = 0.
    for epoch in range(epochs):
        processed = 0
        for batch_idx, (batch_x, batch_y) in enumerate(train_loader):
            batch_x, batch_y = batch_x.to(device).view(-1, 784), batch_y.to(device)

            logits = model(batch_x)
            loss = model.loss(logits, batch_y)

            optim.zero_grad()
            # Zeroing out the optim_orth is not really necessary, but we do it for consistency
            if optim_orth:
                optim_orth.zero_grad()

            loss.backward()

            optim.step()
            if optim_orth:
                optim_orth.step()

            with torch.no_grad():
                correct = model.correct(logits, batch_y)

            processed += len(batch_x)
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}\tAccuracy: {:.2f}%\tBest: {:.2f}%'.format(
                epoch, processed, len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item(), 100 * correct/len(batch_x), best_test_acc))

        model.eval()
        with torch.no_grad():
            test_loss = 0.
            correct = 0.
            for batch_x, batch_y in test_loader:
                batch_x, batch_y = batch_x.to(device).view(-1, 784), batch_y.to(device)
                logits = model(batch_x)
                test_loss += model.loss(logits, batch_y).float()
                correct += model.correct(logits, batch_y).float()

        test_loss /= len(test_loader)
        test_acc = 100 * correct / len(test_loader.dataset)
        best_test_acc = max(test_acc, best_test_acc)
        print()
        print("Test set: Average loss: {:.4f}, Accuracy: {:.2f}%, Best Accuracy: {:.2f}%"
                .format(test_loss, test_acc, best_test_acc))
        print()

        model.train()
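
Finally, every non-LSTM branch in these examples calls get_parameters to split the model's parameters in two, so that the log-parametrized orthogonal kernels are updated by their own optimizer at a separate learning rate (args.lr_orth), as in expRNN's exponential-map parametrization. A rough sketch of such a split, assuming the orthogonal layers flag their Lie-algebra parameters with a marker attribute (the repository's actual function keys off its own layer classes):

def get_parameters(model):
    # Hypothetical split: parameters flagged as living in the Lie algebra
    # (the skew-symmetric log of an orthogonal kernel) go to a dedicated
    # optimizer; everything else is trained by the ordinary one.
    log_orth_params, non_orth_params = [], []
    for param in model.parameters():
        if getattr(param, 'is_log_orth', False):  # assumed marker attribute
            log_orth_params.append(param)
        else:
            non_orth_params.append(param)
    return non_orth_params, log_orth_params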