Example #1
def train(config):
    """
    Performs training and evaluation of MLP model.
    NOTE: You should the model on the whole test set each eval_freq iterations.
    """
    # YOUR TRAINING CODE GOES HERE
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])

    train_data = CIFAR10(DATA_DIR_DEFAULT,
                         train=True,
                         download=True,
                         transform=transform)
    data_loader = DataLoader(train_data, batch_size=config.batch_size)

    model = CNN(3, 10)

    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-4)

    for step, (batch_inputs, batch_targets) in enumerate(data_loader):
        # Forward pass
        y_pred = model(batch_inputs)

        loss = criterion(y_pred, batch_targets)
        # Batch accuracy: fraction of correct class predictions, in percent
        hits = (y_pred.argmax(dim=1) == batch_targets).sum().item()
        accuracy = hits / batch_inputs.size(0) * 100

        optimizer.zero_grad()
        loss.backward()
        # Clip gradients after backward(), so the freshly computed gradients
        # are bounded; clip_grad_norm_ replaces the deprecated clip_grad_norm
        torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm=10)
        optimizer.step()

        if step % config.eval_freq == 0:
            print("loss: ", loss.item())
            print("accuracy: ", accuracy)

        if step == config.max_steps:
            # If you receive a PyTorch data-loader error, check this bug report:
            # https://github.com/pytorch/pytorch/pull/9655
            break

    print('Done training.')
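The note in the docstring asks for evaluation on the whole CIFAR-10 test set every eval_freq iterations, which the snippet above omits. A minimal sketch of such a helper, assuming a test_loader built from CIFAR10(DATA_DIR_DEFAULT, train=False, transform=transform) with the same transform as above:

def evaluate(model, test_loader):
    """Average loss and accuracy over the whole test set."""
    model.eval()
    criterion = nn.CrossEntropyLoss()
    total_loss, hits, total = 0.0, 0, 0
    with torch.no_grad():  # no gradients are needed for evaluation
        for inputs, targets in test_loader:
            outputs = model(inputs)
            total_loss += criterion(outputs, targets).item() * inputs.size(0)
            hits += (outputs.argmax(dim=1) == targets).sum().item()
            total += inputs.size(0)
    model.train()
    return total_loss / total, hits / total * 100

Inside the training loop it could be called from the existing "if step % config.eval_freq == 0" branch.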
Example #2
def main():
    # Load net
    cnn = CNN()
    loss_func = nn.MultiLabelSoftMarginLoss()
    optimizer = optim.Adam(cnn.parameters(), lr=learning_rate)
    if torch.cuda.is_available():
        cnn.cuda()
        loss_func.cuda()

    # Load data
    train_dataloader = dataset.get_train_data_loader()
    test_dataloader = dataset.get_test_data_loader()

    # Train model
    for epoch in range(num_epochs):
        cnn.train()
        for i, (images, labels) in enumerate(train_dataloader):
            images = Variable(images)
            labels = Variable(labels.float())  # MultiLabelSoftMarginLoss expects float multi-hot targets
            if torch.cuda.is_available():
                images = images.cuda()
                labels = labels.cuda()
            predict_labels = cnn(images)
            loss = loss_func(predict_labels, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
            if (i + 1) % 100 == 0:
                print("epoch:", epoch, "step:", i, "loss:", loss.item())

        # Save and test model
        if (epoch + 1) % 10 == 0:
            filename = "model" + str(epoch + 1) + ".pkl"
            torch.save(cnn.state_dict(), filename)
            cnn.eval()
            correct = 0
            total = 0
            for (image, label) in test_dataloader:
                vimage = Variable(image)
                if torch.cuda.is_available():
                    vimage = vimage.cuda()
                output = cnn(vimage)
                predict_label = ""
                for k in range(4):
                    predict_label += config.CHAR_SET[np.argmax(
                        output[0, k * config.CHAR_SET_LEN:(k + 1) *
                               config.CHAR_SET_LEN].data.cpu().numpy())]
                true_label = one_hot.vec2text(label.numpy()[0])
                total += label.size(0)
                if predict_label == true_label:
                    correct += 1
                if total % 200 == 0:
                    print(
                        'Test Accuracy of the model on the %d test images: %f %%'
                        % (total, 100 * correct / total))
            print('Test Accuracy of the model on the %d test images: %f %%' %
                  (total, 100 * correct / total))
            print("save and test model...")
    torch.save(cnn.state_dict(), "./model.pkl")  # current is model.pkl
    print("save last model")
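The test loop above runs inference with autograd enabled. A variant of the same loop factored into a helper and wrapped in torch.no_grad(), assuming the config, one_hot and dataset helpers from this example:

def evaluate(cnn, test_dataloader):
    """Test-set accuracy for the 4-character captcha model, without building a graph."""
    cnn.eval()
    correct, total = 0, 0
    with torch.no_grad():
        for image, label in test_dataloader:
            if torch.cuda.is_available():
                image = image.cuda()
            output = cnn(image)
            # Decode each of the 4 character slots exactly as in the loop above
            predict_label = "".join(
                config.CHAR_SET[np.argmax(
                    output[0, k * config.CHAR_SET_LEN:(k + 1) * config.CHAR_SET_LEN]
                    .data.cpu().numpy())]
                for k in range(4))
            true_label = one_hot.vec2text(label.numpy()[0])
            total += label.size(0)
            correct += int(predict_label == true_label)
    return 100 * correct / total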
Example #3
def train(trainloader):
    """
    Performs training and evaluation of CNN model.
    NOTE: You should the model on the whole test set each eval_freq iterations.
    """
    # YOUR TRAINING CODE GOES HERE
    n_channels = 3
    n_classes = 5

    cnn = CNN(n_channels, n_classes)

    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    cnn.to(device)

    # Loss and Optimizer
    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(cnn.parameters(), lr=LEARNING_RATE_DEFAULT)

    losses = []
    accuracies = []

    for epoch in range(MAX_EPOCHS_DEFAULT):
        timestart = time.time()
        running_loss = 0.0
        for step, (batch_x, batch_y) in enumerate(trainloader):

            # zero the parameter gradients
            optimizer.zero_grad()

            # Forward + Backward + Optimize

            batch_x, batch_y = batch_x.to(device), batch_y.to(device)

            outputs = cnn(batch_x)
            loss = criterion(outputs, batch_y)
            loss.backward()
            optimizer.step()

            running_loss += loss.item()

            if step % EVAL_FREQ_DEFAULT == EVAL_FREQ_DEFAULT - 1:
                print('[epoch: %d, step: %5d] loss: %.4f' %
                      (epoch, step, running_loss / EVAL_FREQ_DEFAULT))
                losses.append(running_loss / EVAL_FREQ_DEFAULT)
                running_loss = 0.0
                accu = accuracy(outputs, batch_y)
                accuracies.append(accu)
                print('Accuracy on the %d train images: %.3f %%' %
                      (batch_y.size(0), accu))

        # Report how long this epoch took (the stray break that stopped
        # training after the first epoch has been removed)
        print('epoch %d cost %.3f sec' % (epoch, time.time() - timestart))

    print('---Finished Training---')

    return cnn, losses, accuracies
Example #4
def main(args):
    transform = transforms.ToTensor()
    mode = args.mode
    train_loader = torch.utils.data.DataLoader(
      UrbanSound8KDataset('UrbanSound8K_train.pkl', mode),
      batch_size=args.batch_size,
      shuffle=True,
      num_workers=args.worker_count,
      pin_memory=True
    )
    val_loader = torch.utils.data.DataLoader(
     UrbanSound8KDataset('UrbanSound8K_test.pkl', mode),
     batch_size=args.batch_size,
     shuffle=True,
     num_workers=args.worker_count,
     pin_memory=True
    )

    ## Build a model based on mode
    if args.mode == 'MLMC':
        model = CNN(height=145, width=41, channels=1, class_count=10,
                    dropout=args.dropout, mode=args.mode)
    else:
        model = CNN(height=85, width=41, channels=1, class_count=10,
                    dropout=args.dropout, mode=args.mode)

    ## Redefine the criterion to be softmax cross entropy
    criterion = nn.CrossEntropyLoss()

    ## Use the AdamW optimizer: Adam with decoupled weight decay, which differs from plain L2 regularisation.
    optimizer = torch.optim.AdamW(model.parameters(), lr=args.learning_rate, weight_decay=args.weight_decay)

    log_dir = get_summary_writer_log_dir(args)
    print(f"Writing logs to {log_dir}")
    summary_writer = SummaryWriter(
            str(log_dir),
            flush_secs=5
    )
    trainer = Trainer(
        model, train_loader, val_loader, criterion, optimizer, summary_writer,
        DEVICE, args.checkpoint_path, checkpoint_frequency=args.checkpoint_frequency
    )

    trainer.train(
        args.epochs,
        args.val_frequency,
        print_frequency=args.print_frequency,
        log_frequency=args.log_frequency,
    )

    summary_writer.close()
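The Trainer class and get_summary_writer_log_dir are project helpers that are not shown here. As an illustration only, a single hypothetical training step inside such a Trainer could log its loss through the SummaryWriter created above (only SummaryWriter.add_scalar is standard torch.utils.tensorboard API; the rest is an assumption):

def train_step(model, batch, labels, criterion, optimizer, summary_writer, step):
    # One optimisation step that also records the loss for TensorBoard
    logits = model(batch)
    loss = criterion(logits, labels)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    summary_writer.add_scalar("loss/train", loss.item(), step)
    return loss.item()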
Example #5
def train(model_name='model.pkl'):
    cnn = CNN()
    cnn.train()
    print('init net')
    criterion = nn.MultiLabelSoftMarginLoss()
    optimizer = torch.optim.Adam(cnn.parameters(),
                                 lr=setting.TRAIN_LEARNING_RATE)

    # Train the Model
    train_dataloader = dataset.get_train_data_loader()
    for epoch in range(setting.TRAIN_NUM_EPOCHS):
        for i, (images, labels) in enumerate(train_dataloader):
            images = Variable(images)
            labels = Variable(labels.float())
            predict_labels = cnn(images)
            loss = criterion(predict_labels, labels)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        print('epoch: %-3s loss: %s' % (epoch, loss.item()))
    torch.save(cnn.state_dict(), model_name)  # current is model.pkl
    print('save last model')
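nn.MultiLabelSoftMarginLoss expects a float multi-hot target with the same shape as the prediction, which is why this example converts labels with .float(). A small self-contained illustration of that target format (the sizes are arbitrary):

import torch
import torch.nn as nn

loss_fn = nn.MultiLabelSoftMarginLoss()
logits = torch.randn(2, 8)        # 2 samples, 8 output units
targets = torch.zeros(2, 8)       # multi-hot float targets, not class indices
targets[0, [1, 5]] = 1.0          # positions active in sample 0
targets[1, [0, 7]] = 1.0          # positions active in sample 1
print(loss_fn(logits, targets).item())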
Example #6
def train_model(embedding_size, hidden_size, filter_width, max_or_mean,
                max_num_epochs, batch_size, learning_rate, loss_margin,
                training_checkpoint, dropout_prob, eval_batch_size):
    global load_model_path, train_data, source_questions
    global dev_data, dev_label_dict, test_data, test_label_dict
    global dev_pos_data, dev_neg_data, test_pos_data, test_neg_data, target_questions

    # Generate model
    cnn = CNN(embedding_size, hidden_size, filter_width, max_or_mean,
              dropout_prob)
    optimizer = optim.Adam(cnn.parameters(), lr=learning_rate)
    criterion = nn.MultiMarginLoss(margin=loss_margin)
    init_epoch = 1

    # Load model
    if load_model_path is not None:
        print("Loading model from \"" + load_model_path + "\"...")
        init_epoch = load_model(load_model_path, cnn, optimizer)

    # Training
    print("***************************************")
    print("Starting run with following parameters:")
    print(" --embedding size:   %d" % (cnn.input_size))
    print(" --hidden size:      %d" % (cnn.hidden_size))
    print(" --filter width:     %d" % (cnn.n))
    print(" --dropout:          %f" % (cnn.dropout_prob))
    print(" --pooling:          %s" % (cnn.max_or_mean))
    print(" --initial epoch:    %d" % (init_epoch))
    print(" --number of epochs: %d" % (max_num_epochs))
    print(" --batch size:       %d" % (batch_size))
    print(" --learning rate:    %f" % (learning_rate))
    print(" --loss margin:      %f" % (loss_margin))

    start = time.time()
    current_loss = 0

    for iter in range(init_epoch, max_num_epochs + 1):
        current_loss += train(cnn, criterion, optimizer, train_data,
                              source_questions, batch_size, 21)
        if iter % training_checkpoint == 0:
            print("Epoch %d: Average Train Loss: %.5f, Time: %s" %
                  (iter,
                   (current_loss / training_checkpoint), timeSince(start)))
            d_auc = evaluate_auc(cnn, dev_pos_data, dev_neg_data,
                                 target_questions, eval_batch_size)
            t_auc = evaluate_auc(cnn, test_pos_data, test_neg_data,
                                 target_questions, eval_batch_size)
            print("Dev AUC(0.05): %.2f" % (d_auc))
            print("Test AUC(0.05): %.2f" % (t_auc))

            current_loss = 0

            if SAVE_MODEL:
                state = {}
                state["model"] = cnn.state_dict()
                state["optimizer"] = optimizer.state_dict()
                state["epoch"] = iter
                save_model(save_model_path, "cnn_dt", state,
                           iter == max_num_epochs)

    # Compute final results
    print("-------")
    print("FINAL RESULTS:")
    d_auc = evaluate_auc(cnn, dev_pos_data, dev_neg_data, target_questions,
                         eval_batch_size)
    t_auc = evaluate_auc(cnn, test_pos_data, test_neg_data, target_questions,
                         eval_batch_size)
    print("Training time: %s" % (timeSince(start)))
    print("Dev AUC(0.05): %.2f" % (d_auc))
    print("Test AUC(0.05): %.2f" % (t_auc))

    if SAVE_MODEL:
        state = {}
        state["model"] = cnn.state_dict()
        state["optimizer"] = optimizer.state_dict()
        state["epoch"] = max_num_epochs if init_epoch < max_num_epochs else init_epoch
        save_model(save_model_path, "cnn", state, True)

    return (d_auc, t_auc)
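save_model and load_model are helpers of this project and are not shown. A minimal sketch of what they could look like, given the state dict layout built above (the file-naming scheme is an assumption):

import os
import torch

def save_model(save_dir, prefix, state, is_final):
    # state carries "model", "optimizer" and "epoch" as assembled above
    if is_final:
        name = "%s_final.pt" % prefix
    else:
        name = "%s_epoch%d.pt" % (prefix, state["epoch"])
    torch.save(state, os.path.join(save_dir, name))

def load_model(path, model, optimizer):
    state = torch.load(path)
    model.load_state_dict(state["model"])
    optimizer.load_state_dict(state["optimizer"])
    # Resume one epoch after the checkpointed one
    return state["epoch"] + 1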
Example #7
def train():
    """
    Performs training and evaluation of MLP cnn.
    NOTE: You should the cnn on the whole test set each eval_freq iterations.
    """
    # YOUR TRAINING CODE GOES HERE
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))
    ])
    train_data = datasets.CIFAR10('data',
                                  train=True,
                                  download=True,
                                  transform=transform)
    test_data = datasets.CIFAR10('data',
                                 train=False,
                                 download=True,
                                 transform=transform)
    train_on_gpu = torch.cuda.is_available()
    num_train = len(train_data)
    train_loader = torch.utils.data.DataLoader(train_data,
                                               batch_size=FLAGS.batch_size,
                                               shuffle=True,
                                               num_workers=0)
    test_loader = torch.utils.data.DataLoader(test_data,
                                              batch_size=FLAGS.batch_size,
                                              shuffle=False,
                                              num_workers=0)
    classes = [
        'airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog',
        'horse', 'ship', 'truck'
    ]
    cnn = CNN(3, 10)
    if train_on_gpu:
        cnn.cuda()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(cnn.parameters(), lr=FLAGS.learning_rate)

    for epoch in range(1, FLAGS.max_steps):
        class_correct = list(0. for i in range(10))
        class_total = list(0. for i in range(10))
        train_loss = 0.0
        test_loss = 0.0
        cnn.train()
        for data, target in train_loader:
            if train_on_gpu:
                data, target = data.cuda(), target.cuda()
            optimizer.zero_grad()
            output = cnn(data)
            loss = criterion(output, target)
            loss.backward()
            optimizer.step()
            _, pred = torch.max(output, 1)
            correct_tensor = pred.eq(target.data.view_as(pred))
            train_loss += loss.item() * data.size(0)
            # .cpu() is a no-op for tensors already on the CPU, so this works on CPU and GPU
            correct = np.squeeze(correct_tensor.cpu().numpy())
            for i in range(len(target.data)):
                label = target.data[i]
                class_correct[label] += correct[i].item()
                class_total[label] += 1

        if epoch % FLAGS.eval_freq == 0:
            test_correct = list(0. for i in range(10))
            test_total = list(0. for i in range(10))
            cnn.eval()
            for data, target in test_loader:
                if train_on_gpu:
                    data, target = data.cuda(), target.cuda()
                output = cnn(data)
                _, pred = torch.max(output, 1)
                correct_tensor = pred.eq(target.data.view_as(pred))
                correct = np.squeeze(correct_tensor.cpu().numpy())
                loss = criterion(output, target)
                test_loss += loss.item() * data.size(0)
                for i in range(len(target.data)):
                    label = target.data[i]
                    test_correct[label] += correct[i].item()
                    test_total[label] += 1

            train_loss = train_loss / len(train_loader.dataset)
            test_loss = test_loss / len(test_loader.dataset)
            plot_epoch.append(epoch)
            plot_train_loss.append(train_loss)
            plot_test_loss.append(test_loss)
            print(
                'Epoch: {} \tTraining Loss: {:.6f} \tTest Loss: {:.6f}'.format(
                    epoch, train_loss, test_loss))

            percent_train = accuracy(class_correct, class_total) * 100
            percent_test = accuracy(test_correct, test_total) * 100
            plot_train_accuracy.append(percent_train)
            plot_test_accuracy.append(percent_test)
            print('train accuracy: ', percent_train, 'test accuracy: ',
                  percent_test)

    fig1 = plt.subplot(2, 1, 1)
    fig2 = plt.subplot(2, 1, 2)
    fig1.plot(plot_epoch,
              plot_train_accuracy,
              c='red',
              label='training data accuracy')
    fig1.plot(plot_epoch,
              plot_test_accuracy,
              c='blue',
              label='test data accuracy')
    fig1.legend()
    fig2.plot(plot_epoch, plot_train_loss, c='green', label='train CE loss')
    fig2.plot(plot_epoch, plot_test_loss, c='yellow', label='test CE loss')
    fig2.legend()
    plt.show()
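The accuracy(class_correct, class_total) helper used above is not defined in this snippet. Given how both lists are filled, a plausible implementation is the overall fraction of correct predictions (an assumption; the author's helper might instead average per-class accuracies):

def accuracy(class_correct, class_total):
    # Overall accuracy across all classes; the caller multiplies by 100
    return sum(class_correct) / sum(class_total)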
Example #8
    if (model_type == "CNN"):
        # dropout_keep_prob, embedding_size, batch_size, lr, dev_size, vocabulary_size, max_document_length, input_size, hidden_size, output_dim, n_filters, filter_sizes, num_epochs = get_params(model_type)

        hidden_size = 128
        pool_size = 2
        n_filters = 128
        filter_sizes = [3, 8]
        num_epochs = 5

        to_train = True

        cnn_model = CNN(vocab_size, embedding_size, n_filters, filter_sizes,
                        pool_size, hidden_size, num_classes, dropout_keep_prob)

        # optimization algorithm
        optimizer = torch.optim.Adam(cnn_model.parameters(), lr=lr)
        # train and evaluation
        if (to_train):
            # train and evaluation
            run_train(num_epochs, cnn_model, train_iterator, valid_iterator,
                      optimizer, loss_func, model_type)

        # load weights
        cnn_model.load_state_dict(
            torch.load(os.path.join(path, "saved_weights_CNN.pt")))
        # predict
        test_loss, test_acc = evaluate(cnn_model, test_iterator, loss_func)
        print(f'Test Loss: {test_loss:.3f} | Test Acc: {test_acc * 100:.2f}%')

    if (model_type == "LSTM"):