Example #1
def train():
    """
    Performs training and evaluation of ConvNet model.

    TODO:
    Implement training and evaluation of ConvNet model. Evaluate your model on the whole test set each eval_freq iterations.
    """

    ### DO NOT CHANGE SEEDS!
    # Set the random seeds for reproducibility
    np.random.seed(42)

    # Set path to data
    data_dir = FLAGS.data_dir

    data = cifar10_utils.get_cifar10(data_dir)

    # Prepare the test set; images are stored in NCHW layout
    input_dims_test = data['test'].images.shape
    channels = input_dims_test[1]

    X_test = data["test"].images
    Y_test = data["test"].labels

    # Convert to PyTorch tensors on the target device
    X_test = torch.tensor(X_test).type(dtype).to(device)
    Y_test = torch.tensor(Y_test).type(dtype).to(device)

    model = ConvNet(n_channels=channels, n_classes=10)
    model.to(device)

    accuracy_train_log = list()
    accuracy_test_log = list()
    loss_train_log = list()
    loss_test_log = list()

    # FLAGS holds the command line arguments
    batch_size = FLAGS.batch_size
    numb_iterations = FLAGS.max_steps
    learning_rate = FLAGS.learning_rate
    evaluation_freq = FLAGS.eval_freq
    logging.info(f"learning rate: %2d " % learning_rate)

    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(model.parameters(), lr=learning_rate)

    X_train = data['train'].images
    Y_train = data['train'].labels

    X_train = torch.tensor(X_train, requires_grad=False).type(dtype).to(device)
    Y_train = torch.tensor(Y_train, requires_grad=False).type(dtype).to(device)

    for step in range(numb_iterations):

        X_batch, Y_batch = data['train'].next_batch(batch_size)

        # Convert the batch to tensors on the target device
        X_batch = torch.from_numpy(X_batch).type(dtype).to(device)
        Y_batch = torch.from_numpy(Y_batch).type(dtype).to(device)

        # Reset the gradients accumulated in the previous step
        optimizer.zero_grad()

        # CrossEntropyLoss expects class indices, so convert the one-hot labels
        targs = Y_batch.argmax(dim=1)
        outputs = model(X_batch)
        loss_current = criterion(outputs, targs)
        loss_current.backward()
        optimizer.step()

        if step % evaluation_freq == 0:
            # Evaluate without building computation graphs
            model.eval()
            with torch.no_grad():
                # Estimate train performance on random mini-batches
                list_acc = list()
                list_loss = list()
                for i in range(70):
                    selection = random.sample(range(X_train.shape[0]), 64)
                    targs_train = Y_train[selection].argmax(dim=1)
                    outputs_train = model(X_train[selection])
                    list_loss.append(criterion(outputs_train, targs_train).item())
                    list_acc.append(accuracy(outputs_train, Y_train[selection]))
                loss_train_log.append(np.mean(list_loss))
                accuracy_train_log.append(np.mean(list_acc))
                logging.info("train performance: loss = %.4f, accuracy = %.4f", loss_train_log[-1], accuracy_train_log[-1])

                # Estimate test performance on random mini-batches
                list_acc = list()
                list_loss = list()
                for i in range(15):
                    selection = random.sample(range(X_test.shape[0]), 64)
                    targs_test = Y_test[selection].argmax(dim=1)
                    outputs_test = model(X_test[selection])
                    list_loss.append(criterion(outputs_test, targs_test).item())
                    list_acc.append(accuracy(outputs_test, Y_test[selection]))
                loss_test_log.append(np.mean(list_loss))
                accuracy_test_log.append(np.mean(list_acc))
                logging.info("test performance: loss = %.4f, accuracy = %.4f\n", loss_test_log[-1], accuracy_test_log[-1])
            model.train()

            # TODO: implement early stopping?

    path = "./convnet_results_pytorch/"
    date_time = datetime.now().replace(second=0, microsecond=0).strftime(format="%Y-%m-%d-%H-%M")
    np.save(os.path.join(path, date_time + "accuracy_test"), accuracy_test_log)
    np.save(os.path.join(path, date_time + "loss_test"), loss_test_log)
    np.save(os.path.join(path, date_time + "loss_train"), loss_train_log)
    np.save(os.path.join(path, date_time + "accuracy_train"), accuracy_train_log)
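The accuracy helper called in both examples is defined elsewhere in the assignment file. A minimal sketch of what it is assumed to compute, given logits and one-hot targets (the name and signature follow the calls above; the body is an assumption):

import torch

def accuracy(predictions, targets):
    """
    Fraction of samples whose predicted class (argmax over the logits)
    matches the class encoded in the one-hot targets.

    predictions: (batch_size, n_classes) tensor of logits
    targets:     (batch_size, n_classes) one-hot tensor
    """
    predicted_classes = predictions.argmax(dim=1)
    true_classes = targets.argmax(dim=1)
    return (predicted_classes == true_classes).float().mean().item()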
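Both examples also read their hyperparameters from a module-level FLAGS object that is not shown in the listing. A minimal sketch of how it might be populated with argparse (the flag names are taken from the attribute accesses in the code; the default values are placeholders, not the assignment's):

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--learning_rate', type=float, default=1e-4,
                    help='learning rate for the Adam optimizer')
parser.add_argument('--max_steps', type=int, default=5000,
                    help='number of training iterations')
parser.add_argument('--batch_size', type=int, default=32,
                    help='mini-batch size')
parser.add_argument('--eval_freq', type=int, default=500,
                    help='evaluate every eval_freq iterations')
parser.add_argument('--data_dir', type=str, default='./cifar10',
                    help='directory that holds the CIFAR-10 data')
FLAGS, unparsed = parser.parse_known_args()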
Example #2
def train():
    """
  Performs training and evaluation of ConvNet model. 

  TODO:
  Implement training and evaluation of ConvNet model. Evaluate your model on the whole test set each eval_freq iterations.
  """
    # Run on the GPU when one is available
    cuda = torch.cuda.is_available()

    ### DO NOT CHANGE SEEDS!
    # Set the random seeds for reproducibility
    np.random.seed(42)

    # Load the CIFAR-10 data
    cifar10 = cifar10_utils.get_cifar10(FLAGS.data_dir)

    loss_function = torch.nn.CrossEntropyLoss()
    neuralnet = ConvNet(3, 10)
    if cuda:
        neuralnet.cuda()  # run on GPU
    optimizer = torch.optim.Adam(neuralnet.parameters(), lr=FLAGS.learning_rate)

    # Lists of losses and accuracies for plotting
    train_losses = []
    train_accs = []
    test_losses = []
    test_accs = []
    graph_x = []

    for i in range(FLAGS.max_steps):
        neuralnet.train()
        x, y = cifar10['train'].next_batch(FLAGS.batch_size)
        if cuda:
            x = torch.from_numpy(x).cuda()
            y = torch.from_numpy(y).cuda()
        else:
            x = torch.from_numpy(x)
            y = torch.from_numpy(y)

        # Forward pass on the training batch; the network outputs logits
        # (CrossEntropyLoss applies log-softmax internally)
        out = neuralnet(x)

        # CrossEntropyLoss expects class indices, so convert the one-hot labels
        train_loss = loss_function(out, y.argmax(dim=1))

        optimizer.zero_grad()
        train_loss.backward()
        optimizer.step()

        # Evaluate every eval_freq iterations, and on the final step
        if i % FLAGS.eval_freq == 0 or i == (FLAGS.max_steps - 1):
            neuralnet.eval()
            with torch.no_grad():
                # NOTE: this evaluates on a 1000-image sample, not the whole test set
                test_x, test_y = cifar10['test'].next_batch(1000)
                if cuda:
                    torch.cuda.empty_cache()
                    test_x = torch.from_numpy(test_x).cuda()
                    test_y = torch.from_numpy(test_y).cuda()
                else:
                    test_x = torch.from_numpy(test_x)
                    test_y = torch.from_numpy(test_y)

                train_out = neuralnet(x)  # logits for the current training batch
                test_out = neuralnet(test_x)
                if cuda:
                    torch.cuda.empty_cache()

                train_loss = loss_function(train_out, y.argmax(dim=1))
                test_loss = loss_function(test_out, test_y.argmax(dim=1))
                train_acc = accuracy(out, y)
                test_acc = accuracy(test_out, test_y)

                # .item() turns the 0-dim loss tensors into plain floats
                train_losses.append(train_loss.item())
                train_accs.append(train_acc)
                test_losses.append(test_loss.item())
                test_accs.append(test_acc)
                graph_x.append(i)

                print("iteration:", i)
                print("Test accuracy:", test_accs[-1])
                print("Test loss:", test_losses[-1])

    plt.figure()
    plt.subplot(1, 2, 1)
    plt.plot(graph_x, train_losses, label="train loss")
    plt.plot(graph_x, test_losses, label="test loss")
    plt.title('Losses')
    plt.xlabel('iteration')
    plt.legend()

    plt.subplot(1, 2, 2)
    plt.plot(graph_x, train_accs, label="train acc")
    plt.plot(graph_x, test_accs, label="test acc")
    plt.title('Accuracies')
    plt.xlabel('iteration')
    plt.legend()

    print("Final test accuracy:", test_accs[-1])
    print("Final test loss:", test_losses[-1])

    plt.savefig("conv_acc_and_loss.png")

    with open('conv_acc_and_loss.txt', 'w') as f:
        f.write("test_losses \n")
        f.write(str(test_losses) + "\n \n")
        f.write("test accs \n")
        f.write(str(test_accs))
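Both implementations evaluate on a subsample (70 random mini-batches in Example #1, a single 1000-image batch in Example #2), while the docstring asks for the whole test set. A hedged sketch of a batched full-test-set evaluation that would satisfy the TODO (the helper name is mine; it assumes the test tensors are prepared as in Example #1, with one-hot labels):

import torch

def evaluate_full_test_set(model, images, labels, criterion, batch_size=256):
    """
    Evaluate on the entire test set in mini-batches so the whole set never
    has to pass through the network at once. `images` and `labels` are the
    full test tensors, with `labels` one-hot encoded.
    """
    model.eval()
    total_loss, n_correct = 0.0, 0
    with torch.no_grad():
        for start in range(0, images.shape[0], batch_size):
            x = images[start:start + batch_size]
            targets = labels[start:start + batch_size].argmax(dim=1)
            outputs = model(x)
            # Weight the batch loss by the batch size so the overall mean is exact
            total_loss += criterion(outputs, targets).item() * x.shape[0]
            n_correct += (outputs.argmax(dim=1) == targets).sum().item()
    model.train()
    return total_loss / images.shape[0], n_correct / images.shape[0]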