def train():
    """
    Performs training and evaluation of MLP model.

    TODO:
    Implement training and evaluation of the MLP model. Evaluate your model on the whole test set every eval_freq iterations.
    """

    ### DO NOT CHANGE SEEDS!
    # Set the random seeds for reproducibility
    np.random.seed(42)

    ## Prepare all functions
    # Get number of units in each hidden layer specified in the string such as 100,100
    if FLAGS.dnn_hidden_units:
        dnn_hidden_units = FLAGS.dnn_hidden_units.split(",")
        dnn_hidden_units = [
            int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units
        ]
    else:
        dnn_hidden_units = []

    dataset = cifar10_utils.get_cifar10(DATA_DIR_DEFAULT)
    channels, height, width = dataset['train'].images.shape[1:]
    n_classes = dataset['train'].labels.shape[1]
    n_inputs = channels * height * width

    mlp = MLP(n_inputs, dnn_hidden_units, n_classes)

    if FLAGS.optimizer == 'SGD':
        optimizer = optim.SGD(mlp.parameters(), lr=FLAGS.learning_rate)
    else:
        optimizer = optim.Adam(mlp.parameters(),
                               lr=FLAGS.learning_rate,
                               weight_decay=1e-2)

    crossentropy = nn.CrossEntropyLoss()

    test_input, test_labels = dataset['test'].images, dataset['test'].labels
    test_labels = np.argmax(test_labels, axis=1)
    test_input = np.reshape(test_input, (test_input.shape[0], n_inputs))
    test_input, test_labels = torch.from_numpy(test_input), torch.from_numpy(
        test_labels).long()

    max_accuracy = 0
    best_loss = 0  # test loss recorded at the best-accuracy step
    for step in range(FLAGS.max_steps):
        inputs, labels = dataset['train'].next_batch(FLAGS.batch_size)
        labels = np.argmax(labels, axis=1)
        inputs = np.reshape(inputs, (FLAGS.batch_size, n_inputs))
        inputs, labels = torch.from_numpy(inputs), torch.from_numpy(
            labels).long()
        predictions = mlp(inputs)

        loss = crossentropy(predictions, labels)
        # clean up old gradients
        mlp.zero_grad()
        loss.backward()
        optimizer.step()

        if step % FLAGS.eval_freq == 0:
            # Evaluate on the full test set without tracking gradients
            with torch.no_grad():
                test_prediction = mlp(test_input)
                test_loss = crossentropy(test_prediction, test_labels).item()
            test_accuracy = accuracy(test_prediction, test_labels)
            if max_accuracy < test_accuracy:
                max_accuracy = test_accuracy
                best_loss = test_loss
            print("{},{:f},{:f}".format(step, test_loss, test_accuracy))

    print("max accuracy{:f}, minimum loss{:f}".format(max_accuracy, min_loss))
Example #2
def train():
    """
    Performs training and evaluation of MLP model.
    Implement training and evaluation of the MLP model. Evaluate your model on the whole test set every eval_freq iterations.
    """

    # DO NOT CHANGE SEEDS!
    # Set the random seeds for reproducibility
    np.random.seed(42)

    # Prepare all functions
    # Get number of units in each hidden layer specified in the string such as 100,100
    if FLAGS.dnn_hidden_units:
        dnn_hidden_units = FLAGS.dnn_hidden_units.split(",")
        dnn_hidden_units = [
            int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units
        ]
    else:
        dnn_hidden_units = []

    data = cifar10_utils.get_cifar10(DATA_DIR_DEFAULT)

    n_inputs = np.prod(data['train'].images.shape[1:])
    n_classes = data['train'].labels.shape[1]
    n_test = data['test'].images.shape[0]

    x_test, y_test = data['test'].next_batch(n_test)
    x_test = torch.from_numpy(x_test.reshape((n_test, n_inputs)))
    y_test = torch.from_numpy(y_test.argmax(axis=1)).long()

    net = MLP(n_inputs, dnn_hidden_units, n_classes)
    criterion = nn.CrossEntropyLoss()
    if FLAGS.optimizer == 'SGD':
        optimizer = optim.SGD(net.parameters(),
                              lr=FLAGS.learning_rate,
                              momentum=0.0)
    else:
        optimizer = optim.Adam(net.parameters(),
                               lr=FLAGS.learning_rate,
                               weight_decay=1e-2)

    losses = {'train': [], 'test': []}
    accuracies = {'train': [], 'test': []}
    eval_steps = []

    for s in range(FLAGS.max_steps):
        x, y = data['train'].next_batch(FLAGS.batch_size)
        x = torch.from_numpy(x.reshape((FLAGS.batch_size, n_inputs)))
        y = torch.from_numpy(y.argmax(axis=1)).long()

        # FORWARD, BACKWARD, AND STEP
        out = net(x)
        net.zero_grad()
        loss = criterion(out, y)
        loss.backward()
        optimizer.step()

        # Evaluation
        if s % FLAGS.eval_freq == 0 or s == FLAGS.max_steps - 1:
            eval_steps.append(s)
            # Store plain floats, not tensors, so plotting works and the
            # computation graphs are not kept alive
            losses['train'].append(loss.item())
            accuracies['train'].append(accuracy(out, y))

            with torch.no_grad():
                out_test = net(x_test)
            losses['test'].append(criterion(out_test, y_test).item())
            accuracies['test'].append(accuracy(out_test, y_test))

            print('Iter {:04d}: Test: {:.2f} ({:f}), Train: {:.2f} ({:f})'.
                  format(s, 100 * accuracies['test'][-1], losses['test'][-1],
                         100 * accuracies['train'][-1], losses['train'][-1]))

    # Plotting
    for d, n in [(accuracies, 'Accuracy'), (losses, 'Loss')]:
        plt.figure()
        plt.plot(eval_steps, d['train'], label='train')
        plt.plot(eval_steps, d['test'], label='test')
        plt.xlabel('Step')
        plt.ylabel(n)
        plt.legend()
        plt.tight_layout()
        plt.savefig('torch_' + n.lower() + '.pdf')

    print('Best testing loss: {:.2f} accuracy: {:.2f}'.format(
        np.min(losses['test']), 100 * np.max(accuracies['test'])))
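
# Every example reads its hyperparameters from a module-level FLAGS object and
# a DATA_DIR_DEFAULT constant, neither of which is shown in this listing. A
# minimal sketch of the assumed argparse setup (flag names inferred from
# usage; default values are illustrative, not the originals):
import argparse

DATA_DIR_DEFAULT = './cifar10/cifar-10-batches-py'

parser = argparse.ArgumentParser()
parser.add_argument('--dnn_hidden_units', type=str, default='100',
                    help='Comma-separated hidden layer sizes, e.g. "100,100"')
parser.add_argument('--learning_rate', type=float, default=2e-3)
parser.add_argument('--max_steps', type=int, default=1500)
parser.add_argument('--batch_size', type=int, default=200)
parser.add_argument('--eval_freq', type=int, default=100)
parser.add_argument('--optimizer', type=str, default='SGD')
FLAGS, _ = parser.parse_known_args()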
Example #3
def train():
    """
    Performs training and evaluation of MLP model.

    TODO:
    Implement training and evaluation of the MLP model. Evaluate your model on the whole test set every eval_freq iterations.
    """

    ### DO NOT CHANGE SEEDS!
    # Set the random seeds for reproducibility
    np.random.seed(42)

    ## Prepare all functions
    # Get number of units in each hidden layer specified in the string such as 100,100
    if FLAGS.dnn_hidden_units:
        dnn_hidden_units = FLAGS.dnn_hidden_units.split(",")
        dnn_hidden_units = [
            int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units
        ]
    else:
        dnn_hidden_units = []


    cifar10 = cifar10_utils.get_cifar10(DATA_DIR_DEFAULT)
    train_data = cifar10['train']
    x_test, y_test = cifar10['test'].images, cifar10['test'].labels

    nsamples_x, channels, y_size, x_size = np.shape(x_test)
    nsamples_y, n_classes = np.shape(y_test)
    input_per_image = y_size * x_size * channels

    MLP_classifier = MLP(input_per_image, dnn_hidden_units, n_classes)

    cross_entropy_loss = nn.CrossEntropyLoss()

    # Track metrics at each evaluation point
    list_train_acc = []
    list_test_acc = []
    list_train_loss = []
    list_test_loss = []
    eval_steps = []

    # Reshape and convert the test set once here, since we evaluate repeatedly during training
    x_test = torch.from_numpy(x_test.reshape((nsamples_x, input_per_image)))
    # Convert one-hot test labels to class indices for the loss and accuracy
    y_test_max = torch.from_numpy(y_test).argmax(dim=1)

    optimizer = optim.SGD(MLP_classifier.parameters(), lr=FLAGS.learning_rate)

    for step in range(FLAGS.max_steps):

        # Get a batch and flatten the images for the MLP input; the inputs
        # themselves do not need requires_grad
        x_train_batch, y_train_batch = train_data.next_batch(FLAGS.batch_size)
        x_train_batch = torch.from_numpy(
            x_train_batch.reshape((FLAGS.batch_size, input_per_image)))
        # Convert one-hot labels to class indices for CrossEntropyLoss
        y_train_batch_max = torch.from_numpy(y_train_batch).argmax(dim=1)

        # Forward pass, loss, backpropagation, and parameter update
        output = MLP_classifier(x_train_batch)
        train_loss = cross_entropy_loss(output, y_train_batch_max)
        MLP_classifier.zero_grad()
        train_loss.backward()
        optimizer.step()

        if (step % FLAGS.eval_freq) == 0 or (step == FLAGS.max_steps - 1):
            # Evaluate on the whole test set without building a graph
            with torch.no_grad():
                output_test = MLP_classifier(x_test)
            test_acc = accuracy(output_test, y_test_max)
            test_loss = cross_entropy_loss(output_test, y_test_max).item()
            list_test_acc.append(test_acc)
            list_test_loss.append(test_loss)

            train_acc = accuracy(output, y_train_batch_max)
            list_train_loss.append(train_loss.item())
            list_train_acc.append(train_acc)
            eval_steps.append(step)

    # Print and plot results; use the recorded training steps on the x-axis
    print(list_test_acc)
    print(list_test_loss)
    plt.plot(eval_steps, list_test_acc, label="Test accuracy")
    plt.plot(eval_steps, list_train_acc, label="Train accuracy")
    plt.xlabel("Step")
    plt.ylabel("Accuracy")
    plt.title("Train and test accuracies", fontsize=18, fontweight="bold")
    plt.legend()
    #plt.savefig('accuracies.png', bbox_inches='tight')
    plt.show()

    plt.plot(eval_steps, list_test_loss, label="Test loss")
    plt.plot(eval_steps, list_train_loss, label="Train loss")
    plt.xlabel("Step")
    plt.ylabel("Loss")
    plt.title("Train and test loss", fontsize=18, fontweight="bold")
    plt.legend()
    #plt.savefig('loss.png', bbox_inches='tight')
    plt.show()
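
# Each example constructs MLP(n_inputs, dnn_hidden_units, n_classes), but the
# class itself is not part of this listing. A minimal sketch of a compatible
# PyTorch module (plain Linear layers with ReLU activations are an assumption):
import torch.nn as nn

class MLP(nn.Module):
    def __init__(self, n_inputs, n_hidden, n_classes):
        super().__init__()
        sizes = [n_inputs] + list(n_hidden) + [n_classes]
        layers = []
        for i in range(len(sizes) - 1):
            layers.append(nn.Linear(sizes[i], sizes[i + 1]))
            if i < len(sizes) - 2:  # no activation after the output layer
                layers.append(nn.ReLU())
        self.net = nn.Sequential(*layers)

    def forward(self, x):
        # Returns raw logits; nn.CrossEntropyLoss applies log-softmax itself.
        return self.net(x)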