def calculateTest(test_data_cpu,
                  test_labels_cpu,
                  numBatches,
                  classifier,
                  criterion=None,
                  printing=False,
                  lossCalc=True,
                  test_labels_onehot=None):
    ## Because of memory limitations, the test set is evaluated in batches.
    startInd = 0
    batchLen = np.shape(test_data_cpu)[0] // numBatches
    endInd = batchLen
    test_loss = 0
    total_loss = 0
    total_acc = 0

    for batch in range(numBatches):
        X_test = test_data_cpu[startInd:endInd]
        if torch.cuda.is_available():
            X_test = X_test.cuda()
        test_output = classifier(X_test)
        test_out_np = test_output.cpu().detach().numpy()
        if lossCalc:
            Y_test = test_labels_onehot[startInd:endInd]
            if torch.cuda.is_available():
                Y_test = Y_test.cuda()
            test_loss = criterion(test_output, Y_test.long()).item()
        test_acc = accuracy(test_out_np, test_labels_cpu[startInd:endInd])
        startInd = endInd
        endInd += batchLen
        total_acc += test_acc
        total_loss += test_loss

        ## detach() alone is a no-op here; drop the references so GPU memory can be freed
        del X_test
        if lossCalc:
            del Y_test

    if printing:
        print("-----------------------")
        print("test accuracy: " + str(total_acc / numBatches))
        print("-----------------------")

    return total_acc / numBatches, total_loss / numBatches
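## calculateTest and the training loops below call a two-argument
## accuracy(predictions, targets) helper that is not part of this listing
## (the three-argument accuracy(...) used in the next snippet is a different,
## file-based helper). A minimal sketch, assuming `predictions` is an
## (N, num_classes) score array and `targets` is either one-hot or a vector
## of class indices -- the real helper may differ:
import numpy as np

def accuracy(predictions, targets):
    """Fraction of samples whose arg-max prediction matches the target."""
    predicted_classes = np.argmax(predictions, axis=1)
    targets = np.asarray(targets)
    if targets.ndim > 1:
        # one-hot labels -> class indices
        targets = np.argmax(targets, axis=1)
    return np.mean(predicted_classes == targets)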
def test(testing_file, nb):
    classifiers = classifier.list_classifiers_name_id()
    accuracies = list()
    data = dict()
    classifier_0_1 = ''
    classifier_bad = ''
    for num in nb:
        for classifi in classifiers:
            if str(num) in classifi:
                if '0_1' in classifi:
                    classifier_0_1 = classifi
                if 'bad_' + str(num) in classifi:
                    classifier_bad = classifi
        print(num)
        print(classifier_0_1)
        print(classifier_bad)
        accur = accuracy(testing_file, classifier_0_1, classifier_bad)
        accuracies.append(accur)
        data[num] = accur
        print(accur)
    print(data)
    return data
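## A hypothetical call of test(), assuming classifier.list_classifiers_name_id()
## returns names such as '0_1_3' and 'bad_3'; the file name and index list below
## are placeholders, not values from the original project:
results = test('validation_data.csv', nb=[1, 2, 3])
print(results)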
def train(config):

    # Initialize the device which to run the model on
    device = torch.device(config.device)

    # Initialize the model that we are going to use
    if config.model_type == 'RNN':
        model = VanillaRNN(config.input_length + 1, config.input_dim,
                           config.num_hidden, config.num_classes, device)
    elif config.model_type == 'LSTM':
        model = LSTM(config.input_length + 1, config.input_dim,
                     config.num_hidden, config.num_classes, device)
    else:
        print("Unknown model type, please use RNN or LSTM")
        exit()

    model.store_hidden = True
    # Initialize the dataset and data loader (note the +1)
    dataset = PalindromeDataset(config.input_length + 1)
    data_loader = DataLoader(dataset, config.batch_size, num_workers=1)

    # Setup the loss and optimizer
    criterion = torch.nn.CrossEntropyLoss()
    optimizer = optim.RMSprop(model.parameters(), lr=config.learning_rate)
    accuracies = []

    for step, (batch_inputs, batch_targets) in enumerate(data_loader):

        # Only for time measurement of step through network
        t1 = time.time()

        batch_inputs = batch_inputs.to(device)
        batch_targets = batch_targets.to(device)

        optimizer.zero_grad()
        outputs = model(batch_inputs)
        loss = criterion(outputs, batch_targets)
        loss.backward()

        ############################################################################
        # QUESTION: what happens here and why?
        # The total gradient norm is rescaled so it never exceeds max_norm, which
        # guards against exploding gradients in the recurrent network. Clipping has
        # to run after backward() (so gradients exist) and before optimizer.step()
        # (so the clipped gradients are the ones applied).
        ############################################################################
        torch.nn.utils.clip_grad_norm_(model.parameters(),
                                       max_norm=config.max_norm)
        ############################################################################

        optimizer.step()
        loss = loss.item()
        outputs = outputs.cpu().detach().numpy()

        acc = accuracy(outputs, batch_targets.cpu().detach().numpy())
        accuracies.append(acc)
        grads = [
            torch.norm(t.grad).cpu().detach() for t in model.hiddenActivity
        ]

        # Just for time measurement
        t2 = time.time()
        examples_per_second = config.batch_size / float(t2 - t1)

        if step % 10 == 0:

            print(
                "[{}] Train Step {:04d}/{:04d}, Batch Size = {}, Examples/Sec = {:.2f}, "
                "Accuracy = {:.2f}, Loss = {:.3f}".format(
                    datetime.now().strftime("%Y-%m-%d %H:%M"), step,
                    config.train_steps, config.batch_size, examples_per_second,
                    acc, loss))

        if step == config.train_steps:
            # If you receive a PyTorch data-loader error, check this bug report:
            # https://github.com/pytorch/pytorch/pull/9655
            break

    print('Done training.')
    drawPlotMagn(
        grads,
        './' + str(config.model_type) + '_len:' + str(config.input_length) +
        '_lr:' + str(config.learning_rate) + '_grads_over_time.jpg',
        "Gradients over time steps with " + str(config.model_type), 1)
def train():
    """
  Performs training and evaluation of MLP model. 

  TODO:
  Implement training and evaluation of MLP model. Evaluate your model on the whole test set each eval_freq iterations.
  """

    ### DO NOT CHANGE SEEDS!
    # Set the random seeds for reproducibility
    np.random.seed(42)

    ## Prepare all functions
    # Get number of units in each hidden layer specified in the string such as 100,100
    if FLAGS.dnn_hidden_units:
        dnn_hidden_units = FLAGS.dnn_hidden_units.split(",")
        dnn_hidden_units = [
            int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units
        ]
    else:
        dnn_hidden_units = []

    # Get negative slope parameter for LeakyReLU
    neg_slope = FLAGS.neg_slope

    cifar10 = cifar10_utils.get_cifar10("./cifar10/cifar-10-batches-py")
    training_set = cifar10['train']
    test_set = cifar10['test']

    f = vars(FLAGS)
    input_size = 3 * 32 * 32
    number_of_classes = 10
    batch_size = f['batch_size']
    ### definition of architecture:
    layers = dnn_hidden_units + [number_of_classes]
    mlp = MLP(input_size, layers, number_of_classes, neg_slope,
              f['learning_rate'])
    lastEpochNum = 0
    batchCounter = 0
    epoch_acc = 0
    epoch_loss = 0

    ## preparing test data
    test_data, test_labels = test_set.images, test_set.labels
    test_data = np.reshape(test_data, (np.shape(test_data)[0], input_size))
    ### normalize
    test_data = np.subtract(test_data, np.mean(test_data, 0))
    test_data = np.divide(test_data, np.amax(test_data, 0))

    training_accuracies = []
    test_accuracies = []

    training_losses = []
    test_losses = []

    while training_set.epochs_completed <= f['max_steps']:
        if lastEpochNum != training_set.epochs_completed:

            lastEpochNum = training_set.epochs_completed
            training_acc = epoch_acc / batchCounter
            tr_loss = epoch_loss / batchCounter
            training_losses.append(tr_loss)
            training_accuracies.append(training_acc)
            print("epoch " + str(lastEpochNum) +
                  " avg accuracy on training data: " + str(training_acc))
            batchCounter = 0
            epoch_acc = 0
            epoch_loss = 0

            ## also calculate accuracy on the test data for better visualization
            test_output = mlp.forward(test_data)
            test_loss = mlp.loss.forward(test_output, test_labels)
            test_acc = accuracy(test_output, test_labels)
            test_accuracies.append(test_acc)
            test_losses.append(test_loss)

        ## evaluate on the test data every eval_freq batches
        if batchCounter % f['eval_freq'] == 0:
            test_output = mlp.forward(test_data)
            test_acc = accuracy(test_output, test_labels)
            print("-----------------------")
            print("test accuracy: " + str(test_acc))
            print("-----------------------")

        batch_data, batch_labels = training_set.next_batch(batch_size)
        batch_data_flat = np.reshape(batch_data, (batch_size, input_size))
        ### normalize
        batch_data_flat = np.subtract(batch_data_flat,
                                      np.mean(batch_data_flat, 0))
        batch_data_flat = np.divide(batch_data_flat,
                                    np.amax(batch_data_flat, 0))

        ### forward pass
        output = mlp.forward(batch_data_flat)
        loss = mlp.loss.forward(output, batch_labels)
        ## backward
        loss_gradient = mlp.loss.backward(output, batch_labels)
        mlp.backward(loss_gradient)

        acc = accuracy(output, batch_labels)
        epoch_acc += acc
        epoch_loss += loss
        batchCounter += 1

    drawPlot(training_accuracies, test_accuracies,
             './mlp-accuracies_numpy.png',
             'MLP numpy - accuracies on training and test data', 5)
    drawPlot(training_losses, test_losses, './mlp-loss_numpy.png',
             'MLP numpy - loss on training and test data', 6)
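## The training loops hand their per-epoch curves to a
## drawPlot(train_values, test_values, path, title, figure_number) helper that is
## not included in this listing. A minimal sketch of such a helper, assuming it
## simply plots both curves and saves the figure -- the original may differ:
import matplotlib.pyplot as plt

def drawPlot(train_values, test_values, path, title, fig_num):
    """Plot per-epoch training/test curves and save the figure to `path`."""
    plt.figure(fig_num)
    plt.plot(range(1, len(train_values) + 1), train_values, label='training')
    plt.plot(range(1, len(test_values) + 1), test_values, label='test')
    plt.xlabel('epoch')
    plt.title(title)
    plt.legend()
    plt.savefig(path)
    plt.close(fig_num)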
def train():
    """
  Performs training and evaluation of ConvNet model. 

  TODO:
  Implement training and evaluation of ConvNet model. Evaluate your model on the whole test set each eval_freq iterations.
  """

    ### DO NOT CHANGE SEEDS!
    # Set the random seeds for reproducibility
    np.random.seed(42)

    cifar10 = cifar10_utils.get_cifar10("./cifar10/cifar-10-batches-py")
    training_set = cifar10['train']
    test_set = cifar10['test']
    f = vars(FLAGS)
    input_size = 3 * 32 * 32
    number_of_classes = 10
    number_of_channels = 3
    batch_size = f['batch_size']
    ### definition of architecture:
    cnn = ConvNet(number_of_channels, number_of_classes)
    ## the network itself must also live on the GPU, since the batches are moved there below
    if torch.cuda.is_available():
        cnn = cnn.cuda()

    lastEpochNum = 0
    batchCounter = 0
    epoch_acc = 0
    epoch_loss = 0

    optimizer = optim.Adam(cnn.parameters(), lr=f['learning_rate'])
    criterion = torch.nn.CrossEntropyLoss()

    ## preparing test data
    print(np.shape(test_set.images))
    test_data, test_labels = test_set.images, test_set.labels

    ### normalize
    test_data = np.subtract(test_data, np.mean(test_data, 0))
    test_data = np.divide(test_data, np.amax(test_data, 0))
    ## transforming one-hot labels to class labels for loss function
    test_labels_class = np.argmax(test_labels, 1)

    ## Variable is deprecated since PyTorch 0.4; plain tensors are sufficient here
    X_test = torch.tensor(test_data, dtype=torch.float)
    Y_test = torch.tensor(test_labels_class, dtype=torch.long)

    training_accuracies = []
    test_accuracies = []
    training_losses = []
    test_losses = []
    ## training loop
    while training_set.epochs_completed <= f['max_steps']:

        ## average accuracy calculation after epoch
        if lastEpochNum != training_set.epochs_completed:
            lastEpochNum = training_set.epochs_completed
            training_acc = epoch_acc / batchCounter
            tr_loss = epoch_loss / batchCounter
            training_losses.append(tr_loss)
            training_accuracies.append(training_acc)
            print("epoch " + str(lastEpochNum) +
                  " avg accuracy on training data: " + str(training_acc))
            batchCounter = 0
            epoch_acc = 0
            epoch_loss = 0

            ## also calculate accuracy on the test data for better visualization
            test_acc, test_loss = calculateTest(X_test,
                                                test_labels,
                                                400,
                                                cnn,
                                                criterion,
                                                test_labels_onehot=Y_test)
            test_accuracies.append(test_acc)
            test_losses.append(test_loss)

        ## evaluate and print test accuracy every eval_freq batches
        if batchCounter % f['eval_freq'] == 0:
            calculateTest(X_test,
                          test_labels,
                          400,
                          cnn,
                          printing=True,
                          lossCalc=False)

        ## fetching batch and training
        batch_data, batch_labels = training_set.next_batch(batch_size)
        #batch_data_flat = np.reshape(batch_data, (batch_size, input_size))
        ### normalize
        batch_data = np.subtract(batch_data, np.mean(batch_data, 0))
        batch_data = np.divide(batch_data, np.amax(batch_data, 0))
        ## transforming one-hot labels to class labels for loss function
        batch_labels_class = np.argmax(batch_labels, 1)

        X = torch.tensor(batch_data, dtype=torch.float)
        Y = torch.tensor(batch_labels_class, dtype=torch.long)
        if torch.cuda.is_available():
            X = X.cuda()
            Y = Y.cuda()

        optimizer.zero_grad()
        outputs = cnn(X)
        loss = criterion(outputs, Y.long())
        loss.backward()
        loss = loss.item()
        optimizer.step()
        outputs = outputs.cpu().detach().numpy()

        acc = accuracy(outputs, batch_labels)
        epoch_acc += acc
        epoch_loss += loss
        batchCounter += 1

        ## detach() alone is a no-op; drop the references so GPU memory can be freed
        del X, Y

    drawPlot(training_accuracies, test_accuracies, './cnn-accuracies.png',
             'ConvNet - accuracies on training and test data', 3)
    drawPlot(training_losses, test_losses, './cnn-loss_numpy.png',
             'ConvNet - loss on training and test data', 4)
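## The MLP and ConvNet loops read hyper-parameters from a module-level FLAGS
## object, and the RNN/LSTM loop from a `config` namespace; neither is defined in
## this listing. A minimal argparse sketch whose flag names mirror the fields the
## code accesses -- every default value below is an illustrative assumption, not
## the original setting:
import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--dnn_hidden_units', type=str, default='100')
parser.add_argument('--learning_rate', type=float, default=2e-3)
parser.add_argument('--max_steps', type=int, default=10)
parser.add_argument('--batch_size', type=int, default=200)
parser.add_argument('--eval_freq', type=int, default=100)
parser.add_argument('--neg_slope', type=float, default=0.02)
FLAGS, _ = parser.parse_known_args()

## The RNN/LSTM `config` namespace would be built the same way with its own fields
## (model_type, input_length, input_dim, num_hidden, num_classes, batch_size,
## learning_rate, train_steps, max_norm, device).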