Code Example #1
File: search.py Project: sun-su/CNN
def random_search(learning_rates, num_epochs, threshold=0.42, data_aug=True):
    """
    Random search learning rates
    Args:
        learning_rates -- List of floats
        num_epochs -- Boolean, number of epochs to run
        threshold -- Float, whether to continue training after the first epoch
    Return:
        Just print best
    """
    best_val_acc = -1
    best_lr = -1
    val_accs = []
    lrs = []
    
    if data_aug:
        train_generator, X_test, y_test = get_cifar10()
    else:
        X_train, y_train, X_test, y_test = get_cifar10(data_augmentation=False)
    
    for count, lr in enumerate(learning_rates):
        start = time.time()
        print('Count:', count+1)
        print('learning_rate: {:.4e}'.format(lr))
        
        # Create a model
        model = inception_v1((32, 32, 3), 10)
        opt = keras.optimizers.Adam(lr, decay=1e-6)
        model.compile(opt, loss='categorical_crossentropy', metrics=['accuracy'])
        
        if data_aug:
            h = model.fit_generator(train_generator, epochs=1, verbose=1)
            if h.history['acc'][0] > threshold:
                model.fit_generator(train_generator, epochs=num_epochs-1, verbose=1)
        else:
            h = model.fit(X_train, y_train, batch_size=128, epochs=1, verbose=1)
            if h.history['acc'][0] > threshold:
                h = model.fit(X_train, y_train, batch_size=128, epochs=num_epochs-1, verbose=1)
        
        val_res = model.evaluate(X_test, y_test, batch_size=128, verbose=1)
        print('Validation loss: %.4f, accuracy: %.4f' % (val_res[0], val_res[1]))
        if best_val_acc < val_res[1]:
            best_val_acc = val_res[1]
            best_lr = lr
        val_accs.append(val_res[1])  
        lrs.append(lr)
        print('Time: %.2f\n' % (time.time()-start))
    print('Best val_accuracy: %.4f, learning_rate: %.4e\n' % (best_val_acc, best_lr))
    
    for item in sorted(list(zip(lrs, val_accs)), key=lambda x: x[1], reverse=True):
        print('learning_rate: %.4e, val_accuracy: %.4f' % (item[0], item[1]))
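The learning rates fed to random_search are usually drawn on a log scale. A minimal usage sketch, assuming the function above and its dependencies are importable; the range and sample count are illustrative:

import numpy as np

# Sample five candidate learning rates log-uniformly between 1e-4 and 1e-2.
learning_rates = list(10 ** np.random.uniform(-4, -2, size=5))

# Each candidate trains for one epoch and only continues if its accuracy beats the threshold.
random_search(learning_rates, num_epochs=5, threshold=0.42, data_aug=True)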
Code Example #2
File: cifar10_resnet.py Project: sun-su/CNN
def main():
    if DATA_AUGMENTATION:
        print('Using data augmentation.')
        train_generator, X_test, y_test = get_cifar10(batch_size=BATCH_SIZE,
                                                      center=True,
                                                      normalization=True,
                                                      data_augmentation=True)
    else:
        print('Not using data augmentation.')
        (X_train, y_train), (X_test,
                             y_test) = get_cifar10(batch_size=BATCH_SIZE,
                                                   center=True,
                                                   normalization=True,
                                                   data_augmentation=False)

    # Create a ResNet-20 model
    model = ResNet_20(input_shape=(32, 32, 3),
                      n_classes=10,
                      data_format='channels_last',
                      initializer='he_normal',
                      regularizer=l2(REGULARIZATION))

    opt = keras.optimizers.Adam(LEARNING_RATE, decay=1e-6)
    model.compile(optimizer=opt,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # Callbacks
    hists = History()
    es = keras.callbacks.EarlyStopping(monitor='val_acc', patience=15)
    reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_acc',
                                                  factor=0.1,
                                                  patience=10,
                                                  verbose=1)
    callbacks = [hists, es, reduce_lr]

    if DATA_AUGMENTATION:
        model.fit_generator(train_generator,
                            epochs=EPOCHS,
                            validation_data=(X_test, y_test),
                            callbacks=callbacks)
    else:
        model.fit(X_train,
                  y_train,
                  batch_size=BATCH_SIZE,
                  epochs=EPOCHS,
                  validation_data=(X_test, y_test),
                  callbacks=callbacks)

    hists.plot()
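History here is a project-specific callback; keras.callbacks.History itself has no plot method. A minimal sketch of a callback with the same interface, assuming matplotlib is available (an illustration, not the project's actual implementation):

import keras
import matplotlib.pyplot as plt

class History(keras.callbacks.Callback):
    """Record per-epoch metrics and plot them after training."""

    def on_train_begin(self, logs=None):
        self.history = {}

    def on_epoch_end(self, epoch, logs=None):
        for key, value in (logs or {}).items():
            self.history.setdefault(key, []).append(value)

    def plot(self):
        for key, values in self.history.items():
            plt.plot(values, label=key)
        plt.xlabel('epoch')
        plt.legend()
        plt.show()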
Code Example #3
def main():
    if DATA_AUGMENTATION:
        print('Using data augmentation.')
        train_generator, X_test, y_test = get_cifar10(batch_size=BATCH_SIZE,
                                                      center=True,
                                                      normalization=True,
                                                      data_augmentation=True)
    else:
        print('Not using data augmentation.')
        (X_train, y_train), (X_test,
                             y_test) = get_cifar10(batch_size=BATCH_SIZE,
                                                   center=True,
                                                   normalization=True,
                                                   data_augmentation=False)

    model = inception_v1(input_shape=(32, 32, 3),
                         n_classes=10,
                         initializer=KERNEL_INITIALIZER)
    model.summary()
    optimizer = keras.optimizers.Adam(LEARNING_RATE, decay=1e-6)
    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # Prepare callbacks for model saving and for learning rate adjustment.
    hists = History()
    es = keras.callbacks.EarlyStopping(monitor='val_acc', patience=15)
    lr_reducer = keras.callbacks.ReduceLROnPlateau(monitor='val_acc',
                                                   factor=0.1,
                                                   cooldown=0,
                                                   patience=10,
                                                   verbose=1)
    callbacks = [hists]  # note: es and lr_reducer are created above but not added to this list

    if DATA_AUGMENTATION:
        model.fit_generator(train_generator,
                            epochs=EPOCHS,
                            validation_data=(X_test, y_test),
                            callbacks=callbacks)
    else:
        model.fit(X_train,
                  y_train,
                  batch_size=BATCH_SIZE,
                  epochs=EPOCHS,
                  validation_data=(X_test, y_test),
                  callbacks=callbacks)

    hists.plot()
Code Example #4
def train():

  """
  Performs training and evaluation of ConvNet model. 

  """

  # Set the random seeds for reproducibility
  np.random.seed(42)

  cifar10 = cifar10_utils.get_cifar10('cifar10/cifar-10-batches-py')
  myConvNet = ConvNet(3, 10)
  loss_criterion = nn.CrossEntropyLoss()
  optimizer = optim.Adam(myConvNet.parameters())
  accuracies = {'train': [], 'test': []}
  loss_curve = {'train': [], 'test': []}


  for j in range(FLAGS.max_steps):
    x, y = cifar10['train'].next_batch(FLAGS.batch_size)
    x = torch.from_numpy(x).contiguous()
    y = torch.from_numpy(y).contiguous()
    optimizer.zero_grad()
    outputs = myConvNet(x)
    loss = loss_criterion(outputs, torch.argmax(y, 1))
    loss.backward()
    optimizer.step()


    if j % FLAGS.eval_freq == 0:
      accuracies['train'].append(accuracy(outputs.detach().numpy(), y.detach().numpy()))
      loss_curve['train'].append(loss.detach().numpy())


      x, y = cifar10['test'].images, cifar10['test'].labels
      x = torch.from_numpy(x)
      y = torch.from_numpy(y)
      x = x[:1000]
      y = y[:1000]
      outputs = myConvNet(x)
      loss = loss_criterion(outputs, torch.argmax(y, 1))
      loss_curve['test'].append(loss.detach().numpy())
      print(j)
      print(accuracy(outputs.detach().numpy(), y.detach().numpy()))

      accuracies['test'].append(accuracy(outputs.detach().numpy(), y.detach().numpy()))

  accuracies['train'].append(accuracy(outputs.detach().numpy(), y.detach().numpy()))
  loss_curve['train'].append(loss.detach().numpy())
  x, y = cifar10['test'].images, cifar10['test'].labels
  x = torch.from_numpy(x)
  y = torch.from_numpy(y)
  x= x[:1000]
  y = y[:1000]
  outputs = myConvNet(x)
  loss = loss_criterion(outputs, torch.argmax(y, 1))
  loss_curve['test'].append(loss.detach().numpy())
  print(accuracy(outputs.detach().numpy(), y.detach().numpy()))
  accuracies['test'].append(accuracy(outputs.detach().numpy(), y.detach().numpy()))
  plot_results(accuracies, loss_curve)
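The accuracy helper used throughout these snippets is not shown. A minimal NumPy sketch consistent with how it is called here (class scores against one-hot labels); treat it as an assumption, not the assignment's actual implementation:

import numpy as np

def accuracy(predictions, targets):
    # Fraction of samples whose highest-scoring class matches the one-hot target.
    return float(np.mean(np.argmax(predictions, axis=1) == np.argmax(targets, axis=1)))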
Code Example #5
def main(argv):
    if DATA_AUGMENTATION:
        print('Using data augmentation.')
        train_generator, X_test, y_test = get_cifar10(batch_size=BATCH_SIZE,
                                                      center=True,
                                                      normalization=True,
                                                      data_augmentation=True)
    else:
        print('Not using data augmentation.')
        (X_train, y_train), (X_test,
                             y_test) = get_cifar10(batch_size=BATCH_SIZE,
                                                   center=True,
                                                   normalization=True,
                                                   data_augmentation=False)

    # Create a keras model
    model = my_model(reg=REGULARIZATION, bn=BATCH_NORMALIZATION)
    opt = keras.optimizers.Adam(LEARNING_RATE, decay=1e-6)
    model.compile(optimizer=opt,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    # Callbacks
    hists = History()
    es = keras.callbacks.EarlyStopping(monitor='val_acc', patience=15)
    reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_acc',
                                                  factor=0.1,
                                                  patience=10,
                                                  verbose=1)
    lr_scheduler = keras.callbacks.LearningRateScheduler(lr_schedule)
    callbacks = [hists, lr_scheduler]

    if DATA_AUGMENTATION:
        model.fit_generator(train_generator,
                            epochs=EPOCHS,
                            validation_data=(X_test, y_test),
                            callbacks=callbacks)
    else:
        model.fit(X_train,
                  y_train,
                  batch_size=BATCH_SIZE,
                  epochs=EPOCHS,
                  validation_data=(X_test, y_test),
                  callbacks=callbacks)

    hists.plot()
Code Example #6
def train():
    """
    Performs training and evaluation of MLP model.
    """

    ### DO NOT CHANGE SEEDS!
    # Set the random seeds for reproducibility
    np.random.seed(42)

    ## Prepare all functions
    # Get number of units in each hidden layer specified in the string such as 100,100
    if FLAGS.dnn_hidden_units:
        dnn_hidden_units = FLAGS.dnn_hidden_units.split(",")
        dnn_hidden_units = [
            int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units
        ]
    else:
        dnn_hidden_units = []

    ########################
    # PUT YOUR CODE HERE  #
    #######################

    # set up the data
    cifar10 = cifar10_utils.get_cifar10(FLAGS.data_dir)
    test_images, test_labels = cifar10['test'].images, cifar10['test'].labels
    test_vectors = reshape_images(test_images)

    # set up the model
    mlp_model = MLP(3072, dnn_hidden_units, 10)
    loss_module = CrossEntropyModule()

    accuracies = []
    losses = []
    for i in range(FLAGS.max_steps):
        images, labels = cifar10['train'].next_batch(FLAGS.batch_size)
        image_vectors = reshape_images(images)

        # forward pass
        model_pred = mlp_model.forward(image_vectors)

        # backward pass
        loss = loss_module.forward(model_pred, labels)
        loss_grad = loss_module.backward(model_pred, labels)
        mlp_model.backward(loss_grad)

        # update all weights and biases
        mlp_model.update(FLAGS.learning_rate)

        # evaluate the model on the data set every eval_freq steps
        if i % FLAGS.eval_freq == 0:
            test_pred = mlp_model.forward(test_vectors)
            test_accuracy = accuracy(test_pred, test_labels)
            accuracies.append(test_accuracy)
            losses.append(loss)

    plot_curve(accuracies, 'Accuracy')
    plot_curve(losses, 'Loss')
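The FLAGS object used in this and the following snippets is parsed elsewhere in each script. A hedged argparse sketch of the flags these training loops rely on; the defaults are illustrative, not the original values:

import argparse

parser = argparse.ArgumentParser()
parser.add_argument('--dnn_hidden_units', type=str, default='100',
                    help='Comma-separated hidden layer sizes, e.g. "100,100"')
parser.add_argument('--learning_rate', type=float, default=2e-3)
parser.add_argument('--max_steps', type=int, default=1500)
parser.add_argument('--batch_size', type=int, default=200)
parser.add_argument('--eval_freq', type=int, default=100)
parser.add_argument('--data_dir', type=str, default='cifar10/cifar-10-batches-py')
FLAGS, unparsed = parser.parse_known_args()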
Code Example #7
def train():
    """
    Performs training and evaluation of MLP model.

    TODO:
    Implement training and evaluation of MLP model. Evaluate your model on the whole test set each eval_freq iterations.
    """

    ### DO NOT CHANGE SEEDS!
    # Set the random seeds for reproducibility
    np.random.seed(42)

    ## Prepare all functions
    # Get number of units in each hidden layer specified in the string such as 100,100
    if FLAGS.dnn_hidden_units:
        dnn_hidden_units = FLAGS.dnn_hidden_units.split(",")
        dnn_hidden_units = [
            int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units
        ]
    else:
        dnn_hidden_units = []

    dataset = cifar10_utils.get_cifar10(DATA_DIR_DEFAULT)
    a, b, c = dataset['train'].images.shape[1:]
    n_classes = dataset['train'].labels.shape[1]
    n_inputs = a * b * c

    mlp = MLP(n_inputs, dnn_hidden_units, n_classes)
    crossentropy = CrossEntropyModule()

    test_input, test_labels = dataset['test'].images, dataset['test'].labels
    test_input = np.reshape(test_input, (test_input.shape[0], n_inputs))

    for step in range(FLAGS.max_steps):
        input, labels = dataset['train'].next_batch(FLAGS.batch_size)
        input = np.reshape(input, (FLAGS.batch_size, n_inputs))
        predictions = mlp.forward(input)

        dL = crossentropy.backward(predictions, labels)
        mlp.backward(dL)

        for layer in mlp.layers:
            if (layer.__class__ == LinearModule):
                layer.params[
                    'weight'] -= FLAGS.learning_rate * layer.grads['weight']
                layer.params[
                    'bias'] -= FLAGS.learning_rate * layer.grads['bias']

        loss = crossentropy.forward(predictions, labels)
        if (step % FLAGS.eval_freq == 0):
            test_prediction = mlp.forward(test_input)
            test_loss = crossentropy.forward(test_prediction, test_labels)
            test_accuracy = accuracy(test_prediction, test_labels)
            sys.stdout = open(
                str(FLAGS.dnn_hidden_units) + '_' + str(FLAGS.learning_rate) +
                '_' + str(FLAGS.max_steps) + '_' + str(FLAGS.batch_size) +
                '_' + str(FLAGS.batch_size) + '_mlp_numpy.csv', 'a')
            print("{},{:f},{:f}".format(step, test_loss, test_accuracy))
Code Example #8
def standard_cifar10_get(FLAGS):
    if FLAGS.train_model == 'siamese':
        cifar10 = cifar10_siamese_utils.get_cifar10(FLAGS.data_dir)
    else:
        cifar10 = cifar10_utils.get_cifar10(FLAGS.data_dir)
    cifar10 = wrap_cifar(cifar10, FLAGS)
    image_shape = cifar10.train.images.shape[1:4]
    num_classes = cifar10.test.labels.shape[1]
    return cifar10, image_shape, num_classes
Code Example #9
def train():
  """
  Performs training and evaluation of ConvNet model. 

  TODO:
  Implement training and evaluation of ConvNet model. Evaluate your model on the whole test set each eval_freq iterations.
  """

  ### DO NOT CHANGE SEEDS!
  # Set the random seeds for reproducibility
  np.random.seed(42)

  ########################
  # PUT YOUR CODE HERE  #
  #######################
  device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

  cifar10 = cifar10_utils.get_cifar10()
  n_channels = 3
  n_classes = 10
  nr_iterations = FLAGS.max_steps+1
  convnet = ConvNet(n_channels, n_classes).to(device)

  optimizer = optim.Adam(convnet.parameters(), lr=FLAGS.learning_rate)
  loss = nn.CrossEntropyLoss()

  accuracies_test = []
  accuracies_train = []
  losses = []

  for i in range(nr_iterations):
    x, y = cifar10['train'].next_batch(FLAGS.batch_size)  # (batch_size, 3, 32, 32) (batch_size, 10)
    x = torch.from_numpy(x).to(device)
    y = torch.from_numpy(y).type(torch.LongTensor).to(device)
    _, y_target = y.max(1)

    optimizer.zero_grad()
    prediction = convnet(x)

    cross_entropy_loss = loss(prediction, y_target)
    losses.append(cross_entropy_loss.item())
    cross_entropy_loss.backward()
    optimizer.step()

    del x, y_target

    if i % FLAGS.eval_freq == 0:
      x_test, y_test = cifar10['test'].images, cifar10['test'].labels
      x_test = torch.from_numpy(x_test).to(device)
      y_test = torch.from_numpy(y_test).type(torch.LongTensor).to(device)
      pred_test = convnet(x_test)
      acc_test = accuracy(pred_test, y_test)
      acc_train = accuracy(prediction, y)
      accuracies_test.append(acc_test )
      accuracies_train.append(acc_train)
      print('accuracy at step', i, ': ', acc_test)
      del x_test, y_test, pred_test, prediction
Code Example #10
def train():
    """
  Performs training and evaluation of ConvNet model. 

  TODO:
  Implement training and evaluation of ConvNet model. Evaluate your model on the whole test set each eval_freq iterations.
  """

    ### DO NOT CHANGE SEEDS!
    # Set the random seeds for reproducibility
    np.random.seed(42)

    ########################
    # PUT YOUR CODE HERE  #
    #######################
    glb.net = ConvNet([100], 10)
    glb.net.cuda()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.Adam(glb.net.parameters(), lr=LEARNING_RATE_DEFAULT)
    ##optimizer = optim.Adagrad(glb.net.parameters(),lr=0.01,weight_decay=0.001)
    entropy_sum_list = []
    entropies = []
    accuracies = []
    cifar10 = cifar10_utils.get_cifar10(
        '/home/vik1/Downloads/subj/deep_learning/uvadlc_practicals_2018/assignment_1/code/cifar10/cifar-10-batches-py'
    )
    running_loss = 0
    for i in range(1, MAX_STEPS_DEFAULT + 1):
        x, y = cifar10['train'].next_batch(BATCH_SIZE_DEFAULT)
        ##x = x.reshape((BATCH_SIZE_DEFAULT, 32 * 32 * 3))
        x = torch.from_numpy(x)
        x = x.cuda()
        y = torch.from_numpy(y)
        y = y.type(torch.LongTensor)
        y = y.cuda()
        optimizer.zero_grad()
        # forward + backward + optimize
        outputs = glb.net(x)
        loss = criterion(outputs, torch.max(y, 1)[1])
        loss.backward()
        optimizer.step()
        # print statistics
        running_loss += loss.item()
        if (i % EVAL_FREQ_DEFAULT == 0):
            acc = test("test", 312)
            print(i, running_loss, acc)
            entropy_sum_list.append(running_loss)
            running_loss = 0
            accuracies.append(acc)
    print(entropy_sum_list)
    print(accuracies)
    plt.plot(entropy_sum_list, 'r-')
    plt.show()
    plt.close()
    plt.plot(accuracies, 'r-')
    plt.show()
    print("done")
Code Example #11
def train():
    """
    Performs training and evaluation of ConvNet model.

    Implement training and evaluation of ConvNet model. Evaluate your model on the whole test set each eval_freq iterations.
    """

    ### DO NOT CHANGE SEEDS!
    # Set the random seeds for reproducibility
    np.random.seed(42)

    dataset = cifar10_utils.get_cifar10(DATA_DIR_DEFAULT)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    a, b, c = dataset['train'].images.shape[1:]
    n_classes = dataset['train'].labels.shape[1]

    cnn = ConvNet(3, n_classes).to(device)
    optimizer = optim.Adam(cnn.parameters(), lr=FLAGS.learning_rate)
    crossentropy = nn.CrossEntropyLoss()

    n_test = dataset['test'].images.shape[0]

    for step in range(FLAGS.max_steps):
        input, labels = dataset['train'].next_batch(FLAGS.batch_size)
        labels = np.argmax(labels, axis=1)
        input, labels = torch.from_numpy(input).to(device), torch.from_numpy(
            labels).long().to(device)
        predictions = cnn.forward(input)

        loss = crossentropy(predictions, labels)
        # clean up old gradients
        cnn.zero_grad()
        loss.backward()
        optimizer.step()

        if (step == FLAGS.max_steps - 1 or step % FLAGS.eval_freq == 0):

            test_loss = []
            test_accuracy = []
            for i in range(0, n_test, FLAGS.batch_size):
                test_input, test_labels = dataset['test'].next_batch(
                    FLAGS.batch_size)
                test_input = torch.from_numpy(test_input).to(device)
                test_labels = torch.from_numpy(np.argmax(
                    test_labels, axis=1)).long().to(device)
                test_prediction = cnn.forward(test_input)
                test_loss.append(
                    crossentropy(test_prediction, test_labels).item())
                test_accuracy.append(accuracy(test_prediction, test_labels))

            sys.stdout = open(
                str(FLAGS.learning_rate) + '_' + str(FLAGS.max_steps) + '_' +
                str(FLAGS.batch_size) + '_' + str(FLAGS.batch_size) +
                'conv.txt', 'a')
            print("{},{:f},{:f}".format(step, np.mean(test_loss),
                                        np.mean(test_accuracy)))
Code Example #12
def train():
    """
  Performs training and evaluation of MLP model. 

  TODO:
  Implement training and evaluation of MLP model. Evaluate your model on the whole test set each eval_freq iterations.
  """

    ### DO NOT CHANGE SEEDS!
    # Set the random seeds for reproducibility
    np.random.seed(42)

    ## Prepare all functions
    # Get number of units in each hidden layer specified in the string such as 100,100
    if FLAGS.dnn_hidden_units:
        dnn_hidden_units = FLAGS.dnn_hidden_units.split(",")
        dnn_hidden_units = [
            int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units
        ]
    else:
        dnn_hidden_units = []

    ########################
    # PUT YOUR CODE HERE  #
    #######################
    data = cifar10_utils.get_cifar10(FLAGS.data_dir)
    n_inputs = 3 * 32 * 32
    n_classes = 10
    model = MLP(n_inputs, dnn_hidden_units, n_classes)
    loss_fn = CrossEntropyModule()
    max_accuracy = 0.0
    start_time = time.perf_counter()
    for step in range(1, FLAGS.max_steps + 1):
        x, targets = data['train'].next_batch(FLAGS.batch_size)
        input = x.reshape((FLAGS.batch_size, -1))
        predictions = model.forward(input)
        gradient = loss_fn.backward(predictions, targets)
        model.backward(gradient)
        model.step(FLAGS.learning_rate)
        if step == 1 or step % FLAGS.eval_freq == 0:
            training_loss = loss_fn.forward(predictions, targets)
            test_predictions = model.forward(data['test'].images.reshape(
                data['test'].num_examples, -1))
            test_loss = loss_fn.forward(test_predictions, data['test'].labels)
            test_acc = accuracy(test_predictions, data['test'].labels)
            if test_acc > max_accuracy:
                max_accuracy = test_acc
            print(
                "step %d/%d: training loss: %.3f test loss: %.3f accuracy: %.1f%%"
                % (step, FLAGS.max_steps, training_loss, test_loss,
                   test_acc * 100))

    time_taken = time.perf_counter() - start_time
    print("Done. Scored %.1f%% in %.1f seconds." %
          (max_accuracy * 100, time_taken))
Code Example #13
def train():
  """
  Performs training and evaluation of MLP model. Evaluate your model on the whole test set each 100 iterations.
  """
  ### DO NOT CHANGE SEEDS!
  # Set the random seeds for reproducibility
  np.random.seed(42)

  ## Prepare all functions
  # Get number of units in each hidden layer specified in the string such as 100,100
  if FLAGS.dnn_hidden_units:
    dnn_hidden_units = FLAGS.dnn_hidden_units.split(",")
    dnn_hidden_units = [int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units]
  else:
    dnn_hidden_units = []

  ########################
  # PUT YOUR CODE HERE  #
  #######################
  batch_size = FLAGS.batch_size
  # Get cifar data
  print('Get cifar data')
  cifar10 = cifar10_utils.get_cifar10('cifar10/cifar-10-batches-py')
  x, y = cifar10.train.next_batch(batch_size)
  #x = x.flatten()
  x = x.reshape(batch_size, -1)
  input_dimensions = x.shape[1]
  n_classes = y.shape[1]
  print('Data obtained')

  # Initialize MLP instance
  MLP = mlp_numpy.MLP(dnn_hidden_units, n_classes, input_dimensions, batch_size, FLAGS.weight_reg_strength, FLAGS.weight_init_scale)

  # Train
  #FLAGS.max_steps = 7
  for step in range(FLAGS.max_steps):
    logits = MLP.inference(x)
    #print(logits)
    loss, logits   = MLP.loss(logits, y)
    #print(logits)
    MLP.train_step(loss, FLAGS, logits, y)
    
    if step%10 == 0:
      # Calculate accuracy
      print(str(step) + ": " + str(MLP.accuracy(logits,y)) + ",\tloss: " + str(loss))

    x, y = cifar10.train.next_batch(batch_size)
    x = x.reshape(batch_size, -1)
    
    if (step + 1) == FLAGS.max_steps:
      # Load test data
      test_x, test_y = cifar10.test.images, cifar10.test.labels
      test_x = test_x.reshape(test_x.shape[0], -1)
      logits = MLP.inference(test_x)
      print("Accuracy on test data: " + str(MLP.accuracy(logits,test_y)))
Code Example #14
def train():
    """
  Performs training and evaluation of ConvNet model. 

  TODO:
  Implement training and evaluation of ConvNet model. Evaluate your model on the whole test set each eval_freq iterations.
  """

    ### DO NOT CHANGE SEEDS!
    # Set the random seeds for reproducibility
    np.random.seed(42)
    torch.manual_seed(42)

    ########################
    # PUT YOUR CODE HERE  #
    #######################
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    data = cifar10_utils.get_cifar10(FLAGS.data_dir)
    n_inputs = 3 * 32 * 32
    n_classes = 10
    batches_per_epoch = (int)(data['test'].images.shape[0] /
                              FLAGS.batch_size)  # need this for test set
    model = ConvNet(n_inputs, n_classes).to(device)
    loss_fn = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters())
    max_accuracy = 0.0
    start_time = time.perf_counter()
    for step in range(1, FLAGS.max_steps + 1):
        x, y = get_batch(data, 'train', FLAGS.batch_size, device)
        predictions = model.forward(x)
        training_loss = loss_fn(predictions, y.argmax(dim=1))
        optimizer.zero_grad()
        training_loss.backward()
        optimizer.step()
        if step == 1 or step % FLAGS.eval_freq == 0:
            with torch.no_grad():
                test_loss = 0
                test_acc = 0
                for test_batch in range(batches_per_epoch):
                    x, y = get_batch(data, 'test', FLAGS.batch_size, device)
                    predictions = model(x)
                    test_loss += loss_fn(predictions,
                                         y.argmax(dim=1)) / batches_per_epoch
                    test_acc += accuracy(predictions, y) / batches_per_epoch
                if test_acc > max_accuracy:
                    max_accuracy = test_acc
                print(
                    "step %d/%d: training loss: %.3f test loss: %.3f accuracy: %.1f%%"
                    % (step, FLAGS.max_steps, training_loss, test_loss,
                       test_acc * 100))

    time_taken = time.perf_counter() - start_time
    print("Done. Scored %.1f%% in %.1f seconds." %
          (max_accuracy * 100, time_taken))
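get_batch is not defined in this snippet. A minimal sketch consistent with how it is called above, assuming the same next_batch dataset interface used elsewhere on this page; it simply moves a NumPy batch onto the target device as tensors:

import torch

def get_batch(data, split, batch_size, device):
    # Draw the next (images, one-hot labels) batch from the given split and move it to the device.
    x, y = data[split].next_batch(batch_size)
    x = torch.from_numpy(x).float().to(device)
    y = torch.from_numpy(y).float().to(device)
    return x, y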
Code Example #15
def train():
    """
    Performs training and evaluation of MLP model.
    """

    ### DO NOT CHANGE SEEDS!
    # Set the random seeds for reproducibility
    np.random.seed(42)

    ## Prepare all functions
    # Get number of units in each hidden layer specified in the string such as 100,100
    if FLAGS.dnn_hidden_units:
        dnn_hidden_units = FLAGS.dnn_hidden_units.split(",")
        dnn_hidden_units = [
            int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units
        ]
    else:
        dnn_hidden_units = []

    data = cifar10_utils.get_cifar10(data_dir=FLAGS.data_dir)
    train = data['train']
    test = data['test']
    n_inputs = train.images[0].flatten().shape[0]
    n_classes = train.labels[0].shape[0]

    mlp = MLP(n_inputs, dnn_hidden_units, n_classes)
    loss_mod = CrossEntropyModule()

    loss_history = []
    acc_history = []
    for step in range(FLAGS.max_steps):  #FLAGS.max_steps
        x, y = train.next_batch(FLAGS.batch_size)
        x = x.reshape(x.shape[0], n_inputs)
        out = mlp.forward(x)
        loss = loss_mod.forward(out, y)
        loss_history.append(loss)
        dout = loss_mod.backward(out, y)
        mlp.backward(dout)
        mlp.update(FLAGS.learning_rate)
        if step == 0 or (step + 1) % FLAGS.eval_freq == 0:
            x, y = test.images, test.labels
            x = x.reshape(x.shape[0], n_inputs)
            test_out = mlp.forward(x)
            acc = accuracy(test_out, y)
            print('Accuracy:', acc)
            acc_history.append(acc)
    print('Final loss:', loss_history[-1])
    print('Final acc:', acc_history[-1])
    print(len(acc_history))

    plt.plot(loss_history)
    plt.step(range(0, FLAGS.max_steps + 1, FLAGS.eval_freq), acc_history)
    plt.legend(['loss', 'accuracy'])
    plt.show()
Code Example #16
def train():
    #In order to run this function from Jupyter, we must add a class FLAGS as input argument for train
    """
  Performs training and evaluation of MLP model. Evaluate your model on the whole test set each 100 iterations.
  """
    ### DO NOT CHANGE SEEDS!
    # Set the random seeds for reproducibility
    np.random.seed(42)

    ## Prepare all functions
    # Get number of units in each hidden layer specified in the string such as 100,100
    if FLAGS.dnn_hidden_units:
        dnn_hidden_units = FLAGS.dnn_hidden_units.split(",")
        dnn_hidden_units = [
            int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units
        ]
    else:
        dnn_hidden_units = []

    ########################
    # PUT YOUR CODE HERE  #
    #######################
    cifar10 = cifar10_utils.get_cifar10(FLAGS.data_dir)

    x_train, y_train = cifar10.train.images, cifar10.train.labels
    x_test, y_test = cifar10.test.images, cifar10.test.labels

    n_classes = y_test.shape[1]
    dim = x_test.shape[1] * x_test.shape[2] * x_test.shape[3]
    mlp = mlp_numpy.MLP(dnn_hidden_units,
                        n_classes,
                        weight_decay=FLAGS.weight_reg_strength,
                        weight_scale=FLAGS.weight_init_scale)

    #reshape test set
    x_test_res = np.reshape(x_test, (x_test.shape[0], dim))
    #Perform SGD
    for i in range(FLAGS.max_steps + 1):
        batch_x, batch_y = next_batch(FLAGS.batch_size, x_train, y_train)
        x_res = np.reshape(batch_x, (batch_x.shape[0], batch_x.shape[1] *
                                     batch_x.shape[2] * batch_x.shape[3]))
        logits = mlp.inference(x_res)
        loss = mlp.loss(logits, batch_y)
        mlp.train_step(loss, FLAGS.learning_rate)
        if i in [i * 100 for i in range(1, 16)]:
            print('Performing iteration ' + str(i) + ' ...')
            x_test_res = np.reshape(x_test,
                                    (x_test.shape[0], x_test.shape[1] *
                                     x_test.shape[2] * x_test.shape[3]))
            #predict
            preds = mlp.inference(x_test_res)
            acc = mlp.accuracy(preds, y_test)
            print('Accuracy on the test set: ' + str(acc * 100) + '%')
Code Example #17
def train():
    """
  Performs training and evaluation of MLP model. Evaluate your model on the whole test set each 100 iterations.
  """
    ### DO NOT CHANGE SEEDS!
    # Set the random seeds for reproducibility
    np.random.seed(42)

    ## Prepare all functions
    # Get number of units in each hidden layer specified in the string such as 100,100
    if FLAGS.dnn_hidden_units:
        dnn_hidden_units = FLAGS.dnn_hidden_units.split(",")
        dnn_hidden_units = [
            int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units
        ]
    else:
        dnn_hidden_units = []

    ########################
    # PUT YOUR CODE HERE  #
    #######################

    model = MLP(n_hidden=dnn_hidden_units,
                n_classes=10,
                batch_size=FLAGS.batch_size,
                input_dim=32 * 32 * 3,
                weight_decay=FLAGS.weight_reg_strength,
                weight_scale=FLAGS.weight_init_scale)

    Datasets = utils.get_cifar10(data_dir=DATA_DIR_DEFAULT,
                                 one_hot=True,
                                 validation_size=0)

    for i in range(1500):  #(FLAGS.max_steps):
        train_batch = Datasets.train.next_batch(batch_size=FLAGS.batch_size)
        #Get the model output
        logits = model.inference(
            x=train_batch[0].reshape([FLAGS.batch_size, 32 * 32 * 3]))
        #Get the loss and let the model set the loss derivative.
        loss = model.loss(logits=logits, labels=train_batch[1])
        #Perform training step
        model.train_step(loss=loss, flags=FLAGS)

        #Every 100th iteration, print accuracy on a 200-example test batch.
        if i % 100 == 0:
            # for layer in model.layers:
            test_batch = Datasets.test.next_batch(
                batch_size=200)  #Datasets.test.num_examples
            logits = model.inference(
                x=test_batch[0].reshape([200, 32 * 32 * 3]))
            print('-- Step: ', i, " accuracy: ",
                  model.accuracy(logits=logits, labels=test_batch[1]), 'loss',
                  loss)
Code Example #18
def train():
    """
  Performs training and evaluation of ConvNet model. 

  TODO:
  Implement training and evaluation of ConvNet model. Evaluate your model on the whole test set each eval_freq iterations.
  """

    ### DO NOT CHANGE SEEDS!
    # Set the random seeds for reproducibility
    np.random.seed(42)

    ########################
    # PUT YOUR CODE HERE  #
    #######################
    ce_loss = nn.CrossEntropyLoss()
    convnet = ConvNet(3, 10)
    optimizer = optim.Adam(convnet.parameters(), lr=FLAGS.learning_rate)

    c10 = cifar10_utils.get_cifar10(FLAGS.data_dir)
    test_data = c10['test'].images[:32]
    test_data = torch.tensor(test_data)
    targets = c10['test'].labels[:32]

    acc_values = []
    loss_values = []

    for i in range(FLAGS.max_steps):  #range(FLAGS.max_steps)
        x, y = c10['train'].next_batch(FLAGS.batch_size)
        y = y.argmax(axis=1)
        x = torch.tensor(x)
        y = torch.tensor(y)

        optimizer.zero_grad()
        out = convnet(x)
        loss = ce_loss(out, y)
        loss.backward()
        optimizer.step()
        loss_values.append(loss.item())

        # evaluate
        if i % FLAGS.eval_freq == 0:
            with torch.no_grad():
                predictions = convnet(test_data).detach().numpy()
                acc = accuracy(predictions, targets)
                print('acc', acc, 'loss', loss.item())
                acc_values.append(acc)

    # save loss and accuracy to file
    with open('accuracy_cnn.txt', 'a') as f_acc:
        print(acc_values, file=f_acc)
    with open('loss_cnn.txt', 'a') as f_loss:
        print(loss_values, file=f_loss)
Code Example #19
def main():

    # Load data
    path_to_data = os.path.abspath('cifar10/cifar-10-batches-py/')
    cifar10 = cifar10_utils.get_cifar10(data_dir=path_to_data,
                                        one_hot=False,
                                        validation_size=0)
    train_set = cifar10['train']

    # Initialize model
    input_dim = train_set.images[0, :, :, :].size
    n_classes = train_set.labels.max() + 1

    net = skorch.NeuralNetClassifier(MLP,
                                     criterion=nn.CrossEntropyLoss,
                                     module__n_inputs=input_dim,
                                     module__n_classes=n_classes,
                                     optimizer=torch.optim.Adam)

    # params = {
    #   'lr' : [0.1, 0.01, 0.001],
    #   'module__n_hidden' : [
    #     [100, 100],
    #     [1000],
    #     [50,30,20]
    #   ],
    #   'batch_size' : [64, 128, 256, 512]
    # }
    params = {
        'lr': [0.002],
        'module__n_hidden': [[500, 500, 500, 500]],
        'optimizer': [torch.optim.Adam]
    }

    gs = GridSearchCV(net,
                      params,
                      cv=5,
                      scoring='accuracy',
                      n_jobs=4,
                      refit=False,
                      verbose=2)
    gs.fit(train_set.images.reshape(train_set.images.shape[0], -1),
           train_set.labels)

    print()
    print('--------')
    print()
    print('Best params:\t', gs.best_params_)
    print('Best score:\t', gs.best_score_)
    print()

    with open('gridsearch_full_training_set.dill', 'wb') as f:
        dill.dump(gs, f)
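The fitted grid search is serialized with dill; a short sketch of loading it back later for inspection, assuming the file written above exists:

import dill

with open('gridsearch_full_training_set.dill', 'rb') as f:
    gs = dill.load(f)
print(gs.best_params_, gs.best_score_)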
Code Example #20
def train():
  """
  Performs training and evaluation of ConvNet model.
  """

  ### DO NOT CHANGE SEEDS!
  # Set the random seeds for reproducibility
  np.random.seed(42)

  cifar10 = cifar10_utils.get_cifar10('cifar10/cifar-10-batches-py')
  x, y = cifar10['train'].next_batch(FLAGS.batch_size)
  device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

  n_channels = np.size(x, 1)
  net = ConvNet(n_channels, 10).to(device)
  crossEntropy = nn.CrossEntropyLoss()
  optimizer = torch.optim.Adam(net.parameters(), lr=FLAGS.learning_rate)

  loss_list = []
  accuracy_list = []
  test_eval_list = []

  for i in range(FLAGS.max_steps):
      x = Variable(torch.from_numpy(x), requires_grad = True).to(device)
      predictions = net(x).to(device)
      numpy_predictions = predictions.cpu().data[:].numpy()

      label_index = torch.LongTensor(np.argmax(y, axis = 1)).to(device)
      loss = crossEntropy(predictions, label_index)

      if i % FLAGS.eval_freq == 0:
          current_accuracy = accuracy(numpy_predictions, y)
          current_test_accuracy = test(net)
          current_loss = loss.cpu().data.numpy()

          loss_list.append(current_loss)
          accuracy_list.append(current_accuracy)
          test_eval_list.append(current_test_accuracy)

          print('Training step %d out of %d. Loss %.3f, Train accuracy %.3f, Test accuracy %.3f' % (i, FLAGS.max_steps, current_loss, current_accuracy, current_test_accuracy))


      optimizer.zero_grad()
      loss.backward()
      optimizer.step()

      x, y = cifar10['train'].next_batch(FLAGS.batch_size)

  # save model
  torch.save(net, MODEL_DIRECTORY + CNN_PYTORCH_FILE)
  test_accuracy = test(net)
  print('Test accuracy %.3f' % (test_accuracy))
Code Example #21
def test(net=None):
    np.random.seed(42)

    cifar10 = cifar10_utils.get_cifar10('cifar10/cifar-10-batches-py')

    net = net if net else torch.load(MODEL_DIRECTORY + NUMPY_PYTORCH_FILE)
    x = cifar10['test'].images
    y = cifar10['test'].labels

    x = x.reshape(np.size(x, 0), -1)

    predictions = net.forward(x)
    return accuracy(predictions, y)
Code Example #22
File: train_model.py Project: alponomar/dl_3
def feature_extraction_siamese():
    """
    This method restores a TensorFlow checkpoint file (.ckpt) and rebuilds inference
    model with restored parameters. From then on you can basically use that model in
    any way you want, for instance, feature extraction, finetuning or as a submodule
    of a larger architecture. However, this method should extract features from a
    specified layer and store them in data files such as '.h5', '.npy'/'.npz'
    depending on your preference. You will use those files later in the assignment.

    Args:
        [optional]
    Returns:
        None
    """

    ########################
    # PUT YOUR CODE HERE  #
    ########################

    tf.reset_default_graph()

    classes = [
        'plane', 'car', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship',
        'truck'
    ]
    tf.set_random_seed(42)
    np.random.seed(42)
    cifar10 = cifar10_utils.get_cifar10(FLAGS.data_dir)
    x_test, y_test = cifar10.test.images, cifar10.test.labels
    y_test = np.argmax(y_test, axis=1)
    input_data_dim = cifar10.test.images.shape[1]
    n_classes = 10

    cnn_siamese = Siamese()

    x = tf.placeholder(tf.float32,
                       shape=(None, input_data_dim, input_data_dim, 3),
                       name="x1")
    y = tf.placeholder(tf.float32, shape=(None, 1), name="y")

    with tf.name_scope('train_cnn'):
        infs1 = cnn_siamese.inference(x, reuse=None)
        l2_out = cnn_siamese.l2_out

    with tf.Session() as sess:
        saver = tf.train.Saver()
        saver.restore(sess, FLAGS.checkpoint_dir + '/cnn_model_siamese.ckpt')

        l2_out_features = sess.run([l2_out], feed_dict={x: x_test})[0]
        _plot_tsne("L2 out", l2_out_features, y_test)
        _train_one_vs_all(l2_out_features, y_test, "L2 norm", classes)
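The docstring above asks for the extracted features to be stored in '.h5' or '.npy'/'.npz' files, but the snippet only plots and classifies them. A minimal sketch of that missing step, assuming l2_out_features and y_test as computed above (the filename is illustrative):

import numpy as np

# Persist the extracted features together with their labels for later use.
np.savez('siamese_l2_features.npz', features=l2_out_features, labels=y_test)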
Code Example #23
def train():
  """
  Performs training and evaluation of ConvNet model. 

  TODO:
  Implement training and evaluation of ConvNet model. Evaluate your model on the whole test set each eval_freq iterations.
  """

  ### DO NOT CHANGE SEEDS!
  # Set the random seeds for reproducibility
  np.random.seed(42)

  ########################
  # PUT YOUR CODE HERE  #
  #######################
  if FLAGS.batch_size:
    batch_size = int(FLAGS.batch_size)

  cifar10 = cifar10_utils.get_cifar10()
  convNet = ConvNet(3, 10)
  print(convNet)
  lossfunc = nn.CrossEntropyLoss()
  optimizer = torch.optim.Adam(convNet.parameters(), lr=LEARNING_RATE_DEFAULT)

  cifar10_train = cifar10['train']
  # get all test image labels and features:
  cifar10_test = cifar10['test']

  while cifar10_train.epochs_completed < 10:
      x, y = cifar10_train.next_batch(batch_size)
      x_test, y_target = cifar10_test.next_batch(batch_size)
      x = torch.autograd.Variable(torch.from_numpy(x))
      y = torch.autograd.Variable(torch.from_numpy(y).long())
      x_test = torch.autograd.Variable(torch.from_numpy(x_test))

      #x_test = x_test.reshape((batch_size, -1))
      optimizer.zero_grad()
      out = convNet(x)

      loss = lossfunc(out, torch.max(y, 1)[1])
      loss.backward()
      optimizer.step()

      y_test = convNet(x_test)
      rate = accuracy(y_test, y_target)

      #print('loss: ', crossentropy_loss)
      print("Accuracy:", rate)
Code Example #24
def test(net=None):
    np.random.seed(42)

    cifar10 = cifar10_utils.get_cifar10('cifar10/cifar-10-batches-py')
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    net = net if net else torch.load(MODEL_DIRECTORY +
                                     MLP_PYTORCH_FILE).to(device)
    x = cifar10['test'].images
    y = cifar10['test'].labels

    x = x.reshape(np.size(x, 0), -1)
    x = Variable(torch.from_numpy(x), requires_grad=True).to(device)

    predictions = net(x)
    numpy_predictions = predictions.cpu().data[:].numpy()

    return accuracy(numpy_predictions, y)
Code Example #25
def test(data_type, num_times):
    cifar10 = cifar10_utils.get_cifar10(
        '/home/vik1/Downloads/subj/deep_learning/uvadlc_practicals_2018/assignment_1/code/cifar10/cifar-10-batches-py'
    )
    accu = []
    for i in range(0, num_times):
        x, y = cifar10[data_type].next_batch(BATCH_SIZE_DEFAULT)
        ##x=x/2550;
        ##x[:, 0, :, :] = (x[:, 0, :, :] - glb.mean_red) / (glb.std_red )
        ##x[:, 1, :, :] = (x[:, 1, :, :] - glb.mean_green) / (glb.std_green )
        ##x[:, 2, :, :] = (x[:, 2, :, :] - glb.mean_blue) / (glb.std_blue)
        x = x.reshape((BATCH_SIZE_DEFAULT, 32 * 32 * 3))
        prob = glb.mlp.forward(x)
        acc = accuracy(prob, y)
        ##print(acc)
        accu.append(acc)
    full_acc = sum(accu) / len(accu)
    return full_acc
Code Example #26
def train():
  """
  Performs training and evaluation of MLP model. 

  TODO:
  Implement training and evaluation of MLP model. Evaluate your model on the whole test set each eval_freq iterations.
  """

  ### DO NOT CHANGE SEEDS!
  # Set the random seeds for reproducibility
  np.random.seed(42)

  ## Prepare all functions
  # Get number of units in each hidden layer specified in the string such as 100,100
  if FLAGS.dnn_hidden_units:
    dnn_hidden_units = FLAGS.dnn_hidden_units.split(",")
    dnn_hidden_units = [int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units]
  else:
    dnn_hidden_units = []

  ########################
  # PUT YOUR CODE HERE  #
  #######################

  if FLAGS.batch_size:
    batch_size = int(FLAGS.batch_size)

  data = cifar10_utils.get_cifar10() 
  data_train = data['train']
  x_dim = np.prod(data_train.images.shape[1:])
  y_dim = np.prod(data_train.labels.shape[1:])
  print(x_dim, y_dim)

  mlp = MLP(x_dim, dnn_hidden_units, y_dim)

  while data_train.epochs_completed <= 10:
    x, y = data_train.next_batch(batch_size)
    x = x.reshape(batch_size, x_dim)
    out = mlp.forward(x)
    loss = mlp.loss.forward(out, y)
    dout = mlp.loss.backward(out, y)
    mlp.backward(dout)
    
    print("LOSS:", loss)
Code Example #27
def test(data_type, num_times):
    cifar10 = cifar10_utils.get_cifar10(
        '/home/vik1/Downloads/subj/deep_learning/uvadlc_practicals_2018/assignment_1/code/cifar10/cifar-10-batches-py'
    )
    accu = []
    for i in range(0, num_times):
        x, y = cifar10[data_type].next_batch(BATCH_SIZE_DEFAULT)
        x = x.reshape((BATCH_SIZE_DEFAULT, 32 * 32 * 3))
        x = torch.from_numpy(x)
        x = x.cuda()
        output = glb.net(x)
        softmax = torch.nn.Softmax(1)
        x = softmax(output)
        output = output.cpu()
        output = output.detach().numpy()
        acc = accuracy(output, y)
        accu.append(acc)
    full_acc = sum(accu) / len(accu)
    return full_acc
Code Example #28
def train():
    """
  Performs training and evaluation of MLP model. 

  TODO:
  Implement training and evaluation of MLP model. Evaluate your model on the whole test set each eval_freq iterations.
  """

    ### DO NOT CHANGE SEEDS!
    # Set the random seeds for reproducibility
    np.random.seed(42)

    ## Prepare all functions
    # Get number of units in each hidden layer specified in the string such as 100,100
    if FLAGS.dnn_hidden_units:
        dnn_hidden_units = FLAGS.dnn_hidden_units.split(",")
        dnn_hidden_units = [
            int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units
        ]
    else:
        dnn_hidden_units = []

    ########################
    # PUT YOUR CODE HERE  #
    #######################
    batch_size = 100
    image_dim = 32
    channels = 3
    mlp_classes = 10
    mlp_input_size = image_dim * image_dim * channels
    data = cifar10_utils.get_cifar10()
    train_data = data['train']
    validation_data = data['validation']
    test_data = data['test']
    NN = MLP(mlp_input_size, dnn_hidden_units[0], mlp_classes)
    x, y = train_data.next_batch(batch_size)
    print(x.shape, y.shape)
    for image_label in zip(x, y):
        im = np.reshape(image_label[0], (1, mlp_input_size))
        im = torch.tensor(im)
        out = NN.forward(im)
        print(out, image_label[1])
Code Example #29
def test(net = None):
    np.random.seed(42)

    cifar10 = cifar10_utils.get_cifar10('cifar10/cifar-10-batches-py')
    device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

    net = net if net else torch.load(MODEL_DIRECTORY + CNN_PYTORCH_FILE).to(device)

    x, y = cifar10['test'].next_batch(FLAGS.test_batch_size)
    accuracy_list = []

    number_of_examples = len(cifar10['test'].labels)
    counter = 0

    while counter <= number_of_examples:
        x = Variable(torch.from_numpy(x), requires_grad=False).to(device)
        predictions = net(x).to(device)
        numpy_predictions = predictions.cpu().data[:].numpy()
        accuracy_list.append(accuracy(numpy_predictions, y))
        x, y = cifar10['test'].next_batch(FLAGS.test_batch_size)
        counter += FLAGS.test_batch_size

    return np.mean(accuracy_list)
Code Example #30
def main():
    """
    Main function
    """
    # Print all Flags to confirm parameter settings
    print_flags()

    if not os.path.exists(FLAGS.data_dir):
        os.makedirs(FLAGS.data_dir)

    ## Prepare all functions
    # Get number of units in each hidden layer specified in the string such as 100,100
    if FLAGS.dnn_hidden_units:
        dnn_hidden_units = FLAGS.dnn_hidden_units.split(",")
        dnn_hidden_units = [
            int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units
        ]
    else:
        dnn_hidden_units = []

    # neg_slope = FLAGS.neg_slope

    data = cifar10_utils.get_cifar10(FLAGS.data_dir,
                                     one_hot=False,
                                     validation_size=0)

    img_shape = data["train"].images[0].shape

    # print(np.prod(img_shape), dnn_hidden_units, N_CLASSES)
    mlp = MLP(np.prod(img_shape), dnn_hidden_units, N_CLASSES)
    print(mlp)

    optimizer = optim.SGD(mlp.parameters(), lr=FLAGS.learning_rate)
    loss_module = nn.CrossEntropyLoss()

    # run the training operation
    train(mlp, data, optimizer, loss_module)