def train(): """ Performs training and evaluation of MLP model. Evaluate your model on the whole test set each 100 iterations. """ ### DO NOT CHANGE SEEDS! # Set the random seeds for reproducibility np.random.seed(42) ## Prepare all functions # Get number of units in each hidden layer specified in the string such as 100,100 if FLAGS.dnn_hidden_units: dnn_hidden_units = FLAGS.dnn_hidden_units.split(",") dnn_hidden_units = [ int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units ] else: dnn_hidden_units = [] ######################## # PUT YOUR CODE HERE # ####################### model = MLP(n_hidden=dnn_hidden_units, n_classes=10, batch_size=FLAGS.batch_size, input_dim=32 * 32 * 3, weight_decay=FLAGS.weight_reg_strength, weight_scale=FLAGS.weight_init_scale) Datasets = utils.get_cifar10(data_dir=DATA_DIR_DEFAULT, one_hot=True, validation_size=0) for i in range(1500): #(FLAGS.max_steps): train_batch = Datasets.train.next_batch(batch_size=FLAGS.batch_size) #Get the model output logits = model.inference( x=train_batch[0].reshape([FLAGS.batch_size, 32 * 32 * 3])) #Get the loss and let the model set the loss derivative. loss = model.loss(logits=logits, labels=train_batch[1]) #Perform training step model.train_step(loss=loss, flags=FLAGS) #Every 100th iteratin print accuracy on the whole test set. if i % 100 == 0: # for layer in model.layers: test_batch = Datasets.test.next_batch( batch_size=200) #Datasets.test.num_examples logits = model.inference( x=test_batch[0].reshape([200, 32 * 32 * 3])) print('-- Step: ', i, " accuracy: ", model.accuracy(logits=logits, labels=test_batch[1]), 'loss', loss)
def train(): """ Performs training and evaluation of MLP model. Evaluate your model on the whole test set each 100 iterations. """ ### DO NOT CHANGE SEEDS! # Set the random seeds for reproducibility np.random.seed(42) ## Prepare all functions # Get number of units in each hidden layer specified in the string such as 100,100 if FLAGS.dnn_hidden_units: dnn_hidden_units = FLAGS.dnn_hidden_units.split(",") dnn_hidden_units = [ int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units ] else: dnn_hidden_units = [] ######################## # PUT YOUR CODE HERE # ####################### # Import dataset cifar10 = cifar10_utils.get_cifar10(data_dir=FLAGS.data_dir) # Load test data x_test, y_test = cifar10.test.images, cifar10.test.labels x_test = np.reshape(x_test, [x_test.shape[0], -1]) batch_size = FLAGS.batch_size n_classes = 10 input_dim = 3 * 32 * 32 mlp = MLP(n_hidden=dnn_hidden_units, n_classes=n_classes, weight_decay=FLAGS.weight_reg_strength, weight_scale=FLAGS.weight_init_scale, input_dim=input_dim, learning_rate=FLAGS.learning_rate) for tr_step in range(FLAGS.max_steps): # Get next batch x_tr, y_tr = cifar10.train.next_batch(batch_size) # Reshape data for MLP x_tr = np.reshape(x_tr, (batch_size, -1)) # Inference tr_logits = mlp.inference(x_tr) # Calculate loss and accuracy tr_loss = mlp.loss(tr_logits, y_tr) tr_accuracy = mlp.accuracy(tr_logits, y_tr) if tr_step % 10 == 0: print('Step:{} Loss:{:.4f}, Accuracy:{:.4f}'.format( tr_step, tr_loss, tr_accuracy)) mlp.train_step() if tr_step % 100 == 0 or tr_step == FLAGS.max_steps - 1: # Inference test_logits = mlp.inference(x_test) # Calculate loss and accuracy test_loss = mlp.loss(test_logits, y_test) test_accuracy = mlp.accuracy(test_logits, y_test) print('TEST - Loss:{:.4f}, Accuracy:{:.4f}'.format( test_loss, test_accuracy))
def train(): """ Performs training and evaluation of MLP model. Evaluate your model on the whole test set each 100 iterations. """ ### DO NOT CHANGE SEEDS! # Set the random seeds for reproducibility np.random.seed(42) ## Prepare all functions # Get number of units in each hidden layer specified in the string such as 100,100 if FLAGS.dnn_hidden_units: dnn_hidden_units = FLAGS.dnn_hidden_units.split(",") dnn_hidden_units = [ int(dnn_hidden_unit_) for dnn_hidden_unit_ in dnn_hidden_units ] else: dnn_hidden_units = [] ######################## # PUT YOUR CODE HERE # ####################### # dataset cifar10 = cifar10_utils.get_cifar10(data_dir=FLAGS.data_dir) learning_rate = FLAGS.learning_rate weight_init_scale = FLAGS.weight_init_scale weight_reg_strength = FLAGS.weight_reg_strength batch_size = FLAGS.batch_size n_classes = 10 input_dim = 3 * 32 * 32 net = MLP(n_hidden=dnn_hidden_units, n_classes=n_classes, input_dim=input_dim, weight_decay=weight_reg_strength, weight_scale=weight_init_scale) print(net) for _step in range(FLAGS.max_steps): net.training_mode = True X_train, y_train = cifar10.train.next_batch(batch_size) X_train = np.reshape(X_train, (batch_size, -1)) # Feed forward logits_train = net.inference(X_train) # Obtain loss and accuracy train_loss = net.loss(logits_train, y_train) train_accuracy = net.accuracy(logits_train, y_train) print('Ep.{}: train_loss:{:.4f}, train_accuracy:{:.4f}'.format( _step, train_loss, train_accuracy)) train_flags = { 'learning_rate': learning_rate, 'batch_size': batch_size } net.train_step(loss=train_loss, flags=train_flags) if _step % 50 == 0: net.training_mode = False X_test, y_test = cifar10.test.images, cifar10.test.labels X_test = np.reshape(X_test, [X_test.shape[0], -1]) # Feed forward logits_test = net.inference(X_test) # Obtain loss and accuracy test_loss = net.loss(logits_test, y_test) test_accuracy = net.accuracy(logits_test, y_test) print('\t\ttest_loss:{:.4f}, test_accuracy:{:.4f}'.format( test_loss, test_accuracy)) # Print stats # net.plot_stats() print('Done training.')