# Example 1
def main(_):
    """Retrain a CIFAR model on the full training set, then evaluate saved checkpoints.

    Args:
        _: unused positional argument (tf.app.run passes argv here).
    """
    tf.set_random_seed(1234)  # fix the graph-level seed for reproducibility
    sess = tf.Session()
    print('---Prepare data...')

    ### YOUR CODE HERE
    # CIFAR-10 batch files are expected in the current working directory.
    data_dir = './'
    ### END CODE HERE

    x_train, y_train, x_test, y_test = load_data(data_dir)
    x_train_new, y_train_new, x_valid, y_valid = train_valid_split(
        x_train, y_train)

    model = Cifar(sess, configure())

    ### YOUR CODE HERE
    # First step: use the train_new set and the valid set to choose hyperparameters.

    # Second step: with hyperparameters determined in the first run, re-train
    # your model on the original train set.
    model.train(x_train, y_train, 50)

    # Third step: after re-training, test your model on the test set.
    # Report testing accuracy in your hard-copy report.
    # NOTE(review): presumably test_or_validate loads the checkpoints saved at
    # these epochs during train() — confirm against the Cifar implementation.
    model.test_or_validate(x_test, y_test, [30, 35, 40, 45, 50])
def main(_):
    """Evaluate the checkpoints saved between epochs 100 and 200 on the test set.

    Args:
        _: unused positional argument (tf.app.run passes argv here).
    """
    session = tf.Session()
    print('---Prepare data...')

    ### YOUR CODE HERE
    #data_dir = "/content/drive/My Drive/Colab Notebooks/code/HW3/data/"
    data_dir = "/content/drive/My Drive/data"
    ### END CODE HERE

    train_x, train_y, test_x, test_y = load_data(data_dir)
    sub_train_x, sub_train_y, valid_x, valid_y = train_valid_split(
        train_x, train_y)

    model = Cifar(session, configure())

    ### YOUR CODE HERE
    # Step 1 (done in an earlier run): tune hyperparameters on the split.
    #model.train(sub_train_x, sub_train_y, 200)
    #model.test_or_validate(valid_x, valid_y, [10,20,30,40,50,100,150,200])

    # Step 2 (done in an earlier run): retrain on the original train set.
    # model.train(train_x, train_y, 200)

    # Step 3: report test accuracy for every 10th-epoch checkpoint in 100-200.
    checkpoint_epochs = list(range(100, 201, 10))
    model.test_or_validate(test_x, test_y, checkpoint_epochs)
# Example 3
def main(_):
    """Retrain a CIFAR model on the original training set for 150 epochs.

    Args:
        _: unused positional argument (tf.app.run passes argv here).
    """
    sess = tf.Session()
    print('---Prepare data...')

    ### YOUR CODE HERE
    data_dir = "data/"
    ### END CODE HERE

    x_train, y_train, x_test, y_test = load_data(data_dir)
    x_train_new, y_train_new, x_valid, y_valid = train_valid_split(
        x_train, y_train)

    model = Cifar(sess, configure())

    ### YOUR CODE HERE
    # First step: use the train_new set and the valid set to choose hyperparameters.

    #
    #model.train(x_train_new, y_train_new, 200)
    #model.train(x_train_new, y_train_new, 3)
    #model.test_or_validate(x_valid, y_valid, [160, 170, 180, 190, 200])
    #model.test_or_validate(x_valid, y_valid, [10])

    # Second step: with hyperparameters determined in the first run, re-train
    # your model on the original train set.
    # NOTE(review): no third (test) step here — evaluation was presumably run
    # separately after this script finished.
    model.train(x_train, y_train, 150)
# Example 4
def main(_):
    """Retrain a CIFAR model for 140 epochs and report that checkpoint's test accuracy.

    Args:
        _: unused positional argument (tf.app.run passes argv here).
    """
    session = tf.Session()
    print('---Prepare data...')

    ### YOUR CODE HERE
    data_dir = '../cifar-10-batches-py/'
    ### END CODE HERE

    train_x, train_y, test_x, test_y = load_data(data_dir)
    sub_train_x, sub_train_y, valid_x, valid_y = train_valid_split(
        train_x, train_y)

    model = Cifar(session, configure())

    ### YOUR CODE HERE
    # Step 1 (done in earlier runs): hyperparameter search on the split.
    # model.train(sub_train_x, sub_train_y, 200)
    # model.test_or_validate(valid_x, valid_y, list(range(80, 201, 10)))
    # model.test_or_validate(valid_x, valid_y, [150])

    # Step 2: retrain on the original, full training set with the epoch
    # budget chosen during validation.
    chosen_epoch = 140
    model.train(train_x, train_y, chosen_epoch)

    # Step 3: report the retrained checkpoint's accuracy on the test set.
    model.test_or_validate(test_x, test_y, [chosen_epoch])
# Example 5
def main(_):
    """Build a CIFAR model, then validate early checkpoints with a second session.

    Args:
        _: unused positional argument (tf.app.run passes argv here).
    """
    sess = tf.Session()
    print('---Prepare data...')

    ### YOUR CODE HERE
    data_dir = "cifar-10-batches-py"
    ### END CODE HERE

    x_train, y_train, x_test, y_test = load_data(data_dir)
    # 8000 is presumably the validation-set size — confirm against
    # train_valid_split's signature.
    x_train_new, y_train_new, x_valid, y_valid = train_valid_split(
        x_train, y_train, 8000)
    # NOTE(review): result is discarded; looks like a debug/visualization call
    # on the first training record with training=True augmentation.
    parse_record(x_train[0], True)
    model = Cifar(sess, configure())

    ### YOUR CODE HERE
    # First step: use the train_new set and the valid set to choose hyperparameters.
    def del_all_flags(FLAGS):
        # Clear every registered absl/tf flag so configure() can re-define
        # them for the second model without "duplicate flag" errors.
        # NOTE(review): FLAGS._flags() is a private API — fragile across
        # TensorFlow versions.
        flags_dict = FLAGS._flags()
        keys_list = [keys for keys in flags_dict]
        for keys in keys_list:
            FLAGS.__delattr__(keys)

    # model.train(x_train_new, y_train_new, 3)
    # A fresh session + re-parsed flags for the evaluation-only model.
    sess_test = tf.Session()
    del_all_flags(tf.flags.FLAGS)
    model_test = Cifar(sess_test, configure())
    model_test.test_or_validate(x_valid, y_valid, [1, 2, 3])
# Example 6
def main(_):
	"""Hyperparameter-selection run: train on the reduced split, then score
	the late-epoch checkpoints on the validation set.

	Args:
		_: unused positional argument (tf.app.run passes argv here).
	"""
	session = tf.Session()
	print('---Prepare data...')

	### YOUR CODE HERE
	data_dir = "/Users/tiandi03/Desktop/dataset/cifar-10-batches-py"
	### END CODE HERE

	train_x, train_y, test_x, test_y = load_data(data_dir)
	sub_train_x, sub_train_y, valid_x, valid_y = train_valid_split(train_x, train_y)
	model = Cifar(session, configure())

	### YOUR CODE HERE
	# Step 1: train to epoch 200, then validate every 10th checkpoint from
	# 160 onward to pick a stopping epoch.
	budget = 200
	model.train(sub_train_x, sub_train_y, budget)
	model.test_or_validate(valid_x, valid_y, list(range(160, 201, 10)))
# Example 7
def main(_):
	"""Train a CIFAR model on the reduced training split for 200 epochs.

	Args:
		_: unused positional argument (tf.app.run passes argv here).
	"""

	sess = tf.Session()
	print('---Prepare data...')

	### YOUR CODE HERE
	# Data lives under ResNet/data relative to the working directory.
	data_dir = os.path.join(os.path.abspath(os.getcwd()),"ResNet/data")

	### END CODE HERE

	x_train, y_train, x_test, y_test = load_data(data_dir)
	x_train_new, y_train_new, x_valid, y_valid = train_valid_split(x_train, y_train)

	model = Cifar(sess, configure())

	### YOUR CODE HERE
	# First step: use the train_new set and the valid set to choose hyperparameters.
	# NOTE(review): only the training phase is run here; validation/testing
	# was presumably done in a separate invocation.
	model.train(x_train_new, y_train_new, 200)
# Example 8
def main(_):
	"""Train on the full CIFAR-10 training set for 40 epochs and report the
	test accuracy of every 5th-epoch checkpoint.

	Args:
		_: unused positional argument (tf.app.run passes argv here).
	"""
	session = tf.Session()
	print('---Prepare data...')

	### YOUR CODE HERE
	# Download cifar-10 dataset from https://www.cs.toronto.edu/~kriz/cifar.html
	data_dir = "cifar-10-batches-py"
	### END CODE HERE

	train_x, train_y, test_x, test_y = load_data(data_dir)
	sub_train_x, sub_train_y, valid_x, valid_y = train_valid_split(
		train_x, train_y)

	model = Cifar(session, configure())

	### YOUR CODE HERE
	budget = 40
	model.train(train_x, train_y, budget)
	model.test_or_validate(test_x, test_y, list(range(5, 41, 5)))
# Example 9
def main(_):
    """Evaluate the epoch-170 CIFAR checkpoint on the test set.

    Args:
        _: unused positional argument (tf.app.run passes argv here).
    """
    sess = tf.Session()
    print('---Prepare data...')

    ### YOUR CODE HERE
    data_dir = '../cifar-10-batches-py'
    ### END CODE HERE

    x_train, y_train, x_test, y_test = load_data(data_dir)
    x_train_new, y_train_new, x_valid, y_valid = train_valid_split(
        x_train, y_train)

    model = Cifar(sess, configure())

    ### YOUR CODE HERE
    # Commented-out scaffolding from earlier graph-inspection experiments
    # (building the ResNet directly and dumping the graph for TensorBoard).
    # from Network import ResNet
    # network = ResNet(1, 3, 10, 16)
    # ips = tf.placeholder(tf.float32, shape=(100, 32, 32, 3))
    # sess.run(tf.global_variables_initializer())
    # sess.run(tf.local_variables_initializer())
    # net = network(ips,training=True)
    # from tensorflow.keras import Model
    # model = Model(inputs=ips, outputs=net)

    # print(model.summary)
    # # print(sess.run(network(ips,training=True)))
    # writer = tf.summary.FileWriter('output', sess.graph)
    # writer.close()
    # First step: use the train_new set and the valid set to choose hyperparameters.
    # model.train(x_train_new, y_train_new, 200)
    # while True:
    # model.train(x_train_new, y_train_new, 600)
    # model.test_or_validate(x_valid,y_valid,[i*10 for i in range(1,11)])
    # model.test_or_validate(x_valid,y_valid,[20])
    # model.test_or_validate(x_valid, y_valid, [160, 170, 180, 190, 200])
    # model.test_or_validate(x_valid,y_valid,[10])

    # Second step: with hyperparameters determined in the first run, re-train
    # your model on the original train set.
    # model.train(x_train, y_train, 200)

    # Third step: after re-training, test your model on the test set.
    # Report testing accuracy in your hard-copy report.
    # NOTE(review): assumes a checkpoint for epoch 170 already exists on disk
    # from a previous training run — confirm before running.
    model.test_or_validate(x_test, y_test, [170])
# Example 10
def main(_):
    """Grid-search MLP hyperparameters on the validation split, then retrain
    on the full training set and report test accuracy.

    Args:
        _: unused positional argument (tf.app.run passes argv here).
    """
    os.environ["CUDA_VISIBLE_DEVICES"] = '3'

    sess = tf.Session()
    print('---Prepare data...')
    x_train, y_train, x_test, y_test = load_data()
    x_train_new, y_train_new, x_valid, y_valid = train_valid_split(
        x_train, y_train)

    model = MNIST(sess, configure())

    ### YOUR CODE HERE

    # First run: sweep every combination of depth, width, batch size and
    # epoch budget; each configuration is written into model.conf before
    # training on the reduced set with validation enabled.
    for hid_layers in (1, 2, 3):
        model.conf.num_hid_layers = hid_layers
        for hid_units in (256, 512, 1024):
            model.conf.num_hid_units = hid_units
            for batch in (32, 64, 128):
                model.conf.batch_size = batch
                for epochs in (1, 5, 10):
                    print("Hidden layers: {}, Hidden units: {}, Batch size: {}, Max epochs: {}".format(
                        hid_layers, hid_units, batch, epochs))
                    model.train(x_train_new,
                                y_train_new,
                                x_valid,
                                y_valid,
                                epochs,
                                validation=True)
    # Second run: with hyperparameters determined in the first run, re-train
    # your model on the original train set.
    model.train(x_train, y_train, None, None, 10, validation=False)

    # Third run: after re-training, test your model on the test set.
    # Report testing accuracy in your hard-copy report.
    model.test(x_test, y_test, 10)
# Example 11
def main(_):
    """Single validation-phase training run of the MNIST model (one epoch).

    Args:
        _: unused positional argument (tf.app.run passes argv here).
    """
    os.environ["CUDA_VISIBLE_DEVICES"] = '3'

    session = tf.Session()
    print('---Prepare data...')
    train_x, train_y, test_x, test_y = load_data()
    sub_train_x, sub_train_y, valid_x, valid_y = train_valid_split(
        train_x, train_y)

    model = MNIST(session, configure())

    ### YOUR CODE HERE

    # First run: train on the reduced set with validation enabled to choose
    # hyperparameters (num_hid_layers, num_hid_units, stopping epoch, etc.).
    model.train(sub_train_x, sub_train_y, valid_x, valid_y,
                max_epoch=1, validation=True)
# Example 12
def main(_):
	"""Grid-search MNIST hyperparameters, retrain with the best combination on
	the full training set, and report the final test accuracy.

	Args:
		_: unused positional argument (tf.app.run passes argv here).
	"""
	os.environ["CUDA_VISIBLE_DEVICES"] = '3'

	#sess = tf.Session()
	print('---Prepare data...')
	x_train, y_train, x_test, y_test = load_data()
	x_train_new, y_train_new, x_valid, y_valid \
				= train_valid_split(x_train, y_train)

	# model = MNIST(sess, conf())
	### YOUR CODE HERE
	conf = configure()

	# First run: use the train_new set and the valid set to choose
	# hyperparameters, like num_hid_layers, num_hid_units, stopping epoch, etc.
	# Report chosen hyperparameters in your hard-copy report.
	params = {
		'num_hid_layers': [0, 1, 2, 3, 4, 5],
		'num_hid_units': [64, 128, 256, 512],
		'max_epoch': [50, 100, 125, 150, 175, 200],
		'batch_size': [128, 256, 512, 1024, 2048],
	}

	# Sentinel lower than any real accuracy, so the first result always wins.
	best_accuracy = float('-inf')
	best_params = None

	for batch_size in params['batch_size']:
		conf.batch_size = batch_size
		for num_hid_units in params['num_hid_units']:
			conf.num_hid_units = num_hid_units
			for num_hid_layers in params['num_hid_layers']:
				conf.num_hid_layers = num_hid_layers
				# Train once to the largest budget; the checkpoints saved along
				# the way let us score every candidate stopping epoch below.
				max_epoch = max(params['max_epoch'])
				sess = tf.Session(graph=tf.get_default_graph())
				model = MNIST(sess, conf)
				model.train(x_train_new, y_train_new, x_valid, y_valid, max_epoch, validation=False)
				for epoch in params['max_epoch']:
					accuracy = model.test(x_valid, y_valid, epoch)
					print("Accuracy with", num_hid_units, "hidden Units,", batch_size, "batch_size,",
						num_hid_layers, "hidden Layers and", epoch, "epoch:", accuracy)
					if accuracy > best_accuracy:
						best_params = (batch_size, num_hid_units, num_hid_layers, epoch)
						best_accuracy = accuracy
				# Tear the graph down so the next configuration starts fresh.
				sess.close()
				tf.reset_default_graph()

	print("Best Accuracy with", best_params[1], "hidden Units,", best_params[0], "batch_size,",
						best_params[2], "hidden Layers and", best_params[3], "epoch:", best_accuracy)


	# Second run: with hyperparameters determined in the first run, re-train
	# your model on the original train set.
	sess = tf.Session(graph=tf.get_default_graph())
	conf.batch_size, conf.num_hid_units, conf.num_hid_layers, max_epoch = best_params
	model = MNIST(sess, conf)
	# BUG FIX: the original passed x_train twice (features as labels);
	# retraining must use y_train as the target argument.
	model.train(x_train, y_train, x_valid, y_valid, max_epoch, validation=False)

	# Third run: after re-training, test your model on the test set.
	# Report testing accuracy in your hard-copy report.
	accuracy = model.test(x_test, y_test, max_epoch)
	sess.close()