Example #1
def main(_):
    tf.set_random_seed(1234)
    sess = tf.Session()
    print('---Prepare data...')

    ### YOUR CODE HERE
    data_dir = './'
    ### END CODE HERE

    x_train, y_train, x_test, y_test = load_data(data_dir)
    x_train_new, y_train_new, x_valid, y_valid = train_valid_split(
        x_train, y_train)

    model = Cifar(sess, configure())

    ### YOUR CODE HERE
    # First step: use the train_new set and the valid set to choose hyperparameters.
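    # A typical first-step run (commented out; the epoch counts below are
    # illustrative, mirroring the other examples in this listing):
    # model.train(x_train_new, y_train_new, 200)
    # model.test_or_validate(x_valid, y_valid, [160, 170, 180, 190, 200])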

    # Second step: with hyperparameters determined in the first run, re-train
    # your model on the original train set.
    model.train(x_train, y_train, 50)

    # Third step: after re-training, test your model on the test set.
    # Report testing accuracy in your hard-copy report.
    model.test_or_validate(x_test, y_test, [30, 35, 40, 45, 50])
Example #2
def main(_):
    sess = tf.Session()
    print('---Prepare data...')

    ### YOUR CODE HERE
    #data_dir = "/content/drive/My Drive/Colab Notebooks/code/HW3/data/"
    data_dir = "/content/drive/My Drive/data"
    ### END CODE HERE

    x_train, y_train, x_test, y_test = load_data(data_dir)
    x_train_new, y_train_new, x_valid, y_valid = train_valid_split(
        x_train, y_train)

    model = Cifar(sess, configure())

    ### YOUR CODE HERE
    # First step: use the train_new set and the valid set to choose hyperparameters.
    #model.train(x_train_new, y_train_new, 200)
    #model.test_or_validate(x_valid, y_valid, [10,20,30,40,50,100,150,200])

    # Second step: with hyperparameters determined in the first run, re-train
    # your model on the original train set.
    # model.train(x_train, y_train, 200)

    # Third step: after re-training, test your model on the test set.
    # Report testing accuracy in your hard-copy report.
    model.test_or_validate(
        x_test, y_test,
        [100, 110, 120, 130, 140, 150, 160, 170, 180, 190, 200])
Example #3
def main(_):
    sess = tf.Session()
    print('---Prepare data...')

    ### YOUR CODE HERE
    data_dir = "data/"
    ### END CODE HERE

    x_train, y_train, x_test, y_test = load_data(data_dir)
    x_train_new, y_train_new, x_valid, y_valid = train_valid_split(
        x_train, y_train)

    model = Cifar(sess, configure())

    ### YOUR CODE HERE
    # First step: use the train_new set and the valid set to choose hyperparameters.

    #model.train(x_train_new, y_train_new, 200)
    #model.train(x_train_new, y_train_new, 3)
    #model.test_or_validate(x_valid, y_valid, [160, 170, 180, 190, 200])
    #model.test_or_validate(x_valid, y_valid, [10])

    # Second step: with hyperparameters determined in the first run, re-train
    # your model on the original train set.
    model.train(x_train, y_train, 150)
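
    # Third step (not shown in this snippet): evaluate on the test set, e.g.
    # model.test_or_validate(x_test, y_test, [150])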
Example #4
def main(_):
    sess = tf.Session()
    print('---Prepare data...')

    ### YOUR CODE HERE
    data_dir = '../cifar-10-batches-py/'
    ### END CODE HERE

    x_train, y_train, x_test, y_test = load_data(data_dir)
    x_train_new, y_train_new, x_valid, y_valid = train_valid_split(
        x_train, y_train)

    model = Cifar(sess, configure())

    ### YOUR CODE HERE
    # First step: use the train_new set and the valid set to choose hyperparameters.
    # model.train(x_train_new, y_train_new, 200)
    # model.train(x_train_new, y_train_new, 10)
    # model.test_or_validate(x_valid, y_valid, [160, 170, 180, 190, 200])
    # model.test_or_validate(x_valid, y_valid, list(range(80, 201, 10)))
    # model.test_or_validate(x_valid, y_valid, [150])

    # Second step: with hyperparameters determined in the first run, re-train
    # your model on the original train set.
    model.train(x_train, y_train, 140)

    # Third step: after re-training, test your model on the test set.
    # Report testing accuracy in your hard-copy report.
    model.test_or_validate(x_test, y_test, [140])
Example #5
def main(_):
    sess = tf.Session()
    print('---Prepare data...')

    ### YOUR CODE HERE
    data_dir = "cifar-10-batches-py"
    ### END CODE HERE

    x_train, y_train, x_test, y_test = load_data(data_dir)
    x_train_new, y_train_new, x_valid, y_valid = train_valid_split(
        x_train, y_train, 8000)
    parse_record(x_train[0], True)  # result unused; presumably a preprocessing sanity check
    model = Cifar(sess, configure())

    ### YOUR CODE HERE
    # First step: use the train_new set and the valid set to choose hyperparameters.
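    # Helper to deregister every TF flag so configure() can be called again for a
    # second session without duplicate-flag errors (a plausible reading of its intent):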
    def del_all_flags(FLAGS):
        flags_dict = FLAGS._flags()
        keys_list = [keys for keys in flags_dict]
        for keys in keys_list:
            FLAGS.__delattr__(keys)

    # model.train(x_train_new, y_train_new, 3)
    sess_test = tf.Session()
    del_all_flags(tf.flags.FLAGS)
    model_test = Cifar(sess_test, configure())
    model_test.test_or_validate(x_valid, y_valid, [1, 2, 3])
Example #6
File: main.py  Project: tiandi111/lake
def main(_):
	sess = tf.Session()
	print('---Prepare data...')

	### YOUR CODE HERE
	data_dir = "/Users/tiandi03/Desktop/dataset/cifar-10-batches-py"
	### END CODE HERE

	x_train, y_train, x_test, y_test = load_data(data_dir)
	x_train_new, y_train_new, x_valid, y_valid = train_valid_split(x_train, y_train)
	model = Cifar(sess, configure())

	### YOUR CODE HERE
	# First step: use the train_new set and the valid set to choose hyperparameters.
	model.train(x_train_new, y_train_new, 200)
	model.test_or_validate(x_valid, y_valid, [160, 170, 180, 190, 200])
Example #7
def main(_):

	sess = tf.Session()
	print('---Prepare data...')

	### YOUR CODE HERE
	data_dir = os.path.join(os.path.abspath(os.getcwd()), "ResNet/data")

	### END CODE HERE

	x_train, y_train, x_test, y_test = load_data(data_dir)
	x_train_new, y_train_new, x_valid, y_valid = train_valid_split(x_train, y_train)

	model = Cifar(sess, configure())

	### YOUR CODE HERE
	# First step: use the train_new set and the valid set to choose hyperparameters.
	model.train(x_train_new, y_train_new, 200)
Example #8
def main(_):
    sess = tf.Session()
    print('---Prepare data...')

    ### YOUR CODE HERE
    # Download cifar-10 dataset from https://www.cs.toronto.edu/~kriz/cifar.html
    data_dir = "cifar-10-batches-py"
    ### END CODE HERE

    x_train, y_train, x_test, y_test = load_data(data_dir)
    x_train_new, y_train_new, x_valid, y_valid = train_valid_split(
        x_train, y_train)

    model = Cifar(sess, configure())

    ### YOUR CODE HERE
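    # This variant skips the validation pass and trains directly on the full
    # train set; the x_valid/y_valid split above goes unused.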
    model.train(x_train, y_train, 40)
    model.test_or_validate(x_test, y_test, [5, 10, 15, 20, 25, 30, 35, 40])
Example #9
def main(_):
    sess = tf.Session()
    print('---Prepare data...')

    ### YOUR CODE HERE
    data_dir = '../cifar-10-batches-py'
    ### END CODE HERE

    x_train, y_train, x_test, y_test = load_data(data_dir)
    x_train_new, y_train_new, x_valid, y_valid = train_valid_split(
        x_train, y_train)

    model = Cifar(sess, configure())

    ### YOUR CODE HERE
    # from Network import ResNet
    # network = ResNet(1, 3, 10, 16)
    # ips = tf.placeholder(tf.float32, shape=(100, 32, 32, 3))
    # sess.run(tf.global_variables_initializer())
    # sess.run(tf.local_variables_initializer())
    # net = network(ips,training=True)
    # from tensorflow.keras import Model
    # model = Model(inputs=ips, outputs=net)

    # print(model.summary)
    # # print(sess.run(network(ips,training=True)))
    # writer = tf.summary.FileWriter('output', sess.graph)
    # writer.close()
    # First step: use the train_new set and the valid set to choose hyperparameters.
    # model.train(x_train_new, y_train_new, 200)
    # while True:
    # model.train(x_train_new, y_train_new, 600)
    # model.test_or_validate(x_valid,y_valid,[i*10 for i in range(1,11)])
    # model.test_or_validate(x_valid,y_valid,[20])
    # model.test_or_validate(x_valid, y_valid, [160, 170, 180, 190, 200])
    # model.test_or_validate(x_valid,y_valid,[10])

    # Second step: with hyperparameters determined in the first run, re-train
    # your model on the original train set.
    # model.train(x_train, y_train, 200)

    # Third step: after re-training, test your model on the test set.
    # Report testing accuracy in your hard-copy report.
    model.test_or_validate(x_test, y_test, [170])
Example #10
def main(_):
    os.environ["CUDA_VISIBLE_DEVICES"] = '3'

    sess = tf.Session()
    print('---Prepare data...')
    x_train, y_train, x_test, y_test = load_data()
    x_train_new, y_train_new, x_valid, y_valid \
       = train_valid_split(x_train, y_train)

    model = MNIST(sess, configure())

    ### YOUR CODE HERE

    # First run: use the train_new set and the valid set to choose
    # hyperparameters, like num_hid_layers, num_hid_units, stopping epoch, etc.
    # Report chosen hyperparameters in your hard-copy report.
    num_hidden_layers = [1, 2, 3]
    num_hidden_units = [256, 512, 1024]
    batch_sizes = [32, 64, 128]
    num_epochs = [1, 5, 10]
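    # Looping directly over the config attributes mutates model.conf in place,
    # so each train() call below sees the new hyperparameter combination.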
    for model.conf.num_hid_layers in num_hidden_layers:
        for model.conf.num_hid_units in num_hidden_units:
            for model.conf.batch_size in batch_sizes:
                for epochs in num_epochs:
                    print("Hidden layers: {}, Hidden units: {}, Batch size: {}, Max epochs: {}".format(model.conf.num_hid_layers, \
                     model.conf.num_hid_units, model.conf.batch_size, epochs))
                    model.train(x_train_new,
                                y_train_new,
                                x_valid,
                                y_valid,
                                epochs,
                                validation=True)
    # Second run: with hyperparameters determined in the first run, re-train
    # your model on the original train set.
    model.train(x_train, y_train, None, None, 10, validation=False)

    # Third run: after re-training, test your model on the test set.
    # Report testing accuracy in your hard-copy report.
    model.test(x_test, y_test, 10)
Example #11
def main(_):
    os.environ["CUDA_VISIBLE_DEVICES"] = '3'

    sess = tf.Session()
    print('---Prepare data...')
    x_train, y_train, x_test, y_test = load_data()
    x_train_new, y_train_new, x_valid, y_valid \
       = train_valid_split(x_train, y_train)

    model = MNIST(sess, configure())

    ### YOUR CODE HERE

    # First run: use the train_new set and the valid set to choose
    # hyperparameters, like num_hid_layers, num_hid_units, stopping epoch, etc.
    # Report chosen hyperparameters in your hard-copy report.

    model.train(x_train_new,
                y_train_new,
                x_valid,
                y_valid,
                max_epoch=1,
                validation=True)
Example #12
def main(_):
	os.environ["CUDA_VISIBLE_DEVICES"] = '3'
    
	#sess = tf.Session()
	print('---Prepare data...')
	x_train, y_train, x_test, y_test = load_data()
	x_train_new, y_train_new, x_valid, y_valid \
				= train_valid_split(x_train, y_train)

	
	# model = MNIST(sess, conf())
	### YOUR CODE HERE
	conf = configure()

	# First run: use the train_new set and the valid set to choose
	# hyperparameters, like num_hid_layers, num_hid_units, stopping epoch, etc.
	# Report chosen hyperparameters in your hard-copy report.
	params = {
	'num_hid_layers': [0, 1, 2, 3, 4, 5],
	'num_hid_units': [64, 128, 256, 512],
	'max_epoch': [50, 100, 125, 150, 175, 200],
	'batch_size': [128, 256, 512, 1024, 2048]
	}

	# sentinel below any achievable accuracy
	best_accuracy = -sys.maxsize - 1
	best_params = None

	for batch_size in params['batch_size']:
		conf.batch_size = batch_size
		for num_hid_units in params['num_hid_units']:
			conf.num_hid_units = num_hid_units
			for num_hid_layers in params['num_hid_layers']:
				conf.num_hid_layers = num_hid_layers
				max_epoch = max(params['max_epoch'])
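				# Fresh session (and, after the loop body, a graph reset) so each
				# configuration trains from scratch without variable collisions.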
				sess = tf.Session(graph=tf.get_default_graph())
				model = MNIST(sess, conf)
				model.train(x_train_new, y_train_new, x_valid, y_valid, max_epoch, validation=False)
				for epoch in params['max_epoch']:
					accuracy = model.test(x_valid, y_valid, epoch)
					print ("Accuracy with", num_hid_units, "hidden Units,", batch_size, "batch_size,", 
						num_hid_layers, "hidden Layers and", epoch, "epoch:", accuracy)
					if accuracy > best_accuracy:
						best_params = (batch_size, num_hid_units, num_hid_layers, epoch)
						best_accuracy = accuracy
				sess.close()
				tf.reset_default_graph()

	print ("Best Accuracy with", best_params[1], "hidden Units,", best_params[0], "batch_size,", 
						best_params[2], "hidden Layers and", best_params[3], "epoch:", best_accuracy)


	# Second run: with hyperparameters determined in the first run, re-train
	# your model on the original train set.
	sess = tf.Session(graph=tf.get_default_graph())
	conf.batch_size, conf.num_hid_units, conf.num_hid_layers, max_epoch = best_params
	model = MNIST(sess, conf)
	model.train(x_train, y_train, x_valid, y_valid, max_epoch, validation=False)

	# Third run: after re-training, test your model on the test set.
	# Report testing accuracy in your hard-copy report.
	accuracy = model.test(x_test, y_test, max_epoch)
	sess.close()
Example #13
def main():
    properties = None
    if (len(sys.argv) > 1):
        properties = Properties(sys.argv[1])
    else:
        properties = Properties("Local")

    x_train, y_train, x_test, y_test = load_data(properties.data_dir,
                                                 properties.test_dir)

    model = UNetClassifier(num_layers=3, num_filters=64,
                           num_classes=400).build_model()

    def huber_loss(y_true, y_pred, clip_delta=1.0):
        error = y_true - y_pred
        cond = tf.keras.backend.abs(error) < clip_delta
        squared_loss = 0.5 * tf.keras.backend.square(error)
        linear_loss = clip_delta * (tf.keras.backend.abs(error) -
                                    0.5 * clip_delta)

        return tf.where(cond, squared_loss, linear_loss)

    def crossentropy_loss(y_true, y_pred):
        return K.categorical_crossentropy(y_true, y_pred)

    y_train_new = formbins(y_train)

    unique, counts = np.unique(y_train_new, return_counts=True)
    weights = 1 - counts / np.sum(counts)
    weights /= np.sum(weights)

    dictionary = dict(zip(unique, weights))
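    # NOTE: data_generator(...) is re-created on every iteration below, so next()
    # returns the first batch ten times rather than advancing a single generator.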
    for i in range(10):
        next(data_generator(properties.data_dir, 10, dictionary))

    weightsVector = []
    for i in range(100):
        if i in dictionary:
            weightsVector.append(dictionary[i])
        else:
            weightsVector.append(0)

    loss = weighted_categorical_crossentropy(weightsVector)
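    # NOTE: the weighted loss above is built but unused; compile() is given the
    # plain "categorical_crossentropy" string instead.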
    model.compile(optimizer=optimizers.Adam(lr=0.01),
                  loss="categorical_crossentropy",
                  metrics=["accuracy"])

    log_directory = properties.log_dir + str(time())
    tensorboard = TensorBoard(log_dir=log_directory)

    filepath = properties.model_dir + "weights-{epoch:02d}-{acc:.2f}.h5"
    checkpoint = ModelCheckpoint(filepath,
                                 monitor='acc',
                                 verbose=1,
                                 save_best_only=True,
                                 mode='max')

    model.fit_generator(data_generator(properties.data_dir, 2, dictionary),
                        steps_per_epoch=len(os.listdir(properties.data_dir)) // 2,
                        epochs=25,
                        callbacks=[tensorboard, checkpoint],
                        verbose=1)

    bins = [
        0, 8, 14, 20, 25, 106, 143, 139, 179, 159, 189, 199, 209, 219, 248,
        265, 259, 283, 307, 323, 334, 341, 380, 384, 23
    ]
    for i in range(0, len(x_test)):
        y_pred = model.predict(x_test[i].reshape(1, 256, 256, 1))
        if (i == 0):
            for k in range(5):
                for l in range(5):
                    fileName = properties.results_dir + str(k) + str(l) + '.csv'
                    toprint = y_pred[0, k, l, :].reshape(400, )
                    np.savetxt(fileName, toprint, delimiter=',')
        y_pred_bin = y_pred.reshape(256, 256, 400)
        y_pred_bin = np.argmax(y_pred_bin, axis=2)
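        # Decode the 400-way bin index as a 20x20 (a, b) grid: bin width 13,
        # +6 to land on the bin center.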
        y_pred_alpha = 13 * (y_pred_bin // 20) + 6
        y_pred_beta = 13 * (y_pred_bin % 20) + 6
        y_pred = np.dstack((x_test[i], y_pred_alpha.reshape(256, 256, 1),
                            y_pred_beta.reshape(256, 256, 1)))
        y_pred = y_pred.astype(np.uint8)
        image = cv2.cvtColor(y_pred, cv2.COLOR_LAB2RGB)
        outputfileName = properties.results_dir + str(i) + '.jpg'
        cv2.imwrite(outputfileName, image)
Example #14
                             eval_metric='mae',
                             nthread=2)
    model.fit(X_train, y_train)
    pred_val_y = model.predict(X_val)
    print(np.mean(np.abs(pred_val_y - y_val)))
    del X_train, y_train
    gc.collect()
    pred_test_y = model.predict(x_test)
    del x_test
    gc.collect()
    print('=' * 60)
    return pred_val_y, pred_test_y


if __name__ == '__main__':
    train_X, train_y, test_X = load_data()
    train_meta = np.zeros(train_y.shape[0])
    test_meta = np.zeros(test_X.shape[0])
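    # Out-of-fold containers, presumably for stacking; the code that fills them
    # is cut off in this snippet.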
    splits = list(
        KFold(n_splits=5, shuffle=True,
              random_state=233).split(train_X, train_y))
    for idx, (train_idx, valid_idx) in enumerate(splits):
        X_train = train_X[train_idx]
        y_train = train_y[train_idx]
        X_val = train_X[valid_idx]
        y_val = train_y[valid_idx]
        pred_val_y, pred_test_y = xgboost_train(X_train,
                                                y_train,
                                                X_val,
                                                y_val,
                                                test_X,
Example #15
    "embedding_size":8,
    "deep_layers":[32,32],
    "dropout_deep":[0.5,0.5,0.5],
    "deep_layer_activation":tf.nn.relu,
    "batch_norm":1,
    "batch_norm_decay":0.995,
    "verbose":True,
    "random_seed":config.RANDOM_SEED,
    "loss_type":"logloss",
    "deep_init_size":50,
    "use_inner":False
}

train_params = {
    "loss_type":"logloss",
    "learning_rate":0.01,
    "epochs":30,
    "optimizer_type":"sgd",
    "batch_size":4
}

if __name__ == '__main__':
    # load data
    dfTrain, X_train, y_train, X_submission = load_data()

    # folds
    folds = list(StratifiedKFold(n_splits=config.NUM_SPLITS, shuffle=True,
                                 random_state=config.RANDOM_SEED).split(X_train, y_train))
    k_fold_cross_valid(dfTrain, X_submission, folds, pnn_params, train_params)

Example #16
def main():
	properties = None    
	if (len(sys.argv) > 1):
		properties = Properties(sys.argv[1])
	else:
		properties = Properties("Local")
    
	x_train, y_train, x_test, y_test = load_data(properties.data_dir, properties.test_dir)

	model = UNetRegressor(64, 3).build_model()

	#def quantile_metric(quantile, y_true, y_pred):
	#	e = y_true - y_pred
	#	metric = K.mean(K.maximum(quantile * e, (quantile - 1) * e), axis=-1)
	#	return metric

	#def loss(y_true, y_pred):
	#	eA = y_true[:, :, :, 0] - y_pred[:, :, :, 0]
	#	eB = y_true[:, :, :, 1] - y_pred[:, :, :, 1]
	#	return K.mean(K.square(eA), axis=-1) + K.mean(K.square(eB), axis=-1)

	def huber_loss(y_true, y_pred, clip_delta=1.0):
		error = y_true - y_pred
		cond = tf.keras.backend.abs(error) < clip_delta
		squared_loss = 0.5 * tf.keras.backend.square(error)
		linear_loss  = clip_delta * (tf.keras.backend.abs(error) - 0.5 * clip_delta)

		return tf.where(cond, squared_loss, linear_loss)

	def crossentropy_loss(y_true, y_pred):
		'''
		bins = np.linspace(0, 260, 21)
		y_true_new = []
		for image_ab in y_true:
			y_binned = np.digitize(image_ab, bins) - 1
			y_binned = y_binned[:, :, 0] * 20 + y_binned[:, :, 1]
			wt = np.zeros((256, 256, 2))
			for i in range(y_binned.shape[0]):
				for j in range(y_binned.shape[1]):
        				wt[i][j][0] = weights[y_binned[i][j] - 1]
        				wt[i][j][1] = weights[y_binned[i][j] - 1]

			y_binned = (y_binned * wt).astype(int)
			y_binned = np.digitize(y_binned, bins) - 1
			y_binned = y_binned[:, :, 0] * 20 + y_binned[:, :, 1] 

			y_true_new.append(y_binned)
		'''

		# K.categorical_crossentropy takes (target, output); the original had them swapped.
		cross_ent = K.categorical_crossentropy(y_true, y_pred)
		# earlier attempts, kept for reference:
		# K.switch(K.equal(y_true, -1), K.zeros_like(y_true), K.square(y_true - y_pred))
		# cross_ent = K.mean(cross_ent, axis=-1)
		return cross_ent

	'''
	model.compile(optimizer=optimizers.Adam(lr=0.001),
			loss=lambda y, f: huber_loss(y, f, clip_delta=0.5),
			metrics=["accuracy"])

	'''
	model.compile(optimizer=optimizers.Adam(lr=0.001),
			loss=lambda y, f: crossentropy_loss(y, f),
			metrics=["accuracy"])

	log_directory = properties.log_dir + str(time())
	tensorboard = TensorBoard(log_dir=log_directory)

	filepath = properties.model_dir + "weights-{epoch:02d}-{acc:.2f}.h5"
	checkpoint = ModelCheckpoint(filepath,
				monitor='acc',
				verbose=1,
				mode='max')
	# (an earlier version also passed save_best_only=True)

	y_train_new = formbins(y_train)
	print(y_train.shape)
	print(y_train_new.shape)

	unique, counts = np.unique(y_train_new, return_counts=True)
	#dictionary = dict(zip(unique, counts))
	#print(len(dictionary))

	weights = 1 - counts / np.sum(counts)
	weights /= np.sum(weights)
	
	dictionary = dict(zip(unique, weights))
	print(dictionary)

	'''
	#print(y_train[:, :, :, 0])
	#print(y_train[:, :, :, 0].shape)
	weight = np.zeros((1000, 256, 256, 1))
	bins = np.linspace(0, 260, 21)
	y_train_new = []
	for k, image_ab in enumerate(y_train):
        	#Create bins - each bin size is kept as 13 so there are roughly 20 bins from 0 to 255
        	#bins = ([  0.,  13.,  26.,  39.,  52.,  65.,  78.,  91., 104., 117., 130.,
        	#143., 156., 169., 182., 195., 208., 221., 234., 247., 260.])
		y_train_bin = np.digitize(image_ab, bins)-1 #returns a value in 0 to 19
		y_train_bin = y_train_bin[:,:,0]*20 + y_train_bin[:,:,1]

		#print(image_ab.shape)
		#print(y_train_bin.shape)
		#exit(0)

		for i in range(y_train_bin.shape[0]):
			for j in range(y_train_bin.shape[1]):
				weight[k][i][j] = dictionary[y_train_bin[i][j]]
	
		#weight = dictionary(y_train_bin)	
		#print(y_train_bin)
		#print(y_train_bin.shape)
		#print(weight)
		#exit(0)

	#print(weight)
	'''

	#for key in dictionary:
		#a = key//20
		#b = key%20
		#alpha = np.full((256, 256), 13*a + 7)
		#beta = np.full((256, 256), 13*b + 7)
		#L = np.full((256, 256), 50)
		#print(13*a + 7, 13*b + 7) 
		#image_lab = np.dstack((L, alpha, beta))
		#print(image_lab.shape)
		#image_lab = image_lab.astype(np.uint8)
		#image = cv2.cvtColor(image_lab, cv2.COLOR_Lab2RGB)
		#outputfileName = properties.results_dir+str(key)+'.jpg'
		#cv2.imwrite(outputfileName, image)
	#fit/fit_generator giving OOM when batch_size is high
	#model.fit(x_train, y_train,
	#	epochs=1,
	#	batch_size=16,
	#	callbacks=[tensorboard, checkpoint],
	#	verbose=1)

	model.fit_generator(data_generator(properties.data_dir, 4, dictionary),
			steps_per_epoch=len(os.listdir(properties.data_dir)) // 4,
			epochs=10,
			callbacks=[tensorboard, checkpoint],
			verbose=1)		

	#Burn! Burn! Burn! How do I know the corresponding 3rd channel for each prediction?
	#y_pred = model.predict_generator(data_generator(test_dir, 10),
	#		steps=len(os.listdir(test_dir)) // 10,
	#		verbose=1)

	'''for i in range(0, len(x_test)):
		y_pred = model.predict(x_test[i].reshape(1, 256, 256, 1))
		y_pred = np.dstack((x_test[i], y_pred.reshape(256, 256, 2)))
		y_pred = y_pred.astype(np.uint8)
		image = cv2.cvtColor(y_pred, cv2.COLOR_LAB2RGB)
		outputfileName = properties.results_dir+str(i)+'.jpg'        
		cv2.imwrite(outputfileName, image)'''

	check_output = []
	for i in range(0, len(x_test)):
	    y_pred = model.predict(x_test[i].reshape(1, 256, 256, 1))
	    #print("y_pred", y_pred)
	    y_pred = y_pred.reshape(256, 256, 100)
	    check_output = y_pred  # keeps the last prediction for inspection below

	    #print("y_pred shape", y_pred.shape)
	    y_pred_bin = np.argmax(y_pred, axis=2)

	    #print("y_pred bin", y_pred_bin)
	    y_pred_alpha = 25.6*(y_pred_bin//5)+12.8
	    #print("y_pred alpha", y_pred_alpha)
	    y_pred_beta = 25.6*(y_pred_bin % 5)+12.8
	    #print("y_pred beta", y_pred_beta)
	    y_pred = np.dstack((x_test[i], y_pred_alpha.reshape(256, 256, 1), y_pred_beta.reshape(256,256,1)))
	    y_pred = y_pred.astype(np.uint8)
	    image = cv2.cvtColor(y_pred, cv2.COLOR_LAB2RGB)
	    outputfileName = properties.results_dir + str(i) + '.jpg'

	    #trueoutputfileName = properties.trueresults_dir + str(i) + '.jpg'
	    cv2.imwrite(outputfileName, image)

	print("###############printing ypred################")
	print(check_output)
	np.savetxt('/content/drive/My Drive/UNet-Colorization/data/data/test/ypred0.csv', np.reshape(check_output[0,0,:],100), delimiter=',')
	np.savetxt('/content/drive/My Drive/UNet-Colorization/data/data/test/ypred1.csv', np.reshape(check_output[0,1,:],100), delimiter=',')
	np.savetxt('/content/drive/My Drive/UNet-Colorization/data/data/test/ypred100.csv', np.reshape(check_output[100,100,:],100), delimiter=',')
	np.savetxt('/content/drive/My Drive/UNet-Colorization/data/data/test/ypred255.csv', np.reshape(check_output[255,255,:],100), delimiter=',')