# Build the MNIST classification graph and loss inside a TF1 session.
# NOTE(review): the original source was collapsed onto one physical line;
# the statement order below follows it exactly, with graph construction
# assumed to live inside the session context — confirm against history.
with tf.Session() as sess:
    # create summary environment: per-run TensorBoard log directory,
    # timestamped so successive runs do not overwrite each other
    current_time = datetime.datetime.now().strftime('%Y%m%d-%H%M%S')
    log_dir = 'logs/' + current_time

    # parameter configuration
    lr = 0.001    # learning rate
    batchsz = 10  # batch size
    epoch = 30    # training period

    # prepare training dataset and test dataset
    # train: 55000, test: 10000, validation: 5000
    mnist = read_data_sets('mnist_data/')  # load MNIST dataset
    data = provide_data(mnist)

    # create input and output placeholders; `inputs` (not `input`) avoids
    # shadowing the Python builtin — the graph tensor name stays 'input'
    inputs = tf.placeholder(dtype=tf.float32, shape=[None, 28, 28, 1],
                            name='input')
    labels = tf.placeholder(dtype=tf.float32, shape=[None, 10],
                            name='labels')

    # create instance of neural network and run the forward pass
    net = neuralNetwork()
    logits = net.forward(inputs)

    # mean softmax cross-entropy over the batch; `labels` is a plain
    # one-hot placeholder, so no gradient flows through it
    cross_entropy = tf.nn.softmax_cross_entropy_with_logits(logits=logits,
                                                            labels=labels)
    loss_operation = tf.reduce_mean(cross_entropy, name="loss")
# Build the CIFAR-10 classification graph (TF1 compat-v1 API).

# parameter configuration
# TODO: change learning rate to decayed learning rate
lr = 0.001    # learning rate
batchsz = 128  # batch size
epoch = 30    # training period
# input spatial size — presumably CIFAR images (32x32) are upscaled to
# 224x224 inside provide_data / the network; TODO confirm
IMAGE_SIZE = 224

# prepare training dataset and test dataset
# CIFAR-10: 50000 training images, 10000 test images
# (previous comment carried MNIST's 55000/10000/5000 split by mistake)
cifar_10 = Cifar_10()
f_path = "data/cifar-10-batches-py/"
cifar_10.read_data_sets(f_path)  # load CIFAR-10 dataset
data = provide_data(cifar_10)

# create input and output placeholders; `inputs` (not `input`) avoids
# shadowing the Python builtin — the graph tensor name stays 'input'
inputs = tf.compat.v1.placeholder(dtype=tf.float32,
                                  shape=[None, IMAGE_SIZE, IMAGE_SIZE, 3],
                                  name='input')
labels = tf.compat.v1.placeholder(dtype=tf.float32, shape=[None, 10],
                                  name='labels')
# scalar dropout probability fed at train time; defaults to 0.0 —
# presumably "no dropout" at inference, verify against neuralNetwork.forward
prob = tf.compat.v1.placeholder_with_default(0.0, shape=(), name='prob')

# create instance of neural network and run the forward pass
net = neuralNetwork()
logits = net.forward(inputs, prob)