def convert(image):
    """Min-max normalize an image tensor and convert it to uint8 (saturating) for export."""
    image = data.norm_min_max_tf(image)
    return tf.image.convert_image_dtype(image, dtype=tf.uint8, saturate=True)
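
# data.norm_min_max_tf is project code that is not shown in this section. As a point
# of reference, a minimal min-max rescaling in TF 1.x could look like the sketch
# below (an assumption about its behaviour, not the actual implementation; the
# function name is hypothetical and the sketch is not used by the pipeline below):
def _norm_min_max_sketch(image, eps=1e-12):
    # shift and scale the tensor so its values span [0, 1]
    lo = tf.reduce_min(image)
    hi = tf.reduce_max(image)
    return (image - lo) / (hi - lo + eps)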
# determine the sizes
roisize = 64
EPS = 1e-12

# create placeholders for batch feeding
im_xdim, im_ydim = scale_size, scale_size
inputs_tf = tf.placeholder(tf.float32, shape=(batch_size, im_xdim, im_ydim, 1), name='inputs_tf')
outputs_tf = tf.placeholder(tf.float32, shape=(batch_size, im_xdim, im_ydim, 1), name='outputs_tf')
spikes_tf = tf.placeholder(tf.float32, shape=(batch_size, im_xdim, im_ydim, 1), name='spikes_tf')

# inputs and targets are [batch_size, height, width, channels]
C2Pmodel = model.create_model(inputs_tf, outputs_tf, ndf, ngf, EPS,
                              gan_weight, l1_weight, l1_sparse_weight, lr, beta1)

# reverse any processing on the images so they can be written to disk or displayed to the user
outputs = data.deprocess_tf(C2Pmodel.outputs)
outputs = data.norm_min_max_tf(outputs)
outputs = tf.reduce_sum(outputs, axis=[0, 3], name='outputs')

# define the saver
saver = tf.train.Saver(max_to_keep=1)

# op that initializes the variables (i.e. assigns their default values)
init = tf.global_variables_initializer()

with tf.Session() as sess:
    # run the initializer
    sess.run(init)
    print("Variables have been initialized!")
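
    # ------------------------------------------------------------------
    # Illustrative sketch only (the project's actual training loop is not
    # part of this section): feed one all-zeros dummy batch through the
    # placeholders and fetch the fused output image, just to show how the
    # graph defined above is driven. Real batches would come from the
    # project's data pipeline instead of np.zeros.
    # ------------------------------------------------------------------
    import numpy as np  # assumed available; imported here only for the sketch
    dummy = np.zeros((batch_size, im_xdim, im_ydim, 1), dtype=np.float32)
    example_feed = {inputs_tf: dummy, outputs_tf: dummy, spikes_tf: dummy}
    fused_image = sess.run(outputs, feed_dict=example_feed)
    print("fused output image shape:", fused_image.shape)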