def train(self):
    predict = self.forward(Mnist.train.image)

    # ------ GP ------
    sess = tf.Session()
    with tf.sg_queue_context(sess):
        tf.sg_init(sess)

        # pull one batch of training images from the queue
        trainf = sess.run([Mnist.train.image])[0]
        n, w, h, c = trainf.shape
        print(trainf.shape)
        np.savetxt('./image.txt', trainf[1, :, :, 0])

        # build a GP-transferred batch by pairing each image
        # with a randomly chosen partner
        transfer = np.zeros((n, w, h, c))
        for i in range(n):
            candi = random.randint(0, n - 1)
            transfer[i, :, :, :] = GP(trainf[i, :, :, :], trainf[candi, :, :, :])

        t = tf.convert_to_tensor(transfer, dtype=tf.float32)
        gp_predict = predict.sg_reuse(input=t)

    sess.close()
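The GP() helper used above (and again in train_with_GP further below) is not defined in this section. A minimal hypothetical sketch, assuming it simply blends two images with `seed` weighting the first input, might look like:

import numpy as np

# Hypothetical sketch only: the real GP() is not shown in this section.
# Assumption: GP(a, b, seed) mixes two images, with `seed` weighting image `a`.
def GP(img_a, img_b, seed=0.5):
    # convex blend of the two inputs; seed in [0, 1]
    return seed * img_a + (1.0 - seed) * img_b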
def sg_print(tensor_list):
    r"""Simple tensor printing function for debugging.
    Prints the value, shape, and data type of each tensor in the list.

    Args:
      tensor_list: A list/tuple of tensors or a single tensor.

    Returns:
      The value of the tensors.

    For example,

    ```python
    import sugartensor as tf
    a = tf.constant([1.])
    b = tf.constant([2.])
    out = tf.sg_print([a, b])
    # Should print [ 1.] (1,) float32
    #              [ 2.] (1,) float32
    print(out)
    # Should print [array([ 1.], dtype=float32), array([ 2.], dtype=float32)]
    ```
    """
    # to list
    if not isinstance(tensor_list, (list, tuple)):
        tensor_list = [tensor_list]

    # evaluate tensor list with queue runner
    with tf.Session() as sess:
        sg_init(sess)
        with tf.sg_queue_context():
            res = sess.run(tensor_list)
            for r in res:
                print(r, r.shape, r.dtype)

    return res
def main():
    g = ModelGraph()

    with tf.Session() as sess:
        tf.sg_init(sess)

        # restore parameters
        saver = tf.train.Saver()
        saver.restore(sess, tf.train.latest_checkpoint('asset/train/ckpt'))

        hits = 0
        num_imgs = 0
        with tf.sg_queue_context(sess):
            # loop until end-of-queue
            while True:
                try:
                    logits, y = sess.run([g.logits, g.y])  # (16, 28)
                    preds = np.squeeze(np.argmax(logits, -1))  # (16,)
                    hits += np.equal(preds, y).astype(np.int32).sum()
                    num_imgs += len(y)
                    print("%d/%d = %.02f" % (hits, num_imgs, float(hits) / num_imgs))
                except tf.errors.OutOfRangeError:
                    break

        print("\nFinal result is\n%d/%d = %.02f" % (hits, num_imgs, float(hits) / num_imgs))
def train(self):
    # train baseline model
    input_ph = tf.placeholder(shape=[batch_size, 28, 28, 1], dtype=tf.float32)
    label_ph = tf.placeholder(shape=[batch_size, ], dtype=tf.int32)

    predict = self.forward(input_ph)

    loss_tensor = tf.reduce_mean(predict.sg_ce(target=label_ph))

    # used to update network parameters
    optim = tf.sg_optim(loss_tensor, optim='Adam', lr=1e-3)

    # use saver to save a new model
    saver = tf.train.Saver()

    sess = tf.Session()
    with tf.sg_queue_context(sess):
        # initialize variables
        tf.sg_init(sess)

        # validation accuracy
        acc = (predict.sg_reuse(input=Mnist.valid.image)
               .sg_softmax()
               .sg_accuracy(target=Mnist.valid.label, name='validation'))

        tf.sg_train(loss=loss_tensor,
                    eval_metric=[acc],
                    max_ep=max_ep,
                    save_dir=save_dir,
                    ep_size=Mnist.train.num_batch,
                    log_interval=10)
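Note that input_ph, label_ph, and optim above are built but never fed; tf.sg_train drives its own loop. If you instead wanted to step the optimizer manually against the placeholders, a minimal sketch (following the feed pattern used in train_with_GP later in this section) would be:

# Minimal manual-training sketch, assuming the same Mnist queue tensors.
for i in range(Mnist.train.num_batch):
    image_array, label_array = sess.run([Mnist.train.image, Mnist.train.label])
    batch_loss = sess.run([loss_tensor, optim],
                          feed_dict={input_ph: image_array,
                                     label_ph: label_array})[0]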
def test(self):
    sess = tf.Session()
    with tf.sg_queue_context(sess):
        tf.sg_init(sess)

        # pull one batch of test images from the queue
        testf = sess.run([Mnist.test.image])[0]
        n, w, h, c = testf.shape

        # stack per-image variants vertically for inspection:
        # clean, three noise levels, and +/-90 degree rotations
        tmp0 = np.zeros((n * w, h))
        tmp02 = np.zeros((n * w, h))
        tmp05 = np.zeros((n * w, h))
        tmp08 = np.zeros((n * w, h))
        tmp90 = np.zeros((n * w, h))
        tmp_90 = np.zeros((n * w, h))
        for i in range(n):
            tmp0[i * w:(i + 1) * w, 0:h] = testf[i, :, :, 0]
            tmp02[i * w:(i + 1) * w, 0:h] = addnoisy(testf[i, :, :, 0], 0.2)
            tmp05[i * w:(i + 1) * w, 0:h] = addnoisy(testf[i, :, :, 0], 0.5)
            tmp08[i * w:(i + 1) * w, 0:h] = addnoisy(testf[i, :, :, 0], 0.8)
            tmp90[i * w:(i + 1) * w, 0:h] = rotate90(testf[i, :, :, 0])
            tmp_90[i * w:(i + 1) * w, 0:h] = rotate_90(testf[i, :, :, 0])

        np.savetxt('./image0.txt', tmp0)
        np.savetxt('./image02.txt', tmp02)
        np.savetxt('./image05.txt', tmp05)
        np.savetxt('./image08.txt', tmp08)
        np.savetxt('./image90.txt', tmp90)
        np.savetxt('./image_90.txt', tmp_90)

        # evaluate accuracy on the clean batch
        testf_tensor = tf.convert_to_tensor(testf, dtype=tf.float32)
        predict = self.forward(testf_tensor)
        acc = (predict.sg_softmax()
               .sg_accuracy(target=Mnist.test.label, name='test'))

        saver = tf.train.Saver()
        saver.restore(sess, tf.train.latest_checkpoint(save_dir))

        total_accuracy = 0
        for i in range(Mnist.test.num_batch):
            total_accuracy += np.sum(sess.run([acc])[0])

        print('Evaluation accuracy: {}'.format(
            float(total_accuracy) / (Mnist.test.num_batch * batch_size)))

    # close session
    sess.close()
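The addnoisy, rotate90, and rotate_90 helpers are referenced here and in the flagged test() below, but are not defined in this section. A plausible minimal sketch, assuming addnoisy replaces a fraction of pixels with random values (salt-and-pepper style) and the rotation helpers rotate by +/-90 degrees:

import numpy as np

# Hypothetical sketches only: the real helpers are not shown in this section.
def addnoisy(img, rate):
    # replace a `rate` fraction of pixels with uniform random noise
    noisy = img.copy()
    mask = np.random.rand(*img.shape) < rate
    noisy[mask] = np.random.rand(int(mask.sum()))
    return noisy

def rotate90(img):
    # rotate 90 degrees counter-clockwise
    return np.rot90(img, k=1)

def rotate_90(img):
    # rotate 90 degrees clockwise
    return np.rot90(img, k=-1)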
def sg_print(tensor_list):
    # to list
    if not isinstance(tensor_list, (list, tuple)):
        tensor_list = [tensor_list]

    # evaluate tensor list with queue runner
    with tf.Session() as sess:
        sg_init(sess)
        with tf.sg_queue_context():
            res = sess.run(tensor_list)
            for r in res:
                print(r, r.shape, r.dtype)

    return res
def test(self):
    print('Testing model {}: addnoise={}, rotate={}, var={}'.format(
        save_dir, addnoise, rotate, var))

    input_ph = tf.placeholder(shape=[batch_size, 28, 28, 1], dtype=tf.float32)
    label_ph = tf.placeholder(shape=[batch_size, ], dtype=tf.int32)

    predict = self.forward(input_ph)
    acc = (predict.sg_softmax().sg_accuracy(target=label_ph, name='test'))

    sess = tf.Session()
    with tf.sg_queue_context(sess):
        tf.sg_init(sess)

        saver = tf.train.Saver()
        saver.restore(sess, tf.train.latest_checkpoint(save_dir))

        total_accuracy = 0
        for i in range(Mnist.test.num_batch):
            [image_array, label_array] = sess.run(
                [Mnist.test.image, Mnist.test.label])

            # optionally corrupt the first image of the batch
            if addnoise:
                image_array[0, :, :, 0] = addnoisy(image_array[0, :, :, 0], var)
            if rotate:
                image_array[0, :, :, 0] = rotate_90(image_array[0, :, :, 0])

            acc_value = sess.run([acc],
                                 feed_dict={
                                     input_ph: image_array,
                                     label_ph: label_array
                                 })[0]
            total_accuracy += np.sum(acc_value)

        print('Evaluation accuracy: {}'.format(
            float(total_accuracy) / (Mnist.test.num_batch * batch_size)))

    # close session
    sess.close()
with tf.Session(config=tf.ConfigProto(allow_soft_placement=True)) as sess:
    # init variables
    tf.sg_init(sess)

    # restore parameters
    saver = tf.train.Saver()
    saver.restore(sess, tf.train.latest_checkpoint('asset/train'))

    # logging
    tf.sg_info('Testing started on %s set at global step[%08d].' %
               (tf.sg_arg().set.upper(), sess.run(tf.sg_global_step())))

    with tf.sg_queue_context():

        # create progress bar
        iterator = tqdm(range(0, int(data.num_batch * tf.sg_arg().frac)),
                        total=int(data.num_batch * tf.sg_arg().frac),
                        initial=0, desc='test', ncols=70, unit='b', leave=False)

        # batch loop
        loss_avg = 0.
        for _ in iterator:

            # run session
            batch_loss = sess.run(loss)

            # loss history update (skip NaN/Inf batches)
            if batch_loss is not None and \
                    not np.any(np.isnan(batch_loss)) and not np.any(np.isinf(batch_loss)):
                loss_avg += np.mean(batch_loss)  # assumed continuation; the snippet was truncated here
#
# generator network
#
with tf.sg_context(name='generator', act='relu', bn=True):
    gen = (x
           .sg_conv(dim=32)
           .sg_conv()
           .sg_conv(dim=4, act='sigmoid', bn=False)
           .sg_periodic_shuffle(factor=2)
           .sg_squeeze())

#
# run generator
#
fig_name = 'asset/train/sample1.png'
fig_name2 = 'asset/train/sample2.png'

with tf.Session() as sess:
    with tf.sg_queue_context(sess):
        tf.sg_init(sess)

        # restore parameters
        saver = tf.train.Saver()
        saver.restore(sess, tf.train.latest_checkpoint('asset/train/ckpt'))

        # run generator
        gt, low, bicubic, sr = sess.run(
            [x.sg_squeeze(), x_nearest, x_bicubic, gen])

        # plot result
        plt.figure(figsize=(4, 3))
def wrapper(**kwargs):
    opt = tf.sg_opt(kwargs)

    # default training options
    opt += tf.sg_opt(lr=0.001,
                     save_dir='asset/train',
                     max_ep=1000,
                     ep_size=100000,
                     save_interval=600,
                     log_interval=60,
                     early_stop=True,
                     lr_reset=False,
                     eval_metric=[],
                     max_keep=5,
                     keep_interval=1,
                     tqdm=True,
                     console_log=False)

    # make directories if not exist
    if not os.path.exists(opt.save_dir + '/log'):
        os.makedirs(opt.save_dir + '/log')
    if not os.path.exists(opt.save_dir + '/ckpt'):
        os.makedirs(opt.save_dir + '/ckpt')

    # find last checkpoint
    last_file = tf.train.latest_checkpoint(opt.save_dir + '/ckpt')
    if last_file:
        ep = start_ep = int(last_file.split('-')[1]) + 1
        start_step = int(last_file.split('-')[2])
    else:
        ep = start_ep = 1
        start_step = 0

    # checkpoint saver
    saver = tf.train.Saver(max_to_keep=opt.max_keep,
                           keep_checkpoint_every_n_hours=opt.keep_interval)

    # summary writer
    summary_writer = tf.train.SummaryWriter(opt.save_dir + '/log',
                                            graph=tf.get_default_graph())

    # add learning rate summary
    with tf.name_scope('summary'):
        tf.scalar_summary('60. learning_rate/learning_rate', _learning_rate)

    # add evaluation metric summaries
    for m in opt.eval_metric:
        tf.sg_summary_metric(m)

    # summary op
    summary_op = tf.merge_all_summaries()

    # create session
    if opt.sess:
        sess = opt.sess
    else:
        # session with multiple GPU support
        sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))

    # initialize variables
    sg_init(sess)

    # restore last checkpoint
    if last_file:
        saver.restore(sess, last_file)

    # set learning rate
    if start_ep == 1 or opt.lr_reset:
        sess.run(_learning_rate.assign(opt.lr))

    # logging
    tf.sg_info('Training started from epoch[%03d]-step[%d].' % (start_ep, start_step))

    try:
        # start data queue runner
        with tf.sg_queue_context(sess):

            # set session mode to train
            tf.sg_set_train(sess)

            # loss history for learning rate decay
            loss, loss_prev, early_stopped = None, None, False

            # time stamps for saving and logging
            last_saved = last_logged = time.time()

            # epoch loop
            for ep in range(start_ep, opt.max_ep + 1):

                # show progress bar
                if opt.tqdm:
                    iterator = tqdm(range(opt.ep_size), desc='train',
                                    ncols=70, unit='b', leave=False)
                else:
                    iterator = range(opt.ep_size)

                # batch loop
                for _ in iterator:

                    # call train function
                    batch_loss = func(sess, opt)

                    # loss history update (exponential moving average)
                    if batch_loss is not None:
                        if loss is None:
                            loss = np.mean(batch_loss)
                        else:
                            loss = loss * 0.9 + np.mean(batch_loss) * 0.1

                    # saving
                    if time.time() - last_saved > opt.save_interval:
                        last_saved = time.time()
                        saver.save(sess, opt.save_dir + '/ckpt/model-%03d' % ep,
                                   write_meta_graph=False,
                                   global_step=sess.run(tf.sg_global_step()))

                    # logging
                    if time.time() - last_logged > opt.log_interval:
                        last_logged = time.time()

                        # set session mode to infer
                        tf.sg_set_infer(sess)

                        # run evaluation op
                        if len(opt.eval_metric) > 0:
                            sess.run(opt.eval_metric)

                        if opt.console_log:  # console logging
                            # log epoch information
                            tf.sg_info('\tEpoch[%03d:lr=%7.5f:gs=%d] - loss = %s' %
                                       (ep, sess.run(_learning_rate),
                                        sess.run(tf.sg_global_step()),
                                        ('NA' if loss is None else '%8.6f' % loss)))
                        else:  # tensorboard logging
                            # run logging op
                            summary_writer.add_summary(
                                sess.run(summary_op),
                                global_step=sess.run(tf.sg_global_step()))

                        # learning rate decay
                        if opt.early_stop and loss_prev:
                            # if the loss is stalling
                            if loss >= 0.95 * loss_prev:
                                current_lr = sess.run(_learning_rate)
                                if current_lr < 5e-6:
                                    # early stopping
                                    early_stopped = True
                                    break
                                else:
                                    # decrease learning rate by half
                                    sess.run(_learning_rate.assign(current_lr / 2.))

                        # update loss history
                        loss_prev = loss

                        # revert session mode to train
                        tf.sg_set_train(sess)

                # log epoch information
                if not opt.console_log:
                    tf.sg_info('\tEpoch[%03d:lr=%7.5f:gs=%d] - loss = %s' %
                               (ep, sess.run(_learning_rate),
                                sess.run(tf.sg_global_step()),
                                ('NA' if loss is None else '%8.6f' % loss)))

                if early_stopped:
                    tf.sg_info('\tEarly stopped ( no loss progress ).')
                    break
    finally:
        # save last epoch
        saver.save(sess, opt.save_dir + '/ckpt/model-%03d' % ep,
                   write_meta_graph=False,
                   global_step=sess.run(tf.sg_global_step()))

        # set session mode to infer
        tf.sg_set_infer(sess)

        # logging
        tf.sg_info('Training finished at epoch[%d]-step[%d].' %
                   (ep, sess.run(tf.sg_global_step())))

        # close session
        if opt.sess is None:
            sess.close()
def wrapper(**kwargs):
    r""" Manages arguments of `tf.sg_opt`.

    Args:
      **kwargs:
        lr: A Python Scalar (optional). Learning rate. Default is .001.
        eval_metric: A list of tensors containing the value to evaluate. Default is [].
        early_stop: Boolean. If True (default), training stops early when both of the
          following conditions are met:
          i. The current loss has stopped improving (current loss >= .95 * previous loss).
          ii. The current learning rate is less than 5e-6.
        lr_reset: Boolean. If True, the learning rate is set to opt.lr when training restarts.
          Otherwise (default), the value of the stored `_learning_rate` is taken.
        save_dir: A string. The root path to which checkpoint and log files are saved.
          Default is `asset/train`.
        max_ep: A positive integer. Maximum number of epochs. Default is 1000.
        ep_size: A positive integer. Total number of batches in an epoch.
          Needed for proper display of the log. Default is 1e5.
        save_interval: A Python scalar. The interval of saving checkpoint files.
          By default, a checkpoint file is written every 600 seconds.
        log_interval: A Python scalar. The interval of recording logs.
          By default, logging is executed every 60 seconds.
        max_keep: A positive integer. Maximum number of recent checkpoints to keep. Default is 5.
        keep_interval: A Python scalar. How often (in hours) to keep checkpoints. Default is 1 hour.
        tqdm: Boolean. If True (default), progress bars are shown.
        console_log: Boolean. If True, the series of losses is shown on the console
          instead of tensorboard. Default is False.
    """
    opt = tf.sg_opt(kwargs)

    # default training options
    opt += tf.sg_opt(lr=0.001,
                     save_dir='asset/train',
                     max_ep=1000,
                     ep_size=100000,
                     save_interval=600,
                     log_interval=60,
                     early_stop=True,
                     lr_reset=False,
                     eval_metric=[],
                     max_keep=5,
                     keep_interval=1,
                     tqdm=True,
                     console_log=False)

    # make directory if not exist
    if not os.path.exists(opt.save_dir):
        os.makedirs(opt.save_dir)

    # find last checkpoint
    last_file = tf.train.latest_checkpoint(opt.save_dir)
    if last_file:
        ep = start_ep = int(last_file.split('-')[1]) + 1
        start_step = int(last_file.split('-')[2])
    else:
        ep = start_ep = 1
        start_step = 0

    # checkpoint saver
    saver = tf.train.Saver(max_to_keep=opt.max_keep,
                           keep_checkpoint_every_n_hours=opt.keep_interval)

    # summary writer
    summary_writer = tf.summary.FileWriter(opt.save_dir, graph=tf.get_default_graph())

    # add learning rate summary
    tf.summary.scalar('learning_r', _learning_rate)

    # add evaluation metric summaries
    for m in opt.eval_metric:
        tf.sg_summary_metric(m)

    # summary op
    summary_op = tf.summary.merge_all()

    # create session
    if opt.sess:
        sess = opt.sess
    else:
        # session with multiple GPU support
        sess = tf.Session(config=tf.ConfigProto(allow_soft_placement=True))

    # initialize variables
    sg_init(sess)

    # restore last checkpoint
    if last_file:
        saver.restore(sess, last_file)

    # set learning rate
    if start_ep == 1 or opt.lr_reset:
        sess.run(_learning_rate.assign(opt.lr))

    # logging
    tf.sg_info('Training started from epoch[%03d]-step[%d].' % (start_ep, start_step))

    try:
        # start data queue runner
        with tf.sg_queue_context(sess):

            # set session mode to train
            tf.sg_set_train(sess)

            # loss history for learning rate decay
            loss, loss_prev, early_stopped = None, None, False

            # time stamps for saving and logging
            last_saved = last_logged = time.time()

            # epoch loop
            for ep in range(start_ep, opt.max_ep + 1):

                # show progress bar
                if opt.tqdm:
                    iterator = tqdm(range(opt.ep_size), desc='train',
                                    ncols=70, unit='b', leave=False)
                else:
                    iterator = range(opt.ep_size)

                # batch loop
                for _ in iterator:

                    # call train function
                    batch_loss = func(sess, opt)

                    # loss history update (exponential moving average)
                    if batch_loss is not None:
                        if loss is None:
                            loss = np.mean(batch_loss)
                        else:
                            loss = loss * 0.9 + np.mean(batch_loss) * 0.1

                    # saving
                    if time.time() - last_saved > opt.save_interval:
                        last_saved = time.time()
                        saver.save(sess, opt.save_dir + '/model-%03d' % ep,
                                   write_meta_graph=False,
                                   global_step=sess.run(tf.sg_global_step()))

                    # logging
                    if time.time() - last_logged > opt.log_interval:
                        last_logged = time.time()

                        # set session mode to infer
                        tf.sg_set_infer(sess)

                        # run evaluation op
                        if len(opt.eval_metric) > 0:
                            sess.run(opt.eval_metric)

                        if opt.console_log:  # console logging
                            # log epoch information
                            tf.sg_info('\tEpoch[%03d:lr=%7.5f:gs=%d] - loss = %s' %
                                       (ep, sess.run(_learning_rate),
                                        sess.run(tf.sg_global_step()),
                                        ('NA' if loss is None else '%8.6f' % loss)))
                        else:  # tensorboard logging
                            # run logging op
                            summary_writer.add_summary(
                                sess.run(summary_op),
                                global_step=sess.run(tf.sg_global_step()))

                        # learning rate decay
                        if opt.early_stop and loss_prev:
                            # if the loss is stalling
                            if loss >= 0.95 * loss_prev:
                                current_lr = sess.run(_learning_rate)
                                if current_lr < 5e-6:
                                    # early stopping
                                    early_stopped = True
                                    break
                                else:
                                    # decrease learning rate by half
                                    sess.run(_learning_rate.assign(current_lr / 2.))

                        # update loss history
                        loss_prev = loss

                        # revert session mode to train
                        tf.sg_set_train(sess)

                # log epoch information
                if not opt.console_log:
                    tf.sg_info('\tEpoch[%03d:lr=%7.5f:gs=%d] - loss = %s' %
                               (ep, sess.run(_learning_rate),
                                sess.run(tf.sg_global_step()),
                                ('NA' if loss is None else '%8.6f' % loss)))

                if early_stopped:
                    tf.sg_info('\tEarly stopped ( no loss progress ).')
                    break
    finally:
        # save last epoch
        saver.save(sess, opt.save_dir + '/model-%03d' % ep,
                   write_meta_graph=False,
                   global_step=sess.run(tf.sg_global_step()))

        # set session mode to infer
        tf.sg_set_infer(sess)

        # logging
        tf.sg_info('Training finished at epoch[%d]-step[%d].' %
                   (ep, sess.run(tf.sg_global_step())))

        # close session
        if opt.sess is None:
            sess.close()
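This wrapper expects a per-batch function func(sess, opt) returning the batch loss, which matches sugartensor's sg_train_func decorator pattern. A hedged usage sketch, assuming the wrapper is exposed as tf.sg_train_func and that a loss tensor and train op already exist in the surrounding graph:

import sugartensor as tf

# Hedged usage sketch; `loss` and `train_op` are assumed graph nodes.
@tf.sg_train_func
def alt_train(sess, opt):
    return sess.run([loss, train_op])[0]  # return this batch's loss

# kwargs become the tf.sg_opt training options documented above
alt_train(log_interval=30, lr=0.0005, max_ep=50)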
#
# generator network
#
with tf.sg_context(name='generator', act='relu', bn=True):
    gen = (x_small
           .sg_conv(dim=32)
           .sg_conv()
           .sg_conv(dim=4, act='sigmoid', bn=False)
           .sg_periodic_shuffle(factor=2)
           .sg_squeeze())

#
# run generator
#
fig_name = 'asset/train/sample.png'

with tf.Session() as sess:
    with tf.sg_queue_context(sess):
        tf.sg_init(sess)

        # restore parameters
        saver = tf.train.Saver()
        saver.restore(sess, tf.train.latest_checkpoint('asset/train/ckpt'))

        # run generator
        gt, low, bicubic, sr = sess.run(
            [x.sg_squeeze(), x_nearest, x_bicubic, gen])

        # plot result
        _, ax = plt.subplots(10, 12, sharex=True, sharey=True)
        for i in range(10):
            for j in range(3):
                ax[i][j * 4].imshow(low[i * 3 + j], 'gray')
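sg_periodic_shuffle(factor=2) is the ESPCN-style sub-pixel upsampling (see the paper cited later in this section): it rearranges each group of factor^2 channels into a factor x factor spatial block. A rough numpy equivalent of depth-to-space, for intuition only and not necessarily sugartensor's exact channel ordering:

import numpy as np

# Rough sketch of periodic shuffle (depth-to-space) on an NHWC array.
def periodic_shuffle(x, factor=2):
    n, h, w, c = x.shape
    out_c = c // (factor * factor)
    x = x.reshape(n, h, w, factor, factor, out_c)
    x = x.transpose(0, 1, 3, 2, 4, 5)  # interleave row/col sub-pixel offsets
    return x.reshape(n, h * factor, w * factor, out_c)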
def train_with_GP(self):
    input_ph = tf.placeholder(shape=[batch_size, 28, 28, 1], dtype=tf.float32)
    label_ph = tf.placeholder(shape=[batch_size, ], dtype=tf.int32)

    predict = self.forward(input_ph)

    loss_tensor = tf.reduce_mean(predict.sg_ce(target=label_ph))

    # used to update network parameters
    optim = tf.sg_optim(loss_tensor, optim='Adam', lr=1e-3)

    # use saver to save a new model
    saver = tf.train.Saver()

    sess = tf.Session()
    with tf.sg_queue_context(sess):
        # initialize variables
        tf.sg_init(sess)

        # train with GP guiding
        for e in range(max_ep):
            previous_loss = None
            for i in range(Mnist.train.num_batch):
                [image_array, label_array] = sess.run(
                    [Mnist.train.image, Mnist.train.label])

                if e == 0 or e == 1:
                    # first and second epoch: train without noisy images
                    loss = sess.run([loss_tensor, optim],
                                    feed_dict={
                                        input_ph: image_array,
                                        label_ph: label_array
                                    })[0]
                    print('Baseline loss = ', loss)
                elif e == 2:
                    # third epoch: train with the GP image and the original image
                    gpIn1 = np.squeeze(image_array)
                    gpIn2 = np.zeros((28, 28))
                    image_gp = GP(gpIn1, gpIn2, seed=0.8)
                    image_gp2 = image_gp[np.newaxis, ...]
                    image_gp2 = image_gp2[..., np.newaxis]

                    loss = sess.run([loss_tensor, optim],
                                    feed_dict={
                                        input_ph: image_array,
                                        label_ph: label_array
                                    })[0]
                    print('GP without noisy loss = ', loss)

                    loss = sess.run([loss_tensor, optim],
                                    feed_dict={
                                        input_ph: image_gp2,
                                        label_ph: label_array
                                    })[0]
                    print('GP loss = ', loss)
                else:
                    # later epochs: train with GP evolution
                    gpIn1 = np.squeeze(image_array)
                    gpIn2 = np.zeros((28, 28))
                    image_gp = GP(gpIn1, gpIn2, seed=random.random())
                    image_gp2 = image_gp[np.newaxis, ...]
                    image_gp2 = image_gp2[..., np.newaxis]

                    loss = sess.run([loss_tensor, optim],
                                    feed_dict={
                                        input_ph: image_array,
                                        label_ph: label_array
                                    })[0]
                    print('GP without noisy loss = ', loss)

                    loss = sess.run([loss_tensor, optim],
                                    feed_dict={
                                        input_ph: image_gp2,
                                        label_ph: label_array
                                    })[0]
                    print('GP loss = ', loss)

                    # if the loss improved, keep evolving the GP image
                    if previous_loss is not None and loss < previous_loss:
                        for _ in range(5):
                            loss = sess.run([loss_tensor, optim],
                                            feed_dict={
                                                input_ph: image_gp2,
                                                label_ph: label_array
                                            })[0]
                            gpIn1 = image_gp2
                            image_gp2[0, :, :, 0] = GP(gpIn1[0, :, :, 0], gpIn2,
                                                       seed=random.random())
                            print('GP EV loss = ', loss)
                    previous_loss = loss

            saver.save(sess, os.path.join(save_dir, 'gp_model'), global_step=e)

    # close session
    sess.close()
def sg_print(tensor):
    with tf.Session() as sess:
        sg_init(sess)
        with tf.sg_queue_context():
            res = sess.run(tensor)
            print(res, res.shape, res.dtype)
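Usage mirrors the docstring example in the list-taking version of sg_print above; for instance:

import sugartensor as tf

a = tf.constant([1.])
tf.sg_print(a)  # prints: [ 1.] (1,) float32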
def main(argv):

    # set log level to debug
    tf.sg_verbosity(10)

    #
    # hyper parameters
    #
    size = 160, 147
    batch_size = 1  # batch size

    #
    # inputs
    #
    pngName = argv
    png = tf.read_file(pngName)
    myPNG = tf.image.decode_png(png)

    y = convert_image(pngName)
    x = tf.reshape(y, [1, 28, 28, 1])
    print(x)

    # corrupted image
    x_small = tf.image.resize_bicubic(x, (14, 14))
    x_bicubic = tf.image.resize_bicubic(x_small, (28, 28)).sg_squeeze()
    x_nearest = tf.image.resize_images(
        x_small, (28, 28), tf.image.ResizeMethod.NEAREST_NEIGHBOR).sg_squeeze()

    #
    # create generator
    #
    # I've used the ESPCN scheme:
    # http://www.cv-foundation.org/openaccess/content_cvpr_2016/papers/Shi_Real-Time_Single_Image_CVPR_2016_paper.pdf
    #

    # generator network
    with tf.sg_context(name='generator', act='relu', bn=True):
        gen = (x
               .sg_conv(dim=32)
               .sg_conv()
               .sg_conv(dim=4, act='sigmoid', bn=False)
               .sg_periodic_shuffle(factor=2)
               .sg_squeeze())

    #
    # run generator
    #
    fileName = "inPython.png"
    fig_name = "genImages/" + fileName

    print("start")
    with tf.Session() as sess:
        with tf.sg_queue_context(sess):
            tf.sg_init(sess)

            # restore parameters
            saver = tf.train.Saver()
            saver.restore(
                sess, tf.train.latest_checkpoint('python/asset/train/ckpt'))

            # run generator
            gt, low, bicubic, sr = sess.run(
                [x.sg_squeeze(), x_nearest, x_bicubic, gen])

            # plot result
            plt.figure(figsize=(1, 1))
            hr = plt.imshow(sr[0], 'gray')
            plt.axis('tight')
            plt.axis('off')
            plt.savefig(fig_name, dpi=600)
            plt.close()

    print("done")
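A hypothetical invocation, assuming this script is run directly with the PNG path as its only argument:

if __name__ == '__main__':
    import sys
    main(sys.argv[1])  # e.g. python generate.py input.png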