# ===== Example #1 =====
            # Feed one LR image (as a batch of 1, float32) through the
            # network and save the resulting output image(s).
            input_im = np.array([inference_data.inputs[i]]).astype(np.float32)
            path_lr = inference_data.paths_LR[i]
            results = sess.run(save_fetch,
                               feed_dict={
                                   inputs_raw: input_im,
                                   path_LR: path_lr
                               })
            filesets = save_images(results, FLAGS)
            # NOTE(review): original was `for i, f in enumerate(filesets)`,
            # which shadowed the enclosing loop index `i` with an unused index.
            for f in filesets:
                print('evaluate image', f['name'])

# The training mode
elif FLAGS.mode == 'train':
    # Load data for training and testing
    # ToDo Add online downscaling
    data = data_loader(FLAGS)
    print('Data count = %d' % (data.image_count))

    # Connect to the network:
    # build the graph for the task selected on the command line.
    if FLAGS.task == 'SRGAN':
        Net = SRGAN(data.inputs, data.targets, FLAGS)
    elif FLAGS.task == 'SRResnet':
        Net = SRResnet(data.inputs, data.targets, FLAGS)
    else:
        raise NotImplementedError('Unknown task type')

    print('Finish building the network!!!')

    # Convert the images output from the network
    # (graph continues past this view)
    with tf.name_scope('convert_image'):
        # Deprocess the images outputed from the model
# ===== Example #2 =====
# Check the output_dir is given
if FLAGS.output_dir is None:
    raise ValueError('The output directory is needed')

# Create the checkpoint directory and its 'buffer' subdirectory.
# os.makedirs(exist_ok=True) replaces the exists()+mkdir() pair: it avoids
# the check-then-create race and also creates the parent output_dir.
os.makedirs(os.path.join(FLAGS.output_dir, 'buffer'), exist_ok=True)

# pyramid = get_pyramid(targets, pyramid_layers) # a list store the pyramid of target textures image
pyrm_layers = FLAGS.pyrm_layers
# Bare image name: last path component with the text after the first '.'
# stripped (kept as-is; splitext would behave differently on multi-dot names).
tar_name = FLAGS.target_dir.split('/')[-1]
tar_name = tar_name.split('.')[0]
targets0 = data_loader(img_dir=None, FLAGS=FLAGS)
# targets = to_tensor(targets0)
'''
filleds = os.listdir(os.path.join(FLAGS.output_dir,'buffer'))
n = 2048
m = n//512
filleds = [int((_.split('_')[1]).split('.')[0]) for _ in filleds if str(n) in _]
def ind0(i):
    return i//(m//2)*2*m + i%(m//2)*2
flag = [(i in filleds) for i in range(m**2)]
filled_inds = [flag[ind0(i)] and flag[ind0(i)+1] and flag[ind0(i)+m] and flag[ind0(i)+m+1] \
               for i in range((m//2)**2)]
unfilled_inds = [i for i in range((m//2)**2) if filled_inds[i]==False]
begin = FLAGS.texture_shape[0]//n - 1
# ===== Example #3 =====
# Create the directory that holds the checkpoints.
# os.makedirs(exist_ok=True) avoids the check-then-create race of the
# original os.path.exists() + os.mkdir() pair and creates missing parents.
os.makedirs(FLAGS.output_dir, exist_ok=True)

if FLAGS.summary_dir is None:
    raise ValueError('The summary directory is needed')

# Create the directory that holds the summary/event files.
os.makedirs(FLAGS.summary_dir, exist_ok=True)

# Load data for training and testing
# ToDo Add online downscaling

targets, initials = data_loader(FLAGS)

# How to initialize the synthesis image.
if FLAGS.task_mode == 'texture_synthesis':
    # Initialization image must be noise; keep `initials` as loaded.
    pass
    #contents = initials  # initialization image must be noise
elif FLAGS.task_mode == 'style_transfer':
    contents = initials
    if FLAGS.initials == 'content image':
        initials = contents
    elif FLAGS.initials == 'noise':
        contents_shape = contents.shape[1:3]
        initials = None
    elif FLAGS.initials == 'style image':
        initials = targets
    # NOTE(review): an unrecognized FLAGS.initials value silently keeps the
    # loaded `initials` — confirm whether that fall-through is intended.
# ===== Example #4 =====
# Dump the parsed configuration so the run is reproducible from the log.
print_configuration_op(FLAGS)

# Check the output_dir is given
if FLAGS.output_dir is None:
    raise ValueError('The output directory is needed')

# Create the checkpoint directory. os.makedirs(exist_ok=True) avoids the
# check-then-create race of os.path.exists() + os.mkdir() and also creates
# any missing parent directories.
os.makedirs(FLAGS.output_dir, exist_ok=True)

# pyramid = get_pyramid(targets, pyramid_layers) # a list store the pyramid of target textures image
pyrm_layers = FLAGS.pyrm_layers
start = time.time()
# Bare image name: last path component with the text after the first '.'
# stripped (kept as-is; splitext would behave differently on multi-dot names).
tar_name = FLAGS.target_dir.split('/')[-1]
tar_name = tar_name.split('.')[0]
targets0 = data_loader(FLAGS)

# Coarse-to-fine synthesis over an image pyramid: iterate from the coarsest
# layer (pyrm_layers - 1) down to the full-resolution layer 0.
for i in range(pyrm_layers - 1, -1, -1):
    targets = tf.constant(targets0, dtype=tf.float32)
    # Full-resolution spatial size of the target texture
    # (assumes NHWC layout — TODO confirm against data_loader).
    w0, h0 = [targets.shape[1], targets.shape[2]]
    # Downscale by a factor of 2**i for the current pyramid layer.
    w, h = [w0 // (2**i), h0 // (2**i)]
    target = tf.image.resize_bicubic(targets, [w, h])
    print('\nCurrent image : ', tar_name)
    print('Image size : ', target.shape)
    print('Now in pyramid layer %i, total %i layer (from L%i to L0)\n' %
          (i, pyrm_layers, pyrm_layers - 1))
    if i == pyrm_layers - 1:  # if now initial the large scale gen_output
        # texture_shape of [-1, -1] means "use the target's own size".
        if FLAGS.texture_shape == [-1, -1]:
            FLAGS.texture_shape = [w, h]
        else:
            FLAGS.texture_shape = [ FLAGS.texture_shape[0]//(2**i), \
# ===== Example #5 =====
        # Run every LR input through the network, one image at a time,
        # and save the corresponding output image(s).
        max_iter = len(inference_data.inputs)
        print('Evaluation starts!!')
        for i in range(max_iter):
            input_im = np.array([inference_data.inputs[i]]).astype(np.float32)
            path_lr = inference_data.paths_LR[i]
            results = sess.run(save_fetch, feed_dict={inputs_raw: input_im, path_LR: path_lr})
            filesets = save_images(results, FLAGS)
            # NOTE(review): original was `for i, f in enumerate(filesets)`,
            # which shadowed the outer loop index `i` with an unused index.
            for f in filesets:
                print('evaluate image', f['name'])


# The training mode
elif FLAGS.mode == 'train':
    # Load data for training and testing
    # ToDo Add online downscaling
    data = data_loader(FLAGS)
    print('Data count = %d' % (data.image_count))

    # Connect to the network:
    # build the graph for the task selected on the command line.
    if FLAGS.task == 'SRGAN':
        Net = SRGAN(data.inputs, data.targets, FLAGS)
    elif FLAGS.task =='SRResnet':
        Net = SRResnet(data.inputs, data.targets, FLAGS)
    else:
        raise NotImplementedError('Unknown task type')

    print('Finish building the network!!!')

    # Convert the images output from the network
    # (graph continues past this view)
    with tf.name_scope('convert_image'):
        # Deprocess the images outputed from the model