Example #1
    g_vars, d_vars, losses = model.build_graph_with_losses(images,
                                                           config=config)

    # validation images
    if config.VAL:
        with open(config.DATA_FLIST[config.DATASET][1]) as f:
            val_fnames = f.read().splitlines()
        # progress monitor by visualizing static images
        for i in range(config.STATIC_VIEW_SIZE):
            static_fnames = val_fnames[i:i + 1]
            static_images = ng.data.DataFromFNames(
                static_fnames,
                config.IMG_SHAPES,
                nthreads=1,
                random_crop=config.RANDOM_CROP).data_pipeline(1)
            static_inpainted_images = model.build_static_infer_graph(
                static_images, config, name='static_view/%d' % i)

    # training settings
    lr = tf.get_variable('lr',
                         shape=[],
                         trainable=False,
                         initializer=tf.constant_initializer(1e-4))
    d_optimizer = tf.train.AdamOptimizer(lr, beta1=0.5, beta2=0.9)
    g_optimizer = d_optimizer

    # gradient processor
    if config.GRADIENT_CLIP:
        gradient_processor = lambda grad_var: (tf.clip_by_average_norm(
            grad_var[0], config.GRADIENT_CLIP_VALUE), grad_var[1])
    else:
        gradient_processor = None
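
For context, gradient_processor above is a function mapped over each (gradient, variable) pair before the weight update is built. Below is a minimal sketch of that pattern in plain TensorFlow 1.x, using a toy variable and loss in place of the generator graph and a placeholder clip value instead of config.GRADIENT_CLIP_VALUE; the names are illustrative, not part of the original script.

    import tensorflow as tf

    # Toy stand-ins for the generator variables and loss produced by
    # build_graph_with_losses (illustrative only).
    x = tf.get_variable('x', shape=[3], initializer=tf.zeros_initializer())
    g_loss = tf.reduce_sum(tf.square(x - 1.0))
    g_vars = [x]

    lr = tf.get_variable('lr', shape=[], trainable=False,
                         initializer=tf.constant_initializer(1e-4))
    g_optimizer = tf.train.AdamOptimizer(lr, beta1=0.5, beta2=0.9)

    # Same shape of processor as above: clip each gradient by its average norm.
    clip_value = 0.1  # placeholder for config.GRADIENT_CLIP_VALUE
    gradient_processor = lambda grad_var: (
        tf.clip_by_average_norm(grad_var[0], clip_value), grad_var[1])

    # compute_gradients returns (gradient, variable) pairs; the processor is
    # applied to each pair before apply_gradients builds the update op.
    grads_and_vars = g_optimizer.compute_gradients(g_loss, var_list=g_vars)
    grads_and_vars = [gradient_processor(gv) for gv in grads_and_vars
                      if gv[0] is not None]
    train_op = g_optimizer.apply_gradients(grads_and_vars)
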
Example #2

 # validation images
 if FLAGS.val:
     with open(FLAGS.data_flist[FLAGS.dataset][1]) as f:
         val_fnames = f.read().splitlines()
     if FLAGS.guided:
         val_fnames = [(fname, fname[:-4] + '_edge.jpg')
                       for fname in val_fnames]
     # progress monitor by visualizing static images
     for i in range(FLAGS.static_view_size):
         static_fnames = val_fnames[i:i + 1]
         static_images = ng.data.DataFromFNames(
             static_fnames,
             img_shapes,
             nthreads=1,
             random_crop=FLAGS.random_crop).data_pipeline(1)
         static_inpainted_images = model.build_static_infer_graph(
             FLAGS, static_images, name='static_view/%d' % i)
 # training settings
 lr = tf.get_variable('lr',
                      shape=[],
                      trainable=False,
                      initializer=tf.constant_initializer(1e-4))
 d_optimizer = tf.train.AdamOptimizer(lr, beta1=0.5, beta2=0.999)
 g_optimizer = d_optimizer
 # train discriminator with secondary trainer, should initialize before
 # primary trainer.
 # discriminator_training_callback = ng.callbacks.SecondaryTrainer(
 discriminator_training_callback = ng.callbacks.SecondaryMultiGPUTrainer(
     num_gpus=FLAGS.num_gpus_per_job,
     pstep=1,
     optimizer=d_optimizer,
     var_list=d_vars,
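
In the snippet above the discriminator is updated through a secondary-trainer callback while the primary trainer drives the generator. The sketch below shows the alternating update this arrangement amounts to, written in plain TensorFlow 1.x with toy losses and variables; it is an illustration of the pattern, not neuralgym's API.

    import tensorflow as tf

    # Toy generator/discriminator parameters and losses (illustrative only).
    g_w = tf.get_variable('g_w', shape=[], initializer=tf.zeros_initializer())
    d_w = tf.get_variable('d_w', shape=[], initializer=tf.zeros_initializer())
    g_loss = tf.square(g_w - 1.0)
    d_loss = tf.square(d_w + 1.0)

    lr = tf.get_variable('lr', shape=[], trainable=False,
                         initializer=tf.constant_initializer(1e-4))
    d_optimizer = tf.train.AdamOptimizer(lr, beta1=0.5, beta2=0.999)
    g_optimizer = d_optimizer

    d_train_op = d_optimizer.minimize(d_loss, var_list=[d_w])
    g_train_op = g_optimizer.minimize(g_loss, var_list=[g_w])

    with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        for step in range(10):
            # Discriminator step first (what the secondary-trainer callback
            # runs once every pstep primary steps; pstep=1 above).
            sess.run(d_train_op)
            # Generator step driven by the primary trainer.
            sess.run(g_train_op)
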
Example #3

 images = data.data_pipeline(config.BATCH_SIZE)
 # main model
 model = InpaintCAModel()
 g_vars, d_vars, losses = model.build_graph_with_losses(
     images[0], config=config,
     exclusionmask=images[exclusionmask_index] if config.EXC_MASKS else None,
     mask=None if config.GEN_MASKS else images[mask_index])
 # validation images
 if config.VAL:
     with open(config.DATA_FLIST[config.DATASET][1]) as f:
         val_fnames = [(l.split(' ')[0], l.split(' ')[1])
                       for l in f.read().splitlines()]
     # progress monitor by visualizing static images
     for i in range(config.STATIC_VIEW_SIZE):
         static_fnames = val_fnames[i:i+1]
         static_images = ng.data.DataFromFNames(
             static_fnames, config.IMG_SHAPES, nthreads=1,
             random_crop=config.RANDOM_CROP, random_flip=config.RANDOM_FLIP).data_pipeline(1)
         static_inpainted_images = model.build_static_infer_graph(
             static_images[0], config, name='static_view/%d' % i,
             exclusionmask=images[exclusionmask_index] if config.EXC_MASKS else None)
 # training settings
 lr = tf.get_variable(
     'lr', shape=[], trainable=False,
     initializer=tf.constant_initializer(1e-4))
 d_optimizer = tf.train.AdamOptimizer(lr, beta1=0.5, beta2=0.9)
 g_optimizer = d_optimizer
 # gradient processor
 if config.GRADIENT_CLIP:
     gradient_processor = lambda grad_var: (
         tf.clip_by_average_norm(grad_var[0], config.GRADIENT_CLIP_VALUE),
         grad_var[1])
 else:
     gradient_processor = None
 # log dir
 log_prefix = 'model_logs/' + '_'.join([