Example #1
def main(_):
    if not os.path.exists(args.checkpoint_dir):
        os.makedirs(args.checkpoint_dir)
    if not os.path.exists(args.sample_dir):
        os.makedirs(args.sample_dir)
    if not os.path.exists(args.test_dir):
        os.makedirs(args.test_dir)
    cfg = dict({'allow_soft_placement': False, 'log_device_placement': False})
    cfg['gpu_options'] = tf.GPUOptions(per_process_gpu_memory_fraction=0)
    cfg['allow_soft_placement'] = True
    with tf.device('/cpu:0'):
        with tf.Session(config=tf.ConfigProto(**cfg)) as sess:
            model = pix2pix(sess,
                            image_size=args.fine_size,
                            batch_size=args.batch_size,
                            output_size=args.fine_size,
                            dataset_name=args.dataset_name,
                            checkpoint_dir=args.checkpoint_dir,
                            sample_dir=args.sample_dir,
                            mean=args.mean,
                            std=args.std)

            if args.phase == 'train':
                model.train(args)
            else:
                model.sample_model_for_test()
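Note: every entry point in this listing references a module-level `args` namespace and uses the TensorFlow 1.x `main(_)` / `tf.app.run()` convention. The exact flags vary by project; the following is a minimal, hypothetical argparse setup (flag names inferred from the attributes used above, defaults illustrative) showing how such a script is typically wired up:

import argparse
import tensorflow as tf

parser = argparse.ArgumentParser(description='pix2pix')
parser.add_argument('--phase', default='train', help="'train' or 'test'")
parser.add_argument('--dataset_name', default='facades')
parser.add_argument('--batch_size', type=int, default=1)
parser.add_argument('--fine_size', type=int, default=256)
parser.add_argument('--checkpoint_dir', default='./checkpoint')
parser.add_argument('--sample_dir', default='./sample')
parser.add_argument('--test_dir', default='./test')
args = parser.parse_args()

if __name__ == '__main__':
    tf.app.run()  # TF1 convention: invokes main(_) with args available at module level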
Example #2
def main(_):
    if not os.path.exists(args.checkpoint_dir):
        os.makedirs(args.checkpoint_dir)
    if not os.path.exists(args.sample_dir):
        os.makedirs(args.sample_dir)
    if not os.path.exists(args.test_dir):
        os.makedirs(args.test_dir)

    if not os.path.exists(args.visual_dir):
        os.makedirs(args.visual_dir)

    with tf.Session() as sess:
        model = pix2pix(sess,
                        image_size=args.fine_size,
                        batch_size=args.batch_size,
                        output_size=args.fine_size,
                        dataset_name=args.dataset_name,
                        checkpoint_dir=args.checkpoint_dir,
                        sample_dir=args.sample_dir,
                        visual_dir=args.visual_dir)

        tic = time.clock()
        if args.phase == 'train':
            model.train(args)

            toc1 = time.clock()
            print('training using time:', str(toc1 - tic))

        elif args.phase == 'test':
            model.test_new_temp(args)
        else:  # run all
            model.train(args)
            model.test_new_temp(args)
Example #3
File: main.py Project: ItzikMalkiel/AGB
def main(_):

    tf.set_random_seed(42)
    np.random.seed(42)

    if not os.path.exists(args.checkpoint_dir):
        os.makedirs(args.checkpoint_dir)
    if not os.path.exists(args.sample_dir):
        os.makedirs(args.sample_dir)
    if not os.path.exists(args.test_dir):
        os.makedirs(args.test_dir)

    gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=0.333)
    config = tf.ConfigProto(gpu_options=gpu_options, allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    config.log_device_placement = True

    with tf.Session(config=config) as sess:
        model = pix2pix(sess, L1_lambda, useAdaptiveLossBalancing, adaptiveGradBalancingRatio, gan_weight, decayD, image_size=args.fine_size, batch_size=args.batch_size,
                        output_size=args.fine_size, dataset_name=args.dataset_name,
                        checkpoint_dir=args.checkpoint_dir, sample_dir=args.sample_dir)
        if args.phase == 'train':
            model.train(args, RUN_NAME, loadModel)
        else:
            model.test(args)
Example #4
def main(_):
    if not os.path.exists(args.checkpoint_dir):
        os.makedirs(args.checkpoint_dir)
    if not os.path.exists(args.sample_dir):
        os.makedirs(args.sample_dir)
    if not os.path.exists(args.test_dir):
        os.makedirs(args.test_dir)

    with tf.Session() as sess:
        model = pix2pix(sess,
                        image_size=args.fine_size,
                        batch_size=args.batch_size,
                        output_size=args.fine_size,
                        dataset_name=args.dataset_name,
                        checkpoint_dir=args.checkpoint_dir,
                        load_checkpoint=args.load_checkpoint,
                        sample_dir=args.sample_dir,
                        L1_lambda=args.L1_lambda,
                        latent_lambda=args.latent_lambda,
                        ssim_lambda=args.ssim_lambda,
                        test_name=args.test_name)

        if args.phase == 'train':
            model.train(args)
        else:
            model.test(args)
Example #5
File: main.py Project: neklyud/CombinedGAN
def main(_):
    with tf.Session() as sess:
        model = pix2pix(sess,
                        image_size=args.fine_size,
                        batch_size=args.batch_size,
                        output_size=args.fine_size,
                        gf_dim=args.ngf,
                        df_dim=args.ndf,
                        L1_lambda=args.L1_lambda,
                        input_c_dim=args.input_nc,
                        output_c_dim=args.output_nc,
                        dataset_name=args.dataset_name,
                        checkpoint_dir=args.checkpoint_dir,
                        sample_dir=args.sample_dir,
                        train_size=args.train_size,
                        data_load=args.data_load,
                        phase=args.phase,
                        save_latest_freq=args.save_latest_freq,
                        print_freq=args.print_freq,
                        deviation=args.deviation)

        if args.phase == 'train':
            model.train(args)
        else:
            model.test(args)
Example #6
def main(_):

    if not os.path.exists(args.checkpoint_dir):
        os.makedirs(args.checkpoint_dir)
    if not os.path.exists(args.sample_dir):
        os.makedirs(args.sample_dir)
    if not os.path.exists(args.test_dir):
        os.makedirs(args.test_dir)

    with tf.Session() as sess:
        model = pix2pix(sess,
                        image_size=args.fine_size,
                        batch_size=args.batch_size,
                        output_size=args.fine_size,
                        dataset_name=args.dataset_name,
                        checkpoint_dir=args.checkpoint_dir,
                        sample_dir=args.sample_dir,
                        L1_lambda=args.L1_lambda,
                        G2_lambda=args.G2_lambda,
                        input_c_dim=args.input_nc,
                        output_c_dim=args.output_nc,
                        direction=args.which_direction)

        if args.phase == 'train':
            model.train(args)
        else:
            model.test(args)
Example #7
File: main.py Project: WeiZongqi/HOFAM
def main(_):
    if not os.path.exists(args.checkpoint_dir):
        os.makedirs(args.checkpoint_dir)
    if not os.path.exists(args.sample_dir):
        os.makedirs(args.sample_dir)
    if not os.path.exists(args.test_dir):
        os.makedirs(args.test_dir)
    if not os.path.exists(args.log_dir):
        os.makedirs(args.log_dir)
    config = tf.ConfigProto(allow_soft_placement=True)
    with tf.Session(config=config) as sess:
        model = pix2pix(sess, image_size_H=args.fine_size_H,
                        image_size_W=args.fine_size_W,
                        batch_size=args.batch_size,
                        output_size_H=args.fine_size_H,
                        output_size_W=args.fine_size_W,
                        gf_dim=args.ngf,
                        df_dim=args.ndf,
                        dataset_name=args.dataset_name,
                        L1_lambda=args.L1_lambda,
                        checkpoint_dir=args.checkpoint_dir,
                        sample_dir=args.sample_dir)

        if args.phase == 'train':
            model.train(args)
        elif args.phase == 'test':
            model.test(args)
        else:
            model.demo(args)
Example #8
def actions():

    tf.reset_default_graph()

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:

        model = pix2pix(
            sess,
            args,
            image_size=args.fine_size,
            load_size=args.load_size,
            batch_size=args.batch_size,
            output_size=args.fine_size,
            input_c_dim=args.input_nc,
            output_c_dim=args.output_nc,
            dataset_name=args.dataset_name,
            checkpoint_dir=args.checkpoint_dir,
            sample_dir=args.sample_dir)  #args added for using DeepLabv3

        if args.phase == 'train':
            model.train(args)
        elif args.phase == 'generate_image':
            model.generate_image(args)
        else:
            print('...')
Example #9
def main(_):
    print("Pix2pix tensorflow serve!")
    if not os.path.exists(args.checkpoint_dir):
        os.makedirs(args.checkpoint_dir)
    if not os.path.exists(args.sample_dir):
        os.makedirs(args.sample_dir)
    if not os.path.exists(args.test_dir):
        os.makedirs(args.test_dir)

    with tf.Session() as sess:
        model = pix2pix(sess,
                        image_size=args.fine_size,
                        batch_size=args.batch_size,
                        output_size=args.fine_size,
                        dataset_name=args.dataset_name,
                        checkpoint_dir=args.checkpoint_dir,
                        sample_dir=args.sample_dir,
                        direction=args.which_direction)

        model.load_model(args)

        # create web.py server
        urls = ('/run/local_id/(.*)', 'index')
        webapp = serve(urls, globals())
        web.model = model
        webapp.run(port=8081)
Example #10
def main(_):
    if not os.path.exists(args.checkpoint_dir):
        os.makedirs(args.checkpoint_dir)

    with tf.Session() as sess:
        model = pix2pix(sess,
                        batch_size=args.batch_size,
                        load_size=args.load_size,
                        fine_size=args.fine_size,
                        dataset_name=args.dataset_name,
                        which_direction=args.which_direction,
                        checkpoint_dir=args.checkpoint_dir,
                        load_model=args.continue_train,
                        gf_dim=args.ngf,
                        df_dim=args.ndf,
                        L1_lambda=args.L1_lambda,
                        input_c_dim=args.input_nc,
                        output_c_dim=args.output_nc,
                        flips=args.flips,
                        rotations=args.rotations,
                        keep_aspect=args.keep_aspect,
                        pad_to_white=args.pad_to_white,
                        gcn=args.gcn,
                        interp=args.interp,
                        acc_threshold=args.acc_threshold)

        if args.phase == 'train':
            shutil.rmtree(args.sample_dir, ignore_errors=True)
            os.makedirs(args.sample_dir)
            model.train(args)

        shutil.rmtree(args.test_dir, ignore_errors=True)
        os.makedirs(args.test_dir)
        model.test(args)
Example #11
File: main.py Project: nmakow/cs230-final
def main(_):
    if not os.path.exists(args.checkpoint_dir):
        os.makedirs(args.checkpoint_dir)
    if not os.path.exists(args.sample_dir):
        os.makedirs(args.sample_dir)
    if not os.path.exists(args.test_dir):
        os.makedirs(args.test_dir)
    if not os.path.exists(
            os.path.join(args.checkpoint_dir, args.experiment_name)):
        os.makedirs(os.path.join(args.checkpoint_dir, args.experiment_name))
    if not os.path.exists(os.path.join(args.sample_dir, args.experiment_name)):
        os.makedirs(os.path.join(args.sample_dir, args.experiment_name))

    with tf.Session() as sess:
        model = pix2pix(sess,
                        image_size=args.fine_size,
                        batch_size=args.batch_size,
                        output_size=args.fine_size,
                        dataset_name=args.dataset_name,
                        checkpoint_dir=args.checkpoint_dir,
                        sample_dir=args.sample_dir,
                        experiment_name=args.experiment_name,
                        wgan=args.wgan,
                        gp=args.gp,
                        L1_lambda=args.L1_lambda)

        if args.phase == 'train':
            model.train(args)
        else:
            model.test(args)
Example #12
File: main.py Project: zuobinxiong/pix2pix
def main(_):
    if not os.path.exists(args.checkpoint_dir):
        os.makedirs(args.checkpoint_dir)
    if not os.path.exists(args.sample_dir):
        os.makedirs(args.sample_dir)
    if not os.path.exists(args.test_dir):
        os.makedirs(args.test_dir)

    run_config = tf.ConfigProto()
    run_config.gpu_options.per_process_gpu_memory_fraction = 0.5
    # run_config.gpu_options.allow_growth=True

    with tf.Session(config=run_config) as sess:
        model = pix2pix(sess,
                        image_size=args.fine_size,
                        batch_size=args.batch_size,
                        output_size=args.fine_size,
                        dataset_name=args.dataset_name,
                        checkpoint_dir=args.checkpoint_dir,
                        sample_dir=args.sample_dir)

        if args.phase == 'train':
            model.train(args)
        else:
            model.test(args)
Example #13
def main(_):
    args.checkpoint_dir = os.path.join(args.save_dir, args.checkpoint_dir)
    args.sample_dir = os.path.join(args.save_dir, args.sample_dir)
    args.test_dir = os.path.join(args.save_dir, args.test_dir)
    args.log_dir = os.path.join(args.save_dir, args.log_dir)
    if not os.path.exists(args.checkpoint_dir):
        os.makedirs(args.checkpoint_dir)
    if not os.path.exists(args.sample_dir):
        os.makedirs(args.sample_dir)
    if not os.path.exists(args.test_dir):
        os.makedirs(args.test_dir)

    checkGPU.auto_queue(
        gpu_memory_require=gpu_memory_require,
        interval=1,
    )
    config = checkGPU.set_memory_usage(usage=gpu_memory_require,
                                       allow_growth=True)

    with tf.Session(config=config) as sess:
        model = pix2pix(sess,
                        image_size=args.fine_size,
                        batch_size=args.batch_size,
                        output_size=args.fine_size,
                        dataset_name=args.dataset_name,
                        checkpoint_dir=args.checkpoint_dir,
                        sample_dir=args.sample_dir)

        if args.phase == 'train':
            model.train(args)
        else:
            model.test(args)
Example #14
def main(_):
    if not os.path.exists(args.checkpoint_dir):
        os.makedirs(args.checkpoint_dir)
    if not os.path.exists(args.sample_dir):
        os.makedirs(args.sample_dir)
    if not os.path.exists(args.test_dir):
        os.makedirs(args.test_dir)

    with tf.Session() as sess:
        model = pix2pix(sess, args) 

        if args.phase == 'train':
            model.train()
        else:
            model.test()
Example #15
def train(conf):

    img_path = glob.glob(os.path.join(conf['tr_data_path'], '*.%s'%(conf['data_ext'])))
    conf['img_path'] = img_path
    
    model = pix2pix(conf)
    model.build_graph()
    
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config = config)
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver(max_to_keep = None)
    step = len(conf['img_path']) // conf['batch_size']
    
    if conf['in_memory']:
        img_list = []
        for img in conf['img_path']:
            img_ = Image.open(img)
            img_ = img_.resize((2*conf['load_size'], conf['load_size']), Image.BICUBIC)
            img_ = np.array(img_)
            img_list.append(img_)
        img_list = np.asarray(img_list)
        
        sess.run(model.data_loader.init_op['tr_init'], feed_dict = {model.data_loader.image_arr : img_list})
        
            
    else:
        sess.run(model.data_loader.init_op['tr_init'])
        
    
    for i in range(conf['epoch']):
        for t in range(step):
            _, d_loss = sess.run([model.discriminator_train, model.discriminator_loss])
            _, gene_image, g_loss = sess.run([model.generator_train, model.fake_image, model.generator_loss])
            print("%02d _ %05d"%(i, t))
            print('d_loss : %0.6f, g_loss : %0.6f' % (d_loss, g_loss))

        gene_image = gene_image[0]
        gene_image = ((gene_image + 1.0) * 255.0) / 2.0
        gene_image = np.round(gene_image)
        gene_image = np.clip(gene_image, 0.0, 255.0)
        gene_image = gene_image.astype(np.uint8)
        gene_image = Image.fromarray(gene_image)
        gene_image.save('temp/%s_%04d.png'%(conf['model_name'], i))
        
    saver.save(sess, os.path.join('./model/pix2pix_%s'%conf['model_name']))
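Unlike the argparse-driven entry points above, this variant configures pix2pix through a plain dictionary. A hypothetical `conf` (keys mirror those read inside `train`; values are illustrative placeholders, not from the original project):

conf = {
    'tr_data_path': './datasets/facades/train',  # directory of paired images
    'data_ext': 'jpg',
    'batch_size': 1,
    'epoch': 200,
    'load_size': 256,      # images are resized to (2*load_size, load_size)
    'in_memory': True,     # preload all training images into a numpy array
    'model_name': 'facades',
}
train(conf)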
Example #16
def main(_):
    if not os.path.exists(args.checkpoint_dir):
        os.makedirs(args.checkpoint_dir)
    if not os.path.exists(args.sample_dir):
        os.makedirs(args.sample_dir)
    if not os.path.exists(args.test_dir):
        os.makedirs(args.test_dir)

    with tf.Session() as sess:
        model = pix2pix(sess, image_size=args.fine_size, batch_size=args.batch_size,
                        output_size=args.fine_size, dataset_name=args.dataset_name,
                        checkpoint_dir=args.checkpoint_dir, sample_dir=args.sample_dir)

        if args.phase == 'train':
            model.train(args)
        else:
            model.test(args)
Example #17
def main(_):
    if not os.path.exists(args.checkpoint_dir):
        os.makedirs(args.checkpoint_dir)
    if not os.path.exists(args.sample_dir):
        os.makedirs(args.sample_dir)
    if not os.path.exists(args.test_dir):
        os.makedirs(args.test_dir)
    with tf.Session() as sess:
        model = pix2pix(sess,
                        image_size=args.fine_size,
                        batch_size=args.batch_size,
                        output_size=args.fine_size,
                        dataset_name=args.dataset_name,
                        checkpoint_dir=args.checkpoint_dir,
                        sample_dir=args.sample_dir)
        # Call pix2pix defined in model.py, passing these arguments --> see model.py

        model.test(args)
Example #18
def main(_):
    if not os.path.exists(args.checkpoint_dir):
        os.makedirs(args.checkpoint_dir)
    if not os.path.exists(args.sample_dir):
        os.makedirs(args.sample_dir)
    if not os.path.exists(args.test_dir):
        os.makedirs(args.test_dir)

    with tf.Session() as sess:
        model = pix2pix(sess, image_size=args.fine_size, batch_size=args.batch_size,
                        output_size=args.fine_size, dataset_name=args.dataset_name, layer_features=args.layer_features,
                        checkpoint_dir=args.checkpoint_dir, sample_dir=args.sample_dir)


        if args.phase == 'train':
            model.train(args)
        else:
            model.test(args)
Example #19
def main(_):
    if not os.path.exists(args.checkpoint_dir):
        os.makedirs(args.checkpoint_dir)
    if not os.path.exists(args.sample_dir):
        os.makedirs(args.sample_dir)
    if not os.path.exists(args.test_dir):
        os.makedirs(args.test_dir)

    #config = tf.ConfigProto()
    #config.gpu_options.allow_growth = True
    with tf.Session() as sess:
        model = pix2pix(sess, image_size=args.fine_size, batch_size=args.batch_size,
                        output_size=args.fine_size, dataset_name=args.dataset_name, gf_dim=args.ngf,
                        checkpoint_dir=args.checkpoint_dir, sample_dir=args.sample_dir)

        if args.phase == 'train':
            model.train(args)
        else:
            model.test(args)
Example #20
def main(_):
    if not os.path.exists(args.checkpoint_dir):
        os.makedirs(args.checkpoint_dir)
    if not os.path.exists(args.sample_dir):
        os.makedirs(args.sample_dir)
    if not os.path.exists(args.test_dir):
        os.makedirs(args.test_dir)

    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    config.allow_soft_placement = True
    config.log_device_placement = False

    with tf.Session(config = config) as sess:
        model = pix2pix(sess, image_size=args.fine_size, load_size=args.load_size, batch_size=args.batch_size,
                        output_size=args.fine_size, input_c_dim=args.input_nc, dataset_name=args.dataset_name,
                        checkpoint_dir=args.checkpoint_dir, sample_dir=args.sample_dir)

        if args.phase == 'train':
            model.train(args)
        else:
            model.test(args)
Example #21
File: main.py Project: CosmosHua/AICC_CV
def main(_):
    args.train_dir = "HRFace1.2"
    args.model_dir += "/" + os.path.basename(args.train_dir)
    if args.phase == "train": assert (os.path.isdir(args.train_dir))
    if not os.path.exists(args.model_dir):
        os.makedirs(args.model_dir)  # for train/test
    if not os.path.exists(args.test_dir):
        os.makedirs(args.test_dir)  # for train/test

    print("\nParameters :\n", args)  # device_count: limit number of GPUs
    config = tf.ConfigProto(device_count={"GPU": 1}, allow_soft_placement=True)
    #config.gpu_options.per_process_gpu_memory_fraction = 0.1 # gpu_memory ratio
    config.gpu_options.allow_growth = True  # dynamicly apply gpu_memory
    with tf.Session(config=config) as sess:
        model = pix2pix(sess=sess,
                        batch_size=args.batch_size,
                        input_nc=args.input_nc,
                        output_nc=args.output_nc,
                        L1_lambda=args.L1_lambda,
                        SS_lambda=args.SS_lambda)
        if args.phase == "train": model.train(args)  # Train
        else: model.test(args)  # Test
Example #22
File: main.py Project: clotha87762/Pix2Pix
def main(_):
    
    if not os.path.exists(args.test_path):
        os.makedirs(args.test_path)
    if not os.path.exists(args.sample_path):
        os.makedirs(args.sample_path)
    if not os.path.exists(args.log_path):
        os.makedirs(args.log_path)
    if not os.path.exists(args.model_path):
        os.makedirs(args.model_path)
    
    
   
    with tf.Session() as sess:
                       
            model = pix2pix(sess, args)
            model.build()
         
            if args.phase == 'train':
                model.train()
            else :
                model.test()
Example #23
def test():
    tf.reset_default_graph()
    if not os.path.exists(args.checkpoint_dir):
        os.makedirs(args.checkpoint_dir)
    if not os.path.exists(args.sample_dir):
        os.makedirs(args.sample_dir)
    if not os.path.exists(args.test_dir):
        os.makedirs(args.test_dir)
    ### Branch the if statements based on the input photo filename.

    filenames = os.listdir('input')
    filenames.sort()

    if len(filenames) == 0:
        print("Not Exist")

    count = 0
    if filenames[0].split(".")[0] == "99999":
        args.dataset_name = "car"

    if filenames[0].split(".")[0] == "11111":
        args.dataset_name = "bicycle"

    with tf.Session() as sess:

        model = pix2pix(sess,
                        image_size=args.fine_size,
                        batch_size=args.batch_size,
                        output_size=args.fine_size,
                        dataset_name=args.dataset_name,
                        checkpoint_dir=args.checkpoint_dir,
                        sample_dir=args.sample_dir)

        if args.phase == 'train':
            model.train(args)
        else:
            model.test(args)
Example #24
def main(_):
    if not os.path.exists(args.checkpoint_dir):
        os.makedirs(args.checkpoint_dir)
    if not os.path.exists(args.sample_dir):
        os.makedirs(args.sample_dir)
    if not os.path.exists(args.test_dir):
        os.makedirs(args.test_dir)

    with tf.Session() as sess:
        model = pix2pix(sess, image_size=args.fine_size, batch_size=args.batch_size,
                        output_size=args.fine_size, input_c_dim=args.input_nc,
                        output_c_dim=args.output_nc, dataset_name=args.dataset_name,
                        checkpoint_dir=args.checkpoint_dir, sample_dir=args.sample_dir)

        if args.phase == 'train':
            model.train(args)
        elif args.phase == 'test':
            model.test(args)
        elif args.phase == 'generate_image':
            model.generate_image(args)
        elif args.phase == 'create_dataset':
            model.create_dataset(args)
        else:
            print ('...')
Example #25
    def __init__(self,
                 sess,
                 args,
                 filename_glob,
                 batch_size=1,
                 image_height=256,
                 image_width=512,
                 image_channels=3,
                 image_type='jpeg'):
        #We create a tf variable to hold the global step, this has the effect
        #that when a checkpoint is created this value is saved.
        #Making the plots in tensorboard being continued when the model is restored.
        global_step = tf.Variable(0)
        increment_step = global_step.assign_add(1)

        #Create a queue that will be automatically fill by another thread
        #as we read batches out of it
        train_in = self.batch_queue(filename_glob, batch_size, image_height,
                                    image_width, image_channels, image_type)

        # create model
        model = pix2pix(sess,
                        train_in,
                        global_step,
                        args,
                        image_size=args.fine_size,
                        batch_size=args.batch_size,
                        output_size=args.fine_size,
                        dataset_name=args.dataset_name,
                        checkpoint_dir=args.checkpoint_dir,
                        sample_dir=args.sample_dir)
        model.train(args)

        init_op = tf.global_variables_initializer()
        #This is required to intialize num_epochs for the filename_queue
        init_local = tf.local_variables_initializer()

        # Initialize the variables (like the epoch counter).
        sess.run([init_op, init_local])

        # Start input enqueue threads.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        try:

            progress = tqdm()
            while not coord.should_stop():
                # Run training steps or whatever
                global_step = sess.run(increment_step)
                progress.update()
                model.train_step()

        except tf.errors.OutOfRangeError:
            print('Done training -- epoch limit reached')
        finally:
            # When done, ask the threads to stop.
            coord.request_stop()

        # Wait for threads to finish.
        coord.join(threads)
        sess.close()
Example #26
c = 0.01
epochs = 20
batchSize = 10
batchNum = 100
lr = 0.00005
natob = 32
nfd = 16
startNum = 0

dopt = RMSprop(lr)
# Generator
generator = m.generator(natob, batchSize=64)
# Discriminator
discriminator = m.discriminator(nfd, opt=dopt)
doutSize = discriminator.output_shape[1:-1]
pix2pix = m.pix2pix(generator, discriminator, alpha=0, belta=1, opt=dopt)

# loss
listDisLossReal = []
listDisLossGen = []
listGenLoss = []

datagen = ImageDataGenerator(vertical_flip=True,
                             horizontal_flip=True,
                             fill_mode='constant')
# load train data
pkl_file = open('../data/NormEnhance/train/trainX.pkl', 'rb')
trainX = pickle.load(pkl_file)
pkl_file.close()
pkl_file = open('../data/NormEnhance/train/trainY.pkl', 'rb')
trainY = pickle.load(pkl_file)
Example #27
    saver = tf.train.Saver(tf.global_variables(), max_to_keep=100)
    variables = slim.get_variables_to_restore()

with g2.as_default():
    pix2pix_args = pix2pix_args

with tf.Session(graph=g1, config=tf.ConfigProto(allow_soft_placement=True)) as sess1:
    with tf.Session(graph=g2, config=tf.ConfigProto(allow_soft_placement=True)) as sess2:
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(coord=coord)

        saver.restore(sess1, './checkpoint/IFPA/model.ckpt-58')

        pix2pix_model = pix2pix(sess2, image_size=pix2pix_args.fine_size, batch_size=pix2pix_args.batch_size,
                                output_size=pix2pix_args.fine_size, dataset_name=pix2pix_args.dataset_name,
                                checkpoint_dir=pix2pix_args.checkpoint_dir, sample_dir=pix2pix_args.sample_dir)
        pix2pix_model.demo(pix2pix_args, 0, 0, True)

        input_path = "./test"
        src = np.array([[224, 448], [0, 448], [0, 0], [224, 0]], np.float32)
        dst = np.array([[0, 0], [224, 0], [224, 448], [0, 448]], np.float32)
        M_flip = cv2.getPerspectiveTransform(src, dst)

        ori_frame_size = np.array([[0, 0], [640, 0], [640, 480], [0, 480]], np.float32)
        resize_frame_size = np.array([[0, 0], [256, 0], [256, 256], [0, 256]], np.float32)
        M_resize2ori = cv2.getPerspectiveTransform(resize_frame_size, ori_frame_size)

        img = np.zeros((256, 256, 3), np.uint8)

        tf1_err_output_path = "D:/baseline_error/tf1"
Example #28
def main(_):
    print("Pix2pix tensorflow Exporter!")
    if not os.path.exists(args.checkpoint_dir):
        os.makedirs(args.checkpoint_dir)
    if not os.path.exists(args.sample_dir):
        os.makedirs(args.sample_dir)
    if not os.path.exists(args.test_dir):
        os.makedirs(args.test_dir)

    with tf.Session() as sess:
        model = pix2pix(sess,
                        image_size=args.fine_size,
                        batch_size=args.batch_size,
                        output_size=args.fine_size,
                        dataset_name=args.dataset_name,
                        checkpoint_dir=args.checkpoint_dir,
                        sample_dir=args.sample_dir,
                        input_c_dim=args.input_nc,
                        output_c_dim=args.output_nc,
                        direction=args.which_direction)

        model.load_model(args)
        graph = tf.get_default_graph()
        input_graph_def = graph.as_graph_def()

        # fix batch norm nodes
        for node in input_graph_def.node:
            if node.op == 'RefSwitch':
                node.op = 'Switch'
                for index in xrange(len(node.input)):
                    if 'moving_' in node.input[index]:
                        node.input[index] = node.input[index] + '/read'
            elif node.op == 'AssignSub':
                node.op = 'Sub'
                if 'use_locking' in node.attr:
                    del node.attr['use_locking']

        # freeze!
        freeze_graph_def = graph_util.convert_variables_to_constants(
            sess, input_graph_def, ['generator/Tanh'])

        #copy input-related sub graph_util
        input_node_names_list = ['real_A_and_B_images']
        input_replaced_graph_def = tf.GraphDef()
        for node in freeze_graph_def.node:
            if node.name in input_node_names_list:
                placeholder_node = tf.NodeDef()
                placeholder_node.op = 'Placeholder'
                placeholder_node.name = node.name
                placeholder_node.attr['dtype'].CopyFrom(
                    tf.AttrValue(type=tf.float32.as_datatype_enum))
                input_replaced_graph_def.node.extend([placeholder_node])
                print(node.name, 'is replaced with placeholder')
            else:
                input_replaced_graph_def.node.extend([copy.deepcopy(node)])

        # extract subgraph
        output_sub_graph_def = graph_util.extract_sub_graph(
            input_replaced_graph_def, ['generator/Tanh'])

        with tf.gfile.GFile('export_model.pb', 'wb') as f:
            f.write(output_sub_graph_def.SerializeToString())
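Once `export_model.pb` has been written, the frozen generator can be reloaded for inference without the original checkpoint. A minimal sketch (the node names match the export code above; the placeholder shape is an assumption and depends on how the model built `real_A_and_B_images`):

import numpy as np
import tensorflow as tf

with tf.gfile.GFile('export_model.pb', 'rb') as f:
    graph_def = tf.GraphDef()
    graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as graph:
    tf.import_graph_def(graph_def, name='')
    inp = graph.get_tensor_by_name('real_A_and_B_images:0')
    out = graph.get_tensor_by_name('generator/Tanh:0')
    with tf.Session(graph=graph) as sess:
        # Dummy batch in [-1, 1]; assumed shape (batch, H, W, input_nc + output_nc).
        dummy = np.zeros((1, 256, 256, 6), np.float32)
        fake_B = sess.run(out, feed_dict={inp: dummy})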
Example #29
def test(conf):
    
    img_path = glob.glob(os.path.join(conf['val_data_path'], '*.%s'%(conf['data_ext'])))
    conf['img_path'] = sorted(img_path)
    
    model = pix2pix(conf)
    model.build_graph()
    
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config = config)
    sess.run(tf.global_variables_initializer())
    saver = tf.train.Saver(max_to_keep = None)
    saver.restore(sess, conf['pre_trained_model'])
    step = len(conf['img_path']) // conf['batch_size']
    
    if conf['in_memory']:
        img_list = []
        for img in conf['img_path']:
            img_ = Image.open(img)
            img_ = img_.resize((2*conf['load_size'], conf['load_size']), Image.BICUBIC)
            img_ = np.array(img_)
            img_list.append(img_)
        img_list = np.asarray(img_list)
        
        sess.run(model.data_loader.init_op['val_init'], feed_dict = {model.data_loader.image_arr : img_list})
        
    else:
        sess.run(model.data_loader.init_op['val_init'])
    
    if not os.path.exists('result_%s'%(conf['model_name'])):
        os.makedirs('result_%s'%(conf['model_name']))
    
    for t in range(step):

        gene_image, img_A, GT = sess.run([model.fake_image, model.image_A, model.image_B])

        gene_image = gene_image[0]
        gene_image = ((gene_image + 1.0) * 255.0) / 2.0
        gene_image = np.round(gene_image)
        gene_image = np.clip(gene_image, 0.0, 255.0)
        gene_image = gene_image.astype(np.uint8)
        gene_image = Image.fromarray(gene_image)
        gene_image.save('result_%s/%s_gene.png'%(conf['model_name'], conf['img_path'][t].split('/')[-1].split('.')[0]))


        img_A = img_A[0]
        img_A = ((img_A + 1.0) * 255.0) / 2.0
        img_A = np.round(img_A)
        img_A = np.clip(img_A, 0.0, 255.0)
        img_A = img_A.astype(np.uint8)
        img_A = Image.fromarray(img_A)
        img_A.save('result_%s/%s_input.png'%(conf['model_name'], conf['img_path'][t].split('/')[-1].split('.')[0]))


        GT = GT[0]
        GT = ((GT + 1.0) * 255.0) / 2.0
        GT = np.round(GT)
        GT = np.clip(GT, 0.0, 255.0)
        GT = GT.astype(np.uint8)
        GT = Image.fromarray(GT)
        GT.save('result_%s/%s_GT.png'%(conf['model_name'], conf['img_path'][t].split('/')[-1].split('.')[0]))
Example #30
def main(_):
    # checking exceptions
    if args.phase == 'train' and not (args.is_gan or args.is_l1 or args.is_lc
                                      or args.is_ls):
        raise ValueError('Need to choose at least one loss objective')
    if args.feat_match and not args.is_gan:
        raise ValueError('Only can use feature matching when using GAN loss')

    # see README, need to download first
    sys.path.append("../models/research/slim")

    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True

    with tf.Session(config=config) as sess:
        model = pix2pix(sess,
                        phase=args.phase,
                        task=args.task,
                        residual=args.residual,
                        feat_match_dynamic=args.feat_match_dynamic,
                        is_gan=args.is_gan,
                        is_l1=args.is_l1,
                        is_lc=args.is_lc,
                        is_ls=args.is_ls,
                        is_finetune=args.is_finetune,
                        dataset_dir=args.dataset_dir,
                        validation_split=args.validation_split,
                        log_dir=args.log_dir,
                        checkpoint_dir=args.checkpoint_dir,
                        sample_dir=args.sample_dir,
                        feat_match=args.feat_match,
                        test_dir=args.test_dir,
                        epochs=args.epochs,
                        batch_size=args.batch_size,
                        block=args.block,
                        dimension=args.dimension,
                        input_size=args.input_size,
                        output_size=args.output_size,
                        input_c_dim=args.input_nc,
                        output_c_dim=args.output_nc,
                        gf_dim=args.ngf,
                        g_times=args.g_times,
                        df_dim=args.ndf,
                        lr=args.lr,
                        beta1=args.beta1,
                        save_epoch_freq=args.save_epoch_freq,
                        save_best=args.save_best,
                        print_freq=args.print_freq,
                        sample_freq=args.sample_freq,
                        continue_train=args.continue_train,
                        L1_lamb=args.L1_lambda,
                        c_lamb=args.c_lambda,
                        s_lamb=args.s_lambda,
                        data_type=args.data_type,
                        is_dicom=args.is_dicom,
                        is_max_norm=args.is_max_norm,
                        extractor=args.extractor)

        if args.phase == 'train':
            model.train()
        else:
            model.test()
Example #31
def main(_):
    if not os.path.exists(args.checkpoint_dir):
        os.makedirs(args.checkpoint_dir)
    if not os.path.exists(args.sample_dir):
        os.makedirs(args.sample_dir)
    if not os.path.exists(args.test_dir):
        os.makedirs(args.test_dir)

    with tf.Session() as sess:
        model = pix2pix(sess,
                        image_size=args.fine_size,
                        batch_size=args.batch_size,
                        output_size=args.fine_size,
                        dataset_name=args.dataset_name,
                        checkpoint_dir=args.checkpoint_dir,
                        sample_dir=args.sample_dir)

        if args.phase == 'train':
            model.train(args)
        elif args.phase == 'test':
            model.test(args)
        elif args.phase == 'demo':
            model.demo(args, 0, 0, True)
            cap0 = cv2.VideoCapture(1)
            cap1 = cv2.VideoCapture(0)
            cap0.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
            cap0.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
            cap1.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
            cap1.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)

            input_path = "./test"
            src = np.array([[224, 448], [0, 448], [0, 0], [224, 0]],
                           np.float32)
            dst = np.array([[0, 0], [224, 0], [224, 448], [0, 448]],
                           np.float32)
            M_flip = cv2.getPerspectiveTransform(src, dst)

            ori_frame_size = np.array([[0, 0], [640, 0], [640, 480], [0, 480]],
                                      np.float32)
            resize_frame_size = np.array(
                [[0, 0], [256, 0], [256, 256], [0, 256]], np.float32)
            M_resize2ori = cv2.getPerspectiveTransform(resize_frame_size,
                                                       ori_frame_size)
            # fourcc = cv2.VideoWriter_fourcc(*'mp4v')
            # videoWriter = cv2.VideoWriter('./video/test.mp4', fourcc, 30.0, (448, 448))

            img = np.zeros((256, 256, 3), np.uint8)

            #**************bg_sub_color*********************************************************
            # count = 1
            # global frame0_bg, frame1_bg
            # while (True):
            #     ret0, frame0 = cap0.read()
            #     assert ret0
            #     ret1, frame1 = cap1.read()
            #     assert ret1
            #     if (count > 10):
            #         frame0_bg = cv2.resize(frame0, (256, 256), cv2.INTER_AREA)
            #         frame1_bg = cv2.resize(frame1, (256, 256), cv2.INTER_AREA)
            #         break
            #     count += 1
            # **************bg_sub_color*********************************************************

            while (True):
                st = time.time()
                ret0, frame0 = cap0.read()
                assert ret0
                ret1, frame1 = cap1.read()
                assert ret1

                img1 = cv2.resize(frame0, (256, 256), cv2.INTER_AREA)
                img2 = cv2.resize(frame1, (256, 256), cv2.INTER_AREA)

                # **************bg_sub_color*********************************************************
                # img1 = cv2.subtract(img1, frame0_bg)
                # img2 = cv2.subtract(img2, frame1_bg)
                # **************bg_sub_color*********************************************************

                # result = np.concatenate((img1, img2), axis=1)
                result1 = np.concatenate((img, img1), axis=1)
                result2 = np.concatenate((img, img2), axis=1)
                st1 = time.time()
                model.demo(args, result1, result2, False)
                print("model cost time:{:.2f}s".format(time.time() - st1))
                print("cost time:{:.2f}s".format(time.time() - st))
                img1_pre = cv2.imread("{}/test_0001.png".format(input_path))
                img2_pre = cv2.imread("{}/test_0002.png".format(input_path))

                img1_cal, img1_p, img1_c = img_preprocess(
                    img1_pre, img1, frame0, M_resize2ori)
                img2_cal, img2_p, img2_c = img_preprocess(
                    img2_pre, img2, frame1, M_resize2ori)
                img2_cal = cv2.warpPerspective(img2_cal, M_flip, (224, 448))

                result3 = np.concatenate((img1_pre, img2_pre), axis=1)
                result4 = np.concatenate((img1_cal, img2_cal), axis=1)
                # videoWriter.write(result4)
                result5 = np.concatenate((img1_p, img2_p), axis=1)
                result6 = np.concatenate((img1_c, img2_c), axis=1)

                result7 = np.concatenate((result6, result3))
                result7 = np.concatenate((result7, result5))
                blank = np.zeros((448, 320, 3), np.uint8)
                result4 = np.concatenate((result4, blank), axis=1)
                blank = np.zeros((320, 768, 3), np.uint8)
                result4 = np.concatenate((result4, blank), axis=0)
                result8 = np.concatenate((result7, result4), axis=1)
                # cv2.imshow("0", result)
                cv2.imshow("1", result8)
                # cv2.imshow("2", result4)

                key = cv2.waitKey(30) & 0xFF
                if (key == 27):
                    cap0.release()
                    cap1.release()
                    # videoWriter.release()
                    cv2.destroyAllWindows()
                    break
        elif args.phase == 'test_IOU':
            # iou_imfor = []
            with open("./{}_test_record.txt".format(args.dataset_name),
                      'a') as fp:
                fp.write("model\t, avg_IOU\n")

            diffBG_path = './datasets/{}/{}/*.jpg'.format(
                args.dataset_name, "val")

            is_grayscale = (args.input_nc == 1)
            diffBG_images = load_imgs(diffBG_path, is_grayscale,
                                      args.batch_size)

            for i in range(1, 201):
                model.test_IOU(args,
                               sample_images=diffBG_images,
                               model_name='pix2pix.model-{}'.format(i),
                               stat=True)
                avg_iou = target_IOU_cal(
                    '{}'.format(args.test_dir),
                    './datasets/{}/{}'.format(args.dataset_name, "val"))

                print("test1: model-{}, avg_IOU:{:.2f}".format(i, avg_iou))

                test_record = "{}\t, {:.2f}".format(i, avg_iou)

                with open("./{}_test_record.txt".format(args.dataset_name),
                          'a') as fp:
                    fp.write("{}\n".format(test_record))
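Here `target_IOU_cal` is project-specific: it compares the generated masks in `test_dir` against the validation ground truth. For reference, a generic, hypothetical per-image mask IoU helper (not the project's implementation) typically looks like this:

import numpy as np
from PIL import Image

def mask_iou(pred_path, gt_path, threshold=128):
    # Binarize both images, then compute intersection-over-union of the foreground.
    pred = np.array(Image.open(pred_path).convert('L')) >= threshold
    gt = np.array(Image.open(gt_path).convert('L')) >= threshold
    union = np.logical_or(pred, gt).sum()
    return 1.0 if union == 0 else np.logical_and(pred, gt).sum() / float(union)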