Ejemplo n.º 1
0
def main(_):
    """Script entry point: build the AE from FLAGS and optionally train it.

    Prepares output directories, configures a GPU-friendly TF session,
    constructs the AE model, and — when FLAGS.train is set — trains it on
    the SVHN-with-background dataset for FLAGS.epochs epochs.
    Mutates FLAGS in place (exp_name, stats_interval, ckpt_interval).
    """
    if FLAGS.exp_name is None:
        unit_len = int(FLAGS.latent_dim / FLAGS.latent_num)
        FLAGS.exp_name = "reconstrued_results_unitLength" + str(unit_len)

    # Parse e.g. "(32,32,3)" into [32, 32, 3].
    shape = [int(s) for s in FLAGS.image_shape.strip('()[]{}').split(',')]

    dir_map = init_directories(FLAGS.exp_name, FLAGS.output_dir)
    dir_map['data'] = FLAGS.data_dir if FLAGS.data_dir is not None else '../npz_datas'
    dir_map['codes'] = os.path.join(dir_map['data'], 'codes/')
    create_directories(dir_map, FLAGS.train, FLAGS.save_codes)

    # Flattened pixel count (e.g. 32*32*3).
    flat_dim = reduce(mul, shape, 1)

    config = tf.ConfigProto(allow_soft_placement=True)
    config.gpu_options.allow_growth = True
    config.gpu_options.per_process_gpu_memory_fraction = 0.9
    session = tf.Session(config=config)

    ae = AE(
        session=session,
        arch=FLAGS.arch,
        lr=FLAGS.lr,
        alpha=FLAGS.alpha,
        beta=FLAGS.beta,
        latent_dim=FLAGS.latent_dim,
        latent_num=FLAGS.latent_num,
        class_net_unit_num=FLAGS.class_net_unit_num,
        output_dim=flat_dim,
        batch_size=FLAGS.batch_size,
        image_shape=shape,
        exp_name=FLAGS.exp_name,
        dirs=dir_map,
        vis_reconst=FLAGS.visualize_reconstruct,
    )

    if FLAGS.train:
        train_name = 'SVHN10WithBg_img1_oneguided_N20000x32x32x3_train'

        manager = ShapesDataManager(
            dir_map['data'],
            train_name,
            FLAGS.batch_size,
            shape,
            shuffle=False,
            file_ext=FLAGS.file_ext,
            train_fract=0.8,
            inf=True,
            supervised=False)  # supervised=True for get label
        ae.train_iter1, ae.dev_iter1, ae.test_iter1 = manager.get_iterators()

        # Convert epoch-denominated intervals into iteration counts.
        iters_per_epoch = manager.n_train // manager.batch_size
        FLAGS.stats_interval = int(FLAGS.stats_interval * iters_per_epoch)
        FLAGS.ckpt_interval = int(FLAGS.ckpt_interval * iters_per_epoch)
        total_iters = int(FLAGS.epochs * iters_per_epoch)

        ae.train(total_iters, iters_per_epoch, FLAGS.stats_interval,
                 FLAGS.ckpt_interval)
Ejemplo n.º 2
0
def main(_):
    """Train the AE on the Fashion multi+mask dataset, then reconstruct a
    64-image test set with the restored model.

    Side effects: mutates FLAGS in place (exp_name, stats_interval,
    ckpt_interval), creates output directories, and writes reconstructed
    images under 'RecostructedImg64Test'.
    """
    if FLAGS.exp_name is None:
        FLAGS.exp_name = "reconstrued_results"
    # Parse e.g. "(32,32,1)" into [32, 32, 1].
    image_shape = [int(i) for i in FLAGS.image_shape.strip('()[]{}').split(',')]
    dirs = init_directories(FLAGS.exp_name, FLAGS.output_dir)
    dirs['data'] = '../../../npz_datas' if FLAGS.data_dir is None else FLAGS.data_dir
    dirs['codes'] = os.path.join(dirs['data'], 'codes/')
    create_directories(dirs, FLAGS.train, FLAGS.save_codes)

    # Flattened pixel count (e.g. 32*32*1).
    output_dim = reduce(mul, image_shape, 1)

    run_config = tf.ConfigProto(allow_soft_placement=True)
    run_config.gpu_options.allow_growth = True
    run_config.gpu_options.per_process_gpu_memory_fraction = 0.9
    sess = tf.Session(config=run_config)

    ae = AE(
        session=sess,
        arch=FLAGS.arch,
        lr=FLAGS.lr,
        alpha=FLAGS.alpha,
        beta=FLAGS.beta,
        latent_dim=FLAGS.latent_dim,
        latent_num=FLAGS.latent_num,
        class_net_unit_num=FLAGS.class_net_unit_num,
        output_dim=output_dim,
        batch_size=FLAGS.batch_size,
        image_shape=image_shape,
        exp_name=FLAGS.exp_name,
        dirs=dirs,
        vis_reconst=FLAGS.visualize_reconstruct,
    )

    if FLAGS.train:
        data1Name = 'FashionMultiAndMask_unitlength1_20000x32x32x1_train'

        data_manager = ShapesDataManager(dirs['data'],
                                         data1Name,
                                         FLAGS.batch_size,
                                         image_shape,
                                         shuffle=False,
                                         file_ext=FLAGS.file_ext,
                                         train_fract=0.8,
                                         inf=True)
        ae.train_iter1, ae.dev_iter1, ae.test_iter1 = data_manager.get_iterators()

        # Convert epoch-denominated intervals into iteration counts.
        n_iters_per_epoch = data_manager.n_train // data_manager.batch_size
        FLAGS.stats_interval = int(FLAGS.stats_interval * n_iters_per_epoch)
        FLAGS.ckpt_interval = int(FLAGS.ckpt_interval * n_iters_per_epoch)
        n_iters = int(FLAGS.epochs * n_iters_per_epoch)

        ae.train(n_iters, n_iters_per_epoch, FLAGS.stats_interval,
                 FLAGS.ckpt_interval)

    # Reconstruct the held-out 64-image test set with the trained model.
    data1Name = "FashionMultiAndMask_unitlength1_64x32x32x1_test"

    data_manager = ShapesDataManager(dirs['data'],
                                     data1Name,
                                     FLAGS.batch_size,
                                     image_shape,
                                     shuffle=False,
                                     file_ext=FLAGS.file_ext,
                                     train_fract=1.0,
                                     inf=True)
    ae.train_iter1, ae.dev_iter1, ae.test_iter1 = data_manager.get_iterators()

    ae.session.run(tf.global_variables_initializer())
    saved_step = ae.load()
    # Single check is enough; the original repeated this assert twice.
    assert saved_step > 1, "A trained model is needed to encode the data!"

    pathForSave = 'RecostructedImg64Test'
    # exist_ok replaces the old errno.EEXIST try/except dance; still raises
    # if the path exists but is not a directory, matching the old behavior.
    os.makedirs(pathForSave, exist_ok=True)

    for k in range(1):  # one batch covers the 64-image test set
        fixed_x1, fixed_mk1, _ = next(ae.train_iter1)
        ae.encodeImg(pathForSave, fixed_x1, fixed_mk1, k)
    print('finish encode!')
def main(_):
    """Encode the MNIST offset-1 code/image dataset with a restored AE and
    save the latent codes plus normalized images to a single .npz file.

    Side effects: creates 'ValidateEncodedImgs_Offset1', restores the
    checkpoint at iteration 1875, and writes
    dirs['codes']/codes_reconstrued_results_offset1.npz.
    """
    if FLAGS.exp_name is None:
        FLAGS.exp_name = "reconstrued_results"
    # Parse e.g. "(32,32,1)" into [32, 32, 1].
    image_shape = [
        int(i) for i in FLAGS.image_shape.strip('()[]{}').split(',')
    ]
    dirs = init_directories(FLAGS.exp_name, FLAGS.output_dir)
    dirs['data'] = ('../../../npz_datas_single_test'
                    if FLAGS.data_dir is None else FLAGS.data_dir)
    dirs['codes'] = os.path.join(dirs['data'], 'codes/')
    create_directories(dirs, FLAGS.train, FLAGS.save_codes)

    # Flattened pixel count (e.g. 32*32*1).
    output_dim = reduce(mul, image_shape, 1)

    run_config = tf.ConfigProto(allow_soft_placement=True)
    run_config.gpu_options.allow_growth = True
    run_config.gpu_options.per_process_gpu_memory_fraction = 0.9
    sess = tf.Session(config=run_config)

    ae = AE(
        session=sess,
        arch=FLAGS.arch,
        lr=FLAGS.lr,
        alpha=FLAGS.alpha,
        beta=FLAGS.beta,
        latent_dim=FLAGS.latent_dim,
        latent_num=FLAGS.latent_num,
        class_net_unit_num=FLAGS.class_net_unit_num,
        output_dim=output_dim,
        batch_size=FLAGS.batch_size,
        image_shape=image_shape,
        exp_name=FLAGS.exp_name,
        dirs=dirs,
        vis_reconst=FLAGS.visualize_reconstruct,
    )

    # save codes and images
    data1Name = "mnistOffset1_Imgs_GTimages_mask_GTlabel_(32x64)x32x32x1_unitLength1_CodeImageDataset"

    data_manager = ShapesDataManager(dirs['data'],
                                     data1Name,
                                     FLAGS.batch_size,
                                     image_shape,
                                     shuffle=False,
                                     file_ext=FLAGS.file_ext,
                                     train_fract=1.0,
                                     inf=True)
    ae.train_iter1, ae.dev_iter1, ae.test_iter1 = data_manager.get_iterators()

    ae.session.run(tf.global_variables_initializer())
    # Restore the specific checkpoint saved at iteration 1875.
    saved_step = ae.load_fixedNum(1875)
    assert saved_step > 1, "A trained model is needed to encode the data!"

    pathForSave = 'ValidateEncodedImgs_Offset{}'.format(1)
    save_name = "reconstrued_results_offset{}".format(1)
    # exist_ok replaces the old errno.EEXIST try/except dance; still raises
    # if the path exists but is not a directory, matching the old behavior.
    os.makedirs(pathForSave, exist_ok=True)

    codes = []
    images = []
    sampleNum = 2048
    for batch_num in range(int(sampleNum / FLAGS.batch_size)):
        img_batch, _mask1, _ = next(ae.train_iter1)
        # code shape per batch is set by the model; stacked below.
        code, image = ae.getCodesAndImgs(pathForSave, img_batch, _mask1,
                                         batch_num)
        codes.append(code)
        images.append(image)
        if batch_num < 5 or batch_num % 10 == 0:
            print("Batch number {0}".format(batch_num))

    codes = np.vstack(codes)
    images = np.vstack(images)
    filename = os.path.join(dirs['codes'], "codes_" + save_name)
    np.savez(filename + '.npz', imagesNorm0_1=images, codes=codes)

    print("Images and Codes saved to: {0}".format(filename))
Ejemplo n.º 4
0
def main(_):
    """Run visual reconstruction over a paired pattern dataset with a
    restored AE and save the result images.

    Side effects: creates output directories and 'VisualImgsResults',
    restores the checkpoint at iteration 5750, and writes visualization
    images via ae.getVisualImgs.
    """
    if FLAGS.exp_name is None:
        FLAGS.exp_name = "separateRep_{}_{}_{}_lr_{}".format(
            FLAGS.arch, FLAGS.latent_dim, FLAGS.image_wh, FLAGS.lr)
    # Parse e.g. "(64,64,3)" into [64, 64, 3].
    image_shape = [int(i) for i in FLAGS.image_shape.strip('()[]{}').split(',')]
    dirs = init_directories(FLAGS.exp_name, FLAGS.output_dir)
    dirs['data'] = '../../../npz_datas' if FLAGS.data_dir is None else FLAGS.data_dir
    dirs['codes'] = os.path.join(dirs['data'], 'codes/')
    create_directories(dirs, FLAGS.train, FLAGS.save_codes)

    # Flattened pixel count (e.g. 64*64*3).
    output_dim = reduce(mul, image_shape, 1)

    run_config = tf.ConfigProto(allow_soft_placement=True)
    run_config.gpu_options.allow_growth = True
    run_config.gpu_options.per_process_gpu_memory_fraction = 0.9
    sess = tf.Session(config=run_config)

    ae = AE(
        session=sess,
        arch=FLAGS.arch,
        lr=FLAGS.lr,
        alpha=FLAGS.alpha,
        beta=FLAGS.beta,
        latent_dim=FLAGS.latent_dim,
        latent_num=FLAGS.latent_num,
        class_net_unit_num=FLAGS.class_net_unit_num,
        output_dim=output_dim,
        batch_size=FLAGS.batch_size,
        image_shape=image_shape,
        exp_name=FLAGS.exp_name,
        dirs=dirs,
        vis_reconst=FLAGS.visualize_reconstruct,
    )

    if FLAGS.visualize_reconstruct:
        sampleNum = 1280  # 20x64 large batch, forward prop only
        dataVisualName1 = 'pattern_(20x64)x64x64x3_unitLength9_test_visualdata1'
        dataVisualName2 = 'pattern_(20x64)x64x64x3_unitLength9_test_visualdata2'

        data_manager = TeapotsDataManager(dirs['data'],
                                          dataVisualName1,
                                          dataVisualName2,
                                          FLAGS.batch_size,
                                          image_shape,
                                          shuffle=False,
                                          file_ext=FLAGS.file_ext,
                                          train_fract=0.8,
                                          inf=True,
                                          supervised=False)

        # Two paired iterator triples: one per visual dataset.
        (ae.train_iter1, ae.dev_iter1, ae.test_iter1,
         ae.train_iter2, ae.dev_iter2, ae.test_iter2) = data_manager.get_iterators()

        ae.session.run(tf.global_variables_initializer())
        # Change which iteration's checkpoint to restore here.
        saved_step = ae.load_fixedNum(inter_num=5750)
        assert saved_step > 1, "A trained model is needed to encode the data!"

        pathForSave = 'VisualImgsResults'
        # exist_ok replaces the old errno.EEXIST try/except dance; still
        # raises if the path exists but is not a directory.
        os.makedirs(pathForSave, exist_ok=True)

        for batch_num in range(int(sampleNum / FLAGS.batch_size)):
            img_batch1, _mask1, _ = next(ae.train_iter1)
            img_batch2, _mask2, _ = next(ae.train_iter2)
            ae.getVisualImgs(pathForSave, img_batch1, _mask1, img_batch2,
                             _mask2, batch_num)
            if batch_num < 5 or batch_num % 10 == 0:
                print("Batch number {0}".format(batch_num))

        print("Images and Codes saved to Folder: VisualImgsResults")
Ejemplo n.º 5
0
def main(_):
    """Reconstruct the MNIST offset-1 code/image dataset with a restored AE
    and save reconstruction visualizations.

    Side effects: creates 'VisualImgsResults', restores the checkpoint at
    iteration 1875, and writes images via ae.visualise_reconstruction_path.
    """
    if FLAGS.exp_name is None:
        FLAGS.exp_name = "reconstrued_results"
    # Parse e.g. "(32,32,1)" into [32, 32, 1].
    image_shape = [
        int(i) for i in FLAGS.image_shape.strip('()[]{}').split(',')
    ]
    dirs = init_directories(FLAGS.exp_name, FLAGS.output_dir)
    dirs['data'] = ('../../../npz_datas_single_test'
                    if FLAGS.data_dir is None else FLAGS.data_dir)
    dirs['codes'] = os.path.join(dirs['data'], 'codes/')
    create_directories(dirs, FLAGS.train, FLAGS.save_codes)

    # Flattened pixel count (e.g. 32*32*1).
    output_dim = reduce(mul, image_shape, 1)

    run_config = tf.ConfigProto(allow_soft_placement=True)
    run_config.gpu_options.allow_growth = True
    run_config.gpu_options.per_process_gpu_memory_fraction = 0.9
    sess = tf.Session(config=run_config)

    ae = AE(
        session=sess,
        arch=FLAGS.arch,
        lr=FLAGS.lr,
        alpha=FLAGS.alpha,
        beta=FLAGS.beta,
        latent_dim=FLAGS.latent_dim,
        latent_num=FLAGS.latent_num,
        class_net_unit_num=FLAGS.class_net_unit_num,
        output_dim=output_dim,
        batch_size=FLAGS.batch_size,
        image_shape=image_shape,
        exp_name=FLAGS.exp_name,
        dirs=dirs,
        vis_reconst=FLAGS.visualize_reconstruct,
    )

    # test get changed images
    data1Name = "mnistOffset1_Imgs_GTimages_mask_GTlabel_(32x64)x32x32x1_unitLength1_CodeImageDataset"

    data_manager = ShapesDataManager(dirs['data'],
                                     data1Name,
                                     FLAGS.batch_size,
                                     image_shape,
                                     shuffle=False,
                                     file_ext=FLAGS.file_ext,
                                     train_fract=1.0,
                                     inf=True)
    ae.train_iter1, ae.dev_iter1, ae.test_iter1 = data_manager.get_iterators()

    ae.session.run(tf.global_variables_initializer())
    # Restore the specific checkpoint saved at iteration 1875.
    saved_step = ae.load_fixedNum(1875)
    # Single check is enough; the original repeated this assert twice.
    assert saved_step > 1, "A trained model is needed to encode the data!"

    pathForSave = 'VisualImgsResults'
    # exist_ok replaces the old errno.EEXIST try/except dance; still raises
    # if the path exists but is not a directory, matching the old behavior.
    os.makedirs(pathForSave, exist_ok=True)

    sampleNum = 2048
    for k in range(int(sampleNum / FLAGS.batch_size)):
        fixed_x1, fixed_mk1, _ = next(ae.train_iter1)
        ae.visualise_reconstruction_path(pathForSave, fixed_x1, fixed_mk1, k)
    print('finish encode!')