# Example 1
def main(_):
    """Entry point: configure directories, build the AE, and train it if requested."""
    # Default experiment name is derived from the per-unit latent size.
    if FLAGS.exp_name is None:
        FLAGS.exp_name = "reconstrued_results_unitLength" + str(
            int(FLAGS.latent_dim / FLAGS.latent_num))

    # Parse a "(H,W,C)"-style flag into a list of ints.
    shape_tokens = FLAGS.image_shape.strip('()[]{}').split(',')
    image_shape = [int(tok) for tok in shape_tokens]

    directories = init_directories(FLAGS.exp_name, FLAGS.output_dir)
    directories['data'] = FLAGS.data_dir if FLAGS.data_dir is not None else '../npz_datas'
    directories['codes'] = os.path.join(directories['data'], 'codes/')
    create_directories(directories, FLAGS.train, FLAGS.save_codes)

    # Flattened pixel count of a single image.
    output_dim = reduce(mul, image_shape, 1)

    run_config = tf.ConfigProto(allow_soft_placement=True)
    run_config.gpu_options.allow_growth = True
    run_config.gpu_options.per_process_gpu_memory_fraction = 0.9
    sess = tf.Session(config=run_config)

    ae = AE(
        session=sess,
        arch=FLAGS.arch,
        lr=FLAGS.lr,
        alpha=FLAGS.alpha,
        beta=FLAGS.beta,
        latent_dim=FLAGS.latent_dim,
        latent_num=FLAGS.latent_num,
        class_net_unit_num=FLAGS.class_net_unit_num,
        output_dim=output_dim,
        batch_size=FLAGS.batch_size,
        image_shape=image_shape,
        exp_name=FLAGS.exp_name,
        dirs=directories,
        vis_reconst=FLAGS.visualize_reconstruct,
    )

    # Guard clause: nothing more to do unless training was requested.
    if not FLAGS.train:
        return

    data1Name = 'SVHN10WithBg_img1_oneguided_N20000x32x32x3_train'

    data_manager = ShapesDataManager(
        directories['data'],
        data1Name,
        FLAGS.batch_size,
        image_shape,
        shuffle=False,
        file_ext=FLAGS.file_ext,
        train_fract=0.8,
        inf=True,
        supervised=False)  # supervised=True for get label
    ae.train_iter1, ae.dev_iter1, ae.test_iter1 = data_manager.get_iterators()

    # Convert epoch-denominated intervals into iteration counts.
    n_iters_per_epoch = data_manager.n_train // data_manager.batch_size
    FLAGS.stats_interval = int(FLAGS.stats_interval * n_iters_per_epoch)
    FLAGS.ckpt_interval = int(FLAGS.ckpt_interval * n_iters_per_epoch)
    n_iters = int(FLAGS.epochs * n_iters_per_epoch)

    ae.train(n_iters, n_iters_per_epoch, FLAGS.stats_interval,
             FLAGS.ckpt_interval)
# Example 2
def main(_):
    """Entry point: optionally train the AE, then reconstruct the test images.

    Builds the model from command-line FLAGS, trains it when FLAGS.train is
    set, then reloads the latest checkpoint and writes reconstructed images
    for the 64-sample test set to ``RecostructedImg64Test``.
    """
    if FLAGS.exp_name is None:
        FLAGS.exp_name = "reconstrued_results"
    # Parse a "(H,W,C)"-style flag into a list of ints.
    image_shape = [int(i) for i in FLAGS.image_shape.strip('()[]{}').split(',')]
    dirs = init_directories(FLAGS.exp_name, FLAGS.output_dir)
    dirs['data'] = '../../../npz_datas' if FLAGS.data_dir is None else FLAGS.data_dir
    dirs['codes'] = os.path.join(dirs['data'], 'codes/')
    create_directories(dirs, FLAGS.train, FLAGS.save_codes)

    # Flattened pixel count of a single image.
    output_dim = reduce(mul, image_shape, 1)

    run_config = tf.ConfigProto(allow_soft_placement=True)
    run_config.gpu_options.allow_growth = True
    run_config.gpu_options.per_process_gpu_memory_fraction = 0.9
    sess = tf.Session(config=run_config)

    ae = AE(
        session=sess,
        arch=FLAGS.arch,
        lr=FLAGS.lr,
        alpha=FLAGS.alpha,
        beta=FLAGS.beta,
        latent_dim=FLAGS.latent_dim,
        latent_num=FLAGS.latent_num,
        class_net_unit_num=FLAGS.class_net_unit_num,
        output_dim=output_dim,
        batch_size=FLAGS.batch_size,
        image_shape=image_shape,
        exp_name=FLAGS.exp_name,
        dirs=dirs,
        vis_reconst=FLAGS.visualize_reconstruct,
    )

    if FLAGS.train:
        data1Name = 'FashionMultiAndMask_unitlength1_20000x32x32x1_train'

        data_manager = ShapesDataManager(dirs['data'],
                        data1Name, FLAGS.batch_size,
                        image_shape, shuffle=False, file_ext=FLAGS.file_ext,
                        train_fract=0.8, inf=True)
        ae.train_iter1, ae.dev_iter1, ae.test_iter1 = data_manager.get_iterators()

        # Convert epoch-denominated intervals into iteration counts.
        n_iters_per_epoch = data_manager.n_train // data_manager.batch_size
        FLAGS.stats_interval = int(FLAGS.stats_interval * n_iters_per_epoch)
        FLAGS.ckpt_interval = int(FLAGS.ckpt_interval * n_iters_per_epoch)
        n_iters = int(FLAGS.epochs * n_iters_per_epoch)

        ae.train(n_iters, n_iters_per_epoch, FLAGS.stats_interval, FLAGS.ckpt_interval)

    # Reconstruct the held-out test images with the (re)loaded model.
    data1Name = "FashionMultiAndMask_unitlength1_64x32x32x1_test"

    data_manager = ShapesDataManager(dirs['data'],
                    data1Name, FLAGS.batch_size,
                    image_shape, shuffle=False, file_ext=FLAGS.file_ext,
                    train_fract=1.0, inf=True)
    ae.train_iter1, ae.dev_iter1, ae.test_iter1 = data_manager.get_iterators()

    ae.session.run(tf.global_variables_initializer())
    saved_step = ae.load()
    # Fixed: this assertion was duplicated further down; a single check suffices.
    assert saved_step > 1, "A trained model is needed to encode the data!"

    pathForSave = 'RecostructedImg64Test'
    try:
        os.makedirs(pathForSave)
    except OSError as exc:  # tolerate "already exists" only (Python >2.5 idiom)
        if exc.errno != errno.EEXIST or not os.path.isdir(pathForSave):
            raise

    # Encode/reconstruct one batch of test images and masks.
    for k in range(1):
        fixed_x1, fixed_mk1, _ = next(ae.train_iter1)
        ae.encodeImg(pathForSave, fixed_x1, fixed_mk1, k)
    print('finish encode!')
# Example 3
# Script setup: load the training images, build the autoencoder, and train it.
# NOTE(review): `npy_path` is not defined in this chunk — presumably assigned
# from sys.argv[1] earlier in the file; confirm against the full script.
model_path = sys.argv[2]  # destination for the trained model (per the name)

trainX = np.load(npy_path)

# Two preprocessed variants are produced; only the first is used below.
trainX_preprocessed, trainX_preprocessed2 = preprocess(trainX)
img_dataset = Image_Dataset2(trainX_preprocessed)

same_seeds(0)  # fix RNG seeds for reproducibility

model = AE().cuda()
criterion = nn.MSELoss()  # pixel-wise reconstruction loss
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-5, weight_decay=1e-5)
#optimizer = torch.optim.SGD(model.parameters(), lr=1e-3, weight_decay=1e-5, momentum=0.9)
scheduler = optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min') # adjust lr

model.train()
n_epoch = 300


# Prepare the dataloader, model, loss criterion and optimizer.
img_dataloader = DataLoader(img_dataset, batch_size=64, shuffle=True)


# Main training loop.
for epoch in range(n_epoch):
    for data in img_dataloader:
        img = data
        img = img.cuda()

        # model returns (latent/code, reconstruction); loss is on the reconstruction
        output1, output = model(img)
        loss = criterion(output, img)
        # NOTE(review): no optimizer.zero_grad()/loss.backward()/optimizer.step()
        # appears here — the loop looks truncated; verify against the full source.
# Example 4
def ae_train(train_dataset, test_dataset, epochs, mode):
    """Train an (auxiliary) autoencoder on top of a frozen, pre-trained GAN.

    Args:
        train_dataset: iterable of training batches fed to ``model.train``.
        test_dataset: iterable of test batches; the first 4 samples of the
            first batch are used for periodic reconstruction snapshots.
        epochs: number of training epochs.
        mode: ``TrainMode.AE`` selects the naive AE; any other value selects
            the auxiliary AE built from the full GAN checkpoint.
    """
    # Reload the pre-trained GAN checkpoint and freeze its generator.
    gan_checkpoint_dir = './gan_training_checkpoints'
    gan_checkpoint = tf.train.Checkpoint(
        optimizer=generator_opt,
        discriminator_optimizer=discriminator_opt,
        generator=generator,
        discriminator=discriminator,
    )

    latest = tf.train.latest_checkpoint(gan_checkpoint_dir)
    gan_checkpoint.restore(latest)

    frozen_generator = gan_checkpoint.generator
    frozen_generator.trainable = False

    # build vae model.
    # NOTE(review): vae_opt is never used in this function — the model keeps
    # its own optimizer (``model.optimizer``); candidate for removal.
    vae_opt = tf.keras.optimizers.Adam(1e-3)

    if mode == TrainMode.AE:
        print(
            "*********************** Training naive AE***************************"
        )
        model = AE(frozen_generator)
        checkpoint_dir = './ae_training_checkpoints'
        path = './output/ae_output'
        print("generator is trainable: ", model.generator_net.trainable)
    else:
        print(
            "*********************** Training Auxiliary AE***************************"
        )
        model = AAE(gan_checkpoint)
        model.discriminator.trainable = False
        model.generator_net.trainable = False
        # model.build((128, 128))
        print(model.discriminator.summary())
        print(model.discriminator.get_layer('conv2d_3'))
        checkpoint_dir = './aae_training_checkpoints'
        path = './output/aae_output'
        print("generator is trainable: ", model.generator_net.trainable)

    # Shared setup — this was duplicated verbatim in both branches above.
    checkpoint_prefix = os.path.join(checkpoint_dir, "ckpt")
    checkpoint = tf.train.Checkpoint(AE_optimizer=model.optimizer,
                                     AE=model)
    if not os.path.exists(path):
        os.makedirs(path)

    # train
    sample_input = next(iter(test_dataset))[:4, ::]
    ae_generate_and_save_images(model, 0, sample_input, path)
    for epoch in range(1, epochs + 1):
        start_time = time.time()
        for train_x in train_dataset:
            model.train(train_x)
        end_time = time.time()

        # Evaluate and checkpoint every epoch. (The original gated this on
        # ``epoch % 1 == 0`` — always true — and additionally re-saved at
        # ``epoch == epochs - 1``, writing identical images and checkpoint
        # files twice; the redundant branch is removed.)
        loss = tf.keras.metrics.Mean()
        for test_x in test_dataset:
            loss(model.compute_loss(test_x))
        elbo = -loss.result()
        print('Epoch: {}, Test set ELBO: {}, '
              'time elapse for current epoch {}'.format(
                  epoch, elbo, end_time - start_time))
        ae_generate_and_save_images(model, epoch, sample_input, path)
        # NOTE(review): the saved epoch number is offset by +1 relative to
        # the printed epoch — kept for compatibility with existing names.
        checkpoint.save(file_prefix=checkpoint_prefix + "_epoch_%d" %
                        (epoch + 1))