Example #1
A continual-learning WAE training loop: tasks from disjoint MNIST are trained one at a time, carrying the previous task's parameters (w_old, b_old) and pseudo-samples (pseudo_G_z) forward between tasks.
import logging

import tensorflow as tf

# Project-local modules; the module names below are taken from the code itself,
# except the WAE import path, which is an assumption.
import configs
import setdata
from wae import WAE


def main():

    # dataset = 'shuffle_mnist'
    dataset = 'disjoint_mnist'
    # dataset = 'cifar_10'
    # Load configs
    opts = configs.config_mnist
    # opts = configs.config_cifar
    task_num = opts['task_num']
    logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(message)s')
    # Load data
    data = setdata.set_data(dataset, task_num)
    logging.info(opts)  # log the active configuration

    # tf.set_random_seed(1233)

    with tf.Session(config=tf.ConfigProto(gpu_options=tf.GPUOptions(
            allow_growth=True))) as sess:

        # Initialize model
        wae = WAE(opts, sess)
        # Initialize variables
        sess.run(wae.init)
        logging.info(tf.trainable_variables())
        logging.info(len(tf.trainable_variables()))

        # Initialize/set former parameters as zeros
        random_z, pseudo_G_z, pseudo_T_z, w_old, b_old, f_g_z, pred = wae.former_init()

        for i in range(len(data)):
            if i == 0:
                # Consider no trans loss and regularization loss in Task 0
                lambda_list = {
                    'wae_lambda': 1,
                    'rec_lambda': 0.1,
                    'trans_lambda': 0.0,
                    'reg_lambda': 0.0,
                    'f_lambda': 0.0,
                    'main_lambda': 1.0
                }
                logging.error("task " + str(i) + ":")
                logging.error(lambda_list)
                wae.train(data, i, random_z, pseudo_G_z, pseudo_T_z, w_old,
                          b_old, f_g_z, pred, lambda_list)
                wae.test(data, True)

                # Update/save former parameters for the next task
                random_z, pseudo_G_z, pseudo_T_z, w_old, b_old, f_g_z, pred = wae.former()

            else:
                lambda_list = {
                    'wae_lambda': 1.0,
                    'rec_lambda': 0.5 * opts['task_num'],
                    'trans_lambda': 0.01 * opts['task_num'],
                    'reg_lambda': 0.01,
                    'f_lambda': 1.0,
                    'main_lambda': 1.0
                }
                logging.error("task " + str(i) + ":")
                logging.error(lambda_list)
                wae.train(data, i, random_z, pseudo_G_z, pseudo_T_z, w_old,
                          b_old, f_g_z, pred, lambda_list)
                wae.test(data, True)

                random_z, pseudo_G_z, pseudo_T_z, w_old, b_old, f_g_z, pred = wae.former()
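
The loop above only works if configs.config_mnist is a plain dict exposing at least a
'task_num' key. As a minimal sketch of what such a config module could contain (the
values here are hypothetical, not taken from the project):

# configs.py -- hypothetical minimal stand-in for the project's config module.
# Only 'task_num' is read directly by Example #1; the remaining keys are
# guesses at what WAE(opts, sess) and wae.train() would consume.
config_mnist = {
    'task_num': 5,      # number of sequential tasks (disjoint MNIST splits)
    'batch_size': 100,  # minibatch size (assumption)
    'zdim': 8,          # latent code dimensionality (assumption)
    'lr': 1e-3,         # learning rate (assumption)
}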
Example #2
The flag-driven entry point from the original WAE codebase: it picks an experiment config, applies command-line overrides, dumps all parameters to a text file, and then trains or tests the model.
File: run.py  Project: paruby/wae
import os
import logging

import configs
import utils
from datahandler import DataHandler  # project-local; import path is an assumption
from wae import WAE                   # project-local; import path is an assumption

# FLAGS is assumed to be defined at module level via tf.app.flags
# (see the sketch after this example).


def main():

    if FLAGS.exp == 'celebA':
        opts = configs.config_celebA
    elif FLAGS.exp == 'celebA_small':
        opts = configs.config_celebA_small
    elif FLAGS.exp == 'celebA_ae_patch_var':
        opts = configs.config_celebA_ae_patch_var
    elif FLAGS.exp == 'celebA_sylvain_adv':
        opts = configs.config_celebA_sylvain_adv
    elif FLAGS.exp == 'celebA_adv':
        opts = configs.config_celebA_adv
    elif FLAGS.exp == 'mnist':
        opts = configs.config_mnist
    elif FLAGS.exp == 'mnist_small':
        opts = configs.config_mnist_small
    elif FLAGS.exp == 'dsprites':
        opts = configs.config_dsprites
    elif FLAGS.exp == 'grassli':
        opts = configs.config_grassli
    elif FLAGS.exp == 'grassli_small':
        opts = configs.config_grassli_small
    elif FLAGS.exp == 'cifar':
        opts = configs.config_cifar
    else:
        assert False, 'Unknown experiment configuration'

    opts['mode'] = FLAGS.mode
    if opts['mode'] == 'test':
        assert FLAGS.checkpoint is not None, 'Checkpoint must be provided'
        opts['checkpoint'] = FLAGS.checkpoint

    if FLAGS.zdim is not None:
        opts['zdim'] = FLAGS.zdim
    if FLAGS.pz is not None:
        opts['pz'] = FLAGS.pz
    if FLAGS.lr is not None:
        opts['lr'] = FLAGS.lr
    if FLAGS.w_aef is not None:
        opts['w_aef'] = FLAGS.w_aef
    if FLAGS.z_test is not None:
        opts['z_test'] = FLAGS.z_test
    if FLAGS.lambda_schedule is not None:
        opts['lambda_schedule'] = FLAGS.lambda_schedule
    if FLAGS.work_dir is not None:
        opts['work_dir'] = FLAGS.work_dir
    if FLAGS.wae_lambda is not None:
        opts['lambda'] = FLAGS.wae_lambda
    if FLAGS.celebA_crop is not None:
        opts['celebA_crop'] = FLAGS.celebA_crop
    if FLAGS.enc_noise is not None:
        opts['e_noise'] = FLAGS.enc_noise
    if FLAGS.e_num_filters is not None:
        opts['e_num_filters'] = FLAGS.e_num_filters
    if FLAGS.g_num_filters is not None:
        opts['g_num_filters'] = FLAGS.g_num_filters
    if FLAGS.smart_cost:
        opts['cost'] = []
        if FLAGS.patch_var_w is not None:
            opts['cost'].append(('patch_variances', FLAGS.patch_var_w))
        if FLAGS.l2sq_w is not None:
            opts['cost'].append(('l2sq', FLAGS.l2sq_w))
        if FLAGS.sylvain_adv_c_w is not None and FLAGS.sylvain_emb_c_w is not None:
            adv_c_w = FLAGS.sylvain_adv_c_w
            emb_c_w = FLAGS.sylvain_emb_c_w
            opts['cost'].append(
                ('_sylvain_recon_loss_using_disc_conv', [adv_c_w, emb_c_w]))
            opts['cross_p_w'] = 0
            opts['diag_p_w'] = 0
        if FLAGS.adv_c_num_units is not None:
            opts['adv_c_num_units'] = FLAGS.adv_c_num_units
        if FLAGS.adv_c_patches_size is not None:
            opts['adv_c_patches_size'] = FLAGS.adv_c_patches_size
        if FLAGS.adv_use_sq is not None:
            opts['adv_use_sq'] = FLAGS.adv_use_sq

    if opts['verbose']:
        logging.basicConfig(level=logging.DEBUG,
                            format='%(asctime)s - %(message)s')
    else:
        logging.basicConfig(level=logging.INFO,
                            format='%(asctime)s - %(message)s')
    utils.create_dir(opts['work_dir'])
    utils.create_dir(os.path.join(opts['work_dir'], 'checkpoints'))

    if opts['e_noise'] == 'gaussian' and opts['pz'] != 'normal':
        assert False, 'Gaussian encoders compatible only with Gaussian prior'

    # Dumping all the configs to the text file
    with utils.o_gfile((opts['work_dir'], 'params.txt'), 'w') as text:
        text.write('Parameters:\n')
        for key in opts:
            text.write('%s : %s\n' % (key, opts[key]))

    # Loading the dataset
    data = DataHandler(opts)
    assert data.num_points >= opts['batch_size'], 'Training set too small'

    # Creating WAE model
    wae = WAE(opts)
    if opts['mode'] == 'train':

        # Training WAE
        wae.train(data)

    elif opts['mode'] == 'test':

        # Testing WAE from the provided checkpoint
        wae.test()
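
Example #2 reads everything from a module-level FLAGS object. In TensorFlow 1.x
codebases of this kind the flags are typically declared with tf.app.flags; the
following is a hedged sketch of declarations consistent with the attributes read
above (the names match the code, while the defaults and help strings are assumptions):

# Hypothetical flag declarations; only a representative subset is shown.
import tensorflow as tf

flags = tf.app.flags
flags.DEFINE_string('exp', 'mnist', 'Which experiment config to load')
flags.DEFINE_string('mode', 'train', "Either 'train' or 'test'")
flags.DEFINE_string('checkpoint', None, 'Checkpoint path, required in test mode')
flags.DEFINE_integer('zdim', None, 'Override for the latent dimensionality')
flags.DEFINE_float('lr', None, 'Override for the learning rate')
flags.DEFINE_string('work_dir', None, 'Override for the working directory')
FLAGS = flags.FLAGS

With declarations like these, a typical invocation would be something like
python run.py --exp=mnist --mode=train.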
Example #3
A variant of the same entry point with a selectable training method and four run modes (train, test, reg, vizu).
import os
import logging

import tensorflow as tf

import configs
import utils
from datahandler import DataHandler  # project-local; import path is an assumption
from wae import WAE                   # project-local; import path is an assumption

# FLAGS is assumed to be defined at module level via tf.app.flags, as in Example #2.


def main():

    # Select dataset to use
    if FLAGS.exp == 'celebA':
        opts = configs.config_celebA
    elif FLAGS.exp == 'celebA_small':
        opts = configs.config_celebA_small
    elif FLAGS.exp == 'mnist':
        opts = configs.config_mnist
    elif FLAGS.exp == 'mnist_small':
        opts = configs.config_mnist_small
    elif FLAGS.exp == 'dsprites':
        opts = configs.config_dsprites
    elif FLAGS.exp == 'grassli':
        opts = configs.config_grassli
    elif FLAGS.exp == 'grassli_small':
        opts = configs.config_grassli_small
    else:
        assert False, 'Unknown experiment configuration'

    # Select training method
    if FLAGS.method:
        opts['method'] = FLAGS.method

    # Working directory
    if FLAGS.work_dir:
        opts['work_dir'] = FLAGS.work_dir

    # Verbose
    if opts['verbose']:
        logging.basicConfig(level=logging.DEBUG,
                            format='%(asctime)s - %(message)s')
    else:
        logging.basicConfig(level=logging.INFO,
                            format='%(asctime)s - %(message)s')

    # Create directories
    utils.create_dir(opts['method'])
    work_dir = os.path.join(opts['method'], opts['work_dir'])
    utils.create_dir(work_dir)
    utils.create_dir(os.path.join(work_dir, 'checkpoints'))

    # Dumping all the configs to the text file
    with utils.o_gfile((work_dir, 'params.txt'), 'w') as text:
        text.write('Parameters:\n')
        for key in opts:
            text.write('%s : %s\n' % (key, opts[key]))

    # Loading the dataset
    data = DataHandler(opts)
    assert data.num_points >= opts['batch_size'], 'Training set too small'

    # Reset the default TensorFlow graph
    tf.reset_default_graph()

    # Build the WAE model
    wae = WAE(opts)

    # Training/testing/vizu
    if FLAGS.mode == "train":
        wae.train(data, opts['work_dir'], FLAGS.weights_file)
    elif FLAGS.mode == "test":
        wae.test(data, opts['work_dir'], FLAGS.weights_file)
    elif FLAGS.mode == "reg":
        wae.reg(data, opts['work_dir'], FLAGS.weights_file)
    elif FLAGS.mode == "vizu":
        wae.vizu(data, opts['work_dir'], FLAGS.weights_file)
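
A purely stylistic alternative to the if/elif dispatch above is a dictionary
lookup; this sketch is behaviorally equivalent and is not how the project itself
writes it:

# Dispatch table equivalent to the mode handling above.
mode_fn = {
    'train': wae.train,
    'test': wae.test,
    'reg': wae.reg,
    'vizu': wae.vizu,
}
assert FLAGS.mode in mode_fn, 'Unknown mode: %s' % FLAGS.mode
mode_fn[FLAGS.mode](data, opts['work_dir'], FLAGS.weights_file)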