Code Example #1
if __name__=="__main__":

    model_config_dict = OrderedDict()
    model_config_dict['batch_size']          = 64
    model_config_dict['num_display']         = 16*16
    model_config_dict['hidden_distribution'] = 1.
    model_config_dict['epochs']              = 200

    is_training = True
    is_continue = False
    last_batch_idx = 0
    if is_training:
        #################
        # LOAD DATA SET #
        #################
        _ , data_stream = faces(batch_size=model_config_dict['batch_size'])

        expert_size_list = [1024]
        hidden_size_list = [100]
        num_filters_list = [128]
        lr_list          = [1e-3]
        lambda_eng_list  = [1e-5]

        for lr in lr_list:
            for num_filters in num_filters_list:
                for hidden_size in hidden_size_list:
                    for expert_size in expert_size_list:
                        for lambda_eng in lambda_eng_list:
                            model_config_dict['hidden_size']         = hidden_size
                            model_config_dict['expert_size']         = expert_size
                            model_config_dict['min_num_gen_filters'] = num_filters
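The fragment above is cut off before the innermost loop body launches a run. As a reference for the pattern, here is a minimal, self-contained sketch of the same hyperparameter grid search using itertools.product; the train() function is a hypothetical placeholder, not part of the original project:

from collections import OrderedDict
from itertools import product

def train(config):
    # Hypothetical placeholder for the per-configuration training run.
    print('training with', dict(config))

expert_size_list = [1024]
hidden_size_list = [100]
num_filters_list = [128]
lr_list          = [1e-3]
lambda_eng_list  = [1e-5]

# product() walks every combination, equivalent to the nested loops above.
for lr, num_filters, hidden_size, expert_size, lambda_eng in product(
        lr_list, num_filters_list, hidden_size_list, expert_size_list, lambda_eng_list):
    config = OrderedDict()
    config['batch_size']          = 64
    config['hidden_size']         = hidden_size
    config['expert_size']         = expert_size
    config['min_num_gen_filters'] = num_filters
    config['learning_rate']       = lr
    config['lambda_eng']          = lambda_eng
    train(config)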
Code Example #2
File: train_uncond_dcgan.py  Project: igotyooo/dcgan
if not os.path.exists(model_dir):
    os.makedirs(model_dir)
if not os.path.exists(samples_dir):
    os.makedirs(samples_dir)
f_log = open('logs/%s.ndjson'%desc, 'wb')
log_fields = [
    'n_epochs', 
    'n_updates', 
    'n_examples', 
    'n_seconds',
    '1k_va_nnd',
    '10k_va_nnd',
    '100k_va_nnd',
    'g_cost',
    'd_cost',
]
tr_data, te_data, tr_stream, val_stream, te_stream = faces(ntrain=ntrain) # Only tr_data/tr_stream are used.
tr_handle = tr_data.open()
vaX, = tr_data.get_data(tr_handle, slice(0, 10000))
vaX = transform(vaX)
vis_idxs = py_rng.sample(np.arange(len(vaX)), nvis)
vaX_vis = inverse_transform(vaX[vis_idxs])
color_grid_vis(vaX_vis, (14, 14), 'samples/%s_etl_test.png'%desc)
sample_zmb = floatX(np_rng.uniform(-1., 1., size=(nvis, nz)))
vaX = vaX.reshape(len(vaX), -1)

# DEFINE NETWORKS.
relu = activations.Rectify()
sigmoid = activations.Sigmoid()
lrelu = activations.LeakyRectify()
tanh = activations.Tanh()
bce = T.nnet.binary_crossentropy
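The fragment only opens the .ndjson log and declares its field names. A short sketch of how one record per update is presumably appended, with one JSON object per line (the values and file path here are placeholders):

import json
import os

log_fields = ['n_epochs', 'n_updates', 'n_examples', 'n_seconds',
              '1k_va_nnd', '10k_va_nnd', '100k_va_nnd', 'g_cost', 'd_cost']

if not os.path.exists('logs/'):
    os.makedirs('logs/')

with open('logs/example.ndjson', 'w') as f_log:
    # Placeholder values; in training these would be the current epoch count,
    # update count, timing, nearest-neighbour distances, and the two costs.
    record = dict((field, 0.0) for field in log_fields)
    f_log.write(json.dumps(record) + '\n')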
Code Example #3
            if batch_count % 100 == 0:
                # Sample images from the fixed latent codes and save them as a 16x16 grid.
                save_as = samples_dir + "/" + model_name + "_SAMPLES{}.png".format(batch_count)
                sample_data = sampling_function(fixed_hidden_data)[0]
                sample_data = np.asarray(sample_data)
                color_grid_vis(inverse_transform(sample_data).transpose([0, 2, 3, 1]), (16, 16), save_as)

                np.save(file=samples_dir + "/" + model_name + "_MOMENT_COST", arr=np.asarray(moment_cost_list))
                np.save(file=samples_dir + "/" + model_name + "_VAE_COST", arr=np.asarray(vae_cost_list))


if __name__ == "__main__":

    batch_size = 128
    num_epochs = 100
    _, data_stream = faces(batch_size=batch_size)

    num_hiddens = 1024
    learning_rate = 1e-4
    l2_weight = 1e-5

    optimizer = Adagrad(lr=sharedX(learning_rate), regularizer=Regularizer(l2=l2_weight))

    model_test_name = (
        model_name
        + "_HIDDEN{}".format(int(num_hiddens))
        + "_REG{}".format(int(-np.log10(l2_weight)))
        + "_LR{}".format(int(-np.log10(learning_rate)))
    )
    train_model(
        model_name=model_test_name,
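For the values above, the run name simply encodes the hyperparameters: HIDDEN is num_hiddens, while REG and LR are the negated base-10 exponents of l2_weight and learning_rate. A quick check of the formatting (model_name is defined elsewhere in the original file, so a placeholder is used here):

import numpy as np

model_name = "model"  # placeholder; defined elsewhere in the original file
num_hiddens, l2_weight, learning_rate = 1024, 1e-5, 1e-4
name = (model_name
        + "_HIDDEN{}".format(int(num_hiddens))
        + "_REG{}".format(int(-np.log10(l2_weight)))
        + "_LR{}".format(int(-np.log10(learning_rate))))
print(name)  # e.g. model_HIDDEN1024_REG5_LR4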
Code Example #4
                save_as = samples_dir + "/" + model_test_name + "_MODEL.pkl"
                save_model(tensor_params_list=decoder_parameters, save_to=save_as)


if __name__ == "__main__":

    model_config_dict = OrderedDict()
    model_config_dict["batch_size"] = 12 * 12
    model_config_dict["num_display"] = 16 * 16
    model_config_dict["hidden_distribution"] = 1.0
    model_config_dict["epochs"] = 200

    #################
    # LOAD DATA SET #
    #################
    _, data_stream = faces(batch_size=model_config_dict["batch_size"])

    hidden_size_list = [1024]
    num_filters_list = [128]
    lr_list = [1e-4]
    dropout_list = [False]
    lambda_eng_list = [1e-10]
    lambda_gen_list = [1e-10]

    for lr in lr_list:
        for num_filters in num_filters_list:
            for hidden_size in hidden_size_list:
                for lambda_eng in lambda_eng_list:
                    for lambda_gen in lambda_gen_list:
                        model_config_dict["hidden_size"] = hidden_size
                        model_config_dict["min_num_gen_filters"] = num_filters
Code Example #5
File: train_uncond_dcgan.py  Project: VittalP/gamn
l2 = 1e-5         # l2 weight decay
nvis = 196        # # of samples to visualize during training
b1 = 0.5          # momentum term of adam
nc = 3            # # of channels in image
nbatch = 128      # # of examples in batch
npx = 64          # # of pixels width/height of images
nz = 100          # # of dim for Z
ngf = 128         # # of gen filters in first conv layer
ndf = 128         # # of discrim filters in first conv layer
nx = npx*npx*nc   # # of dimensions in X
niter = 25        # # of iter at starting learning rate
niter_decay = 0   # # of iter to linearly decay learning rate to zero
lr = 0.0002       # initial learning rate for adam
ntrain = 266251   # # of examples to train on

tr_data, te_data, tr_stream, val_stream, te_stream = faces(ntrain=ntrain)

tr_handle = tr_data.open()
vaX, = tr_data.get_data(tr_handle, slice(0, 10000))
vaX = transform(vaX)

desc = 'vc_dcgan'
model_dir = 'models/%s'%desc
samples_dir = 'samples/%s'%desc
if not os.path.exists('logs/'):
    os.makedirs('logs/')
if not os.path.exists(model_dir):
    os.makedirs(model_dir)
if not os.path.exists(samples_dir):
    os.makedirs(samples_dir)
Code Example #6
l2 = 1e-5  # l2 weight decay
nvis = 196  # # of samples to visualize during training
b1 = 0.5  # momentum term of adam
nc = 3  # # of channels in image
nbatch = 128  # # of examples in batch
npx = 64  # # of pixels width/height of images
nz = 100  # # of dim for Z
ngf = 128  # # of gen filters in first conv layer
ndf = 128  # # of discrim filters in first conv layer
nx = npx * npx * nc  # # of dimensions in X
niter = 25  # # of iter at starting learning rate
niter_decay = 0  # # of iter to linearly decay learning rate to zero
lr = 0.0002  # initial learning rate for adam
ntrain = 350000  # # of examples to train on

tr_data, te_data, tr_stream, val_stream, te_stream = faces(ntrain=ntrain)

tr_handle = tr_data.open()
vaX, = tr_data.get_data(tr_handle, slice(0, 10000))
vaX = transform(vaX)

desc = 'uncond_dcgan'
model_dir = 'models/%s' % desc
samples_dir = 'samples/%s' % desc
if not os.path.exists('logs/'):
    os.makedirs('logs/')
if not os.path.exists(model_dir):
    os.makedirs(model_dir)
if not os.path.exists(samples_dir):
    os.makedirs(samples_dir)
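The niter / niter_decay comments describe the learning-rate schedule used by these scripts: the initial rate is held for niter epochs and then decayed linearly to zero over niter_decay epochs (with niter_decay = 0, as here, there is no decay phase). A small sketch of that schedule, under that assumption:

lr = 0.0002      # initial learning rate for adam
niter = 25       # epochs at the initial learning rate
niter_decay = 0  # epochs over which to decay linearly to zero

def lr_for_epoch(epoch):
    # Full rate during the first `niter` epochs (and always if there is no decay phase).
    if epoch < niter or niter_decay == 0:
        return lr
    # Linear ramp from lr down to 0 over the following `niter_decay` epochs.
    remaining = max(0, niter + niter_decay - epoch)
    return lr * remaining / float(niter_decay)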