Example 1
# Output directories under the cache root: reconstructions, model weights,
# training logs, and an HTML results gallery.
rec_dir = os.path.join(args.cache_dir, 'rec')
model_dir = os.path.join(args.cache_dir, 'models')
log_dir = os.path.join(args.cache_dir, 'log')
web_dir = os.path.join(args.cache_dir, 'web_rec')
# HTML image gallery writer; append=True adds to an existing page rather
# than overwriting it.
html = image_save.ImageSave(web_dir, expr_name, append=True)
utils.mkdirs([rec_dir, model_dir, log_dir, web_dir])

# load data
tr_data, te_data, tr_stream, te_stream, ntrain, ntest \
    = load_imgs(ntrain=None, ntest=None, batch_size=args.batch_size, data_file=args.data_file)
te_handle = te_data.open()
# Truncate ntest down to a whole number of batches.
ntest = int(np.floor(ntest / float(args.batch_size)) * args.batch_size)
# st()
test_x, = te_data.get_data(te_handle, slice(0, ntest))

# Convert raw test images into network input format (presumably scaling and
# channel layout for nc channels — see train_dcgan_utils.transform).
test_x = train_dcgan_utils.transform(test_x, nc=nc)
# Fresh parameters for the encoder ("predict") network.
predict_params = train_dcgan_utils.init_predict_params(nz=nz,
                                                       n_f=n_f,
                                                       n_layers=n_layers,
                                                       nc=nc)
# load modelG
# Generator parameters are initialized, then overwritten with the weights
# saved under model_dir.
gen_params = train_dcgan_utils.init_gen_params(nz=nz,
                                               n_f=n_f,
                                               n_layers=n_layers,
                                               nc=nc)
train_dcgan_utils.load_model(gen_params, os.path.join(model_dir, 'gen_params'))
gen_batchnorm = train_dcgan_utils.load_batchnorm(
    os.path.join(model_dir, 'gen_batchnorm'))

# define the model
t = time()
Example 2
print('COMPILING...')
t = time()
# Compile a Theano function mapping an image batch to the list of batch-norm
# activation tensors (symbolic `x` and `bn_data` are defined earlier in the file).
_estimate_batchnorm = theano.function([x], bn_data)
print('%.2f seconds to compile theano functions' % (time() - t))

# Running per-layer sums / means of the batch-norm activations.
nb_sum = []
nb_mean = []
nb_mean_ext = []
num_batches = int(np.floor(ntrain / float(batch_size)))
print('n_batch = %d, batch_size = %d' % (num_batches, batch_size))

# first pass
print('first pass: computing mean')
n = 0
# FIX: pass the integer `num_batches` as tqdm's total — the original used
# `ntrain / batch_size`, which is a float under Python 3 true division.
for imb, in tqdm(tr_stream.get_epoch_iterator(), total=num_batches):
    imb = train_dcgan_utils.transform(imb, nc=nc)
    bn_data = _estimate_batchnorm(imb)

    if n == 0:
        # First batch initializes the running sums.
        for d in bn_data:
            nb_sum.append(d)
    else:
        # Subsequent batches accumulate element-wise.
        # (`idx` rather than `id` to avoid shadowing the builtin.)
        for idx, d in enumerate(bn_data):
            nb_sum[idx] = nb_sum[idx] + d
    n = n + 1
    if n >= num_batches:
        break
# compute empirical mean
# For 4-D (conv) activations, average over batch and spatial axes, then
# divide by the batch count to get the per-channel mean.
for idx, d_sum in enumerate(nb_sum):
    if d_sum.ndim == 4:
        m = np.mean(d_sum, axis=(0, 2, 3)) / num_batches
Example 3
# Output directories under the cache root: reconstructions, model weights,
# training logs, and an HTML results gallery.
rec_dir = os.path.join(args.cache_dir, 'rec')
model_dir = os.path.join(args.cache_dir, 'models')
log_dir = os.path.join(args.cache_dir, 'log')
web_dir = os.path.join(args.cache_dir, 'web_rec')
# HTML image gallery writer; append=True adds to an existing page rather
# than overwriting it.
html = image_save.ImageSave(web_dir, expr_name, append=True)
utils.mkdirs([rec_dir, model_dir, log_dir, web_dir])

# load data
tr_data, te_data, tr_stream, te_stream, ntrain, ntest \
    = load_imgs(ntrain=None, ntest=None, batch_size=args.batch_size, data_file=args.data_file)
te_handle = te_data.open()
# Truncate ntest down to a whole number of batches.
# FIX: spaces around `/` for PEP 8 consistency with the identical expression
# elsewhere in this file.
ntest = int(np.floor(ntest / float(args.batch_size)) * args.batch_size)
# st()
test_x, = te_data.get_data(te_handle, slice(0, ntest))

# Convert raw test images into network input format (presumably scaling and
# channel layout for nc channels — see train_dcgan_utils.transform).
test_x = train_dcgan_utils.transform(test_x, nc=nc)
# Fresh parameters for the encoder ("predict") network.
predict_params = train_dcgan_utils.init_predict_params(nz=nz, n_f=n_f, n_layers=n_layers, nc=nc)
# load modelG
# Generator parameters are initialized, then overwritten with the weights
# saved under model_dir.
gen_params = train_dcgan_utils.init_gen_params(nz=nz, n_f=n_f, n_layers=n_layers, nc=nc)
train_dcgan_utils.load_model(gen_params, os.path.join(model_dir, 'gen_params'))
gen_batchnorm = train_dcgan_utils.load_batchnorm(os.path.join(model_dir, 'gen_batchnorm'))

# define the model
# FIX: `t= time()` -> `t = time()` (PEP 8 spacing, consistent with the other
# timing lines in this file).
t = time()
x = T.tensor4()
# Encoder maps images x to latent codes z; generator maps z back to images gx.
z = train_dcgan_utils.predict(x, predict_params, n_layers=n_layers)
gx = train_dcgan_utils.gen_test(z, gen_params, gen_batchnorm, n_layers=n_layers, n_f=n_f)

# define pixel loss
pixel_loss = costs.L2Loss(gx, x)
Example 4
t = time()
# Compile a Theano function mapping an image batch to the list of batch-norm
# activation tensors (symbolic `x` and `bn_data` are defined earlier in the file).
_estimate_batchnorm = theano.function([x], bn_data)
print('%.2f seconds to compile theano functions' % (time() - t))


# Running per-layer sums / means of the batch-norm activations.
nb_sum = []
nb_mean = []
nb_mean_ext = []
num_batches = int(np.floor(ntrain / float(batch_size)))
print('n_batch = %d, batch_size = %d' % (num_batches, batch_size))

# first pass
print('first pass: computing mean')
n = 0
# FIX: pass the integer `num_batches` as tqdm's total — the original used
# `ntrain / batch_size`, which is a float under Python 3 true division.
for imb, in tqdm(tr_stream.get_epoch_iterator(), total=num_batches):
    imb = train_dcgan_utils.transform(imb, nc=nc)
    bn_data = _estimate_batchnorm(imb)

    if n == 0:
        # First batch initializes the running sums.
        for d in bn_data:
            nb_sum.append(d)
    else:
        # Subsequent batches accumulate element-wise.
        # (`idx` rather than `id` to avoid shadowing the builtin.)
        for idx, d in enumerate(bn_data):
            nb_sum[idx] = nb_sum[idx] + d
    # FIX: `n = n+1` -> spaced per PEP 8, consistent with the parallel loop
    # elsewhere in this file.
    n = n + 1
    if n >= num_batches:
        break
# compute empirical mean
# For 4-D (conv) activations, average over batch and spatial axes, then
# divide by the batch count to get the per-channel mean.
for idx, d_sum in enumerate(nb_sum):
    if d_sum.ndim == 4:
        m = np.mean(d_sum, axis=(0, 2, 3)) / num_batches