# Reconstruction-script setup: derive the cache directory from the experiment
# name, echo all parsed arguments, create the output directory layout, load
# the dataset, and initialize predictor/generator parameters.
# NOTE(review): args, expr_name, nc, nz, n_f, n_layers, load_imgs, utils,
# image_save and train_dcgan_utils are defined earlier in the file (outside
# this chunk) — confirm before reuse.
args.cache_dir = './cache/%s/' % expr_name
for arg in vars(args):
    print('[%s] =' % arg, getattr(args, arg))
# create directories
rec_dir = os.path.join(args.cache_dir, 'rec')        # reconstruction outputs
model_dir = os.path.join(args.cache_dir, 'models')   # saved model parameters
log_dir = os.path.join(args.cache_dir, 'log')        # run logs
web_dir = os.path.join(args.cache_dir, 'web_rec')    # HTML visualization pages
html = image_save.ImageSave(web_dir, expr_name, append=True)
utils.mkdirs([rec_dir, model_dir, log_dir, web_dir])
# load data
tr_data, te_data, tr_stream, te_stream, ntrain, ntest \
    = load_imgs(ntrain=None, ntest=None, batch_size=args.batch_size,
                data_file=args.data_file)
te_handle = te_data.open()
# Truncate the test set to a whole number of batches so every batch is full.
ntest = int(np.floor(ntest / float(args.batch_size)) * args.batch_size)
# st()  # disabled debugger breakpoint (presumably a pdb/ipdb set_trace alias)
test_x, = te_data.get_data(te_handle, slice(0, ntest))
# Convert raw images to the network's input range/layout — see
# train_dcgan_utils.transform for the exact normalization.
test_x = train_dcgan_utils.transform(test_x, nc=nc)
predict_params = train_dcgan_utils.init_predict_params(nz=nz, n_f=n_f,
                                                       n_layers=n_layers, nc=nc)
# load modelG
gen_params = train_dcgan_utils.init_gen_params(nz=nz, n_f=n_f,
                                               n_layers=n_layers, nc=nc)
"""Report the number of training and test images in an HDF5 dataset.

Usage: python this_script.py --data_file ../datasets/outdoor_64.hdf5
"""
import argparse

import load


def main():
    """Parse CLI arguments, query the dataset, and print the image counts."""
    parser = argparse.ArgumentParser(description='get #images in a hdf5 dataset.')
    parser.add_argument('--data_file', dest='data_file',
                        help='the location of dataset file',
                        default='../datasets/outdoor_64.hdf5', type=str)
    args = parser.parse_args()
    # Only the ntrain/ntest counts are needed; the dataset objects and
    # iteration streams returned by load_imgs are discarded.
    _, _, _, _, ntrain, ntest = load.load_imgs(
        ntrain=None, ntest=None, batch_size=128, data_file=args.data_file)
    print('dataset: %s; #training images: %d; #test images: %d'
          % (args.data_file, ntrain, ntest))


# Guard the entry point so importing this module has no side effects.
if __name__ == '__main__':
    main()
for arg in vars(args): print('[%s] =' % arg, getattr(args, arg)) # create directories sample_dir = os.path.join(args.cache_dir, 'samples') model_dir = os.path.join(args.cache_dir, 'models') log_dir = os.path.join(args.cache_dir, 'log') web_dir = os.path.join(args.cache_dir, 'web_dcgan') html = image_save.ImageSave(web_dir, expr_name, append=True) utils.mkdirs([sample_dir, model_dir, log_dir, web_dir]) # load data from hdf5 file tr_data, te_data, tr_stream, te_stream, ntrain, ntest = load.load_imgs( ntrain=None, ntest=None, batch_size=args.batch_size, data_file=args.data_file) te_handle = te_data.open() test_x, = te_data.get_data(te_handle, slice(0, ntest)) # generate real samples and test transform/inverse_transform test_x = train_dcgan_utils.transform(test_x, nc=nc) vis_idxs = py_rng.sample(np.arange(len(test_x)), n_vis) vaX_vis = train_dcgan_utils.inverse_transform(test_x[vis_idxs], npx=npx, nc=nc) # st() n_grid = int(np.sqrt(n_vis)) grid_real = utils.grid_vis((vaX_vis * 255.0).astype(np.uint8), n_grid, n_grid) train_dcgan_utils.save_image(grid_real, os.path.join(sample_dir, 'real_samples.png'))
# Reconstruction-script setup (variant that also loads a trained generator):
# derive the cache directory, echo arguments, create directories, load data,
# then restore pre-trained generator weights and batchnorm statistics and
# begin building the symbolic model.
# NOTE(review): args, expr_name, nc, nz, n_f, n_layers, load_imgs, utils,
# image_save, train_dcgan_utils, time and T (Theano tensor module) are
# defined earlier in the file (outside this chunk) — confirm before reuse.
args.cache_dir = './cache/%s/' % expr_name
for arg in vars(args):
    print('[%s] =' % arg, getattr(args, arg))
# create directories
rec_dir = os.path.join(args.cache_dir, 'rec')        # reconstruction outputs
model_dir = os.path.join(args.cache_dir, 'models')   # saved model parameters
log_dir = os.path.join(args.cache_dir, 'log')        # run logs
web_dir = os.path.join(args.cache_dir, 'web_rec')    # HTML visualization pages
html = image_save.ImageSave(web_dir, expr_name, append=True)
utils.mkdirs([rec_dir, model_dir, log_dir, web_dir])
# load data
tr_data, te_data, tr_stream, te_stream, ntrain, ntest \
    = load_imgs(ntrain=None, ntest=None, batch_size=args.batch_size,
                data_file=args.data_file)
te_handle = te_data.open()
# Truncate the test set to a whole number of batches so every batch is full.
ntest = int(np.floor(ntest / float(args.batch_size)) * args.batch_size)
# st()  # disabled debugger breakpoint (presumably a pdb/ipdb set_trace alias)
test_x, = te_data.get_data(te_handle, slice(0, ntest))
# Convert raw images to the network's input range/layout.
test_x = train_dcgan_utils.transform(test_x, nc=nc)
predict_params = train_dcgan_utils.init_predict_params(nz=nz, n_f=n_f,
                                                       n_layers=n_layers, nc=nc)
# load modelG: initialize generator parameters, then overwrite them with the
# weights and batchnorm statistics saved under model_dir.
gen_params = train_dcgan_utils.init_gen_params(nz=nz, n_f=n_f,
                                               n_layers=n_layers, nc=nc)
train_dcgan_utils.load_model(gen_params, os.path.join(model_dir, 'gen_params'))
gen_batchnorm = train_dcgan_utils.load_batchnorm(
    os.path.join(model_dir, 'gen_batchnorm'))
# define the model
t = time()  # start timing the symbolic-graph construction
x = T.tensor4()  # symbolic 4D input batch (presumably NCHW images — confirm)
# DCGAN training-script setup (variant with an optional cache_dir override
# and discriminator initialization): default the cache directory from the
# experiment name only when the user did not supply one.
# NOTE(review): args, expr_name, nc, npx, n_vis, n_f, n_layers, py_rng, load,
# utils, image_save and train_dcgan_utils come from earlier in the file
# (outside this chunk) — confirm before reuse.
if not args.cache_dir:
    args.cache_dir = './cache/%s/' % expr_name
for arg in vars(args):
    print('[%s] =' % arg, getattr(args, arg))
# create directories
sample_dir = os.path.join(args.cache_dir, 'samples')  # sample-image grids
model_dir = os.path.join(args.cache_dir, 'models')    # saved model parameters
log_dir = os.path.join(args.cache_dir, 'log')         # training logs
web_dir = os.path.join(args.cache_dir, 'web_dcgan')   # HTML visualization pages
html = image_save.ImageSave(web_dir, expr_name, append=True)
utils.mkdirs([sample_dir, model_dir, log_dir, web_dir])
# load data from hdf5 file
tr_data, te_data, tr_stream, te_stream, ntrain, ntest = load.load_imgs(
    ntrain=None, ntest=None, batch_size=args.batch_size,
    data_file=args.data_file)
te_handle = te_data.open()
test_x, = te_data.get_data(te_handle, slice(0, ntest))
# generate real samples and test transform/inverse_transform
test_x = train_dcgan_utils.transform(test_x, nc=nc)
# Pick a random subset of test images to visualize.
vis_idxs = py_rng.sample(np.arange(len(test_x)), n_vis)
vaX_vis = train_dcgan_utils.inverse_transform(test_x[vis_idxs], npx=npx, nc=nc)
# st()  # disabled debugger breakpoint (presumably a pdb/ipdb set_trace alias)
# Arrange the n_vis images into a square grid; assumes n_vis is a perfect
# square — TODO confirm against where n_vis is set.
n_grid = int(np.sqrt(n_vis))
grid_real = utils.grid_vis((vaX_vis * 255.0).astype(np.uint8), n_grid, n_grid)
train_dcgan_utils.save_image(grid_real,
                             os.path.join(sample_dir, 'real_samples.png'))
# define DCGAN model
disc_params = train_dcgan_utils.init_disc_params(n_f=n_f, n_layers=n_layers,
                                                 nc=nc)