pad=1, use_weightnorm=config.use_weightnorm))  # (fragment) tail of a Convolution2D(...) call started above this view
discriminator.add(BatchNormalization(256))
discriminator.add(Activation(config.nonlinearity))
# Optionally append minibatch discrimination to let the discriminator
# compare samples within a batch (a standard GAN mode-collapse mitigation).
if config.use_minibatch_discrimination:
    discriminator.add(reshape_1d())  # flatten feature maps before the minibatch layer
    discriminator.add(
        MinibatchDiscrimination(None, num_kernels=50, ndim_kernel=5, train_weights=True))
# Final scalar output (real/fake score). `None` defers the input size.
discriminator.add(Linear(None, 1, use_weightnorm=config.use_weightnorm))

# Serialize config + architecture so the discriminator can be rebuilt later.
params = {
    "config": config.to_dict(),
    "model": discriminator.to_dict(),
}
with open(discriminator_sequence_filename, "w") as f:
    json.dump(params, f, indent=4, sort_keys=True, separators=(',', ': '))

discriminator_params = params

# specify generator
generator_sequence_filename = args.model_dir + "/generator.json"
if os.path.isfile(generator_sequence_filename):
    # A saved generator spec exists; reload it instead of rebuilding.
    print "loading", generator_sequence_filename
    with open(generator_sequence_filename, "r") as f:
        try:
            params = json.load(f)  # (fragment) except-handler lies beyond this view
# (fragment) final discriminator layer: 2-way output (presumably real/fake logits — TODO confirm)
discriminator.add(Linear(None, 2))

# Generator network: maps an input of ndim_x through two 1000-unit hidden
# layers to a latent code of ndim_z.
generator = Sequential(weight_initializer=config.weight_initializer, weight_init_std=config.weight_init_std)
generator.add(Linear(config.ndim_x, 1000))
generator.add(Activation(config.nonlinearity))
generator.add(Linear(None, 1000))
generator.add(Activation(config.nonlinearity))
if config.distribution_z == "deterministic":
    # Point estimate of z.
    generator.add(Linear(None, config.ndim_z))
elif config.distribution_z == "gaussian":
    generator.add(Gaussian(None, config.ndim_z))  # outputs mean and ln(var)
else:
    raise Exception()

# Serialize config + all sub-network architectures for later reconstruction.
params = {
    "config": config.to_dict(),
    "decoder": decoder.to_dict(),
    "generator": generator.to_dict(),
    "discriminator": discriminator.to_dict(),
}
with open(model_filename, "w") as f:
    json.dump(params, f, indent=4, sort_keys=True, separators=(',', ': '))

# Build the adversarial autoencoder and restore any saved weights.
aae = AAE(params)
aae.load(args.model_dir)

# Move the model to the selected GPU; -1 means CPU-only.
if args.gpu_device != -1:
    cuda.get_device(args.gpu_device).use()
    aae.to_gpu()
Merge(num_inputs=2, out_size=500, use_weightnorm=config.use_weightnorm))  # (fragment) tail of a q_y_ax.add(...) call started above this view
q_y_ax.add(BatchNormalization(500))
q_y_ax.add(Activation(config.nonlinearity))
q_y_ax.add(Linear(None, 500, use_weightnorm=config.use_weightnorm))
q_y_ax.add(BatchNormalization(500))
q_y_ax.add(Activation(config.nonlinearity))
q_y_ax.add(Linear(None, 500, use_weightnorm=config.use_weightnorm))
q_y_ax.add(BatchNormalization(500))
q_y_ax.add(Activation(config.nonlinearity))
# Output layer: ndim_y units (class scores for the inference network q(y|a,x)).
q_y_ax.add(
    Linear(None, config.ndim_y, use_weightnorm=config.use_weightnorm))

# Serialize config + all SDGM sub-network architectures.
params = {
    "config": config.to_dict(),
    "p_a_yz": p_a_yz.to_dict(),
    "p_x_ayz": p_x_ayz.to_dict(),
    "q_a_x": q_a_x.to_dict(),
    "q_y_ax": q_y_ax.to_dict(),
    "q_z_axy": q_z_axy.to_dict(),
}
with open(model_filename, "w") as f:
    json.dump(params, f, indent=4, sort_keys=True, separators=(',', ': '))

# Build the skip deep generative model and restore any saved weights.
sdgm = SDGM(params)
sdgm.load(args.model_dir)

# Move the model to the selected GPU; -1 means CPU-only.
if args.gpu_device != -1:
    cuda.get_device(args.gpu_device).use()
    sdgm.to_gpu()
# Latent head z: deterministic point estimate or a Gaussian (mean + ln(var)),
# selected by config. Presumably fed from generator_shared defined above this view.
generator_z = Sequential()
if config.distribution_z == "deterministic":
    generator_z.add(Linear(None, config.ndim_z))
elif config.distribution_z == "gaussian":
    generator_z.add(Gaussian(None, config.ndim_z))  # outputs mean and ln(var)
else:
    raise Exception()

# Label head y: ndim_y-dimensional output.
generator_y = Sequential()
generator_y.add(Linear(None, config.ndim_y))

# Serialize config + all sub-network architectures for later reconstruction.
params = {
    "config": config.to_dict(),
    "decoder": decoder.to_dict(),
    "generator_shared": generator_shared.to_dict(),
    "generator_z": generator_z.to_dict(),
    "generator_y": generator_y.to_dict(),
    "discriminator_y": discriminator_y.to_dict(),
    "discriminator_z": discriminator_z.to_dict(),
}
with open(model_filename, "w") as f:
    json.dump(params, f, indent=4, sort_keys=True, separators=(',', ': '))

# Build the adversarial autoencoder and restore any saved weights.
aae = AAE(params)
aae.load(args.model_dir)

# Select the GPU; -1 means CPU-only. (fragment) the matching to_gpu() call
# presumably follows beyond this view.
if args.gpu_device != -1:
    cuda.get_device(args.gpu_device).use()
# Optimizer hyperparameters.
config.learning_rate = 0.0001
config.momentum = 0.9
config.gradient_clipping = 1
config.weight_decay = 0

# Convolutional classifier: three downsampling conv blocks
# (1->32->64->128 channels, each halving spatial size via stride=2),
# then a linear layer to num_classes.
model = Sequential()
model.add(Convolution2D(1, 32, ksize=4, stride=2, pad=1))
model.add(BatchNormalization(32))
model.add(Activation(config.nonlinearity))
model.add(Convolution2D(32, 64, ksize=4, stride=2, pad=1))
model.add(BatchNormalization(64))
model.add(Activation(config.nonlinearity))
model.add(Convolution2D(64, 128, ksize=3, stride=2, pad=1))
model.add(BatchNormalization(128))
model.add(Activation(config.nonlinearity))
model.add(Linear(None, config.num_classes))  # `None` infers the flattened input size

# Serialize config + architecture so the model can be rebuilt later.
params = {
    "config": config.to_dict(),
    "model": model.to_dict(),
}
with open(sequence_filename, "w") as f:
    json.dump(params, f, indent=4, sort_keys=True, separators=(',', ': '))

# Build the discriminator from the spec and restore any saved weights.
# NOTE: rebinds `model` from the Sequential spec to the Discriminator instance.
model = Discriminator(params)
model.load(args.model_dir)

# Move the model to the selected GPU; -1 means CPU-only.
if args.gpu_device != -1:
    cuda.get_device(args.gpu_device).use()
    model.to_gpu()
# q(y|a,x) - y ~ Categorical q_y_ax = Sequential(weight_initializer=config.weight_initializer, weight_init_std=config.weight_init_std) q_y_ax.add(Merge(num_inputs=2, out_size=500, use_weightnorm=config.use_weightnorm)) q_y_ax.add(BatchNormalization(500)) q_y_ax.add(Activation(config.nonlinearity)) q_y_ax.add(Linear(None, 500, use_weightnorm=config.use_weightnorm)) q_y_ax.add(BatchNormalization(500)) q_y_ax.add(Activation(config.nonlinearity)) q_y_ax.add(Linear(None, 500, use_weightnorm=config.use_weightnorm)) q_y_ax.add(BatchNormalization(500)) q_y_ax.add(Activation(config.nonlinearity)) q_y_ax.add(Linear(None, config.ndim_y, use_weightnorm=config.use_weightnorm)) params = { "config": config.to_dict(), "p_a_yz": p_a_yz.to_dict(), "p_x_ayz": p_x_ayz.to_dict(), "q_a_x": q_a_x.to_dict(), "q_y_ax": q_y_ax.to_dict(), "q_z_axy": q_z_axy.to_dict(), } with open(model_filename, "w") as f: json.dump(params, f, indent=4, sort_keys=True, separators=(',', ': ')) sdgm = SDGM(params) sdgm.load(args.model_dir) if args.gpu_device != -1: cuda.get_device(args.gpu_device).use() sdgm.to_gpu()
# (fragment) tail of the feature extractor: noise injection for regularization,
# projection to num_experts features, squashed to (-1, 1) by tanh.
feature_extractor.add(Activation(config.nonlinearity))
feature_extractor.add(gaussian_noise(std=0.3))
feature_extractor.add(Linear(None, config.num_experts, use_weightnorm=config.use_weightnorm))
feature_extractor.add(tanh())

# experts
# Square linear map over the expert features (num_experts -> num_experts).
experts = Sequential(weight_initializer=config.weight_initializer, weight_init_std=config.weight_init_std)
experts.add(Linear(config.num_experts, config.num_experts, use_weightnorm=config.use_weightnorm))

# b
# Bias-free scalar linear term applied directly to the raw input
# (presumably the linear term of an energy function — TODO confirm).
b = Sequential(weight_initializer=config.weight_initializer, weight_init_std=config.weight_init_std)
b.add(Linear(config.ndim_input, 1, nobias=True))

# Serialize config + energy-model sub-network architectures.
params = {
    "config": config.to_dict(),
    "feature_extractor": feature_extractor.to_dict(),
    "experts": experts.to_dict(),
    "b": b.to_dict(),
}
with open(energy_model_filename, "w") as f:
    json.dump(params, f, indent=4, sort_keys=True, separators=(',', ': '))

params_energy_model = params

# specify generative model
generative_model_filename = args.model_dir + "/generative_model.json"
if os.path.isfile(generative_model_filename):
    # A saved generative-model spec exists; reload it instead of rebuilding.
    print "loading", generative_model_filename
    with open(generative_model_filename, "r") as f:
        try:  # (fragment) try-body and except-handler lie beyond this view
# (fragment) tail of the BEGAN generator: project to 512 x (projection_size^2),
# reshape to feature maps, then upsample x2 four times via pixel shuffling
# (512 -> 256 -> 128 -> 64 -> 3 channels), i.e. 16x spatial upscaling to RGB.
generator.add(Activation(config.nonlinearity_g))
# NOTE(review): BatchNormalization is applied after the activation here,
# unlike the later stages (BN before activation) — confirm intended.
generator.add(BatchNormalization(512 * projection_size**2))
generator.add(reshape((-1, 512, projection_size, projection_size)))
generator.add(PixelShuffler2D(512, 256, r=2))
generator.add(BatchNormalization(256))
generator.add(Activation(config.nonlinearity_g))
generator.add(PixelShuffler2D(256, 128, r=2))
generator.add(BatchNormalization(128))
generator.add(Activation(config.nonlinearity_g))
generator.add(PixelShuffler2D(128, 64, r=2))
generator.add(BatchNormalization(64))
generator.add(Activation(config.nonlinearity_g))
generator.add(PixelShuffler2D(64, 3, r=2))

# Serialize config + all sub-network architectures for later reconstruction.
params = {
    "config": config.to_dict(),
    "decoder": decoder.to_dict(),
    "encoder": encoder.to_dict(),
    "generator": generator.to_dict(),
}
with open(sequence_filename, "w") as f:
    json.dump(params, f, indent=4, sort_keys=True, separators=(',', ': '))

# Build the BEGAN model and restore any saved weights.
began = BEGAN(params)
began.load(args.model_dir)

# Move the model to the selected GPU; -1 means CPU-only.
if args.gpu_device != -1:
    cuda.get_device(args.gpu_device).use()
    began.to_gpu()