config.optimizer = "Adam" config.learning_rate = 0.0003 config.momentum = 0.9 config.gradient_clipping = 10 config.weight_decay = 0 config.use_weightnorm = False config.num_mc_samples = 1 # p(x|y,z) - x ~ Bernoulli p_x_ayz = Sequential(weight_initializer=config.weight_initializer, weight_init_std=config.weight_init_std) p_x_ayz.add( Merge(num_inputs=3, out_size=500, use_weightnorm=config.use_weightnorm)) p_x_ayz.add(BatchNormalization(500)) p_x_ayz.add(Activation(config.nonlinearity)) p_x_ayz.add(Linear(None, 500, use_weightnorm=config.use_weightnorm)) p_x_ayz.add(BatchNormalization(500)) p_x_ayz.add(Activation(config.nonlinearity)) p_x_ayz.add(Linear(None, 500, use_weightnorm=config.use_weightnorm)) p_x_ayz.add(BatchNormalization(500)) p_x_ayz.add(Activation(config.nonlinearity)) p_x_ayz.add( Linear(None, config.ndim_x, use_weightnorm=config.use_weightnorm)) # p(a|x,y,z) - a ~ Gaussian p_a_yz = Sequential(weight_initializer=config.weight_initializer, weight_init_std=config.weight_init_std) p_a_yz.add( Merge(num_inputs=2, out_size=500, use_weightnorm=config.use_weightnorm))
config.weight_decay = 0
config.use_feature_matching = False
config.use_minibatch_discrimination = False

discriminator = Sequential(weight_initializer=config.weight_initializer,
                           weight_init_std=config.weight_init_std)
discriminator.add(gaussian_noise(std=0.3))
discriminator.add(Convolution2D(3, 32, ksize=4, stride=2, pad=1,
                                use_weightnorm=config.use_weightnorm))
discriminator.add(BatchNormalization(32))
discriminator.add(Activation(config.nonlinearity))
discriminator.add(Convolution2D(32, 64, ksize=4, stride=2, pad=1,
                                use_weightnorm=config.use_weightnorm))
discriminator.add(BatchNormalization(64))
discriminator.add(Activation(config.nonlinearity))
discriminator.add(Convolution2D(64, 128, ksize=4, stride=2, pad=1,
                                use_weightnorm=config.use_weightnorm))
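# Each 4x4 / stride-2 / pad-1 convolution halves the spatial resolution:
# out = (in + 2*pad - ksize) // stride + 1. A minimal, self-contained
# check of that arithmetic (the input size 64 is a hypothetical example,
# not taken from the source):
def conv_out(size, ksize=4, stride=2, pad=1):
    return (size + 2 * pad - ksize) // stride + 1

size = 64
for _ in range(3):
    size = conv_out(size)  # 64 -> 32 -> 16 -> 8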
import json

config.weight_initializer = "Normal"
config.use_weightnorm = False
config.nonlinearity = "leaky_relu"
config.optimizer = "rmsprop"
config.learning_rate = 0.0001
config.momentum = 0.5
config.gradient_clipping = 1
config.weight_decay = 0
config.use_feature_matching = False
config.use_minibatch_discrimination = False

discriminator = Sequential(weight_initializer=config.weight_initializer,
                           weight_init_std=config.weight_init_std)
discriminator.add(Linear(None, 500, use_weightnorm=config.use_weightnorm))
# discriminator.add(gaussian_noise(std=0.5))
discriminator.add(Activation(config.nonlinearity))
# discriminator.add(BatchNormalization(500))
if config.use_minibatch_discrimination:
    discriminator.add(MinibatchDiscrimination(None, num_kernels=50, ndim_kernel=5))
discriminator.add(Linear(None, 500, use_weightnorm=config.use_weightnorm))

params = {
    "config": config.to_dict(),
    "model": discriminator.to_dict(),
}

with open(discriminator_sequence_filename, "w") as f:
    json.dump(params, f, indent=4, sort_keys=True, separators=(',', ': '))

discriminator_params = params
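# Hypothetical round-trip check (illustrative only): the serialized
# architecture can be read back with the standard json module.
with open(discriminator_sequence_filename) as f:
    loaded = json.load(f)
assert loaded["config"]["nonlinearity"] == "leaky_relu"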
config.ndim_y = 10
config.ndim_z = 10
config.distribution_z = "deterministic"  # deterministic or gaussian
config.weight_std = 0.01
config.weight_initializer = "Normal"
config.nonlinearity = "relu"
config.optimizer = "Adam"
config.learning_rate = 0.0001
config.momentum = 0.1
config.gradient_clipping = 5
config.weight_decay = 0

# x = decoder(y, z)
decoder = Sequential()
decoder.add(Merge(num_inputs=2, out_size=1000, nobias=True))
decoder.add(Activation(config.nonlinearity))
# decoder.add(BatchNormalization(1000))
decoder.add(Linear(None, 1000))
decoder.add(Activation(config.nonlinearity))
# decoder.add(BatchNormalization(1000))
decoder.add(Linear(None, 1000))
decoder.add(Activation(config.nonlinearity))
# decoder.add(BatchNormalization(1000))
decoder.add(Linear(None, config.ndim_x))
decoder.add(tanh())

discriminator_z = Sequential()
discriminator_z.add(gaussian_noise(std=0.3))
discriminator_z.add(Linear(config.ndim_z, 1000))
discriminator_z.add(Activation(config.nonlinearity))
# discriminator_z.add(BatchNormalization(1000))
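# The decoder ends with tanh(), so reconstructions live in [-1, 1].
# A minimal preprocessing sketch matching that output range (function
# name and uint8 input are assumptions, not from the source):
import numpy as np

def to_tanh_range(pixels_uint8):
    # map [0, 255] -> [-1, 1]
    return pixels_uint8.astype(np.float32) / 127.5 - 1.0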
else:
    config = Params()
    config.num_classes = 10
    config.weight_std = 0.1
    config.weight_initializer = "Normal"
    config.nonlinearity = "relu"
    config.optimizer = "adam"
    config.learning_rate = 0.0001
    config.momentum = 0.9
    config.gradient_clipping = 1
    config.weight_decay = 0

    model = Sequential()
    model.add(Convolution2D(1, 32, ksize=4, stride=2, pad=1))
    model.add(BatchNormalization(32))
    model.add(Activation(config.nonlinearity))
    model.add(Convolution2D(32, 64, ksize=4, stride=2, pad=1))
    model.add(BatchNormalization(64))
    model.add(Activation(config.nonlinearity))
    model.add(Convolution2D(64, 128, ksize=3, stride=2, pad=1))
    model.add(BatchNormalization(128))
    model.add(Activation(config.nonlinearity))
    model.add(Linear(None, config.num_classes))

    params = {
        "config": config.to_dict(),
        "model": model.to_dict(),
    }

    with open(sequence_filename, "w") as f:
        json.dump(params, f, indent=4, sort_keys=True, separators=(',', ': '))
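# The dangling else: above implies a load-or-create guard around the
# snippet. A plausible reconstruction of the missing branch (hypothetical;
# the actual condition is not in the source):
#
#   if os.path.isfile(sequence_filename):
#       with open(sequence_filename) as f:
#           params = json.load(f)  # reuse the previously saved architecture
#   else:
#       ...  # build and serialize the model, as above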
config.ndim_input = image_width * image_height
config.num_experts = 128
config.weight_init_std = 0.05
config.weight_initializer = "Normal"
config.use_weightnorm = True
config.nonlinearity = "elu"
config.optimizer = "Adam"
config.learning_rate = 0.0002
config.momentum = 0.5
config.gradient_clipping = 10
config.weight_decay = 0

# feature extractor
feature_extractor = Sequential(weight_initializer=config.weight_initializer,
                               weight_init_std=config.weight_init_std)
feature_extractor.add(Linear(config.ndim_input, 1000, use_weightnorm=config.use_weightnorm))
feature_extractor.add(Activation(config.nonlinearity))
feature_extractor.add(gaussian_noise(std=0.3))
feature_extractor.add(Linear(None, 500, use_weightnorm=config.use_weightnorm))
feature_extractor.add(Activation(config.nonlinearity))
feature_extractor.add(gaussian_noise(std=0.3))
feature_extractor.add(Linear(None, 250, use_weightnorm=config.use_weightnorm))
feature_extractor.add(Activation(config.nonlinearity))
feature_extractor.add(gaussian_noise(std=0.3))
feature_extractor.add(Linear(None, config.num_experts, use_weightnorm=config.use_weightnorm))
feature_extractor.add(tanh())

# experts
experts = Sequential(weight_initializer=config.weight_initializer,
                     weight_init_std=config.weight_init_std)
experts.add(Linear(config.num_experts, config.num_experts, use_weightnorm=config.use_weightnorm))

# b
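# A product-of-experts energy is one common reading of this layout (an
# assumption; the source does not state the energy function): with
# features phi(x) in [-1, 1] from the tanh feature extractor and
# K = config.num_experts experts,
#
#   E(x) = -b(x) - sum_k softplus(w_k . phi(x) + c_k)
#
# where the truncated "# b" comment above presumably introduces the
# bias term b(x).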
config.weight_std = 0.01
config.weight_initializer = "Normal"
config.nonlinearity_d = "elu"
config.nonlinearity_g = "elu"
config.optimizer = "adam"
config.learning_rate = 0.0001
config.momentum = 0.5
config.gradient_clipping = 1
config.weight_decay = 0

# Discriminator
encoder = Sequential()
encoder.add(gaussian_noise(std=0.3))
encoder.add(Convolution2D(3, 32, ksize=4, stride=2, pad=1))
encoder.add(BatchNormalization(32))
encoder.add(Activation(config.nonlinearity_d))
encoder.add(Convolution2D(32, 64, ksize=4, stride=2, pad=1))
encoder.add(BatchNormalization(64))
encoder.add(Activation(config.nonlinearity_d))
encoder.add(Convolution2D(64, 128, ksize=4, stride=2, pad=1))
encoder.add(BatchNormalization(128))
encoder.add(Activation(config.nonlinearity_d))
encoder.add(Convolution2D(128, 256, ksize=4, stride=2, pad=1))
encoder.add(BatchNormalization(256))
encoder.add(Activation(config.nonlinearity_d))
encoder.add(Linear(None, ndim_h))

projection_size = 6

# Decoder
decoder = Sequential()
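# For the mirrored decoder, a 4x4 / stride-2 / pad-1 deconvolution
# doubles the resolution: out = stride * (in - 1) + ksize - 2 * pad.
# Starting from the hinted 6x6 projection (an assumption based on
# projection_size = 6; the source snippet ends before the decoder body):
def deconv_out(size, ksize=4, stride=2, pad=1):
    return stride * (size - 1) + ksize - 2 * pad

size = projection_size
for _ in range(3):
    size = deconv_out(size)  # 6 -> 12 -> 24 -> 48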