config.gradient_clipping = 10
config.weight_decay = 0
config.use_feature_matching = False
config.use_minibatch_discrimination = False

discriminator = Sequential(weight_initializer=config.weight_initializer,
                           weight_init_std=config.weight_init_std)
discriminator.add(gaussian_noise(std=0.3))
discriminator.add(Convolution2D(3, 32, ksize=4, stride=2, pad=1,
                                use_weightnorm=config.use_weightnorm))
discriminator.add(BatchNormalization(32))
discriminator.add(Activation(config.nonlinearity))
discriminator.add(Convolution2D(32, 64, ksize=4, stride=2, pad=1,
                                use_weightnorm=config.use_weightnorm))
discriminator.add(BatchNormalization(64))
discriminator.add(Activation(config.nonlinearity))
discriminator.add(Convolution2D(64, 128, ksize=4, stride=2, pad=1,
                                use_weightnorm=config.use_weightnorm))  # trailing args assumed from the pattern above; the original is cut off here
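# config.use_feature_matching toggles the feature-matching objective of
# Salimans et al. (2016): instead of maximizing the discriminator's final
# output, the generator matches statistics of an intermediate feature layer.
# A minimal NumPy sketch of that loss; f_real/f_fake stand in for
# intermediate discriminator activations and are not part of the library above:
import numpy as np

def feature_matching_loss(f_real, f_fake):
    # squared L2 distance between the mean feature activations of the two batches
    return np.sum((np.mean(f_real, axis=0) - np.mean(f_fake, axis=0)) ** 2)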
config.use_weightnorm = False
config.weight_init_std = 0.1
config.weight_initializer = "Normal"
config.nonlinearity = "relu"
config.optimizer = "adam"
config.learning_rate = 0.0001
config.momentum = 0.5
config.gradient_clipping = 10
config.weight_decay = 0

# generator
generator = Sequential(weight_initializer=config.weight_initializer,
                       weight_init_std=config.weight_init_std)
generator.add(Linear(config.ndim_input, 500, use_weightnorm=config.use_weightnorm))
generator.add(BatchNormalization(500))
generator.add(Activation(config.nonlinearity))
generator.add(Linear(None, 500, use_weightnorm=config.use_weightnorm))
generator.add(BatchNormalization(500))
generator.add(Activation(config.nonlinearity))
generator.add(Linear(None, config.ndim_output, use_weightnorm=config.use_weightnorm))
if config.distribution_output == "sigmoid":
    generator.add(Activation("sigmoid"))
elif config.distribution_output == "tanh":
    generator.add(Activation("tanh"))

params = {
    "config": config.to_dict(),
    "model": generator.to_dict(),
}
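# The sibling snippets below persist params as JSON; a minimal sketch of the
# same pattern for this generator ("model.json" is an illustrative filename):
import json

with open("model.json", "w") as f:
    json.dump(params, f, indent=4, sort_keys=True, separators=(',', ': '))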
config.nonlinearity = "elu"
config.optimizer = "Adam"
config.learning_rate = 0.0003
config.momentum = 0.9
config.gradient_clipping = 10
config.weight_decay = 0
config.use_weightnorm = False
config.num_mc_samples = 1

# p(x|a,y,z) - x ~ Bernoulli
p_x_ayz = Sequential(weight_initializer=config.weight_initializer,
                     weight_init_std=config.weight_init_std)
p_x_ayz.add(Merge(num_inputs=3, out_size=500, use_weightnorm=config.use_weightnorm))
p_x_ayz.add(BatchNormalization(500))
p_x_ayz.add(Activation(config.nonlinearity))
p_x_ayz.add(Linear(None, 500, use_weightnorm=config.use_weightnorm))
p_x_ayz.add(BatchNormalization(500))
p_x_ayz.add(Activation(config.nonlinearity))
p_x_ayz.add(Linear(None, 500, use_weightnorm=config.use_weightnorm))
p_x_ayz.add(BatchNormalization(500))
p_x_ayz.add(Activation(config.nonlinearity))
p_x_ayz.add(Linear(None, config.ndim_x, use_weightnorm=config.use_weightnorm))

# p(a|y,z) - a ~ Gaussian
p_a_yz = Sequential(weight_initializer=config.weight_initializer,
                    weight_init_std=config.weight_init_std)
p_a_yz.add(Merge(num_inputs=2, out_size=500,
                 use_weightnorm=config.use_weightnorm))  # keyword assumed from the Merge call above; the original is cut off here
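# The "a ~ Gaussian" head above parameterizes a diagonal Gaussian; drawing
# from it is typically done with the reparameterization trick so sampling
# stays differentiable. A minimal NumPy sketch (function name illustrative):
import numpy as np

def sample_gaussian(mean, ln_var):
    # a = mu + sigma * eps with eps ~ N(0, I)
    eps = np.random.standard_normal(mean.shape).astype(mean.dtype)
    return mean + np.exp(0.5 * ln_var) * eps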
        raise Exception("could not load {}".format(sequence_filename))
else:
    config = Params()
    config.num_classes = 10
    config.weight_std = 0.1
    config.weight_initializer = "Normal"
    config.nonlinearity = "relu"
    config.optimizer = "adam"
    config.learning_rate = 0.0001
    config.momentum = 0.9
    config.gradient_clipping = 1
    config.weight_decay = 0

    model = Sequential()
    model.add(Convolution2D(1, 32, ksize=4, stride=2, pad=1))
    model.add(BatchNormalization(32))
    model.add(Activation(config.nonlinearity))
    model.add(Convolution2D(32, 64, ksize=4, stride=2, pad=1))
    model.add(BatchNormalization(64))
    model.add(Activation(config.nonlinearity))
    model.add(Convolution2D(64, 128, ksize=3, stride=2, pad=1))
    model.add(BatchNormalization(128))
    model.add(Activation(config.nonlinearity))
    model.add(Linear(None, config.num_classes))

    params = {
        "config": config.to_dict(),
        "model": model.to_dict(),
    }
    with open(sequence_filename, "w") as f:
        json.dump(params, f, indent=4, sort_keys=True, separators=(',', ': '))  # dump call assumed from the sibling snippet below; the original is cut off here
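# Linear(None, ...) defers its input size until the first forward pass. For a
# 28x28 single-channel input (an assumption; the dataset is not shown here)
# the conv stack above shrinks 28 -> 14 -> 7 -> 4, giving 128*4*4 = 2048
# features. A quick check of that arithmetic:
def conv_out(size, ksize, stride, pad):
    return (size + 2 * pad - ksize) // stride + 1

size = 28
for ksize in (4, 4, 3):
    size = conv_out(size, ksize, stride=2, pad=1)
print(128 * size * size)  # 2048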
else:
    config = Params()
    config.num_classes = 10
    config.weight_std = 0.1
    config.weight_initializer = "Normal"
    config.nonlinearity = "relu"
    config.optimizer = "adam"
    config.learning_rate = 0.0001
    config.momentum = 0.9
    config.gradient_clipping = 1
    config.weight_decay = 0

    model = Sequential()
    model.add(Linear(None, 500))
    model.add(Activation(config.nonlinearity))
    model.add(BatchNormalization(500))
    model.add(Linear(None, 500))
    model.add(Activation(config.nonlinearity))
    model.add(BatchNormalization(500))
    model.add(Linear(None, config.num_classes))

    params = {
        "config": config.to_dict(),
        "model": model.to_dict(),
    }
    with open(sequence_filename, "w") as f:
        json.dump(params, f, indent=4, sort_keys=True, separators=(',', ': '))

model = Discriminator(params)
model.load(args.model_dir)
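# The branch elided above (the "could not load" path in the sibling snippet)
# presumably reads the same JSON back; a minimal sketch of that counterpart:
import json

def load_params(sequence_filename):
    with open(sequence_filename, "r") as f:
        return json.load(f)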
config.ndim_y = 10
config.weight_init_std = 0.01
config.weight_initializer = "Normal"
config.nonlinearity = "relu"
config.optimizer = "Adam"
config.learning_rate = 0.0002
config.momentum = 0.9
config.gradient_clipping = 10
config.weight_decay = 0
config.lambda_ = 1  # weight of the virtual adversarial regularization term
config.Ip = 1       # number of power iterations for the adversarial direction

model = Sequential(weight_initializer=config.weight_initializer,
                   weight_init_std=config.weight_init_std)
model.add(Linear(None, 1200))
model.add(Activation(config.nonlinearity))
model.add(BatchNormalization(1200))
model.add(Linear(None, 600))
model.add(Activation(config.nonlinearity))
model.add(BatchNormalization(600))
model.add(Linear(None, config.ndim_y))

params = {
    "config": config.to_dict(),
    "model": model.to_dict(),
}
with open(model_filename, "w") as f:
    json.dump(params, f, indent=4, sort_keys=True, separators=(',', ': '))

vat = VAT(params)
vat.load(args.model_dir)
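# config.Ip above sets how many power iterations VAT runs to approximate the
# virtual adversarial direction, and config.lambda_ weights the resulting
# regularizer. A minimal NumPy sketch of the direction search; kl_grad is a
# hypothetical stand-in for the gradient of KL(p(y|x) || p(y|x+r)) w.r.t. r:
import numpy as np

def l2_normalize(d, eps=1e-8):
    return d / (np.sqrt(np.sum(d ** 2, axis=1, keepdims=True)) + eps)

def virtual_adversarial_direction(x, kl_grad, Ip=1, xi=10.0):
    d = l2_normalize(np.random.standard_normal(x.shape))
    for _ in range(Ip):
        d = l2_normalize(kl_grad(x, xi * d))  # one power iteration
    return d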
config.ndim_h = ndim_h
config.weight_std = 0.01
config.weight_initializer = "Normal"
config.nonlinearity_d = "elu"
config.nonlinearity_g = "elu"
config.optimizer = "adam"
config.learning_rate = 0.0001
config.momentum = 0.5
config.gradient_clipping = 1
config.weight_decay = 0

# Discriminator
encoder = Sequential()
encoder.add(gaussian_noise(std=0.3))
encoder.add(Convolution2D(3, 32, ksize=4, stride=2, pad=1))
encoder.add(BatchNormalization(32))
encoder.add(Activation(config.nonlinearity_d))
encoder.add(Convolution2D(32, 64, ksize=4, stride=2, pad=1))
encoder.add(BatchNormalization(64))
encoder.add(Activation(config.nonlinearity_d))
encoder.add(Convolution2D(64, 128, ksize=4, stride=2, pad=1))
encoder.add(BatchNormalization(128))
encoder.add(Activation(config.nonlinearity_d))
encoder.add(Convolution2D(128, 256, ksize=4, stride=2, pad=1))
encoder.add(BatchNormalization(256))
encoder.add(Activation(config.nonlinearity_d))
encoder.add(Linear(None, ndim_h))

projection_size = 6

# Decoder
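# The encoder above maps an image to an ndim_h code and the (truncated)
# decoder maps it back; in autoencoder-style discriminators of this kind the
# score is the reconstruction error. A minimal NumPy sketch, assuming that
# design (per-pixel L1, as BEGAN-style models use):
import numpy as np

def reconstruction_energy(x, x_reconstructed):
    # mean absolute reconstruction error per sample; x has shape (N, C, H, W)
    return np.mean(np.abs(x - x_reconstructed), axis=(1, 2, 3))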
config = EnergyModelParams()
config.num_experts = 512
config.weight_init_std = 0.05
config.weight_initializer = "Normal"
config.use_weightnorm = False
config.nonlinearity = "elu"
config.optimizer = "Adam"
config.learning_rate = 0.0002
config.momentum = 0.5
config.gradient_clipping = 10
config.weight_decay = 0

# feature extractor
feature_extractor = Sequential(weight_initializer=config.weight_initializer,
                               weight_init_std=config.weight_init_std)
feature_extractor.add(Convolution2D(3, 32, ksize=4, stride=2, pad=1,
                                    use_weightnorm=config.use_weightnorm))
feature_extractor.add(BatchNormalization(32))
feature_extractor.add(Activation(config.nonlinearity))
feature_extractor.add(dropout())
feature_extractor.add(Convolution2D(32, 64, ksize=4, stride=2, pad=1,
                                    use_weightnorm=config.use_weightnorm))
feature_extractor.add(BatchNormalization(64))
feature_extractor.add(Activation(config.nonlinearity))
feature_extractor.add(dropout())
feature_extractor.add(Convolution2D(64, 192, ksize=4, stride=2, pad=1,
                                    use_weightnorm=config.use_weightnorm))
feature_extractor.add(BatchNormalization(192))
feature_extractor.add(Activation(config.nonlinearity))
feature_extractor.add(dropout())
feature_extractor.add(Convolution2D(192, 256, ksize=4, stride=2, pad=1,
                                    use_weightnorm=config.use_weightnorm))
feature_extractor.add(reshape_1d())
feature_extractor.add(MinibatchDiscrimination(None, num_kernels=50, ndim_kernel=5, train_weights=True))
feature_extractor.add(tanh())
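# MinibatchDiscrimination(num_kernels=50, ndim_kernel=5) appends the
# minibatch feature of Salimans et al. (2016): each sample is compared with
# the rest of the batch, letting the model detect a collapsed generator.
# A minimal NumPy sketch of that computation (T is the learned kernel
# tensor; B = num_kernels, C = ndim_kernel):
import numpy as np

def minibatch_discrimination(features, T):
    # features: (N, A), T: (A, B, C)
    M = np.tensordot(features, T, axes=1)   # (N, B, C)
    diffs = M[:, None] - M[None, :]         # (N, N, B, C) pairwise differences
    l1 = np.abs(diffs).sum(axis=3)          # (N, N, B) L1 distances per kernel
    return np.exp(-l1).sum(axis=1)          # (N, B) minibatch features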