Example #1
    # generator.add(Activation(config.nonlinearity))
    # generator.add(BatchNormalization(512 * projection_size ** 2))
    # generator.add(reshape((-1, 512, projection_size, projection_size)))
    # generator.add(Deconvolution2D(512, 256, ksize=4, stride=2, pad=paddings.pop(0)))
    # generator.add(BatchNormalization(256))
    # generator.add(Activation(config.nonlinearity))
    # generator.add(Deconvolution2D(256, 128, ksize=4, stride=2, pad=paddings.pop(0)))
    # generator.add(BatchNormalization(128))
    # generator.add(Activation(config.nonlinearity))
    # generator.add(Deconvolution2D(128, 64, ksize=4, stride=2, pad=paddings.pop(0)))
    # generator.add(BatchNormalization(64))
    # generator.add(Activation(config.nonlinearity))
    # generator.add(Deconvolution2D(64, 3, ksize=4, stride=2, pad=paddings.pop(0)))

    # PixelShuffler version
    generator.add(Linear(config.ndim_input, 512 * projection_size**2))
    generator.add(Activation(config.nonlinearity))
    generator.add(BatchNormalization(512 * projection_size**2))
    generator.add(reshape((-1, 512, projection_size, projection_size)))
    generator.add(PixelShuffler2D(512, 256, r=2))
    generator.add(BatchNormalization(256))
    generator.add(Activation(config.nonlinearity))
    generator.add(PixelShuffler2D(256, 128, r=2))
    generator.add(BatchNormalization(128))
    generator.add(Activation(config.nonlinearity))
    generator.add(PixelShuffler2D(128, 64, r=2))
    generator.add(BatchNormalization(64))
    generator.add(Activation(config.nonlinearity))
    generator.add(PixelShuffler2D(64, 3, r=2))

    if config.distribution_output == "sigmoid":
        generator.add(sigmoid())
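
The PixelShuffler2D layers above replace the commented-out strided deconvolutions with sub-pixel convolution (Shi et al., 2016): an ordinary convolution produces r^2 times the target channel count, and a depth-to-space rearrangement folds those channels into an r-times larger feature map. The sketch below shows the idea in plain Chainer; it is an assumption about how such a layer is typically built, not the library's actual PixelShuffler2D.

    import chainer
    import chainer.functions as F
    import chainer.links as L

    class PixelShufflerSketch(chainer.Chain):
        # Hypothetical stand-alone pixel-shuffler upsampling block.
        def __init__(self, in_channels, out_channels, r=2):
            super(PixelShufflerSketch, self).__init__()
            self.r = r
            with self.init_scope():
                # Convolve to r*r times the target channel count ...
                self.conv = L.Convolution2D(in_channels, out_channels * r * r,
                                            ksize=3, stride=1, pad=1)

        def __call__(self, x):
            # ... then rearrange channels into an r-times larger spatial grid.
            return F.depth2space(self.conv(x), self.r)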
Example #2

	config = Config()
	config.ndim_x = 28 * 28
	config.ndim_y = 10
	config.ndim_z = 2
	config.distribution_z = "deterministic"	# deterministic or gaussian
	config.weight_init_std = 0.001
	config.weight_initializer = "Normal"
	config.nonlinearity = "relu"
	config.optimizer = "Adam"
	config.learning_rate = 0.0002
	config.momentum = 0.5
	config.gradient_clipping = 5
	config.weight_decay = 0

	decoder = Sequential(weight_initializer=config.weight_initializer, weight_init_std=config.weight_init_std)
	decoder.add(Linear(None, 1000))
	decoder.add(Activation(config.nonlinearity))
	decoder.add(Linear(None, 1000))
	decoder.add(Activation(config.nonlinearity))
	decoder.add(Linear(None, config.ndim_x))
	decoder.add(sigmoid())

	discriminator = Sequential(weight_initializer=config.weight_initializer, weight_init_std=config.weight_init_std)
	discriminator.add(Merge(num_inputs=2, out_size=1000, nobias=True))
	discriminator.add(gaussian_noise(std=0.3))
	discriminator.add(Activation(config.nonlinearity))
	discriminator.add(Linear(None, 1000))
	discriminator.add(Activation(config.nonlinearity))
	discriminator.add(Linear(None, 1000))
	discriminator.add(Activation(config.nonlinearity))
	discriminator.add(Linear(None, 2))
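
Merge is how this library feeds several inputs (here an image and a label, as in an adversarial-autoencoder discriminator over (x, y)) into one hidden layer. A common way to realize such a layer is one linear projection per input whose outputs are summed; the sketch below is an assumption about that behavior, not the library's actual Merge.

    import chainer
    import chainer.links as L

    class MergeSketch(chainer.ChainList):
        # Hypothetical Merge: project each input separately, sum the results.
        def __init__(self, num_inputs, out_size, nobias=False):
            links = [L.Linear(None, out_size, nobias=nobias)
                     for _ in range(num_inputs)]
            super(MergeSketch, self).__init__(*links)

        def __call__(self, *inputs):
            return sum(link(x) for link, x in zip(self, inputs))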
Example #3
    config.learning_rate = 0.0003
    config.momentum = 0.9
    config.gradient_clipping = 10
    config.weight_decay = 0
    config.use_weightnorm = False
    config.num_mc_samples = 1

    # p(x|a,y,z) - x ~ Bernoulli
    p_x_ayz = Sequential(weight_initializer=config.weight_initializer,
                         weight_init_std=config.weight_init_std)
    p_x_ayz.add(
        Merge(num_inputs=3, out_size=500,
              use_weightnorm=config.use_weightnorm))
    p_x_ayz.add(BatchNormalization(500))
    p_x_ayz.add(Activation(config.nonlinearity))
    p_x_ayz.add(Linear(None, 500, use_weightnorm=config.use_weightnorm))
    p_x_ayz.add(BatchNormalization(500))
    p_x_ayz.add(Activation(config.nonlinearity))
    p_x_ayz.add(Linear(None, 500, use_weightnorm=config.use_weightnorm))
    p_x_ayz.add(BatchNormalization(500))
    p_x_ayz.add(Activation(config.nonlinearity))
    p_x_ayz.add(
        Linear(None, config.ndim_x, use_weightnorm=config.use_weightnorm))

    # p(a|y,z) - a ~ Gaussian
    p_a_yz = Sequential(weight_initializer=config.weight_initializer,
                        weight_init_std=config.weight_init_std)
    p_a_yz.add(
        Merge(num_inputs=2, out_size=500,
              use_weightnorm=config.use_weightnorm))
    p_a_yz.add(BatchNormalization(500))
Example #4

    config.ndim_y = 10
    config.ndim_reduction = 2
    config.ndim_z = config.ndim_reduction
    config.cluster_head_distance_threshold = 1
    config.distribution_z = "deterministic"  # deterministic or gaussian
    config.weight_std = 0.001
    config.weight_initializer = "Normal"
    config.nonlinearity = "relu"
    config.optimizer = "Adam"
    config.learning_rate = 0.0001
    config.momentum = 0.5
    config.gradient_clipping = 5
    config.weight_decay = 0

    decoder = Sequential()
    decoder.add(Linear(None, 1000))
    decoder.add(Activation(config.nonlinearity))
    # decoder.add(BatchNormalization(1000))
    decoder.add(Linear(None, 1000))
    decoder.add(Activation(config.nonlinearity))
    # decoder.add(BatchNormalization(1000))
    decoder.add(Linear(None, config.ndim_x))
    decoder.add(tanh())

    discriminator_z = Sequential()
    discriminator_z.add(gaussian_noise(std=0.3))
    discriminator_z.add(Linear(config.ndim_z, 1000))
    discriminator_z.add(Activation(config.nonlinearity))
    # discriminator_z.add(BatchNormalization(1000))
    discriminator_z.add(Linear(None, 1000))
    discriminator_z.add(Activation(config.nonlinearity))
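
gaussian_noise(std=0.3) in front of discriminator_z is the instance-noise trick: corrupting the discriminator's inputs blurs its decision boundary and stabilizes adversarial training. A minimal sketch of such a layer (an assumption; the example's own gaussian_noise may differ, e.g. in how it behaves at test time):

    import chainer
    from chainer import configuration

    class GaussianNoiseSketch(chainer.Link):
        # Hypothetical noise layer: additive Gaussian noise during training,
        # identity at test time.
        def __init__(self, std=0.3):
            super(GaussianNoiseSketch, self).__init__()
            self.std = std

        def __call__(self, x):
            if not configuration.config.train:
                return x
            noise = self.xp.random.normal(0, self.std, x.shape).astype(x.dtype)
            return x + noise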
Example #5
    config.distribution_z = "deterministic"  # deterministic or gaussian
    config.weight_std = 0.01
    config.weight_initializer = "Normal"
    config.nonlinearity = "relu"
    config.optimizer = "Adam"
    config.learning_rate = 0.0001
    config.momentum = 0.1
    config.gradient_clipping = 5
    config.weight_decay = 0

    # x = decoder(y, z)
    decoder = Sequential()
    decoder.add(Merge(num_inputs=2, out_size=1000, nobias=True))
    decoder.add(Activation(config.nonlinearity))
    # decoder.add(BatchNormalization(1000))
    decoder.add(Linear(None, 1000))
    decoder.add(Activation(config.nonlinearity))
    # decoder.add(BatchNormalization(1000))
    decoder.add(Linear(None, 1000))
    decoder.add(Activation(config.nonlinearity))
    # decoder.add(BatchNormalization(1000))
    decoder.add(Linear(None, config.ndim_x))
    decoder.add(tanh())

    discriminator_z = Sequential()
    discriminator_z.add(gaussian_noise(std=0.3))
    discriminator_z.add(Linear(config.ndim_z, 1000))
    discriminator_z.add(Activation(config.nonlinearity))
    # discriminator_z.add(BatchNormalization(1000))
    discriminator_z.add(Linear(None, 1000))
    discriminator_z.add(Activation(config.nonlinearity))
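
Note that the decoder again ends in tanh(), so reconstructions live in [-1, 1]; this only matches if the training images are rescaled to the same range (e.g. pixels mapped from [0, 255] to [-1, 1]) beforehand.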
Example #6
    config.num_mixture = args.num_mixture
    config.ndim_z = 256
    config.ndim_h = 128
    config.weight_std = 0.1
    config.weight_initializer = "Normal"
    config.nonlinearity_d = "elu"
    config.nonlinearity_g = "elu"
    config.optimizer = "adam"
    config.learning_rate = 0.0001
    config.momentum = 0.1
    config.gradient_clipping = 1
    config.weight_decay = 0

    encoder = Sequential()
    encoder.add(gaussian_noise(std=0.1))
    encoder.add(Linear(2, 64))
    encoder.add(Activation(config.nonlinearity_d))
    # encoder.add(BatchNormalization(64))
    encoder.add(Linear(None, 64))
    encoder.add(Activation(config.nonlinearity_d))
    # encoder.add(BatchNormalization(64))
    encoder.add(Linear(None, config.ndim_h))

    decoder = Sequential()
    decoder.add(Linear(config.ndim_h, 64))
    decoder.add(Activation(config.nonlinearity_d))
    # decoder.add(BatchNormalization(64))
    decoder.add(Linear(None, 64))
    decoder.add(Activation(config.nonlinearity_d))
    # decoder.add(BatchNormalization(64))
    decoder.add(Linear(None, 2))
Example #7

    config.learning_rate = 0.0001
    config.momentum = 0.9
    config.gradient_clipping = 1
    config.weight_decay = 0

    model = Sequential()
    model.add(Convolution2D(1, 32, ksize=4, stride=2, pad=1))
    model.add(BatchNormalization(32))
    model.add(Activation(config.nonlinearity))
    model.add(Convolution2D(32, 64, ksize=4, stride=2, pad=1))
    model.add(BatchNormalization(64))
    model.add(Activation(config.nonlinearity))
    model.add(Convolution2D(64, 128, ksize=3, stride=2, pad=1))
    model.add(BatchNormalization(128))
    model.add(Activation(config.nonlinearity))
    model.add(Linear(None, config.num_classes))

    params = {
        "config": config.to_dict(),
        "model": model.to_dict(),
    }

    with open(sequence_filename, "w") as f:
        json.dump(params, f, indent=4, sort_keys=True, separators=(',', ': '))

model = Discriminator(params)
model.load(args.model_dir)

if args.gpu_device != -1:
    cuda.get_device(args.gpu_device).use()
    model.to_gpu()
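
As a shape sanity check: a standard convolution produces out = (in + 2*pad - ksize) // stride + 1. Assuming a 28x28 single-channel input (which Convolution2D(1, 32, ...) suggests), the stack above goes 28 -> 14 -> 7 -> 4 before Linear(None, config.num_classes) flattens it. A tiny helper to verify, not part of the example:

    def conv_out_size(in_size, ksize, stride, pad):
        # Standard convolution output-size formula.
        return (in_size + 2 * pad - ksize) // stride + 1

    size = 28  # assumed input resolution
    for k, s, p in [(4, 2, 1), (4, 2, 1), (3, 2, 1)]:
        size = conv_out_size(size, k, s, p)
        print(size)  # 14, 7, 4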
Example #8
	config.weight_initializer = "Normal"
	config.nonlinearity = "relu"
	config.optimizer = "Adam"
	config.learning_rate = 0.0001
	config.momentum = 0.5
	config.gradient_clipping = 10
	config.weight_decay = 0

	# model
	# compute projection width
	input_size = get_in_size_of_deconv_layers(image_width, num_layers=4, ksize=4, stride=2)
	# compute required paddings
	paddings = get_paddings_of_deconv_layers(image_width, num_layers=4, ksize=4, stride=2)

	generator = Sequential(weight_initializer=config.weight_initializer, weight_init_std=config.weight_init_std)
	generator.add(Linear(config.ndim_input, 512 * input_size ** 2, use_weightnorm=config.use_weightnorm))
	generator.add(Activation(config.nonlinearity))
	generator.add(BatchNormalization(512 * input_size ** 2))
	generator.add(reshape((-1, 512, input_size, input_size)))
	generator.add(Deconvolution2D(512, 256, ksize=4, stride=2, pad=paddings.pop(0), use_weightnorm=config.use_weightnorm))
	generator.add(BatchNormalization(256))
	generator.add(Activation(config.nonlinearity))
	generator.add(Deconvolution2D(256, 128, ksize=4, stride=2, pad=paddings.pop(0), use_weightnorm=config.use_weightnorm))
	generator.add(BatchNormalization(128))
	generator.add(Activation(config.nonlinearity))
	generator.add(Deconvolution2D(128, 3, ksize=4, stride=2, pad=paddings.pop(0), use_weightnorm=config.use_weightnorm))
	if config.distribution_output == "sigmoid":
		generator.add(sigmoid())
	if config.distribution_output == "tanh":
		generator.add(tanh())
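
The two helpers at the top of this example size the deconvolution stack. A transposed convolution satisfies out = stride * (in - 1) + ksize - 2 * pad, so with ksize=4, stride=2, pad=1 each layer exactly doubles its input. The bodies below are a hedged re-derivation of what those helpers plausibly compute; only the names come from the example.

    def get_in_size_of_deconv_layers(target_size, num_layers, ksize, stride):
        # Walk backwards from the target resolution, rounding up.
        size = target_size
        for _ in range(num_layers):
            size = (size + stride - 1) // stride
        return size

    def get_paddings_of_deconv_layers(target_size, num_layers, ksize, stride):
        # Solve out = stride * (in - 1) + ksize - 2 * pad for pad at each
        # layer so the stack lands exactly on target_size.
        sizes = [target_size]
        for _ in range(num_layers):
            sizes.append((sizes[-1] + stride - 1) // stride)
        sizes.reverse()  # sizes[0] is the projection size, sizes[-1] the target
        return [(stride * (sizes[i] - 1) + ksize - sizes[i + 1]) // 2
                for i in range(num_layers)]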
Example #9

	discriminator = Sequential()
	discriminator.add(Convolution2D(3, 32, ksize=4, stride=2, pad=1))
	discriminator.add(BatchNormalization(32))
	discriminator.add(Activation(config.nonlinearity))
	discriminator.add(Convolution2D(32, 64, ksize=4, stride=2, pad=1))
	discriminator.add(BatchNormalization(64))
	discriminator.add(Activation(config.nonlinearity))
	discriminator.add(Convolution2D(64, 128, ksize=4, stride=2, pad=1))
	discriminator.add(BatchNormalization(128))
	discriminator.add(Activation(config.nonlinearity))
	discriminator.add(Convolution2D(128, 256, ksize=4, stride=2, pad=1))
	discriminator.add(BatchNormalization(256))
	discriminator.add(Activation(config.nonlinearity))
	discriminator.add(Linear(None, 1))

	discriminator_params = {
		"config": config.to_dict(),
		"model": discriminator.to_dict(),
	}

	with open(discriminator_sequence_filename, "w") as f:
		json.dump(discriminator_params, f, indent=4, sort_keys=True, separators=(',', ': '))

# specify generator
generator_sequence_filename = args.model_dir + "/generator.json"

if os.path.isfile(generator_sequence_filename):
	print "loading", generator_sequence_filename
	with open(generator_sequence_filename, "r") as f:
Example #10
	config = Config()
	config.ndim_x = 28 * 28
	config.ndim_y = 10
	config.weight_init_std = 0.01
	config.weight_initializer = "Normal"
	config.nonlinearity = "relu"
	config.optimizer = "Adam"
	config.learning_rate = 0.0002
	config.momentum = 0.9
	config.gradient_clipping = 10
	config.weight_decay = 0
	config.lambda_ = 1
	config.Ip = 1

	model = Sequential(weight_initializer=config.weight_initializer, weight_init_std=config.weight_init_std)
	model.add(Linear(None, 1200))
	model.add(Activation(config.nonlinearity))
	model.add(BatchNormalization(1200))
	model.add(Linear(None, 600))
	model.add(Activation(config.nonlinearity))
	model.add(BatchNormalization(600))
	model.add(Linear(None, config.ndim_y))

	params = {
		"config": config.to_dict(),
		"model": model.to_dict(),
	}

	with open(model_filename, "w") as f:
		json.dump(params, f, indent=4, sort_keys=True, separators=(',', ': '))
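
config.lambda_ and config.Ip read like virtual adversarial training (VAT) hyper-parameters: lambda_ weights the smoothness term and Ip is the number of power iterations used to find the input direction the classifier is most sensitive to. Under that assumption, a rough sketch of the power iteration (forward and xi are hypothetical names; a real implementation would also clear the model's gradients afterwards):

    import numpy as np
    import chainer
    import chainer.functions as F

    def vat_direction(forward, x, xi=10.0, Ip=1):
        # Sketch of VAT's power iteration over a flat (batch, dim) input x.
        p = F.softmax(forward(x))
        p.unchain_backward()  # treat the clean prediction as a constant
        d = np.random.normal(size=x.shape).astype(x.dtype)
        for _ in range(Ip):
            d /= np.sqrt(np.sum(d * d, axis=1, keepdims=True)) + 1e-8
            xd = chainer.Variable(x + xi * d)
            # KL(p || q) between clean and perturbed predictions.
            kl = F.sum(p * (F.log(p + 1e-8) - F.log_softmax(forward(xd))))
            kl.backward()
            d = xd.grad
        return d / (np.sqrt(np.sum(d * d, axis=1, keepdims=True)) + 1e-8)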
Example #11
	config = EnergyModelParams()
	config.ndim_input = image_width * image_height
	config.num_experts = 128
	config.weight_init_std = 0.05
	config.weight_initializer = "Normal"
	config.use_weightnorm = True
	config.nonlinearity = "elu"
	config.optimizer = "Adam"
	config.learning_rate = 0.0002
	config.momentum = 0.5
	config.gradient_clipping = 10
	config.weight_decay = 0

	# feature extractor
	feature_extractor = Sequential(weight_initializer=config.weight_initializer, weight_init_std=config.weight_init_std)
	feature_extractor.add(Linear(config.ndim_input, 1000, use_weightnorm=config.use_weightnorm))
	feature_extractor.add(Activation(config.nonlinearity))
	feature_extractor.add(gaussian_noise(std=0.3))
	feature_extractor.add(Linear(None, 500, use_weightnorm=config.use_weightnorm))
	feature_extractor.add(Activation(config.nonlinearity))
	feature_extractor.add(gaussian_noise(std=0.3))
	feature_extractor.add(Linear(None, 250, use_weightnorm=config.use_weightnorm))
	feature_extractor.add(Activation(config.nonlinearity))
	feature_extractor.add(gaussian_noise(std=0.3))
	feature_extractor.add(Linear(None, config.num_experts, use_weightnorm=config.use_weightnorm))
	feature_extractor.add(tanh())

	# experts
	experts = Sequential(weight_initializer=config.weight_initializer, weight_init_std=config.weight_init_std)
	experts.add(Linear(config.num_experts, config.num_experts, use_weightnorm=config.use_weightnorm))
Example #12
    # Discriminator
    encoder = Sequential()
    encoder.add(gaussian_noise(std=0.3))
    encoder.add(Convolution2D(3, 32, ksize=4, stride=2, pad=1))
    encoder.add(BatchNormalization(32))
    encoder.add(Activation(config.nonlinearity_d))
    encoder.add(Convolution2D(32, 64, ksize=4, stride=2, pad=1))
    encoder.add(BatchNormalization(64))
    encoder.add(Activation(config.nonlinearity_d))
    encoder.add(Convolution2D(64, 128, ksize=4, stride=2, pad=1))
    encoder.add(BatchNormalization(128))
    encoder.add(Activation(config.nonlinearity_d))
    encoder.add(Convolution2D(128, 256, ksize=4, stride=2, pad=1))
    encoder.add(BatchNormalization(256))
    encoder.add(Activation(config.nonlinearity_d))
    encoder.add(Linear(None, ndim_h))

    projection_size = 6

    # Decoder
    decoder = Sequential()
    decoder.add(BatchNormalization(ndim_h))
    decoder.add(Linear(ndim_h, 256 * projection_size**2))
    decoder.add(Activation(config.nonlinearity_g))
    decoder.add(BatchNormalization(256 * projection_size**2))
    decoder.add(reshape((-1, 256, projection_size, projection_size)))
    decoder.add(PixelShuffler2D(256, 128, r=2))
    decoder.add(BatchNormalization(128))
    decoder.add(Activation(config.nonlinearity_g))
    decoder.add(PixelShuffler2D(128, 64, r=2))
    decoder.add(BatchNormalization(64))
Example #13

    config.clamp_lower = -0.01
    config.clamp_upper = 0.01
    config.num_critic = 5
    config.weight_std = 0.001
    config.weight_initializer = "Normal"
    config.nonlinearity = "leaky_relu"
    config.optimizer = "rmsprop"
    config.learning_rate = 0.0001
    config.momentum = 0.5
    config.gradient_clipping = 1
    config.weight_decay = 0

    chainer.global_config.discriminator = config

    discriminator = Sequential()
    discriminator.add(Linear(None, 500))
    discriminator.add(Activation(config.nonlinearity))
    discriminator.add(Linear(None, 500))

    params = {
        "config": config.to_dict(),
        "model": discriminator.to_dict(),
    }

    with open(discriminator_sequence_filename, "w") as f:
        json.dump(params, f, indent=4, sort_keys=True, separators=(',', ': '))

discriminator_params = params

# specify generator
generator_sequence_filename = args.model_dir + "/generator.json"
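
clamp_lower, clamp_upper and num_critic are the telltale WGAN settings (Arjovsky et al., 2017): the critic is updated num_critic times per generator step, with RMSProp, and its weights are clipped into [clamp_lower, clamp_upper] to enforce a Lipschitz constraint. A minimal sketch of the clipping step, assuming a Chainer Chain-like critic (the helper name is hypothetical):

    def clip_critic_weights(critic, lower=-0.01, upper=0.01):
        # WGAN weight clipping: after each critic update, project every
        # parameter back into [lower, upper].
        xp = critic.xp  # numpy or cupy, depending on device
        for param in critic.params():
            param.data = xp.clip(param.data, lower, upper)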
Example #14
File: model.py Project: cai-mj/ddgm
	feature_extractor.add(Convolution2D(32, 64, ksize=4, stride=2, pad=1, use_weightnorm=config.use_weightnorm))
	feature_extractor.add(BatchNormalization(64))
	feature_extractor.add(Activation(config.nonlinearity))
	feature_extractor.add(dropout())
	feature_extractor.add(Convolution2D(64, 192, ksize=4, stride=2, pad=1, use_weightnorm=config.use_weightnorm))
	feature_extractor.add(BatchNormalization(192))
	feature_extractor.add(Activation(config.nonlinearity))
	feature_extractor.add(dropout())
	feature_extractor.add(Convolution2D(192, 256, ksize=4, stride=2, pad=1, use_weightnorm=config.use_weightnorm))
	feature_extractor.add(reshape_1d())
	feature_extractor.add(MinibatchDiscrimination(None, num_kernels=50, ndim_kernel=5, train_weights=True))
	feature_extractor.add(tanh())

	# experts
	experts = Sequential(weight_initializer=config.weight_initializer, weight_init_std=config.weight_init_std)
	experts.add(Linear(None, config.num_experts, use_weightnorm=config.use_weightnorm))

	# b
	b = Sequential(weight_initializer=config.weight_initializer, weight_init_std=config.weight_init_std)
	b.add(Linear(None, 1, nobias=True))

	params = {
		"config": config.to_dict(),
		"feature_extractor": feature_extractor.to_dict(),
		"experts": experts.to_dict(),
		"b": b.to_dict(),
	}

	with open(energy_model_filename, "w") as f:
		json.dump(params, f, indent=4, sort_keys=True, separators=(',', ': '))
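
MinibatchDiscrimination here (and in Examples #17 and #18) is the Salimans et al. (2016) trick: append per-sample features that measure closeness to the rest of the minibatch, so the discriminator can detect the low-variety batches a collapsing generator produces. A compact sketch of the core computation in plain Chainer (a hypothetical stand-in, not the library's class):

    import chainer
    import chainer.functions as F
    import chainer.links as L

    class MinibatchDiscriminationSketch(chainer.Chain):
        def __init__(self, in_size, num_kernels=50, ndim_kernel=5):
            super(MinibatchDiscriminationSketch, self).__init__()
            self.num_kernels = num_kernels
            self.ndim_kernel = ndim_kernel
            with self.init_scope():
                # Learned tensor T, stored as a linear map to K*D features.
                self.t = L.Linear(in_size, num_kernels * ndim_kernel,
                                  nobias=True)

        def __call__(self, x):
            n, k, d = x.shape[0], self.num_kernels, self.ndim_kernel
            m = F.reshape(self.t(x), (n, k, d))
            # Pairwise L1 distances between all samples, per kernel row.
            mi = F.broadcast_to(F.expand_dims(m, 0), (n, n, k, d))
            mj = F.broadcast_to(F.expand_dims(m, 1), (n, n, k, d))
            dist = F.sum(F.absolute(mi - mj), axis=3)
            # Similarity to the batch; the self term contributes a constant
            # exp(0) = 1 per kernel, which downstream layers absorb.
            o = F.sum(F.exp(-dist), axis=1)
            return F.concat((x, o), axis=1)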
Example #15
    config = DiscriminatorParams()
    config.a = 0
    config.b = 1
    config.c = 1
    config.weight_std = 0.01
    config.weight_initializer = "Normal"
    config.use_weightnorm = False
    config.nonlinearity = "leaky_relu"
    config.optimizer = "adam"
    config.learning_rate = 0.0001
    config.momentum = 0.5
    config.gradient_clipping = 1
    config.weight_decay = 0

    discriminator = Sequential()
    discriminator.add(Linear(None, 128, use_weightnorm=config.use_weightnorm))
    discriminator.add(Activation(config.nonlinearity))
    # discriminator.add(BatchNormalization(128))
    discriminator.add(Linear(None, 128, use_weightnorm=config.use_weightnorm))
    discriminator.add(Activation(config.nonlinearity))
    # discriminator.add(BatchNormalization(128))
    discriminator.add(Linear(None, 1, use_weightnorm=config.use_weightnorm))

    discriminator_params = {
        "config": config.to_dict(),
        "model": discriminator.to_dict(),
    }

    with open(discriminator_sequence_filename, "w") as f:
        json.dump(discriminator_params,
                  f,
Example #16
    # model.add(BatchNormalization(1800, use_cudnn=False))
    # model.add(Linear(None, config.num_clusters))

    model.add(Convolution2D(1, 32, ksize=4, stride=2, pad=1))
    model.add(Activation(config.nonlinearity))
    model.add(BatchNormalization(32))
    model.add(Convolution2D(32, 64, ksize=4, stride=2, pad=1))
    model.add(Activation(config.nonlinearity))
    model.add(BatchNormalization(64))
    model.add(Convolution2D(64, 128, ksize=3, stride=2, pad=1))
    model.add(Activation(config.nonlinearity))
    model.add(BatchNormalization(128))
    model.add(Convolution2D(128, 256, ksize=4, stride=2, pad=1))
    model.add(Activation(config.nonlinearity))
    model.add(BatchNormalization(256))
    model.add(Linear(None, config.num_clusters))

    params = {
        "config": config.to_dict(),
        "model": model.to_dict(),
    }

    with open(sequence_filename, "w") as f:
        json.dump(params, f, indent=4, sort_keys=True, separators=(',', ': '))

imsat = Classifier(params)
imsat.load(args.model_dir)

if args.gpu_device != -1:
    cuda.get_device(args.gpu_device).use()
    imsat.to_gpu()
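
The Linear(None, config.num_clusters) head together with the Classifier being restored under the name imsat suggests an IMSAT-style unsupervised clustering setup, where the network maps inputs directly to soft cluster assignments; this reading is inferred from the names, not stated in the snippet.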
Example #17
        Convolution2D(128,
                      256,
                      ksize=4,
                      stride=2,
                      pad=1,
                      use_weightnorm=config.use_weightnorm))
    discriminator.add(BatchNormalization(256))
    discriminator.add(Activation(config.nonlinearity))
    if config.use_minibatch_discrimination:
        discriminator.add(reshape_1d())
        discriminator.add(
            MinibatchDiscrimination(None,
                                    num_kernels=50,
                                    ndim_kernel=5,
                                    train_weights=True))
    discriminator.add(Linear(None, 1, use_weightnorm=config.use_weightnorm))

    params = {
        "config": config.to_dict(),
        "model": discriminator.to_dict(),
    }

    with open(discriminator_sequence_filename, "w") as f:
        json.dump(params, f, indent=4, sort_keys=True, separators=(',', ': '))

discriminator_params = params

# specify generator
generator_sequence_filename = args.model_dir + "/generator.json"

if os.path.isfile(generator_sequence_filename):
Example #18
    config.ndim_output = 10
    config.weight_init_std = 1
    config.weight_initializer = "GlorotNormal"
    config.use_weightnorm = False
    config.nonlinearity = "softplus"
    config.optimizer = "Adam"
    config.learning_rate = 0.001
    config.momentum = 0.5
    config.gradient_clipping = 10
    config.weight_decay = 0
    config.use_feature_matching = True
    config.use_minibatch_discrimination = False

    discriminator = Sequential(weight_initializer=config.weight_initializer, weight_init_std=config.weight_init_std)
    discriminator.add(gaussian_noise(std=0.3))
    discriminator.add(Linear(config.ndim_input, 1000, use_weightnorm=config.use_weightnorm))
    discriminator.add(gaussian_noise(std=0.5))
    discriminator.add(Activation(config.nonlinearity))
    # discriminator.add(BatchNormalization(1000))
    discriminator.add(Linear(None, 500, use_weightnorm=config.use_weightnorm))
    discriminator.add(gaussian_noise(std=0.5))
    discriminator.add(Activation(config.nonlinearity))
    # discriminator.add(BatchNormalization(500))
    discriminator.add(Linear(None, 250, use_weightnorm=config.use_weightnorm))
    discriminator.add(gaussian_noise(std=0.5))
    discriminator.add(Activation(config.nonlinearity))
    # discriminator.add(BatchNormalization(250))
    if config.use_minibatch_discrimination:
        discriminator.add(MinibatchDiscrimination(None, num_kernels=50, ndim_kernel=5))
    discriminator.add(Linear(None, config.ndim_output, use_weightnorm=config.use_weightnorm))
    # no need to add softmax() here
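
use_feature_matching = True selects the feature-matching generator loss from the same Salimans et al. (2016) paper, which also explains the closing comment: the generator matches the batch-mean activations of an intermediate discriminator layer instead of fooling a softmax output, so no softmax is attached. A one-function sketch (f_real and f_fake are hypothetical names for that intermediate layer's activations on real and generated batches):

    import chainer.functions as F

    def feature_matching_loss(f_real, f_fake):
        # Match mean intermediate-layer activations of real vs. generated data.
        return F.mean_squared_error(F.mean(f_real, axis=0),
                                    F.mean(f_fake, axis=0))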