Example #1
    config.nonlinearity = "elu"
    config.optimizer = "Adam"
    config.learning_rate = 0.0001
    config.momentum = 0.5
    config.gradient_clipping = 10
    config.weight_decay = 0
    config.use_feature_matching = False
    config.use_minibatch_discrimination = False

    discriminator = Sequential(weight_initializer=config.weight_initializer,
                               weight_init_std=config.weight_init_std)
    discriminator.add(gaussian_noise(std=0.3))
    discriminator.add(
        Convolution2D(3,
                      32,
                      ksize=4,
                      stride=2,
                      pad=1,
                      use_weightnorm=config.use_weightnorm))
    discriminator.add(BatchNormalization(32))
    discriminator.add(Activation(config.nonlinearity))
    discriminator.add(
        Convolution2D(32,
                      64,
                      ksize=4,
                      stride=2,
                      pad=1,
                      use_weightnorm=config.use_weightnorm))
    discriminator.add(BatchNormalization(64))
    discriminator.add(Activation(config.nonlinearity))
    discriminator.add(
        Convolution2D(64,
                      128,
                      ksize=4,
                      stride=2,
                      pad=1,
                      use_weightnorm=config.use_weightnorm))
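Example #1 passes the discriminator input through gaussian_noise(std=0.3) before the first convolution, i.e. real and generated samples are perturbed with additive noise at training time. A rough stand-alone sketch of that behaviour (the function name and the numpy implementation are illustrative, not the library's gaussian_noise layer):

    import numpy as np

    def add_gaussian_noise(x, std=0.3, train=True):
        # Additive zero-mean Gaussian noise, applied only during training;
        # at test time the input passes through unchanged.
        if not train:
            return x
        return x + np.random.normal(0.0, std, size=x.shape).astype(x.dtype)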
Example #2
    config.clamp_lower = -0.01
    config.clamp_upper = 0.01
    config.num_critic = 1
    config.weight_std = 0.001
    config.weight_initializer = "Normal"
    config.nonlinearity = "leaky_relu"
    config.optimizer = "rmsprop"
    config.learning_rate = 0.0001
    config.momentum = 0.5
    config.gradient_clipping = 10
    config.weight_decay = 0

    chainer.global_config.discriminator = config

    discriminator = Sequential()
    discriminator.add(Convolution2D(3, 32, ksize=4, stride=2, pad=1))
    discriminator.add(BatchNormalization(32))
    discriminator.add(Activation(config.nonlinearity))
    discriminator.add(Convolution2D(32, 64, ksize=4, stride=2, pad=1))
    discriminator.add(BatchNormalization(64))
    discriminator.add(Activation(config.nonlinearity))
    discriminator.add(Convolution2D(64, 128, ksize=4, stride=2, pad=1))
    discriminator.add(BatchNormalization(128))
    discriminator.add(Activation(config.nonlinearity))
    discriminator.add(Convolution2D(128, 256, ksize=4, stride=2, pad=1))
    discriminator.add(BatchNormalization(256))
    discriminator.add(Activation(config.nonlinearity))
    discriminator.add(Convolution2D(256, 512, ksize=4, stride=2, pad=0))

    params = {
        "config": config.to_dict(),
        "model": discriminator.to_dict(),
    }
Example #3
        except Exception as e:
            raise Exception("could not load {}".format(sequence_filename))
else:
    config = Params()
    config.num_classes = 10
    config.weight_std = 0.1
    config.weight_initializer = "Normal"
    config.nonlinearity = "relu"
    config.optimizer = "adam"
    config.learning_rate = 0.0001
    config.momentum = 0.9
    config.gradient_clipping = 1
    config.weight_decay = 0

    model = Sequential()
    model.add(Convolution2D(1, 32, ksize=4, stride=2, pad=1))
    model.add(BatchNormalization(32))
    model.add(Activation(config.nonlinearity))
    model.add(Convolution2D(32, 64, ksize=4, stride=2, pad=1))
    model.add(BatchNormalization(64))
    model.add(Activation(config.nonlinearity))
    model.add(Convolution2D(64, 128, ksize=3, stride=2, pad=1))
    model.add(BatchNormalization(128))
    model.add(Activation(config.nonlinearity))
    model.add(Linear(None, config.num_classes))

    params = {
        "config": config.to_dict(),
        "model": model.to_dict(),
    }
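The params dictionary built from config.to_dict() and model.to_dict() is evidently meant to be written to disk and read back later (the load path at the top of Example #3 raises "could not load {}" on failure). A sketch of the write side using the standard json module; the filename is hypothetical, since only the load-error path appears above:

    import json

    sequence_filename = "model.json"  # hypothetical filename
    with open(sequence_filename, "w") as f:
        json.dump(params, f, indent=4, sort_keys=True)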
Example #4
	config = DiscriminatorParams()
	config.weight_init_std = 0.001
	config.weight_initializer = "Normal"
	config.use_weightnorm = False
	config.nonlinearity = "elu"
	config.optimizer = "Adam"
	config.learning_rate = 0.0001
	config.momentum = 0.5
	config.gradient_clipping = 10
	config.weight_decay = 0
	config.use_feature_matching = False
	config.use_minibatch_discrimination = False

	discriminator = Sequential(weight_initializer=config.weight_initializer, weight_init_std=config.weight_init_std)
	discriminator.add(gaussian_noise(std=0.3))
	discriminator.add(Convolution2D(3, 32, ksize=4, stride=2, pad=1, use_weightnorm=config.use_weightnorm))
	discriminator.add(BatchNormalization(32))
	discriminator.add(Activation(config.nonlinearity))
	discriminator.add(Convolution2D(32, 64, ksize=4, stride=2, pad=1, use_weightnorm=config.use_weightnorm))
	discriminator.add(BatchNormalization(64))
	discriminator.add(Activation(config.nonlinearity))
	discriminator.add(Convolution2D(64, 128, ksize=4, stride=2, pad=1, use_weightnorm=config.use_weightnorm))
	discriminator.add(BatchNormalization(128))
	discriminator.add(Activation(config.nonlinearity))
	discriminator.add(Convolution2D(128, 256, ksize=4, stride=2, pad=1, use_weightnorm=config.use_weightnorm))
	discriminator.add(BatchNormalization(256))
	discriminator.add(Activation(config.nonlinearity))
	if config.use_minibatch_discrimination:
		discriminator.add(reshape_1d())
		discriminator.add(MinibatchDiscrimination(None, num_kernels=50, ndim_kernel=5, train_weights=True))
	discriminator.add(Linear(None, 2, use_weightnorm=config.use_weightnorm))
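Example #4 optionally appends a MinibatchDiscrimination layer (num_kernels=50, ndim_kernel=5). The statistic that layer computes, following Salimans et al. (2016), compares each sample's projected features against the rest of the minibatch; a rough numpy illustration, not the library's implementation:

    import numpy as np

    def minibatch_features(x, T):
        # x: (batch, ndim_in) flattened features; T: (ndim_in, num_kernels, ndim_kernel).
        M = np.tensordot(x, T, axes=1)            # (batch, num_kernels, ndim_kernel)
        diffs = M[:, None] - M[None, :]           # pairwise differences across the batch
        l1 = np.abs(diffs).sum(axis=-1)           # (batch, batch, num_kernels)
        # Closeness scores per kernel (includes the trivial self-comparison term).
        return np.exp(-l1).sum(axis=1)            # (batch, num_kernels)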
Example #5
    config.ndim_z = ndim_z
    config.ndim_h = ndim_h
    config.weight_std = 0.01
    config.weight_initializer = "Normal"
    config.nonlinearity_d = "elu"
    config.nonlinearity_g = "elu"
    config.optimizer = "adam"
    config.learning_rate = 0.0001
    config.momentum = 0.5
    config.gradient_clipping = 1
    config.weight_decay = 0

    # Discriminator
    encoder = Sequential()
    encoder.add(gaussian_noise(std=0.3))
    encoder.add(Convolution2D(3, 32, ksize=4, stride=2, pad=1))
    encoder.add(BatchNormalization(32))
    encoder.add(Activation(config.nonlinearity_d))
    encoder.add(Convolution2D(32, 64, ksize=4, stride=2, pad=1))
    encoder.add(BatchNormalization(64))
    encoder.add(Activation(config.nonlinearity_d))
    encoder.add(Convolution2D(64, 128, ksize=4, stride=2, pad=1))
    encoder.add(BatchNormalization(128))
    encoder.add(Activation(config.nonlinearity_d))
    encoder.add(Convolution2D(128, 256, ksize=4, stride=2, pad=1))
    encoder.add(BatchNormalization(256))
    encoder.add(Activation(config.nonlinearity_d))
    encoder.add(Linear(None, ndim_h))

    projection_size = 6
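In Example #5, projection_size = 6 is consistent with a 96x96 input halved at each of the four stride-2, pad-1, ksize-4 convolutions. A quick sanity check of that spatial arithmetic (the 96x96 input resolution is an assumption):

    def conv_out_size(size, ksize=4, stride=2, pad=1):
        # Standard convolution output-size formula.
        return (size + 2 * pad - ksize) // stride + 1

    size = 96  # assumed input resolution
    for _ in range(4):
        size = conv_out_size(size)
    print(size)  # 6, matching projection_size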
Example #6
File: model.py Project: cai-mj/ddgm
else:
	config = EnergyModelParams()
	config.num_experts = 512
	config.weight_init_std = 0.05
	config.weight_initializer = "Normal"
	config.use_weightnorm = False
	config.nonlinearity = "elu"
	config.optimizer = "Adam"
	config.learning_rate = 0.0002
	config.momentum = 0.5
	config.gradient_clipping = 10
	config.weight_decay = 0

	# feature extractor
	feature_extractor = Sequential(weight_initializer=config.weight_initializer, weight_init_std=config.weight_init_std)
	feature_extractor.add(Convolution2D(3, 32, ksize=4, stride=2, pad=1, use_weightnorm=config.use_weightnorm))
	feature_extractor.add(BatchNormalization(32))
	feature_extractor.add(Activation(config.nonlinearity))
	feature_extractor.add(dropout())
	feature_extractor.add(Convolution2D(32, 64, ksize=4, stride=2, pad=1, use_weightnorm=config.use_weightnorm))
	feature_extractor.add(BatchNormalization(64))
	feature_extractor.add(Activation(config.nonlinearity))
	feature_extractor.add(dropout())
	feature_extractor.add(Convolution2D(64, 192, ksize=4, stride=2, pad=1, use_weightnorm=config.use_weightnorm))
	feature_extractor.add(BatchNormalization(192))
	feature_extractor.add(Activation(config.nonlinearity))
	feature_extractor.add(dropout())
	feature_extractor.add(Convolution2D(192, 256, ksize=4, stride=2, pad=1, use_weightnorm=config.use_weightnorm))
	feature_extractor.add(reshape_1d())
	feature_extractor.add(MinibatchDiscrimination(None, num_kernels=50, ndim_kernel=5, train_weights=True))
	feature_extractor.add(tanh())
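Every example above carries the same optimizer-related fields (optimizer, learning_rate, momentum, gradient_clipping, weight_decay). Assuming the built network is, or wraps, a chainer.Link, those fields map onto a stock Chainer optimizer roughly as follows; build_optimizer is an illustrative helper, not part of the projects above, and treating momentum as Adam's beta1 is an assumption:

    import chainer

    def build_optimizer(link, config):
        # Adam case only; the snippets also mention "rmsprop".
        opt = chainer.optimizers.Adam(alpha=config.learning_rate, beta1=config.momentum)
        opt.setup(link)
        if config.gradient_clipping:
            opt.add_hook(chainer.optimizer.GradientClipping(config.gradient_clipping))
        if config.weight_decay:
            opt.add_hook(chainer.optimizer.WeightDecay(config.weight_decay))
        return opt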