Example #1
        except Exception as e:
            raise Exception("could not load {}: {}".format(sequence_filename, e))
else:
    config = Params()
    config.num_classes = 10
    config.weight_std = 0.1
    config.weight_initializer = "Normal"
    config.nonlinearity = "relu"
    config.optimizer = "adam"
    config.learning_rate = 0.0001
    config.momentum = 0.9
    config.gradient_clipping = 1
    config.weight_decay = 0

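    # model: two 500-unit hidden layers with batch normalization, then a num_classes output layer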
    model = Sequential()
    model.add(Linear(None, 500))
    model.add(Activation(config.nonlinearity))
    model.add(BatchNormalization(500))
    model.add(Linear(None, 500))
    model.add(Activation(config.nonlinearity))
    model.add(BatchNormalization(500))
    model.add(Linear(None, config.num_classes))

    params = {
        "config": config.to_dict(),
        "model": model.to_dict(),
    }

    with open(sequence_filename, "w") as f:
        json.dump(params, f, indent=4, sort_keys=True, separators=(',', ': '))
Example #2
    config.gamma = 0.5
    config.num_mixture = args.num_mixture
    config.ndim_z = 256
    config.ndim_h = 128
    config.weight_std = 0.1
    config.weight_initializer = "Normal"
    config.nonlinearity_d = "elu"
    config.nonlinearity_g = "elu"
    config.optimizer = "adam"
    config.learning_rate = 0.0001
    config.momentum = 0.1
    config.gradient_clipping = 1
    config.weight_decay = 0

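    # encoder: input noise, two 64-unit hidden layers, then a linear output of size ndim_h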
    encoder = Sequential()
    encoder.add(gaussian_noise(std=0.1))
    encoder.add(Linear(2, 64))
    encoder.add(Activation(config.nonlinearity_d))
    # encoder.add(BatchNormalization(64))
    encoder.add(Linear(None, 64))
    encoder.add(Activation(config.nonlinearity_d))
    # encoder.add(BatchNormalization(64))
    encoder.add(Linear(None, config.ndim_h))

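    # decoder: maps the ndim_h code through two 64-unit hidden layers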
    decoder = Sequential()
    decoder.add(Linear(config.ndim_h, 64))
    decoder.add(Activation(config.nonlinearity_d))
    # decoder.add(BatchNormalization(64))
    decoder.add(Linear(None, 64))
    decoder.add(Activation(config.nonlinearity_d))
    # decoder.add(BatchNormalization(64))
Example #3
    config.num_critic = 5
    config.weight_init_std = 0.001
    config.weight_initializer = "Normal"
    config.use_weightnorm = False
    config.nonlinearity = "leaky_relu"
    config.optimizer = "rmsprop"
    config.learning_rate = 0.0001
    config.momentum = 0.5
    config.gradient_clipping = 1
    config.weight_decay = 0
    config.use_feature_matching = False
    config.use_minibatch_discrimination = False

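    # discriminator: 128-unit linear layers with optional weight normalization and minibatch discrimination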
    discriminator = Sequential(weight_initializer=config.weight_initializer,
                               weight_init_std=config.weight_init_std)
    discriminator.add(Linear(None, 128, use_weightnorm=config.use_weightnorm))
    discriminator.add(Activation(config.nonlinearity))
    # discriminator.add(BatchNormalization(128))
    if config.use_minibatch_discrimination:
        discriminator.add(
            MinibatchDiscrimination(None, num_kernels=50, ndim_kernel=5))
    discriminator.add(Linear(None, 128, use_weightnorm=config.use_weightnorm))

    params = {
        "config": config.to_dict(),
        "model": discriminator.to_dict(),
    }

    with open(discriminator_sequence_filename, "w") as f:
        json.dump(params, f, indent=4, sort_keys=True, separators=(',', ': '))
Example #4
    config.clamp_lower = -0.01
    config.clamp_upper = 0.01
    config.num_critic = 1
    config.weight_std = 0.001
    config.weight_initializer = "Normal"
    config.nonlinearity = "leaky_relu"
    config.optimizer = "rmsprop"
    config.learning_rate = 0.0001
    config.momentum = 0.5
    config.gradient_clipping = 10
    config.weight_decay = 0

    chainer.global_config.discriminator = config

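    # discriminator: five strided 4x4 convolutions (3 -> 512 channels) with batch norm and leaky ReLU in between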
    discriminator = Sequential()
    discriminator.add(Convolution2D(3, 32, ksize=4, stride=2, pad=1))
    discriminator.add(BatchNormalization(32))
    discriminator.add(Activation(config.nonlinearity))
    discriminator.add(Convolution2D(32, 64, ksize=4, stride=2, pad=1))
    discriminator.add(BatchNormalization(64))
    discriminator.add(Activation(config.nonlinearity))
    discriminator.add(Convolution2D(64, 128, ksize=4, stride=2, pad=1))
    discriminator.add(BatchNormalization(128))
    discriminator.add(Activation(config.nonlinearity))
    discriminator.add(Convolution2D(128, 256, ksize=4, stride=2, pad=1))
    discriminator.add(BatchNormalization(256))
    discriminator.add(Activation(config.nonlinearity))
    discriminator.add(Convolution2D(256, 512, ksize=4, stride=2, pad=0))

    params = {
        "config": config.to_dict(),
Example #5
import numpy as np
from chainer import Variable
from chainer import functions as F
from sequential import Sequential
import link
import function
import util
import chain

# Linear test
x = np.random.normal(scale=1, size=(2, 28 * 28)).astype(np.float32)
x = Variable(x)

seq = Sequential(weight_initializer="GlorotNormal", weight_init_std=0.05)
seq.add(link.Linear(28 * 28, 500))
seq.add(link.BatchNormalization(500))
seq.add(link.Linear(500, 500))
seq.add(function.Activation("clipped_relu"))
seq.add(link.Linear(500, 500, use_weightnorm=True))
seq.add(function.Activation("crelu"))  # crelu outputs 2x
seq.add(link.BatchNormalization(1000))
seq.add(link.Linear(1000, 500))
seq.add(function.Activation("elu"))
seq.add(link.Linear(500, 500, use_weightnorm=True))
seq.add(function.Activation("hard_sigmoid"))
seq.add(link.BatchNormalization(500))
seq.add(link.Linear(500, 500))
seq.add(function.Activation("leaky_relu"))
seq.add(link.Linear(500, 500, use_weightnorm=True))
seq.add(function.Activation("relu"))
seq.add(link.BatchNormalization(500))
Example #6
	config = Config()
	config.ndim_x = 28 * 28
	config.ndim_y = 10
	config.ndim_z = 2
	config.distribution_z = "deterministic"	# deterministic or gaussian
	config.weight_init_std = 0.001
	config.weight_initializer = "Normal"
	config.nonlinearity = "relu"
	config.optimizer = "Adam"
	config.learning_rate = 0.0002
	config.momentum = 0.5
	config.gradient_clipping = 5
	config.weight_decay = 0

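	# decoder: two 1000-unit hidden layers and a sigmoid output of size ndim_x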
	decoder = Sequential(weight_initializer=config.weight_initializer, weight_init_std=config.weight_init_std)
	decoder.add(Linear(None, 1000))
	decoder.add(Activation(config.nonlinearity))
	decoder.add(Linear(None, 1000))
	decoder.add(Activation(config.nonlinearity))
	decoder.add(Linear(None, config.ndim_x))
	decoder.add(sigmoid())

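	# discriminator: merges two inputs into a 1000-unit layer, adds noise, then two more 1000-unit layers and a 2-unit output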
	discriminator = Sequential(weight_initializer=config.weight_initializer, weight_init_std=config.weight_init_std)
	discriminator.add(Merge(num_inputs=2, out_size=1000, nobias=True))
	discriminator.add(gaussian_noise(std=0.3))
	discriminator.add(Activation(config.nonlinearity))
	discriminator.add(Linear(None, 1000))
	discriminator.add(Activation(config.nonlinearity))
	discriminator.add(Linear(None, 1000))
	discriminator.add(Activation(config.nonlinearity))
	discriminator.add(Linear(None, 2))
Example #7
    config.num_critic = 5
    config.weight_init_std = 0.001
    config.weight_initializer = "Normal"
    config.use_weightnorm = False
    config.nonlinearity = "leaky_relu"
    config.optimizer = "rmsprop"
    config.learning_rate = 0.0001
    config.momentum = 0.5
    config.gradient_clipping = 1
    config.weight_decay = 0
    config.use_feature_matching = False
    config.use_minibatch_discrimination = False

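    # discriminator: 500-unit linear layers with optional weight normalization and minibatch discrimination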
    discriminator = Sequential(weight_initializer=config.weight_initializer,
                               weight_init_std=config.weight_init_std)
    discriminator.add(Linear(None, 500, use_weightnorm=config.use_weightnorm))
    # discriminator.add(gaussian_noise(std=0.5))
    discriminator.add(Activation(config.nonlinearity))
    # discriminator.add(BatchNormalization(500))
    if config.use_minibatch_discrimination:
        discriminator.add(
            MinibatchDiscrimination(None, num_kernels=50, ndim_kernel=5))
    discriminator.add(Linear(None, 500, use_weightnorm=config.use_weightnorm))

    params = {
        "config": config.to_dict(),
        "model": discriminator.to_dict(),
    }

    with open(discriminator_sequence_filename, "w") as f:
        json.dump(params, f, indent=4, sort_keys=True, separators=(',', ': '))
Example #8
    config = DiscriminatorParams()
    config.a = 0
    config.b = 1
    config.c = 1
    config.weight_std = 0.01
    config.weight_initializer = "Normal"
    config.use_weightnorm = False
    config.nonlinearity = "leaky_relu"
    config.optimizer = "adam"
    config.learning_rate = 0.0001
    config.momentum = 0.5
    config.gradient_clipping = 1
    config.weight_decay = 0

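    # discriminator: two 500-unit hidden layers with leaky ReLU and a single-unit output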
    discriminator = Sequential()
    discriminator.add(Linear(None, 500, use_weightnorm=config.use_weightnorm))
    # discriminator.add(gaussian_noise(std=0.5))
    discriminator.add(Activation(config.nonlinearity))
    # discriminator.add(BatchNormalization(500))
    discriminator.add(Linear(None, 500, use_weightnorm=config.use_weightnorm))
    discriminator.add(Activation(config.nonlinearity))
    # discriminator.add(BatchNormalization(500))
    discriminator.add(Linear(None, 1, use_weightnorm=config.use_weightnorm))

    discriminator_params = {
        "config": config.to_dict(),
        "model": discriminator.to_dict(),
    }

    with open(discriminator_sequence_filename, "w") as f:
        json.dump(discriminator_params,
Example #9
	config = EnergyModelParams()
	config.ndim_input = ndim_input
	config.num_experts = 4
	config.weight_init_std = 0.05
	config.weight_initializer = "Normal"
	config.use_weightnorm = True
	config.nonlinearity = "elu"
	config.optimizer = "Adam"
	config.learning_rate = 0.0002
	config.momentum = 0.5
	config.gradient_clipping = 10
	config.weight_decay = 0

	# feature extractor
	feature_extractor = Sequential(weight_initializer=config.weight_initializer, weight_init_std=config.weight_init_std)
	feature_extractor.add(Linear(config.ndim_input, 128, use_weightnorm=config.use_weightnorm))
	feature_extractor.add(Activation(config.nonlinearity))
	feature_extractor.add(Linear(None, 128, use_weightnorm=config.use_weightnorm))
	feature_extractor.add(tanh())

	# experts
	experts = Sequential(weight_initializer=config.weight_initializer, weight_init_std=config.weight_init_std)
	experts.add(Linear(128, config.num_experts, use_weightnorm=config.use_weightnorm))

	# b: linear map from the input to a scalar (no bias term)
	b = Sequential(weight_initializer=config.weight_initializer, weight_init_std=config.weight_init_std)
	b.add(Linear(config.ndim_input, 1, nobias=True))

	params = {
		"config": config.to_dict(),
		"feature_extractor": feature_extractor.to_dict(),
Example #10
    config.gamma = 0.5
    config.ndim_z = ndim_z
    config.ndim_h = ndim_h
    config.weight_std = 0.01
    config.weight_initializer = "Normal"
    config.nonlinearity_d = "elu"
    config.nonlinearity_g = "elu"
    config.optimizer = "adam"
    config.learning_rate = 0.0001
    config.momentum = 0.5
    config.gradient_clipping = 1
    config.weight_decay = 0

    # Discriminator
    encoder = Sequential()
    encoder.add(gaussian_noise(std=0.3))
    encoder.add(Convolution2D(3, 32, ksize=4, stride=2, pad=1))
    encoder.add(BatchNormalization(32))
    encoder.add(Activation(config.nonlinearity_d))
    encoder.add(Convolution2D(32, 64, ksize=4, stride=2, pad=1))
    encoder.add(BatchNormalization(64))
    encoder.add(Activation(config.nonlinearity_d))
    encoder.add(Convolution2D(64, 128, ksize=4, stride=2, pad=1))
    encoder.add(BatchNormalization(128))
    encoder.add(Activation(config.nonlinearity_d))
    encoder.add(Convolution2D(128, 256, ksize=4, stride=2, pad=1))
    encoder.add(BatchNormalization(256))
    encoder.add(Activation(config.nonlinearity_d))
    encoder.add(Linear(None, ndim_h))

    projection_size = 6
Example #11
	config = Config()
	config.ndim_x = 28 * 28
	config.ndim_y = 10
	config.weight_init_std = 0.01
	config.weight_initializer = "Normal"
	config.nonlinearity = "relu"
	config.optimizer = "Adam"
	config.learning_rate = 0.0002
	config.momentum = 0.9
	config.gradient_clipping = 10
	config.weight_decay = 0
	config.lambda_ = 1
	config.Ip = 1

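	# model: 1200- and 600-unit hidden layers with batch normalization, then an output of size ndim_y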
	model = Sequential(weight_initializer=config.weight_initializer, weight_init_std=config.weight_init_std)
	model.add(Linear(None, 1200))
	model.add(Activation(config.nonlinearity))
	model.add(BatchNormalization(1200))
	model.add(Linear(None, 600))
	model.add(Activation(config.nonlinearity))
	model.add(BatchNormalization(600))
	model.add(Linear(None, config.ndim_y))

	params = {
		"config": config.to_dict(),
		"model": model.to_dict(),
	}

	with open(model_filename, "w") as f:
		json.dump(params, f, indent=4, sort_keys=True, separators=(',', ': '))
Example #12
    config.clamp_lower = -0.01
    config.clamp_upper = 0.01
    config.num_critic = 5
    config.weight_std = 0.001
    config.weight_initializer = "Normal"
    config.nonlinearity = "leaky_relu"
    config.optimizer = "rmsprop"
    config.learning_rate = 0.0001
    config.momentum = 0.5
    config.gradient_clipping = 1
    config.weight_decay = 0

    chainer.global_config.discriminator = config

    discriminator = Sequential()
    discriminator.add(Linear(None, 500))
    discriminator.add(Activation(config.nonlinearity))
    discriminator.add(Linear(None, 500))

    params = {
        "config": config.to_dict(),
        "model": discriminator.to_dict(),
    }

    with open(discriminator_sequence_filename, "w") as f:
        json.dump(params, f, indent=4, sort_keys=True, separators=(',', ': '))

discriminator_params = params

# specify generator
generator_sequence_filename = args.model_dir + "/generator.json"
Example #13
else:
	config = EnergyModelParams()
	config.num_experts = 512
	config.weight_init_std = 0.05
	config.weight_initializer = "Normal"
	config.use_weightnorm = False
	config.nonlinearity = "elu"
	config.optimizer = "Adam"
	config.learning_rate = 0.0002
	config.momentum = 0.5
	config.gradient_clipping = 10
	config.weight_decay = 0

	# feature extractor
	feature_extractor = Sequential(weight_initializer=config.weight_initializer, weight_init_std=config.weight_init_std)
	feature_extractor.add(Convolution2D(3, 32, ksize=4, stride=2, pad=1, use_weightnorm=config.use_weightnorm))
	feature_extractor.add(BatchNormalization(32))
	feature_extractor.add(Activation(config.nonlinearity))
	feature_extractor.add(dropout())
	feature_extractor.add(Convolution2D(32, 64, ksize=4, stride=2, pad=1, use_weightnorm=config.use_weightnorm))
	feature_extractor.add(BatchNormalization(64))
	feature_extractor.add(Activation(config.nonlinearity))
	feature_extractor.add(dropout())
	feature_extractor.add(Convolution2D(64, 192, ksize=4, stride=2, pad=1, use_weightnorm=config.use_weightnorm))
	feature_extractor.add(BatchNormalization(192))
	feature_extractor.add(Activation(config.nonlinearity))
	feature_extractor.add(dropout())
	feature_extractor.add(Convolution2D(192, 256, ksize=4, stride=2, pad=1, use_weightnorm=config.use_weightnorm))
	feature_extractor.add(reshape_1d())
	feature_extractor.add(MinibatchDiscrimination(None, num_kernels=50, ndim_kernel=5, train_weights=True))
	feature_extractor.add(tanh())
Example #14
    config.clamp_lower = -0.01
    config.clamp_upper = 0.01
    config.num_critic = 5
    config.weight_std = 0.001
    config.weight_initializer = "Normal"
    config.nonlinearity = "leaky_relu"
    config.optimizer = "rmsprop"
    config.learning_rate = 0.0001
    config.momentum = 0.5
    config.gradient_clipping = 1
    config.weight_decay = 0

    chainer.global_config.discriminator = config

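    # discriminator: two 128-unit linear layers with leaky ReLU in between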
    discriminator = Sequential()
    discriminator.add(Linear(None, 128))
    discriminator.add(Activation(config.nonlinearity))
    discriminator.add(Linear(None, 128))

    params = {
        "config": config.to_dict(),
        "model": discriminator.to_dict(),
    }

    with open(discriminator_sequence_filename, "w") as f:
        json.dump(params, f, indent=4, sort_keys=True, separators=(',', ': '))

discriminator_params = params

# specify generator
generator_sequence_filename = args.model_dir + "/generator.json"
Example #15
        except Exception as e:
            raise Exception("could not load {}: {}".format(sequence_filename, e))
else:
    config = Params()
    config.num_classes = 10
    config.weight_std = 0.1
    config.weight_initializer = "Normal"
    config.nonlinearity = "relu"
    config.optimizer = "adam"
    config.learning_rate = 0.0001
    config.momentum = 0.9
    config.gradient_clipping = 1
    config.weight_decay = 0

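    # model: three strided convolutions (1 -> 128 channels) with batch norm, then a num_classes output layer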
    model = Sequential()
    model.add(Convolution2D(1, 32, ksize=4, stride=2, pad=1))
    model.add(BatchNormalization(32))
    model.add(Activation(config.nonlinearity))
    model.add(Convolution2D(32, 64, ksize=4, stride=2, pad=1))
    model.add(BatchNormalization(64))
    model.add(Activation(config.nonlinearity))
    model.add(Convolution2D(64, 128, ksize=3, stride=2, pad=1))
    model.add(BatchNormalization(128))
    model.add(Activation(config.nonlinearity))
    model.add(Linear(None, config.num_classes))

    params = {
        "config": config.to_dict(),
        "model": model.to_dict(),
    }
Example #16
	config.b = 1
	config.c = 1
	config.weight_std = 0.01
	config.weight_initializer = "Normal"
	config.use_weightnorm = False
	config.nonlinearity = "leaky_relu"
	config.optimizer = "adam"
	config.learning_rate = 0.0001
	config.momentum = 0.5
	config.gradient_clipping = 1
	config.weight_decay = 0


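	# discriminator: four strided 4x4 convolutions (3 -> 256 channels) with batch norm and leaky ReLU, then a single-unit output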
	discriminator = Sequential()
	# discriminator.add(gaussian_noise(std=0.3))
	discriminator.add(Convolution2D(3, 32, ksize=4, stride=2, pad=1, use_weightnorm=config.use_weightnorm))
	discriminator.add(BatchNormalization(32))
	discriminator.add(Activation(config.nonlinearity))
	discriminator.add(Convolution2D(32, 64, ksize=4, stride=2, pad=1, use_weightnorm=config.use_weightnorm))
	discriminator.add(BatchNormalization(64))
	discriminator.add(Activation(config.nonlinearity))
	discriminator.add(Convolution2D(64, 128, ksize=4, stride=2, pad=1, use_weightnorm=config.use_weightnorm))
	discriminator.add(BatchNormalization(128))
	discriminator.add(Activation(config.nonlinearity))
	discriminator.add(Convolution2D(128, 256, ksize=4, stride=2, pad=1, use_weightnorm=config.use_weightnorm))
	discriminator.add(BatchNormalization(256))
	discriminator.add(Activation(config.nonlinearity))
	discriminator.add(Linear(None, 1, use_weightnorm=config.use_weightnorm))

	discriminator_params = {
		"config": config.to_dict(),
Example #17
    config.ndim_y = 10
    config.ndim_reduction = 2
    config.ndim_z = config.ndim_reduction
    config.cluster_head_distance_threshold = 1
    config.distribution_z = "deterministic"  # deterministic or gaussian
    config.weight_std = 0.001
    config.weight_initializer = "Normal"
    config.nonlinearity = "relu"
    config.optimizer = "Adam"
    config.learning_rate = 0.0001
    config.momentum = 0.5
    config.gradient_clipping = 5
    config.weight_decay = 0

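    # decoder: two 1000-unit hidden layers and a tanh output of size ndim_x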
    decoder = Sequential()
    decoder.add(Linear(None, 1000))
    decoder.add(Activation(config.nonlinearity))
    # decoder.add(BatchNormalization(1000))
    decoder.add(Linear(None, 1000))
    decoder.add(Activation(config.nonlinearity))
    # decoder.add(BatchNormalization(1000))
    decoder.add(Linear(None, config.ndim_x))
    decoder.add(tanh())

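    # discriminator on z: input noise followed by two 1000-unit hidden layers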
    discriminator_z = Sequential()
    discriminator_z.add(gaussian_noise(std=0.3))
    discriminator_z.add(Linear(config.ndim_z, 1000))
    discriminator_z.add(Activation(config.nonlinearity))
    # discriminator_z.add(BatchNormalization(1000))
    discriminator_z.add(Linear(None, 1000))
    discriminator_z.add(Activation(config.nonlinearity))
Example #18
    config.num_clusters = 10
    config.weight_std = 0.1
    config.weight_initializer = "Normal"
    config.nonlinearity = "relu"
    config.optimizer = "adam"
    config.learning_rate = 0.002
    config.momentum = 0.9
    config.gradient_clipping = 1
    config.weight_decay = 0
    config.lam = 0.2
    config.mu = 4.0
    config.sigma = 100.0
    config.ip = 1

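    # model: two 1200-unit hidden layers with batch normalization, then an output of size num_clusters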
    model = Sequential()
    model.add(Linear(None, 1200))
    model.add(Activation(config.nonlinearity))
    model.add(BatchNormalization(1200))
    model.add(Linear(None, 1200))
    model.add(Activation(config.nonlinearity))
    model.add(BatchNormalization(1200))
    model.add(Linear(None, config.num_clusters))

    params = {
        "config": config.to_dict(),
        "model": model.to_dict(),
    }

    with open(sequence_filename, "w") as f:
        json.dump(params, f, indent=4, sort_keys=True, separators=(',', ': '))
Example #19
                learning_rate_decay=1.0,
                weight_decay=0.001)

train, label, test = load_data(data_dir)

# Model in Keras-Style!

learning_rate = 0.001

nn = Sequential(learning_rate=learning_rate,
                epochs=100,
                batch_size=100,
                learning_rate_decay=0.95,
                weight_decay=0.001)

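# fully connected 200-100-200 stack with batch normalization and a 10-way softmax output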
nn.add(Dense(n=200, in_shape=train.shape[1]))
nn.add(BatchNorm())
nn.add(Dense(n=100))
nn.add(BatchNorm())
nn.add(Dense(n=200))
nn.add(BatchNorm())
nn.add(Dense(n=10, activation="softmax"))
nn.compile(loss="cross_entropy_softmax", optimiser="Adam")

indices = list(range(len(train)))
random.shuffle(indices)

train = list(map(train.__getitem__, indices))
label = list(map(label.__getitem__, indices))

X = scale_data(train)
Example #20
def model_adam(X, y, verbose):
    nn = Sequential(learning_rate=learning_rate, epochs=epochs, batch_size=100,
                    learning_rate_decay=0.95, weight_decay=0.01)

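    # symmetric 200-100-80-40-80-100-200 stack with batch normalization and a 10-way softmax output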
    nn.add(Dense(n=200, in_shape=X.shape[1]))
    nn.add(BatchNorm())
    nn.add(Dense(n=100))
    nn.add(BatchNorm())
    nn.add(Dense(n=80))
    nn.add(BatchNorm())
    nn.add(Dense(n=40))
    nn.add(BatchNorm())
    nn.add(Dense(n=80))
    nn.add(BatchNorm())
    nn.add(Dense(n=100))
    nn.add(BatchNorm())
    nn.add(Dense(n=200))
    nn.add(BatchNorm())
    nn.add(Dense(n=10, activation="softmax"))
    nn.compile(loss="cross_entropy_softmax", optimiser="Adam")

    nn.fit(X, y, verbose)

    return nn
Example #21
    config.weight_init_std = 0.01
    config.weight_initializer = "Normal"
    config.nonlinearity = "elu"
    config.optimizer = "Adam"
    config.learning_rate = 0.0003
    config.momentum = 0.9
    config.gradient_clipping = 10
    config.weight_decay = 0
    config.use_weightnorm = False
    config.num_mc_samples = 1

    # p(x|a,y,z) - x ~ Bernoulli
    p_x_ayz = Sequential(weight_initializer=config.weight_initializer,
                         weight_init_std=config.weight_init_std)
    p_x_ayz.add(
        Merge(num_inputs=3, out_size=500,
              use_weightnorm=config.use_weightnorm))
    p_x_ayz.add(BatchNormalization(500))
    p_x_ayz.add(Activation(config.nonlinearity))
    p_x_ayz.add(Linear(None, 500, use_weightnorm=config.use_weightnorm))
    p_x_ayz.add(BatchNormalization(500))
    p_x_ayz.add(Activation(config.nonlinearity))
    p_x_ayz.add(Linear(None, 500, use_weightnorm=config.use_weightnorm))
    p_x_ayz.add(BatchNormalization(500))
    p_x_ayz.add(Activation(config.nonlinearity))
    p_x_ayz.add(
        Linear(None, config.ndim_x, use_weightnorm=config.use_weightnorm))

    # p(a|y,z) - a ~ Gaussian
    p_a_yz = Sequential(weight_initializer=config.weight_initializer,
                        weight_init_std=config.weight_init_std)
Example #22
	config = EnergyModelParams()
	config.ndim_input = image_width * image_height
	config.num_experts = 128
	config.weight_init_std = 0.05
	config.weight_initializer = "Normal"
	config.use_weightnorm = True
	config.nonlinearity = "elu"
	config.optimizer = "Adam"
	config.learning_rate = 0.0002
	config.momentum = 0.5
	config.gradient_clipping = 10
	config.weight_decay = 0

	# feature extractor
	feature_extractor = Sequential(weight_initializer=config.weight_initializer, weight_init_std=config.weight_init_std)
	feature_extractor.add(Linear(config.ndim_input, 1000, use_weightnorm=config.use_weightnorm))
	feature_extractor.add(Activation(config.nonlinearity))
	feature_extractor.add(gaussian_noise(std=0.3))
	feature_extractor.add(Linear(None, 500, use_weightnorm=config.use_weightnorm))
	feature_extractor.add(Activation(config.nonlinearity))
	feature_extractor.add(gaussian_noise(std=0.3))
	feature_extractor.add(Linear(None, 250, use_weightnorm=config.use_weightnorm))
	feature_extractor.add(Activation(config.nonlinearity))
	feature_extractor.add(gaussian_noise(std=0.3))
	feature_extractor.add(Linear(None, config.num_experts, use_weightnorm=config.use_weightnorm))
	feature_extractor.add(tanh())

	# experts
	experts = Sequential(weight_initializer=config.weight_initializer, weight_init_std=config.weight_init_std)
	experts.add(Linear(config.num_experts, config.num_experts, use_weightnorm=config.use_weightnorm))
Example #23
    config = DiscriminatorParams()
    config.a = 0
    config.b = 1
    config.c = 1
    config.weight_std = 0.01
    config.weight_initializer = "Normal"
    config.use_weightnorm = False
    config.nonlinearity = "leaky_relu"
    config.optimizer = "adam"
    config.learning_rate = 0.0001
    config.momentum = 0.5
    config.gradient_clipping = 1
    config.weight_decay = 0

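    # discriminator: two 128-unit hidden layers with leaky ReLU and a single-unit output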
    discriminator = Sequential()
    discriminator.add(Linear(None, 128, use_weightnorm=config.use_weightnorm))
    discriminator.add(Activation(config.nonlinearity))
    # discriminator.add(BatchNormalization(128))
    discriminator.add(Linear(None, 128, use_weightnorm=config.use_weightnorm))
    discriminator.add(Activation(config.nonlinearity))
    # discriminator.add(BatchNormalization(128))
    discriminator.add(Linear(None, 1, use_weightnorm=config.use_weightnorm))

    discriminator_params = {
        "config": config.to_dict(),
        "model": discriminator.to_dict(),
    }

    with open(discriminator_sequence_filename, "w") as f:
        json.dump(discriminator_params,
                  f,
Example #24
	config.ndim_y = 10
	config.ndim_z = 100
	config.weight_init_std = 0.01
	config.weight_initializer = "Normal"
	config.nonlinearity = "relu"
	config.optimizer = "Adam"
	config.learning_rate = 0.0003
	config.momentum = 0.9
	config.gradient_clipping = 10
	config.weight_decay = 0
	config.use_weightnorm = False
	config.num_mc_samples = 1

	# p(x|a,y,z) - x ~ Bernoulli
	p_x_ayz = Sequential(weight_initializer=config.weight_initializer, weight_init_std=config.weight_init_std)
	p_x_ayz.add(Merge(num_inputs=3, out_size=500, use_weightnorm=config.use_weightnorm))
	p_x_ayz.add(BatchNormalization(500))
	p_x_ayz.add(Activation(config.nonlinearity))
	p_x_ayz.add(Linear(None, 500, use_weightnorm=config.use_weightnorm))
	p_x_ayz.add(BatchNormalization(500))
	p_x_ayz.add(Activation(config.nonlinearity))
	p_x_ayz.add(Linear(None, 500, use_weightnorm=config.use_weightnorm))
	p_x_ayz.add(BatchNormalization(500))
	p_x_ayz.add(Activation(config.nonlinearity))
	p_x_ayz.add(Linear(None, config.ndim_x, use_weightnorm=config.use_weightnorm))

	# p(a|y,z) - a ~ Gaussian
	p_a_yz = Sequential(weight_initializer=config.weight_initializer, weight_init_std=config.weight_init_std)
	p_a_yz.add(Merge(num_inputs=2, out_size=500, use_weightnorm=config.use_weightnorm))
	p_a_yz.add(BatchNormalization(500))
	p_a_yz.add(Activation(config.nonlinearity))
Example #25
    config = DiscriminatorParams()
    config.weight_init_std = 0.001
    config.weight_initializer = "Normal"
    config.use_weightnorm = False
    config.nonlinearity = "elu"
    config.optimizer = "Adam"
    config.learning_rate = 0.0001
    config.momentum = 0.5
    config.gradient_clipping = 10
    config.weight_decay = 0
    config.use_feature_matching = False
    config.use_minibatch_discrimination = False

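    # discriminator: input noise followed by strided 4x4 convolutions with batch norm and ELU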
    discriminator = Sequential(weight_initializer=config.weight_initializer,
                               weight_init_std=config.weight_init_std)
    discriminator.add(gaussian_noise(std=0.3))
    discriminator.add(
        Convolution2D(3,
                      32,
                      ksize=4,
                      stride=2,
                      pad=1,
                      use_weightnorm=config.use_weightnorm))
    discriminator.add(BatchNormalization(32))
    discriminator.add(Activation(config.nonlinearity))
    discriminator.add(
        Convolution2D(32,
                      64,
                      ksize=4,
                      stride=2,
                      pad=1,
Example #26
def init_nn():

    learning_rate = 0.001

    nn = Sequential(learning_rate=learning_rate,
                    epochs=50,
                    batch_size=100,
                    learning_rate_decay=0.95,
                    weight_decay=0.001)

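    # symmetric 200-100-80-20-80-100-200 stack with batch normalization and a 10-way softmax output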
    nn.add(Dense(n=200, in_shape=train.shape[1]))
    nn.add(BatchNorm())
    nn.add(Dense(n=100))
    nn.add(BatchNorm())
    nn.add(Dense(n=80))
    nn.add(BatchNorm())
    nn.add(Dense(n=20))
    nn.add(BatchNorm())
    nn.add(Dense(n=80))
    nn.add(BatchNorm())
    nn.add(Dense(n=100))
    nn.add(BatchNorm())
    nn.add(Dense(n=200))
    nn.add(BatchNorm())
    nn.add(Dense(n=10, activation="softmax"))
    nn.compile(loss="cross_entropy_softmax", optimiser="Adam")

    return nn