Example #1
 def _get_output(self, X, train=False):
     mean, logsigma = self.get_mean_logsigma(X)
     if train:
         if K._BACKEND == 'theano':
             eps = K.random_normal((X.shape[0], self.output_dim))
         else:
             eps = K.random_normal((self.batch_size, self.output_dim))
         return mean + K.exp(logsigma) * eps
     else:
         return mean
Example #2
 def _get_output(self, X, train=False):
     mean, logsigma = self.get_mean_logsigma(X)
     if train:
         ### Temporary change, scale down size of noise
         if K._BACKEND == 'theano':
             eps = K.random_normal((X.shape[0], self.output_dim), std=self.regularizer_scale)
         else:
             eps = K.random_normal((self.batch_size, self.output_dim))
         ### Temporary change, multiply by regularizer_scale 
         return mean + self.regularizer_scale * K.exp(logsigma) * eps
     else:
         return mean
Example #3
 def _get_output(self, X, train=False):
     mean, logsigma = self.get_mean_logsigma(X)
     if train:
         # infer batch size for theano backend
         if K._BACKEND == 'theano':
             eps = K.random_normal((X.shape[0], self.output_dim))
         else:
             sizes = K.concatenate([K.shape(X)[0:1],
                                    np.asarray([self.output_dim, ])])
             eps = K.random_normal(sizes)
         # return sample : mean + std * noise
         return mean + K.exp(logsigma) * eps
     else:
         # for testing, the sample is deterministic
         return mean
Example #4
def sampling(args):
    """Reparameterization trick by sampling fr an isotropic unit Gaussian.
    # Arguments:
        args (tensor): mean and log of variance of Q(z|X)
    # Returns:
        z (tensor): sampled latent vector
    """
    print("args")
    print(args)
    # print("z_mean")
    # print(z_mean)
    # print("z_log_var")
    # print(z_log_var)
    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    print("batch")
    print(z_mean.shape[0])
    print(batch)
    dim = K.int_shape(z_mean)[1]
    print("dim")
    print(z_mean.shape[1])
    print(dim)
    # by default, random_normal has mean=0 and std=1.0
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon
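For context, a sampling function like this is typically wrapped in a Lambda layer so the reparameterized draw becomes part of the model graph. A minimal sketch follows; the latent size, input shape, and layer names are illustrative assumptions, not part of the example above:

from keras.layers import Input, Dense, Lambda
from keras.models import Model

latent_dim = 2                          # assumed latent size
inputs = Input(shape=(784,))            # assumed input shape
h = Dense(256, activation='relu')(inputs)
z_mean = Dense(latent_dim, name='z_mean')(h)
z_log_var = Dense(latent_dim, name='z_log_var')(h)
# wrap the sampling function above so it runs on symbolic tensors
z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])
encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')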
Example #5
File: variational.py Project: t13m/seya
 def _get_output(self, X, train=False):
     mean, logsigma = self.get_mean_logsigma(X)
     if train:
         eps = K.random_normal((self.batch_size, self.output_dim))
         return mean + K.exp(logsigma) * eps
     else:
         return mean
Example #6
def sampling(args):
    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    # by default, random_normal has mean=0 and std=1.0
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon
Example #7
    def call(self, x):
        # sample from noise distribution
        e_i = K.random_normal((self.input_dim, self.units))
        e_j = K.random_normal((self.units,))

        # We use the factorized Gaussian noise variant from Section 3 of Fortunato et al.
        eW = K.sign(e_i) * (K.sqrt(K.abs(e_i))) * K.sign(e_j) * (K.sqrt(K.abs(e_j)))
        eB = K.sign(e_j) * (K.abs(e_j) ** (1 / 2))

        noise_injected_weights = K.dot(x, self.mu_weight + (self.sigma_weight * eW))
        noise_injected_bias = self.mu_bias + (self.sigma_bias * eB)

        output = K.bias_add(noise_injected_weights, noise_injected_bias)
        if self.activation is not None:
            output = self.activation(output)
        return output
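The factorized-noise scaling used above, f(x) = sign(x) * sqrt(|x|), can be checked in isolation. Below is a small NumPy sketch of the rank-1 factorization from Fortunato et al.; the names and shapes are illustrative (note that the layer above draws e_i per weight rather than per input unit):

import numpy as np

def f(x):
    # scaling for factorized Gaussian noise: f(x) = sign(x) * sqrt(|x|)
    return np.sign(x) * np.sqrt(np.abs(x))

input_dim, units = 4, 3
e_i = np.random.randn(input_dim, 1)   # one noise draw per input unit
e_j = np.random.randn(1, units)       # one noise draw per output unit
eW = f(e_i) * f(e_j)                  # rank-1 weight noise, shape (input_dim, units)
eB = f(e_j).ravel()                   # bias noise, shape (units,)
print(eW.shape, eB.shape)             # (4, 3) (3,)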
Example #8
def sampling(args):
    mean, std = args
    eps = K.random_normal(
        shape=(batch_size,latent_dim),
        mean=0.0,
        std=epsilon
    )
    return mean + std * eps
def normal_latent_sampling(latent_shape):
    """
    Sample from normal distribution
    :param latent_shape: shape of a single latent sample (excluding the batch dimension)
    :return: normal samples, shape=(n,)+latent_shape
    """
    return Lambda(lambda x: K.random_normal((K.shape(x)[0],) + latent_shape),
                  output_shape=lambda x: ((x[0],) + latent_shape))
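In practice this helper is called on any existing tensor just to pick up its batch dimension. A short usage sketch; the latent size and input shape are assumptions:

from keras.layers import Input

latent_dim = 100                               # assumed latent size
x = Input(shape=(28, 28, 1))                   # only the batch dimension of x is used
z = normal_latent_sampling((latent_dim,))(x)   # shape (batch, latent_dim)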
    def build(self, input_shape):
        input_dim = input_shape[1]
        shape = [input_dim, self.output_dim]
        self.W = K.random_normal(shape)
        v = 1.0 # np.sqrt(6.0 / (input_dim + self.output_dim))
        self.mean = K.variable(np.random.uniform(low=-v, high=v, size=shape))
        self.log_stdev = K.variable(np.random.uniform(low=-v, high=v, size=shape))
        self.bias = K.variable(np.random.uniform(low=-v, high=v, size=[self.output_dim]))

        self.trainable_weights = [self.mean, self.bias, self.log_stdev]
Example #11
    def build(self, input_shape):
        input_dim = input_shape[1]
        shape = [input_dim, self.output_dim]
        self.epsilon = K.random_normal(shape, mean=self.mean_prior, std=self.std_prior)
        v = np.sqrt(6.0 / (input_dim + self.output_dim))
        self.mean = K.variable(np.random.uniform(low=-v, high=v, size=shape), name='mean')
        self.log_std = K.variable(np.random.uniform(low=-v, high=v, size=shape), name='log_std')
        self.bias = K.variable(np.random.uniform(low=-v, high=v, size=[self.output_dim]), name='bias')
        self.W = self.epsilon*K.log(1.0 + K.exp(self.log_std)) + self.mean

        self.trainable_weights = [self.mean, self.log_std, self.bias]
Example #12
def model_encoder(latent_dim, input_shape, hidden_dim=512, reg=lambda: l1(1e-7)):
    x = Input(input_shape, name="x")
    h = Flatten()(x)
    h = Dense(hidden_dim, name="encoder_h1", W_regularizer=reg())(h)
    h = LeakyReLU(0.2)(h)
    h = Dense(hidden_dim, name="encoder_h2", W_regularizer=reg())(h)
    h = LeakyReLU(0.2)(h)
    mu = Dense(latent_dim, name="encoder_mu", W_regularizer=reg())(h)
    log_sigma_sq = Dense(latent_dim, name="encoder_log_sigma_sq", W_regularizer=reg())(h)
    z = merge([mu, log_sigma_sq], mode=lambda p: p[0] + K.random_normal(K.shape(p[0])) * K.exp(p[1] / 2),
              output_shape=lambda p: p[0])
    return Model(x, z, name="encoder")
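The merge(..., mode=...) call above is the Keras 1 functional API; with Keras 2 the same reparameterized draw is usually written with a Lambda layer instead. A sketch, reusing mu and log_sigma_sq from the encoder above:

from keras.layers import Lambda
from keras import backend as K

z = Lambda(lambda p: p[0] + K.random_normal(K.shape(p[0])) * K.exp(p[1] / 2),
           output_shape=lambda p: p[0])([mu, log_sigma_sq])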
Example #13
    def build(self, input_shape):
        if self.dim_ordering == 'th':
            stack_size = input_shape[1]
            self.W_shape = (self.nb_filter, stack_size, self.nb_row, self.nb_col)
        elif self.dim_ordering == 'tf':
            stack_size = input_shape[3]
            self.W_shape = (self.nb_row, self.nb_col, stack_size, self.nb_filter)
        else:
            raise Exception('Invalid dim_ordering: ' + self.dim_ordering)

        # self.W = self.init(self.W_shape, name='{}_W'.format(self.name))

        input_dim, output_dim = initializations.get_fans(self.W_shape)
        v = np.sqrt(6.0 / (input_dim + output_dim))
        values = np.random.uniform(low=-v, high=v, size=self.W_shape)
        self.mean = K.variable(values, name='mean')
        values = np.random.uniform(low=-v, high=v, size=self.W_shape)
        self.log_std = K.variable(values, name='log_std')


        self.epsilon = K.random_normal(self.W_shape,
                                       mean=self.mean_prior, std=self.std_prior)
        self.W = self.epsilon*K.log(1.0 + K.exp(self.log_std)) + self.mean

        if self.bias:
            self.b = K.zeros((self.nb_filter,), name='{}_b'.format(self.name))
            self.trainable_weights = [self.mean, self.log_std, self.b]
        else:
            self.trainable_weights = [self.mean, self.log_std]
        self.regularizers = []

        if self.W_regularizer:
            self.W_regularizer.set_param(self.W)
            self.regularizers.append(self.W_regularizer)

        if self.bias and self.b_regularizer:
            self.b_regularizer.set_param(self.b)
            self.regularizers.append(self.b_regularizer)

        if self.activity_regularizer:
            self.activity_regularizer.set_layer(self)
            self.regularizers.append(self.activity_regularizer)

        self.constraints = {}
        if self.W_constraint:
            self.constraints[self.W] = self.W_constraint
        if self.bias and self.b_constraint:
            self.constraints[self.b] = self.b_constraint

        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
Example #14
def sampling(args):
	'''
	Reparameterization trick by sampling from an isotropic unit Gaussian.
	Args:
		args(tensor): mean and log of variance of Q(z|X)
	Returns:
		z(tensor): sampled latent vector
	'''
	z_mean, z_log_var = args 
	batch = K.shape(z_mean)[0]
	dim = K.int_shape(z_mean)[1]
	# By default, random_normal has mean=0 and std=1.0
	epsilon = K.random_normal(shape=(batch, dim))
	return z_mean + K.exp(0.5 * z_log_var) * epsilon
Example #15
    def build_encoder(self):
        # Encoder

        img = Input(shape=self.img_shape)

        h = Flatten()(img)
        h = Dense(512)(h)
        h = LeakyReLU(alpha=0.2)(h)
        h = Dense(512)(h)
        h = LeakyReLU(alpha=0.2)(h)
        mu = Dense(self.latent_dim)(h)
        log_var = Dense(self.latent_dim)(h)
        latent_repr = merge([mu, log_var],
                mode=lambda p: p[0] + K.random_normal(K.shape(p[0])) * K.exp(p[1] / 2),
                output_shape=lambda p: p[0])

        return Model(img, latent_repr)
Example #16
def model_encoder(latent_dim, input_shape, hidden_dim=1024, reg=lambda: l1(1e-5), batch_norm_mode=2):
    x = Input(input_shape, name="x")
    h = Flatten()(x)
    h = Dense(hidden_dim, name="encoder_h1", W_regularizer=reg())(h)
    h = BatchNormalization(mode=batch_norm_mode)(h)
    h = LeakyReLU(0.2)(h)
    h = Dense(hidden_dim / 2, name="encoder_h2", W_regularizer=reg())(h)
    h = BatchNormalization(mode=batch_norm_mode)(h)
    h = LeakyReLU(0.2)(h)
    h = Dense(hidden_dim / 4, name="encoder_h3", W_regularizer=reg())(h)
    h = BatchNormalization(mode=batch_norm_mode)(h)
    h = LeakyReLU(0.2)(h)
    mu = Dense(latent_dim, name="encoder_mu", W_regularizer=reg())(h)
    log_sigma_sq = Dense(latent_dim, name="encoder_log_sigma_sq", W_regularizer=reg())(h)
    z = merge([mu, log_sigma_sq], mode=lambda p: p[0] + K.random_normal(p[0].shape) * K.exp(p[1] / 2),
              output_shape=lambda x: x[0])
    return Model(x, z, name="encoder")
Example #17
def model_encoder(latent_dim, input_shape,
                  hidden_dim=100,
                  reg=lambda: l1l2(1e-7, 0)):
    x = Input(input_shape, name="x")
    h = GRU(hidden_dim, return_sequences=True)(x)
    h = GRU(hidden_dim)(h)
    h = Dense(hidden_dim, activation=activation,
              name="encoder_h3",
              W_regularizer=reg())(h)    
    
    mu = Dense(latent_dim, name="encoder_mu", W_regularizer=reg())(h)
    
    log_sigma_sq = Dense(latent_dim, name="encoder_log_sigma_sq",
                         W_regularizer=reg())(h)
    
    z = merge([mu, log_sigma_sq],
              mode=lambda p: p[0] + K.random_normal(K.shape(p[0])) * K.exp(p[1] / 2),
              output_shape=lambda p: p[0])
    
    return Model(x, z, name="encoder")
Example #18
def sampling(args):
    """Reparameterization trick by sampling fr an isotropic unit Gaussian.
    # Arguments:
        args (tensor): mean and log of variance of Q(z|X)
    # Returns:
        z (tensor): sampled latent vector
    """

    z_mean, z_log_var = args
    print('z_mean shape')
    print(z_mean.shape)
    print('z_log_var shape')
    print(z_log_var.shape)
    # time.sleep(2)
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    # by default, random_normal has mean=0 and std=1.0
    epsilon = K.random_normal(shape=(batch, dim))
    f = K.exp(0.5*z_log_var)
    print('f shape')
    print(K.shape(f))
    # time.sleep(20)
    return z_mean + K.exp(0.5 * z_log_var) * epsilon
Example #19
def model_encoder(latent_dim, input_shape, units=512, reg=lambda: l1l2(l1=1e-7, l2=1e-7), dropout=0.5):
    k = 5
    x = Input(input_shape)
    h = Convolution2D(units / 4, k, k, border_mode='same', W_regularizer=reg())(x)
    # h = SpatialDropout2D(dropout)(h)
    h = MaxPooling2D(pool_size=(2, 2))(h)
    h = LeakyReLU(0.2)(h)
    h = Convolution2D(units / 2, k, k, border_mode='same', W_regularizer=reg())(h)
    # h = SpatialDropout2D(dropout)(h)
    h = MaxPooling2D(pool_size=(2, 2))(h)
    h = LeakyReLU(0.2)(h)
    h = Convolution2D(units / 2, k, k, border_mode='same', W_regularizer=reg())(h)
    # h = SpatialDropout2D(dropout)(h)
    h = MaxPooling2D(pool_size=(2, 2))(h)
    h = LeakyReLU(0.2)(h)
    h = Convolution2D(units, k, k, border_mode='same', W_regularizer=reg())(h)
    # h = SpatialDropout2D(dropout)(h)
    h = LeakyReLU(0.2)(h)
    h = Flatten()(h)
    mu = Dense(latent_dim, name="encoder_mu", W_regularizer=reg())(h)
    log_sigma_sq = Dense(latent_dim, name="encoder_log_sigma_sq", W_regularizer=reg())(h)
    z = Lambda(lambda (_mu, _lss): _mu + K.random_normal(K.shape(_mu)) * K.exp(_lss / 2),
               output_shape=lambda (_mu, _lss): _mu)([mu, log_sigma_sq])
    return Model(x, z, name="encoder")
Example #20
File: model.py Project: AhlamMD/deepchem
 def sampling(args):
   z_mean_, z_log_var_ = args
   batch_size = K.shape(z_mean_)[0]
   epsilon = K.random_normal(
       shape=(batch_size, latent_rep_size), mean=0., std=epsilon_std)
   return z_mean_ + K.exp(z_log_var_ / 2) * epsilon
Example #21
def sample_z(args):
    mu, log_sigma = args
    eps = K.random_normal(shape=(batch_size, latent_dim),
                          mean=epsilon_mean,
                          stddev=epsilon_std)
    return mu + K.exp(log_sigma / 2) * eps
Example #22
 def sample(m):
     m, log_var = m
     norm = K.random_normal(shape=(32, LATENT_DIM, 32, 32), mean=0., std=1.)
     return norm * K.exp(log_var / 2) + m
Example #23
def sample_z(args):
    mu, log_sigma = args
    batch = K.shape(mu)[0]
    dim = K.int_shape(mu)[1]
    eps = K.random_normal(shape=(batch, dim), mean=0, stddev=1.)
    return mu + K.exp(log_sigma / 2) * eps
Example #24
 def gen(in_):
     z_mean, z_log_sigma = in_
     epsilon = K.random_normal(shape=(batch_size, LATENT_DIM))
     ex = K.exp(0.5 * z_log_sigma)
     return z_mean + ex * epsilon
Example #25
File: f-VAEs.py Project: wgwangang/flow
                    padding='SAME')(z_)
        z_ = BatchNormalization()(z_)
        z_ = Activation('relu')(z_)
        z_ = Conv2D(K.int_shape(z_)[-1],
                    kernel_size=(1, 1),
                    padding='SAME',
                    kernel_initializer='zeros')(z_)
        z = Add()([z, z_])
    z = fl.UnSqueeze()(z)

z = Activation('tanh')(z)

decoder = Model(z_in, z)
decoder.summary()

u = Lambda(lambda z: K.random_normal(shape=K.shape(z)))(x)  # keep this, do not change
z = Reshape(K.int_shape(u)[1:] + (1, ))(u)
z = fl.Actnorm(use_shift=False)(z)
z = Reshape(K.int_shape(z)[1:-1])(z)
z = Add()([z, x])

x_recon = decoder(z)
x_recon = Subtract()([x_recon, x_in])
x_recon = Reshape(K.int_shape(x_recon)[1:] + (1, ))(x_recon)
x_recon = fl.Actnorm(use_shift=False)(x_recon)
x_recon = Reshape(K.int_shape(x_recon)[1:-1])(x_recon)

recon_loss = 0.5 * K.sum(K.mean(x_recon**2, 0)) + 0.5 * np.log(
    2 * np.pi) * np.prod(K.int_shape(x_recon)[1:])

depth = 12
Example #26
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(K.shape(z_mean)[0], Z_DIM), mean=0.,stddev=1.)
    return z_mean + K.exp(z_log_var / 2) * epsilon
Example #27
def sampling(args):
    z_mean, z_log_std = args
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(z_log_std) * epsilon
Example #28
 def sampling(args):
     z_mean, z_log_var = args
     return z_mean + K.exp(0.5 * z_log_var) * K.random_normal(
         K.shape(z_mean), seed=0)
Example #29
 def sampling(args):
     z_mean, z_log_var = args
     batch = K.shape(z_mean)[0]
     dim = K.int_shape(z_mean)[1]
     epsilon = K.random_normal(shape=(batch, dim), seed=0)
     return z_mean + K.exp(0.5 * z_log_var) * epsilon
Example #30
def sampling(args):
    latent_mean, latent_log_value = args
    batch = K.shape(latent_mean)[0]
    dimension = K.int_shape(latent_mean)[1]
    epsilon = K.random_normal(shape = (batch, dimension))
    return latent_mean + K.exp(0.5 * latent_log_value) * epsilon
Example #31
File: dropouts.py Project: yakolle/ml2lm
            def dropped_inputs():
                if 'max' == self.agg_method:
                    x_agg = bk.max(inputs, axis=0)
                    if self.smooth_rate > 0:
                        x_agg = self.smooth_rate * bk.mean(
                            inputs, axis=0) + (1 - self.smooth_rate) * x_agg
                elif 'extreme' == self.agg_method:
                    x_mean = bk.mean(inputs, axis=0)
                    x_agg = tf.where(x_mean >= 0, bk.max(inputs, axis=0),
                                     bk.min(inputs, axis=0))
                    if self.smooth_rate > 0:
                        x_agg = self.smooth_rate * x_mean + (
                            1 - self.smooth_rate) * x_agg
                else:
                    x_agg = bk.mean(inputs, axis=0)

                x_min, x_max = bk.min(x_agg), bk.max(x_agg)
                x_agg_int = bk.cast(
                    input_shape[-1] * (x_agg - x_min) / (x_max - x_min),
                    'int32')
                if self.unique_supported:
                    _, idx, counts = tf.unique_with_counts(x_agg_int)
                    dr = self.rate**(
                        1. / (self.anneal * bk.cast(counts, inputs.dtype)))
                    dr = tf.where(1 == counts, self.rate * bk.ones_like(dr),
                                  dr)
                else:

                    def _seg_dr(ele):
                        _cnt = bk.sum(bk.cast(ele == x_agg_int, inputs.dtype))
                        _dr = self.rate if 1 == _cnt else self.rate**(
                            1. / (self.anneal * _cnt))
                        return _dr

                    dr = bk.map_fn(_seg_dr, x_agg_int, dtype=inputs.dtype)
                    idx = bk.arange(0, x_agg_int.shape[0])

                if 'gaussian' == self.noise_type:
                    sigma = (dr / (1. - dr))**.5
                    noise_tensor = bk.gather(sigma, idx) * bk.random_normal(
                        x_agg_int.shape, dtype=inputs.dtype) + 1.
                    return inputs * noise_tensor
                else:
                    dr_tensor = bk.random_uniform(noise_shape,
                                                  seed=self.seed,
                                                  dtype=inputs.dtype)
                    ret = inputs * bk.cast(dr_tensor >= bk.gather(dr, idx),
                                           inputs.dtype)

                    if 'abs' == self.keep_amp_type:
                        old_amps = bk.sum(bk.abs(inputs),
                                          axis=-1,
                                          keepdims=True)
                        cur_amps = bk.sum(bk.stop_gradient(bk.abs(ret)),
                                          axis=-1,
                                          keepdims=True)
                        ret = ret * old_amps / (cur_amps + self.epsilon)
                    elif self.keep_amp_type is not None:
                        old_amps = bk.sum(inputs, axis=-1, keepdims=True)
                        cur_amps = bk.sum(bk.stop_gradient(ret),
                                          axis=-1,
                                          keepdims=True)
                        ret = ret * old_amps / (cur_amps + self.epsilon)

                    return ret
Example #32
    def sampling(args):
        z_mean, z_log_var = args
        epsilon = K.random_normal(shape=(encoder_layers[-1], ), mean=0.)

        return z_mean + K.exp(z_log_var / 2) * epsilon
Example #33
    def build(self):
        model = self.net.model
        mu_model = self.net.mu_model
        log_std_model = self.net.log_std_model
        q_model = self.net.q_model
        target_model = self.net.target_model
        target_mu_model = self.net.target_mu_model
        target_log_std_model = self.net.target_log_std_model
        target_q_model = self.net.target_q_model

        self.states = tf.placeholder(tf.float32,
                                     shape=(None, self.in_dim),
                                     name='states')
        self.actions = tf.placeholder(tf.float32,
                                      shape=[None, self.action_dim],
                                      name='actions')
        self.rewards = tf.placeholder(tf.float32, shape=[None], name='rewards')
        self.next_states = tf.placeholder(tf.float32,
                                          shape=[None, self.in_dim],
                                          name='next_states')
        self.ys = tf.placeholder(tf.float32, shape=[None])

        # There are other ways we could choose the action here,
        # e.g. taking the next action, using only mu, or searching for the action that maximizes Q.
        target_mu = target_mu_model(self.states)
        target_log_std = target_log_std_model(self.states)
        target_action = target_mu + K.random_normal(
            K.shape(target_mu), dtype=tf.float32) * K.exp(target_log_std)
        self.target_q = K.sum(target_q_model(
            Concatenate()([target_model(self.states), target_action])),
                              axis=-1)

        self.q = K.sum(q_model(
            Concatenate()([model(self.states), self.actions])),
                       axis=-1)
        self.q_loss = K.mean(K.square(self.ys - self.q))

        self.mu = mu_model(self.states)
        self.log_std = log_std_model(self.states)
        self.eta = (self.actions - self.mu) / K.exp(self.log_std)
        inferred_action = self.mu + K.stop_gradient(self.eta) * K.exp(
            self.log_std)
        self.pi_loss = -K.mean(
            q_model(Concatenate()([model(self.states), inferred_action])))

        self.q_updater = self.q_optimizer.minimize(self.q_loss,
                                                   var_list=self.net.var_q)
        self.pi_updater = self.pi_opimizer.minimize(self.pi_loss,
                                                    var_list=self.net.var_pi)

        self.soft_updater = [
            K.update(t_p,
                     t_p * (1 - self.tau) + p * self.tau)
            for p, t_p in zip(self.net.var_all, self.net.var_target_all)
        ]
        self.sync = [
            K.update(t_p, p)
            for p, t_p in zip(self.net.var_all, self.net.var_target_all)
        ]

        self.sess.run(tf.global_variables_initializer())
        self.built = True
Example #34
 def _sampling(args):
     z_mean = args
     batch = K.shape(z_mean)[0]
     dim = K.int_shape(z_mean)[1]
     epsilon = K.random_normal(shape=(batch, dim))
     return z_mean + float(std) * epsilon
Example #35
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim), mean=0.,
                              stddev=epsilon_std)
    return z_mean + K.exp(z_log_var / 2) * epsilon
Example #36
 def sampling(args):
     z_mean, z_log_var = args
     epsilon = K.random_normal(shape=(K.shape(z_mean)[0], input_shape),
                               mean=0.,
                               stddev=1.)
     return z_mean + K.exp(z_log_var / 2) * epsilon
Example #37
def _conv_kernel_initializer(shape, dtype=None):
    fan_in, fan_out = _compute_fans(shape)
    stddev = np.sqrt(2. / fan_in)
    return K.random_normal(shape, 0., stddev, dtype)
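Since Keras accepts a plain callable as an initializer, a function like this can be passed straight to a layer. A brief usage sketch; the layer and its arguments are illustrative:

from keras.layers import Conv2D

conv = Conv2D(32, (3, 3), padding='same',
              kernel_initializer=_conv_kernel_initializer)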
Example #38
    def model_encoder(self,
                      units=512,
                      reg=lambda: regularizers.l1_l2(l1=1e-7, l2=1e-7),
                      dropout=0.5):
        k = 5
        x = Input(shape=self.img_shape)
        h = Conv2D(units // 4, (k, k),
                   padding='same',
                   kernel_regularizer=reg())(x)
        # h = SpatialDropout2D(dropout)(h)
        h = MaxPooling2D(pool_size=(2, 2))(h)  # 32 x 32
        #h = LeakyReLU(0.2)(h)
        h = PReLU()(h)
        h = Conv2D(units // 2, (k, k),
                   padding='same',
                   kernel_regularizer=reg())(h)
        # h = SpatialDropout2D(dropout)(h)
        h = MaxPooling2D(pool_size=(2, 2))(h)  # 16 x 16
        #h = LeakyReLU(0.2)(h)
        h = PReLU()(h)
        h = Conv2D(units // 2, (k, k),
                   padding='same',
                   kernel_regularizer=reg())(h)
        # h = SpatialDropout2D(dropout)(h)
        h = MaxPooling2D(pool_size=(2, 2))(h)  # 8 x 8
        #h = LeakyReLU(0.2)(h)
        h = PReLU()(h)
        h = Conv2D(units, (k, k), padding='same', kernel_regularizer=reg())(h)
        # h = SpatialDropout2D(dropout)(h)
        #h = LeakyReLU(0.2)(h)
        h = PReLU()(h)
        h = Flatten()(h)

        x2 = Input(shape=self.bathy_shape)
        h2 = ZeroPadding2D(padding=((6, 5), (6,
                                             5)))(x2)  # from 21x21 to 32 x 32
        h2 = Conv2D(units // 4, (k, k),
                    padding='same',
                    kernel_regularizer=reg())(h2)
        # h = SpatialDropout2D(dropout)(h)
        h2 = MaxPooling2D(pool_size=(2, 2))(h2)  # 16 x 16
        #h = LeakyReLU(0.2)(h)
        h2 = PReLU()(h2)
        h2 = Conv2D(units // 2, (k, k),
                    padding='same',
                    kernel_regularizer=reg())(h2)
        # h = SpatialDropout2D(dropout)(h)
        h2 = MaxPooling2D(pool_size=(2, 2))(h2)  # 8 x 8
        #h = LeakyReLU(0.2)(h)
        h2 = PReLU()(h2)
        h2 = Conv2D(units // 2, (k, k),
                    padding='same',
                    kernel_regularizer=reg())(h2)
        # h = SpatialDropout2D(dropout)(h)
        h2 = MaxPooling2D(pool_size=(2, 2))(h2)  # 4 x 4
        #h = LeakyReLU(0.2)(h)
        h2 = PReLU()(h2)
        h2 = Conv2D(units, (k, k), padding='same',
                    kernel_regularizer=reg())(h2)
        # h = SpatialDropout2D(dropout)(h)
        #h = LeakyReLU(0.2)(h)
        h2 = PReLU()(h2)
        h2 = Flatten()(h2)

        hcomb = Concatenate()([h, h2])

        mu = Dense(self.latent_dim,
                   name="encoder_mu",
                   kernel_regularizer=reg())(hcomb)
        log_sigma_sq = Dense(self.latent_dim,
                             name="encoder_log_sigma_sq",
                             kernel_regularizer=reg())(hcomb)
        # z = Lambda(lambda (_mu, _lss): _mu + K.random_normal(K.shape(_mu)) * K.exp(_lss / 2),output_shape=lambda (_mu, _lss): _mu)([mu, log_sigma_sq])
        z = Lambda(lambda ml: ml[0] + K.random_normal(K.shape(ml[0])) * K.exp(
            ml[1] / 2),
                   output_shape=lambda ml: ml[0])([mu, log_sigma_sq])

        return Model([x, x2], z, name="encoder")
Example #39
 def sampling(self, args):
     z_mean, z_sigma = args
     epsilon = K.random_normal(shape=(self.z_dim, ),
                               mean=0.,
                               stddev=epsilon_std)
     return z_mean + z_sigma * epsilon
Example #40
    # Reparameterization trick
    def call(self, inputs):
        z, shift, log_scale = inputs
        z = K.exp(log_scale) * z + shift
        logdet = -K.sum(K.mean(log_scale, 0))
        self.add_loss(logdet)
        return z


# compute the mean and variance of p(Z|X)
z_shift = Dense(z_dim)(x)

z_log_scale = Dense(z_dim)(x)
# reparameterization layer, equivalent to adding noise to the input
u = Lambda(lambda z: K.random_normal(shape=K.shape(z)))(z_shift)
z = ScaleShift()([u, z_shift, z_log_scale])

x_recon = decoder(z)
x_out = Subtract()([x_in, x_recon])
# recon_loss is the reconstruction loss, z_loss is the KL loss
recon_loss = 0.5 * K.sum(K.mean(x_out**2, 0)) + 0.5 * np.log(
    2 * np.pi) * np.prod(K.int_shape(x_out)[1:])
z_loss = 0.5 * K.sum(K.mean(z**2, 0)) - 0.5 * K.sum(K.mean(u**2, 0))
vae_loss = recon_loss + z_loss

vae = Model(x_in, x_out)
vae.add_loss(vae_loss)
vae.compile(optimizer=Adam(1e-4))

Example #41
def sampling(z_par):
    z_mu, z_var = z_par
    epsilon = K.random_normal(shape=z_mu.shape)
    return z_mu + K.sqrt(z_var) * epsilon
Example #42
 def sampling(args):
     z_mean, z_log_var = args
     epsilon = K.random_normal(shape=(batch_size, latent_dim),
                               mean=0.,
                               stddev=epsilon_std)
     return z_mean + K.exp(z_log_var) * epsilon
Example #43
def hlms_initializer(shape, dtype=None, std=0.2):
    # zero-mean normal initializer using the requested standard deviation
    return K.random_normal(shape, mean=0., stddev=std, dtype=dtype)
Example #44
 def __sampling(self, args):
     sample_mean, sample_log_std = args
     epsilon = K.random_normal(shape=(self.batch_size, self.latent_size))
     return sample_mean + K.exp(sample_log_std) * epsilon
Example #45
 def sample_z( args ):
     z_mean, z_log_var = args
     epsilon = K.random_normal( shape=( latent_dim, ),\
             mean = 0., stddev=1 )
     return z_mean + K.exp( z_log_var / 2 ) * epsilon
Example #46
def gaussian_noise(x, mean=0.0, std=0.1, random_state=1234):
    return x + K.random_normal(
        K.shape(x), mean=mean, std=std, seed=random_state)
Example #47
def gaussian_noise(x, mean=0.0, std=0.1, random_state=1234):
    return x + K.random_normal(K.shape(x), mean=mean, std=std, seed=random_state)
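A helper like this is usually applied through a Lambda layer so the noise is injected inside the graph. A minimal sketch; the surrounding model is an assumption:

from keras.layers import Input, Dense, Lambda
from keras.models import Model

inp = Input(shape=(32,))
noisy = Lambda(gaussian_noise)(inp)        # adds N(0, 0.1^2) noise with the defaults above
out = Dense(10, activation='softmax')(noisy)
model = Model(inp, out)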
Example #48
File: VAE.py Project: uc-lhcb/Calo-ML
 def sample_z(args):
     self.mu, self.sigma = args
     batch = K.shape(self.mu)[0]
     dim = K.int_shape(self.mu)[1]
     eps = K.random_normal(shape=(batch, dim))
     return self.mu + K.exp(self.sigma / 2) * eps
Example #49
def sample_z(args):
	global m, n_z
	mu, log_sigma = args
	eps = K.random_normal(shape=(m, n_z), mean=0., stddev=1.)
	return mu + K.exp(log_sigma / 2) * eps
def norm(x):
    return (x-np.min(x))/(np.max(x)-np.min(x))

part=8
thre=1
## correct value is 256
recog=Sequential()
recog.add(Dense(64,activation='relu',input_shape=(784,),init='glorot_uniform'))

recog_left=recog
recog_left.add(Dense(64,input_shape=(64,),activation='relu'))

recog_right=recog
recog_right.add(Dense(64,input_shape=(64,),activation='relu'))
recog_right.add(Lambda(lambda x: x + K.exp(x / 2) * K.random_normal(shape=(1, 64), mean=0.,
                              std=epsilon_std), output_shape=(64,)))
recog_right.add(Highway())
recog_right.add(Activation('sigmoid'))

recog1=Sequential()
recog1.add(Merge([recog_left,recog_right],mode = 'ave'))
recog1.add(Dense(784))

#### HERE***
recog11=Sequential()
layer=Dense(64,init='glorot_uniform',input_shape=(784,))
layer.trainable=False
recog11.add(layer)
layer2=Dense(784, activation='sigmoid',init='glorot_uniform')
layer2.trainable=False
recog11.add(layer2)
 def sampling(args):
     z_mean, z_log_sigma = args
     epsilon = K.random_normal(shape=(64, noise_dim,),
                       mean=0., std=0.01)
     return z_mean + K.exp(z_log_sigma) * epsilon
Example #52
 def sample(self, n):
     return K.random_normal(shape=(n, self.d))
Example #53
def sampling(args):
    z_mean, z_log_std = args
    epsilon = K.random_normal(shape=(batch_size, latent_dim),
                              mean=0., std=epsilon_std)
    return z_mean + K.exp(z_log_std) * epsilon
Example #54
 def _sampling(self, args):
     z_mean, z_log_var = args
     epsilon = K.random_normal(shape=(K.shape(z_mean)[0], self.n_z),
                               mean=0.,
                               stddev=self.epsilon_std)
     return z_mean + K.sqrt(K.exp(z_log_var)) * epsilon
Example #55
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape=(K.shape(z_mean)[0], latent_dim),
                              mean=0.,
                              stddev=epsilon_std)
    return z_mean + K.exp(z_log_var / 2) * epsilon
Example #56
def sample_z(args):
    mu, log_sigma = args
    eps = K.random_normal(shape=(m, latent_dim))
    return mu + K.exp(log_sigma / 2) * eps
Example #57
 def sampling(self, args):
     mu, log_var = args
     epsilon = K.random_normal(shape=K.shape(mu), mean=0, stddev=1.)
     return mu + K.exp(log_var / 2) * epsilon
Example #58
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal(shape = (K.shape(z_mean)[0], latent_dim), \
        mean = 0, stddev = 1)
    return z_mean + K.exp(0.5 * z_log_var) * epsilon
Example #59
def sample_z(args):
  z_mu, z_sigma = args
  eps = K.random_normal(shape=(K.shape(z_mu)[0], K.int_shape(z_mu)[1]))
  return z_mu + K.exp(z_sigma / 2) * eps

part = 8
thre = 1
## correct value is 256
recog = Sequential()
recog.add(
    Dense(64, activation='relu', input_shape=(784, ), init='glorot_uniform'))

recog_left = recog
recog_left.add(Dense(64, input_shape=(64, ), activation='relu'))

recog_right = recog
recog_right.add(Dense(64, input_shape=(64, ), activation='relu'))
recog_right.add(
    Lambda(lambda x: x + K.exp(x / 2) * K.random_normal(
        shape=(1, 64), mean=0., std=epsilon_std),
           output_shape=(64, )))
recog_right.add(Highway())
recog_right.add(Activation('sigmoid'))

recog1 = Sequential()
recog1.add(Merge([recog_left, recog_right], mode='ave'))
recog1.add(Dense(64, init='glorot_uniform'))
recog1.add(Dense(784, activation='sigmoid', init='glorot_uniform'))

recog1.compile(loss='mean_squared_error', optimizer=sgd, metrics=['mae'])

recog1.fit(x_train[0].reshape((1, 784)),
           x_train[0].reshape((1, 784)),
           nb_epoch=150,
           batch_size=30,