Example #1
    def build(self, input_shape):
        #The initialisation parameters
        self.mean = 0.0
        self.stddev = 1.0
        dtype = 'float32'
        self.seed = 1

        # The output and the input dimension
        k = self.output_dim
        d = self.input_dim

        # Initialise the variables to be trained, shaped according to the
        # function being implemented.
        self.W = K.random_normal_variable((k, d, d),
                                          self.mean,
                                          self.stddev,
                                          dtype=dtype)
        self.V = K.random_normal_variable((2 * d, k),
                                          self.mean,
                                          self.stddev,
                                          dtype=dtype)
        self.b = K.zeros((self.input_dim, ))

        #Set the variables to be trained.
        self.trainable_weights = [self.W, self.V, self.b]
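Assigning to self.trainable_weights by hand works in older Keras versions, but current Keras expects weights to be registered through add_weight. A minimal sketch of the same build logic in that style (the class name BilinearLayer and the layer semantics are assumptions for illustration, not taken from the snippet above):

from keras.layers import Layer

class BilinearLayer(Layer):
    """Hypothetical layer; only the weight-registration pattern matters here."""

    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        super(BilinearLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        d = input_shape[-1]
        k = self.output_dim
        # add_weight creates the variables and marks them trainable, so no
        # manual assignment to self.trainable_weights is needed.
        self.W = self.add_weight(name='W', shape=(k, d, d),
                                 initializer='random_normal', trainable=True)
        self.V = self.add_weight(name='V', shape=(2 * d, k),
                                 initializer='random_normal', trainable=True)
        self.b = self.add_weight(name='b', shape=(d,),
                                 initializer='zeros', trainable=True)
        super(BilinearLayer, self).build(input_shape)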
Example #2
 def sampling(args):
     z_mean, z_log_sigma = args
     epsilon = K.random_normal_variable(shape=(self.config.batch_size,
                                               self.config.latent_dim),
                                        mean=0.,
                                        scale=1.)
     return z_mean + K.exp(z_log_sigma / 2.) * epsilon
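A sampling closure like this is typically wrapped in a Lambda layer so the reparameterisation trick becomes part of the model graph. A minimal sketch with assumed names (latent_dim and the encoder layers are illustrative); it uses K.random_normal, which draws fresh noise on every forward pass, whereas K.random_normal_variable fixes epsilon once at graph-construction time:

from keras import backend as K
from keras.layers import Input, Dense, Lambda

latent_dim = 2  # assumed latent size

x = Input(shape=(784,))
h = Dense(64, activation='relu')(x)
z_mean = Dense(latent_dim)(h)
z_log_sigma = Dense(latent_dim)(h)

def sampling(args):
    z_mean, z_log_sigma = args
    # fresh noise per call, matched to the dynamic batch size
    epsilon = K.random_normal(shape=K.shape(z_mean))
    return z_mean + K.exp(z_log_sigma / 2.) * epsilon

z = Lambda(sampling)([z_mean, z_log_sigma])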
Example #3
    def build(self, input_shape):

        self.patch_layer.build(input_shape)

        self.input_shape_t = self.patch_layer.compute_output_shape(input_shape)

        self.dim = self.input_shape_t[-1]

        self.filters = self.dict_size

        self.strides = (1, self.dim)

        self.kernel_shape = (1, self.dim, self.dict_size)

        self.D0 = K.random_normal_variable((self.dim, self.dict_size),
                                           mean=0,
                                           scale=1)

        self.D = tf.matmul(tf.diag(1 / tf.norm(self.D0, axis=1)), self.D0)

        self.D_ols = tf.matmul(tf.linalg.inv(
            tf.matmul(self.D, self.D, transpose_a=True) +
            self.alpha * tf.eye(self.dict_size)),
                               self.D,
                               transpose_b=True)
        self.kernel = K.reshape(self.D_ols, self.kernel_shape)
        #self.add_weight(shape=self.kernel_shape,
        #                              initializer='glorot_uniform',
        #                              name='kernel')
        self.D_kernel = K.reshape(tf.matmul(self.D, self.D_ols),
                                  (1, self.dim, self.dim))

        self.trainable_weights = [self.D0]
Example #4
def test_specify_initial_state(layer_class):
    num_states = 2 if layer_class is recurrent.LSTM else 1

    # Test with Keras tensor
    inputs = Input((timesteps, embedding_dim))
    initial_state = [Input((units, )) for _ in range(num_states)]
    layer = layer_class(units)
    output = layer(inputs, initial_state=initial_state)
    model = Model([inputs] + initial_state, output)
    model.compile(loss='categorical_crossentropy', optimizer='adam')

    inputs = np.random.random((num_samples, timesteps, embedding_dim))
    initial_states = [
        np.random.random((num_samples, units)) for _ in range(num_states)
    ]
    targets = np.random.random((num_samples, units))
    model.fit([inputs] + initial_states, targets)

    # Test with non-Keras tensor
    inputs = Input((timesteps, embedding_dim))
    initial_state = [
        K.random_normal_variable((units, ), 0, 1) for _ in range(num_states)
    ]
    layer = layer_class(units)
    output = layer(inputs, initial_state=initial_state)
    model = Model([inputs], output)
    model.compile(loss='categorical_crossentropy', optimizer='adam')

    inputs = np.random.random((num_samples, timesteps, embedding_dim))
    targets = np.random.random((num_samples, units))
    model.fit(inputs, targets)
Example #6
    def __init__(self, val_shape, state_shape, output_shape, dropout):

        #inputs
        x = K.placeholder(shape=val_shape)
        h_1 = K.placeholder(shape=val_shape)
        C_1 = K.placeholder(shape=state_shape)

        #Forget gate
        comb = K.concatenate([h_1, x])
        Wf = K.random_normal_variable(shape=K.int_shape(comb),
                                      mean=0,
                                      scale=1,
                                      name='forget_W')
        bf = K.random_normal_variable(shape=K.int_shape(comb),
                                      mean=0.3,
                                      scale=1,
                                      name='forget_b')
        ft = self._sigmoid_node(x, h_1, Wf, bf)

        #input gate layer
        Wi = K.random_normal_variable(shape=K.int_shape(comb),
                                      mean=0,
                                      scale=1,
                                      name='i_W')
        bi = K.random_normal_variable(shape=K.int_shape(comb),
                                      mean=0,
                                      scale=1,
                                      name='i_b')
        it = self._sigmoid_node(x, h_1, Wi, bi)

        #new state inclusion layer
        Wcl = K.random_normal_variable(shape=K.int_shape(comb),
                                       mean=0,
                                       scale=1,
                                       name='cl_W')
        bcl = K.random_normal_variable(shape=K.int_shape(comb),
                                       mean=0,
                                       scale=1,
                                       name='cl_b')
        CLt = K.tanh((Wcl * comb) + bcl)

        #New state
        self.Ct = (ft * C_1) + (it * CLt)

        #Output layer
        Wo = K.random_normal_variable(shape=K.int_shape(comb),
                                      mean=0,
                                      scale=1,
                                      name='o_W')
        bo = K.random_normal_variable(shape=K.int_shape(comb),
                                      mean=0,
                                      scale=1,
                                      name='o_b')
        ot = self._sigmoid_node(x, h_1, Wo, bo)
        self.ht = ot * K.tanh(self.Ct)
Example #7
    def sampling(args):
        z_mean, z_log_var = args

        epsilon = K.random_normal_variable(shape=(params['batch_size'], params['hidden_dim']),
                                           mean=0., scale=1.)
        # insert kl loss here

        z_rand = z_mean + K.exp(z_log_var / 2) * kl_loss_var * epsilon
        return K.in_train_phase(z_rand, z_mean)
    def __call__(self, w):
        if self.sd_lambda == 0:
            return 0
        else:
            w = K.reshape(w, [-1, w.shape.as_list()[-1]])
            x = K.random_normal_variable(shape=(int(w.shape[1]), 1), mean=0, scale=1)

            for i in range(0, self.iterations): 
                x_p = K.dot(w, x)
                x = K.dot(K.transpose(w), x_p)
            
            return self.sd_lambda * K.sum(K.pow(K.dot(w, x), 2.0)) / K.sum(K.pow(x, 2.0))
    def __call__(self, w):
        norm = self.max_k

        if len(w.shape) == 4:
            x = K.random_normal_variable(shape=(1,) + self.in_shape[1:3] + (self.in_shape[0],), mean=0, scale=1)

            for i in range(0, self.iterations): 
                x_p = K.conv2d(x, w, strides=self.stride, padding=self.padding)
                x = K.conv2d_transpose(x_p, w, x.shape, strides=self.stride, padding=self.padding)
            
            Wx = K.conv2d(x, w, strides=self.stride, padding=self.padding)
            norm = K.sqrt(K.sum(K.pow(Wx, 2.0)) / K.sum(K.pow(x, 2.0)))
        else:
            x = K.random_normal_variable(shape=(int(w.shape[1]), 1), mean=0, scale=1)

            for i in range(0, self.iterations): 
                x_p = K.dot(w, x)
                x = K.dot(K.transpose(w), x_p)
            
            norm = K.sqrt(K.sum(K.pow(K.dot(w, x), 2.0)) / K.sum(K.pow(x, 2.0)))

        return w * (1.0 / K.maximum(1.0, norm / self.max_k))
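This __call__ estimates the spectral norm by power iteration and rescales the kernel, which matches the Keras weight-constraint interface. A hedged usage sketch; the class name SpectralMaxNorm and its constructor arguments are assumptions that mirror the attributes read above:

from keras.layers import Dense

# Hypothetical wrapper class around the __call__ above.
dense = Dense(128, kernel_constraint=SpectralMaxNorm(max_k=1.0, iterations=3))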
Example #10
    def sampling(args):
        # Could not save the model (pickle issue); needs some modification.
        z_mean, z_log_var = args

        epsilon = K.random_normal_variable(shape=(params['batch_size'],
                                                  params['hidden_dim']),
                                           mean=0.,
                                           scale=1.)

        # insert kl loss here

        z_rand = z_mean + K.exp(z_log_var / 2) * kl_loss_var * epsilon

        return K.in_train_phase(z_rand, z_mean)
Example #11
def cross_block(x, y):
    '''
        x_{l+1} = x_0 * x_l^T * w_l + b_l + x_l
        x : input feature (x_0)
        y : the l-th feature after the cross-feature layer
    '''
    y_shape = K.int_shape(y)
    weight = K.random_normal_variable(shape=(y_shape[1], 1),
                                      mean=0,
                                      scale=0.01)
    tmp = K.dot(x, weight)
    x_shape = K.int_shape(x)
    tmp = Lambda(lambda z: K.repeat_elements(z, x_shape[1], axis=1))(tmp)
    # tmp = Lambda(lambda z: K.dot(K.permute_dimensions(z[0], (0, 2, 1)), z[1]))([y, weight])
    return multiply([x, tmp])
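A short usage sketch of the call pattern, assuming a DCN-style stack where x0 is the raw feature tensor and every block reuses it (all names here are illustrative):

from keras.layers import Input

feature_dim = 32  # assumed input width
x0 = Input(shape=(feature_dim,))
xl = x0
for _ in range(3):
    # each block combines the original features x0 with the current features xl
    xl = cross_block(x0, xl)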
Example #12
    def func(x):
        o, u = x[0], x[1]
        A = []
        for i in range(5):
            A.append(x[i + 2])
        alpha = K.random_normal_variable(shape=(1, ),
                                         mean=0,
                                         scale=1,
                                         name='alpha')
        dot_prod3 = Lambda(name='dot_prod3_' + pfx,
                           function=lambda x: K.sum(
                               multiply([x[0], x[1]]), keepdims=True, axis=1))

        scores = []
        for i in range(5):
            scores.append(dot_prod3([(alpha * o + (1 - alpha) * u),
                                     A[i]]))  # shape = (None, 1)

        z = Activation('softmax')(concatenate(scores))  # shape = (None, 5)
        return z
    def build(self, input_shape):
        if self.multiple_inputs:
            n_in = input_shape[1][-1]
        else:
            n_in = input_shape[-1]
        print(n_in)
        #sda
        #n_in=n_in+1
        n_out = self.attention_dim
        lim = np.sqrt(6. / (n_in + n_out))
        W = K.random_uniform_variable((n_in, n_out),
                                      -lim,
                                      lim,
                                      name='{}_W'.format(self.name))
        b = K.zeros((n_out, ), name='{}_b'.format(self.name))
        self.W = W
        self.b = b

        self.v = K.random_normal_variable(shape=(n_out, 1),
                                          mean=0,
                                          scale=0.1,
                                          name='{}_v'.format(self.name))
        self.trainable_weights = [self.W, self.v, self.b]
Example #14
def init_normal(shape, name=None):
    return K.random_normal_variable(shape, mean=0.0, scale=0.01, name=name)
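Helpers with this (shape, name) signature follow the Keras 1 custom-initialiser convention, where the layer calls the function with the weight shape and uses the returned variable directly. A hedged usage sketch (Keras 1 style init argument; the Dense layer is illustrative):

from keras.layers import Dense

# Keras 1 passes the weight shape (and a name) to the init callable.
layer = Dense(64, init=init_normal)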
Example #15
def norm_weight(shape, scale=0.01, name=None):
    return K.random_normal_variable(shape, 0.0, scale, name=name)
def normal(shape, scale=0.05, name=None, dim_ordering='th'):
    return K.random_normal_variable(shape, 0.0, scale, name=name)
def init_normal(shape, name=None):
    return K.random_normal_variable(shape, 0.0, 0.01, name=name)
Example #18
 def sampling(self, args):
     z_mean, z_log_sigma = args
     epsilon = K.random_normal_variable(shape=(BATCH_SIZE, NOISE_DIM),
                                        mean=0.0,
                                        scale=1.0)
     return z_mean + K.exp(z_log_sigma) * epsilon
Example #19
 def init_w_k(shape, dtype=None):
     return K.random_normal_variable(kernel_shape,
                                     mean=0,
                                     scale=s,
                                     dtype=tf.float32,
                                     seed=self.seed)
Example #20
def small_init(shape, dtype=None):
    return K.random_normal_variable(shape=shape, mean=.0, scale=.01)
# Set up the decoder, using `encoder_states` as initial state.
decoder_inputs = Input(shape=(max_decoder_seq_length, num_decoder_tokens))
# We set up our decoder to return full output sequences,
# and to return internal states as well. We don't use the
# return states in the training model, but we will use them in inference.
decoder_lstm = LSTM(latent_dim, return_state=True, return_sequences=True)
decoder_outputs, decoder_hidden, _ = decoder_lstm(decoder_inputs,
                                     initial_state=encoder_states)

print ("decoder_outputs")
print (decoder_outputs)

#decoder_dense = Dense(num_decoder_tokens, activation='softmax')
#decoder_outputs = decoder_dense(decoder_outputs)

vt = K.random_normal_variable(shape=(1, latent_dim), mean=0, scale=1)  # Gaussian distribution (input_seq_length, 1)
print ("vt")
print (vt)

print ("decoder_hidden")
print (decoder_hidden)
#en_seq = Reshape((-1,1,latent_dim))(encoder_outputs) #?,latent_dim
#en_seq =K.squeeze(en_seq,0)
en_seq = encoder_outputs

#en_seq = K.repeat(en_seq, max_encoder_seq_length)
print ("en_seq")
print (en_seq)

#dec_seq = Reshape((-1,1,latent_dim))(decoder_hidden)
dec_seq = K.repeat(decoder_hidden, max_encoder_seq_length)
Example #22
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal_variable(shape=(z_mean.get_shape()),
                                       mean=0.,
                                       scale=epsilon_std)
    return z_mean + K.exp(z_log_var / 2) * epsilon
    def build(self, input_shape):
        self.input_spec = [InputSpec(shape=input_shape)]
        self.input_dim = input_shape[2]

        if self.stateful:
            self.reset_states()
        else:
            # initial states: 2 all-zero tensors of shape (output_dim)
            self.states = [None, None]

        if self.consume_less == 'gpu':
            self.W = self.init((self.input_dim, 4 * self.output_dim),
                               name='{}_W'.format(self.name))
            self.U = self.inner_init((self.output_dim, 4 * self.output_dim),
                                     name='{}_U'.format(self.name))

            self.b = K.variable(np.hstack(
                (np.zeros(self.output_dim),
                 K.get_value(self.forget_bias_init((self.output_dim, ))),
                 np.zeros(self.output_dim), np.zeros(self.output_dim))),
                                name='{}_b'.format(self.name))
            self.trainable_weights = [self.W, self.U, self.b]
        else:
            self.W_i = self.init((self.input_dim, self.output_dim),
                                 name='{}_W_i'.format(self.name))
            self.U_i = self.inner_init((self.output_dim, self.output_dim),
                                       name='{}_U_i'.format(self.name))
            self.b_i = K.zeros((self.output_dim, ),
                               name='{}_b_i'.format(self.name))

            self.W_f = self.init((self.input_dim, self.output_dim),
                                 name='{}_W_f'.format(self.name))
            self.U_f = self.inner_init((self.output_dim, self.output_dim),
                                       name='{}_U_f'.format(self.name))
            self.b_f = self.forget_bias_init((self.output_dim, ),
                                             name='{}_b_f'.format(self.name))

            self.W_c = self.init((self.input_dim, self.output_dim),
                                 name='{}_W_c'.format(self.name))
            self.U_c = self.inner_init((self.output_dim, self.output_dim),
                                       name='{}_U_c'.format(self.name))
            self.b_c = K.zeros((self.output_dim, ),
                               name='{}_b_c'.format(self.name))

            self.W_o = self.init((self.input_dim, self.output_dim),
                                 name='{}_W_o'.format(self.name))
            self.U_o = self.inner_init((self.output_dim, self.output_dim),
                                       name='{}_U_o'.format(self.name))
            self.b_o = K.zeros((self.output_dim, ),
                               name='{}_b_o'.format(self.name))

            # added by Di Kai
            self.W_h = self.init((self.output_dim, self.output_dim),
                                 name='{}_W_h'.format(self.name))
            self.u_h = K.random_normal_variable(
                (self.output_dim,), 1.0, 0.1, name='{}_u_h'.format(self.name))

            self.trainable_weights = [
                self.W_i, self.U_i, self.b_i, self.W_c, self.U_c, self.b_c,
                self.W_f, self.U_f, self.b_f, self.W_o, self.U_o, self.b_o,
                self.W_h, self.u_h
            ]

            self.W = K.concatenate([self.W_i, self.W_f, self.W_c, self.W_o])
            self.U = K.concatenate([self.U_i, self.U_f, self.U_c, self.U_o])
            self.b = K.concatenate([self.b_i, self.b_f, self.b_c, self.b_o])

        self.regularizers = []
        if self.W_regularizer:
            self.W_regularizer.set_param(self.W)
            self.regularizers.append(self.W_regularizer)
        if self.U_regularizer:
            self.U_regularizer.set_param(self.U)
            self.regularizers.append(self.U_regularizer)
        if self.b_regularizer:
            self.b_regularizer.set_param(self.b)
            self.regularizers.append(self.b_regularizer)

        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
Example #24
def sampling(args):
    encoder_mean, encoder_log_sigma = args
    epsilon = K.random_normal_variable(shape=(batch_size, 2048),
                                       mean=0.,
                                       scale=0.1)
    return encoder_mean + K.exp(encoder_log_sigma) * epsilon
Example #25
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from keras import backend as K

# Fixed
batch = 4000

alpha = np.random.normal(0, 0.5, 100)
X_tr = np.random.normal(0, 0.5, (batch, ))
Z_tr = np.mean(np.exp(-X_tr.reshape(-1, 1)**2 * alpha), axis=1)
# sort
idx = np.argsort(X_tr)
X_tr = X_tr[idx]
Z_tr = Z_tr[idx]

# Learned
coefs = K.random_normal_variable((100, ), 0, 0.1)
pows = K.arange(100, dtype='float32')
X = K.placeholder((None, 1))
Y = K.mean(K.exp(-K.square(X) * coefs), axis=1)
Z = K.placeholder(ndim=1)

loss = K.mean(K.square(Y - Z))

optimizer = tf.train.AdamOptimizer()
opt = optimizer.minimize(loss, var_list=[coefs])
grad = K.Function([X, Z], [opt])

get_loss = K.Function([X, Z], [loss])
get_Y = K.Function([X], [Y])

for i in range(1000):
    # one optimisation step per iteration over the full training set
    grad([X_tr.reshape(-1, 1), Z_tr])
from keras import backend as K
from keras.losses import categorical_crossentropy
from keras.activations import softmax
from keras import optimizers
import numpy as np

dataset_size = 200000
X = np.random.rand(dataset_size, 2)
labels = np.zeros((dataset_size, 3))
labels[X[:, 0] > X[:,1]] = [0,0,1]
labels[X[:, 0] <= X[:,1]] = [1,0,0]
labels[X[:,1] + X[:, 0] > 1] = [0, 1, 0]

x = K.placeholder(shape=(None, 2))
t = K.placeholder(shape=(None, 3))

theta1 = K.random_normal_variable(shape=(2, 12), mean=0, scale=0.01)
bias1 = K.random_normal_variable(shape=(1, 12), mean=0, scale=0.01)

theta2 = K.random_normal_variable(shape=(12, 3), mean=0, scale=0.01)
bias2 = K.random_normal_variable(shape=(1, 3), mean=0, scale=0.01)

def forward(x):
    y = K.dot(x, theta1) + bias1
    y = K.maximum(y, 0.)
    return K.dot(y, theta2) + bias2


loss = categorical_crossentropy(t, softmax(forward(x)))
params = [theta1, bias1, theta2, bias2]

# sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
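The commented-out optimiser hints at how the parameters above would be trained. A minimal sketch of one way to do it with the backend API, assuming Keras 2.1+ where Optimizer.get_updates(loss, params) has this signature:

sgd = optimizers.SGD(lr=0.01, momentum=0.9, nesterov=True)
updates = sgd.get_updates(loss=K.mean(loss), params=params)
train_step = K.function([x, t], [K.mean(loss)], updates=updates)

for epoch in range(10):
    # one full-batch update per epoch; prints the current mean loss
    print(train_step([X, labels])[0])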
def init_normal(shape, dtype=None):
    return K.random_normal_variable(shape, 0.0, scale=0.01, dtype=dtype)
Example #28
# K.tensorflow_backend._get_available_gpus()
from keras import metrics
from keras import backend as K
import numpy as np


def squared_error(a, b):
    return K.square(a - b)


a = K.random_normal_variable(shape=(3, 4), mean=0, scale=1)
b = K.random_normal_variable(shape=(3, 4), mean=0, scale=1)

c = squared_error(a, b)

print(c)
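Note that print(c) only shows the symbolic tensor. To see actual numbers, evaluate the expression through the backend:

# K.eval runs the graph in the backend session and returns a NumPy array.
print(K.eval(c))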
Example #29
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 14 13:10:30 2018

@author: Youssef
"""

from keras import backend as K

inputs = K.placeholder(shape=(2, 4, 5))
# also works:
inputs = K.placeholder(ndim=3)

import numpy as np
x = np.random.random((3, 4, 5))
y = K.variable(value=x)

# Initializing Tensors with Random Numbers
b = K.random_uniform_variable(shape=(3, 4), low=0,
                              high=1)  # Uniform distribution
c = K.random_normal_variable(shape=(3, 4), mean=0,
                             scale=1)  # Gaussian distribution
d = K.random_normal_variable(shape=(3, 4), mean=0, scale=1)
a = b + c * K.abs(d)
c = K.dot(a, K.transpose(b))
a = K.sum(b, axis=1)
a = K.softmax(b)
a = K.concatenate([b, c], axis=-1)
Example #30
 def _subNM(x):
     return K.map_fn(
         lambda i: K.random_normal_variable(
             shape=(1, ), mean=m[i], scale=s[i]),
         K.arange(K.constant(len_u)))
Example #31
def sampling(args):
    z_mean, z_log_var = args
    epsilon = K.random_normal_variable(shape=(batch_size, dim_latent),
                                       mean=0.,
                                       scale=epsilon_std)
    return z_mean + K.exp(z_log_var / 2) * epsilon
            start.append(mean)
        mean = np.mean([model.dict_to_array(vals) for vals in start], axis=0)
        var = np.ones_like(mean)
        potential = quadpotential.QuadPotentialDiagAdapt(
            model.ndim, mean, var, 10)

        return pm.step_methods.HamiltonianMC(step_scale=stepsize,
                                             potential=potential,
                                             path_length=1)
    else:
        raise NotImplementedError()


ENERGY_FUNCTIONS = OrderedDict((
    ("banana", (Banana(), lambda: [
        K.random_normal_variable(mean=0., scale=1., shape=(1, )),
        K.random_normal_variable(mean=0., scale=1., shape=(1, ))
    ])),
    ("gmm1", (
        Gmm1(),
        lambda:
        [K.variable(K.random_normal((1, )), name="x", dtype=K.floatx())],
    )),
    ("gmm2", (
        Gmm2(),
        lambda:
        [K.variable(K.random_normal((1, )), name="x", dtype=K.floatx())],
    )),
    ("gmm3", (
        Gmm3(),
        lambda: