Example #1
    def build(self, input_shape):
        if self.dim_ordering == 'th':
            stack_size = input_shape[1]
            self.W_shape = (self.nb_filter, stack_size, self.nb_row,
                            self.nb_col)
        elif self.dim_ordering == 'tf':
            stack_size = input_shape[3]
            self.W_shape = (self.nb_row, self.nb_col, stack_size,
                            self.nb_filter)
        else:
            raise Exception('Invalid dim_ordering: ' + self.dim_ordering)

        # self.W = self.init(self.W_shape, name='{}_W'.format(self.name))

        input_dim, output_dim = initializations.get_fans(self.W_shape)
        v = np.sqrt(6.0 / (input_dim + output_dim))
        values = np.random.uniform(low=-v, high=v, size=self.W_shape)
        self.mean = K.variable(values, name='mean')
        values = np.random.uniform(low=-v, high=v, size=self.W_shape)
        self.log_std = K.variable(values, name='log_std')

        self.epsilon = K.random_normal(self.W_shape,
                                       mean=self.mean_prior,
                                       std=self.std_prior)
        self.W = self.epsilon * K.log(1.0 + K.exp(self.log_std)) + self.mean

        if self.bias:
            self.b = K.zeros((self.nb_filter, ), name='{}_b'.format(self.name))
            self.trainable_weights = [self.mean, self.log_std, self.b]
        else:
            self.trainable_weights = [self.mean, self.log_std]
        self.regularizers = []

        if self.W_regularizer:
            self.W_regularizer.set_param(self.W)
            self.regularizers.append(self.W_regularizer)

        if self.bias and self.b_regularizer:
            self.b_regularizer.set_param(self.b)
            self.regularizers.append(self.b_regularizer)

        if self.activity_regularizer:
            self.activity_regularizer.set_layer(self)
            self.regularizers.append(self.activity_regularizer)

        self.constraints = {}
        if self.W_constraint:
            self.constraints[self.W] = self.W_constraint
        if self.bias and self.b_constraint:
            self.constraints[self.b] = self.b_constraint

        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
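
This build() method derives a Glorot-uniform bound from the kernel's fan-in and fan-out and then constructs the weight through a softplus reparameterisation. Below is a rough NumPy sketch of those two steps, assuming the Keras 1.x get_fans() convention for a 'th'-ordered kernel (fan_in = stack_size * rows * cols, fan_out = nb_filter * rows * cols) and a hypothetical kernel shape; the example above draws epsilon from its own prior parameters, while the sketch uses a standard normal.

import numpy as np

def conv_fans_th(W_shape):
    # Mirrors what Keras 1.x initializations.get_fans() is assumed to do
    # for a Theano-ordered kernel (nb_filter, stack_size, nb_row, nb_col).
    nb_filter, stack_size, nb_row, nb_col = W_shape
    receptive_field = nb_row * nb_col
    return stack_size * receptive_field, nb_filter * receptive_field

W_shape = (32, 3, 3, 3)                      # hypothetical kernel shape
fan_in, fan_out = conv_fans_th(W_shape)
v = np.sqrt(6.0 / (fan_in + fan_out))        # Glorot-uniform bound

mean = np.random.uniform(-v, v, W_shape)     # variational mean
log_std = np.random.uniform(-v, v, W_shape)  # unconstrained std parameter
epsilon = np.random.normal(0.0, 1.0, W_shape)
# Reparameterised weight sample: W = mean + softplus(log_std) * epsilon
W = mean + np.log1p(np.exp(log_std)) * epsilon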
Example #2
    def build(self, input_shape):
        if self.dim_ordering == 'th':
            stack_size = input_shape[1]
            self.W_shape = (self.nb_filter, stack_size, self.nb_row, self.nb_col)
        elif self.dim_ordering == 'tf':
            stack_size = input_shape[3]
            self.W_shape = (self.nb_row, self.nb_col, stack_size, self.nb_filter)
        else:
            raise Exception('Invalid dim_ordering: ' + self.dim_ordering)

        # self.W = self.init(self.W_shape, name='{}_W'.format(self.name))

        input_dim, output_dim = initializations.get_fans(self.W_shape)
        v = np.sqrt(6.0 / (input_dim + output_dim))
        values = np.random.uniform(low=-v, high=v, size=self.W_shape)
        self.mean = K.variable(values, name='mean')
        values = np.random.uniform(low=-v, high=v, size=self.W_shape)
        self.log_std = K.variable(values, name='log_std')


        self.epsilon = K.random_normal(self.W_shape,
                                       mean=self.mean_prior, std=self.std_prior)
        self.W = self.epsilon*K.log(1.0 + K.exp(self.log_std)) + self.mean

        if self.bias:
            self.b = K.zeros((self.nb_filter,), name='{}_b'.format(self.name))
            self.trainable_weights = [self.mean, self.log_std, self.b]
        else:
            self.trainable_weights = [self.mean, self.log_std]
        self.regularizers = []

        if self.W_regularizer:
            self.W_regularizer.set_param(self.W)
            self.regularizers.append(self.W_regularizer)

        if self.bias and self.b_regularizer:
            self.b_regularizer.set_param(self.b)
            self.regularizers.append(self.b_regularizer)

        if self.activity_regularizer:
            self.activity_regularizer.set_layer(self)
            self.regularizers.append(self.activity_regularizer)

        self.constraints = {}
        if self.W_constraint:
            self.constraints[self.W] = self.W_constraint
        if self.bias and self.b_constraint:
            self.constraints[self.b] = self.b_constraint

        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
Example #3
def glorot_uniform_sigm(shape, name=None, dim_ordering='th'):
    """
    Glorot style weight initializer for sigmoid activations.
    
    Like keras.initializations.glorot_uniform(), but with uniform random interval like in 
    Deeplearning.net tutorials.
    They claim that the initialization random interval should be
      +/- sqrt(6 / (fan_in + fan_out)) (like Keras' glorot_uniform()) when tanh activations are used, 
      +/- 4 sqrt(6 / (fan_in + fan_out)) when sigmoid activations are used.
    See: http://deeplearning.net/tutorial/mlp.html#going-from-logistic-regression-to-mlp
    """
    fan_in, fan_out = get_fans(shape, dim_ordering=dim_ordering)
    s = 4. * np.sqrt(6. / (fan_in + fan_out))
    return uniform(shape, s, name=name)
Example #4
def glorot_uniform_sigm(shape):
    """
    Glorot style weight initializer for sigmoid activations.
    
    Like keras.initializations.glorot_uniform(), but with uniform random interval like in 
    Deeplearning.net tutorials.
    They claim that the initialization random interval should be
      +/- sqrt(6 / (fan_in + fan_out)) (like Keras' glorot_uniform()) when tanh activations are used, 
      +/- 4 sqrt(6 / (fan_in + fan_out)) when sigmoid activations are used.
    See: http://deeplearning.net/tutorial/mlp.html#going-from-logistic-regression-to-mlp
    """
    fan_in, fan_out = get_fans(shape)
    s = 4. * np.sqrt(6. / (fan_in + fan_out))
    return uniform(shape, s)
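
These initializers follow the Keras 1.x convention in which a layer calls its init function with the weight shape (and usually a name keyword), so they can be passed directly via a layer's init argument. A minimal usage sketch under that assumption, using the Example #3 variant (the one accepting name, which matches how Keras 1.x layers invoke custom initializers):

from keras.models import Sequential
from keras.layers import Dense

model = Sequential()
model.add(Dense(64, input_dim=100, init=glorot_uniform_sigm,
                activation='sigmoid'))
model.add(Dense(1, init=glorot_uniform_sigm, activation='sigmoid'))

The Example #4 variant drops the name argument, so it only works with callers that do not pass name when invoking the initializer.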
Example #5
def custom_initialization(shape, name=None):
    fan_in, fan_out = get_fans(shape)
    loc = (fan_in + fan_out) / 2.
    scale = (fan_in + fan_out) / 2.
    return K.variable(np.random.normal(loc, scale, shape) * (1. / np.sqrt(fan_in / 2)), name=name)
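
For a 2-D weight shape, get_fans() simply returns (shape[0], shape[1]), so this draw is a normal distribution centred and scaled at the average fan, then shrunk by a He-style factor sqrt(2 / fan_in). A small worked example with a hypothetical shape:

import numpy as np

shape = (256, 128)                        # hypothetical 2-D weight shape
fan_in, fan_out = shape                   # what get_fans() returns for 2-D
loc = scale = (fan_in + fan_out) / 2.     # 192.0
factor = 1. / np.sqrt(fan_in / 2)         # sqrt(2 / fan_in) ~= 0.088
values = np.random.normal(loc, scale, shape) * factor
# values end up roughly normal with mean ~17 and std ~17, not zero-centred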
Example #6
    def build(self, input_shape):
        assert len(input_shape) == 2
        input_dim = input_shape[1]
        self.input_spec = [
            InputSpec(dtype=K.floatx(), shape=(None, input_dim))
        ]

        # Start from the connectivity mask: entries equal to 1 mark the
        # connections that get a real (trainable) weight.
        temp_W = np.asarray(self.input_output_mat, dtype=K.floatx())

        if self.input_output_mat is not None:
            fan_in, fan_out = initializations.get_fans(
                (input_dim, self.output_dim), dim_ordering='th')
            scale = np.sqrt(6. / (fan_in + fan_out))
            # Re-draw only the allowed connections with Glorot-uniform values;
            # masked-out entries keep their value from the mask (i.e. 0).
            for i in range(self.input_output_mat.shape[0]):
                for j in range(self.input_output_mat.shape[1]):
                    if self.input_output_mat[i, j] == 1.:
                        temp_W[i, j] = np.random.uniform(low=-scale,
                                                         high=scale)

        # Store the sparse weight matrix as a CSR matrix wrapped in a
        # Theano shared variable.
        temp_W = csr_matrix(temp_W)
        self.W = theano.shared(value=temp_W,
                               name='{}_W'.format(self.name),
                               strict=False)

        if self.bias:
            self.b = K.zeros((self.output_dim, ),
                             name='{}_b'.format(self.name))
            self.trainable_weights = [self.W, self.b]
        else:
            self.trainable_weights = [self.W]
        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights

        self.regularizers = []
        if self.W_regularizer:
            self.W_regularizer.set_param(self.W)
            self.regularizers.append(self.W_regularizer)

        if self.bias and self.b_regularizer:
            self.b_regularizer.set_param(self.b)
            self.regularizers.append(self.b_regularizer)

        if self.activity_regularizer:
            self.activity_regularizer.set_layer(self)
            self.regularizers.append(self.activity_regularizer)

        self.constraints = {}
        if self.W_constraint:
            self.constraints[self.W] = self.W_constraint
        if self.bias and self.b_constraint:
            self.constraints[self.b] = self.b_constraint
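
The nested loop in build() above re-draws a Glorot-uniform value only where the connectivity mask input_output_mat equals 1 and leaves every other entry at its mask value (i.e. 0). A vectorised NumPy sketch of that masked initialisation, with hypothetical names and a dense 0/1 mask:

import numpy as np

def masked_glorot_uniform(mask, seed=None):
    # Glorot-uniform values where mask == 1, zeros elsewhere (sketch).
    rng = np.random.RandomState(seed)
    fan_in, fan_out = mask.shape
    scale = np.sqrt(6. / (fan_in + fan_out))
    dense = rng.uniform(low=-scale, high=scale, size=mask.shape)
    return np.where(mask == 1., dense, 0.).astype('float32')

mask = np.array([[1, 0, 0],
                 [1, 1, 0],
                 [0, 1, 1],
                 [0, 0, 1]], dtype='float32')
W0 = masked_glorot_uniform(mask, seed=0)   # ready to wrap in csr_matrix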
Example #7
def nonneg_init(shape, name, dim_ordering='th'):
    fan_in, fan_out = get_fans(shape, dim_ordering=dim_ordering)
    s = np.sqrt(2. / (fan_in + fan_out))
    return K.variable(np.abs(np.random.normal(loc=0.0, scale=s, size=shape)),
                      name=name)
Example #8
def glorot_uniform_positive(shape, name=None, dim_ordering='th'):
    fan_in, fan_out = get_fans(shape, dim_ordering=dim_ordering)
    s = np.sqrt(6. / (fan_in + fan_out))
    return K.random_uniform_variable(shape, 0.001, 2*s, name=name)