Example #1
    def __init__(self, nb_filter, filter_length, direction='Down',
                 init='glorot_uniform', inner_init='orthogonal',
                 forget_bias_init='one', activation='tanh',
                 inner_activation='hard_sigmoid',
                 border_mode="same", sub_sample=(1, 1),
                 W_regularizer=None, U_regularizer=None, b_regularizer=None,
                 dropout_W=0., dropout_U=0., **kwargs):

        self.nb_filter = nb_filter
        self.filter_length = filter_length
        self.border_mode = border_mode
        self.subsample = sub_sample
        self.direction = direction

        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        self.forget_bias_init = initializations.get(forget_bias_init)
        self.activation = activations.get(activation)
        self.inner_activation = activations.get(inner_activation)
        self.W_regularizer = regularizers.get(W_regularizer)
        self.U_regularizer = regularizers.get(U_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.dropout_W, self.dropout_U = dropout_W, dropout_U

        kwargs["nb_filter"] = nb_filter
        kwargs["filter_length"] = filter_length

        if self.dropout_W or self.dropout_U:
            self.uses_learning_phase = True
        super(DiagLSTM, self).__init__(**kwargs)
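Every constructor in these examples leans on the same lookup idiom: a string identifier (or an already-callable object) is resolved through each module's get(). A minimal standalone sketch, assuming a Keras 2.x install (there the module is named initializers; the snippets above use the Keras 1.x name initializations):

from keras import activations, initializers

act = activations.get('hard_sigmoid')      # string -> activation callable
init = initializers.get('glorot_uniform')  # string -> Initializer instance
print(act, init)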
Example #2
    def __init__(self, input_dim, output_dim=128, train_init_cell=True, train_init_h=True,
                 init='glorot_uniform', inner_init='orthogonal', forget_bias_init='one',
                 input_activation='tanh', gate_activation='hard_sigmoid', output_activation='tanh',
                 weights=None, truncate_gradient=-1, return_sequences=False):

        super(LSTMLayer, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.truncate_gradient = truncate_gradient
        self.return_sequences = return_sequences

        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        self.forget_bias_init = initializations.get(forget_bias_init)
        self.input_activation = activations.get(input_activation)
        self.gate_activation = activations.get(gate_activation)
        self.output_activation = activations.get(output_activation)
        self.input = T.tensor3()
        self.time_range = None

        W_z = self.init((self.input_dim, self.output_dim)).get_value(borrow=True)
        R_z = self.inner_init((self.output_dim, self.output_dim)).get_value(borrow=True)
        # self.b_z = shared_zeros(self.output_dim)

        W_i = self.init((self.input_dim, self.output_dim)).get_value(borrow=True)
        R_i = self.inner_init((self.output_dim, self.output_dim)).get_value(borrow=True)
        # self.b_i = shared_zeros(self.output_dim)

        W_f = self.init((self.input_dim, self.output_dim)).get_value(borrow=True)
        R_f = self.inner_init((self.output_dim, self.output_dim)).get_value(borrow=True)
        # self.b_f = self.forget_bias_init(self.output_dim)

        W_o = self.init((self.input_dim, self.output_dim)).get_value(borrow=True)
        R_o = self.inner_init((self.output_dim, self.output_dim)).get_value(borrow=True)
        # self.b_o = shared_zeros(self.output_dim)

        self.h_m1 = shared_zeros(shape=(1, self.output_dim), name='h0')
        self.c_m1 = shared_zeros(shape=(1, self.output_dim), name='c0')

        W = np.vstack((W_z[np.newaxis, :, :],
                       W_i[np.newaxis, :, :],
                       W_f[np.newaxis, :, :],
                       W_o[np.newaxis, :, :]))  # shape = (4, input_dim, output_dim)
        R = np.vstack((R_z[np.newaxis, :, :],
                       R_i[np.newaxis, :, :],
                       R_f[np.newaxis, :, :],
                       R_o[np.newaxis, :, :]))  # shape = (4, output_dim, output_dim)
        self.W = theano.shared(W, name='Input to hidden weights (zifo)', borrow=True)
        self.R = theano.shared(R, name='Recurrent weights (zifo)', borrow=True)
        self.b = theano.shared(np.zeros(shape=(4, self.output_dim), dtype=theano.config.floatX),
                               name='bias', borrow=True)

        self.params = [self.W, self.R]
        if train_init_cell:
            self.params.append(self.c_m1)
        if train_init_h:
            self.params.append(self.h_m1)

        if weights is not None:
            self.set_weights(weights)
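A NumPy-only sketch of the gate stacking performed above: four per-gate matrices of shape (input_dim, output_dim) are each given a leading singleton axis and stacked into one (4, input_dim, output_dim) tensor. Dimensions here are made up for illustration.

import numpy as np

input_dim, output_dim = 3, 5
gates = [np.random.randn(input_dim, output_dim) for _ in 'zifo']
W = np.vstack([g[np.newaxis, :, :] for g in gates])
assert W.shape == (4, input_dim, output_dim)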
Example #3
 def __init__(self, output_dim, 
              init='uniform', inner_init='orthogonal', forget_bias_init='one',
              activation='tanh', inner_activation='hard_sigmoid',
              U_init='identity',
              v_init=0.1,
              b_init=0,
              weights=None, truncate_gradient=-1, return_sequences=False,
              input_dim=None, input_length=None, **kwargs):
     self.output_dim = output_dim
     self.init = initializations.get(init)
     self.inner_init = initializations.get(inner_init)
     self.forget_bias_init = initializations.get(forget_bias_init)
     self.activation = activations.get(activation)
     self.inner_activation = activations.get(inner_activation)
     self.truncate_gradient = truncate_gradient
     self.return_sequences = return_sequences
     self.initial_weights = weights
     self.U_init = U_init
     self.v_init = v_init
     self.b_init = b_init
     self.input_dim = input_dim
     self.input_length = input_length
     if self.input_dim:
          kwargs['input_shape'] = (self.input_length, self.input_dim)
     super(LSTM_td, self).__init__(**kwargs)
Example #4
    def __init__(
        self,
        output_dim,
        n_experts,
        init="glorot_uniform",
        inner_init="orthogonal",
        activation="tanh",
        inner_activation="hard_sigmoid",
        weights=None,
        truncate_gradient=-1,
        return_sequences=False,
        input_dim=None,
        input_length=None,
        go_backwards=False,
        **kwargs
    ):
        self.output_dim = output_dim
        self.n_experts = n_experts
        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        self.activation = activations.get(activation)
        self.inner_activation = activations.get(inner_activation)
        self.truncate_gradient = truncate_gradient
        self.return_sequences = return_sequences
        self.initial_weights = weights
        self.go_backwards = go_backwards

        self.input_dim = input_dim
        self.input_length = input_length
        if self.input_dim:
            kwargs["input_shape"] = (self.input_length, self.input_dim)
        super(ExpertIIgated, self).__init__(**kwargs)
Example #5
	def __init__(self, output_dim,
		init='glorot_uniform', inner_init='orthogonal',
		activation='sigmoid', inner_activation='hard_sigmoid',
		weights=None, truncate_gradient=-1, return_sequences=False,
		input_dim=None, input_length=None, go_backwards=False, dropout=0.0, **kwargs):

		self.output_dim = output_dim
		self.init = initializations.get(init)
		self.inner_init = initializations.get(inner_init)
		self.activation = activations.get(activation)
		self.inner_activation = activations.get(inner_activation)
		self.truncate_gradient = truncate_gradient
		self.return_sequences = return_sequences
		self.initial_weights = weights
		self.go_backwards = go_backwards

		# for dropout
		self.p = dropout  # dropout rate
		self.srng = RandomStreams(seed=np.random.randint(10e6))

		self.input_dim = input_dim
		self.input_length = input_length
		if self.input_dim:
		    kwargs['input_shape'] = (self.input_length, self.input_dim)
		super(TGRU, self).__init__(**kwargs)
Example #6
    def __init__(self, w_dim, q_dim, output_dim=1, init='glorot_uniform', activation='linear',
                 activation_w='sigmoid', activation_q='sigmoid', weights=None,
                 regularizers=[None]*4, activity_regularizer=None, constraints=[None]*4,
                 input_dim=None, **kwargs):
        self.max_words = 100
        self.w_dim = w_dim
        self.q_dim = q_dim
        self.input_dim = self.w_dim + self.q_dim
        self.activation = activations.get(activation)
        self.activation_w = activations.get(activation_w)
        self.activation_q = activations.get(activation_q)

        self.init = initializations.get(init)
        self.output_dim = output_dim

        self.W_regularizer = keras.regularizers.get(regularizers[0])
        self.w_regularizer = keras.regularizers.get(regularizers[1])
        self.Q_regularizer = keras.regularizers.get(regularizers[2])
        self.q_regularizer = keras.regularizers.get(regularizers[3])
        self.activity_regularizer = keras.regularizers.get(activity_regularizer)

        self.W_constraint = keras.constraints.get(constraints[0])
        self.w_constraint = keras.constraints.get(constraints[1])
        self.Q_constraint = keras.constraints.get(constraints[2])
        self.q_constraint = keras.constraints.get(constraints[3])
        self.constraints = [self.W_constraint, self.w_constraint,
                            self.Q_constraint, self.q_constraint]

        self.initial_weights = weights

        kwargs['input_shape'] = (self.w_dim + self.q_dim, self.max_words,)
        super(ClasRel, self).__init__(**kwargs)
Example #7
    def __init__(self, output_dim, nb_rows, nb_cols, n_dim = 2,
                 init='glorot_uniform', inner_init='orthogonal', forget_bias_init='one',
                 activation='tanh', inner_activation='hard_sigmoid',
                 weights=None, truncate_gradient=-1, return_sequences=False,
                 input_dim=None, input_length=None, go_backwards=False, **kwargs):
        
        self.n_dim = n_dim + 1
        self.nb_cols = nb_cols
        self.nb_rows = nb_rows

        self.output_dim = 1  # output_dim (hard-coded; the output_dim argument is ignored)
        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        self.forget_bias_init = initializations.get(forget_bias_init)
        self.activation = activations.get(activation)
        self.inner_activation = activations.get(inner_activation)
        self.truncate_gradient = truncate_gradient
        self.return_sequences = return_sequences
        self.initial_weights = weights
        self.go_backwards = go_backwards

        # Calculate the number of dimensions
        self.input_dim = input_dim
        self.input_length = input_length
        if self.input_dim:
            kwargs['input_shape'] = (self.input_length, self.input_dim)
        super(GridLSTM, self).__init__(**kwargs)
Example #8
    def __init__(self, input_dim, states_dim, causes_dim,
                 init='glorot_uniform', inner_init='orthogonal',
                 activation='sigmoid', gate_activation='sigmoid',
                 weights=None, return_mode='states',
                 truncate_gradient=-1, return_sequences=False):
        super(FDPCN, self).__init__()
        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        self.input_dim = input_dim
        self.states_dim = states_dim
        self.causes_dim = causes_dim
        self.truncate_gradient = truncate_gradient
        self.activation = activations.get(activation)
        self.gate_activation = activations.get(gate_activation)
        self.return_sequences = return_sequences
        self.return_mode = return_mode
        self.input = T.tensor3()

        self.I2S = self.init((self.input_dim, self.states_dim))
        self.S2S = self.inner_init((self.states_dim, self.states_dim))
        self.Sb = shared_zeros((self.states_dim))

        self.S2C = self.init((self.states_dim, self.causes_dim))
        self.C2C = self.inner_init((self.causes_dim, self.causes_dim))
        self.Cb = shared_zeros((self.causes_dim))
        self.CbS = shared_zeros((self.states_dim))
        self.C2S = self.init((self.causes_dim, self.states_dim))
        self.params = [self.I2S, self.S2S, self.Sb,
                       self.C2S, self.C2C, self.Cb, self.S2C, self.CbS]

        if weights is not None:
            self.set_weights(weights)
Example #9
 def __init__(self, output_dim,
              init='glorot_uniform', inner_init='orthogonal', forget_bias_init='one',
              activation='tanh', inner_activation='hard_sigmoid',
              weights=None, truncate_gradient=-1,
              input_dim=None, input_length=None, hidden_state=None,
              batch_size=None, return_sequences=False, decoder=None,
              decoders=None, remember_state=False, go_backwards=False,
              **kwargs):
     self.output_dim = output_dim
     self.init = initializations.get(init)
     self.inner_init = initializations.get(inner_init)
     self.forget_bias_init = initializations.get(forget_bias_init)
     self.activation = activations.get(activation)
     self.inner_activation = activations.get(inner_activation)
     self.truncate_gradient = truncate_gradient
     self.initial_weights = weights
     self.initial_state = hidden_state
     self.batch_size = batch_size
     self.input_dim = input_dim
     self.input_length = input_length
     self.remember_state = remember_state
     self.return_sequences = return_sequences
     self.go_backwards = go_backwards
     if self.input_dim:
         kwargs['input_shape'] = (self.input_length, self.input_dim)
     super(LSTMEncoder, self).__init__(**kwargs)
     decoders = list(decoders) if decoders else []
     if decoder is not None:
         decoders.append(decoder)
     self.decoders = decoders
     self.broadcast_state(decoders)  # send hidden state to decoders
Example #10
    def __init__(self, input_dim, output_dim=128,
        init='uniform', inner_init='glorot_normal',
        activation='softplus', inner_activation='hard_sigmoid',
        gate_activation='tanh',
        weights=None, truncate_gradient=-1, return_sequences=False):

        super(SGU, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.truncate_gradient = truncate_gradient
        self.return_sequences = return_sequences

        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        self.activation = activations.get(activation)
        self.inner_activation = activations.get(inner_activation)
        self.gate_activation = activations.get(gate_activation)
        self.input = TT.tensor3()

        self.W = self.init((self.input_dim, self.output_dim))
        self.U = self.inner_init((self.output_dim, self.output_dim))
        self.b = shared_zeros((self.output_dim))

        self.W_gate = self.init((self.input_dim, self.output_dim))
        self.b_gate = shared_zeros((self.output_dim))
        self.U_gate = self.inner_init((self.output_dim, self.output_dim))

        self.params = [
            self.W, self.U, self.b,
            self.W_gate, self.b_gate,
            self.U_gate
        ]

        if weights is not None:
            self.set_weights(weights)
Example #11
    def __init__(self, nb_filter, nb_row, nb_col,
                 init='glorot_uniform', inner_init='orthogonal',
                 forget_bias_init='one', activation='tanh',
                 inner_activation='hard_sigmoid', dim_ordering="tf",
                 border_mode="valid", sub_sample=(1, 1),
                 W_regularizer=None, U_regularizer=None, b_regularizer=None,
                 dropout_W=0., dropout_U=0., **kwargs):
        self.nb_filter = nb_filter
        self.nb_row = nb_row
        self.nb_col = nb_col
        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        self.forget_bias_init = initializations.get(forget_bias_init)
        self.activation = activations.get(activation)
        self.inner_activation = activations.get(inner_activation)
        self.border_mode = border_mode
        self.subsample = sub_sample

        assert dim_ordering in {'tf', 'th'}, 'dim_ordering must be in {tf, th}'
        self.dim_ordering = dim_ordering

        kwargs["nb_filter"] = nb_filter
        kwargs["nb_row"] = nb_row
        kwargs["nb_col"] = nb_col
        kwargs["dim_ordering"] = dim_ordering

        self.W_regularizer = W_regularizer
        self.U_regularizer = U_regularizer
        self.b_regularizer = b_regularizer
        self.dropout_W, self.dropout_U = dropout_W, dropout_U

        super(LSTMConv2D, self).__init__(**kwargs)
Example #12
    def __init__(self, units,
                 projection_units=None,
                 activation='tanh',
                 recurrent_activation='sigmoid',
                 projection_activation='linear',
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 projection_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 unit_forget_bias=False,
                 kernel_regularizer=None,
                 recurrent_regularizer=None,
                 projection_regularizer=None,
                 bias_regularizer=None,
                 kernel_constraint=None,
                 recurrent_constraint=None,
                 projection_constraint=None,
                 bias_constraint=None,
                 dropout=0.,
                 recurrent_dropout=0.,
                 implementation=2,
                 **kwargs):
        super(NASCell, self).__init__(**kwargs)
        self.units = units
        self.projection_units = projection_units
        self.activation = activations.get(activation)
        self.recurrent_activation = activations.get(recurrent_activation)
        self.projection_activation = activations.get(projection_activation)
        self.cell_activation = activations.get('relu')
        self.use_bias = use_bias

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.projection_initializer = initializers.get(projection_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.unit_forget_bias = unit_forget_bias

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.recurrent_regularizer = regularizers.get(recurrent_regularizer)
        self.projection_regularizer = regularizers.get(projection_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        self.recurrent_constraint = constraints.get(recurrent_constraint)
        self.projection_constraint = constraints.get(projection_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.dropout = min(1., max(0., dropout))
        self.recurrent_dropout = min(1., max(0., recurrent_dropout))
        self.implementation = implementation

        if self.projection_units is not None:
            self.state_size = (self.projection_units, self.units)
        else:
            self.state_size = (self.units, self.units)

        self._dropout_mask = None
        self._recurrent_dropout_mask = None
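Note that NASCell clamps its dropout rates into [0, 1] instead of raising on bad input. A standalone restatement of that one-liner:

def clamp_rate(rate):
    # mirrors min(1., max(0., rate)) in the constructor above
    return min(1., max(0., rate))

assert clamp_rate(-0.3) == 0.0
assert clamp_rate(1.7) == 1.0
assert clamp_rate(0.25) == 0.25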
Example #13
    def __init__(self, periods, input_dim, output_dim=128,
        init='uniform', inner_init='glorot_normal',
        activation='softplus', inner_activation='hard_sigmoid',
        gate_activation='tanh',
        weights=None, truncate_gradient=-1, return_sequences=False):

        super(ClockworkSGU, self).__init__()
        self.periods = periods
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.truncate_gradient = truncate_gradient
        self.return_sequences = return_sequences

        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        self.activation = activations.get(activation)
        self.inner_activation = activations.get(inner_activation)
        self.gate_activation = activations.get(gate_activation)

        assert self.output_dim % len(self.periods) == 0
        self.n = self.output_dim // len(self.periods)

        self.input = TT.tensor3()

        self.W = self.init((self.input_dim, self.output_dim))
        self.b = shared_zeros((self.output_dim))

        self.W_gate = self.init((self.input_dim, self.output_dim))
        self.b_gate = shared_zeros((self.output_dim))

        self.clock_h = {}
        for i, period in enumerate(self.periods):
            self.clock_h[period] = self.inner_init((
                (i + 1) * self.n, self.n
            ))

        self.clock_gates = {}
        for i, period in enumerate(self.periods):
            self.clock_gates[period] = self.inner_init((
                (i + 1) * self.n, self.n
            ))

        self.params = [
            self.W, self.b,
            self.W_gate, self.b_gate,
        ]

        self.params.extend(self.clock_h.values())
        self.params.extend(self.clock_gates.values())

        if weights is not None:
            self.set_weights(weights)
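The clockwork split above requires output_dim to divide evenly among the periods; each band i then gets a recurrent matrix of shape ((i + 1) * n, n). A quick standalone check of that arithmetic, with hypothetical sizes:

output_dim, periods = 128, (1, 2, 4, 8)
assert output_dim % len(periods) == 0
n = output_dim // len(periods)                        # units per clock band
shapes = [((i + 1) * n, n) for i in range(len(periods))]
print(n, shapes)  # 32 [(32, 32), (64, 32), (96, 32), (128, 32)]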
Example #14
 def model(self, s, a):
     s = s.reshape((-1, 1), ndim=2)
     a = a.reshape((-1, 1), ndim=2)
     y = T.concatenate((s, a), axis=1)
     for i in range(len(self.layers)):
         act = activations.get(self.activations[i])
         y = act(T.dot(y, self.W[i]) + self.b[i])
     act = activations.get("linear")
     y = act(T.dot(y, self.Wy) + self.by)
     return y
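A NumPy sketch of what this forward pass computes: state and action columns are concatenated, pushed through each hidden layer's activation, then through a final linear head. Shapes and weights below are invented for illustration.

import numpy as np

s = np.random.randn(8, 1)                     # states as a column
a = np.random.randn(8, 1)                     # actions as a column
y = np.concatenate((s, a), axis=1)            # shape (8, 2)
W, b = np.random.randn(2, 16), np.zeros(16)   # one hidden layer
y = np.tanh(y @ W + b)
Wy, by = np.random.randn(16, 1), np.zeros(1)  # linear output head
y = y @ Wy + by                               # shape (8, 1)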
Example #15
 def __init__(self, output_dim,
              init='glorot_uniform', inner_init='orthogonal',
              forget_bias_init='one', activation='tanh',
              inner_activation='hard_sigmoid', **kwargs):
     self.output_dim = output_dim
     self.init = initializations.get(init)
     self.inner_init = initializations.get(inner_init)
     self.forget_bias_init = initializations.get(forget_bias_init)
     self.activation = activations.get(activation)
     self.inner_activation = activations.get(inner_activation)
     super(PeepHoleLayer, self).__init__(**kwargs)
Example #16
    def __init__(self, output_dim, context_dim,
                 init='glorot_uniform', inner_init='orthogonal',
                 activation='sigmoid', inner_activation='hard_sigmoid',
                 **kwargs):
        self.output_dim = output_dim
        self.context_dim = context_dim
        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        self.activation = activations.get(activation)
        self.inner_activation = activations.get(inner_activation)

        super(BiContextLayer, self).__init__(**kwargs)
Example #17
    def __init__(self, output_dim, init='glorot_uniform', inner_init='orthogonal',
                 forget_bias_init='one', activation='tanh',
                 inner_activation='hard_sigmoid', batch_size=64, feed_state=False, **kwargs):

        self.output_dim = output_dim
        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        self.forget_bias_init = initializations.get(forget_bias_init)
        self.activation = activations.get(activation)
        self.inner_activation = activations.get(inner_activation)
        self.batch_size = batch_size
        self.feed_state = feed_state
        super(LstmAttentionLayer, self).__init__(**kwargs)
Example #18
 def __init__(self, output_dim, depth=1, readout=False, dropout=.5,
              init='glorot_uniform', inner_init='orthogonal',
              forget_bias_init='one', activation='tanh',
              inner_activation='hard_sigmoid', **kwargs):
     self.output_dim = output_dim
     self.depth = depth
     self.readout = readout
     self.dropout = dropout
     self.init = initializations.get(init)
     self.inner_init = initializations.get(inner_init)
     self.forget_bias_init = initializations.get(forget_bias_init)
     self.activation = activations.get(activation)
     self.inner_activation = activations.get(inner_activation)
     self._kwargs = kwargs
     super(DeepLSTM, self).__init__(**kwargs)
Example #19
    def __init__(self, n_test, n_support, max_depth, init="glorot_uniform", activation="linear", **kwargs):
        """
    Unlike the AttnLSTM model which only modifies the test vectors additively,
    this model allows for an additive update to be performed to both test and
    support using information from each other.

    Parameters
    ----------
    n_support: int
      Size of support set.
    n_test: int
      Size of test set.
    max_depth: int
      Number of LSTM Embedding layers.
    init: string
      Type of weight initialization (from Keras)
    activation: string
      Activation type (ReLu/Linear/etc.)
    """
        super(ResiLSTMEmbedding, self).__init__(**kwargs)

        self.init = initializations.get(init)  # Set weight initialization
        self.activation = activations.get(activation)  # Get activations
        self.max_depth = max_depth
        self.n_test = n_test
        self.n_support = n_support
Example #20
    def __init__(self, output_dim,
                 init='glorot_uniform', activation='linear', weights=None,
                 W_regularizer=None, b_regularizer=None, activity_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 input_dim=None, input_length1=None, input_length2=None, **kwargs):
        self.output_dim = output_dim
        self.init = initializations.get(init)
        self.activation = activations.get(activation)

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.constraints = [self.W_constraint, self.b_constraint]

        self.initial_weights = weights

        self.input_dim = input_dim
        self.input_length1 = input_length1
        self.input_length2 = input_length2
        if self.input_dim:
            kwargs['input_shape'] = (self.input_length1, self.input_length2, self.input_dim)
        self.input = K.placeholder(ndim=4)
        super(HigherOrderTimeDistributedDense, self).__init__(**kwargs)
Example #21
 def __init__(
     self,
     encoder,
     decoder,
     code_dim,
     batch_size,
     beta=0.5,
     subsample=2,
     regularizer_scale=0.5,
     init="glorot_uniform",
     activation="linear",
     weights=None,
     input_dim=None,
     **kwargs
 ):
     self.regularizer_scale = regularizer_scale
     self.beta = beta
     self.max_pool = MaxPooling1D(subsample)
     self.encoder = encoder
     self.decoder = decoder
     self.variational = VariationalDense(
         code_dim, batch_size, input_dim=self.encoder.output_shape[1], regularizer_scale=regularizer_scale
     )
     self.batch_size = batch_size
     self.init = initializations.get(init)
     self.activation = activations.get(activation)
     self.code_dim = code_dim
     self.initial_weights = weights
     self.input_dim = input_dim
     if self.input_dim:
         kwargs["input_shape"] = (self.input_dim,)
     self.input = K.placeholder(ndim=4)
     super(SlowSiamese, self).__init__(**kwargs)
Example #22
    def __init__(self, nb_filter, nb_row, nb_col,
                 init='glorot_uniform', activation='linear', weights=None,
                 border_mode='valid', subsample=(1, 1),
                 W_regularizer=None, b_regularizer=None, activity_regularizer=None,
                 W_constraint=None, b_constraint=None, **kwargs):

        if border_mode not in {'valid', 'full', 'same'}:
            raise Exception('Invalid border mode for Convolution2D:', border_mode)
        self.nb_filter = nb_filter
        self.nb_row = nb_row
        self.nb_col = nb_col
        self.init = initializations.get(init)
        self.activation = activations.get(activation)
        self.border_mode = border_mode
        self.subsample = tuple(subsample)

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.constraints = [self.W_constraint, self.b_constraint]

        self.initial_weights = weights
        super(Convolution2D, self).__init__(**kwargs)
Example #23
    def __init__(self, input_dim, output_dim,
                 init='glorot_uniform', activation='linear', weights=None,
                 W_regularizer=None, b_regularizer=None, activity_regularizer=None,
                 W_constraint=None, b_constraint=None):
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.init = initializations.get(init)
        self.activation = activations.get(activation)
        '''
        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.constraints = [self.W_constraint, self.b_constraint]

        self.initial_weights = weights
        '''
        
        # super(TimeDistributedDense, self).__init__(**kwargs)
        # def build(self):

        self.W = self.init((self.input_dim, self.output_dim))
        self.b = K.zeros((self.output_dim,))

        self.params = [self.W, self.b]
Example #24
    def __init__(self, nb_filter, nb_row, nb_col, mask_type=None, direction='Down',
                 init='glorot_uniform', activation='linear', weights=None,
                 border_mode='valid', subsample=(1, 1), dim_ordering='th',
                 W_regularizer=None, b_regularizer=None, activity_regularizer=None,
                 W_constraint=None, b_constraint=None,
                 bias=True, **kwargs):
        self.mask_type = mask_type
        self.direction = direction
        if border_mode not in {'valid', 'same'}:
            raise Exception('Invalid border mode for Convolution2D:', border_mode)
        self.nb_filter = nb_filter
        self.nb_row = nb_row
        self.nb_col = nb_col
        self.init = initializations.get(init, dim_ordering=dim_ordering)
        self.activation = activations.get(activation)
        assert border_mode in {'valid', 'same'}, 'border_mode must be in {valid, same}'
        self.border_mode = border_mode
        self.subsample = tuple(subsample)
        assert dim_ordering in {'tf', 'th'}, 'dim_ordering must be in {tf, th}'
        self.dim_ordering = dim_ordering

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        self.input_spec = [InputSpec(ndim=4)]
        self.initial_weights = weights
        super(MaskedConvolution2D, self).__init__(**kwargs)
Example #25
    def __init__(self, nb_filter, filter_length,
                 init='uniform', activation='linear', weights=None,
                 border_mode='valid', subsample_length=1,
                 input_dim=None):

        if border_mode not in {'valid', 'same'}:
            raise Exception('Invalid border mode for Convolution1D:', border_mode)
        self.nb_filter = nb_filter
        self.filter_length = filter_length
        self.init = initializations.get(init)
        self.activation = activations.get(activation)
        assert border_mode in {'valid', 'same'}, 'border_mode must be in {valid, same}'
        self.border_mode = border_mode
        self.subsample_length = subsample_length

        self.subsample = (subsample_length, 1)

        self.input_dim = input_dim

        input_dim = self.input_dim
        self.W_shape = (self.nb_filter, input_dim, self.filter_length, 1)
        self.W = self.init(self.W_shape)
        self.b = K.zeros((self.nb_filter,))
        self.params = [self.W, self.b]
Example #26
    def __init__(self, 
                 input_dim, 
                 hidden_dim, 
                 init='glorot_uniform', 
                 activation='linear', 
                 weights=None,
                 corruption_level=0.3):
        self.init = initializations.get(init)
        self.activation = activations.get(activation)
        self.input_dim = input_dim

        self.hidden_dim = hidden_dim
        self.output_dim = input_dim

        self.input = T.matrix()
        self.W = self.init((self.input_dim, self.hidden_dim))
        self.b = shared_zeros((self.hidden_dim))
        self.b_prime = shared_zeros((self.input_dim))

        numpy_rng = np.random.RandomState(123)

        self.theano_rng = RandomStreams(numpy_rng.randint(2 ** 30))

        self.params = [self.W, self.b, self.b_prime]
        self.corruption_level = corruption_level

        if weights is not None:
            self.set_weights(weights)
Example #27
def time_distributed_dense(x, w, b=None, dropout=None,
                           input_dim=None, output_dim=None, timesteps=None, activation='linear'):
    '''Apply y.w + b for every temporal slice y of x.
    '''
    activation = activations.get(activation)

    if not input_dim:
        # won't work with TensorFlow
        input_dim = K.shape(x)[2]
    if not timesteps:
        # won't work with TensorFlow
        timesteps = K.shape(x)[1]
    if not output_dim:
        # won't work with TensorFlow
        output_dim = K.shape(w)[1]

    if dropout is not None and 0. < dropout < 1.:
        # apply the same dropout pattern at every timestep
        ones = K.ones_like(K.reshape(x[:, 0, :], (-1, input_dim)))
        dropout_matrix = K.dropout(ones, dropout)
        expanded_dropout_matrix = K.repeat(dropout_matrix, timesteps)
        x = K.in_train_phase(x * expanded_dropout_matrix, x)

    # collapse time dimension and batch dimension together
    x = K.reshape(x, (-1, input_dim))

    x = K.dot(x, w)
    if b is not None:
        x = x + b
    # reshape to 3D tensor
    x = K.reshape(activation(x), (-1, timesteps, output_dim))
    return x
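The core trick above is collapsing the batch and time axes so one matmul covers every timestep. A NumPy sketch with hypothetical shapes:

import numpy as np

batch, timesteps, input_dim, output_dim = 2, 4, 3, 5
x = np.random.randn(batch, timesteps, input_dim)
w = np.random.randn(input_dim, output_dim)
y = x.reshape(-1, input_dim) @ w              # (batch * timesteps, output_dim)
y = y.reshape(batch, timesteps, output_dim)   # back to a 3D tensor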
Example #28
    def _step(self,
              x_tm1,
              h_tm1, c_tm1, H,
              u_i, u_f, u_o, u_c, w_i, w_f, w_c, w_o, w_x, w_a, v_i, v_f, v_c, v_o, b_i, b_f, b_c, b_o, b_x, b_a):

        s_tm1 = K.repeat(c_tm1, self.input_length)
        e = H + s_tm1
        def a(x, states):
            output = K.dot(x, w_a) + b_a
            return output, []
        _, energy, _ = K.rnn(a, e, [], mask=None)
        energy = activations.get('linear')(energy)
        energy = K.permute_dimensions(energy, (2, 0, 1))
        energy = energy[0]
        alpha = K.softmax(energy)
        alpha = K.repeat(alpha, self.hidden_dim)
        alpha = K.permute_dimensions(alpha, (0, 2, 1))
        weighted_H = H * alpha
        
        v = K.sum(weighted_H, axis=1)

        xi_t = K.dot(x_tm1, w_i) + K.dot(v, v_i) + b_i
        xf_t = K.dot(x_tm1, w_f) + K.dot(v, v_f) + b_f
        xc_t = K.dot(x_tm1, w_c) + K.dot(v, v_c) + b_c
        xo_t = K.dot(x_tm1, w_o) + K.dot(v, v_o) + b_o

        i_t = self.inner_activation(xi_t + K.dot(h_tm1, u_i))
        f_t = self.inner_activation(xf_t + K.dot(h_tm1, u_f))
        c_t = f_t * c_tm1 + i_t * self.activation(xc_t + K.dot(h_tm1, u_c))
        o_t = self.inner_activation(xo_t + K.dot(h_tm1, u_o))
        h_t = o_t * self.activation(c_t)

        x_t = K.dot(h_t, w_x) + b_x
        return x_t, h_t, c_t
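A NumPy sketch of the attention pooling inside _step: per-timestep energies are softmaxed into weights alpha, which then average the annotations H into a context vector. Shapes are hypothetical.

import numpy as np

H = np.random.randn(4, 3)                      # (input_length, hidden_dim)
energy = np.random.randn(4)                    # one score per timestep
alpha = np.exp(energy) / np.exp(energy).sum()  # softmax over timesteps
v = (H * alpha[:, None]).sum(axis=0)           # context vector, shape (3,)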
Example #29
 def __init__(self, activation=None, bias_initializer=-1, **kwargs):
     super(Highway, self).__init__(**kwargs)
     self.activation = kact.get(activation)
     self.bias_initializer = bias_initializer
     if isinstance(self.bias_initializer, int):
         self.bias_initializer = kinit.constant(self.bias_initializer)
     self.input_spec = [InputSpec(min_ndim=2)]
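A sketch of the int-to-initializer conversion this constructor performs, assuming Keras 2.x where keras.initializers.constant is available:

from keras import initializers as kinit

bias_initializer = -1
if isinstance(bias_initializer, int):
    bias_initializer = kinit.constant(bias_initializer)  # gates start biased toward carry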
Example #30
    def __init__(self, input_dim, output_dim,
                 init='glorot_uniform',
                 activation='linear',
                 truncate_gradient=-1,
                 gamma=.1,
                 n_steps=10,
                 return_reconstruction=False,
                 W_regularizer=None,
                 activity_regularizer=None, **kwargs):

        self.init = initializations.get(init)
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.gamma = gamma
        self.n_steps = n_steps
        self.truncate_gradient = truncate_gradient
        self.activation = activations.get(activation)
        self.return_reconstruction = return_reconstruction
        self.input = T.matrix()

        self.W = self.init((self.output_dim, self.input_dim))
        self.params = [self.W, ]

        self.regularizers = []
        if W_regularizer:
            W_regularizer.set_param(self.W)
            self.regularizers.append(W_regularizer)
        if activity_regularizer:
            activity_regularizer.set_layer(self)
            self.regularizers.append(activity_regularizer)

        kwargs['input_shape'] = (self.input_dim,)
        super(SparseCoding, self).__init__(**kwargs)
Example #31
    def __init__(
        self,
        units=1,
        activation=None,
        use_bias=True,
        kernel_initializer="zeros",
        bias_initializer="zeros",
        kernel_regularizer=None,
        bias_regularizer=None,
        **kwargs
    ):
        """Create a Linear Model.

        Args:
          units: Positive integer, output dimension without the batch size.
          activation: Activation function to use.
            If you don't specify anything, no activation is applied.
          use_bias: whether to calculate the bias/intercept for this model. If
            set to False, no bias/intercept will be used in calculations, e.g.,
            the data is already centered.
          kernel_initializer: Initializer for the `kernel` weights matrices.
          bias_initializer: Initializer for the bias vector.
          kernel_regularizer: regularizer for kernel vectors.
          bias_regularizer: regularizer for bias vector.
          **kwargs: The keyword arguments that are passed on to
            BaseLayer.__init__.
        """

        self.units = units
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        super().__init__(**kwargs)
        base_layer.keras_premade_model_gauge.get_cell("Linear").set(True)
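Hedged usage sketch: the signature above matches tf.keras.experimental.LinearModel in TF 2.x, but the exact import path is an assumption if this snippet comes from a fork.

import tensorflow as tf

model = tf.keras.experimental.LinearModel(units=1, kernel_initializer='zeros')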
Example #32
 def __init__(self,
              filters,
              kernel_size,
              strides=(1, 1, 1),
              padding='valid',
              data_format=None,
              dilation_rate=(1, 1, 1),
              groups=1,
              activation=None,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              **kwargs):
   super(Conv3D, self).__init__(
       rank=3,
       filters=filters,
       kernel_size=kernel_size,
       strides=strides,
       padding=padding,
       data_format=data_format,
       dilation_rate=dilation_rate,
       groups=groups,
       activation=activations.get(activation),
       use_bias=use_bias,
       kernel_initializer=initializers.get(kernel_initializer),
       bias_initializer=initializers.get(bias_initializer),
       kernel_regularizer=regularizers.get(kernel_regularizer),
       bias_regularizer=regularizers.get(bias_regularizer),
       activity_regularizer=regularizers.get(activity_regularizer),
       kernel_constraint=constraints.get(kernel_constraint),
       bias_constraint=constraints.get(bias_constraint),
       **kwargs)
Example #33
    def __init__(self,
                 nb_filter,
                 nb_row,
                 nb_col,
                 init='glorot_uniform',
                 activation='linear',
                 weights=None,
                 border_mode='valid',
                 subsample=(1, 1),
                 W_regularizer=None,
                 b_regularizer=None,
                 activity_regularizer=None,
                 W_constraint=None,
                 b_constraint=None,
                 **kwargs):

        if border_mode not in {'valid', 'full', 'same'}:
            raise Exception('Invalid border mode for Convolution2D:',
                            border_mode)
        self.nb_filter = nb_filter
        self.nb_row = nb_row
        self.nb_col = nb_col
        self.init = initializations.get(init)
        self.activation = activations.get(activation)
        self.border_mode = border_mode
        self.subsample = tuple(subsample)

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.constraints = [self.W_constraint, self.b_constraint]

        self.initial_weights = weights
        super(Convolution2D, self).__init__(**kwargs)
Example #34
    def __init__(self,
                 output_dim,
                 support=1,
                 featureless=False,
                 init='glorot_uniform',
                 activation='linear',
                 weights=None,
                 W_regularizer=None,
                 num_bases=-1,
                 b_regularizer=None,
                 bias=False,
                 dropout=0.,
                 **kwargs):
        self.init = initializers.get(init)
        self.activation = activations.get(activation)
        self.output_dim = output_dim  # number of features per node
        self.support = support  # filter support / number of weights
        self.featureless = featureless  # use/ignore input features
        self.dropout = dropout

        assert support >= 1

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)

        self.bias = bias
        self.initial_weights = weights
        self.num_bases = num_bases

        # these will be defined during build()
        self.input_dim = None
        self.W = None
        self.W_comp = None
        self.b = None
        self.num_nodes = None

        super().__init__(**kwargs)
Example #35
    def __init__(self,
                 output_dim,
                 window_size=3,
                 subsample_length=1,
                 init='uniform',
                 activation='linear',
                 W_regularizer=None,
                 b_regularizer=None,
                 W_constraint=None,
                 b_constraint=None,
                 weights=None,
                 bias=True,
                 input_dim=None,
                 input_length=None,
                 **kwargs):
        self.output_dim = output_dim
        self.window_size = window_size
        self.subsample = (subsample_length, 1)

        self.bias = bias
        self.init = initializations.get(init)
        self.activation = activations.get(activation)
        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.initial_weights = weights

        self.supports_masking = False
        self.input_spec = [InputSpec(ndim=3)]
        self.input_dim = input_dim
        self.input_length = input_length
        if self.input_dim:
            kwargs['input_shape'] = (self.input_length, self.input_dim)
        super(GCNN, self).__init__(**kwargs)
Example #36
 def __init__(self,
              state_sync=False,
              decode=False,
              output_length=None,
              return_states=False,
              readout=False,
              readout_activation='linear',
              teacher_force=False,
              state_initializer=None,
              **kwargs):
     self.state_sync = state_sync
     self.cells = []
      if decode and output_length is None:
          raise Exception('output_length should be specified for decoder')
      self.decode = decode
      self.output_length = output_length
      if decode:
          kwargs['return_sequences'] = True
     self.return_states = return_states
     super(RecurrentModel, self).__init__(**kwargs)
     self.readout = readout
     self.readout_activation = activations.get(readout_activation)
     self.teacher_force = teacher_force
     self._optional_input_placeholders = {}
     if state_initializer:
          if isinstance(state_initializer, (list, tuple)):
             state_initializer = [
                 initializers.get(init)
                 if init else initializers.get('zeros')
                 for init in state_initializer
             ]
         else:
             state_initializer = initializers.get(state_initializer)
     self._state_initializer = state_initializer
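A standalone restatement of the state_initializer normalization above, assuming Keras 2.x initializers:

from keras import initializers

def normalize_state_initializer(si):
    # list/tuple entries resolve one by one; falsy entries default to zeros
    if isinstance(si, (list, tuple)):
        return [initializers.get(i) if i else initializers.get('zeros') for i in si]
    return initializers.get(si)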
Example #37
    def __init__(self,
                 output_dim,
                 init='glorot_uniform',
                 inner_init='orthogonal',
                 activation='tanh',
                 W_regularizer=None,
                 U_regularizer=None,
                 b_regularizer=None,
                 dropout_W=0.0,
                 dropout_U=0.0,
                 tau=100,
                 dt=20,
                 noise=.1,
                 dale_ratio=None,
                 **kwargs):
        self.output_dim = output_dim
        self.init = initializations.get(init)
        self.inner_init = initializations.get(inner_init)
        self.activation = activations.get(activation)
        self.W_regularizer = regularizers.get(W_regularizer)
        self.U_regularizer = regularizers.get(U_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.dropout_W, self.dropout_U = dropout_W, dropout_U
        self.tau = tau
        self.dt = dt
        self.noise = noise
        self.dale_ratio = dale_ratio
        if dale_ratio:
            # make Dale's law matrix
            dale_vec = np.ones(output_dim)
            dale_vec[int(dale_ratio * output_dim):] = -1
            dale = np.diag(dale_vec)
            self.Dale = K.variable(dale)
        if self.dropout_W or self.dropout_U:
            self.uses_learning_phase = True
        super(leak_recurrent, self).__init__(**kwargs)
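A NumPy sketch of the Dale's-law sign matrix built when dale_ratio is set: the first dale_ratio fraction of units gets +1 on the diagonal (excitatory), the rest -1 (inhibitory). Sizes here are illustrative.

import numpy as np

output_dim, dale_ratio = 5, 0.8
dale_vec = np.ones(output_dim)
dale_vec[int(dale_ratio * output_dim):] = -1
print(np.diag(dale_vec))  # +1 on the first four diagonal entries, -1 on the last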
Example #38
 def __init__(self, rank,
              filters,
              kernel_size,
              strides=1,
              padding='valid',
              data_format=None,
              dilation_rate=1,
              activation=None,
              use_bias=True,
              kernel_initializer='lecun_normal',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              **kwargs):
     super(WN_Conv, self).__init__(**kwargs)
     self.rank = rank
     self.filters = filters
     self.kernel_size = conv_utils.normalize_tuple(kernel_size, rank,
                                                   'kernel_size')
     self.strides = conv_utils.normalize_tuple(strides, rank, 'strides')
     self.padding = conv_utils.normalize_padding(padding)
     self.data_format = K.normalize_data_format(data_format)
     self.dilation_rate = conv_utils.normalize_tuple(dilation_rate, rank,
                                                     'dilation_rate')
     self.activation = activations.get(activation)
     self.use_bias = use_bias
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.activity_regularizer = regularizers.get(activity_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
     self.input_spec = InputSpec(ndim=self.rank + 2)
Example #39
    def __init__(self,
                 output_dim,
                 init='glorot_uniform',
                 activation='linear',
                 weights=None,
                 W_regularizer=None,
                 b_regularizer=None,
                 bias=True,
                 self_links=True,
                 consecutive_links=True,
                 backward_links=True,
                 edge_weighting=False,
                 **kwargs):
        self.init = initializers.get(init)
        self.activation = activations.get(activation)
        self.output_dim = output_dim  # number of features per node

        self.self_links = self_links
        self.consecutive_links = consecutive_links
        self.backward_links = backward_links
        self.edge_weighting = edge_weighting

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)

        self.bias = bias
        self.initial_weights = weights

        self.input_dim = None
        self.W = None
        self.b = None
        self.num_nodes = None
        self.num_features = None
        self.num_relations = None
        self.num_adjacency_matrices = None

        super(SpectralGraphConvolution, self).__init__(**kwargs)
Example #40
 def __init__(self,
              units,
              activation=None,
              use_bias_1=True,
              use_bias_2=True,
              kernel_initializer='orthogonal',
              bias_1_initializer='zeros',
              bias_2_initializer='zeros',
              scaler_initializer='ones',
              kernel_regularizer=None,
              bias_1_regularizer=None,
              bias_2_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_1_constraint=None,
              bias_2_constraint=None,
              **kwargs):
     if 'input_shape' not in kwargs and 'input_dim' in kwargs:
         kwargs['input_shape'] = (kwargs.pop('input_dim'), )
     super(Mobius, self).__init__(**kwargs)
     self.units = units
     self.activation = activations.get(activation)
     self.use_bias_1 = use_bias_1
     self.use_bias_2 = use_bias_2
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.bias_1_initializer = initializers.get(bias_1_initializer)
     self.bias_2_initializer = initializers.get(bias_2_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.bias_1_regularizer = regularizers.get(bias_1_regularizer)
     self.bias_2_regularizer = regularizers.get(bias_2_regularizer)
     self.activity_regularizer = regularizers.get(activity_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_1_constraint = constraints.get(bias_1_constraint)
     self.bias_2_constraint = constraints.get(bias_2_constraint)
     self.scaler_initializer = scaler_initializer
     self.input_spec = InputSpec(min_ndim=2)
     self.supports_masking = True
Example #41
 def __init__(self,
              units,
              activation=None,
              use_bias=True,
              init_criterion='he',
              kernel_initializer='complex',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              seed=None,
              **kwargs):
     if 'input_shape' not in kwargs and 'input_dim' in kwargs:
         kwargs['input_shape'] = (kwargs.pop('input_dim'), )
     super(ComplexDense, self).__init__(**kwargs)
     self.units = units
     self.activation = activations.get(activation)
     self.use_bias = use_bias
     self.init_criterion = init_criterion
     if kernel_initializer in {'complex'}:
         self.kernel_initializer = kernel_initializer
     else:
         self.kernel_initializer = initializers.get(kernel_initializer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.activity_regularizer = regularizers.get(activity_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
     if seed is None:
         self.seed = np.random.randint(1, 10e6)
     else:
         self.seed = seed
     self.input_spec = InputSpec(ndim=2)
     self.supports_masking = True
Example #42
    def __init__(self,
                 units,
                 activation='tanh',
                 name='SelfAttention',
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        super(SelfAttention, self).__init__(**kwargs)
        self.units = units
        self.activation = activations.get(activation)
        self.name = name
        self.supports_masking = True

        self.kernel_initializer = initializers.get(kernel_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)

        self.bias_initializer = initializers.get(bias_initializer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.bias_constraint = constraints.get(bias_constraint)
Example #43
    def __init__(self,
                 units,
                 kl_weight,
                 num_samples_per_epoch=2,
                 activation=None,
                 prior_mu=0.,
                 prior_sigma=1.,
                 kernel_m=10,
                 bias_m=10,
                 **kwargs):
        self.units = units
        self.kl_weight = kl_weight
        self.activation = activations.get(activation)
        self.prior_mu = prior_mu
        self.prior_sigma = prior_sigma
        self.kernel_m = kernel_m
        self.bias_m = bias_m
        self.kernel_beta_dist = VIMLTS_utils.init_beta_dist(self.kernel_m)
        self.bias_beta_dist = VIMLTS_utils.init_beta_dist(self.bias_m)
        self.epsilon = tf.constant(0.001)
        self.init_gauss = True
        self.num_samples = num_samples_per_epoch

        super().__init__(**kwargs)
Example #44
    def __init__(
            self,  # [N, F] + [N, F]
            units,  # 16
            support=1,  # 3
            activation=None,  # 'RELU'
            use_bias=True,
            kernel_initializer='glorot_uniform',  # Gaussian distribution, L2(5e-4)
            bias_initializer='zeros',
            kernel_regularizer=None,
            bias_regularizer=None,
            activity_regularizer=None,
            kernel_constraint=None,
            bias_constraint=None,
            **kwargs):
        if 'input_shape' not in kwargs and 'input_dim' in kwargs:
            kwargs['input_shape'] = (kwargs.pop('input_dim'), )
        super(GraphConvolution, self).__init__(**kwargs)
        self.units = units
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        # regularizer applied to the kernel weights
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        # regularizer applied to the bias vector
        self.bias_regularizer = regularizers.get(bias_regularizer)
        # regularizer applied to the layer output
        self.activity_regularizer = regularizers.get(activity_regularizer)
        # constraint on the main weight matrix
        self.kernel_constraint = constraints.get(kernel_constraint)
        # constraint on the bias vector
        self.bias_constraint = constraints.get(bias_constraint)
        self.supports_masking = True

        self.support = support
        assert support >= 1
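
Given the [N, F] feature matrix plus adjacency support noted in the signature comments, a typical functional-API invocation might look as follows (a sketch assuming the kipf-style convention that the layer receives the node features followed by the support tensors; shapes are illustrative):

    from keras.layers import Input
    from keras.models import Model

    N, F = 2708, 1433                      # illustrative graph size
    X_in = Input(shape=(F,))               # node features, [N, F]
    A_in = Input(shape=(N,), sparse=True)  # normalized adjacency, [N, N]

    H = GraphConvolution(16, support=1, activation='relu')([X_in, A_in])
    Y = GraphConvolution(7, support=1, activation='softmax')([H, A_in])
    model = Model(inputs=[X_in, A_in], outputs=Y)
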
Exemplo n.º 45
0
 def __init__(self, units,
              activation=None,
              use_bias=False,
              kernel_initializer='glorot_uniform',  # Gaussian distribution
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              **kwargs):
     if 'input_shape' not in kwargs and 'input_dim' in kwargs:
         kwargs['input_shape'] = (kwargs.pop('input_dim'),)
     super(GraphConvolution, self).__init__(**kwargs)
     self.units = units
     self.activation = activations.get(activation)
     self.use_bias = use_bias
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.activity_regularizer = regularizers.get(activity_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
Exemplo n.º 46
0
    def __init__(self,
                 init='glorot_uniform',
                 transform_bias=-2,
                 n_rel=5,
                 mean=1,
                 activation='linear',
                 weights=None,
                 W_regularizer=None,
                 b_regularizer=None,
                 activity_regularizer=None,
                 W_constraint=None,
                 b_constraint=None,
                 bias=True,
                 input_dim=None,
                 **kwargs):
        self.init = initializations.get(init)
        self.transform_bias = transform_bias
        self.activation = activations.get(activation)
        self.n_rel = n_rel
        self.mean = mean

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        self.initial_weights = weights
        self.input_spec = [InputSpec(ndim=2)]

        self.input_dim = input_dim
        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim, )
        super(GraphHighway, self).__init__(**kwargs)
Exemplo n.º 47
0
	def __init__(self, hidden_dim, hidden_recurrent_dim,
				init='glorot_uniform', inner_init='orthogonal',
				activation='tanh',
                W_regularizer=None, U_regularizer=None, b_regularizer=None,
				dropout_W=0., dropout_U=0.,
				nb_gibbs_steps=1,
				persistent=False,
				finetune=False,
				Wrbm_regularizer=None,
				rbm=None,
				dropout_RBM=0.,
				**kwargs):

		self.init = initializers.get(init)
		self.init_rbm = glorot_uniform_sigm
		self.inner_init = initializers.get(inner_init)
		self.activation = activations.get(activation)
		self.W_regularizer = regularizers.get(W_regularizer)
		self.U_regularizer = regularizers.get(U_regularizer)
		self.b_regularizer = regularizers.get(b_regularizer)
		self.dropout_W, self.dropout_U = dropout_W, dropout_U
		self.dropout_RBM = dropout_RBM
		self.Wrbm_regularizer = regularizers.get(Wrbm_regularizer)
		self.rbm = rbm

		if self.dropout_W or self.dropout_U or self.dropout_RBM:
			self.uses_learning_phase = True 
			self.supports_masking = True

		super(RNNRBM, self).__init__(**kwargs)

		self.finetune = finetune
		self.hidden_dim = hidden_dim
		self.hidden_recurrent_dim = hidden_recurrent_dim
		self.nb_gibbs_steps = nb_gibbs_steps
		self.persistent = persistent
Exemplo n.º 48
0
    def __init__(self,
                 units,
                 activation=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 mu_initializer=None,
                 sigma_initializer=None,
                 **kwargs):

        super(NoisyNetDense, self).__init__(**kwargs)

        self.units = units

        self.activation = activations.get(activation)
        self.kernel_constraint = constraints.get(
            kernel_constraint) if kernel_constraint is not None else None
        self.bias_constraint = constraints.get(
            bias_constraint) if bias_constraint is not None else None
        self.kernel_regularizer = regularizers.get(
            kernel_regularizer) if kernel_regularizer is not None else None
        self.bias_regularizer = regularizers.get(
            bias_regularizer) if bias_regularizer is not None else None
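
The mu/sigma initializers point to a NoisyNet-style layer in the sense of Fortunato et al., where each weight is mu + sigma * eps with fresh noise per forward pass. A minimal sketch of the matching call, assuming build created kernel_mu, kernel_sigma, bias_mu and bias_sigma weights (those names are assumptions, not taken from the snippet):

    from keras import backend as K

    def call(self, inputs):
        # Resample independent Gaussian noise on every forward pass.
        kernel_eps = K.random_normal(shape=K.shape(self.kernel_mu))
        bias_eps = K.random_normal(shape=K.shape(self.bias_mu))
        kernel = self.kernel_mu + self.kernel_sigma * kernel_eps
        bias = self.bias_mu + self.bias_sigma * bias_eps
        output = K.dot(inputs, kernel) + bias
        if self.activation is not None:
            output = self.activation(output)
        return output
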
Exemplo n.º 49
0
    def __init__(self, units, window_size=2, stride=1,
                 return_sequences=False, go_backwards=False, 
                 stateful=False, unroll=False, activation='tanh',
                 kernel_initializer='uniform', bias_initializer='zero',
                 kernel_regularizer=None, bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None, bias_constraint=None, 
                 dropout=0, use_bias=True, input_dim=None, input_length=None,
                 **kwargs):
        self.return_sequences = return_sequences
        self.go_backwards = go_backwards
        self.stateful = stateful
        self.unroll = unroll

        self.units = units 
        self.window_size = window_size
        self.strides = (stride, 1)

        self.use_bias = use_bias
        self.activation = activations.get(activation)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.dropout = dropout
        self.supports_masking = True
        self.input_spec = [InputSpec(ndim=3)]
        self.input_dim = input_dim
        self.input_length = input_length
        if self.input_dim:
            kwargs['input_shape'] = (self.input_length, self.input_dim)
        super(QRNN, self).__init__(**kwargs)
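
window_size and strides configure the causal convolution that precomputes the candidate and gate sequences; the recurrent part of a QRNN is then just elementwise f-pooling (Bradbury et al.). A NumPy sketch of that recurrence, independent of this class:

    import numpy as np

    def f_pool(z, f):
        # z, f: (batch, time, units); h_t = f_t * h_{t-1} + (1 - f_t) * z_t
        h = np.zeros_like(z[:, 0])
        out = []
        for t in range(z.shape[1]):
            h = f[:, t] * h + (1.0 - f[:, t]) * z[:, t]
            out.append(h)
        return np.stack(out, axis=1)
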
Exemplo n.º 50
0
File: core.py Project: luisoala/inn
 def __init__(
     self,
     units,
     activation=None,
     use_bias=True,
     kernel_initializer="glorot_uniform",
     bias_initializer="zeros",
     kernel_regularizer=None,
     minmax_kernel_regularizer=None,
     bias_regularizer=None,
     minmax_bias_regularizer=None,
     activity_regularizer=None,
     kernel_constraint=None,
     bias_constraint=None,
     **kwargs
 ):
     if "input_shape" not in kwargs and "input_dim" in kwargs:
         kwargs["input_shape"] = (kwargs.pop("input_dim"),)
     super(Dense, self).__init__(**kwargs)
     self.units = units
     self.activation = activations.get(activation)
     self.use_bias = use_bias
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.minmax_kernel_regularizer = regularizers.get(
         minmax_kernel_regularizer
     )
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.minmax_bias_regularizer = regularizers.get(
         minmax_bias_regularizer
     )
     self.activity_regularizer = regularizers.get(activity_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
     self.supports_masking = True
Exemplo n.º 51
0
 def __init__(self, group_stru,
              activation=None,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None, **kwargs):
      self.group_stru = group_stru
      self.num_var = group_stru.shape[0]
      self.num_group = group_stru.shape[1]
      self.output_dim = group_stru.shape[1]
     self.activation = activations.get(activation)
     self.use_bias = use_bias
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.activity_regularizer = regularizers.get(activity_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
     super(GKlayer, self).__init__(**kwargs)
Exemplo n.º 52
0
    def __init__(self,
                 first_dim,
                 last_dim,
                 init='glorot_uniform',
                 activation=None,
                 weights=None,
                 W_regularizer=None,
                 b_regularizer=None,
                 activity_regularizer=None,
                 W_constraint=None,
                 b_constraint=None,
                 bias=True,
                 input_dim=None,
                 **kwargs):

        self.init = initializers.get(init)
        self.activation = activations.get(activation)

        self.input_dim = input_dim
        self.first_dim = first_dim
        self.last_dim = last_dim

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)

        self.bias = bias
        self.initial_weights = weights
        self.input_spec = [InputSpec(ndim=2)]

        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim, )
        super(Dense3D, self).__init__(**kwargs)
Exemplo n.º 53
0
    def __init__(self, conv_layer,
                 conv_num=3,
                 return_blocks=False,
                 gate_activation='sigmoid',
                 **kwargs):

        super(GatedConvBlock, self).__init__(conv_layer, **kwargs)
        self.conv_num = conv_num
        self.return_blocks = return_blocks
        self.gate_activation = activations.get(gate_activation)
        self.conv_layers = []
        self.input_spec = conv_layer.input_spec 
        self.rank = conv_layer.rank
        if conv_layer.padding != 'same':
            raise ValueError("The padding mode of this layer must be `same`"
                             ", but found `{}`".format(conv_layer.padding))
        
        self.filters = conv_layer.filters//2
        # create conv layers
        import copy
        for i in range(self.conv_num):
            new_conv_layer = copy.deepcopy(conv_layer)
            new_conv_layer.name = 'GatedConvBlock_{}_{}'.format(conv_layer.name, i)
            self.conv_layers.append(new_conv_layer) 
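
filters is halved because each convolution's output is split into a value half and a gate half, combined as a gated linear unit (Dauphin et al.). A NumPy sketch of that combination step (function and argument names are illustrative):

    import numpy as np

    def glu(conv_output, filters, sigmoid=lambda x: 1.0 / (1.0 + np.exp(-x))):
        # First `filters` channels carry values, the remaining ones gate them.
        linear = conv_output[..., :filters]
        gate = conv_output[..., filters:]
        return linear * sigmoid(gate)
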
Exemplo n.º 54
0
    def __init__(self,
                 output_dim,
                 init='glorot_uniform',
                 activation='linear',
                 weights=None,
                 W_regularizer=None,
                 b_regularizer=None,
                 activity_regularizer=None,
                 W_constraint=None,
                 b_constraint=None,
                 input_dim=None,
                 input_length1=None,
                 input_length2=None,
                 **kwargs):
        self.output_dim = output_dim
        self.init = initializations.get(init)
        self.activation = activations.get(activation)

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.constraints = [self.W_constraint, self.b_constraint]

        self.initial_weights = weights

        self.input_dim = input_dim
        self.input_length1 = input_length1
        self.input_length2 = input_length2
        if self.input_dim:
            kwargs['input_shape'] = (self.input_length1, self.input_length2,
                                     self.input_dim)
        self.input = K.placeholder(ndim=4)
        super(HigherOrderTimeDistributedDense, self).__init__(**kwargs)
Exemplo n.º 55
0
 def __init__(self,
              output_dim,
              batch_size,
              init='glorot_uniform',
              activation='tanh',
              weights=None,
              input_dim=None,
              regularizer_scale=1,
              prior_mean=0,
              prior_logsigma=1,
              **kwargs):
     self.prior_mean = prior_mean
     self.prior_logsigma = prior_logsigma
     self.regularizer_scale = regularizer_scale
     self.batch_size = batch_size
     self.init = initializations.get(init)
     self.activation = activations.get(activation)
     self.output_dim = output_dim
     self.initial_weights = weights
     self.input_dim = input_dim
     if self.input_dim:
         kwargs['input_shape'] = (self.input_dim, )
     self.input = K.placeholder(ndim=2)
     super(VariationalDense, self).__init__(**kwargs)
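
prior_mean and prior_logsigma mark this as a weight-uncertainty layer; sampling is usually done with the reparameterization trick so that gradients flow through the mean and log-sigma. A sketch under that assumption (the helper name is illustrative):

    from keras import backend as K

    def sample_weights(mean, logsigma):
        # W = mu + sigma * eps with eps ~ N(0, 1), differentiable in mu and sigma
        eps = K.random_normal(shape=K.shape(mean))
        return mean + K.exp(logsigma) * eps
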
Exemplo n.º 56
0
    def __init__(self,
                 kernel_size,
                 strides=(1, 1),
                 depth_multiplier=1,
                 data_format=None,
                 dilation_rate=(1, 1),
                 activation='relu',
                 depthwise_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 depthwise_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 depthwise_constraint=None,
                 bias_constraint=None,
                 standardized=False,
                 bn_fused=True,
                 gn_groups=32,
                 **kwargs):
        super().__init__(activity_regularizer=activity_regularizer, **kwargs)
        self.input_spec = layers.InputSpec(ndim=4)

        self.kernel_size = kernel_size
        self.strides = strides
        self.depth_multiplier = depth_multiplier
        self.data_format = data_format
        self.dilation_rate = dilation_rate
        self.activation = activations.get(activation)
        self.depthwise_initializer = initializers.get(depthwise_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.depthwise_regularizer = regularizers.get(depthwise_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.depthwise_constraint = constraints.get(depthwise_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.standardized = standardized
        self.bn_fused = bn_fused
        self.gn_groups = gn_groups
Exemplo n.º 57
0
 def __init__(self,
              first_threshold=None,
              second_threshold=None,
              use_dimension_bias=False,
              use_intermediate_layer=False,
              intermediate_dim=64,
              intermediate_activation=None,
              from_logits=False,
              return_logits=False,
              bias_initializer=1.0,
              **kwargs):
     # if 'input_shape' not in kwargs:
     #     kwargs['input_shape'] = [(None, input_dim,), (None, input_dim)]
     super(WeightedCombinationLayer, self).__init__(**kwargs)
     self.first_threshold = first_threshold if first_threshold is not None else INFTY
     self.second_threshold = second_threshold if second_threshold is not None else INFTY
     self.use_dimension_bias = use_dimension_bias
     self.use_intermediate_layer = use_intermediate_layer
     self.intermediate_dim = intermediate_dim
     self.intermediate_activation = kact.get(intermediate_activation)
     self.from_logits = from_logits
     self.return_logits = return_logits
     self.bias_initializer = bias_initializer
     self.input_spec = [InputSpec(), InputSpec(), InputSpec()]
Exemplo n.º 58
0
 def __init__(self,
              units=1,
              activation=None,
              use_bias=True,
              kernel_initializer='glorot_uniform',
              bias_initializer='zeros',
              kernel_regularizer=None,
              bias_regularizer=None,
              activity_regularizer=None,
              kernel_constraint=None,
              bias_constraint=None,
              **kwargs):
     super(Attention, self).__init__(**kwargs)
     self.units = units
     self.activation = activations.get(activation)
     self.use_bias = use_bias
     self.kernel_initializer = initializers.get(kernel_initializer)
     self.bias_initializer = initializers.get(bias_initializer)
     self.kernel_regularizer = regularizers.get(kernel_regularizer)
     self.bias_regularizer = regularizers.get(bias_regularizer)
     self.activity_regularizer = regularizers.get(activity_regularizer)
     self.kernel_constraint = constraints.get(kernel_constraint)
     self.bias_constraint = constraints.get(bias_constraint)
     self.supports_masking = True
Exemplo n.º 59
0
    def __init__(self, units, output_dim,
                 activation='tanh',
                 return_probabilities=False,
                 name='AttentionDecoder_Class',
                 kernel_initializer='glorot_uniform',
                 recurrent_initializer='orthogonal',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        print('__init__: units = ' + str(units))
        
        self.units = units
        self.output_dim = output_dim
        self.return_probabilities = return_probabilities
        self.activation = activations.get(activation)
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.recurrent_initializer = initializers.get(recurrent_initializer)
        self.bias_initializer = initializers.get(bias_initializer)

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        # no separate recurrent argument is exposed, so the kernel regularizer is reused
        self.recurrent_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.kernel_constraint = constraints.get(kernel_constraint)
        # likewise, the recurrent weights share the kernel constraint
        self.recurrent_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        super(AttentionDecoder, self).__init__(**kwargs)
        self.name = name
        self.return_sequences = True  # must return sequences
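
The decoder scores each encoder timestep against its previous state with Bahdanau-style alignment energies. A NumPy sketch of the standard formulation, not lifted from this class:

    import numpy as np

    def alignment(s_prev, H, W, U, v):
        # s_prev: (units,)  H: (T, enc_dim)  W: (units, units)  U: (units, enc_dim)
        e = np.tanh(s_prev @ W + H @ U.T) @ v  # energies e_{t,j}, shape (T,)
        a = np.exp(e - e.max())
        return a / a.sum()                     # softmax over encoder timesteps
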
Exemplo n.º 60
0
    def __init__(self,
                 units,
                 activation='linear',
                 weights=None,
                 kernel_initializer='glorot_uniform',
                 kernel_regularizer=None,
                 kernel_constraint=None,
                 bias_initializer='uniform',
                 bias_regularizer=None,
                 bias_constraint=None,
                 activity_regularizer=None,
                 bias=True,
                 input_dim=None,
                 factorization=simple_tensor_factorization(),
                 **kwargs):
        self.activation = activations.get(activation)
        self.units = units
        self.input_dim = input_dim
        self.factorization = factorization

        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.kernel_initializer = get_initializer(kernel_initializer)
        self.bias_initializer = get_initializer(bias_initializer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)

        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.bias = bias
        self.initial_weights = weights
        self.input_spec = [InputSpec(ndim=2)]

        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim, )
        super(DenseTensor, self).__init__(**kwargs)