Example #1
File: QNS_AS.py  Project: akramyoussry/BQNS
    def build(self, input_shape):
        """
        This method must be defined for any custom layer; it is where the trainable
        parameters are created.

        input_shape: a TensorShape that TensorFlow fills in automatically with the
        dimensions of the input.
        """
        # get the size of the input
        self.N = input_shape.as_list()[1]

        # construct the DFT matrix
        self.F = dft(self.N) / self.fs

        # construct the trapezoidal rule matrix
        self.D = np.eye(self.N // 2)
        self.D[0, 0] = 0.5
        self.D[-1, -1] = 0.5

        # define the trainable parameters representing the double side band PSD of noise
        self.S = self.add_weight(name="S",
                                 shape=tf.TensorShape((self.N // 2, 1)),
                                 initializer=initializers.Ones(),
                                 constraint=constraints.NonNeg(),
                                 trainable=True)

        # this must be called at the end of build() for any TensorFlow custom layer
        super(coherence_Layer, self).build(input_shape)
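For context, a minimal sketch of the same pattern in isolation (the class name and dummy shapes below are illustrative, not part of the BQNS project): a custom layer whose build() adds a Ones-initialized, NonNeg-constrained weight.

import tensorflow as tf
from tensorflow.keras import layers, initializers, constraints

class PSDLayer(layers.Layer):
    def build(self, input_shape):
        n = input_shape.as_list()[1]
        # one non-negative trainable value per positive-frequency bin
        self.S = self.add_weight(name="S",
                                 shape=tf.TensorShape((n // 2, 1)),
                                 initializer=initializers.Ones(),
                                 constraint=constraints.NonNeg(),
                                 trainable=True)
        super().build(input_shape)

layer = PSDLayer()
layer.build(tf.TensorShape((None, 8)))
print(layer.S.shape)  # (4, 1), all ones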
Example #2
 def bias_initializer(_, *args, **kwargs):
     return tf.concat([
         self.bias_initializer((self.units, ), *args, **kwargs),
         initializers.Ones()((self.units, ), *args, **kwargs),
         self.bias_initializer(
             (self.units * 2, ), *args, **kwargs),
     ], -1)
Example #3
 def bias_initializer(shape, *args, **kwargs):
     return K.concatenate([
         self.bias_initializer((self.units, ), *args, **kwargs),
         initializers.Ones()((self.units, ), *args, **kwargs),
         self.bias_initializer((self.units * 2, ), *args,
                               **kwargs),
     ])
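Examples #2 and #3 are the standard "unit forget bias" trick for LSTM-style cells: the flat bias vector of length 4 * units is laid out as [input | forget | cell, output], and only the forget-gate slice is initialized to ones. A self-contained sketch (the shapes are illustrative):

import tensorflow as tf
from tensorflow.keras import initializers

units = 3
zeros = initializers.Zeros()

def unit_forget_bias(shape, dtype=None):
    # [input gate | forget gate (ones) | cell + output gates]
    return tf.concat([
        zeros((units,), dtype=dtype),
        initializers.Ones()((units,), dtype=dtype),
        zeros((units * 2,), dtype=dtype),
    ], axis=-1)

print(unit_forget_bias((units * 4,)).numpy())
# [0. 0. 0. 1. 1. 1. 0. 0. 0. 0. 0. 0.]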
Example #4
def get_ann(
    n_hidden=4, n_neurons=20, kernel_initializer="he_normal", bias_initializer=initializers.Ones()
):
    model = Sequential()

    model.add(
        Dense(
            units=n_neurons,
            input_dim=14,
            kernel_initializer=kernel_initializer,
            bias_initializer=bias_initializer,
        )
    )
    model.add(keras.layers.LeakyReLU(alpha=0.2))
    model.add(Dropout(rate=0.1))

    for _ in range(n_hidden):
        model.add(
            Dense(
                units=n_neurons,
                kernel_initializer=kernel_initializer,
                bias_initializer=bias_initializer,
            )
        )
        model.add(keras.layers.LeakyReLU(alpha=0.2))
        model.add(Dropout(rate=0.1))

    model.add(Dense(units=1, activation="linear"))

    optimizer = optimizers.RMSprop()
    model.compile(loss="mse", optimizer=optimizer, metrics=["mse", "mae"])

    return model
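A minimal way to exercise get_ann on synthetic data, assuming the Keras imports the snippet itself relies on (Sequential, Dense, Dropout, keras, initializers, optimizers):

import numpy as np

model = get_ann(n_hidden=2, n_neurons=10)
X = np.random.rand(32, 14)  # input_dim is fixed at 14 inside get_ann
y = np.random.rand(32, 1)
model.fit(X, y, epochs=1, verbose=0)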
Example #5
File: set2set.py  Project: satori555/megnet
 def bias_initializer(_, *args, **kwargs):
     return kb.concatenate(
         [
             self.bias_initializer((self.n_hidden,), *args, **kwargs),
             initializers.Ones()((self.n_hidden,), *args, **kwargs),
             self.bias_initializer((self.n_hidden * 2,), *args, **kwargs),
         ]
     )
Example #6
    def build(self, input_shape):
        self.input_spec = [InputSpec(shape=input_shape)]
        output_shape = (int(input_shape[self.axis]),)

        gamma_initializer = initializers.Ones()
        beta_initializer = initializers.Zeros()

        self.gamma = K.variable(gamma_initializer(output_shape))
        self.beta = K.variable(beta_initializer(output_shape))
        self.trainable_weights = [self.gamma, self.beta]
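Example #6 uses the legacy pattern of creating variables via K.variable and assigning trainable_weights directly; in TF2-era Keras the equivalent (and safer) form uses add_weight, roughly like this sketch (class name and call body are assumptions):

import tensorflow as tf
from tensorflow.keras import layers, initializers

class ScaleShift(layers.Layer):
    def __init__(self, axis=-1, **kwargs):
        super().__init__(**kwargs)
        self.axis = axis

    def build(self, input_shape):
        dim = (int(input_shape[self.axis]),)
        self.gamma = self.add_weight(name="gamma", shape=dim,
                                     initializer=initializers.Ones())
        self.beta = self.add_weight(name="beta", shape=dim,
                                    initializer=initializers.Zeros())

    def call(self, x):
        return self.gamma * x + self.beta

print(ScaleShift()(tf.ones((2, 4))).shape)  # (2, 4)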
Example #7
def construct_actor_network(bandits):
    """Construct the actor network with mu and sigma as output"""
    inputs = layers.Input(shape=(1,)) #input dimension
    hidden1 = layers.Dense(5, activation="relu",kernel_initializer=initializers.he_normal())(inputs)
    hidden2 = layers.Dense(5, activation="relu",kernel_initializer=initializers.he_normal())(hidden1)
    probabilities = layers.Dense(len(bandits), kernel_initializer=initializers.Ones(),activation="softmax")(hidden2)

    actor_network = keras.Model(inputs=inputs, outputs=[probabilities]) 
    
    return actor_network
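A quick smoke test, assuming the snippet's own keras/layers/initializers imports (the bandit list is a placeholder):

import numpy as np

bandits = [0.2, 0.5, 0.8]         # hypothetical arm success probabilities
actor = construct_actor_network(bandits)
probs = actor(np.array([[1.0]]))  # dummy state input
print(probs.numpy().sum())        # ~1.0, thanks to the softmax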
Example #8
 def build(self, input_shape):
     self.gain = self.add_weight(name='gain',
                                 shape=input_shape[-1:],
                                 initializer=initializers.Ones(),
                                 trainable=True)
     self.bias = self.add_weight(name='bias',
                                 shape=input_shape[-1:],
                                 initializer=initializers.Zeros(),
                                 trainable=True)
     super().build(input_shape)
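This gain/bias pair is the canonical parameterization of a layer-normalization layer. A hedged, self-contained version with the normalization itself filled in (the epsilon and class name are assumptions, not from the original project):

import tensorflow as tf
from tensorflow.keras import layers, initializers

class LayerNorm(layers.Layer):
    def build(self, input_shape):
        self.gain = self.add_weight(name='gain', shape=input_shape[-1:],
                                    initializer=initializers.Ones(), trainable=True)
        self.bias = self.add_weight(name='bias', shape=input_shape[-1:],
                                    initializer=initializers.Zeros(), trainable=True)
        super().build(input_shape)

    def call(self, x):
        mean, var = tf.nn.moments(x, axes=[-1], keepdims=True)
        return self.gain * (x - mean) / tf.sqrt(var + 1e-6) + self.bias

print(LayerNorm()(tf.random.normal((2, 8))).shape)  # (2, 8)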
Example #9
def create_model(opt, nFeatures=1):
    model = ks.models.Sequential()
    #lys = Dense(2, input_shape=(1,))
    #print(lys.get_weights())
    #print(lys.get_config())
    #print('nFeatures=', nFeatures)
    k_initializer = initializers.Ones()
    b_initializer = initializers.Ones()

    if 1:  # fixed three-layer stack
        model.add(
            Dense(5,
                  input_shape=(nFeatures, ),
                  kernel_initializer=k_initializer,
                  bias_initializer=b_initializer))
        model.add(lys.ReLU())
        model.add(
            Dense(5,
                  kernel_initializer=k_initializer,
                  bias_initializer=b_initializer))
        model.add(lys.ReLU())
        model.add(
            Dense(1,
                  kernel_initializer=k_initializer,
                  bias_initializer=b_initializer))

    else:  # multiple layers
        model.add(Dense(10, input_shape=(nFeatures, )))
        model.add(Dense(8, input_shape=(10, )))
        model.add(Dense(2, input_shape=(8, )))
        model.add(Dense(1, input_shape=(2, )))

    model.compile(optimizer=opt,
                  loss='mean_squared_error')  #metrics=['accuracy']
    #model.summary()
    return model
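Exercising create_model on synthetic data, assuming the snippet's own aliases and imports (ks for keras, lys for keras.layers, Dense, initializers):

import numpy as np

model = create_model('rmsprop', nFeatures=1)
X = np.arange(8, dtype='float32').reshape(-1, 1)
model.fit(X, 2 * X, epochs=1, verbose=0)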
Example #10
def get_lstm(kernel_initializer="he_uniform",
             bias_initializer=initializers.Ones()):
    model = Sequential()

    if lstm_params["n_hidden"] == 1:
        model.add(
            LSTM(
                units=lstm_params["units"],
                input_shape=(lstm_params["steps"],
                             lstm_params["features_num"]),
                kernel_initializer=kernel_initializer,
                bias_initializer=bias_initializer,
            ))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.2))
    if lstm_params["n_hidden"] == 2:
        model.add(
            LSTM(
                units=lstm_params["units"],
                input_shape=(lstm_params["steps"],
                             lstm_params["features_num"]),
                return_sequences=True,
                kernel_initializer=kernel_initializer,
                bias_initializer=bias_initializer,
            ))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.2))
        model.add(
            LSTM(
                units=lstm_params["units"],
                input_shape=(lstm_params["steps"],
                             lstm_params["features_num"]),
                kernel_initializer=kernel_initializer,
                bias_initializer=bias_initializer,
            ))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.2))

    model.add(Dense(1, activation="linear"))

    optimizer = optimizers.RMSprop()
    model.compile(loss="mse", metrics=["mse", "mae"], optimizer=optimizer)
    return model
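get_lstm reads its hyperparameters from a module-level lstm_params dict that the snippet does not show. A plausible shape for it, with placeholder values (and assuming the same Keras imports as the snippet):

lstm_params = {
    "n_hidden": 1,       # 1 or 2 stacked LSTM blocks
    "units": 32,
    "steps": 10,         # timesteps per sample
    "features_num": 4,   # features per timestep
}
model = get_lstm()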
Example #11
 def _build_model(self):
     model = tf.keras.Sequential([
         layers.Dense(
             100,
             input_shape=(4,),
             kernel_initializer=initializers.RandomNormal(stddev=5.0),
             bias_initializer=initializers.Ones(),
             # kernel_regularizer=regularizers.l1_l2(l1=1e-5, l2=1e-4),
             activation='sigmoid',
             name='state'
         ),
         layers.Dense(
             2,
             #input_shape=(4,),
             activation='relu'
         ),
         layers.Dense(1, name='action', activation='tanh'),
     ])
     model.summary()
     model.compile(
         loss='hinge',
         optimizer=optimizers.RMSprop(lr=self.lr)
     )
     return model
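A note on the pairing above: a tanh output in [-1, 1] combined with a hinge loss implicitly treats the targets as +/-1 labels. A minimal standalone sketch of the same head (names and values are illustrative):

import numpy as np
import tensorflow as tf
from tensorflow.keras import layers, initializers, optimizers

model = tf.keras.Sequential([
    layers.Dense(100, input_shape=(4,),
                 kernel_initializer=initializers.RandomNormal(stddev=5.0),
                 bias_initializer=initializers.Ones(),
                 activation='sigmoid'),
    layers.Dense(1, activation='tanh'),
])
model.compile(loss='hinge', optimizer=optimizers.RMSprop(learning_rate=0.01))
model.fit(np.random.rand(8, 4), np.sign(np.random.randn(8, 1)), verbose=0)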
Example #12
lr = 0.1
m = 0.6
weight_decay = [0.1, 0.5, 0.9]
exp_metrics = np.zeros([len(weight_decay), 3])
i = 0
for r in weight_decay:
    RMSEs = []
    MAEs = []
    for j in range(0, 5):
        # Model
        model = Sequential()

        # Input layer
        model.add(
            Dense(N,
                  kernel_initializer=initializers.Ones(),
                  bias_initializer=initializers.Zeros(),
                  input_dim=N))

        # Leaky ReLU activation function
        def lrelu(x):
            return relu(x, alpha=0.01)


        # Hidden layer
        model.add(
            Dense(H,
                  kernel_initializer=initializers.glorot_uniform(),
                  bias_initializer=initializers.glorot_uniform(),
                  activation=lrelu,
                  kernel_regularizer=l1(r),
Example #13
    def make_network(self, amino_acid_encoding, peptide_max_length,
                     n_flank_length, c_flank_length, flanking_averages,
                     convolutional_filters, convolutional_kernel_size,
                     convolutional_activation, convolutional_kernel_l1_l2,
                     dropout_rate, post_convolutional_dense_layer_sizes):
        """
        Helper function to make a keras network given hyperparameters.
        """

        # We import keras here to avoid tensorflow debug output, etc. unless we
        # are actually about to use Keras.
        configure_tensorflow()
        from tensorflow.keras.layers import (Input, Dense, Dropout,
                                             Concatenate, Conv1D, Lambda)
        from tensorflow.keras.models import Model
        from tensorflow.keras import regularizers, initializers

        model_inputs = {}

        empty_x_dict = self.network_input(FlankingEncoding([], [], []))
        sequence_dims = empty_x_dict['sequence'].shape[1:]

        numpy.testing.assert_equal(
            sequence_dims[0],
            peptide_max_length + n_flank_length + c_flank_length)

        model_inputs['sequence'] = Input(shape=sequence_dims,
                                         dtype='float32',
                                         name='sequence')
        model_inputs['peptide_length'] = Input(shape=(1, ),
                                               dtype='int32',
                                               name='peptide_length')

        current_layer = model_inputs['sequence']
        current_layer = Conv1D(
            filters=convolutional_filters,
            kernel_size=convolutional_kernel_size,
            kernel_regularizer=regularizers.l1_l2(*convolutional_kernel_l1_l2),
            padding="same",
            activation=convolutional_activation,
            name="conv1")(current_layer)
        if dropout_rate > 0:
            current_layer = Dropout(
                name="conv1_dropout",
                rate=dropout_rate,
                noise_shape=(None, 1, int(
                    current_layer.get_shape()[-1])))(current_layer)

        convolutional_result = current_layer

        outputs_for_final_dense = []

        for flank in ["n_flank", "c_flank"]:
            current_layer = convolutional_result
            for (i, size) in enumerate(
                    list(post_convolutional_dense_layer_sizes) + [1]):
                current_layer = Conv1D(
                    name="%s_post_%d" % (flank, i),
                    filters=size,
                    kernel_size=1,
                    kernel_regularizer=regularizers.l1_l2(
                        *convolutional_kernel_l1_l2),
                    activation=("tanh" if size == 1 else
                                convolutional_activation))(current_layer)
            single_output_result = current_layer

            dense_flank = None
            if flank == "n_flank":

                def cleavage_extractor(x):
                    return x[:, n_flank_length]

                single_output_at_cleavage_position = Lambda(
                    cleavage_extractor,
                    name="%s_cleaved" % flank)(single_output_result)

                def max_pool_over_peptide_extractor(lst):
                    import tensorflow as tf
                    (x, peptide_length) = lst

                    # We generate a per-sample mask that is 1 for all peptide
                    # positions except the first position, and 0 for all other
                    # positions (i.e. n flank, c flank, and the first peptide
                    # position).
                    starts = n_flank_length + 1
                    limits = n_flank_length + peptide_length
                    row = tf.expand_dims(tf.range(0, x.shape[1]), axis=0)
                    mask = tf.logical_and(tf.greater_equal(row, starts),
                                          tf.less(row, limits))

                    # We are assuming that x >= -1. The final activation in the
                    # previous layer should be a function that satisfies this
                    # (e.g. sigmoid, tanh, relu).
                    max_value = tf.reduce_max(
                        (x + 1) *
                        tf.expand_dims(tf.cast(mask, tf.float32), axis=-1),
                        axis=1) - 1

                    # We flip the sign so that initializing the final dense
                    # layer weights to 1s is reasonable.
                    return -1 * max_value

                max_over_peptide = Lambda(max_pool_over_peptide_extractor,
                                          name="%s_internal_cleaved" % flank)([
                                              single_output_result,
                                              model_inputs['peptide_length']
                                          ])

                def flanking_extractor(lst):
                    import tensorflow as tf
                    (x, peptide_length) = lst

                    # mask is 1 for n_flank positions and 0 elsewhere.
                    starts = 0
                    limits = n_flank_length
                    row = tf.expand_dims(tf.range(0, x.shape[1]), axis=0)
                    mask = tf.logical_and(tf.greater_equal(row, starts),
                                          tf.less(row, limits))

                    # We are assuming that x >= -1. The final activation in the
                    # previous layer should be a function that satisfies this
                    # (e.g. sigmoid, tanh, relu).
                    average_value = tf.reduce_mean(
                        (x + 1) *
                        tf.expand_dims(tf.cast(mask, tf.float32), axis=-1),
                        axis=1) - 1
                    return average_value

                if flanking_averages and n_flank_length > 0:
                    # Also include average-pooled flanking sequences
                    pooled_flank = Lambda(flanking_extractor,
                                          name="%s_extracted" % flank)([
                                              convolutional_result,
                                              model_inputs['peptide_length']
                                          ])
                    dense_flank = Dense(1,
                                        activation="tanh",
                                        name="%s_avg_dense" %
                                        flank)(pooled_flank)
            else:
                assert flank == "c_flank"

                def cleavage_extractor(lst):
                    import tensorflow as tf
                    (x, peptide_length) = lst
                    indexer = peptide_length + n_flank_length - 1
                    result = tf.squeeze(
                        tf.gather(x, indexer, batch_dims=1, axis=1), -1)
                    return result

                single_output_at_cleavage_position = Lambda(
                    cleavage_extractor, name="%s_cleaved" % flank)(
                        [single_output_result, model_inputs['peptide_length']])

                def max_pool_over_peptide_extractor(lst):
                    import tensorflow as tf
                    (x, peptide_length) = lst

                    # We generate a per-sample mask that is 1 for all peptide
                    # positions except the last position, and 0 for all other
                    # positions (i.e. n flank, c flank, and the last peptide
                    # position).
                    starts = n_flank_length
                    limits = n_flank_length + peptide_length - 1
                    row = tf.expand_dims(tf.range(0, x.shape[1]), axis=0)
                    mask = tf.logical_and(tf.greater_equal(row, starts),
                                          tf.less(row, limits))

                    # We are assuming that x >= -1. The final activation in the
                    # previous layer should be a function that satisfies this
                    # (e.g. sigmoid, tanh, relu).
                    max_value = tf.reduce_max(
                        (x + 1) *
                        tf.expand_dims(tf.cast(mask, tf.float32), axis=-1),
                        axis=1) - 1

                    # We flip the sign so that initializing the final dense
                    # layer weights to 1s is reasonable.
                    return -1 * max_value

                max_over_peptide = Lambda(max_pool_over_peptide_extractor,
                                          name="%s_internal_cleaved" % flank)([
                                              single_output_result,
                                              model_inputs['peptide_length']
                                          ])

                def flanking_extractor(lst):
                    import tensorflow as tf
                    (x, peptide_length) = lst

                    # mask is 1 for c_flank positions and 0 elsewhere.
                    starts = n_flank_length + peptide_length
                    limits = n_flank_length + peptide_length + c_flank_length
                    row = tf.expand_dims(tf.range(0, x.shape[1]), axis=0)
                    mask = tf.logical_and(tf.greater_equal(row, starts),
                                          tf.less(row, limits))

                    # We are assuming that x >= -1. The final activation in the
                    # previous layer should be a function that satisfies this
                    # (e.g. sigmoid, tanh, relu).
                    average_value = tf.reduce_mean(
                        (x + 1) *
                        tf.expand_dims(tf.cast(mask, tf.float32), axis=-1),
                        axis=1) - 1
                    return average_value

                if flanking_averages and c_flank_length > 0:
                    # Also include average-pooled flanking sequences
                    pooled_flank = Lambda(flanking_extractor,
                                          name="%s_extracted" % flank)([
                                              convolutional_result,
                                              model_inputs['peptide_length']
                                          ])
                    dense_flank = Dense(1,
                                        activation="tanh",
                                        name="%s_avg_dense" %
                                        flank)(pooled_flank)

            outputs_for_final_dense.append(single_output_at_cleavage_position)
            outputs_for_final_dense.append(max_over_peptide)
            if dense_flank is not None:
                outputs_for_final_dense.append(dense_flank)

        if len(outputs_for_final_dense) == 1:
            (current_layer, ) = outputs_for_final_dense
        else:
            current_layer = Concatenate(name="final")(outputs_for_final_dense)
        output = Dense(
            1,
            activation="sigmoid",
            name="output",
            kernel_initializer=initializers.Ones(),
        )(current_layer)
        model = Model(
            inputs=[model_inputs[name] for name in sorted(model_inputs)],
            outputs=[output],
            name="predictor")

        return model
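The core trick in make_network's Lambda layers is masked pooling over a variable-length peptide span inside a fixed-length tensor: shift activations by +1 so that masked positions (which become 0 after multiplication) can never win the max, then shift back. A standalone sketch of just that step (shapes and lengths are illustrative):

import tensorflow as tf

n_flank_length = 2
x = tf.random.uniform((3, 9, 1), minval=-1.0, maxval=1.0)  # batch, length, channels
peptide_length = tf.constant([[4], [5], [3]])

starts = n_flank_length + 1
limits = n_flank_length + peptide_length
row = tf.expand_dims(tf.range(0, x.shape[1]), axis=0)
mask = tf.logical_and(tf.greater_equal(row, starts), tf.less(row, limits))

# (x + 1) >= 0, so zeroed positions cannot exceed any unmasked position
max_value = tf.reduce_max(
    (x + 1) * tf.expand_dims(tf.cast(mask, tf.float32), axis=-1), axis=1) - 1
print(max_value.shape)  # (3, 1): per-sample max over the masked peptide span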
Example #14
    def build(self, input_shape):

        input_dim = input_shape[-1]
        input_dim -= 1  # We add time and event afterwards

        if type(self.recurrent_initializer).__name__ == 'Identity':

            def recurrent_identity(shape, gain=1., dtype=None):
                del dtype
                return gain * np.concatenate(
                    [np.identity(shape[0])] * (shape[1] // shape[0]), axis=1)

            self.recurrent_initializer = recurrent_identity

        self.input_embedding = self.add_weight(
            shape=(input_dim, self.embedding_size),
            name='embedding_matrix',
            initializer=self.kernel_initializer,
            regularizer=self.kernel_regularizer,
            constraint=self.kernel_constraint)

        self.kernel = self.add_weight(shape=(self.embedding_size,
                                             self.units * 4),
                                      name='kernel',
                                      initializer=self.kernel_initializer,
                                      regularizer=self.kernel_regularizer,
                                      constraint=self.kernel_constraint)

        self.recurrent_kernel = self.add_weight(
            shape=(self.units, self.units * 4),
            name='recurrent_kernel',
            initializer=self.recurrent_initializer,
            regularizer=self.recurrent_regularizer,
            constraint=self.recurrent_constraint)

        self.time_kernel = self.add_weight(shape=(1, self.projection_size),
                                           name='time_kernel',
                                           initializer=self.time_initializer,
                                           regularizer=self.time_regularizer,
                                           constraint=self.time_constraint)

        self.embedding_projector = self.add_weight(
            shape=(self.projection_size, self.embedding_size),
            name='time_projection_onto_embedding_space',
            initializer=self.time_initializer,
            regularizer=self.time_regularizer,
            constraint=self.time_constraint)

        if self.use_bias:
            if self.unit_forget_bias:

                def bias_initializer(_, *args, **kwargs):
                    return K.concatenate([
                        self.bias_initializer((self.units, ), *args, **kwargs),
                        initializers.Ones()((self.units, ), *args, **kwargs),
                        self.bias_initializer((self.units * 2, ), *args,
                                              **kwargs),
                    ])
            else:
                bias_initializer = self.bias_initializer
            self.bias = self.add_weight(shape=(self.units * 4, ),
                                        name='bias',
                                        initializer=bias_initializer,
                                        regularizer=self.bias_regularizer,
                                        constraint=self.bias_constraint)

            # the time bias is trainable and starts at ones
            self.time_bias = self.add_weight(shape=(self.projection_size, ),
                                             name='time_bias',
                                             initializer=initializers.Ones(),
                                             regularizer=self.bias_regularizer,
                                             constraint=self.bias_constraint)

        else:
            self.bias = None
            self.time_bias = None

        self.kernel_i = self.kernel[:, :self.units]
        self.kernel_f = self.kernel[:, self.units:self.units * 2]
        self.kernel_c = self.kernel[:, self.units * 2:self.units * 3]
        self.kernel_o = self.kernel[:, self.units * 3:self.units * 4]

        self.recurrent_kernel_i = self.recurrent_kernel[:, :self.units]
        self.recurrent_kernel_f = (
            self.recurrent_kernel[:, self.units:self.units * 2])
        self.recurrent_kernel_c = (self.recurrent_kernel[:, self.units *
                                                         2:self.units * 3])
        self.recurrent_kernel_o = self.recurrent_kernel[:, self.units *
                                                        3:self.units * 4]

        if self.use_bias:
            self.bias_i = self.bias[:self.units]
            self.bias_f = self.bias[self.units:self.units * 2]
            self.bias_c = self.bias[self.units * 2:self.units * 3]
            self.bias_o = self.bias[self.units * 3:self.units * 4]
            self.bias_t = self.time_bias

        else:
            self.bias_i = None
            self.bias_f = None
            self.bias_c = None
            self.bias_o = None
            self.bias_t = None

        self.built = True
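The slicing at the end of Example #14 is the usual packed-LSTM layout: one (input_dim, 4 * units) kernel holding the input, forget, cell, and output gate weights side by side. In miniature:

import numpy as np

units = 2
kernel = np.arange(3 * 4 * units).reshape(3, 4 * units)  # (input_dim, 4 * units)
k_i = kernel[:, :units]               # input gate
k_f = kernel[:, units:units * 2]      # forget gate
k_c = kernel[:, units * 2:units * 3]  # cell candidate
k_o = kernel[:, units * 3:]           # output gate
assert k_i.shape == k_f.shape == k_c.shape == k_o.shape == (3, units)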
Example #15
    def __init__(self, alpha_initializer=initializers.Ones(), **kwargs):

        self.alpha_initializer = alpha_initializer

        super().__init__(**kwargs)
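Example #15 stores a configurable initializer for a learnable activation parameter. A hedged completion as a PReLU-style layer (the build/call bodies and class name are assumptions, not from the original project):

import tensorflow as tf
from tensorflow.keras import layers, initializers

class ScaledReLU(layers.Layer):
    def __init__(self, alpha_initializer=initializers.Ones(), **kwargs):
        self.alpha_initializer = alpha_initializer
        super().__init__(**kwargs)

    def build(self, input_shape):
        self.alpha = self.add_weight(name='alpha', shape=input_shape[-1:],
                                     initializer=self.alpha_initializer)

    def call(self, x):
        return self.alpha * tf.nn.relu(x)

print(ScaledReLU()(tf.constant([[-1.0, 2.0]])).numpy())  # [[0. 2.]]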
Example #16
test_ind = np.load('test_ind.npy', allow_pickle=True)

H = 15
learning_rate = [0.001, 0.001, 0.05, 0.1]
momentum = [0.2, 0.6, 0.6, 0.6]
exp_metrics = np.zeros([4, 4])
i = 0
for lr, m in zip(learning_rate, momentum):
    RMSEs = []
    MAEs = []
    for j in range(0, 5):
        # Model
        model = Sequential()

        # Input layer
        model.add(Dense(N, kernel_initializer=initializers.Ones(),
                        bias_initializer=initializers.Zeros(), input_dim=N))
        # Leaky ReLU activation function
        def lrelu(x): return relu(x, alpha=0.01)
        # Hidden layer
        model.add(Dense(H, kernel_initializer=initializers.glorot_uniform(),
                        bias_initializer=initializers.glorot_uniform(), activation=lrelu))
        # Output layer
        model.add(Dense(M, kernel_initializer=initializers.glorot_uniform(),
                        bias_initializer=initializers.glorot_uniform(), activation='sigmoid'))

        # SGD optimiser
        sgd = SGD(learning_rate=lr, momentum=m, nesterov=False)

        def rmse(y_true, y_pred):
            return K.sqrt(K.mean(K.square(y_pred - y_true)))
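The snippet cuts off here; the custom rmse above would plausibly be passed to compile as a metric alongside the SGD optimizer defined just before it, along the lines of:

model.compile(loss='mean_squared_error', optimizer=sgd, metrics=[rmse, 'mae'])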