Example #1
    def __built_multi_cell_Layer(self):
        """Stack three sequence-returning RNN layers, each followed by batch
        normalization, then a final summarizing RNN and a dense head.

        Sets self.output to the output tensor.
        """
        o1 = RNN(MinimalRNNCell(32, "tanh"), return_sequences=True)(self.input)
        o1 = BatchNormalization(axis=1)(o1)
        o2 = RNN(MinimalRNNCell(32, "tanh"), return_sequences=True)(o1)
        o2 = BatchNormalization(axis=1)(o2)
        o3 = RNN(MinimalRNNCell(32, "tanh"), return_sequences=True)(o2)
        o3 = BatchNormalization(axis=1)(o3)
        o4 = RNN(MinimalRNNCell(32, "tanh"), return_sequences=False)(o3)
        self.output = Dense(21, activation="relu")(o4)
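MinimalRNNCell is a custom cell that is not shown on this page. Below is a minimal sketch following the custom-cell pattern from the Keras RNN guide; the two-argument signature (units, activation) is inferred from the call sites above and is an assumption, not the project's actual code.

import tensorflow as tf
from tensorflow.keras import activations, layers

class MinimalRNNCell(layers.Layer):
    # Hypothetical reconstruction: only `units` and an activation name are
    # inferred from MinimalRNNCell(32, "tanh") above.
    def __init__(self, units, activation="tanh", **kwargs):
        super().__init__(**kwargs)
        self.units = units
        self.state_size = units
        self.activation = activations.get(activation)

    def build(self, input_shape):
        self.kernel = self.add_weight(
            shape=(input_shape[-1], self.units),
            initializer="glorot_uniform", name="kernel")
        self.recurrent_kernel = self.add_weight(
            shape=(self.units, self.units),
            initializer="glorot_uniform", name="recurrent_kernel")

    def call(self, inputs, states):
        prev_output = states[0]
        output = self.activation(
            tf.matmul(inputs, self.kernel)
            + tf.matmul(prev_output, self.recurrent_kernel))
        return output, [output]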
Example #2
def get_test_model(seq_len, stacked_features_size, edges_features_matrix_depth,
                   neighbourhood_size, units):
    # INPUTS
    stacked_base_features_input = Input(shape=(seq_len, stacked_features_size),
                                        name='stacked_base_features')
    adjacency_matrix_input = Input(shape=(seq_len, seq_len),
                                   name='adjacency_matrix')
    edges_features_matrix_input = Input(shape=(seq_len, seq_len,
                                               edges_features_matrix_depth),
                                        name='edges_features_matrix')
    inputs = (stacked_base_features_input, adjacency_matrix_input,
              edges_features_matrix_input)

    # ACTUAL MODEL
    x = Subgraphing(neighbourhood_size)(inputs)
    x = RNN(GraphReduceCell(units), return_sequences=True)(x)

    # OUTPUTS
    reactivity_pred = TimeDistributed(Dense(1), name='reactivity')(x)
    deg_Mg_pH10_pred = TimeDistributed(Dense(1), name='deg_Mg_pH10')(x)
    deg_Mg_50C_pred = TimeDistributed(Dense(1), name='deg_Mg_50C')(x)

    scored_outputs = [reactivity_pred, deg_Mg_pH10_pred, deg_Mg_50C_pred]
    stacked_outputs = Concatenate(axis=2,
                                  name='stacked_outputs')(scored_outputs)

    # MODEL DEFINITION
    model = Model(inputs=inputs,
                  outputs={'stacked_scored_labels': stacked_outputs},
                  name='graph_reduce_model')

    return model
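Subgraphing and GraphReduceCell are project-specific layers, so they must be importable for this to run. A hypothetical call, with illustrative sizes only (none of these values come from the source):

model = get_test_model(seq_len=107,
                       stacked_features_size=20,
                       edges_features_matrix_depth=3,
                       neighbourhood_size=5,
                       units=64)
model.summary()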
Example #3
    def set_model(self):
        input_layer = Input(shape=(self.seq_len, self.items_len,))

        rnn_part = RNN(ExtendedMemoryRNNCell(f_block_num_layers=self.f_block_num_layers,
                                             f_block_units=self.f_block_units,
                                             s_block_num_layers=self.s_block_num_layers,
                                             s_block_units=self.s_block_units,
                                             state_shape=self.state_shape,
                                             batch_size=self.batch_size), return_sequences=True)(
            input_layer
        )

        layer1_1 = TimeDistributed(Dense(units=64, activation='relu'))(rnn_part)
        layer1_2 = TimeDistributed(Dense(units=64, activation='sigmoid'))(layer1_1)
        y1_output = TimeDistributed(Dense(units=1, activation='sigmoid'), name='y1_output')(layer1_2)

        layer2_1 = TimeDistributed(Dense(units=64, activation='relu'))(rnn_part)
        dropout_1 = Dropout(0.2)(layer2_1)
        layer2_2 = TimeDistributed(Dense(units=64, activation='relu'))(dropout_1)
        layer2_3 = TimeDistributed(Dense(units=64, activation='relu'))(layer2_2)
        y2_output = TimeDistributed(Dense(units=self.features_range, activation='softmax'), name='y2_output')(layer2_3)

        # Define the model with the input layer and a list of output layers
        model = tf.keras.models.Model(inputs=input_layer, outputs=[y1_output, y2_output])

        # optimizer = tf.keras.optimizers.SGD(lr=0.001)
        optimizer = tf.keras.optimizers.Adam(clipvalue=0.5)
        model.compile(optimizer=optimizer,
                      loss={'y1_output': 'binary_crossentropy', 'y2_output': self.missed_value_loss},
                      metrics={'y1_output': 'accuracy', 'y2_output': self.missed_value_acc})
        model.summary()  # summary() prints the table itself and returns None
        return model
Example #4
    def add_lstm(self, inputs):

        if 'lstm_conf' in self.nn_config:
            lstm_conf = self.nn_config['lstm_conf']

            # When batch norm is enabled, the activation is applied after
            # BatchNormalization instead of inside the LSTM.
            activation = (None if lstm_conf['batch_norm']
                          else lstm_conf['lstm_activation'])

            # Return full sequences when a 1D-CNN block follows.
            return_seq = '1dCNN' in self.nn_config

            cell = LSTMCell(units=lstm_conf['lstm_units'],
                            activation=activation)

            if lstm_conf['method'] == 'dynamic_rnn':
                rnn_outputs1, states = dynamic_rnn(cell,
                                                   inputs,
                                                   dtype=tf.float32)
                lstm_outputs = tf.reshape(rnn_outputs1[:, -1, :],
                                          [-1, lstm_conf['lstm_units']])

            elif lstm_conf['method'] == 'keras_lstm_layer':
                lstm_outputs = LSTM(lstm_conf['lstm_units'],
                                    name="first_lstm",
                                    activation=activation,
                                    input_shape=(self.data_config['lookback'],
                                                 self.ins),
                                    return_sequences=return_seq)(inputs)

            else:
                rnn_layer = RNN(cell, return_sequences=return_seq)
                lstm_outputs = rnn_layer(inputs)  # [batch_size, neurons]
                if self.verbose > 0:
                    print(lstm_outputs.shape, 'before reshaping',
                          K.eval(tf.rank(lstm_outputs)))
                lstm_outputs = tf.reshape(lstm_outputs,
                                          [-1, lstm_conf['lstm_units']])
                if self.verbose > 0:
                    print(lstm_outputs.shape, 'after reshaping',
                          K.eval(tf.rank(lstm_outputs)))

            if lstm_conf['batch_norm']:
                rnn_outputs3 = BatchNormalization()(lstm_outputs)
                lstm_outputs = Activation('relu')(rnn_outputs3)

            if lstm_conf['dropout'] is not None:
                lstm_outputs = Dropout(lstm_conf['dropout'])(lstm_outputs)
        else:
            lstm_outputs = inputs

        return lstm_outputs
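add_lstm drives everything from self.nn_config. A hypothetical configuration covering every key the method reads (all values are illustrative):

nn_config = {
    'lstm_conf': {
        'lstm_units': 64,
        'lstm_activation': 'tanh',     # ignored when batch_norm is True
        'batch_norm': False,           # True defers activation to a post-BN ReLU
        'method': 'keras_lstm_layer',  # or 'dynamic_rnn'; any other value uses RNN(cell)
        'dropout': 0.3,                # or None to skip the Dropout layer
    },
    # the mere presence of a '1dCNN' key makes the LSTM return full sequences
}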
Example #5
def make_model(batch_size=None):
    source = Input(shape=(maxlen, ),
                   batch_size=batch_size,
                   dtype=tf.int32,
                   name='Input')
    embedding = Embedding(input_dim=max_features,
                          output_dim=embedding_size,
                          input_length=X.shape[1],
                          name='Embedding')(source)
    drop = SpatialDropout1D(0.5)(embedding)
    # rnn = Bidirectional(LSTM(lstm_out, name='LSTM', dropout=0.50, recurrent_dropout=0.50))(drop)
    # keras.layers.RNN takes a cell instance, not a unit count, and the
    # dropout arguments belong on the cell:
    rnn = RNN(LSTMCell(lstm_out, dropout=0.40, recurrent_dropout=0.40),
              name='RNN')(drop)
    # softmax so the two class probabilities sum to 1 under categorical_crossentropy
    predicted_var = Dense(2, activation='softmax', name='Output')(rnn)
    model = tf.keras.Model(inputs=[source], outputs=[predicted_var])
    model.compile(
        #optimizer='rmsprop',
        optimizer=tf.keras.optimizers.RMSprop(decay=1e-3),
        loss='categorical_crossentropy',
        metrics=['acc'])
    return model
Example #6
File: lstm.py  Project: m-serra/adr
def lstm_gaussian(batch_shape,
                  h_dim=10,
                  n_layers=2,
                  units=256,
                  reparameterize=False,
                  name=None,
                  reg_lambda=0.0,
                  **kwargs):
    def make_cell(lstm_size):
        return LSTMCell(lstm_size,
                        activation='tanh',
                        kernel_initializer='he_normal',
                        recurrent_regularizer=l2(reg_lambda))

    # ===== Define the lstm model
    lstm_cells = [make_cell(units) for _ in range(n_layers)]
    lstm = RNN(lstm_cells,
               return_sequences=True,
               return_state=True,
               name='lstm_model')
    embed_net = Dense(units=units, activation='linear')
    sample = Sample(output_dim=h_dim, reparameterization_flag=reparameterize)
    _in = Input(batch_shape=[batch_shape[0], None, batch_shape[-1]])
    initial_state = initial_state_placeholder(units,
                                              n_layers,
                                              batch_size=batch_shape[0])

    embed = TimeDistributed(embed_net)(_in)
    out = lstm(embed, initial_state=initial_state)
    h, state = out[0], out[1:]
    # z, mu, logvar = sample(h)
    z, mu, logvar = TimeDistributed(sample)(h)

    model = Model(inputs=[_in, initial_state],
                  outputs=[z, mu, logvar, state],
                  name=name)

    return model
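initial_state_placeholder is defined elsewhere in the project. A minimal sketch, assuming it exposes one (h, c) Input pair per stacked LSTMCell so the initial state can be fed as a model input:

from tensorflow.keras.layers import Input

def initial_state_placeholder(units, n_layers, batch_size=None):
    # Flat list of (h, c) Input tensors, one pair per stacked LSTMCell;
    # Keras maps the flat list onto the cells' state structure.
    states = []
    for _ in range(n_layers):
        states.append(Input(batch_shape=(batch_size, units)))  # h
        states.append(Input(batch_shape=(batch_size, units)))  # c
    return states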
Example #7
File: lstm.py  Project: m-serra/adr
def simple_lstm(batch_shape,
                h_dim=10,
                n_layers=2,
                units=256,
                name=None,
                reg_lambda=0.0,
                **kwargs):
    def make_cell(lstm_size):
        return LSTMCell(lstm_size,
                        activation='tanh',
                        kernel_initializer='glorot_uniform',
                        unit_forget_bias=False,
                        recurrent_regularizer=l2(reg_lambda))

    # ===== Define the lstm model
    lstm_cells = [make_cell(units) for _ in range(n_layers)]
    lstm = RNN(lstm_cells, return_sequences=True, return_state=True)
    embed_net = Dense(units=units, activation='linear')
    output_net = Dense(units=h_dim, activation='tanh')

    _in = Input(batch_shape=[batch_shape[0], None, batch_shape[-1]])
    initial_state = initial_state_placeholder(units,
                                              n_layers,
                                              batch_size=batch_shape[0])

    embed = TimeDistributed(embed_net)(_in)
    # embed = BatchNormalization()(embed)  # --> !!!!!!
    out = lstm(embed, initial_state=initial_state)
    predictions, state = out[0], out[1:]
    predictions = TimeDistributed(output_net)(predictions)

    model = Model(inputs=[_in, initial_state],
                  outputs=[predictions, state],
                  name=name)

    return model
Example #8
    def add_model(self, input_data, target_data=None):
        """Implements core of model that transforms input_data into predictions.

        The core transformation for this model which transforms a batch of input
        data into a batch of predictions.

        Args:
          input_data: A tensor of shape (batch_size, num_steps, time_stamps).
          target_data: A tensor of shape (batch_size, num_steps, time_stamps).
        Returns:
          predict: A tensor of shape (batch_size * num_steps, time_stamps).
        """
        with tf.variable_scope('embedding_layer'):
            input_embeddings = TimeDistributed(
                Dense(self.config.rnn_size,
                      dtype=tf.float32,
                      activity_regularizer=self.config.regularizer,
                      kernel_regularizer=self.config.regularizer,
                      bias_regularizer=self.config.regularizer),
                input_shape=(self.config.num_steps, self.config.time_stamps),
                dtype=tf.float32,
                activity_regularizer=self.config.regularizer)(
                    self.input_placeholder)
            input_embeddings = tf.keras.layers.Dropout(
                self.config.dropout)(input_embeddings)

        with tf.variable_scope('rnn_layer'):
            if self.config.model_type == 'gru':
                cell_fun = GRUCell
            elif self.config.model_type == 'lstm':
                cell_fun = LSTMCell
            else:
                cell_fun = SimpleRNNCell

            def make_cell():
                return cell_fun(self.config.rnn_size,
                                dtype=tf.float32,
                                activity_regularizer=self.config.regularizer,
                                bias_regularizer=self.config.regularizer,
                                recurrent_regularizer=self.config.regularizer,
                                kernel_regularizer=self.config.regularizer)

            # Build a fresh cell per layer; repeating one instance would make
            # all layers share the same weights.
            cell = StackedRNNCells(
                [make_cell() for _ in range(self.config.num_layers)])

            # outputs: (batch_size, num_steps, rnn_size * 2)
            outputs = Bidirectional(
                RNN(cell, return_sequences=True,
                    dtype=tf.float32))(input_embeddings)

            # output: (batch_size * (num_steps * 2), rnn_size)
            output = tf.reshape(outputs, [-1, self.config.rnn_size])
            output = tf.keras.layers.Dropout(self.config.dropout)(output)

        with tf.variable_scope('output_layer'):
            # weights: (rnn_size, time_stamps)
            weights = tf.Variable(tf.truncated_normal(
                [self.config.rnn_size, self.config.time_stamps],
                dtype=tf.float32,
            ),
                                  dtype=tf.float32)

            # bias: (time_stamps, )
            bias = tf.Variable(tf.zeros(shape=[self.config.time_stamps],
                                        dtype=tf.float32),
                               dtype=tf.float32)

            # output: (batch_size * (num_steps * 2), time_stamps)
            predict_all = tf.nn.bias_add(tf.matmul(output, weights), bias=bias)
            predict_up = tf.slice(predict_all, [0, 0], [
                self.config.batch_size * self.config.num_steps,
                self.config.time_stamps
            ])
            predict_down = tf.slice(
                predict_all,
                [self.config.batch_size * self.config.num_steps, 0], [
                    self.config.batch_size * self.config.num_steps,
                    self.config.time_stamps
                ])

            predict = (predict_up + predict_down) / 2

        return predict
Example #9
    def add_model(self, input_data):
        """Implements core of model that transforms input_data into predictions.

        The core transformation for this model which transforms a batch of input
        data into a batch of predictions.

        Args:
          input_data: A tensor of shape (batch_size, num_steps, time_stamps).
        Returns:
          predict: A tensor of shape (batch_size * num_steps, time_stamps).
        """
        with tf.variable_scope('conv_layer'):
            # add dimension, input_data: (batch_size, num_steps, time_stamps, channels=1)
            input_data = tf.expand_dims(input_data, -1)

            max_filters = self.config.num_steps

            # conv1: (batch_size, W1, H1, F1=max_filters)
            conv1 = Conv2D(filters=max_filters, kernel_size=(3, 3), padding='same', activation='relu')(input_data)
            # W2 = (W1 - F) / S + 1
            # H2 = (H1 - F) / S + 1
            # conv1: (batch_size, W2, H2, F1)
            conv1 = MaxPooling2D(pool_size=(2, 2))(conv1)

            conv1 = tf.reshape(conv1, (self.config.batch_size, self.config.num_steps, -1))

        with tf.variable_scope('embedding_layer'):
            input_embeddings = TimeDistributed(
                Dense(self.config.rnn_size, dtype=tf.float64,
                      activation='tanh',
                      activity_regularizer=self.config.regularizer,
                      kernel_regularizer=self.config.regularizer,
                      bias_regularizer=self.config.regularizer),
                input_shape=(self.config.num_steps, self.config.time_stamps),
                dtype=tf.float64, activity_regularizer=self.config.regularizer)(
                conv1)
            # embeddings: (batch_size, num_steps, rnn_size)
            input_embeddings = tf.keras.layers.Dropout(self.config.dropout)(input_embeddings)

        with tf.variable_scope('rnn_layer'):
            if self.config.model_type == 'gru':
                cell_fun = GRUCell
            elif self.config.model_type == 'lstm':
                cell_fun = LSTMCell
            else:
                cell_fun = SimpleRNNCell

            def make_cell():
                return cell_fun(self.config.rnn_size, dtype=tf.float64,
                                activity_regularizer=self.config.regularizer,
                                bias_regularizer=self.config.regularizer,
                                recurrent_regularizer=self.config.regularizer,
                                kernel_regularizer=self.config.regularizer)

            # Build a fresh cell per layer; repeating one instance would make
            # all layers share the same weights.
            cell = StackedRNNCells(
                [make_cell() for _ in range(self.config.num_layers)])

            # outputs: (batch_size, num_steps, rnn_size * 2)
            outputs = Bidirectional(RNN(cell, return_sequences=True, dtype=tf.float64))(input_embeddings)

            # output: (batch_size * (num_steps * 2), rnn_size)
            output = tf.reshape(outputs, [-1, self.config.rnn_size])
            output = tf.keras.layers.Dropout(self.config.dropout)(output)

        with tf.variable_scope('output_layer'):
            # weights: (rnn_size, time_stamps)
            weights = tf.Variable(
                tf.truncated_normal([self.config.rnn_size, self.config.time_stamps], dtype=tf.float64, ),
                dtype=tf.float64)

            # bias: (time_stamps, )
            bias = tf.Variable(tf.zeros(shape=[self.config.time_stamps], dtype=tf.float64), dtype=tf.float64)

            # output: (batch_size * (num_steps * 2), time_stamps)
            predict_all = tf.nn.bias_add(tf.matmul(output, weights), bias=bias)
            predict_all = tf.nn.tanh(predict_all)
            predict_up = tf.slice(predict_all, [0, 0],
                                  [self.config.batch_size * self.config.num_steps, self.config.time_stamps])
            predict_down = tf.slice(predict_all, [self.config.batch_size * self.config.num_steps, 0],
                                    [self.config.batch_size * self.config.num_steps, self.config.time_stamps])

            predict = (predict_up + predict_down) / 2

        self.global_step = tf.Variable(0, trainable=False, name='global_step')

        return predict
Example #10
xval = [k for k in train_val['audio_embedding']]
yval = train_val['is_turkey'].values

# Pad the audio features so that all are "10 seconds" long
x_train = pad_sequences(xtrain, maxlen=10)
x_val = pad_sequences(xval, maxlen=10)

y_train = np.asarray(ytrain)
y_val = np.asarray(yval)

# Define a basic LSTM model
model = Sequential()
model.add(BatchNormalization(input_shape=(10, 128)))
model.add(Dropout(.5))
# keras.layers.RNN expects a cell instance; use the LSTM layer directly
model.add(Bidirectional(LSTM(128, activation='relu')))
model.add(Dense(1, activation='sigmoid'))

#maybe there is something better to use, but let's use binary_crossentropy
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

#fit on a portion of the training data, and validate on the rest
model.fit(x_train, y_train,
          batch_size=300,
          epochs=4, validation_data=(x_val, y_val))

model.save('./turkeydetection-RNN.h5')
# Get accuracy of model on validation data. It's not AUC but it's something at least!
score, acc = model.evaluate(x_val, y_val, batch_size=300)
print('Validation loss: %.4f, accuracy: %.4f' % (score, acc))