Example #1
def build_neural_net(hyperparameters):
    # the size of the embedding vector
    embedding_vector_size: int = hyperparameters[2]
    # whether to use a bidirectional RNN over the embedding
    use_bidirectional: bool = hyperparameters[3]
    # the vector size representing the input sequence
    input_vector_representation_size: int = hyperparameters[4]
    # which recurrent cell to use: 'SRNN', 'LSTM' or 'GRU'
    use_SRNN_LSTM_GRU: str = hyperparameters[5]
    # whether to use a second RNN after the first
    use_second_RNN: bool = hyperparameters[6]
    # the vector size of the second RNN
    intermediate_vector_representation_size: int = hyperparameters[7]
    # rate of dropout to use - float between 0 and 1
    dropout: float = hyperparameters[8]

    model = models.Sequential()

    # Add an Embedding layer to the model, with as many inputs as terms in the vocab,
    # and as many nodes as defined by the embedding_vector_size hyperparameter
    model.add(layers.Embedding(len(vocab), embedding_vector_size, input_length=None, mask_zero=True))

    # Add the first RNN layer. If the use_bidirectional hyperparameter is set to True,
    # then use a bidirectional implementation
    if use_bidirectional:
        # Add the first RNN layer as a SimpleRNN, LSTM or GRU depending on the use_SRNN_LSTM_GRU hyperparameter,
        # apply dropout according to the dropout hyperparameter,
        # and return sequences from the first layer depending on whether a second recurrent layer will be used
        if use_SRNN_LSTM_GRU == 'SRNN':
            model.add(layers.Bidirectional(layers.SimpleRNN(input_vector_representation_size, dropout=dropout,
                                                            return_sequences=use_second_RNN)))
        elif use_SRNN_LSTM_GRU == 'LSTM':
            model.add(layers.Bidirectional(layers.LSTM(input_vector_representation_size, dropout=dropout,
                                                       return_sequences=use_second_RNN)))
        elif use_SRNN_LSTM_GRU == 'GRU':
            model.add(layers.Bidirectional(layers.GRU(input_vector_representation_size, dropout=dropout,
                                                      return_sequences=use_second_RNN)))
    else:
        if use_SRNN_LSTM_GRU == 'SRNN':
            model.add(layers.SimpleRNN(input_vector_representation_size, dropout=dropout,
                                       return_sequences=use_second_RNN))
        elif use_SRNN_LSTM_GRU == 'LSTM':
            model.add(layers.LSTM(input_vector_representation_size, dropout=dropout,
                                  return_sequences=use_second_RNN))
        elif use_SRNN_LSTM_GRU == 'GRU':
            model.add(layers.GRU(input_vector_representation_size, dropout=dropout,
                                 return_sequences=use_second_RNN))

    if use_second_RNN:
        if use_SRNN_LSTM_GRU == 'SRNN':
            model.add(layers.SimpleRNN(intermediate_vector_representation_size, dropout=dropout))
        elif use_SRNN_LSTM_GRU == 'LSTM':
            model.add(layers.LSTM(intermediate_vector_representation_size, dropout=dropout))
        elif use_SRNN_LSTM_GRU == 'GRU':
            model.add(layers.GRU(intermediate_vector_representation_size, dropout=dropout))

    # softmax layer
    model.add(layers.Dense(19, activation='softmax'))

    return model
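As a usage sketch, one way the builder above might be called; the hyperparameter vector and compile settings below are assumptions (indices 0-1 are never read by the function, and a vocab mapping must already exist in scope, as the function itself expects):

# hypothetical hyperparameter vector; build_neural_net only reads indices 2-8
hyperparameters = [
    None, None,  # slots 0-1, consumed elsewhere
    128,         # embedding_vector_size
    True,        # use_bidirectional
    64,          # input_vector_representation_size
    'LSTM',      # use_SRNN_LSTM_GRU
    False,       # use_second_RNN
    32,          # intermediate_vector_representation_size (ignored when use_second_RNN is False)
    0.2,         # dropout
]
model = build_neural_net(hyperparameters)
model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['acc'])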
Example #2
def test_TimeDistributed_with_masked_embedding_and_unspecified_shape():
    # test with unspecified shape and Embeddings with mask_zero
    model = Sequential()
    model.add(
        wrappers.TimeDistributed(layers.Embedding(5, 6, mask_zero=True),
                                 input_shape=(None, None)))
    # the shape so far: (N, t_1, t_2, 6)
    model.add(
        wrappers.TimeDistributed(layers.SimpleRNN(7, return_sequences=True)))
    model.add(
        wrappers.TimeDistributed(layers.SimpleRNN(8, return_sequences=False)))
    model.add(layers.SimpleRNN(1, return_sequences=False))
    model.compile(optimizer='rmsprop', loss='mse')
    model_input = np.random.randint(low=1,
                                    high=5,
                                    size=(10, 3, 4),
                                    dtype='int32')
    for i in range(4):
        model_input[i, i:, i:] = 0
    model.fit(model_input, np.random.random((10, 1)), epochs=1, batch_size=10)
    mask_outputs = [model.layers[0].compute_mask(model.input)]
    for layer in model.layers[1:]:
        mask_outputs.append(layer.compute_mask(layer.input, mask_outputs[-1]))
    func = K.function([model.input], mask_outputs[:-1])
    mask_outputs_val = func([model_input])
    ref_mask_val_0 = model_input > 0  # embedding layer
    ref_mask_val_1 = ref_mask_val_0  # first RNN layer
    ref_mask_val_2 = np.any(ref_mask_val_1, axis=-1)  # second RNN layer
    ref_mask_val = [ref_mask_val_0, ref_mask_val_1, ref_mask_val_2]
    for i in range(3):
        assert np.array_equal(mask_outputs_val[i], ref_mask_val[i])
    assert mask_outputs[-1] is None  # final layer
Example #3
def RNN_model2(params, multi_gpu):
    model = models.Sequential()
    model.add(
        layers.Bidirectional(layers.SimpleRNN(700,
                                              activation='relu',
                                              return_sequences=True,
                                              recurrent_dropout=0.3),
                             input_shape=(11, 123)))
    model.add(
        layers.Bidirectional(
            layers.SimpleRNN(200,
                             activation='relu',
                             dropout=0.3,
                             recurrent_dropout=0.3)))
    model.add(layers.Dropout(0.3))
    model.add(layers.BatchNormalization())
    model.add(layers.Dense(700, activation='relu'))
    model.add(layers.Dropout(0.3))
    model.add(layers.Dense(700, activation='relu'))
    model.add(layers.Dense(40, activation='softmax'))
    opt = optimizers.Adam(lr=0.0005)  # half the standard learning rate
    if multi_gpu:
        print("--------------- MULTIPLE GPU ---------------")
        model = multi_gpu_model(model, gpus=2)
    model.compile(loss='categorical_crossentropy',
                  optimizer=opt,
                  metrics=['acc'])
    return model
Example #4
def build_model_rnn(embed_len):
    """
    Builds the RNN model.
    :param embed_len: length of the embedding vector
    :return:
    """
    model = Sequential()
    model.add(layers.SimpleRNN(embed_len, return_sequences=True))
    model.add(layers.SimpleRNN(embed_len, return_sequences=True))
    model.add(layers.SimpleRNN(embed_len))
    model.add(layers.Dense(1, activation='sigmoid'))
    model.compile(optimizer='rmsprop',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model
Example #5
def test_Bidirectional_updates():
    x = Input(shape=(3, 2))
    layer = wrappers.Bidirectional(layers.SimpleRNN(3))
    layer.forward_layer.add_update(0, inputs=x)
    layer.forward_layer.add_update(1, inputs=None)
    layer.backward_layer.add_update(0, inputs=x)
    layer.backward_layer.add_update(1, inputs=None)
Example #6
def cnn_2x_rnn_siamese(voc_size, max_len, dropout=0.5):
    """Two siamese branches, each embedding a statement.
    Binary classifier on top.
    Args:
      voc_size: size of the vocabulary for the input statements.
      max_len: maximum length for the input statements.
      dropout: dropout rate applied before the final classifier.
    Returns:
      A Keras model instance.
    """
    pivot_input = layers.Input(shape=(max_len, ), dtype='int32')
    statement_input = layers.Input(shape=(max_len, ), dtype='int32')

    x = layers.Embedding(output_dim=256,
                         input_dim=voc_size,
                         input_length=max_len)(pivot_input)
    x = layers.Convolution1D(256, 7, activation='relu')(x)
    x = layers.MaxPooling1D(3)(x)
    x = layers.Convolution1D(256, 7, activation='relu')(x)
    x = layers.MaxPooling1D(5)(x)
    embedded_pivot = layers.SimpleRNN(256)(x)

    encoder_model = Model(pivot_input, embedded_pivot)
    embedded_statement = encoder_model(statement_input)

    concat = layers.merge([embedded_pivot, embedded_statement], mode='concat')
    x = layers.Dense(256, activation='relu')(concat)
    x = layers.Dropout(dropout)(x)
    prediction = layers.Dense(1, activation='sigmoid')(x)

    model = Model([pivot_input, statement_input], prediction)
    return model
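A minimal training sketch for the siamese model, assuming the Keras 1.x API this snippet targets (its layers.merge call predates Keras 2); the vocabulary size, sequence length, and random data are placeholders:

import numpy as np

model = cnn_2x_rnn_siamese(voc_size=10000, max_len=100)
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['accuracy'])

pivots = np.random.randint(1, 10000, size=(32, 100))      # batch of pivot statements
statements = np.random.randint(1, 10000, size=(32, 100))  # batch of paired statements
labels = np.random.randint(0, 2, size=(32, 1))            # binary match labels
model.fit([pivots, statements], labels, nb_epoch=1)       # Keras 1.x spelling of epochs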
Example #7
def recurrent(output_dim, model='keras_lstm', activation='tanh',
              regularizer=None, dropout=0., **kwargs):
    if model == 'rnn':
        return keras_layers.SimpleRNN(output_dim, activation=activation,
                                      W_regularizer=regularizer,
                                      U_regularizer=regularizer,
                                      dropout_W=dropout, dropout_U=dropout, consume_less='gpu',
                                      **kwargs)
    if model == 'gru':
        return keras_layers.GRU(output_dim, activation=activation,
                                W_regularizer=regularizer,
                                U_regularizer=regularizer, dropout_W=dropout,
                                dropout_U=dropout,
                                consume_less='gpu', **kwargs)
    if model == 'keras_lstm':
        return keras_layers.LSTM(output_dim, activation=activation,
                                 W_regularizer=regularizer,
                                 U_regularizer=regularizer,
                                 dropout_W=dropout, dropout_U=dropout,
                                 consume_less='gpu', **kwargs)
    if model == 'rhn':
        return RHN(output_dim, depth=1,
                   bias_init=highway_bias_initializer,
                   activation=activation, layer_norm=False, ln_gain_init='one',
                   ln_bias_init='zero', mi=False,
                   W_regularizer=regularizer, U_regularizer=regularizer,
                   dropout_W=dropout, dropout_U=dropout, consume_less='gpu',
                   **kwargs)

    if model == 'lstm':
        return LSTM(output_dim, activation=activation,
                    W_regularizer=regularizer, U_regularizer=regularizer,
                    dropout_W=dropout, dropout_U=dropout,
                    consume_less='gpu', **kwargs)
    raise ValueError('model %s was not recognized' % model)
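A usage sketch for the factory above; the argument names it forwards (W_regularizer, dropout_W, consume_less) belong to the Keras 1.x API, so the call below assumes that version, and the unit count and decay strength are illustrative:

from keras.regularizers import l2

# hypothetical: a 128-unit GRU with L2 weight decay and 20% input/recurrent dropout
layer = recurrent(128, model='gru', regularizer=l2(1e-4), dropout=0.2,
                  return_sequences=True)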
Example #8
    def build(self,
              rows,
              cols,
              output_dim,
              loss='binary_crossentropy',
              optimizer='adadelta',
              metrics='accuracy'):
        self.param_new(rows, cols, output_dim)

        self.model = models.Sequential()
        if self.model_type == 'LSTM':
            self.model.add(
                layers.LSTM(20,
                            return_sequences=False,
                            input_shape=(self.rows, self.cols)))
        elif self.model_type == 'GRU':
            self.model.add(
                layers.GRU(20,
                           return_sequences=False,
                           input_shape=(self.rows, self.cols)))
        elif self.model_type == 'SimpleRNN':
            self.model.add(
                layers.SimpleRNN(20,
                                 return_sequences=False,
                                 input_shape=(self.rows, self.cols)))
        else:
            raise NotImplementedError()
        self.model.add(layers.Dense(self.output_dim))
        self.model.add(layers.Activation('sigmoid'))

        self.model.compile(loss=loss, optimizer=optimizer, metrics=[metrics])
Example #9
def generate_model(input_size):
    model = models.Sequential()
    model.add(layers.Conv2D(8, (3, 3), activation='relu',
                            input_shape=input_size))
    model.add(layers.MaxPooling2D((2, 3)))
    model.add(layers.Conv2D(8, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((2, 3)))
    model.add(layers.Conv2D(8, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((2, 3)))
    model.add(layers.Conv2D(16, (3, 3), activation='relu'))
    model.add(layers.MaxPooling2D((2, 3)))
    # model.add(layers.Conv2D(8, (3, 3), activation='relu'))
    # model.add(layers.MaxPooling2D((2, 2)))
    
    # model.add(layers.Flatten())
    # model.add(layers.Dense(512, activation='relu'))
    # model.add(layers.Dense(128, activation='relu'))
    # model.add(layers.Reshape(target_shape=(16, 3*16)))
    # model.add(layers.LSTM(32))
    model.add(layers.Reshape(target_shape=(16, 23*6)))
    model.add(layers.SimpleRNN(64))
    model.add(layers.Dense(1, activation='sigmoid'))

    model.trainable = False

    model.compile(optimizer=optimizers.RMSprop(lr=1e-4),
                  loss='binary_crossentropy',
                  metrics=['acc'])

    # plot_model(model, to_file='model7.png', show_shapes=True)

    model.summary()

    return model
Example #10
def build_model_rnn_crf(vocabulary_words,
                        embedding_matrix,
                        embedding_dim,
                        nb_classes,
                        unit_multiplier=1,
                        input_dropout=0.0,
                        dropout=0.0,
                        recurrent_dropout=0.0,
                        output_dropout=0.0,
                        optimizer='rmsprop'):
    input = models.Input(shape=(None, ))
    model = layers.Embedding(len(vocabulary_words) + 2,
                             embedding_dim,
                             weights=[embedding_matrix],
                             trainable=True,
                             mask_zero=True)(input)
    model = layers.Dropout(input_dropout)(model)
    model = layers.Bidirectional(
        layers.SimpleRNN(unit_multiplier * (nb_classes + 1),
                         return_sequences=True,
                         dropout=dropout,
                         recurrent_dropout=recurrent_dropout))(model)
    model = layers.Dropout(output_dropout)(model)
    crf = CRF((nb_classes + 1))  # CRF layer
    out = crf(model)  # output
    model = models.Model(input, out)
    model.compile(optimizer=optimizer,
                  loss=crf.loss_function,
                  metrics=[crf.accuracy])
    return model
Example #11
    def build_regression_model(self):
        winit = keras.initializers.RandomUniform(minval=-0.05, maxval=0.05, seed=self.seed)
        binit = keras.initializers.RandomUniform(minval=-0.05, maxval=0.05, seed=self.seed)
        model = keras.Sequential([
            # layers.Dense(30, input_shape=(30,self.steps)),
            layers.SimpleRNN(
                units=240,
                input_shape=(self.input_size, self.steps),
                # batch_size=self.batch_size,
                kernel_initializer=winit,
                bias_initializer=binit,
                activation='tanh'
                # dropout=self.drop_per,
                # input_shape=self.input_shape
            ),
            layers.Dense(111, activation='tanh'),
            layers.Dense(self.output_size,
                         activation='linear')
        ])
        sgd = keras.optimizers.SGD(learning_rate=self.learn_rate)
        model.compile(
            optimizer=sgd,
            loss=keras.losses.mean_squared_error,
            metrics=['mean_squared_error']
        )
        return model
Example #12
def constructor(model_type="MLP",
                history=720, 
                hindcast=1,
                input_vars=2, 
                loss="mse", 
                optimizer=optimizers.Adam()):
    
    # model instance initialization
    model = Sequential()
    
    # add a core layer
    if model_type == "MLP":
        model.add(layers.Dense(64, input_shape=(history * input_vars,), activation="relu"))
    elif model_type == "RNN":
        model.add(layers.SimpleRNN(64, return_sequences=False, input_shape=(history, input_vars)))
        optimizer = optimizers.Adam(clipvalue=0.5)
    elif model_type == "GRU":
        model.add(layers.GRU(64, return_sequences=False, input_shape=(history, input_vars)))
    elif model_type == "LSTM":
        model.add(layers.LSTM(64, return_sequences=False, input_shape=(history, input_vars)))
    
    # add the Dense layer on top
    model.add(layers.Dense(hindcast))
    
    # compilation
    model.compile(loss=loss, optimizer=optimizer)

    return model
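A usage sketch under the function's own defaults (history=720 timesteps, input_vars=2 features, hindcast=1 target); the random arrays are placeholders:

import numpy as np

model = constructor(model_type="LSTM")
X = np.random.random((8, 720, 2))  # (samples, history, input_vars)
y = np.random.random((8, 1))       # (samples, hindcast)
model.fit(X, y, epochs=1, verbose=0)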
Example #13
    def build_model(self, hp):
        model = keras.Sequential()

        model.add(layers.SimpleRNN(
            hp.Int('rnn_units', min_value=self.input_min, max_value=self.input_max, step=self.input_step),
            input_shape=(self.input_size, self.steps),
            activation=hp.Choice('rnn_activation', values=['tanh', 'sigmoid', 'softmax'])
        ))

        for ii in range(hp.Int('num_h-layers', self.min_hlayers, self.max_hlayers)):
            model.add(
                layers.Dense(
                    hp.Int(f'Dense_{ii}_units', min_value=self.hid_min, max_value=self.hid_max, step=self.hid_step),
                    activation=hp.Choice(f'Dense_{ii}_activation', values=['tanh', 'sigmoid', 'softmax'])
                )
            )

        model.add(layers.Dense(1, activation='linear'))

        # keras.optimizers.SGD(learning_rate=1e-2, momentum=0, nesterov=False)
        # keras.optimizers.Adagrad(learning_rate=___, initial_accumulator_value=1e-1, epsilon=1e-7)
        # keras.optimizers.Adam(learning_rate=1e-3, beta_1=0.9, beta_2=0.999, epsilon=1e-7, amsgrad=False)
        # keras.optimizers.RMSprop(learning_rate=1e-3, rho=0.9, momentum=0.0, epsilon=1e-7, centered=False)
        # keras.optimizers.Adadelta(learning_rate=1e-3, rho=0.95, epsilon=1e-7)
        # keras.optimizers.Adamax(learning_rate=1e-3, beta_1=0.9, beta_2=0.999, epsilon=1e-7)  # may be superior with embeddings

        model = self.optim_choice(hp, model)

        return model
Example #14
def test_temporal_classification_functional():
    '''
    Classify temporal sequences of float numbers
    of length 3 into 2 classes using a
    single layer of SimpleRNN units and softmax applied
    to the last activations of the units
    '''
    np.random.seed(1337)
    (x_train, y_train), (x_test, y_test) = get_test_data(num_train=200,
                                                         num_test=20,
                                                         input_shape=(3, 4),
                                                         classification=True,
                                                         num_classes=2)
    y_train = to_categorical(y_train)
    y_test = to_categorical(y_test)

    inputs = layers.Input(shape=(x_train.shape[1], x_train.shape[2]))
    x = layers.SimpleRNN(8)(inputs)
    outputs = layers.Dense(y_train.shape[-1], activation='softmax')(x)
    model = keras.models.Model(inputs, outputs)
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    history = model.fit(x_train, y_train, epochs=5, batch_size=10,
                        validation_data=(x_test, y_test),
                        verbose=0)
    assert(history.history['accuracy'][-1] >= 0.75)
Example #15
def build_model_rnn(vocabulary_words,
                    embedding_matrix,
                    embedding_dim,
                    nb_classes,
                    unit_multiplier=1,
                    input_dropout=0.0,
                    dropout=0.0,
                    recurrent_dropout=0.0,
                    output_dropout=0.0,
                    optimizer='rmsprop'):
    model = models.Sequential()
    model.add(
        layers.Embedding(len(vocabulary_words) + 2,
                         embedding_dim,
                         weights=[embedding_matrix],
                         trainable=True,
                         mask_zero=True))
    model.add(layers.Dropout(input_dropout))
    model.add(
        layers.Bidirectional(
            layers.SimpleRNN(unit_multiplier * (nb_classes + 1),
                             return_sequences=True,
                             dropout=dropout,
                             recurrent_dropout=recurrent_dropout)))
    model.add(layers.Dropout(output_dropout))
    model.add(layers.Dense(nb_classes + 1, activation='softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['acc'])
    return model
Example #16
def test_Bidirectional_losses():
    x = Input(shape=(3, 2))
    layer = wrappers.Bidirectional(
        layers.SimpleRNN(3, kernel_regularizer='l1', bias_regularizer='l1'))
    _ = layer(x)
    layer.forward_layer.add_loss(lambda: 0)
    layer.forward_layer.add_loss(lambda: 1)
    layer.backward_layer.add_loss(lambda: 0)
    layer.backward_layer.add_loss(lambda: 1)
Example #17
def test_Bidirectional_trainable():
    # test layers that need learning_phase to be set
    x = Input(shape=(3, 2))
    layer = wrappers.Bidirectional(layers.SimpleRNN(3))
    _ = layer(x)
    assert len(layer.trainable_weights) == 6
    layer.trainable = False
    assert len(layer.trainable_weights) == 0
    layer.trainable = True
    assert len(layer.trainable_weights) == 6
Example #18
def getModel():
    model = models.Sequential()
    model.add(layers.Embedding(max_features, 32))  # embedding layer: vocabulary of max_features tokens mapped to 32-dim vectors (10000, 32)
    model.add(layers.SimpleRNN(32))
    model.add(layers.Dense(1, activation='sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='rmsprop',
                  metrics=['acc'])

    return model
Example #19
    def build(self):
        self.model.add(
            layers.SimpleRNN(
                units=self.hidden_neurons,
                input_shape=self.input_shape,
                activation='tanh',
                kernel_initializer='random_uniform',
            ))
        self.model.add(layers.Dense(self.output_neurons, activation='sigmoid'))
        self.model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])

        print("Successfully constructed networks.")
Example #20
    def test_generator_dynamic_shapes(self):

        x = [
            "I think juice is great",
            "unknown is the best language since slicedbread",
            "a a a a a a a",
            "matmul",
            "Yaks are also quite nice",
        ]
        y = [1, 0, 0, 1, 1]

        vocab = {
            word: i + 1
            for i, word in enumerate(
                sorted(set(itertools.chain(*[i.split() for i in x]))))
        }

        def data_gen(batch_size=2):
            np.random.seed(0)
            data = list(zip(x, y)) * 10
            np.random.shuffle(data)

            def pack_and_pad(queue):
                x = [[vocab[j] for j in i[0].split()] for i in queue]
                pad_len = max(len(i) for i in x)
                x = np.array([i + [0] * (pad_len - len(i)) for i in x])
                y = np.array([i[1] for i in queue])
                del queue[:]
                return x, y[:, np.newaxis]

            queue = []
            for i, element in enumerate(data):
                queue.append(element)
                if not (i + 1) % batch_size:
                    yield pack_and_pad(queue)

            if queue:
                # Last partial batch
                yield pack_and_pad(queue)

        model = test_utils.get_model_from_layers(
            [
                layers_module.Embedding(input_dim=len(vocab) + 1,
                                        output_dim=4),
                layers_module.SimpleRNN(units=1),
                layers_module.Activation("sigmoid"),
            ],
            input_shape=(None, ),
        )

        model.compile(loss=losses.binary_crossentropy, optimizer="sgd")
        model.fit(data_gen(), epochs=1, steps_per_epoch=5)
Example #21
def simple_rnn_optimization(x_train, y_train, x_test, y_test, params):
    """Randomized search to optimize parameters of Neural Network."""
    optimization_model = models.Sequential()
    optimization_model.add(
        layers.SimpleRNN(params['units'], return_sequences=True))
    optimization_model.add(
        layers.SimpleRNN(params['units'], return_sequences=False))
    optimization_model.add(layers.Dropout(0.5))
    optimization_model.add(layers.Dense(params['dense'], activation=relu))
    optimization_model.add(layers.Dense(params['dense'], activation=relu))
    optimization_model.add(layers.Dense(int(num_classes),
                                        activation='softmax'))
    optimization_model.compile(
        optimizer=params['optimizer'],
        loss=losses.CategoricalCrossentropy(),
        metrics=['accuracy', talos.utils.metrics.f1score])
    history = optimization_model.fit(x_train,
                                     y_train,
                                     batch_size=None,
                                     epochs=params['epoch'],
                                     validation_data=(x_test, y_test))
    return history, optimization_model
Example #22
def get_model():
    """Returns the model."""
    model = Sequential()
    model.add(
        layers.Embedding(TOKENIZER_NUM_WORDS,
                         EMBEDDING_SIZE,
                         input_length=TEXT_SEQUENCE_LENGTH))
    model.add(layers.SimpleRNN(32))
    model.add(layers.Dense(1, activation='sigmoid'))
    model.compile(optimizer='rmsprop',
                  loss='binary_crossentropy',
                  metrics=['acc'])
    return model
Example #23
def RNNOnly(dimFeature):

    # RNN model
    model = kmodels.Sequential()
    model.add(
        klayers.SimpleRNN(50,
                          input_shape=(1, dimFeature),
                          dropout=0.2,
                          recurrent_dropout=0.2))
    model.add(klayers.Dense(50, activation='relu'))
    model.add(klayers.Dense(1, activation='sigmoid'))

    return model
Example #24
def test_Bidirectional_updates():
    x = Input(shape=(3, 2))
    layer = wrappers.Bidirectional(layers.SimpleRNN(3))
    assert len(layer.updates) == 0
    assert len(layer.get_updates_for(None)) == 0
    assert len(layer.get_updates_for(x)) == 0
    layer.forward_layer.add_update(0, inputs=x)
    layer.forward_layer.add_update(1, inputs=None)
    layer.backward_layer.add_update(0, inputs=x)
    layer.backward_layer.add_update(1, inputs=None)
    assert len(layer.updates) == 4
    assert len(layer.get_updates_for(None)) == 2
    assert len(layer.get_updates_for(x)) == 2
Example #25
def build_RSS():
    model = Sequential()
    model.add(
        layers.Embedding(len(dictionary) + 2,
                         100,
                         mask_zero=True,
                         input_length=max_seq_length))
    model.layers[0].set_weights([embedding_matrix])
    model.layers[0].trainable = True
    model.add(layers.SimpleRNN(32, return_sequences=True))
    model.add(layers.Dense(len(Y_cat) + 1, activation='softmax'))

    return model
Example #26
    def build_model(self, lstm=False):
        """
        output types:
        
        1. The full sequences of successive outputs for each timestep 
        (a 3D tensor of shape (batch_size, timesteps, output_features)), 
        
        2. Only the last output for each input sequence 
        (a 2D tensor of shape (batch_size, output_features)). 
        
        <= controlled by the return_sequences constructor argument.
        (default value == False)
        """
        self.model = models.Sequential()

        # input_dim: int > 0. Size of the vocabulary, i.e. maximum integer index + 1.
        # output_dim: int >= 0. Dimension of the dense embedding
        self.model.add(layers.Embedding(input_dim=10000, output_dim=32))

        if lstm:
            #layers.LSTM: Long Short-Term Memory layer - Hochreiter 1997.
            self.model.add(layers.LSTM(units=32, return_sequences=True))
            self.model.add(layers.LSTM(units=32, return_sequences=True))
            self.model.add(layers.LSTM(units=32))

        else:
            #layers.SimpleRNN: Fully-connected RNN where the output is to be fed back to input.
            #units: Positive integer, dimensionality of the output space.
            #https://keras.io/layers/recurrent/
            self.model.add(layers.SimpleRNN(units=32, return_sequences=True))
            self.model.add(layers.SimpleRNN(units=32, return_sequences=True))
            self.model.add(layers.SimpleRNN(units=32))

        self.model.add(layers.Dense(1, activation='sigmoid'))
        self.model.compile(optimizer=optimizers.RMSprop(lr=0.001),
                           loss='binary_crossentropy',
                           metrics=['acc'])
        self.model.summary()
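To make the docstring's two output types concrete, a minimal shape demo (tf.keras assumed; layer sizes and random input are illustrative):

import numpy as np
from tensorflow.keras import layers

x = np.random.random((4, 10, 8)).astype('float32')  # (batch_size, timesteps, input_features)
full = layers.SimpleRNN(32, return_sequences=True)(x)
last = layers.SimpleRNN(32)(x)                      # return_sequences defaults to False
print(full.shape)  # (4, 10, 32): the full sequence of outputs
print(last.shape)  # (4, 32): only the last timestep's output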
Example #27
def simple_rnn(train_X=None,
               input_shape=None,
               optimizer="rmsprop",
               loss="binary_crossentropy",
               metrics=utils.f1):

    model = models.Sequential()
    if train_X is None and input_shape is None:
        model.add(Embedding(19498, 32, input_length=51))
        model.add(layers.SimpleRNN(50))
    elif input_shape is not None:
        model.add(layers.SimpleRNN(50, input_shape=input_shape))
    else:
        model.add(
            layers.SimpleRNN(50,
                             input_shape=(train_X.shape[1], train_X.shape[2])))
    model.add(layers.Dense(1, activation="sigmoid"))

    model.compile(optimizer=optimizer,
                  loss=loss,
                  metrics=[metrics, 'binary_accuracy', 'accuracy'])

    return model
Example #28
def build_model_rnn(input_shape):
    """
    Builds the RNN model.
    :return:
    """
    model = models.Sequential()
    # model.add(layers.Dense(16, activation='relu', input_shape=(input_shape,)))
    # model.add(layers.Reshape((1, 16)))
    model.add(layers.SimpleRNN(32))
    model.add(layers.Dense(1, activation='sigmoid'))
    model.compile(optimizer='rmsprop',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model
Example #29
def test_Bidirectional_losses():
    x = Input(shape=(3, 2))
    layer = wrappers.Bidirectional(
        layers.SimpleRNN(3, kernel_regularizer='l1', bias_regularizer='l1'))
    _ = layer(x)
    assert len(layer.losses) == 4
    assert len(layer.get_losses_for(None)) == 4
    assert len(layer.get_losses_for(x)) == 0
    layer.forward_layer.add_loss(0, inputs=x)
    layer.forward_layer.add_loss(1, inputs=None)
    layer.backward_layer.add_loss(0, inputs=x)
    layer.backward_layer.add_loss(1, inputs=None)
    assert len(layer.losses) == 8
    assert len(layer.get_losses_for(None)) == 6
    assert len(layer.get_losses_for(x)) == 2
Example #30
def trainSimpleRNN(H, word_length, binary_dim, x_trn, y_trn, x_val, y_val):
    model = Sequential()
    model.add(layers.SimpleRNN(H, input_shape=(word_length, binary_dim)))
    model.add(layers.Dense(10, activation='softmax'))

    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    model.fit(x_trn,
              y_trn,
              epochs=3 * H,
              batch_size=3 * H,
              validation_data=(x_val, y_val))

    return model