Example #1
def build_model_1(embedding_matrix, one_hot_shape):
    words = Input(shape=(MAX_LEN, ))
    x = Embedding(*embedding_matrix.shape,
                  weights=[embedding_matrix],
                  trainable=False)(words)
    x = Bidirectional(LSTM(LSTM_UNITS, return_sequences=True),
                      merge_mode='concat')(x)
    x = SpatialDropout1D(rate=0.3)(x)
    #x = Bidirectional(LSTM(LSTM_UNITS, return_sequences=True), merge_mode='ave')(x)
    #x = SpatialDropout1D(rate=0.3)(x)

    # x = GlobalAveragePooling1D()(x)  # this layer averages the outputs of the Bidirectional layer over time

    x = concatenate([
        GlobalMaxPooling1D()(x),
        GlobalAveragePooling1D()(x),
    ])

    summary = Input(shape=(MAX_LEN, ))
    x_aux = Embedding(*embedding_matrix.shape,
                      weights=[embedding_matrix],
                      trainable=False)(summary)
    x_aux = Bidirectional(LSTM(LSTM_UNITS, return_sequences=True),
                          merge_mode='concat')(x_aux)
    x_aux = SpatialDropout1D(rate=0.3)(x_aux)
    #x_aux = Bidirectional(LSTM(LSTM_UNITS, return_sequences=True), merge_mode='ave')(x_aux)
    #x_aux = SpatialDropout1D(rate=0.3)(x_aux)

    # x_aux = GlobalAveragePooling1D()(x_aux)
    x_aux = concatenate([
        GlobalMaxPooling1D()(x_aux),
        GlobalAveragePooling1D()(x_aux),
    ])

    one_hot = Input(shape=(one_hot_shape, ))
    hidden = concatenate([x, x_aux, one_hot])

    hidden = Dense(400, activation='relu')(hidden)
    hidden = Dropout(0.4)(hidden)
    hidden = Dense(400, activation='relu')(hidden)
    hidden = Dropout(0.4)(hidden)
    hidden = Dense(300, activation='relu')(hidden)
    hidden = Dropout(0.4)(hidden)
    hidden = Dense(300, activation='relu')(hidden)
    hidden = Dropout(0.4)(hidden)
    hidden = Dense(100, activation='relu')(hidden)
    result = Dense(1, activation='linear')(hidden)

    model = Model(inputs=[words, summary, one_hot], outputs=[result])
    # adam = keras.optimizers.Adam(lr=0.0001, clipnorm=1.0, clipvalue=0.5)
    model.compile(loss='mse', optimizer='adam')

    return model
Example #2
    def get_model(self):
        input_current = Input((self.maxlen, ))
        input_left = Input((self.maxlen, ))
        input_right = Input((self.maxlen, ))

        embedder = Embedding(self.max_features,
                             self.embedding_dims,
                             input_length=self.maxlen)
        embedding_current = embedder(input_current)
        embedding_left = embedder(input_left)
        embedding_right = embedder(input_right)

        x_left = SimpleRNN(128, return_sequences=True)(embedding_left)
        x_right = SimpleRNN(128, return_sequences=True,
                            go_backwards=True)(embedding_right)
        x_right = Lambda(lambda x: K.reverse(x, axes=1))(x_right)
        x = Concatenate(axis=2)([x_left, embedding_current, x_right])

        x = Conv1D(64, kernel_size=1, activation='tanh')(x)
        x = GlobalMaxPooling1D()(x)

        output = Dense(self.class_num, activation=self.last_activation)(x)
        model = Model(inputs=[input_current, input_left, input_right],
                      outputs=output)
        return model
Example #3
def make_cnn_model(vocab_size=10000, embed_dim=8, input_seq_length=20):
    """
    I am the builder function for the CNN Model.
    :param vocab_size: size of the embedding vocabulary; should match the vectorizer's vocabulary size
    :param embed_dim: how many dimensions to use for the vector embedding
    :param input_seq_length: how long the sequence of inputs will be
    :return: Keras Model
    """
    x = inp = Input(shape=(None, ), dtype="int64")
    x = Embedding(
        input_dim=vocab_size,
        output_dim=embed_dim,
        input_length=input_seq_length,
    )(x)
    x = Conv1D(filters=64, kernel_size=3, strides=2, activation="linear")(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv1D(filters=64, kernel_size=3, strides=2, activation="linear")(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = Conv1D(filters=64, kernel_size=3, strides=2, activation="linear")(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    x = GlobalMaxPooling1D()(x)
    x = Dense(units=128, activation="linear")(x)
    x = BatchNormalization()(x)
    x = LeakyReLU()(x)
    out = Dense(1, activation="sigmoid")(x)
    return Model(inputs=[inp], outputs=[out], name="cnn_model")
Example #4
 def pooling_blend(self, input):
     avg_pool = GlobalAveragePooling1D()(input)
     if self.top_k > 1:
         max_pool = Lambda(self._top_k)(input)
     else:
         max_pool = GlobalMaxPooling1D()(input)
     conc = Concatenate()([avg_pool, max_pool])
     return conc
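
The pooling_blend method above calls a self._top_k helper that is not part of the snippet. A minimal sketch of what such a k-max-pooling method could look like on the same class, assuming a TensorFlow backend and a top_k attribute (hypothetical code, not from the original source):

import tensorflow as tf

def _top_k(self, x):
    # k-max pooling sketch: keep the top_k largest activations per feature map
    # over the time axis and flatten them to (batch, channels * top_k)
    k_max = tf.nn.top_k(tf.transpose(x, [0, 2, 1]), k=self.top_k, sorted=True).values
    return tf.reshape(k_max, (-1, int(x.shape[-1]) * self.top_k))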
Example #5
    def init_model(self, config):

        input_shape = config['max_len']
        num_classes = config['num_classes']

        inputs = Input(shape=(input_shape, 96))
        x = inputs
        cnn1 = Conv1D(50,
                      kernel_size=1,
                      strides=1,
                      padding='same',
                      kernel_initializer='he_normal')(x)
        cnn1 = BatchNormalization(axis=-1)(cnn1)
        cnn1 = LeakyReLU()(cnn1)
        cnn1 = GlobalMaxPooling1D()(
            cnn1)  # CNN_Dynamic_MaxPooling(cnn1,50,2,2)

        cnn2 = Conv1D(50,
                      kernel_size=3,
                      strides=1,
                      padding='same',
                      kernel_initializer='he_normal')(x)
        cnn2 = BatchNormalization(axis=-1)(cnn2)
        cnn2 = LeakyReLU()(cnn2)
        cnn2 = GlobalMaxPooling1D()(cnn2)

        cnn3 = Conv1D(50,
                      kernel_size=5,
                      strides=1,
                      padding='same',
                      kernel_initializer='he_normal')(x)
        cnn3 = BatchNormalization(axis=-1)(cnn3)
        cnn3 = LeakyReLU()(cnn3)
        cnn3 = GlobalMaxPooling1D()(cnn3)
        x = concatenate([cnn1, cnn2, cnn3], axis=-1)

        x = Dense(units=num_classes, activation='softmax')(x)
        model = TFModel(inputs=inputs, outputs=x)
        opt = optimizers.rmsprop(lr=0.0001, decay=1e-6)
        model.compile(optimizer=opt,
                      loss="sparse_categorical_crossentropy",
                      metrics=['acc'])
        model.summary()
        self._model = model
        self.is_init = True
Example #6
        def best_model():
            epochs = [2, 5, 10, 15, 20]
            dropout_rate = [0.1, 0.2, 0.3, 0.4]
            # learning_rates = [0.01, 0.05, 0.1]
            list_of_all_scores = list()
            list_of_scores = list()
            list_of_dropout = list()
            list_of_all_dropouts = list()
            list_of_epochs = list()

            for i in dropout_rate:
                model = Sequential()
                model.add(
                    Embedding(input_dim=vocab_size,
                              output_dim=embedding_dim,
                              input_length=max_length))
                model.add(
                    Conv1D(filters=max_length,
                           kernel_size=5,
                           padding='same',
                           activation='relu'))
                model.add(GlobalMaxPooling1D())
                model.add(Dropout(i))
                model.add(
                    Dense(1,
                          kernel_regularizer=regularizers.l2(0.01),
                          activation='sigmoid'))
                model.compile(optimizer='adam',
                              loss='binary_crossentropy',
                              metrics=['acc'])

                list_of_dropout.append(i)

                for e in epochs:
                    list_of_all_dropouts.append(i)
                    list_of_epochs.append(e)

                    model.fit(X_train,
                              y_train,
                              epochs=e,
                              batch_size=128,
                              verbose=1,
                              validation_split=0.2)
                    score = model.evaluate(X_test, y_test, verbose=1)
                    list_of_all_scores.append(score)

                    if score not in list_of_scores:
                        list_of_scores.append(score)

#print('Dropout:', i, '\n', 'Epoch:', e, '\n', 'Score:', float(score))
            lowest = min(list_of_all_scores)
            num = list_of_all_scores.index(lowest)
            epoch = list_of_epochs[num]
            dropout = list_of_all_dropouts[num]
            print('Lowest score:', lowest, 'Epoch:', epoch, 'Dropout', dropout)

            return epoch, dropout
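Example #7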
 def __init__(self, vocab_size, embedding_dim=20):
     super().__init__()
     self.embedding = Embedding(vocab_size + 1, embedding_dim)
     self.conv1d_32 = Conv1D(32, 3, activation='relu')
     self.conv1d_64 = Conv1D(64, 3, activation='relu')
     self.conv1d_128 = Conv1D(128, 3, activation='relu')
     self.max_pooling = MaxPooling1D(4)
     self.global_max_pooling = GlobalMaxPooling1D()
     self.dense = Dense(4, activation='softmax')
Example #8
 def recurrent_layers(cls):
     num_filters = 20
     filter_size = 5
     return [
         Conv1D(num_filters, filter_size, activation='relu'),
         GlobalMaxPooling1D(),
         # Dropout(0.1),
         Dense(20, activation='relu'),
         Dropout(0.05),
     ]
Example #9
 def shallow_and_wide_cnn(inputs, filters, kernel_sizes):
     outputs = []
     for kernel_size in kernel_sizes:
         conv = tf.layers.conv1d(inputs, filters, kernel_size, padding="same",
                 kernel_regularizer=regularizer)
         conv = tf.layers.batch_normalization(conv, training=is_training)
         conv = tf.nn.relu(conv)
         conv = GlobalMaxPooling1D()(conv)
         outputs.append(conv)
     output = tf.concat(outputs, 1)
     return dropout(output, dropout_keep_prob)
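Example #10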
def build_cnn_1d_model(maxlen=500):
    model = Sequential()
    model.add(Embedding(max_features, 128, input_length=maxlen))
    model.add(Conv1D(32, 7, activation='relu'))
    model.add(MaxPooling1D(5))
    model.add(Conv1D(32, 7, activation='relu'))
    model.add(GlobalMaxPooling1D())
    model.add(Dense(1))
    model.summary()
    model.compile(optimizer=RMSprop(lr=1e-4),
                  loss='binary_crossentropy',
                  metrics=['acc'])
    return model
Example #11
def build_model(embedding_weights,
                embedding_dim,
                num_words,
                input_length,
                num_classes=20):
    """ Builds a Keras model. It sets embeddings layer trainable to False
    to keep the embeddings fixed

    Parameters
    ----------
    embedding_weights: np.ndarray
        A numpy array contains embedding weights
    embedding_dim: int
        Embeddings dimension
    num_words: int
        Number of words in the dataset
    input_length: int
        Maximum sequence length
    num_classes: int
        Number of classes in the dataset

    Returns
    -------
    model: Model
        A keras compiled model instance
    """

    embedding = Embedding(num_words,
                          embedding_dim,
                          embeddings_initializer=Constant(embedding_weights),
                          input_length=input_length,
                          trainable=False)

    seq_input = Input(shape=(input_length, ), dtype='int32')
    embedded_sequences = embedding(seq_input)
    x = Conv1D(128, 5, activation='relu')(embedded_sequences)
    x = MaxPooling1D(5)(x)
    x = Conv1D(128, 5, activation='relu')(x)
    x = MaxPooling1D(5)(x)
    x = Conv1D(128, 5, activation='relu')(x)
    x = GlobalMaxPooling1D()(x)
    x = Dense(128, activation='relu')(x)
    preds = Dense(num_classes, activation='softmax')(x)

    model = Model(seq_input, preds)
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['acc'])

    return model
Example #12
 def recurrent_layers(cls):
     #num_filters = 20
     num_filters = 5
     weight_decay = 1e-4
     return [
         Conv1D(num_filters, 7, activation='relu', padding='same'),
         MaxPooling1D(2),
         Conv1D(num_filters, 7, activation='relu', padding='same'),
         GlobalMaxPooling1D(),
         Dropout(0.05),
         Dense(32,
               activation='relu',
               kernel_regularizer=regularizers.l2(weight_decay))
     ]
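Example #13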
    def get_model(self, input_length: int, numpy_matrix_embeddings: numpy.ndarray, **kwargs):
        # get the number of topics
        lda_vector_size = kwargs.get('lda_vector_size', 0)
        assert lda_vector_size, "lda_vector_size not specified"

        # this is the placeholder tensor for the input sequences
        # input sequence is a numpy array of word indices

        input_sequence = Input(shape=(input_length,), name="input_sequence", dtype='int32')
        input_lda = Input(shape=(lda_vector_size,), name="input_lda", dtype='float32')

        # input_sequence = Input(shape=(input_length,), dtype='int32')
        # input_lda = Input(shape=(lda_vector_size,), dtype='float32')
        # this embedding layer will transform the sequences of integers
        # into vectors of size dimension of embeddings
        # Conv1D does not support masking
        embedded = Embedding(numpy_matrix_embeddings.shape[0], numpy_matrix_embeddings.shape[1],
                             input_length=input_length, weights=[numpy_matrix_embeddings], mask_zero=False)(
            input_sequence)

        # 4 convolution layers (500 filters each)
        nb_filters = 500
        kernel_sizes = [3, 5, 7, 9]
        cnn = [Conv1D(filters=nb_filters, kernel_size=kernel_size, padding="same")(embedded) for kernel_size in
               kernel_sizes]

        # concatenate
        concatenated_cnn_outputs = Concatenate()([c for c in cnn])
        # max pooling
        pooled = GlobalMaxPooling1D()(concatenated_cnn_outputs)

        # after_dropout = Dropout(dropout)(pooled)
        # just for fun - no regularization... and it worked!
        after_dropout = Dropout(0)(pooled)

        # batch normalization? No, worse performance... 0.43321227654613681
        # batch_normalization = BatchNormalization()(after_dropout)

        # now concatenate the LDA vector with output from CNN
        cnn_and_lda = Concatenate()([after_dropout, input_lda])

        output_layer = Dense(1, activation='linear')(cnn_and_lda)
        model = Model(inputs=[input_sequence, input_lda], outputs=output_layer)

        print("Compiling model")
        model.compile('rmsprop', mean_squared_error)

        return model
Example #14
def tnet(inputs, num_features):
    bias = Constant(np.eye(num_features).flatten())
    reg = OrthogonalRegularizer(num_features)

    x = conv_bn(inputs, 32)
    x = conv_bn(x, 64)
    x = conv_bn(x, 512)
    x = GlobalMaxPooling1D()(x)
    x = dense_bn(x, 256)
    x = dense_bn(x, 128)
    x = Dense(num_features * num_features,
              kernel_initializer='zeros',
              bias_initializer=bias,
              activity_regularizer=reg)(x)
    feat_T = Reshape((num_features, num_features))(x)
    return Dot(axes=(2, 1))([inputs, feat_T])
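
Example #14 (and the PointNet-style models in Examples #18 and #22 below) relies on conv_bn and dense_bn helpers that are not shown here. In the Keras PointNet example these are small convolution/dense blocks followed by batch normalization and ReLU; a minimal sketch along those lines (assumed, not taken from the original source):

from tensorflow.keras.layers import Activation, BatchNormalization, Conv1D, Dense

def conv_bn(x, filters):
    # pointwise Conv1D -> BatchNorm -> ReLU
    x = Conv1D(filters, kernel_size=1, padding='valid')(x)
    x = BatchNormalization(momentum=0.0)(x)
    return Activation('relu')(x)

def dense_bn(x, units):
    # Dense -> BatchNorm -> ReLU
    x = Dense(units)(x)
    x = BatchNormalization(momentum=0.0)(x)
    return Activation('relu')(x)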
Example #15
    def get_model(self):
        input = Input((self.maxlen, ))  # the input is a maxlen-dimensional vector
        # input_dim: vocabulary size  output_dim: word-vector dimension  input_length: length of the input sequence
        embedding = Embedding(self.max_features,
                              self.embedding_dims,
                              input_length=self.maxlen)(input)
        convs = []
        for kernel_size in [3, 4, 5]:
            c = Conv1D(128, kernel_size, activation='relu')(embedding)
            c = GlobalMaxPooling1D()(c)
            convs.append(c)
        x = Concatenate()(convs)
        x = Dropout(0.3)(x)

        output = Dense(self.class_num, activation=self.last_activation)(x)
        model = Model(inputs=input, outputs=output)
        return model
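Example #16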
    def create_channel(self, x, filter_size, feature_map):
        """
        Creates a layer, working channel wise

        Arguments:
            x           : Input for the convolutional channel
            filter_size : Filter size for creating Conv1D
            feature_map : Feature map

        Returns:
            x           : Channel including (Conv1D + GlobalMaxPooling + Dense + Dropout)
        """
        x = SeparableConv1D(feature_map, kernel_size=filter_size, activation='relu', strides=1, padding='same',
                            depth_multiplier=4)(x)
        x = GlobalMaxPooling1D()(x)
        x = Dense(self.hidden_units)(x)
        x = Dropout(self.dropout_rate)(x)
        return x
Example #17
def get_rnn_model(MAX_NB_WORDS, embedding_matrix_2):
    """
    Creating the RNN model
    :param MAX_NB_WORDS: maximum length of the sequence
    :param embedding_matrix_2: embedding matrix
    """
    # defining input shape of the data
    inp = Input(shape=(50, ))
    # defining input shape of the metadata
    # the data predictions from the first network
    meta_input = Input(shape=(1,))

    # layers:
    # ------------------------------------------------
    x = Embedding(MAX_NB_WORDS, 300, input_length=50, weights=[
                  embedding_matrix_2], trainable=False)(inp)
    x = SpatialDropout1D(0.2)(x)
    x = Bidirectional(GRU(100, return_sequences=True))(x)
    x = Bidirectional(GRU(100, return_sequences=True))(x)
    x = Conv1D(512, kernel_size=1, padding="valid",
               kernel_initializer="he_uniform")(x)
    avg_pool = GlobalAveragePooling1D()(x)
    max_pool = GlobalMaxPooling1D()(x)
    conc = concatenate([avg_pool, max_pool])
    conc = BatchNormalization()(conc)

    # on this layer, the input and the metadata
    # from the first model are concatenated
    conc = concatenate([conc, meta_input])
    conc = Dense(512)(conc)
    conc = BatchNormalization()(conc)

    conc = LeakyReLU()(conc)
    outp = Dense(90, activation='softmax')(conc)

    # input here is an array (main input and meta-input)
    model = Model(inputs=[inp, meta_input], outputs=outp)

    # Compiling the model
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
Example #18
def classification_model():
    inputs = Input(shape=(NUM_POINTS, 3))

    x = tnet(inputs, 3)
    x = conv_bn(x, 32)
    x = conv_bn(x, 32)
    x = tnet(x, 32)
    x = conv_bn(x, 32)
    x = conv_bn(x, 64)
    x = conv_bn(x, 512)
    x = GlobalMaxPooling1D()(x)
    x = dense_bn(x, 256)
    x = Dropout(0.3)(x)
    x = dense_bn(x, 128)
    x = Dropout(0.3)(x)

    outputs = Dense(NUM_CLASSES, activation='softmax')(x)

    model = Model(inputs=inputs, outputs=outputs, name='pointnet')
    return model
Example #19
    def get_model(self):
        input = Input((self.maxlen,))

        embedding = Embedding(self.max_tokens, self.embedding_size, input_length=self.maxlen)(input)

        x_context = Bidirectional(CuDNNLSTM(128, return_sequences=True))(embedding)
        x = Concatenate()([embedding, x_context])

        convs = []
        for kernel_size in self.kernel_size_list:
            conv = Conv1D(128, kernel_size, activation='relu')(x)
            convs.append(GlobalMaxPooling1D()(conv))
            convs.append(GlobalAveragePooling1D()(conv))

        # poolings = [GlobalAveragePooling1D()(conv) for conv in convs] + \
        #            [GlobalMaxPooling1D()(conv) for conv in convs]
        x = Concatenate()(convs)

        output = Dense(self.num_class, activation=self.last_activation)(x)
        model = Model(inputs=input, outputs=output)
        return model
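Example #20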
    def get_model(self, numpy_matrix_embeddings: numpy.ndarray, **kwargs) -> Model:
        # get default parameters
        dropout = kwargs.get('dropout', 0.9)
        assert isinstance(dropout, float)

        # this is the placeholder tensor for the input sequences
        # input sequence is a numpy array of word indices
        input_sequence = Input(shape=(None,), dtype='int32', name='input_sequence')
        # this embedding layer will transform the sequences of integers
        # into vectors of size dimension of embeddings
        # Conv1D does not support masking
        embedded = Embedding(numpy_matrix_embeddings.shape[0], numpy_matrix_embeddings.shape[1],
                             weights=[numpy_matrix_embeddings], mask_zero=False)(input_sequence)

        # 4 convolution layers (500 filters each)
        nb_filters = 500
        kernel_sizes = [5, 7, 9, 11]
        cnn = [Conv1D(filters=nb_filters, kernel_size=kernel_size, padding="same")(embedded) for kernel_size in
               kernel_sizes]

        # concatenate
        concatenated_cnn_outputs = Concatenate()([c for c in cnn])
        # max pooling
        pooled = GlobalMaxPooling1D()(concatenated_cnn_outputs)

        # after_dropout = Dropout(dropout)(pooled)
        # just for fun - no regularization... and it worked!
        after_dropout = Dropout(dropout)(pooled)

        # batch normalization? No, worse performance... 0.43321227654613681
        # batch_normalization = BatchNormalization()(after_dropout)

        # classification
        output = Dense(2, activation='softmax')(after_dropout)
        model = Model(inputs=[input_sequence], outputs=output)

        print("Compiling model")
        model.compile('adam', loss=categorical_crossentropy)

        return model
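Example #21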
 def build_model(p_tokenizer, p_embedding_matrix, p_max_tokens,
                 number_of_classes, number_of_filters, p_weight_decay):
     cnn_model = Sequential()
     cnn_model.add(
         Embedding(
             input_dim=len(list(p_tokenizer.word_index)) + 1,
             output_dim=p_embedding_matrix.shape[1],
             weights=[p_embedding_matrix],
             input_length=p_max_tokens,
             trainable=True,  # the layer is trained
             name='embedding_layer'))
     cnn_model.add(
         Conv1D(number_of_filters, 7, activation='relu', padding='same'))
     cnn_model.add(MaxPooling1D(2))
     cnn_model.add(
         Conv1D(number_of_filters, 7, activation='relu', padding='same'))
     cnn_model.add(GlobalMaxPooling1D())
     cnn_model.add(Dropout(0.5))
     cnn_model.add(
         Dense(32,
               activation='relu',
               kernel_regularizer=regularizers.l2(p_weight_decay)))
     cnn_model.add(
         Dense(number_of_classes,
               activation='softmax'))  # multi-class output (one label per sample)
     adam = Adam(lr=0.001,
                 beta_1=0.9,
                 beta_2=0.999,
                 epsilon=1e-08,
                 decay=0.0)
     cnn_model.compile(loss='sparse_categorical_crossentropy',
                       optimizer=adam,
                       metrics=['accuracy'])
     cnn_model.summary()
     # define callbacks
     early_stopping = EarlyStopping(monitor='val_loss',
                                    min_delta=0.01,
                                    patience=4,
                                    verbose=1)
     return cnn_model, [early_stopping]
Example #22
def segmentation_model(num_classes):
    inputs = Input(shape=(None, 3))

    x = tnet(inputs, 3)
    x = conv_bn(x, 32)
    x = conv_bn(x, 32)
    concat = tnet(x, 32)
    x = conv_bn(concat, 32)
    x = conv_bn(x, 64)
    x = conv_bn(x, 512)
    global_vector = GlobalMaxPooling1D()(x)
    global_repeat = tf.tile(global_vector[:, tf.newaxis, ...],
                            (1, NUM_POINTS, 1))
    x = tf.keras.layers.Concatenate()([concat, global_repeat])
    x = conv_bn(x, 256)
    x = conv_bn(x, 128)
    x = conv_bn(x, 64)
    x = conv_bn(x, 64)
    outputs = conv_bn(x, num_classes)

    model = Model(inputs=inputs, outputs=outputs, name='pointnet_seg')
    return model
Example #23
def buildprotSeq_embedding_model(num_filters_list,
                                 filter_length_list,
                                 prot_seq_dim,
                                 embed_size,
                                 max_seq_length,
                                 name=None):
    assert len(num_filters_list) == len(
        filter_length_list), "incompatible hyper parameter."
    num_conv_layers = len(num_filters_list)
    protSeq = Input(shape=(max_seq_length, ))
    seq_embed = Embedding(input_dim=prot_seq_dim + 1,
                          output_dim=embed_size,
                          input_length=max_seq_length)(protSeq)
    for i in range(num_conv_layers):
        seq_embed = Conv1D(filters=num_filters_list[i],
                           kernel_size=filter_length_list[i],
                           activation='relu',
                           padding='valid',
                           strides=1)(seq_embed)
    seq_embed = GlobalMaxPooling1D()(seq_embed)
    model = Model(inputs=protSeq, outputs=seq_embed, name=name)
    return model
Example #24
def construct_model():
    input_creative_id = Input(shape=(None, ),
                              dtype='int32',
                              name='creative_id')
    embedded_creative_id = Embedding(
        creative_id_window, embedding_size,
        name='embedded_creative_id')(input_creative_id)
    encoded_creative_id = GlobalMaxPooling1D(
        name='encoded_creative_id')(embedded_creative_id)

    input_product_id = Input(shape=(None, ), dtype='int32', name='product_id')
    embedded_product_id = Embedding(
        product_id_max, 32, name='embedded_product_id')(input_product_id)
    encoded_product_id = GlobalMaxPooling1D(
        name='encoded_product_id')(embedded_product_id)

    input_category = Input(shape=(None, ), dtype='int32', name='category')
    embedded_category = Embedding(category_max, 2,
                                  name='embedded_category')(input_category)
    encoded_category = GlobalMaxPooling1D(
        name='encoded_category')(embedded_category)
    # encoded_category = Bidirectional(
    #     LSTM(32, dropout = 0.2, recurrent_dropout = 0.2), name = 'encoded_category')(embedded_category)

    input_advertiser_id = Input(shape=(None, ),
                                dtype='int32',
                                name='advertiser_id')
    embedded_advertiser_id = Embedding(
        advertiser_id_max, 32,
        name='embedded_advertiser_id')(input_advertiser_id)
    encoded_advertiser_id = GlobalMaxPooling1D(
        name='encoded_advertiser_id')(embedded_advertiser_id)

    input_industry = Input(shape=(None, ), dtype='int32', name='industry')
    embedded_industry = Embedding(industry_max, 16,
                                  name='embedded_industry')(input_industry)
    encoded_industry = GlobalMaxPooling1D(
        name='encoded_industry')(embedded_industry)
    # encoded_industry = Bidirectional(
    #     LSTM(32, dropout = 0.2, recurrent_dropout = 0.2, name = 'encoded_industry'))(embedded_industry)

    # LSTM(14): because 91 days is exactly 14 weeks
    # LSTM(32): for computational convenience
    input_click_times = Input(shape=(None, 1),
                              dtype='float32',
                              name='click_times')
    encoded_click_times = Bidirectional(
        LSTM(32, dropout=0.2, recurrent_dropout=0.2),
        name='encoded_click_times')(input_click_times)

    concatenated = concatenate([
        encoded_creative_id, encoded_click_times, encoded_product_id,
        encoded_category, encoded_advertiser_id, encoded_industry
    ],
                               axis=-1)

    x = Dropout(0.5, name='Dropout_0101')(concatenated)
    x = Dense(embedding_size, kernel_regularizer=l2(0.001),
              name='Dense_0101')(x)
    x = BatchNormalization(name='BN_0101')(x)
    x = Activation('relu', name='relu_0101')(x)

    x = Dropout(0.5, name='Dropout_0102')(x)
    x = Dense(embedding_size, kernel_regularizer=l2(0.001),
              name='Dense_0102')(x)
    x = BatchNormalization(name='BN_0102')(x)
    x = Activation('relu', name='relu_0102')(x)

    x = Dropout(0.5, name='Dropout_0103')(x)
    x = Dense(embedding_size, kernel_regularizer=l2(0.001),
              name='Dense_0103')(x)
    x = BatchNormalization(name='BN_0103')(x)
    x = Activation('relu', name='relu_0103')(x)

    x = Dropout(0.5, name='Dropout_0201')(x)
    x = Dense(embedding_size // 2,
              kernel_regularizer=l2(0.001),
              name='Dense_0201')(x)
    x = BatchNormalization(name='BN_0201')(x)
    x = Activation('relu', name='relu_0201')(x)

    x = Dropout(0.5, name='Dropout_0202')(x)
    x = Dense(embedding_size // 2,
              kernel_regularizer=l2(0.001),
              name='Dense_0202')(x)
    x = BatchNormalization(name='BN_0202')(x)
    x = Activation('relu', name='relu_0202')(x)

    x = Dropout(0.5, name='Dropout_0203')(x)
    x = Dense(embedding_size // 2,
              kernel_regularizer=l2(0.001),
              name='Dense_0203')(x)
    x = BatchNormalization(name='BN_0203')(x)
    x = Activation('relu', name='relu_0203')(x)

    if label_name == "age" and age_sigmoid == -1:
        x = Dropout(0.5)(x)
        x = Dense(10, kernel_regularizer=l2(0.001), name='output')(x)
        x = BatchNormalization()(x)
        output_tensor = Activation('softmax')(x)

        model = Model([
            input_creative_id, input_click_times, input_product_id,
            input_category, input_advertiser_id, input_industry
        ], output_tensor)

        print('-' * 5 + ' ' * 3 + "Compiling the model" + ' ' * 3 + '-' * 5)

        model.compile(optimizer=optimizers.RMSprop(lr=RMSProp_lr),
                      loss=losses.sparse_categorical_crossentropy,
                      metrics=[metrics.sparse_categorical_accuracy])
    elif label_name == 'gender' or age_sigmoid != -1:
        x = Dropout(0.5)(x)
        x = Dense(1, kernel_regularizer=l2(0.001), name='output')(x)
        x = BatchNormalization()(x)
        output_tensor = Activation('sigmoid')(x)

        model = Model([
            input_creative_id, input_click_times, input_product_id,
            input_category, input_advertiser_id, input_industry
        ], output_tensor)

        print('-' * 5 + ' ' * 3 + "Compiling the model" + ' ' * 3 + '-' * 5)

        model.compile(optimizer=optimizers.RMSprop(lr=RMSProp_lr),
                      loss=losses.binary_crossentropy,
                      metrics=[metrics.binary_accuracy])
    else:
        raise Exception("错误的标签类型!")
    return model
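Example #25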
def SpamDectectionCNN():

    def prepData(text, size):
        # Convert to array
        textDataArray = [text]

        # Convert into list with word ids
        Features = tokenizer.texts_to_sequences(textDataArray)
        Features = pad_sequences(Features, size, padding='post')

        return Features

    results = []
    predictions = []
    # read ds
    df = pd.read_csv('data/spam.csv', encoding='ISO-8859-1')
    # print(df.head())  # there's some missing data here

    # drop garbage columns
    df = df.drop(["Unnamed: 2", "Unnamed: 3", "Unnamed: 4"], axis=1)

    # rename columns
    df.columns = ['labels', 'data']

    # create binary labels (ham/spam)
    df['b_labels'] = df['labels'].map({'ham': 0, 'spam': 1})
    y = df['b_labels'].values

    # split dataset
    X_train, X_test, y_train, y_test = train_test_split(df['data'], y, test_size=0.1)

    # convert sentences to sequences of word indices
    # every unique word is assigned one integer index
    tokenizer = Tokenizer(num_words=20000)
    tokenizer.fit_on_texts(X_train)

    seq_train = tokenizer.texts_to_sequences(X_train)  # each number is unique and corresponds to a unique word
    seq_test = tokenizer.texts_to_sequences(X_test)

    # create one big matrix to pass to the CNN
    #  --> we have to pad it because the CNN accepts a FIXED length but the above process produces a dynamic length due to
    # each sentence having a different length.
    data_train = pad_sequences(seq_train)
    T = data_train.shape[1]
    word2idx = tokenizer.word_index
    V = len(word2idx)  # total number of unique words

    # pad the test set
    data_test = pad_sequences(seq_test, maxlen=T)  # pad the test set to the same length as the training data

    # build the model
    D = 20  # hyperparameter: word-vector (embedding) dimension
    input_layer = Input(shape=(T,))
    x = Embedding(V + 1, D)(input_layer)  # N * T * D array. Returns sequence of word vectors
    x = Conv1D(32, 3, activation='relu', padding='same')(x)
    x = MaxPool1D(3)(x)
    x = Conv1D(64, 3, activation='relu', padding='same')(x)
    x = MaxPool1D(3)(x)
    x = Conv1D(128, 3, activation='relu', padding='same')(x)
    x = GlobalMaxPooling1D()(x)
    x = Dense(1, activation='sigmoid')(x)
    model = Model(input_layer, x)

    # compile model & train
    model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])  # ...binary problem
    hist = model.fit(x=data_train, y=y_train, epochs=5)
    # model.summary()

    accuracy = hist.history['accuracy'][-1]
    # test model
    pd.options.display.max_colwidth = 80  # show a part of the message

    # make prediction on your custom txt file with text
    for i in range(0, len(lines)):
        results.append(lines[i])
        textTokenizedTest = prepData(results[i], T)
        predictions.append(model.predict(textTokenizedTest).item())

    return accuracy, results, predictions
Example #26
print(batch_size)
print(pool_len1)
print(learningrates)

def get_output(input_layer, hidden_layers):
    output = input_layer
    for hidden_layer in hidden_layers:
        output = hidden_layer(output)
    return output

forward_input = Input(shape=(w, channel_num))
reverse_input = Input(shape=(w, channel_num))
hidden_layers = [
    Conv1D(filters = nb_filter1, kernel_size = filter_len1, activation='relu', kernel_regularizer=regularizers.l2(0.01)),
    GlobalMaxPooling1D(),
    Dropout(dropout_pool),
    Dense(nb_dense, activation='relu', kernel_regularizer=regularizers.l2(0.01)),
    Dropout(dropout_dense),
    Dense(1, activation='sigmoid')
]
forward_output = get_output(forward_input, hidden_layers)     
reverse_output = get_output(reverse_input, hidden_layers)
output = Average()([forward_output, reverse_output])
model = Model(inputs=[forward_input, reverse_input], outputs=output)
print(model.summary())

history = []
for i in range(len(learningrates)):
    print("Testing with learning rate = " + str(learningrates[i]))
    model_dir = '/content/drive/My Drive/cs230_metagenomics/BugNet/bas_test/saved_models'
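Example #27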
def construct_model(creative_id_num, embedding_size, max_len, RMSProp_lr,
                    model_type = "MLP", label_name = "gender"):
    '''
        Build and compile the model
    :param creative_id_num: vocabulary size
    :param embedding_size: embedding dimension
    :param max_len: sequence length
    :param RMSProp_lr: learning rate
    :param model_type: type of model
        MLP: multilayer perceptron
        Conv1D: 1D convolutional network
        GlobalMaxPooling1D: 1D global max-pooling layer
        GlobalMaxPooling1D+MLP: 1D global max-pooling layer + multilayer perceptron
        Conv1D+LSTM: 1D convolutional network + LSTM
        Bidirectional+LSTM: bidirectional LSTM
    :param label_name: type of label
        age: multi-class classification by age
        gender: binary classification by gender
    :return: the constructed model
    '''
    print("* Building the network")
    model = Sequential()
    model.add(Embedding(creative_id_num, embedding_size, input_length = max_len))
    if model_type == 'MLP':
        model.add(Flatten())
        model.add(Dense(8, activation = 'relu', kernel_regularizer = l2(0.001)))
        model.add(Dropout(0.5))
        model.add(Dense(4, activation = 'relu', kernel_regularizer = l2(0.001)))
        model.add(Dropout(0.5))
    elif model_type == 'Conv1D':
        model.add(Conv1D(32, 7, activation = 'relu', kernel_regularizer = l2(0.001)))
        model.add(Conv1D(32, 7, activation = 'relu', kernel_regularizer = l2(0.001)))
        model.add(GlobalMaxPooling1D())
    elif model_type == 'GlobalMaxPooling1D':
        model.add(GlobalMaxPooling1D())
    elif model_type == 'GlobalMaxPooling1D+MLP':
        model.add(GlobalMaxPooling1D())
        model.add(Dense(64, activation = 'relu', kernel_regularizer = l2(0.001)))
        model.add(Dense(32, activation = 'relu', kernel_regularizer = l2(0.001)))
    elif model_type == 'LSTM':
        # model.add(LSTM(128, dropout = 0.5, recurrent_dropout = 0.5))
        model.add(LSTM(128))
    elif model_type == 'Conv1D+LSTM':
        model.add(Conv1D(32, 5, activation = 'relu', kernel_regularizer = l2(0.001)))
        model.add(Conv1D(32, 5, activation = 'relu', kernel_regularizer = l2(0.001)))
        model.add(LSTM(16, dropout = 0.5, recurrent_dropout = 0.5))
    elif model_type == 'Bidirectional-LSTM':
        model.add(Bidirectional(LSTM(embedding_size, dropout = 0.2, recurrent_dropout = 0.2)))
    else:
        raise Exception("错误的网络模型类型")

    if label_name == "age":
        model.add(Dense(10, activation = 'softmax'))
        print("%s——模型构建完成!" % model_type)
        print("* 编译模型")
        model.compile(optimizer = optimizers.RMSprop(lr = RMSProp_lr),
                      loss = losses.sparse_categorical_crossentropy,
                      metrics = [metrics.sparse_categorical_accuracy])
    elif label_name == 'gender':
        model.add(Dense(1, activation = 'sigmoid'))
        print("%s——模型构建完成!" % model_type)
        print("* 编译模型")
        model.compile(optimizer = optimizers.RMSprop(lr = RMSProp_lr),
                      loss = losses.binary_crossentropy,
                      metrics = [metrics.binary_accuracy])
    else:
        raise Exception("错误的标签类型!")

    print(model.summary())
    return model
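Example #28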
def get_test_model_full():
    """Returns a maximally complex test model,
    using all supported layer types with different parameter combinations.
    """
    input_shapes = [
        (26, 28, 3),
        (4, 4, 3),
        (4, 4, 3),
        (4, ),
        (2, 3),
        (27, 29, 1),
        (17, 1),
        (17, 4),
    ]
    inputs = [Input(shape=s) for s in input_shapes]

    outputs = []

    for inp in inputs[6:8]:
        for padding in ['valid', 'same']:
            for s in range(1, 6):
                for out_channels in [1, 2]:
                    for d in range(1, 4):
                        outputs.append(
                            Conv1D(out_channels,
                                   s,
                                   padding=padding,
                                   dilation_rate=d)(inp))
        for padding_size in range(0, 5):
            outputs.append(ZeroPadding1D(padding_size)(inp))
        for crop_left in range(0, 2):
            for crop_right in range(0, 2):
                outputs.append(Cropping1D((crop_left, crop_right))(inp))
        for upsampling_factor in range(1, 5):
            outputs.append(UpSampling1D(upsampling_factor)(inp))
        for padding in ['valid', 'same']:
            for pool_factor in range(1, 6):
                for s in range(1, 4):
                    outputs.append(
                        MaxPooling1D(pool_factor, strides=s,
                                     padding=padding)(inp))
                    outputs.append(
                        AveragePooling1D(pool_factor,
                                         strides=s,
                                         padding=padding)(inp))
        outputs.append(GlobalMaxPooling1D()(inp))
        outputs.append(GlobalAveragePooling1D()(inp))

    for inp in [inputs[0], inputs[5]]:
        for padding in ['valid', 'same']:
            for h in range(1, 6):
                for out_channels in [1, 2]:
                    for d in range(1, 4):
                        outputs.append(
                            Conv2D(out_channels, (h, 1),
                                   padding=padding,
                                   dilation_rate=(d, 1))(inp))
                        outputs.append(
                            SeparableConv2D(out_channels, (h, 1),
                                            padding=padding,
                                            dilation_rate=(d, 1))(inp))
                    for sy in range(1, 4):
                        outputs.append(
                            Conv2D(out_channels, (h, 1),
                                   strides=(1, sy),
                                   padding=padding)(inp))
                        outputs.append(
                            SeparableConv2D(out_channels, (h, 1),
                                            strides=(sy, sy),
                                            padding=padding)(inp))
                for sy in range(1, 4):
                    outputs.append(
                        MaxPooling2D((h, 1), strides=(1, sy),
                                     padding=padding)(inp))
            for w in range(1, 6):
                for out_channels in [1, 2]:
                    for d in range(1, 4) if sy == 1 else [1]:
                        outputs.append(
                            Conv2D(out_channels, (1, w),
                                   padding=padding,
                                   dilation_rate=(1, d))(inp))
                        outputs.append(
                            SeparableConv2D(out_channels, (1, w),
                                            padding=padding,
                                            dilation_rate=(1, d))(inp))
                    for sx in range(1, 4):
                        outputs.append(
                            Conv2D(out_channels, (1, w),
                                   strides=(sx, 1),
                                   padding=padding)(inp))
                        outputs.append(
                            SeparableConv2D(out_channels, (1, w),
                                            strides=(sx, sx),
                                            padding=padding)(inp))
                for sx in range(1, 4):
                    outputs.append(
                        MaxPooling2D((1, w), strides=(1, sx),
                                     padding=padding)(inp))
    outputs.append(ZeroPadding2D(2)(inputs[0]))
    outputs.append(ZeroPadding2D((2, 3))(inputs[0]))
    outputs.append(ZeroPadding2D(((1, 2), (3, 4)))(inputs[0]))
    outputs.append(Cropping2D(2)(inputs[0]))
    outputs.append(Cropping2D((2, 3))(inputs[0]))
    outputs.append(Cropping2D(((1, 2), (3, 4)))(inputs[0]))
    for y in range(1, 3):
        for x in range(1, 3):
            outputs.append(UpSampling2D(size=(y, x))(inputs[0]))
    outputs.append(GlobalAveragePooling2D()(inputs[0]))
    outputs.append(GlobalMaxPooling2D()(inputs[0]))
    outputs.append(AveragePooling2D((2, 2))(inputs[0]))
    outputs.append(MaxPooling2D((2, 2))(inputs[0]))
    outputs.append(UpSampling2D((2, 2))(inputs[0]))
    outputs.append(keras.layers.concatenate([inputs[0], inputs[0]]))
    outputs.append(Dropout(0.5)(inputs[0]))

    outputs.append(BatchNormalization()(inputs[0]))
    outputs.append(BatchNormalization(center=False)(inputs[0]))
    outputs.append(BatchNormalization(scale=False)(inputs[0]))

    outputs.append(Conv2D(2, (3, 3), use_bias=True)(inputs[0]))
    outputs.append(Conv2D(2, (3, 3), use_bias=False)(inputs[0]))
    outputs.append(SeparableConv2D(2, (3, 3), use_bias=True)(inputs[0]))
    outputs.append(SeparableConv2D(2, (3, 3), use_bias=False)(inputs[0]))

    outputs.append(Dense(2, use_bias=True)(inputs[3]))
    outputs.append(Dense(2, use_bias=False)(inputs[3]))

    shared_conv = Conv2D(1, (1, 1),
                         padding='valid',
                         name='shared_conv',
                         activation='relu')

    up_scale_2 = UpSampling2D((2, 2))
    x1 = shared_conv(up_scale_2(inputs[1]))  # (1, 8, 8)
    x2 = shared_conv(up_scale_2(inputs[2]))  # (1, 8, 8)
    x3 = Conv2D(1, (1, 1), padding='valid')(up_scale_2(inputs[2]))  # (1, 8, 8)
    x = keras.layers.concatenate([x1, x2, x3])  # (3, 8, 8)
    outputs.append(x)

    x = Conv2D(3, (1, 1), padding='same', use_bias=False)(x)  # (3, 8, 8)
    outputs.append(x)
    x = Dropout(0.5)(x)
    outputs.append(x)
    x = keras.layers.concatenate(
        [MaxPooling2D((2, 2))(x),
         AveragePooling2D((2, 2))(x)])  # (6, 4, 4)
    outputs.append(x)

    x = Flatten()(x)  # (1, 1, 96)
    x = Dense(4, use_bias=False)(x)
    outputs.append(x)
    x = Dense(3)(x)  # (1, 1, 3)
    outputs.append(x)

    intermediate_input_shape = (3, )
    intermediate_in = Input(intermediate_input_shape)
    intermediate_x = intermediate_in
    intermediate_x = Dense(8)(intermediate_x)
    intermediate_x = Dense(5)(intermediate_x)
    intermediate_model = Model(inputs=[intermediate_in],
                               outputs=[intermediate_x],
                               name='intermediate_model')
    intermediate_model.compile(loss='mse', optimizer='nadam')

    x = intermediate_model(x)  # (1, 1, 5)

    intermediate_model_2 = Sequential()
    intermediate_model_2.add(Dense(7, input_shape=(5, )))
    intermediate_model_2.add(Dense(5))
    intermediate_model_2.compile(optimizer='rmsprop',
                                 loss='categorical_crossentropy')

    x = intermediate_model_2(x)  # (1, 1, 5)

    x = Dense(3)(x)  # (1, 1, 3)

    shared_activation = Activation('tanh')

    outputs = outputs + [
        Activation('tanh')(inputs[3]),
        Activation('hard_sigmoid')(inputs[3]),
        Activation('selu')(inputs[3]),
        Activation('sigmoid')(inputs[3]),
        Activation('softplus')(inputs[3]),
        Activation('softmax')(inputs[3]),
        Activation('relu')(inputs[3]),
        LeakyReLU()(inputs[3]),
        ELU()(inputs[3]),
        shared_activation(inputs[3]),
        inputs[4],
        inputs[1],
        x,
        shared_activation(x),
    ]

    print('Model has {} outputs.'.format(len(outputs)))

    model = Model(inputs=inputs, outputs=outputs, name='test_model_full')
    model.compile(loss='mse', optimizer='nadam')

    # fit to dummy data
    training_data_size = 1
    batch_size = 1
    epochs = 10
    data_in = generate_input_data(training_data_size, input_shapes)
    data_out = generate_output_data(training_data_size, outputs)
    model.fit(data_in, data_out, epochs=epochs, batch_size=batch_size)
    return model
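
The generate_input_data and generate_output_data helpers used for the dummy fit above are not included in the snippet. A minimal sketch of what they could look like, assuming every model input and output has a fully defined static shape (as is the case for this test model):

import numpy as np

def generate_input_data(num_samples, input_shapes):
    # one random array per model input
    return [np.random.random((num_samples,) + tuple(shape)) for shape in input_shapes]

def generate_output_data(num_samples, outputs):
    # one random target array per model output, matching its static (non-batch) shape
    return [np.random.random((num_samples,) + tuple(int(d) for d in out.shape[1:]))
            for out in outputs]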
Example #29
def get_rnn_model(MAX_NB_WORDS, embedding_matrix_2):
    """
    Creating the RNN model
    :param MAX_NB_WORDS: maximum length of the sequence
    :param embedding_matrix_2: embedding matrix
    """

    # -----------------------------------------------------
    """
    both previous models are used here. predictions from both models are used 
    as metadata in the network
    """
    # defining input shape of the data
    inp = Input(shape=(50, ))
    # defining input shape of the level_1 predictions
    meta_input_1 = Input(shape=(1,))
    # defining input shape of the level_2 predictions
    meta_input_2 = Input(shape=(1,))
    # -----------------------------------------------------
    # defining Embedding layer
    x = Embedding(MAX_NB_WORDS, 300, input_length=50, weights=[
                  embedding_matrix_2], trainable=False)(inp)
    # -----------------------------------------------
    # defining spatial dropout
    x = SpatialDropout1D(0.2)(x)
    # ----------------------------------------------------------
    # defining RRN part
    x = Bidirectional(GRU(100, return_sequences=True))(x)
    x = Bidirectional(GRU(100, return_sequences=True))(x)

    # defining the convolutional layer
    x = Conv1D(512, kernel_size=1, padding="valid",
               kernel_initializer="he_uniform")(x)
    # --------------------------------------------------------
    # defining two pooling layers: average and maximum
    avg_pool = GlobalAveragePooling1D()(x)
    max_pool = GlobalMaxPooling1D()(x)
    # concatenating the two pooling layers
    conc = concatenate([avg_pool, max_pool])
    # -----------------------------------------------------
    # applying batch normalization to speed the weights learning
    conc = BatchNormalization()(conc)
    # ----------------------------------------------------
    """
    both predictions are concatenated with the new input data
    and fed into a dense layer
    """
    # concatenating the numerical and embedding features
    conc = concatenate([conc, meta_input_1, meta_input_2])
    # applying dense layer on the concatenation
    conc = Dense(512)(conc)
    # ---------------------------------------------------
    # increases the learning speed
    conc = BatchNormalization()(conc)
    # ---------------------------------
    # applying LeakyReLU
    conc = LeakyReLU()(conc)
    # --------------------------------------------------
    # defining the output layer
    outp = Dense(477, activation='softmax')(conc)
    # --------------------------------------------------
    # 3 inputs
    model = Model(inputs=[inp, meta_input_1, meta_input_2], outputs=outp)

    # to load pre-trained weights:
    # model.load_weights("weights-improvement-01-0.69.hdf5")

    # Compiling the model
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
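Example #30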

model = Sequential()

# we start off with an efficient embedding layer which maps
# our vocab indices into embedding_dims dimensions
model.add(Embedding(max_features, embedding_dims, input_length=maxlen))
model.add(Dropout(0.2))

# we add a Convolution1D, which will learn filters
# word group filters of size filter_length:
model.add(
    Conv1D(filters, kernel_size, padding='valid', activation='relu',
           strides=1))
# we use max pooling:
model.add(GlobalMaxPooling1D())

# We add a vanilla hidden layer:
model.add(Dense(hidden_dims))
model.add(Dropout(0.2))
model.add(Activation('relu'))

# We project onto a single unit output layer, and squash it with a sigmoid:
model.add(Dense(1))
model.add(Activation('sigmoid'))

model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])
model.fit(x_train,
          y_train,