Example #1
def model(reader):
    inshape = (reader.max_len, )

    known_in = L.Input(shape=inshape, dtype='int32')
    unknown_in = L.Input(shape=inshape, dtype='int32')

    embedding = L.Embedding(len(reader.vocabulary_above_cutoff) + 2, 5)

    known_emb = embedding(known_in)
    unknown_emb = embedding(unknown_in)

    lstm = L.Bidirectional(L.LSTM(100, return_sequences=True))

    char_features_known = lstm(known_emb)
    char_features_unknown = lstm(unknown_emb)

    features_known = L.GlobalAveragePooling1D()(char_features_known)
    features_unknown = L.GlobalAveragePooling1D()(char_features_unknown)

    # Keras 1's `L.merge(mode='cos')` is gone in Keras 2; `Dot` with
    # normalize=True computes the same cosine proximity.
    cos_distance = L.Dot(axes=1, normalize=True)([features_known,
                                                  features_unknown])
    cos_distance = L.Reshape((1, ))(cos_distance)
    cos_similarity = L.Lambda(lambda x: 1 - x)(cos_distance)

    model = Model(inputs=[known_in, unknown_in], outputs=cos_similarity)

    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['accuracy'])

    return model
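Note that `L.Dot(axes=1, normalize=True)` L2-normalizes both feature vectors and takes their dot product, i.e. the cosine proximity. A minimal numpy sanity check of what that layer computes (shapes are illustrative, not from the original source):

import numpy as np

a = np.random.rand(4, 200).astype('float32')  # pooled 'known' features
b = np.random.rand(4, 200).astype('float32')  # pooled 'unknown' features
cos = (a * b).sum(axis=1) / (np.linalg.norm(a, axis=1) * np.linalg.norm(b, axis=1))
print(cos.shape)  # (4,); the Dot layer yields the same values with shape (4, 1)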
Example #2
def build_model(embedding_matrix, word_index, max_len, lstm_units,
                verbose=False, compile=True, multi=True, gpu_num=4):
    #logger.info('Build model')
    sequence_input = L.Input(shape=(max_len,), dtype='int32')
    embedding_layer = L.Embedding(*embedding_matrix.shape,
                                  weights=[embedding_matrix],
                                  trainable=False)
    x = embedding_layer(sequence_input)
    x = L.SpatialDropout1D(0.3)(x)
    x = L.Bidirectional(L.CuDNNLSTM(lstm_units, return_sequences=True))(x)
    x = L.Bidirectional(L.CuDNNLSTM(lstm_units, return_sequences=True))(x)
    att = Attention(max_len)(x)
    avg_pool1 = L.GlobalAveragePooling1D()(x)
    max_pool1 = L.GlobalMaxPooling1D()(x)
    x = L.concatenate([att, avg_pool1, max_pool1])
    preds = L.Dense(1, activation='sigmoid')(x)
    model = Model(sequence_input, preds)
    if multi:
        print('using multiple GPUs')
        model = ModelMGPU(model, gpus=gpu_num)
    if verbose:
        model.summary()
    if compile:
        model.compile(loss='binary_crossentropy', optimizer=Adam(0.005), metrics=['acc'])
    return model
Example #3
def littlesmartcnn():
    inp = kl.Input((1024, 1))
    x = kl.Conv1D(128, 7)(inp)
    x = kl.BatchNormalization()(x)
    x = kl.LeakyReLU()(x)
    x = littelsmartblock(x, 128)
    x = kl.MaxPooling1D()(x)
    x = kl.Conv1D(256, 3, padding='same')(x)
    x = kl.BatchNormalization()(x)
    x = kl.LeakyReLU()(x)
    x = littelsmartblock(x, 256)
    x = kl.MaxPooling1D()(x)
    x = kl.Conv1D(512, 3, padding='same')(x)
    x = kl.BatchNormalization()(x)
    x = kl.LeakyReLU()(x)
    x = littelsmartblock(x, 512)
    x = littelsmartblock(x, 512)
    x = littelsmartblock(x, 512)
    x = kl.MaxPooling1D()(x)
    x = kl.Conv1D(1024, 3, padding='same')(x)
    x = kl.BatchNormalization()(x)
    x = kl.LeakyReLU()(x)
    x = littelsmartblock(x, 1024)
    x = kl.GlobalAveragePooling1D()(x)
    x = kl.Dense(1)(x)
    return km.Model(inp, x)
Example #4
    def __call__(self, inputs):
        x = inputs[0]

        kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
        x = kl.Conv1D(128,
                      11,
                      name='conv1',
                      kernel_initializer=self.init,
                      kernel_regularizer=kernel_regularizer)(x)
        x = kl.BatchNormalization(name='bn1')(x)
        x = kl.Activation('relu', name='act1')(x)
        x = kl.MaxPooling1D(2, name='pool1')(x)

        # 124
        x = self._res_unit(x, [32, 32, 128], stage=1, block=1, stride=2)
        x = self._res_unit(x, [32, 32, 128], stage=1, block=2)
        x = self._res_unit(x, [32, 32, 128], stage=1, block=3)

        # 64
        x = self._res_unit(x, [64, 64, 256], stage=2, block=1, stride=2)
        x = self._res_unit(x, [64, 64, 256], stage=2, block=2)
        x = self._res_unit(x, [64, 64, 256], stage=2, block=3)

        # 32
        x = self._res_unit(x, [128, 128, 512], stage=3, block=1, stride=2)
        x = self._res_unit(x, [128, 128, 512], stage=3, block=2)
        x = self._res_unit(x, [128, 128, 512], stage=3, block=3)

        # 16
        x = self._res_unit(x, [256, 256, 1024], stage=4, block=1, stride=2)

        x = kl.GlobalAveragePooling1D()(x)
        x = kl.Dropout(self.dropout)(x)

        return self._build(inputs, x)
Example #5
    def model(embedding_size, vocab_size, encoded_image, words_input, masks, positional_encoding, bottleneck_units=512):
        
        emb = kl.Embedding(vocab_size+1
                           , embedding_size
                           , mask_zero=False
                           , name='w2v_emb')(words_input)
        
        emb = kl.Dense(name='c1'
                       ,units=bottleneck_units
                       ,activation=activation
                       ,use_bias=True)(emb)
        
        sa_input = kl.Add()([emb,positional_encoding])

        # ---- decoder block 1 ----
        dec_1 = decoder_block(sa_input=sa_input
                             ,masks=masks
                             ,encoder_output=encoded_image
                             ,heads=8
                             ,layer_number=1)

        # ---- decoder block 2 ----
        dec_2 = decoder_block(sa_input=dec_1
                             ,masks=masks
                             ,encoder_output=encoded_image
                             ,heads=8
                             ,layer_number=2)

        gap = kl.GlobalAveragePooling1D()(dec_2)
        gap = kl.Dense(1024
                       ,activation=activation)(gap)
        target = kl.Dense(vocab_size+1)(gap)
        
        return target
Example #6
def build_model(verbose = False, compile = True):
    sequence_input = L.Input(shape=(maxlen,), dtype='int32')
    embedding_layer = L.Embedding(len(word_index) + 1,
                                  300,
                                  weights=[embedding_matrix],
                                  input_length=maxlen,
                                  trainable=False)
    x = embedding_layer(sequence_input)
    x = L.SpatialDropout1D(0.2)(x)
    x = L.Bidirectional(L.CuDNNLSTM(64, return_sequences=True))(x)

    att = Attention(maxlen)(x)
    avg_pool1 = L.GlobalAveragePooling1D()(x)
    max_pool1 = L.GlobalMaxPooling1D()(x)

    x = L.concatenate([att, avg_pool1, max_pool1])

    preds = L.Dense(1, activation='sigmoid')(x)


    model = Model(sequence_input, preds)
    if verbose:
        model.summary()
    if compile:
        model.compile(loss='binary_crossentropy', optimizer=Adam(0.005), metrics=['acc'])
    return model
Example #7
def make_model(input_shape):
    nn = models.Sequential()
    nn.add(
        layers.SeparableConv1D(64,
                               7,
                               activation='relu',
                               input_shape=(None, input_shape[-1])))
    nn.add(layers.BatchNormalization())
    nn.add(layers.SeparableConv1D(64, 7, activation='relu'))
    nn.add(layers.BatchNormalization())
    nn.add(layers.MaxPooling1D(5))
    nn.add(layers.Dropout(0.3))

    nn.add(layers.SeparableConv1D(128, 7, activation='relu'))
    nn.add(layers.BatchNormalization())
    nn.add(layers.SeparableConv1D(128, 7, activation='relu'))
    nn.add(layers.BatchNormalization())
    nn.add(layers.MaxPooling1D(5))
    nn.add(layers.Dropout(0.3))

    nn.add(layers.SeparableConv1D(512, 7, activation='relu'))
    nn.add(layers.BatchNormalization())
    nn.add(layers.SeparableConv1D(512, 7, activation='relu'))
    nn.add(layers.BatchNormalization())
    nn.add(layers.GlobalAveragePooling1D())

    nn.add(layers.Dense(41, activation='softmax'))

    return nn
Example #8
    def basic_model(self, input_shape, num_classes):
        model = models.Sequential()
        model.add(
            layers.Conv1D(128, 5, padding='same', input_shape=input_shape))
        model.add(layers.Activation('relu'))
        model.add(layers.MaxPooling1D())
        model.add(layers.Dropout(0.4))
        for i in range(6):
            model.add(
                layers.Conv1D(
                    int(128 / (i + 1)),
                    5 + (2 * i),
                    padding='same',
                ))
            model.add(layers.Activation('relu'))
            model.add(layers.MaxPooling1D())
            model.add(layers.Dropout(0.4))

        model.add(layers.GlobalAveragePooling1D())
        model.add(layers.Dense(256))
        model.add(layers.Activation('relu'))
        model.add(layers.Dropout(0.3))
        model.add(layers.Dense(128))
        model.add(layers.Activation('relu'))
        model.add(layers.Dropout(0.3))
        model.add(layers.Dense(64))
        model.add(layers.Activation('relu'))
        model.add(layers.Dropout(0.3))
        model.add(layers.Dense(num_classes))
        model.add(layers.Activation('softmax'))

        return model
Example #9
    def __call__(self, inputs):
        x = inputs[0]

        # Keras 1's WeightRegularizer/init/W_regularizer API, updated to the
        # Keras 2 equivalents (cf. Example #4):
        kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
        x = kl.Conv1D(128, 11,
                      name='conv1',
                      kernel_initializer=self.init,
                      kernel_regularizer=kernel_regularizer)(x)
        x = kl.Activation('relu', name='act1')(x)
        x = kl.MaxPooling1D(2, name='pool1')(x)

        # 124
        x = self._res_unit(x, [32, 32, 128], stage=1, block=1, stride=2)
        x = self._res_unit(x, [32, 32, 128], atrous=2, stage=1, block=2)
        x = self._res_unit(x, [32, 32, 128], atrous=4, stage=1, block=3)

        # 64
        x = self._res_unit(x, [64, 64, 256], stage=2, block=1, stride=2)
        x = self._res_unit(x, [64, 64, 256], atrous=2, stage=2, block=2)
        x = self._res_unit(x, [64, 64, 256], atrous=4, stage=2, block=3)

        # 32
        x = self._res_unit(x, [128, 128, 512], stage=3, block=1, stride=2)
        x = self._res_unit(x, [128, 128, 512], atrous=2, stage=3, block=2)
        x = self._res_unit(x, [128, 128, 512], atrous=4, stage=3, block=3)

        # 16
        x = self._res_unit(x, [256, 256, 1024], stage=4, block=1, stride=2)

        x = kl.GlobalAveragePooling1D()(x)
        x = kl.Dropout(self.dropout)(x)

        return self._build(inputs, x)
Example #10
    def __call__(self, inputs):
        x = inputs[0]

        # Keras 1's WeightRegularizer/init/W_regularizer API, updated to the
        # Keras 2 equivalents (cf. Example #4):
        kernel_regularizer = kr.L1L2(l1=self.l1_decay, l2=self.l2_decay)
        x = kl.Conv1D(128, 11,
                      name='conv1',
                      kernel_initializer=self.init,
                      kernel_regularizer=kernel_regularizer)(x)
        x = kl.BatchNormalization(name='bn1')(x)
        x = kl.Activation('relu', name='act1')(x)
        x = kl.MaxPooling1D(2, name='pool1')(x)

        # 124
        x = self._res_unit(x, 128, stage=1, block=1, stride=2)
        x = self._res_unit(x, 128, stage=1, block=2)

        # 64
        x = self._res_unit(x, 256, stage=2, block=1, stride=2)

        # 32
        x = self._res_unit(x, 256, stage=3, block=1, stride=2)

        # 32
        x = self._res_unit(x, 512, stage=4, block=1, stride=2)

        x = kl.GlobalAveragePooling1D()(x)
        x = kl.Dropout(self.dropout)(x)

        return self._build(inputs, x)
Example #11
def build(txt_shape,
          img_shape,
          act_1=None,
          act_2=None,
          loss_function=mAP.my_loss):
    K.clear_session()
    word_index, embedding_matrix = loading()
    input_text = layers.Input(shape=(None, ), dtype='int32')
    input_image = layers.Input(shape=(img_shape, ))
    embeddings = layers.Embedding(len(word_index) + 1,
                                  128,
                                  weights=[embedding_matrix],
                                  input_length=400,
                                  trainable=True)(input_text)
    embeddings = Position_Embedding()(embeddings)
    O_seq = Attention(8, 16)([embeddings, embeddings, embeddings])
    O_seq = layers.GlobalAveragePooling1D()(O_seq)
    text_embedding = layers.Dropout(0.5)(O_seq)
    text_dense = layers.Dense(512, activation=act_1)(text_embedding)
    image_dense = layers.Dense(512, activation=act_1)(input_image)
    mul = layers.Multiply()([text_dense, image_dense])
    pred = layers.Dense(1, activation=act_2)(mul)
    model = Model(inputs=[input_text, input_image], outputs=pred)
    model.compile(loss=loss_function, optimizer='adam', metrics=[mAP.auc])
    model.summary()
    return model
Example #12
    def model_3(self):
        embedding_matrix = self.build_myself_embedding_matrix()
        input = layers.Input(shape=(self.max_words,))
        embedding = layers.Embedding(input_dim=embedding_matrix.shape[0], output_dim=embedding_matrix.shape[1],
                                     input_length=self.max_words, weights=[embedding_matrix])
        x = layers.SpatialDropout1D(0.2)(embedding(input))
        x = layers.Bidirectional(layers.GRU(400, return_sequences=True))(x)
        x = layers.Bidirectional(layers.GRU(400, return_sequences=True))(x)
        avg_pool = layers.GlobalAveragePooling1D()(x)
        max_pool = layers.GlobalMaxPool1D()(x)
        concat = layers.concatenate([avg_pool, max_pool])

        x = layers.Dense(1024)(concat)
        x = layers.BatchNormalization()(x)
        x = layers.Activation(activation='relu')(x)
        x = layers.Dropout(0.2)(x)

        x = layers.Dense(512)(x)
        x = layers.BatchNormalization()(x)
        x = layers.Activation(activation='relu')(x)
        x = layers.Dropout(0.2)(x)
        output = layers.Dense(self.class_num, activation='softmax')(x)

        # Keras 2 spells these kwargs `inputs`/`outputs`
        model = models.Model(inputs=input, outputs=output)
        print(model.summary())
        return model
Example #13
    def build(self, input_shape):
        if len(input_shape) != 4 and len(input_shape) != 3:
            raise ValueError(
                'expected 3D or 4D input, got shape {}'.format(input_shape))

        super(AttentiveNormalization, self).build(input_shape)

        dim = input_shape[self.axis]
        shape = (self.n_mixture, dim)  # K x C

        self.FC = layers.Dense(self.n_mixture, activation="sigmoid")
        self.FC.build(input_shape)  # (N, C)

        if len(input_shape) == 4:
            self.GlobalAvgPooling = layers.GlobalAveragePooling2D(
                self.data_format)
        else:
            self.GlobalAvgPooling = layers.GlobalAveragePooling1D(
                self.data_format)
        self.GlobalAvgPooling.build(input_shape)

        self._trainable_weights = self.FC.trainable_weights

        self.learnable_weights = self.add_weight(name='gamma2',
                                                 shape=shape,
                                                 initializer=ANInitializer(
                                                     scale=0.1, bias=1.),
                                                 trainable=True)

        self.learnable_bias = self.add_weight(name='bias2',
                                              shape=shape,
                                              initializer=ANInitializer(
                                                  scale=0.1, bias=0.),
                                              trainable=True)
Example #14
def mini_resnet(input_x):
    # head
    x = KL.Conv1D(filters=64, kernel_size=size, padding='same')(input_x)
    x = KL.BatchNormalization()(x)
    x = KL.Activation(activation='relu')(x)
    # block 1
    x = conv_unit(x, filters=[16, 16], block=1, unit=1, trainable=True)
    x = conv_unit(x, filters=[16, 16], block=2, unit=2, trainable=True)
    # block 3
    x = conv_unit(x, filters=[32, 32], block=3, unit=1, trainable=True)
    x = conv_unit(x, filters=[32, 32], block=4, unit=2, trainable=True)

    # block 5
    x = conv_unit(x, filters=[64, 64], block=5, unit=1, trainable=True)
    x = conv_unit(x, filters=[64, 64], block=6, unit=2, trainable=True)

    # block 6
    x = conv_unit(x, filters=[128, 128], block=7, unit=1, trainable=True)
    x = conv_unit(x, filters=[128, 128], block=8, unit=2, trainable=True)

    # block 7
    # x = conv_unit(x, filters=[512, 512], block=7, unit=1, trainable=True)
    # x = conv_unit(x, filters=[512, 512], block=8, unit=2, trainable=True)

    # tail
    x = KL.BatchNormalization(name='out_bn')(x, training=True)
    x = KL.Activation(activation='relu', name='out_relu')(x)
    # x = KL.Flatten(name='out_flatten')(x)
    x = KL.GlobalAveragePooling1D()(x)
    if not conf.use_tradition_feature:
        x = KL.Dense(units=conf.num_class,
                     name='dense_soft_out',
                     activation='softmax')(x)
    return x
Example #15
    def create_channel(self, x, kernel_size, feature_map):
        """
        Creates a layer, working channel wise

        Arguments:
            x           : Input for convoltuional channel
            kernel_size : Kernel size for creating Conv1D
            feature_map : Feature map

        Returns:
            x           : Channel including (Conv1D + {GlobalMaxPooling & GlobalAveragePooling} + Dense [+ Dropout])
        """
        x = layers.SeparableConv1D(feature_map,
                                   kernel_size=kernel_size,
                                   activation='relu',
                                   strides=1,
                                   padding='valid',
                                   depth_multiplier=4)(x)

        x1 = layers.GlobalMaxPooling1D()(x)
        x2 = layers.GlobalAveragePooling1D()(x)
        x = layers.concatenate([x1, x2])

        x = layers.Dense(self.hidden_units)(x)
        if self.dropout_rate:
            x = layers.Dropout(self.dropout_rate)(x)
        return x
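A minimal sketch of wiring one such channel, assuming `create_channel` is reachable as a plain function and that a stand-in config object supplies `hidden_units` and `dropout_rate` (the `_Cfg` class and all shapes below are hypothetical):

from keras import layers, models

class _Cfg:
    hidden_units = 64
    dropout_rate = 0.2

inp = layers.Input(shape=(100, 300))  # (tokens, embedding_dim), illustrative
out = create_channel(_Cfg(), inp, kernel_size=3, feature_map=128)
models.Model(inp, out).summary()  # ends in a Dense of size hidden_units per sample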
Example #16
def self_Att_channel(x, x_att, r=16, name='1'):
    '''
    Channel attention, following
    Hu, Jie, Li Shen, and Gang Sun. "Squeeze-and-excitation networks." arXiv preprint arXiv:1709.01507 (2017).
    :param x: feature map to rescale
    :param x_att: tensor the channel weights are computed from
    :param r: channel reduction ratio
    :return: x, rescaled channel-wise
    '''
    x_self = x
    channel = K.int_shape(x)[-1]
    L = K.int_shape(x)[-2]

    x_att = layers.GlobalAveragePooling1D(name='self_avg_pool' + name)(x_att)

    # x_att = layers.Conv2D(chanel,
    #                       (H,W),
    #                       padding='valid',
    #                       use_bias=None,
    #                       name='FCN' + name)(x_att)

    x_att = layers.Dense(int(channel / r), activation='relu')(x_att)
    x_att = layers.Dense(channel, activation='sigmoid')(x_att)
    x = layers.Multiply()([x_self, x_att])

    return x
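A minimal shape check for the block above, assuming the usual `from keras import layers, models` imports (the (32, 64) sequence shape is illustrative):

inp = layers.Input(shape=(32, 64))
out = self_Att_channel(inp, inp, r=16, name='demo')
models.Model(inp, out).summary()  # output stays (None, 32, 64): channels are only rescaled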
Example #17
def create_keras_classifier_model(number_of_words: int,
                                  input_length: int,
                                  output_neurons: int = 1) -> Model:
    """
    :param number_of_words : int
    :param input_length : int
    :param output_neurons : int
        -> the number of output neurons. this must match the labels.
    :return Model
    """
    raise Exception("Deprecated, please use bucket_classification.py instead")
    model = Sequential()
    model.add(
        layers.Embedding(number_of_words,
                         EMBEDDING_SIZE,
                         input_length=input_length))
    model.add(layers.GlobalAveragePooling1D())
    model.add(layers.Dense(64, activation="relu"))
    model.add(layers.Dropout(0.25))
    model.add(layers.Dense(64, activation="relu"))
    model.add(layers.Dropout(0.1))
    activation = "sigmoid" if output_neurons == 1 else "softmax"
    model.add(layers.Dense(output_neurons, activation=activation))
    # todo - include other metrics such as auc, roc in the future
    # see https://stackoverflow.com/questions/41032551/how-to-compute-receiving-operating-characteristic-roc-and-auc-in-keras
    model.compile(optimizer=optimizers.Adam(),
                  loss="binary_crossentropy",
                  metrics=["accuracy"])
    model.summary()
    return model
Example #18
def SwishNet(input_shape, classes, width_multiply=1):
    _x_in = layers.Input(shape=input_shape)

    # 1 block
    _x_up = __causal_gated_conv1D(filters=16 * width_multiply, length=3)(_x_in)
    _x_down = __causal_gated_conv1D(filters=16 * width_multiply,
                                    length=6)(_x_in)
    _x = layers.Concatenate()([_x_up, _x_down])

    # 2 block
    _x_up = __causal_gated_conv1D(filters=8 * width_multiply, length=3)(_x)
    _x_down = __causal_gated_conv1D(filters=8 * width_multiply, length=6)(_x)
    _x = layers.Concatenate()([_x_up, _x_down])

    # 3 block
    _x_up = __causal_gated_conv1D(filters=8 * width_multiply, length=3)(_x)
    _x_down = __causal_gated_conv1D(filters=8 * width_multiply, length=6)(_x)
    _x_concat = layers.Concatenate()([_x_up, _x_down])

    _x = layers.Add()([_x, _x_concat])

    # 4 block
    _x_loop1 = __causal_gated_conv1D(filters=16 * width_multiply,
                                     length=3,
                                     strides=3)(_x)
    _x = layers.Add()([_x, _x_loop1])

    # 5 block
    _x_loop2 = __causal_gated_conv1D(filters=16 * width_multiply,
                                     length=3,
                                     strides=2)(_x)
    _x = layers.Add()([_x, _x_loop2])

    # 6 block
    _x_loop3 = __causal_gated_conv1D(filters=16 * width_multiply,
                                     length=3,
                                     strides=2)(_x)
    _x = layers.Add()([_x, _x_loop3])

    # 7 block
    _x_forward = __causal_gated_conv1D(filters=16 * width_multiply,
                                       length=3,
                                       strides=2)(_x)

    # 8 block
    _x_loop4 = __causal_gated_conv1D(filters=32 * width_multiply,
                                     length=3,
                                     strides=2)(_x)

    # output
    _x = layers.Concatenate()([_x_loop2, _x_loop3, _x_forward, _x_loop4])
    _x = layers.Conv1D(filters=classes, kernel_size=1)(_x)
    _x = layers.GlobalAveragePooling1D()(_x)
    _x = layers.Activation("softmax")(_x)

    model = Model(inputs=_x_in, outputs=_x)

    return model
Example #19
def build_transformer(max_words=20000, maxlen=200, embedding_dim=400, classification_type=2):
    S_inputs = layers.Input(shape=(None,), dtype='int32')
    embeddings = layers.Embedding(max_words, embedding_dim, input_length=maxlen)(S_inputs)
    embeddings = Position_Embedding()(embeddings)  # adding Position_Embedding slightly improves accuracy
    O_seq = Attention(8, 16)([embeddings, embeddings, embeddings])
    O_seq = layers.GlobalAveragePooling1D()(O_seq)
    O_seq = layers.Dropout(0.5)(O_seq)
    outputs = layers.Dense(classification_type, activation='softmax')(O_seq)
    model = models.Model(inputs=S_inputs, outputs=outputs)
    return model
Example #20
def build_model(max_words, embedding_dim, sequence_length):
    print('Build model...')
    model = models.Sequential()
    model.add(
        layers.Embedding(max_words,
                         embedding_dim,
                         input_length=sequence_length))

    model.add(layers.GlobalAveragePooling1D())
    return model.input, model.output
Example #21
    def __call__(self, inputs):
        x = self._merge_inputs(inputs)

        shape = getattr(x, '_keras_shape')
        replicate_model = self._replicate_model(kl.Input(shape=shape[2:]))
        x = kl.TimeDistributed(replicate_model)(x)
        x = kl.GlobalAveragePooling1D()(x)
        x = kl.Dropout(self.dropout)(x)

        return self._build(inputs, x)
Example #22
def build_model(emb_cid, emb_advid):

    inp1 = layers.Input(shape=(max_len, ))
    inp2 = layers.Input(shape=(max_len, ))

    emb1 = layers.Embedding(input_dim=emb_cid.shape[0],
                            output_dim=emb_cid.shape[1],
                            input_length=max_len,
                            weights=[emb_cid],
                            trainable=False)(inp1)
    emb2 = layers.Embedding(input_dim=emb_advid.shape[0],
                            output_dim=emb_advid.shape[1],
                            input_length=max_len,
                            weights=[emb_advid],
                            trainable=False)(inp2)

    sdrop = layers.SpatialDropout1D(rate=0.2)

    emb1 = sdrop(emb1)
    emb2 = sdrop(emb2)

    content = layers.Concatenate()([emb1, emb2])

    mha = MultiHeadAttention(head_num=16)(content)
    mha = layers.Dropout(0.01)(mha)
    mha = layers.Add()([content, mha])
    mha = LayerNormalization()(mha)
    mha = layers.Dropout(0.01)(mha)
    mha_ff = FeedForward(256)(mha)
    mha_out = layers.Add()([mha, mha_ff])
    mha_out = LayerNormalization()(mha_out)

    lstm = layers.Bidirectional(layers.LSTM(128,
                                            return_sequences=True))(mha_out)

    avg_pool = layers.GlobalAveragePooling1D()(lstm)
    max_pool = layers.GlobalMaxPool1D()(lstm)

    x = layers.Concatenate()([avg_pool, max_pool])

    x = layers.Dense(128, activation='relu')(x)
    x = layers.BatchNormalization()(x)

    x = layers.Dense(64, activation='relu')(x)
    x = layers.BatchNormalization()(x)

    x = layers.Dropout(0.1)(x)

    out = layers.Dense(10, activation='softmax')(x)
    model = keras.Model(inputs=[inp1, inp2], outputs=out)
    model.compile(loss='categorical_crossentropy',
                  optimizer=keras.optimizers.Adam(1e-3),
                  metrics=['accuracy'])

    return model
Example #23
def test_globalpooling_1d_supports_masking():
    # Test GlobalAveragePooling1D supports masking
    model = Sequential()
    model.add(layers.Masking(mask_value=0., input_shape=(3, 4)))
    model.add(layers.GlobalAveragePooling1D())
    model.compile(loss='mae', optimizer='adam')

    model_input = np.random.randint(low=1, high=5, size=(2, 3, 4))
    model_input[0, 1:, :] = 0  # zero (and thereby mask) the last two timesteps of sample 0
    output = model.predict(model_input)
    # with those steps masked, the average over the remaining steps is
    # exactly the first timestep
    assert np.array_equal(output[0], model_input[0, 0, :])
Example #24
def FastText(param):
    inp = layers.Input(shape=(param['sentence_len'], ))
    x = layers.Embedding(param['vocab_size'], param['embed_size'])(inp)
    x = layers.SpatialDropout1D(rate=0.1)(x)
    x = layers.GlobalAveragePooling1D()(x)
    outp = layers.Dense(param['num_class'], activation='sigmoid')(x)
    model = Model(inputs=inp, outputs=outp)
    optimizer = optimizers.Adam(lr=0.01)
    model.compile(loss='binary_crossentropy', optimizer=optimizer)

    return model
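A quick smoke test of the builder above; all parameter values are illustrative placeholders, not taken from the original source:

param = {'sentence_len': 100, 'vocab_size': 20000,
         'embed_size': 128, 'num_class': 5}
model = FastText(param)
model.summary()  # Embedding -> SpatialDropout1D -> GlobalAveragePooling1D -> Dense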
Example #25
def TimeAvgLSTM_embedding(input_shape,
                          dropW=0.2,
                          dropU=0.2,
                          lstm_dims=128,
                          embedding=None):
    model = Sequential()
    model.add(layers.InputLayer(input_shape=(input_shape, )))
    model.add(embedding)
    # Keras 2 kwargs (dropout/recurrent_dropout replace dropout_W/dropout_U);
    # return_sequences=True so GlobalAveragePooling1D gets a 3D input.
    model.add(layers.LSTM(lstm_dims, dropout=dropW, recurrent_dropout=dropU,
                          return_sequences=True))
    model.add(layers.GlobalAveragePooling1D())

    return model
Example #26
def build_model(max_words, embedding_dim, output_dim, sequence_length):
    print('Build model...')
    model = models.Sequential()
    model.add(
        layers.Embedding(max_words,
                         embedding_dim,
                         input_length=sequence_length))

    # add GlobalAveragePooling1D, which averages the embeddings of all the
    # words in the document
    model.add(layers.GlobalAveragePooling1D())
    model.add(layers.Dense(output_dim))
    model.add(layers.Activation('softmax'))
    return model
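For intuition, GlobalAveragePooling1D is just a mean over the sequence axis; a minimal numpy sketch (shapes are illustrative):

import numpy as np

emb = np.random.rand(2, 10, 300).astype('float32')  # (batch, words, embedding_dim)
pooled = emb.mean(axis=1)                           # what GlobalAveragePooling1D computes
print(pooled.shape)                                 # (2, 300)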
Example #27
    def build_Model(self, input_tensor, reshape_dimension):

        x = lpk.ContextGating()(input_tensor)
        x = layers.Reshape((100, reshape_dimension))(x)
        x = lpk.NetVLAD(feature_size=reshape_dimension,
                        max_samples=100,
                        cluster_size=32,
                        output_dim=3 * 16)(x)
        x = layers.Reshape((3, 16))(x)
        x = layers.GlobalAveragePooling1D()(x)
        x = layers.Dense(24, activation='relu')(x)
        x = layers.Dense(3, activation='sigmoid')(x)
        return x
Example #28
def make_model(filters=CHANNELS):
    """Создает сеть на основе сгенеренных признаков."""
    backend.clear_session()

    y = x = layers.Input(shape=(None, 4))
    y = conv1d.make_net(y, BLOCKS, LINK, CHANNELS, LAYERS_TYPE, se=SE)
    y = layers.GlobalAveragePooling1D()(y)
    y = layers.Dense(units=filters // 2, activation="relu")(y)
    y = layers.Dense(units=4, activation=None)(y)

    model = models.Model(inputs=x, outputs=y)
    model.summary()
    return model
Example #29
    def model(self, embedding_matrix):
        model = models.Sequential()
        model.add(layers.Embedding(self.nn_param.vocab_size + 1, self.nn_param.embedding_dim,
                                   input_length=self.nn_param.max_words))
        # input_length=self.nn_param.max_words, weights=[embedding_matrix]))
        model.add(layers.GlobalAveragePooling1D())
        if self.nn_param.class_num == 2:
            model.add(layers.Dense(1, activation='sigmoid'))
            model.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
        else:
            model.add(layers.Dense(self.nn_param.class_num, activation='softmax'))
            model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
        print(model.summary())
        return model
Example #30
def new_model():
    model = keras.Sequential()
    model.add(layers.GaussianNoise(0.5, input_shape=(None, FEATURE_SIZE)))
    model.add(layers.Conv1D(128, 5, activation="relu"))
    model.add(layers.MaxPooling1D(5))
    model.add(layers.Conv1D(256, 5, activation="relu"))
    model.add(layers.GlobalAveragePooling1D())
    model.add(layers.Dense(500, activation="relu"))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(500, activation="relu"))
    model.add(layers.Dropout(0.5))
    model.add(
        layers.Dense(max(allophone_mapping.values()) + 1,
                     activation="softmax"))
    return model