Example 1
def combined_model(dense_layers, output_dim):
    conv_base = VGG16(weights='imagenet',
                      include_top=False,
                      input_shape=(128, 128, 3))
    image_model = models.Sequential()
    image_model.add(conv_base)
    image_model.add(layers.Flatten())
    image_model.add(layers.Dense(2048, activation='relu'))
    image_model.add(layers.Dropout(0.5))
    image_model.add(layers.Dense(1024, activation='relu'))
    image_model.add(layers.Dropout(0.5))
    image_model.add(layers.Dense(512, activation='relu'))

    text_model = models.Sequential()
    text_model.add(layers.Embedding(UNK_WORD + 1, 100, input_length=20))  # UNK_WORD: presumably the highest word index in the vocabulary
    text_model.add(layers.Convolution1D(256, 3, padding='same'))
    text_model.add(layers.MaxPool1D(3, 3, padding='same'))
    text_model.add(layers.Convolution1D(128, 3, padding='same'))
    text_model.add(layers.MaxPool1D(3, 3, padding='same'))
    text_model.add(layers.Convolution1D(64, 3, padding='same'))
    text_model.add(layers.Flatten())

    # The Keras 1 `Merge` layer no longer exists; concatenate the two
    # branches with the functional API instead.
    merged = layers.concatenate([image_model.output, text_model.output])
    x = layers.BatchNormalization()(merged)
    for units in dense_layers:
        x = layers.Dense(units, activation='relu')(x)
        x = layers.Dropout(0.5)(x)
    output = layers.Dense(output_dim, activation='softmax')(x)
    model = models.Model([image_model.input, text_model.input], output)

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    return model
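A hypothetical smoke test for the combined model (the dense widths and output size are illustrative; assumes VGG16 and the keras models/layers modules are imported as in the snippet):

model = combined_model(dense_layers=[512, 256], output_dim=10)
model.summary()  # two inputs: a (128, 128, 3) image and a length-20 token sequence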
Example 2
def base_multi_channel_net__runable(vocabulary_size):
    """
    Runnable multi-path 1-D convolution (three kernel sizes over a shared embedding).
    :param vocabulary_size: vocabulary size for the embedding layer
    :return: an uncompiled Keras Model
    """
    text_input = Input(shape=(32, ), dtype='int32', name='text')
    embedded_text = layers.Embedding(vocabulary_size, 64)(text_input)

    # kernel_size = 3
    channel1 = layers.Conv1D(64, 3, padding="same",
                             activation='relu')(embedded_text)
    channel1 = layers.MaxPool1D(4)(channel1)

    channel2 = layers.Conv1D(64, 4, padding="same",
                             activation='relu')(embedded_text)
    channel2 = layers.MaxPool1D(4)(channel2)

    channel3 = layers.Conv1D(64, 5, padding="same",
                             activation='relu')(embedded_text)
    channel3 = layers.MaxPool1D(4)(channel3)

    concatenated = layers.concatenate([channel1, channel2, channel3], axis=-1)
    concatenated = layers.LSTM(64)(concatenated)

    output = layers.Dense(64, activation='relu')(concatenated)
    output = layers.Dense(1, activation='sigmoid')(output)
    model = Model(text_input, output)
    return model
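Since the docstring advertises this one as runnable, a quick smoke test might look like the sketch below (hypothetical sizes; assumes the snippet's Input, layers and Model names come from tensorflow.keras):

import numpy as np

model = base_multi_channel_net__runable(vocabulary_size=10000)
model.compile(optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])
x = np.random.randint(1, 10000, size=(8, 32))  # 8 sequences of length 32
y = np.random.randint(0, 2, size=(8, 1))       # binary labels
model.train_on_batch(x, y)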
Example 3
    def model(self, embedding_matrix):
        inputs = layers.Input(shape=(self.nn_param.max_words,), dtype='float32')
        embedding = layers.Embedding(input_dim=self.nn_param.vocab_size + 1, output_dim=self.nn_param.embedding_dim,
                                     input_length=self.nn_param.max_words, trainable=False, weights=[embedding_matrix])
        embed = embedding(inputs)

        cnn1 = layers.Conv1D(256, 3, padding='same', strides=1, activation='relu')(embed)
        cnn1 = layers.MaxPool1D(pool_size=48)(cnn1)

        cnn2 = layers.Conv1D(256, 4, padding='same', strides=1, activation='relu')(embed)
        cnn2 = layers.MaxPool1D(pool_size=47)(cnn2)

        cnn3 = layers.Conv1D(256, 5, padding='same', strides=1, activation='relu')(embed)
        cnn3 = layers.MaxPool1D(pool_size=46)(cnn3)

        cnn = layers.concatenate(inputs=[cnn1, cnn2, cnn3], axis=1)
        flat = layers.Flatten()(cnn)
        drop = layers.Dropout(0.2)(flat)
        if self.nn_param.class_num == 2:
            output = layers.Dense(1, activation='sigmoid')(drop)
        else:
            # 'sigmoid' here implies multi-label outputs; for mutually
            # exclusive classes 'softmax' would be the usual choice.
            output = layers.Dense(self.nn_param.class_num, activation='sigmoid')(drop)
        model = models.Model(inputs=inputs, outputs=output)
        model.summary()  # summary() prints and returns None, so wrapping it in print() is redundant
        return model
Example 4
    def cnn_model_simple(self, kernel_sizes_cnn: List[int], filters_cnn: int, dense_size: int,
                         coef_reg_cnn: float = 0., coef_reg_den: float = 0., dropout_rate: float = 0.,
                         input_projection_size: Optional[int] = None, **kwargs) -> Model:
        # Note: kernel_sizes_cnn, filters_cnn, coef_reg_cnn, coef_reg_den and
        # dropout_rate are accepted but unused; the conv stack below is hard-coded.
        inp = Input(shape=(self.opt['text_size'], self.opt['embedding_size']))
        output = inp
        
        if input_projection_size is not None:
            output = Dense(input_projection_size, activation='relu')(output)

        
        output = layers.Conv1D(filters=64, kernel_size=3, activation='relu')(output)
        output = layers.Conv1D(filters=64, kernel_size=3, activation='relu')(output)
        output = layers.MaxPool1D(2, strides=2)(output)
        output = layers.BatchNormalization()(output)
        output = layers.Conv1D(filters=128, kernel_size=3, activation='relu')(output)
        output = layers.Conv1D(filters=128, kernel_size=3, activation='relu')(output)
        output = layers.MaxPool1D(2, strides=2)(output)
        output = layers.BatchNormalization()(output)

        output = layers.Flatten()(output)
        output = layers.Dense(dense_size, activation='relu')(output)
        output = layers.Dense(self.n_classes, activation='softmax', use_bias=False)(output)
        model = Model([inp], [output])
        
        return model
Example 5
def conv_lstm(x):
    x = KL.Conv1D(filters=40, kernel_size=5, strides=1)(x)
    x = KL.MaxPool1D(pool_size=2, strides=2)(x)
    x = KL.Conv1D(filters=32, kernel_size=3, strides=1)(x)
    x = KL.MaxPool1D(pool_size=2, strides=2)(x)
    x = KL.LSTM(units=32, recurrent_dropout=0.25, dropout=0.5, return_sequences=True)(x)
    x = KL.LSTM(units=16, recurrent_dropout=0.25, return_sequences=True)(x)
    x = KL.LSTM(units=4, return_sequences=False)(x)
    x = KL.Dense(units=conf.num_class, activation='softmax')(x)
    return x
Example 6
def text_cnn(token_per_text, embedding_dim):
    i_emb = layers.Input(shape=(token_per_text, embedding_dim))
    conv5_1 = layers.Conv1D(128, 5, padding='same', activation='relu')(i_emb)
    pool5_1 = layers.MaxPool1D(5, padding='same')(conv5_1)
    conv5_2 = layers.Conv1D(64, 5, padding='same', activation='relu')(pool5_1)
    pool5_2 = layers.MaxPool1D(5, padding='same')(conv5_2)
    conv5_3 = layers.Conv1D(32, 5, padding='same', activation='relu')(pool5_2)
    pool5_3 = layers.MaxPool1D(5, padding='same')(conv5_3)
    flat5 = layers.Flatten()(pool5_3)

    conv3_1 = layers.Conv1D(128, 3, padding='same', activation='relu')(i_emb)
    pool3_1 = layers.MaxPool1D(3, padding='same')(conv3_1)
    conv3_2 = layers.Conv1D(64, 3, padding='same', activation='relu')(pool3_1)
    pool3_2 = layers.MaxPool1D(3, padding='same')(conv3_2)
    conv3_3 = layers.Conv1D(32, 4, padding='same', activation='relu')(pool3_2)
    pool3_3 = layers.MaxPool1D(13, padding='same')(conv3_3)
    flat3 = layers.Flatten()(pool3_3)

    conv1_1 = layers.Conv1D(128, 1, padding='same', activation='relu')(i_emb)
    pool1_1 = layers.MaxPool1D(1, padding='same')(conv1_1)
    conv1_2 = layers.Conv1D(64, 1, padding='same', activation='relu')(pool1_1)
    pool1_2 = layers.MaxPool1D(1, padding='same')(conv1_2)
    conv1_3 = layers.Conv1D(32, 1, padding='same', activation='relu')(pool1_2)
    pool1_3 = layers.MaxPool1D(150, padding='same')(conv1_3)  # the fixed pool sizes (150, 13, ...) presume token_per_text = 150
    flat1 = layers.Flatten()(pool1_3)

    o = layers.Concatenate()([flat5, flat3, flat1])

    return Model(i_emb, o)
Example 7
def model_twitter_convnet_lstm(embedding_matrix, maxlen):
    input_twitter = layers.Input(shape=(maxlen, ),
                                 dtype='int32',
                                 name='input_twitter')
    x = layers.Embedding(embedding_matrix.shape[0],
                         embedding_matrix.shape[1],
                         input_length=maxlen,
                         trainable=True,
                         weights=[embedding_matrix],
                         mask_zero=False,
                         name='embedded')(input_twitter)

    x = layers.Conv1D(32, (5),
                      padding='same',
                      kernel_initializer='orthogonal',
                      name='conv1d_1')(x)
    x = layers.Activation('elu', name='act_elu_1')(x)
    x = layers.MaxPool1D(3, name='maxpool_1')(x)
    x = layers.Dropout(0.25, name='dropout_1')(x)

    x = layers.Conv1D(32, (5),
                      padding='same',
                      kernel_initializer='orthogonal',
                      name='conv1d_2')(x)
    x = layers.Activation('elu', name='act_elu_2')(x)
    x = layers.MaxPool1D(3, name='maxpool_2')(x)
    x = layers.Dropout(0.25, name='dropout_2')(x)

    x = layers.Bidirectional(
        layers.LSTM(32,
                    return_sequences=True,
                    kernel_initializer='orthogonal',
                    name='lstm_1'))(x)
    x = layers.Activation('elu', name='act_elu_3')(x)
    x = layers.Bidirectional(
        layers.LSTM(64,
                    return_sequences=False,
                    kernel_initializer='orthogonal',
                    name='lstm_2'))(x)
    x = layers.Activation('elu', name='act_elu_4')(x)

    x = layers.Dense(1, name='dense_final', activation='sigmoid')(x)

    model = Model(input_twitter, x)
    model.compile(optimizer=Adam(lr=0.0001, decay=1e-6),
                  loss='binary_crossentropy',
                  metrics=['acc'])
    model.summary()
    return model
Example 8
def get_baseline_convolutional_encoder(filters,
                                       embedding_dimension,
                                       input_shape=None,
                                       dropout=0.05):
    encoder = Sequential()

    # Initial conv
    if input_shape is None:
        # In this case we are using the encoder as part of a siamese network and the input shape will be determined
        # automatically based on the input shape of the siamese network
        encoder.add(
            layers.Conv1D(filters, 32, padding='same', activation='relu'))
    else:
        # In this case we are using the encoder to build a classifier network and the input shape must be defined
        encoder.add(
            layers.Conv1D(filters,
                          32,
                          padding='same',
                          activation='relu',
                          input_shape=input_shape))
    encoder.add(layers.BatchNormalization())
    encoder.add(layers.SpatialDropout1D(dropout))
    encoder.add(layers.MaxPool1D(4, 4))

    # Further convs
    encoder.add(
        layers.Conv1D(2 * filters, 3, padding='same', activation='relu'))
    encoder.add(layers.BatchNormalization())
    encoder.add(layers.SpatialDropout1D(dropout))
    encoder.add(layers.MaxPool1D())

    encoder.add(
        layers.Conv1D(3 * filters, 3, padding='same', activation='relu'))
    encoder.add(layers.BatchNormalization())
    encoder.add(layers.SpatialDropout1D(dropout))
    encoder.add(layers.MaxPool1D())

    encoder.add(
        layers.Conv1D(4 * filters, 3, padding='same', activation='relu'))
    encoder.add(layers.BatchNormalization())
    encoder.add(layers.SpatialDropout1D(dropout))
    encoder.add(layers.MaxPool1D())

    encoder.add(layers.GlobalMaxPool1D())

    encoder.add(layers.Dense(embedding_dimension))

    return encoder
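A hypothetical way to turn the encoder into a standalone classifier (the input length, filter count and class count are illustrative, not from the source):

encoder = get_baseline_convolutional_encoder(filters=32,
                                             embedding_dimension=64,
                                             input_shape=(4096, 1))
encoder.add(layers.Dense(10, activation='softmax'))  # e.g. 10 speaker classes
encoder.summary()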
Example 9
def base_multi_channel_net(vocabulary_size, time_steps=32):
    """
    Multi-channel 1-D convolution (one channel per kernel size in 3..6).
    :param vocabulary_size: vocabulary size for the embedding layer
    :param time_steps: input sequence length
    :return: an uncompiled Keras Model
    """
    text_input = Input(shape=(time_steps, ), dtype='int32', name='text')
    embedded_text = layers.Embedding(vocabulary_size, 128)(text_input)

    channels = []
    for kernel_size in range(3, 7):
        channel = layers.Conv1D(16, kernel_size,
                                activation='relu')(embedded_text)
        # Pooling over the entire conv output reduces each channel to length 1.
        channel = layers.MaxPool1D(time_steps - kernel_size + 1)(channel)
        channels.append(channel)

    concatenated = layers.concatenate(channels, axis=-1)
    concatenated = layers.Flatten()(concatenated)
    # concatenated = layers.LSTM(64)(concatenated)
    output = layers.Dense(
        64, activation='relu',
        kernel_regularizer=regularizers.l2(0.001))(concatenated)
    output = layers.Dense(1, activation='sigmoid')(output)
    model = Model(text_input, output)
    return model
Example 10
    def __call__(self, inp):
        # Conv-layers
        x = inp
        for i, (n_filter,
                n_col) in enumerate(zip(self.filters, self.conv_widths)):
            x = kl.Conv1D(filters=n_filter,
                          kernel_size=n_col,
                          kernel_initializer='he_normal')(x)
            # As written, the first conv layer gets no normalization,
            # activation or pooling; only subsequent layers do.
            if i > 0:
                x = kl.BatchNormalization()(x)
                x = kl.Activation(self.activation_func)(x)

                if i in self.pool_layers_indices:  # add pool layers where appropriate
                    pool_idx = self.pool_layers_indices.index(i)
                    x = kl.MaxPool1D(pool_size=self.pool_widths[pool_idx],
                                     strides=self.pool_strides[pool_idx])(x)

        seq_preds = kl.Flatten()(x)

        # fully connected
        for drop_rate, fc_layer_size in zip(self.dropout, self.hidden):
            seq_preds = kl.Dropout(drop_rate)(seq_preds)
            seq_preds = kl.Dense(fc_layer_size)(seq_preds)
            if self.batchnorm:
                seq_preds = kl.BatchNormalization()(seq_preds)
            seq_preds = kl.Activation('relu')(seq_preds)
        seq_preds = kl.Dropout(self.final_dropout)(seq_preds)

        return seq_preds
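For reference, a hypothetical attribute set (e.g. assigned in __init__) that would satisfy this __call__; the names come from the method body, but the values are illustrative, not from the original class:

self.filters = [64, 64, 128]       # one conv layer per entry, zipped with conv_widths
self.conv_widths = [9, 9, 5]       # kernel sizes
self.pool_layers_indices = [1, 2]  # conv layers followed by pooling (layer 0 never pools here)
self.pool_widths = [4, 4]
self.pool_strides = [2, 2]
self.activation_func = 'relu'
self.dropout = [0.2, 0.2]          # zipped with self.hidden
self.hidden = [256, 64]            # fully connected layer sizes
self.batchnorm = True
self.final_dropout = 0.2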
Example 11
def get_predefined_model():
    # input shape: window_size x len(x_cols)
    x = layers.Input((33, 91), name='input_62')
    y = layers.Conv1D(filters=126,
                      kernel_size=2,
                      padding='causal',
                      name='conv1d_154')(x)
    #y = layers.BatchNormalization(name='batch_normalization_118')(y)
    y = layers.Activation('relu', name='activation_118')(y)
    y = layers.Conv1D(filters=126,
                      kernel_size=2,
                      padding='causal',
                      dilation_rate=2,
                      name='conv1d_155')(y)
    #y = layers.BatchNormalization(name='batch_normalization_119')(y)
    y = layers.Activation('relu', name='activation_119')(y)

    shortcut = layers.Conv1D(filters=126,
                             kernel_size=1,
                             padding='causal',
                             dilation_rate=2,
                             name='conv1d_156')(x)
    y = layers.add([shortcut, y], name='add_37')

    y = layers.MaxPool1D(pool_size=33, name='global_max_pooling1d_62')(y)  # pool_size equals the window length, so this acts as global max pooling
    y = layers.Flatten()(y)
    y = layers.Dense(units=len(cfg.data_cfg['Target_param_names']),
                     name='dense_62')(y)

    model = models.Model(inputs=x, outputs=y)
    model.compile(optimizer=opts.Adam(lr=1e-9), loss='mse')
    return model
Example 12
    def cnn_module(self, tra_charas, tes_charas, tra_y, tes_y, out_len):
        # NB: the trailing length comments (125, 63, ...) track a 128-step input;
        # with input_shape=(20, 128) the time axis is only 20 steps, and the
        # third Conv1D below would fail (length 2 < kernel size 4).
        self.model.add(layers.Conv1D(32, 4, activation='relu', input_shape=(20, 128)))  # 125
        self.model.add(layers.MaxPool1D(2))    # 63
        self.model.add(layers.Conv1D(64, 4, activation='relu'))   # 60
        self.model.add(layers.MaxPool1D(2))    # 30
        self.model.add(layers.Conv1D(64, 4, activation='relu'))   # 27
        self.model.add(layers.MaxPool1D(2))    # 14
        self.model.add(layers.Flatten())
        self.model.add(layers.Dense(128, activation='relu'))
        self.model.add(layers.Dense(out_len, activation='softmax'))
        self.model.compile(optimizer='rmsprop', loss='categorical_crossentropy', metrics=['accuracy'])
        self.model.fit(tra_charas, tra_y, epochs=5, batch_size=64)
        test_loss, test_acc = self.model.evaluate(tes_charas, tes_y)
        print("test loss is:", test_loss)
        print("test accuracy is:", test_acc)
        return test_acc
Example 13
    def _build_model(self):
        # Neural Net for Deep-Q learning Model
        inp = KL.Input(shape=self.state_size)  # state_size must already be a (steps, channels) tuple; (self.state_size) is not a tuple literal
        x = inp
        x = KL.Conv1D(64,72,strides=8,activation='relu')(x)
        x = KL.Conv1D(64,12,strides=4,activation='relu')(x)
        x = KL.Conv1D(128,7,strides=3,activation='relu')(x)
        x = KL.Conv1D(128,3,strides=3,activation='relu')(x)
        x = KL.Conv1D(256,3,activation='relu')(x)
        x = KL.MaxPool1D(3)(x)
        x = KL.Flatten()(x)
        x = KL.Dropout(0.3)(x)
        x = KL.Dense(64,activation='relu')(x)

        inp_R = KL.Input(shape=(self.statement_size,1))
        R = inp_R
        R = KL.Masking()(R)
        R = KL.GRU(64)(R)

        out = KL.Add()([x,R])
        out = KL.Dense(128,activation='relu')(out)
        out = KL.Dense(self.action_size)(out)

        model = Model([inp,inp_R],out)

        model.compile(loss=self._huber_loss,
                      optimizer=Adam(lr=self.learning_rate))
        return model
Example 14
    def _build_model(self):
        """
        论文中的模型结构
        :return:
        """
        model = Sequential()

        # 词嵌入
        model.add(
            layers.Embedding(self.alphabet_size + 1, 128, input_length=self.input_size)
        )
        # 卷积层
        for cl in self.conv_layers:
            model.add(layers.Conv1D(filters=cl[0], kernel_size=cl[1]))
            model.add(layers.ThresholdedReLU(self.threshold))
            if cl[-1] is not None:
                model.add(layers.MaxPool1D(pool_size=cl[-1]))

        model.add(layers.Flatten())
        # Fully connected layers
        for fl in self.fully_layers:
            # model.add(layers.Dense(fl, activity_regularizer=regularizers.l2(0.01)))
            model.add(layers.Dense(fl))
            model.add(layers.ThresholdedReLU(self.threshold))
            model.add(layers.Dropout(self.dropout_p))
        # Output layer
        model.add(layers.Dense(self.num_of_classes, activation="softmax"))

        model.compile(optimizer=self.optimizer, loss=self.loss, metrics=["accuracy"])
        print("CharCNN model built successfully")
        model.summary()
        return model
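The loop above expects self.conv_layers as [filters, kernel_size, pool_size_or_None] triples and self.fully_layers as a list of dense widths. A plausible configuration is the classic character-level CNN layout of Zhang et al. (2015); the assignment below is illustrative, not taken from this class:

conv_layers = [
    [256, 7, 3],
    [256, 7, 3],
    [256, 3, None],
    [256, 3, None],
    [256, 3, None],
    [256, 3, 3],
]
fully_layers = [1024, 1024]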
Example 15
def generate_convnet_model(maxwords, vocabsize, units, layercnt, conv_layercnt,
                           embedding_units, conv_filters, conv_window,
                           pool_size, flatten_after_conv, dropout,
                           conv_activation, activation, final_activation,
                           optimizer, loss, metrics):
    model = models.Sequential()
    model.add(
        layers.Embedding(vocabsize, embedding_units, input_length=maxwords))
    for i in range(conv_layercnt):
        model.add(
            layers.Conv1D(conv_filters,
                          conv_window,
                          activation=conv_activation))
        if i < conv_layercnt - 1:  # Max pooling until last layer
            model.add(layers.MaxPool1D(pool_size))
        else:  # Layer after last conv layer prepares output for dense layers
            if flatten_after_conv:
                model.add(layers.Flatten())
            else:
                model.add(layers.GlobalMaxPool1D())
    for i in range(layercnt):
        if dropout is not None:
            model.add(layers.Dropout(dropout))
        model.add(layers.Dense(units, activation=activation))
    if dropout is not None:
        model.add(layers.Dropout(dropout))
    model.add(layers.Dense(1, activation=final_activation))

    model.compile(optimizer=optimizer, loss=loss, metrics=metrics)
    return model
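Since every knob is an argument here, a concrete call helps; the values below sketch an IMDB-style binary classifier and are illustrative, not defaults from the source:

model = generate_convnet_model(
    maxwords=500, vocabsize=10000, units=64, layercnt=1, conv_layercnt=2,
    embedding_units=128, conv_filters=32, conv_window=7, pool_size=5,
    flatten_after_conv=False, dropout=0.5,
    conv_activation='relu', activation='relu', final_activation='sigmoid',
    optimizer='rmsprop', loss='binary_crossentropy', metrics=['acc'])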
Example 16
    def __init__(self, n_classes: int, hparams: Dict):
        cfg: Big01Cfg = Big01Cfg.scheme(hparams)
        input = layers.Input(shape=(None, 1))
        net = layers.BatchNormalization()(input)
        for i in range(cfg.num_blocks):
            channels = 2**i * cfg.block_init_channels
            net = layers.Conv1D(channels,
                                cfg.receptive_width,
                                padding="same",
                                bias_regularizer=regularizers.l1(0.1))(net)
            with tf.name_scope(f"block_{i}"):
                for _ in range(cfg.block_elem):
                    x = net
                    net = layers.Conv1D(channels,
                                        cfg.receptive_width,
                                        padding='same')(net)
                    net = layers.BatchNormalization()(net)
                    net = layers.Activation('relu')(net)
                    net = layers.Conv1D(channels,
                                        cfg.receptive_width,
                                        padding='same')(net)
                    net = layers.BatchNormalization()(net)
                    net = layers.Activation('relu')(net)
                    net = ConstMultiplierLayer()(net)
                    net = layers.add([x, net])
            net = layers.MaxPool1D(padding='same', pool_size=2)(net)

        net = layers.Conv1D(n_classes, cfg.receptive_width,
                            padding="same")(net)
        self.forward_model = models.Model(inputs=[input], outputs=[net])
        self.ratio = 2**cfg.num_blocks
Example 17
    def _foraward_model(n_classes, cfg: FunnyFermatCfg) -> models.Model:
        input = layers.Input(shape=(None, 1))
        net = input
        net = layers.BatchNormalization()(net)
        for i in range(cfg.num_blocks):
            channels = cfg.block_init_channels * 2**i
            net = layers.Conv1D(channels, cfg.receptive_width,
                                padding='same')(net)
            with K.name_scope(f"block_{i}"):
                for _ in range(cfg.block_elem):
                    x = net
                    net = layers.Conv1D(channels,
                                        cfg.receptive_width,
                                        padding='same')(net)
                    net = layers.BatchNormalization()(net)
                    net = layers.Activation('relu')(net)
                    net = layers.Conv1D(channels,
                                        cfg.receptive_width,
                                        padding='same')(net)
                    net = layers.BatchNormalization()(net)
                    net = layers.Activation('relu')(net)
                    net = ConstMultiplierLayer()(net)
                    net = layers.add([x, net])
            net = layers.MaxPool1D(padding="same", pool_size=2)(net)

        net = layers.Conv1D(n_classes, cfg.receptive_width,
                            padding="same")(net)
        return models.Model(inputs=[input], outputs=[net])
Example 18
def test_conv_sequence():
    (x_train, y_train), (x_test,
                         y_test) = imdb.load_data(num_words=max_feature)

    x_train = sequence.pad_sequences(x_train, maxlen=max_len)
    x_test = sequence.pad_sequences(x_test, maxlen=max_len)

    model = Sequential()
    model.add(layers.Embedding(max_feature, 128, input_length=max_len))
    model.add(layers.Conv1D(32, 7, activation='relu'))
    model.add(layers.MaxPool1D(5))
    model.add(layers.Conv1D(32, 7, activation='relu'))
    model.add(layers.GlobalMaxPool1D())
    model.add(layers.Dense(1))  # NB: no sigmoid here; binary_crossentropy expects probabilities, so activation='sigmoid' would be the safer head

    model.summary()

    model.compile(optimizer=RMSprop(lr=1e-4),
                  loss='binary_crossentropy',
                  metrics=['acc'])
    history = model.fit(x_train,
                        y_train,
                        epochs=5,
                        batch_size=128,
                        validation_split=0.2)

    model_plot(history)
Example 19
def convNetLSTM(dimFeature):

    # Convnet model
    filters = 16
    kernel_size = 16
    """"
    model = kmodels.Sequential((
        # The first conv layer learns `nb_filter` filters (aka kernels), each of size ``(filter_length, nb_input_series)``.
        # Its output will have shape (None, window_size - filter_length + 1, nb_filter), i.e., for each position in
        # the input timeseries, the activation of each filter at that position.
        klayers.Convolution1D(nb_filter=nb_filter, filter_length=filter_length, activation='relu', input_shape=(a, b)),
        klayers.MaxPooling1D(),     # Downsample the output of convolution by 2X.
        #klayers.Convolution1D(nb_filter=nb_filter, filter_length=filter_length, activation='relu'),
        #klayers.MaxPooling1D(),
        klayers.Flatten(),
        klayers.Dense(1, activation='linear'),     # For binary classification, change the activation to 'sigmoid'
    ))
    """
    model = kmodels.Sequential()
    #model.add(klayers.Conv1D(filters=filters, kernel_size=kernel_size, activation='relu', input_shape=(1, trainFeatures.shape[2]), data_format="channels_first"))
    model.add(
        klayers.Conv1D(filters=filters,
                       kernel_size=kernel_size,
                       activation='relu',
                       input_shape=(1, dimFeature),
                       data_format="channels_first"))
    # NB: the conv runs channels_first but MaxPool1D defaults to channels_last,
    # so the pooling below operates over the filter axis rather than time.
    model.add(klayers.MaxPool1D())
    #model.add(klayers.LSTM(10, dropout=0.25, recurrent_dropout=0.25))
    model.add(klayers.LSTM(50, dropout=0.25, recurrent_dropout=0.25))
    model.add(klayers.Dense(16, activation="relu"))
    model.add(klayers.Dense(1, activation="sigmoid"))

    return model
Example 20
def pixel_branch(input_tensor):
    filters = [8, 16, 32, 64, 96, 128]
    conv0 = L.Conv1D(filters[3], 11, padding='valid')(input_tensor)
    conv0_a = attention_block_3(conv0, 170, 64)
    conv0 = L.concatenate([conv0, conv0_a])
    conv0 = L.BatchNormalization(axis=-1)(conv0)
    conv0 = L.advanced_activations.LeakyReLU(alpha=0.2)(conv0)
    conv3 = L.Conv1D(filters[5], 3, padding='valid')(conv0)
    conv3 = L.advanced_activations.LeakyReLU(alpha=0.2)(conv3)
    conv3 = L.MaxPool1D(pool_size=2, padding='valid')(conv3)
    conv3 = L.Conv1D(filters[5], 3, padding='valid')(conv3)
    conv3 = L.advanced_activations.LeakyReLU(alpha=0.2)(conv3)
    conv3 = L.MaxPool1D(pool_size=2, padding='valid')(conv3)
    conv3 = L.Conv1D(filters[5], 3, padding='valid')(conv3)
    conv3 = L.advanced_activations.LeakyReLU(alpha=0.2)(conv3)
    conv3 = L.MaxPool1D(pool_size=2, padding='valid')(conv3)
    conv3 = L.Flatten()(conv3)
    return conv3
Example 21
def mount_conv_model(vb_size,
                     emb_dim,
                     *,
                     max_len=100,
                     num_classes=3,
                     act='relu',
                     weights=None):
    """

    :param max_len:
    :param weights:
    :param vb_size:
    :param emb_dim:
    :param num_classes:
    :param act:
    :return:
    """

    print('cls', num_classes)

    model = Sequential([
        layers.Embedding(vb_size, emb_dim, input_length=max_len),
        layers.Dropout(0.2),
        layers.Conv1D(32, 5, activation=act),
        layers.MaxPool1D(3),
        layers.Conv1D(32, 5, activation=act),
        layers.MaxPool1D(3),
        layers.Conv1D(32, 5, activation=act),
        # layers.GlobalAveragePooling1D(),
        # layers.Bidirectional(layers.GRU(16, dropout=0.1, recurrent_dropout=0.5)),
        # layers.Bidirectional(layers.GRU(16, dropout=0.1, recurrent_dropout=0.5)),
        # layers.Bidirectional(layers.GRU(16, dropout=0.1, recurrent_dropout=0.5, return_sequences=True)),
        layers.Flatten(),
        layers.Dense(32),
        layers.Dense(num_classes, activation='softmax'),
    ])

    # If any pre-trained weights (glove/w2v) were passed, use them and freeze the embedding
    if weights is not None:
        model.layers[0].set_weights([weights])
        model.layers[0].trainable = False

    return model
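A hypothetical call (the sizes are illustrative, and sparse labels are assumed):

model = mount_conv_model(vb_size=20000, emb_dim=100, max_len=100, num_classes=3)
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=['acc'])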
Example 22
    def __init__(self, strategy):
        NUM_CLASS = variables.NUM_CLASS
        ksize = variables.ksize
        hchn = variables.hchn
        filters = [32, 64, 128, 256, 512]
        dilations = [1, 3, 5, 7]
        self.input_hsi2D = L.Input((ksize, ksize, hchn))
        getindicelayer = Lambda(
            lambda x: x[:, variables.r, variables.r, :, np.newaxis])
        self.input_hsi1D = getindicelayer(self.input_hsi2D)
        # self.first=L.initializers.RandomNormal(mean=0.0,stddev=0.05,seed=None)
        self.conv0_0 = L.Conv2D(64, (3, 3), padding='same')(self.input_hsi2D)
        self.conv0_1 = L.BatchNormalization(axis=-1)(self.conv0_0)
        self.conv0_2 = L.advanced_activations.LeakyReLU(alpha=0.2)(
            self.conv0_1)
        self.conv0_3 = L.Conv2D(128, (1, 1), padding='same')(self.conv0_2)
        self.conv0_4 = L.advanced_activations.LeakyReLU(alpha=0.2)(
            self.conv0_3)
        self.conv0_5 = L.MaxPool2D(pool_size=(2, 2),
                                   padding='same')(self.conv0_4)
        self.conv0_6 = L.Flatten()(self.conv0_5)

        self.conv1_0 = L.Conv1D(64, 3, padding='valid')(self.input_hsi1D)
        self.conv1_1 = L.BatchNormalization(axis=-1)(self.conv1_0)
        self.conv1_2 = L.advanced_activations.LeakyReLU(alpha=0.2)(
            self.conv1_1)
        self.conv1_3 = L.Conv1D(128, 3, padding='valid')(self.conv1_2)
        self.conv1_4 = L.advanced_activations.LeakyReLU(alpha=0.2)(
            self.conv1_3)
        self.conv1_5 = L.MaxPool1D(pool_size=2, padding='same')(self.conv1_4)
        self.conv1_6 = L.Flatten()(self.conv1_5)

        self.conv7 = L.concatenate([self.conv0_6, self.conv1_6])
        # self.conv7=self.conv0_6
        self.conv8 = L.Dropout(0.5)(self.conv7)
        self.conv9 = L.Dense(NUM_CLASS,
                             activation='linear',
                             kernel_regularizer=regularizers.l2(0.5))(
                                 self.conv8)
        #    activation='softmax')(self.conv8)
        self.model = K.models.Model([self.input_hsi2D], self.conv9)
        if strategy == 'Adam':
            opti = K.optimizers.Adam(lr=0.001)
        if strategy == 'SGD_005_1e-6_0':
            opti = K.optimizers.SGD(lr=0.005, momentum=1e-6)
        if strategy == 'SGD_001_95_1e-5':
            opti = K.optimizers.SGD(lr=0.001, momentum=0.95, decay=1e-5)
        if strategy == 'SGD_001_99_1e-3':
            opti = K.optimizers.SGD(lr=0.001, momentum=0.99, decay=1e-3)
        # kwargs = K.backend.moving_averages  # dead line: keras.backend has no such attribute, and the value was never used
        self.model.compile(
            optimizer=opti,
            # NB: 'categorical_squared_hinge' is not a built-in Keras loss
            # (cf. 'squared_hinge' / 'categorical_hinge'); a custom function
            # must be registered under this name for compile() to work.
            loss='categorical_squared_hinge',  # alternatives tried: 'mean_squared_error', 'categorical_crossentropy'
            metrics=['acc'])
Example 23
def _network4():

    optimizer = optimizers.Adadelta(lr=2)

    model = Sequential()
    # output 44,6
    model.add(
        layers.Conv1D(filters=30, kernel_size=70, strides=2,
                      activation='relu'))
    # output 11, 3
    if NO_FEATURES == 308:
        model.add(layers.MaxPool1D(120, 2))
    if NO_FEATURES == 300:
        model.add(layers.MaxPool1D(116, 2))
    # NB: softmax -> dropout -> relu -> softmax is almost certainly unintended;
    # a single softmax on the final Dense layer is the conventional head.
    model.add(layers.Dense(units=3, activation='softmax'))
    model.add(layers.Dropout(0.5))
    model.add(layers.Dense(units=3, activation='relu'))
    model.add(layers.Softmax())
    model.compile(loss=losses.categorical_crossentropy,
                  optimizer=optimizer,
                  metrics=['accuracy'])

    [favorites_dataset, retweets_dl] = _transform_to_dataloader()
    x_train, y_train = favorites_dataset[0]
    x_validation, y_validation = favorites_dataset[1]

    x_train = np.expand_dims(x_train, axis=2)
    y_train = np.expand_dims(to_categorical(y_train, num_classes=3), axis=1)

    x_validation = np.expand_dims(x_validation, axis=2)
    y_validation = np.expand_dims(to_categorical(y_validation, num_classes=3),
                                  axis=1)

    x = model.fit(x_train,
                  y_train,
                  validation_split=0.3,
                  epochs=EPOCHS,
                  batch_size=BATCH_SIZE,
                  verbose=1,
                  shuffle=True,
                  callbacks=[es])
Example 24
def build_model(embedding_layer):

    #model = bidirectional_attention.BidirectionalAttentionFlow(params=params.Params())

    # train a 1D convnet with global maxpooling
    question_input = layers.Input(
        shape=(MAX_SEQUENCE_LENGTH, ),
        dtype='int32')  # * 2 since doubling the question and passage
    answer_input = layers.Input(
        shape=(MAX_SEQUENCE_LENGTH, ),
        dtype='int32')  # * 2 since doubling the question and passage

    question_embedding = embedding_layer(question_input)
    answer_embedding = embedding_layer(answer_input)

    question = layers.Conv1D(
        filters=200,
        kernel_size=10,
        activation='relu',
        name="question",
        kernel_regularizer=l2(0.0001),
        kernel_initializer='he_uniform')(question_embedding)
    answer = layers.Conv1D(filters=200,
                           kernel_size=10,
                           activation='relu',
                           name="answer",
                           kernel_regularizer=l2(0.0001),
                           kernel_initializer='he_uniform')(answer_embedding)
    question = layers.MaxPool1D(pool_size=5)(question)
    answer = layers.MaxPool1D(pool_size=5)(answer)
    question = layers.Flatten()(question)
    answer = layers.Flatten()(answer)

    question = layers.Dropout(0.1)(question)
    answer = layers.Dropout(0.1)(answer)

    merged = layers.concatenate([question, answer])
    preds = layers.Dense(1, activation='sigmoid', name="activation")(merged)

    model = models.Model(inputs=[question_input, answer_input], outputs=preds)
    return model
Example 25
def Conv_model(train_gen, val_gen, val_steps, float_data_shape):
    model = Sequential()
    model.add(
        layers.Conv1D(32,
                      5,
                      activation='relu',
                      input_shape=(None, float_data_shape[-1])))
    model.add(layers.MaxPool1D(3))
    model.add(layers.Conv1D(32, 5, activation='relu'))
    model.add(layers.MaxPool1D(3))
    model.add(layers.Conv1D(32, 5, activation='relu'))
    model.add(layers.GlobalMaxPool1D())
    model.add(layers.Dense(1))

    model.compile(optimizer=RMSprop(), loss='mae')
    history = model.fit_generator(train_gen,
                                  steps_per_epoch=500,
                                  epochs=5,
                                  validation_data=val_gen,
                                  validation_steps=val_steps)
    return history
Example 26
def pixel_branch(input_tensor):
    filters = [8, 16, 32, 64, 96, 128]
    # input_tensor=L.Permute((2,1))(input_tensor)
    conv0 = L.Conv1D(filters[3], 11, padding='valid')(input_tensor) 
    conv0 = L.BatchNormalization(axis=-1)(conv0)
    conv0 = L.advanced_activations.LeakyReLU(alpha=0.2)(conv0)
    conv3 = L.Conv1D(filters[5], 3, padding='valid')(conv0)  
    conv3 = L.advanced_activations.LeakyReLU(alpha=0.2)(conv3)
    conv3 = L.MaxPool1D(pool_size=2, padding='valid')(conv3)
    conv3 = L.Flatten()(conv3)
    # conv3 = L.Dense(192)(conv3)
    return conv3
Example 27
    def build_M(self):
        inp = KL.Input(shape=(self.len_data, self.len_segment, 1))
        x = inp

        x = KL.Reshape((self.len_segment, 1))(x)
        x = KL.Conv1D(128, 17, strides=1, padding='same', name='conv1')(x)
        x = KL.BatchNormalization(name='bn1')(x)
        x = KL.LeakyReLU(0.2)(x)
        x = KL.MaxPool1D(16, name='pool1')(x)
        x = KL.Conv1D(128, 17, strides=1, padding='same', name='conv2')(x)
        x = KL.BatchNormalization(name='bn2')(x)
        x = KL.LeakyReLU(0.2)(x)
        x = KL.MaxPool1D(16, name='pool2')(x)
        x = KL.Conv1D(128, 3, padding='same', name='conv3')(x)
        x = KL.BatchNormalization(name='bn3')(x)
        x = KL.LeakyReLU(0.2)(x)
        x = KL.MaxPool1D(2, name='pool3')(x)

        x = KL.Flatten()(x)
        out = x
        # out_deconv = y
        return Model([inp], [out])
Example 28
def CreateCONVGRUModel():
    model = models.Sequential()
    model.add(layers.Conv1D(32,5,activation='relu',input_shape=(None, float_data.shape[-1])))
    model.add(layers.MaxPool1D(3))
    model.add(layers.Conv1D(32,5,activation='relu'))
    model.add(layers.GRU(32,dropout=0.1,recurrent_dropout=0.5))
    model.add(layers.Dense(1))
    model.summary()
    plot_model(model=model,
              to_file='{}/{}.png'.format(res_path,'conv_gru_model'),
              show_shapes=True)
    model.compile(optimizer=optimizers.RMSprop(), loss='mae')
    return model
Example 29
    def model_1(self):
        inputs = layers.Input(shape=(self.max_words,), dtype='float32')
        embedding = layers.Embedding(input_dim=self.vocab_size + 1, output_dim=self.embedding_dim,
                                     input_length=self.max_words)
        embed = embedding(inputs)
        # shape trace (max_words presumably 907): embedding (907, 100) -> conv (907, 256) -> pooled
        cnn1 = layers.Conv1D(256, 3, padding='same', strides=1, activation='relu')(embed)
        cnn1 = layers.MaxPool1D(pool_size=48)(cnn1)

        cnn2 = layers.Conv1D(256, 4, padding='same', strides=1, activation='relu')(embed)
        cnn2 = layers.MaxPool1D(pool_size=47)(cnn2)

        cnn3 = layers.Conv1D(256, 5, padding='same', strides=1, activation='relu')(embed)
        cnn3 = layers.MaxPool1D(pool_size=46)(cnn3)

        cnn_con = layers.concatenate(inputs=[cnn1, cnn2, cnn3], axis=1)

        flat = layers.Flatten()(cnn_con)
        drop = layers.Dropout(0.5)(flat)
        output = layers.Dense(self.class_num, activation='softmax')(drop)
        model = models.Model(inputs=inputs, outputs=output)
        model.summary()
        return model
Example 30
def muti_output():
    vocabulary_size = 50000
    num_income_groups=10

    posts_input = Input(shape=(None,),dtype='int32',name='posts')
    embedded_posts = layers.Embedding(vocabulary_size, 256)(posts_input)  # Embedding(input_dim=vocabulary size, output_dim=256)
    x = layers.Conv1D(128,5,activation='relu')(embedded_posts)
    x = layers.MaxPool1D(5)(x)
    x = layers.Conv1D(256,5,activation='relu')(x)
    x = layers.Conv1D(256,5,activation='relu')(x)
    x = layers.MaxPool1D(5)(x)
    x = layers.Conv1D(256,5,activation='relu')(x)
    x = layers.Conv1D(256,5,activation='relu')(x)
    x = layers.MaxPool1D(5)(x)
    x = layers.GlobalMaxPool1D()(x)  # collapse the time axis so each head emits one prediction per post
    x = layers.Dense(128, activation='relu')(x)

    age_prediction = layers.Dense(1, name='age')(x)
    income_prediction = layers.Dense(num_income_groups, activation='softmax', name='income')(x)
    gender_prediction = layers.Dense(1, activation='sigmoid', name='gender')(x)  # the Dense layer must be called on x

    model = Model(posts_input, [age_prediction, income_prediction, gender_prediction])

    model.compile(optimizer='rmsprop',
                  loss=['mse', 'categorical_crossentropy', 'binary_crossentropy'],
                  loss_weights=[0.25, 1., 10.])  # weight the three losses
    return model