Example #1
def add_positional_pooling(model, args):
    if args.embedding_layer_name is not None:
        target_layer = model.get_layer(args.embedding_layer_name)
    else:
        target_layer = model.layers[args.embedding_layer_number]
    # We only want to add pooling after a conv layer.
    if not target_layer.__class__.__name__.startswith("Conv"):
        return model

    # The user did not request any pooling.
    if not args.global_pool_on_position and (
            args.non_global_pool_on_position_size is None):
        return model

    if args.global_pool_on_position:
        pooled_embedding = GlobalAveragePooling1D(data_format="channels_last")(
            model.output)
        flat_embedding = pooled_embedding
    elif args.non_global_pool_on_position_size is not None:
        if args.non_global_pool_on_position_stride is None:
            non_global_pool_on_position_stride = args.non_global_pool_on_position_size
        else:
            non_global_pool_on_position_stride = args.non_global_pool_on_position_stride
        pooled_embedding = AveragePooling1D(
            pool_size=args.non_global_pool_on_position_size,
            strides=non_global_pool_on_position_stride,  # use the computed default
            padding="same",
            data_format="channels_last")(model.output)
        flat_embedding = Flatten()(pooled_embedding)
    # Build the graph of the new model.
    new_model = Model(model.input, flat_embedding)
    return new_model
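A minimal usage sketch (hedged: the `args` attributes below are inferred from the function body, and the base model is a hypothetical stand-in):

from types import SimpleNamespace

inp = Input(shape=(100, 16))
base = Model(inp, Conv1D(64, 3, padding="same")(inp))  # last layer is a Conv layer
args = SimpleNamespace(
    embedding_layer_name=None,
    embedding_layer_number=-1,                 # index of the Conv1D layer
    global_pool_on_position=False,
    non_global_pool_on_position_size=4,        # average over windows of 4 positions
    non_global_pool_on_position_stride=None)   # stride defaults to the pool size
pooled_model = add_positional_pooling(base, args)  # output: (batch, 25 * 64) after Flatten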
Example #2
def transition_layer(input_tensor, k, name=None):
    conv = Conv1D(k, 5, padding='same')(input_tensor)
    pool = AveragePooling1D(pool_size=2)(conv)
    return pool
Example #3
def build_swem(dropout_rate,
               input_shape,
               embedding_matrix=None,
               pool_type='max'):

    inp = Input(shape=(input_shape[0], ))
    x = Embedding(input_dim=embedding_matrix.shape[0],
                  output_dim=embedding_matrix.shape[1],
                  input_length=input_shape[0],
                  weights=[embedding_matrix],
                  trainable=False)(inp)

    # x = SpatialDropout1D(rate=dropout_rate)(x)
    x = Dense(1200, activation="relu")(x)

    if pool_type == 'aver':
        x = GlobalAveragePooling1D()(x)
    elif pool_type == 'max':
        x = GlobalMaxPooling1D()(x)
    elif pool_type == 'concat':
        x_aver = GlobalAveragePooling1D()(x)
        x_max = GlobalMaxPooling1D()(x)
        x = concatenate([x_aver, x_max])
    elif pool_type == 'hier':
        x = AveragePooling1D(pool_size=3, strides=None, padding='same')(x)
        x = GlobalMaxPooling1D()(x)

    x = Dense(300, activation="relu")(x)
    x = Dropout(rate=dropout_rate)(x)
    x = Dense(1, activation="sigmoid")(x)
    model = Model(inputs=inp, outputs=x)
    return model
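A quick usage sketch for build_swem; the random matrix below is a hypothetical stand-in for real pretrained embeddings:

import numpy as np

vocab_size, embed_dim, seq_len = 5000, 300, 120
emb = np.random.normal(size=(vocab_size, embed_dim)).astype('float32')
swem = build_swem(dropout_rate=0.3,
                  input_shape=(seq_len,),
                  embedding_matrix=emb,
                  pool_type='concat')  # concatenates average and max pooling
swem.compile(loss='binary_crossentropy', optimizer='adam')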
Example #4
  def get_model(self):
    I = Input(shape=(self.hparams.sequence_length,), dtype='float32')
    E = Embedding(
        self.hparams.vocab_size,
        self.hparams.embedding_dim,
        weights=[self.embeddings_matrix],
        input_length=self.hparams.sequence_length,
        trainable=self.hparams.train_embedding)(I)
    C = []
    A = []
    P = []
    for i, size in enumerate(self.hparams.filter_sizes):
        C.append(Conv1D(self.hparams.num_filters[i], size, activation='relu', padding='same')(E))
        A.append(Dense(self.hparams.attention_intermediate_size, activation = 'relu')(C[i]))
        A[i] = Dense(1, use_bias=False)(A[i])
        # Permute trick: softmax normalizes the last axis, so swap axes to apply it along the sequence axis.
        A[i] = Permute((2,1))(A[i])
        A[i] = Activation('softmax')(A[i])
        A[i] = Permute((2,1))(A[i])
        P.append(Multiply()([A[i], C[i]]))
        P[i] = AveragePooling1D(self.hparams.sequence_length, padding='same')(P[i])
    X = Concatenate(axis=-1)(P)
    X = Flatten()(X)
    X = Dropout(self.hparams.dropout_rate)(X)
    X = Dense(128, activation='relu')(X)
    X = Dropout(self.hparams.dropout_rate)(X)
    Output = Dense(self.num_labels, activation='sigmoid', name='outputs')(X)

    model = Model(inputs=I, outputs=Output)
    model.compile(optimizer='rmsprop',
                  loss='binary_crossentropy',
                  metrics=['accuracy', auc_roc])
    model.summary()  # summary() prints directly and returns None
    return model
Example #5
def BLSTM_model(input_shape):

    with tf.name_scope('input'):
        inputs = Input(input_shape)

    with tf.name_scope('Conv1D'):
        conv1 = Conv1D(64, 2, padding='same')(inputs)
        conv1 = AveragePooling1D(2)(conv1)

    with tf.name_scope('BLSTM_forward'):
        net1 = Bidirectional(LSTM(80, go_backwards=False))(conv1)
        net1 = Dropout(0.3)(net1)

#    with tf.name_scope('BLSTM_backward'):
#        net2 = Bidirectional(LSTM(80, go_backwards=True))(conv1)
#        net2 = Dropout(0.3)(net2)

    with tf.name_scope('Dense_100'):
        #        net = concatenate([net1, net2])
        net = Dense(100, activation='relu')(net1)
        net = Dropout(0.3)(net)

    with tf.name_scope('Dense_70'):
        net = Dense(70, activation='relu')(net)
        net = Dropout(0.2)(net)

    with tf.name_scope('Dense_20'):
        net = Dense(20, activation='relu')(net)
        net = Dropout(0.1)(net)

    with tf.name_scope('output'):
        outputs = Dense(1)(net)

    return Model(inputs=inputs, outputs=outputs)
Example #6
def createModel(embedding_matrix):
    sequence_input = Input(shape=(101, 84), name='sequence_input')
    sequence = Convolution1D(filters=128, kernel_size=3,
                             padding='same')(sequence_input)
    sequence = BatchNormalization(axis=-1)(sequence)
    sequence = Activation('swish')(sequence)
    profile_input = Input(shape=(101, ), name='profile_input')
    embedding = Embedding(input_dim=embedding_matrix.shape[0],
                          output_dim=embedding_matrix.shape[1],
                          weights=[embedding_matrix],
                          trainable=False)(profile_input)
    profile = Convolution1D(filters=128, kernel_size=3,
                            padding='same')(embedding)
    profile = BatchNormalization(axis=-1)(profile)
    profile = Activation('swish')(profile)
    mergeInput = Concatenate(axis=-1)([sequence, profile])
    overallResult = MultiScale(mergeInput)
    overallResult = AveragePooling1D(pool_size=5)(overallResult)
    overallResult = Dropout(0.3)(overallResult)
    overallResult = Bidirectional(GRU(120,
                                      return_sequences=True))(overallResult)
    overallResult = SeqSelfAttention(
        attention_activation='sigmoid',
        name='Attention',
    )(overallResult)
    overallResult = Flatten()(overallResult)
    overallResult = Dense(101, activation='swish')(overallResult)
    ss_output = Dense(2, activation='softmax', name='ss_output')(overallResult)
    return Model(inputs=[sequence_input, profile_input], outputs=[ss_output])
Example #7
def attention_pooling(model_input):
    """
    attention pooling module

    Args:
        model_input: sequential input

    Returns:
        attention_output: the attention-weighted combination of the input
    """

    # average pooling for lstm units
    model_input_mean = AveragePooling1D(pool_size=128,
                                        data_format='channels_first',
                                        padding='valid')(model_input)
    model_input_mean = Lambda(lambda x: K.squeeze(x, axis=2))(model_input_mean)

    # transposed input
    model_input_tran = Lambda(lambda x: K.permute_dimensions(x, [0, 2, 1]))(
        model_input)

    # calculate attention weight
    attention = Dense(50, activation='softmax',
                      name='attention')(model_input_mean)

    # input * attention weight
    attention_output = Lambda(lambda x: K.batch_dot(x[0], x[1], axes=(1, 2)))(
        [attention, model_input_tran])

    return attention_output
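A usage sketch; the constants baked into the module (pool_size=128 and Dense(50)) imply 128 features per step and a sequence length of 50, so the shapes below are chosen to match (otherwise hypothetical):

seq = Input(shape=(50, 64))
h = LSTM(128, return_sequences=True)(seq)          # -> (batch, 50, 128)
att = attention_pooling(h)                         # -> (batch, 128)
clf = Model(seq, Dense(1, activation='sigmoid')(att))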
Example #8
    def mydesen_block(self, x, out_channels=256, activation='tanh'):
        x_base = self.conv1d_bn(x,
                                int(out_channels * 1 / 4),
                                1,
                                strides=1,
                                padding='same',
                                activation=activation)
        x_2 = self.conv1d_bn(x_base,
                             int(out_channels / 4),
                             5,
                             strides=1,
                             padding='same',
                             activation=activation)
        # x_2 = self.conv1d_bn(x_2, int(out_channels / 4), 3, strides=1, padding='same', activation=activation)

        # x_3 = self.conv1d_bn(x, int(out_channels * 3 / 4), 1, strides=1, padding='same', activation=activation)
        x_3 = self.conv1d_bn(x_base,
                             int(out_channels / 4),
                             3,
                             strides=1,
                             padding='same',
                             activation=activation)

        # x_4 = self.conv1d_bn(x, int(out_channels / 4), 1, strides=1, padding='same', activation=activation)

        x_5 = AveragePooling1D(3, strides=1, padding='same')(x)
        x_5 = self.conv1d_bn(x_5,
                             int(out_channels / 4),
                             1,
                             strides=1,
                             activation=activation)

        x = L.concatenate([x, x_2, x_3, x_base, x_5], axis=-1)
        return x
Example #9
def get_model(embedding_matrix):
    input_1 = Input((max_len,))
    embedding_1 = Embedding(num_words, 300,
                            weights=[embedding_matrix],
                            trainable=False)(input_1)
    # x = SpatialDropout1D(0.25)(embedding_1)
    x = GRU(300,
            dropout=0.2,
            recurrent_dropout=0.2,
            activation='relu')(embedding_1)

    input_2 = Input((max_len,))
    embedding_2 = Embedding(num_words, 300,
                            weights=[embedding_matrix],
                            trainable=False)(input_2)
    # y = SpatialDropout1D(0.25)(embedding_2)
    y = GRU(300,
            dropout=0.2,
            recurrent_dropout=0.2,
            activation='relu')(embedding_2)

    embedding_3 = Embedding(num_words, 500)(input_1)
    z = AveragePooling1D(pool_size=3, strides=1)(embedding_3)
    z = GlobalMaxPooling1D()(z)

    a = keras.layers.concatenate([x, y, z])
    output_1 = Dense(1, activation='sigmoid')(a)

    model = Model(inputs=[input_1, input_2], outputs=[output_1])
    model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return model
Example #10
def model_raw_sound(x_train, num_labels):
    model_input = x = Input(shape=x_train[0].shape)
    x = Conv1D(filters=16, kernel_size=filter_size)(x)
    x = activation()(x)
    x = MaxPooling1D(pool_size=4)(x)
    x = Dropout(0.2)(x)

    x = Conv1D(filters=32, kernel_size=filter_size)(x)
    x = activation()(x)
    x = MaxPooling1D(pool_size=4)(x)
    x = Dropout(0.2)(x)

    x = Conv1D(filters=64, kernel_size=filter_size)(x)
    x = activation()(x)
    x = MaxPooling1D(pool_size=2)(x)
    x = Dropout(0.2)(x)

    x = Conv1D(filters=128, kernel_size=filter_size)(x)
    x = activation()(x)
    x = MaxPooling1D(pool_size=2)(x)
    x = Dropout(0.2)(x)

    x = AveragePooling1D(pool_size=int(x.get_shape()[1]))(x)  # average over all remaining steps

    x = Conv1D(filters=num_labels,
               kernel_size=1,
               padding='valid',
               activation='softmax' if num_labels > 1 else 'sigmoid')(x)

    model = Model(inputs=[model_input], outputs=[x])

    model.summary()
    return model
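The snippet reads two module-level names, `filter_size` and `activation` (an activation layer class, since it is instantiated as `activation()`); hypothetical values for a runnable sketch:

import numpy as np

filter_size = 9          # hypothetical kernel width
activation = ReLU        # a layer class, called as activation()
x_train = np.zeros((4, 16000, 1), dtype='float32')
model = model_raw_sound(x_train, num_labels=10)  # per-example output shape (1, 10)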
Example #11
def discriminator(InputShape):
    model = Sequential()
    
    model.add(Reshape((InputShape, 1), input_shape=(InputShape,)))
    model.add(Conv1D(32, 100, strides=7, padding='valid'))
    model.add(ReLU())
    model.add(AveragePooling1D(4))
    model.add(BatchNormalization(momentum=0.9))
    model.add(Dropout(rate=0.1))

    model.add(Conv1D(16, 50, strides=5, padding='valid'))
    model.add(ReLU())
    model.add(BatchNormalization(momentum=0.9))
    model.add(Dropout(rate=0.1))

    model.add(Conv1D(8, 25, strides=3, padding='valid'))
    model.add(ReLU())
    model.add(BatchNormalization(momentum=0.9))
    model.add(Dropout(rate=0.1))

    model.add(Flatten())
    model.add(Dense(1024))
    model.add(LeakyReLU(alpha=0.01))
    model.add(BatchNormalization(momentum=0.9))
    model.add(Dense(1, activation='sigmoid'))
    return model
Example #12
def create_model(params):
    input_ecg = output = Input(shape=(MAX_LENGTH, NUM_CHANNELS))

    num_convolutions_in_each_block = _get_num_convolutions_in_each_block(
        params)
    num_vgg_blocks = len(num_convolutions_in_each_block)

    for block_num in range(num_vgg_blocks):
        num_convolutions = num_convolutions_in_each_block[block_num]
        num_filters = _get_num_filters(params, block_num)

        output = _vgg_block(output,
                            num_convolutions,
                            num_filters,
                            run_batch_norm=_get_run_batch_norm(params),
                            filter_size=_get_filter_size(params))

    output = AveragePooling1D(
        pool_size=_get_block_num_to_fiters_mapper(params)['GAP'])(output)

    output = Flatten()(output)

    output = Dense(1, activation='sigmoid')(output)

    model = Model(inputs=[input_ecg], outputs=[output])

    optimizer = Adam(lr=params['learning_rate'])
    model.compile(optimizer=optimizer,
                  loss='binary_crossentropy',
                  metrics=['accuracy', *all_metrics])

    model.summary()

    return model
Example #13
def get_test_model_variable():
    """Returns a small model for variably shaped input tensors."""

    input_shapes = [
        (None, None, 1),
        (None, None, 3),
        (None, 4),
    ]

    inputs = [Input(shape=s) for s in input_shapes]

    outputs = []

    # same as axis=-1
    outputs.append(Concatenate()([inputs[0], inputs[1]]))
    outputs.append(Conv2D(8, (3, 3), padding='same', activation='elu')(inputs[0]))
    outputs.append(Conv2D(8, (3, 3), padding='same', activation='relu')(inputs[1]))
    outputs.append(GlobalMaxPooling2D()(inputs[0]))
    outputs.append(MaxPooling2D()(inputs[1]))
    outputs.append(AveragePooling1D()(inputs[2]))
    outputs.append(PReLU(shared_axes=[1, 2])(inputs[0]))
    outputs.append(PReLU(shared_axes=[1, 2])(inputs[1]))
    outputs.append(PReLU(shared_axes=[1, 2, 3])(inputs[1]))
    outputs.append(PReLU(shared_axes=[1])(inputs[2]))

    model = Model(inputs=inputs, outputs=outputs, name='test_model_variable')
    model.compile(loss='mse', optimizer='nadam')

    # fit to dummy data
    training_data_size = 1
    data_in = generate_input_data(training_data_size, input_shapes)
    initial_data_out = model.predict(data_in)
    data_out = generate_output_data(training_data_size, initial_data_out)
    model.fit(data_in, data_out, epochs=10)
    return model
Example #14
def inception_module(layer_in, f1, f2, f3):
    # kernel size 1 conv
    conv1 = TimeDistributed(
        Conv1D(f1,
               kernel_size=1,
               padding='same',
               activation='relu',
               kernel_initializer='glorot_normal'))(layer_in)
    # kernel size 3 conv
    conv3 = TimeDistributed(
        Conv1D(f2,
               kernel_size=3,
               padding='same',
               activation='relu',
               kernel_initializer='glorot_normal'))(layer_in)
    # kernel size 5 conv
    conv5 = TimeDistributed(
        Conv1D(f3,
               kernel_size=5,
               padding='same',
               activation='relu',
               kernel_initializer='glorot_normal'))(layer_in)
    # size-3 average pooling
    pool = TimeDistributed(
        AveragePooling1D(pool_size=3, strides=1, padding='same'))(layer_in)
    # concatenate filters, assumes filters/channels last
    layer_out = concatenate([conv1, conv3, conv5, pool], axis=-1)
    return layer_out
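Because every branch is wrapped in TimeDistributed, the module expects a 4D (batch, frames, steps, channels) input; a hypothetical shape:

clip = Input(shape=(8, 100, 32))                     # (frames, steps, channels)
feats = inception_module(clip, f1=64, f2=96, f3=32)  # -> (batch, 8, 100, 224)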
Example #15
    def _create_model_cnn(dataset):
        num_seq = len(dataset[0])
        num_features = len(dataset[0][0])

        inpt = Input(shape=(num_seq, num_features))

        convs = []

        conv1 = Conv1D(8, 1, activation='relu')(inpt)
        pool1 = GlobalMaxPooling1D()(conv1)
        convs.append(pool1)

        conv2 = Conv1D(8, 3, activation='relu')(inpt)
        pool2_1 = AveragePooling1D(pool_size=5)(conv2)
        conv2_1 = Conv1D(16, 3, activation='relu')(pool2_1)
        pool2_2 = GlobalMaxPooling1D()(conv2_1)
        convs.append(pool2_2)

        out = Concatenate()(convs)
        first_segment_model = Model(inputs=[inpt], outputs=[out])

        model = Sequential()
        model.add(first_segment_model)
        model.add(Dropout(0.2))
        model.add(Dense(16, activation='sigmoid'))
        model.add(Dense(1, activation='sigmoid'))

        first_segment_model.summary()  # summary() prints directly and returns None
        model.summary()
        return model
Example #16
def build_model(ngram=3):
    input_data = get_input(ngram=ngram)
    main_input = Input(shape=((max_len - ngram + 1) * ngram, ))
    # embedding1 = Embedding(num_words * ngram, word_dim, embeddings_initializer=keras.initializers.Orthogonal())(main_input)
    embedding1 = Embedding(num_words * ngram, word_dim)(main_input)
    x = AveragePooling1D(pool_size=ngram)(embedding1)
    x = GlobalMaxPooling1D()(x)
    # output = Dense(1, activation='sigmoid')(x)

    weight = np.ones((word_dim, 1), dtype=float)
    weight[int(word_dim / 2):] = -np.ones((int(word_dim / 2), 1), dtype=float)
    output = Dense(1,
                   weights=[weight, np.zeros([1])],
                   trainable=False,
                   activation='sigmoid')(x)

    model = Model(inputs=main_input, outputs=output)
    model.compile(loss='binary_crossentropy',
                  optimizer='nadam',
                  metrics=['accuracy'])
    model.fit([input_data[0]],
              input_data[1],
              batch_size=32,
              shuffle=True,
              epochs=12,
              validation_data=([input_data[2]], input_data[3]))
    return model
Example #17
def cloneLayerFromLayer(pLayer):
    if isinstance(pLayer, Convolution1D):
        return Convolution1D.from_config(pLayer.get_config())
    elif isinstance(pLayer, Convolution2D):
        return Convolution2D.from_config(pLayer.get_config())
    elif isinstance(pLayer, Convolution3D):
        return Convolution3D.from_config(pLayer.get_config())
    # Max-Pooling:
    elif isinstance(pLayer, MaxPooling1D):
        return MaxPooling1D.from_config(pLayer.get_config())
    elif isinstance(pLayer, MaxPooling2D):
        return MaxPooling2D.from_config(pLayer.get_config())
    elif isinstance(pLayer, MaxPooling3D):
        return MaxPooling3D.from_config(pLayer.get_config())
    # Average-Pooling
    elif isinstance(pLayer, AveragePooling1D):
        return AveragePooling1D.from_config(pLayer.get_config())
    elif isinstance(pLayer, AveragePooling2D):
        return AveragePooling2D.from_config(pLayer.get_config())
    elif isinstance(pLayer, AveragePooling3D):
        return AveragePooling3D.from_config(pLayer.get_config())
    #
    elif isinstance(pLayer, Flatten):
        return Flatten.from_config(pLayer.get_config())
    elif isinstance(pLayer, Merge):
        return Merge.from_config(pLayer.get_config())
    elif isinstance(pLayer, Activation):
        return Activation.from_config(pLayer.get_config())
    elif isinstance(pLayer, Dropout):
        return Dropout.from_config(pLayer.get_config())
    #
    elif isinstance(pLayer, Dense):
        return Dense.from_config(pLayer.get_config())
    return None
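A small usage sketch; the clone copies configuration only, so it has no weights until it is built by being called:

src = Dense(32, activation='relu')
clone = cloneLayerFromLayer(src)
assert clone is not src
assert clone.get_config() == src.get_config()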
Example #18
    def __transition_layer(x,
                           nb_channels,
                           dropout_rate=None,
                           compression=1.0,
                           weight_decay=1e-4):
        """
        Creates a transition layer between dense blocks as transition, which do convolution and pooling.
        Works as downsampling.
        """

        x = BatchNormalization()(x)
        x = Activation('relu')(x)  # alternative: LeakyReLU(alpha=0.2)(x)
        #x = Convolution2D(int(nb_channels*compression), (1, 1), padding='same',
        #                  use_bias=False, kernel_regularizer=l2(weight_decay))(x)

        x = Convolution1D(int(nb_channels * compression),
                          32,
                          padding='same',
                          strides=2,
                          use_bias=False,
                          kernel_regularizer=l2(weight_decay))(x)

        # Adding dropout
        if dropout_rate:
            x = Dropout(dropout_rate)(x)

        #x = AveragePooling2D((2, 2), strides=(2, 2))(x)
        x = AveragePooling1D(pool_size=2, strides=2)(x)
        return x
Example #19
def transition_layer(x,
                     nb_channels,
                     dropout_rate=None,
                     compression=1.0,
                     weight_decay=1e-4,
                     avg_pooling=True):
    """
    Creates a transition layer between dense blocks as transition, which do convolution and pooling.
    Works as downsampling.
    """

    x = BatchNormalization(gamma_regularizer=l2(weight_decay),
                           beta_regularizer=l2(weight_decay))(x)
    #x = BatchNormalization()(x)
    x = Activation('relu')(x)
    x = Conv1D(int(nb_channels * compression), (1, ),
               padding='same',
               use_bias=False,
               kernel_regularizer=l2(weight_decay),
               kernel_initializer='he_uniform')(x)
    #x = Conv1D(int(nb_channels*compression), (1, ), padding='same',kernel_initializer='he_uniform')(x)
    x = squeeze_excite_block(x)
    # Adding dropout
    if dropout_rate:
        x = Dropout(dropout_rate)(x)
    if avg_pooling:
        x = AveragePooling1D((2, ), strides=(2, ))(x)
    return x
Example #20
def cnn_architecture(input_size=1250, learning_rate=0.00001, alpha_value=10, classes=256, rkl_loss=True):

    # Personal design
    input_shape = (input_size,1)
    img_input = Input(shape=input_shape)

    # 1st convolutional block
    x = Conv1D(2, 1, kernel_initializer='he_uniform', activation='selu', padding='same', name='block1_conv1')(img_input)
    x = BatchNormalization()(x)
    x = AveragePooling1D(2, strides=2, name='block1_pool')(x)

    x = Flatten(name='flatten')(x)

    # Classification layer
    x = Dense(2, kernel_initializer='he_uniform', activation='selu', name='fc1')(x)

    # Logits layer
    score_layer = Dense(classes, activation=None, name='score')(x)
    predictions = Activation('softmax')(score_layer)

    # Create model
    inputs = img_input
    model = Model(inputs, predictions, name='aes_hd')
    optimizer = Adam(lr=learning_rate)

    if rkl_loss:
        model.compile(loss=loss_sca(score_layer, nb_class=classes, alpha_value=alpha_value),
                      optimizer=optimizer,
                      metrics=['accuracy'])
    else:
        model.compile(loss='categorical_crossentropy',
                      optimizer=optimizer,
                      metrics=['accuracy'])

    return model
Example #21
def make_cnn_lstm(word_index, max_seq):
    embeds, embed_dim = read_embeds(EFILE, word_index)

    embedding_layer = Embedding(len(word_index) + 1,
                                embed_dim,
                                weights=[embeds],
                                input_length=max_seq,
                                trainable=False)

    sequence_input = Input(shape=(max_seq, ), dtype='int32')
    embedded_sequences = embedding_layer(sequence_input)
    x = LSTM(256,
             activation="relu",
             return_sequences=True,
             kernel_initializer=init)(embedded_sequences)
    #x = LSTM(150, activation="relu")(x)
    x = Conv1D(150, 5, activation='relu', kernel_initializer=init)(x)
    x = AveragePooling1D()(x)
    x = Flatten()(x)
    x = Dense(100, activation='relu', kernel_initializer=init)(x)
    #x = Dropout(0.1)(x)
    #x = Dense(50, activation='relu', kernel_initializer = init)(x)
    preds = Dense(1, activation='sigmoid', kernel_initializer=init)(x)

    return sequence_input, x, preds
Example #22
def get_model(name,
              recurrent_units=512,
              dropout_rate=0.3,
              recurrent_dropout_rate=0.3,
              dense_size=300,
              nb_classes=2):
    if name == 'DPCNN':
        return get_DPCNN(recurrent_units, nb_classes)
    if name == 'gru_best':
        return get_gru_best(recurrent_units, dropout_rate, dense_size,
                            nb_classes)
    if name == 'LSTM_CONV':
        return get_LSTM_CONV(recurrent_units, dropout_rate, dense_size,
                             nb_classes)
    if name == 'bidirectional_LSTM':
        return get_bidirectional_LSTM(recurrent_units, dropout_rate,
                                      dense_size, nb_classes)
    if name == 'bid_GRU_bid_LSTM':
        return get_bid_GRU_bid_LSTM(recurrent_units, dropout_rate,
                                    recurrent_dropout_rate, dense_size,
                                    nb_classes)
    # Default
    model = Sequential()
    model.add(
        LSTM(recurrent_units,
             activation='sigmoid',
             return_sequences=True,
             input_shape=(max_len, vector_size)))
    model.add(AveragePooling1D())
    model.add(Flatten())
    model.add(Dense(nb_classes, activation='softmax'))
    return model
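A sketch of the default branch; `max_len` and `vector_size` are module-level globals the snippet reads (the values here are hypothetical):

max_len, vector_size = 100, 300
model = get_model('plain_lstm')  # any name without a dedicated branch hits the default
model.compile(loss='categorical_crossentropy', optimizer='adam')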
Example #23
    def set_model(self, config):
        layer_descriptions = config.split('\n')
        layers = [Input(shape=(17, 13))]
        prev_ind = 0
        for line in layer_descriptions:
            parameters = line.split('-')
            # Convert numeric fields to ints.
            for i in range(len(parameters)):
                if parameters[i].isdigit():
                    parameters[i] = int(parameters[i])
            if parameters[0] == 'dense':
                layers.append(Dense(parameters[1], activation=parameters[2])(layers[prev_ind]))
            elif parameters[0] == 'conv':
                layers.append(Convolution1D(parameters[1], parameters[2], activation=parameters[3])(layers[prev_ind]))
            elif parameters[0] == 'flatten':
                layers.append(Flatten()(layers[prev_ind]))
            elif parameters[0] == 'bn':
                layers.append(BatchNormalization()(layers[prev_ind]))
            elif parameters[0] == 'mp':
                layers.append(MaxPooling1D(parameters[1])(layers[prev_ind]))
            elif parameters[0] == 'avp':
                layers.append(AveragePooling1D(parameters[1])(layers[prev_ind]))
            else:
                raise ValueError(parameters)
            prev_ind += 1
        layers.append(Dense(11, activation='tanh')(layers[-1]))
        self.model = Model(layers[0], layers[-1])
        self.model.compile(RMSprop(), loss='hinge')
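The config format is only implicit in the parser: one layer per line, fields separated by '-'. A hypothetical config it would accept:

config = ('conv-32-3-relu\n'
          'mp-2\n'
          'conv-64-3-relu\n'
          'avp-2\n'
          'flatten\n'
          'dense-64-relu')
agent.set_model(config)  # `agent` is a hypothetical instance of the host class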
Example #24
def get_CNN_3_LSTM_model_1(maxlen, vocab_size, vec_size, W, filter_window, filter_size, out_dim, non_static):
    input_text = Input(shape=(maxlen, vec_size))
    conv = []
    for i in filter_window:
        conv_out = Convolution1D(filter_size, i, border_mode='same',
                                 activation='relu', W_constraint=maxnorm(3),
                                 input_dim=vec_size, input_length=maxlen,
                                 bias=False)(input_text)
        conv.append(conv_out)
    merged_conv = merge(conv, mode='concat', concat_axis=2)
    pooling = MaxPooling1D(pool_length=maxlen)(merged_conv)
    reshape1 = Reshape([len(filter_window) * filter_size])(pooling)
    cnn_model = Model(input=input_text, output=reshape1)

    input_1 = Input(shape=(maxlen, vec_size))
    input_2 = Input(shape=(maxlen, vec_size))
    input_3 = Input(shape=(maxlen, vec_size))
    input_4 = Input(shape=(3, 300,))
    cnn_out_1 = cnn_model(input_1)
    cnn_out_2 = cnn_model(input_2)
    cnn_out_3 = cnn_model(input_3)
    merged_cnn_out = merge([cnn_out_1, cnn_out_2, cnn_out_3], mode='concat', concat_axis=2)
    reshape2 = Reshape([3, len(filter_window) * filter_size])(merged_cnn_out)
    #mul = merge([reshape2, input_4], mode='mul', name='mul')
    #masking = Masking(mask_value=0.)(mul)
    lstm = LSTM(output_dim=vec_size, return_sequences=True, unroll=True)(reshape2)
    # Bug fix: the pooled tensor was previously unused, and pool_length=3*maxlen
    # exceeded the 3-step LSTM output, so pool over all 3 steps and feed it forward.
    pooling = AveragePooling1D(pool_length=3)(lstm)
    flat = Flatten()(pooling)
    dropout2 = Dropout(.5)(flat)
    out = Dense(output_dim=1, activation='sigmoid')(dropout2)
    model = Model(input=[input_1, input_2, input_3, input_4], output=out)
    model.compile(optimizer=Adam(), loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model
Example #25
    def build_discriminator(self):
        # Discriminator
        # 1) 16 * 200 Conv1D with LeakyRelu, Dropout
        model = Sequential()

        model.add(
            Conv1D(hidden_filters_1,
                   200,
                   padding="valid",
                   strides=1,
                   kernel_initializer='he_normal',
                   input_shape=self.output_shape))
        model.add(LeakyReLU())

        # 2) Average Pooling, Flatten, Dense, and LeakyRelu
        model.add(AveragePooling1D(25))
        model.add(Flatten())
        model.add(Dense(int(window_size / 16), kernel_initializer='he_normal'))
        model.add(LeakyReLU())

        # 3) Final output with no activation
        model.add(Dense(1, kernel_initializer="he_normal"))

        print "Discriminator"
        model.summary()

        return model
Example #26
def cnn_architecture(input_size=1250, learning_rate=0.00001, classes=256):

    # Designing input layer
    input_shape = (input_size, 1)
    img_input = Input(shape=input_shape)

    # 1st convolutional block
    x = Conv1D(2, 1, kernel_initializer='he_uniform', activation='selu', padding='same', name='block1_conv1')(img_input)
    x = BatchNormalization()(x)
    x = AveragePooling1D(2, strides=2, name='block1_pool')(x)

    x = Flatten(name='flatten')(x)

    # Classification layer
    x = Dense(2, kernel_initializer='he_uniform', activation='selu', name='fc1')(x)

    # Logits layer
    x = Dense(classes, activation='softmax', name='predictions')(x)

    # Create model
    inputs = img_input
    model = Model(inputs, x, name='aes_hd_model')
    optimizer = Adam(lr=learning_rate)
    model.compile(loss='categorical_crossentropy', optimizer=optimizer, metrics=['accuracy'])
    return model
Example #27
def get_model(embedding_matrix):
    input_1 = Input((max_len, ))
    embedding_1 = Embedding(num_words,
                            300,
                            weights=[embedding_matrix],
                            trainable=False)(input_1)
    x = SpatialDropout1D(0.25)(embedding_1)
    x = GRU(300, dropout=0.2, recurrent_dropout=0.2, activation='relu')(x)
    x = Dense(300, activation='relu')(x)

    input_2 = Input((max_len, ))
    embedding_2 = Embedding(num_words,
                            300,
                            weights=[embedding_matrix],
                            trainable=False)(input_2)
    y = SpatialDropout1D(0.25)(embedding_2)
    y = GRU(300, dropout=0.2, recurrent_dropout=0.2, activation='relu')(y)
    y = Dense(300, activation='relu')(y)

    embedding_3 = Embedding(num_words, embedding_dimension)(input_1)
    z = AveragePooling1D(pool_size=n_gram, strides=1,
                         padding='valid')(embedding_3)
    z = GlobalMaxPooling1D()(z)

    a = keras.layers.concatenate([x, y, z])
    output_1 = Dense(20, activation='softmax')(a)

    model = Model(inputs=[input_1, input_2], outputs=[output_1])
    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])
    return model
Example #28
def transition_block(x, stage, nb_filter, compression=1.0, dropout_rate=None, weight_decay=1E-4):
    ''' Apply BatchNorm, 1x1 Convolution, averagePooling, optional compression, dropout
        # Arguments
            x: input tensor
            stage: index for dense block
            nb_filter: number of filters
            compression: calculated as 1 - reduction. Reduces the number of feature maps in the transition block.
            dropout_rate: dropout rate
            weight_decay: weight decay factor
    '''

    eps = 1.1e-5
    conv_name_base = 'conv' + str(stage) + '_blk'
    relu_name_base = 'relu' + str(stage) + '_blk'
    pool_name_base = 'pool' + str(stage)

    x = BatchNormalization(epsilon=eps, axis=concat_axis, name=conv_name_base + '_bn', scale=True)(x)
    x = Activation('relu', name=relu_name_base)(x)
    x = Conv1D(int(nb_filter * compression), 1, name=conv_name_base, use_bias=False)(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    x = AveragePooling1D(2, strides=2, name=pool_name_base)(x)

    return x
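A usage sketch; `concat_axis` is a module-level global the snippet reads (set to -1 for channels-last here, otherwise hypothetical):

concat_axis = -1
inp = Input(shape=(128, 64))
out = transition_block(inp, stage=1, nb_filter=64, compression=0.5)  # -> (batch, 64, 32)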
Example #29
    def build_discriminator(self):
        # Discriminator
        # 1) 16 * 200 Conv1D with LeakyRelu, Dropout, and BatchNorm
        model = Sequential()

        model.add(Conv1D(hidden_filters_1,
                         200,
                         padding="valid",
                         strides=1,
                         input_shape=self.output_shape))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(dropout_rate))
        model.add(BatchNormalization(momentum=0.8))

        # 2) Average Pooling, Flatten, Dense, and LeakyRelu
        model.add(AveragePooling1D(25))
        model.add(Flatten())
        model.add(Dense(int(window_size/16)))
        model.add(LeakyReLU(alpha=0.2))

        # 3) Final output with sigmoid
        model.add(Dense(1, activation='sigmoid'))

        print "Discriminator"
        model.summary()

        img = Input(shape=self.output_shape)
        validity = model(img)

        return Model(img, validity)
Example #30
File: cnn.py Project: mecthew/NJU-AES
    def init_model(self,
                   input_shape,
                   output_dim,
                   dropout=0.0,
                   **kwargs):

        inputs = Input(shape=input_shape)
        kernel_sizes = [2, 3, 4]
        convs = []
        for i in range(len(kernel_sizes)):
            conv_l = Conv1D(filters=100,
                            kernel_size=(kernel_sizes[i]),
                            kernel_initializer='normal',
                            bias_initializer='random_uniform',
                            activation='relu',
                            padding='same')(inputs)
            maxpool_l = MaxPooling1D(pool_size=int(conv_l.shape[1]),
                                     padding='valid')(conv_l)
            avepool_l = AveragePooling1D(pool_size=int(conv_l.shape[1]),
                                         padding='valid')(conv_l)
            convs.append(maxpool_l)
            convs.append(avepool_l)

        concatenated_tensor = Concatenate(axis=1)(convs)
        flatten = Flatten()(concatenated_tensor)
        dropped = Dropout(rate=dropout)(flatten)  # avoid shadowing the `dropout` argument
        dense = Dense(128, activation='softplus')(dropped)
        outputs = Dense(output_dim)(dense)

        model = Model(inputs=inputs, outputs=outputs, name='CNN')
        adam = Adam(lr=0.0001, beta_1=0.9, beta_2=0.999, epsilon=None, decay=0.0, amsgrad=False)
        model.compile(optimizer=adam, loss='mse', metrics=['mse', 'mae'])
        model.summary()
        self._is_init = True
        self._model = model
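A hedged usage sketch; `CNN` stands in for whichever class hosts init_model (hypothetical here):

reg = CNN()  # hypothetical host class
reg.init_model(input_shape=(50, 300), output_dim=1, dropout=0.3)
print(reg._model.count_params() > 0)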