Example 1
    def f(x, h):
        if isinstance(dilation_rate, tuple):
            d1, d2 = dilation_rate
        else:
            d1 = d2 = dilation_rate

        # two causal convolutions (the first one being either type a or b, the second one being b)
        if padding == "b":  # access to the past and the present, but not the future
            xx = Conv1D(num_filters, kernel_size, padding="causal", BN=BN, use_bias=not BN, activation=activation, dropout=dropout, dilation_rate=d1)(x)
        elif padding == "a":  # shift to the right so that you only have access to the past
            xx = ZeroPadding1D(padding=(2, 0))(x)
            xx = Conv1D(num_filters, kernel_size, BN=BN, activation=activation, use_bias=not BN, dropout=dropout, dilation_rate=d1)(xx)
            xx = Lambda(lambda x_: x_[:, :-2, :])(xx)
        else:
            raise ValueError("padding must be 'a' or 'b'")
        xx = Conv1D(num_filters * 2, kernel_size, padding="causal", BN=BN, use_bias=not BN, activation=None, dropout=dropout, dilation_rate=d2)(xx)

        # conditional vector
        h = Conv1D(num_filters * 2, 1, activation=activation, BN=BN, dropout=dropout)(h)
        xx = Add()([xx, h])

        # gate
        xx = Lambda(gate, arguments={"num_filters": 2 * num_filters})(xx)

        # add residual connections if needed
        if residual:
            if padding == "b":  # access to the past and the present, but not the future
                x_ = x
            elif padding == "a":  # shift to the right so that you only have access to the past
                x_ = ZeroPadding1D(padding=(1, 0))(x)
                x_ = Lambda(lambda x_: x_[:, :-1, :])(x_)
            else:
                raise ValueError("padding must be 'a' or 'b'")
            xx = Add()([xx, x_])
        return xx
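The `gate` helper referenced by the Lambda layer above (and again in Example 3) is not part of the snippet. A minimal sketch, assuming the usual WaveNet-style gated activation in which the `num_filters` value passed through `arguments` is the full channel width of the tensor being split:

from keras import backend as K

def gate(x, num_filters):
    # first half of the channels feeds a tanh branch, the second half a
    # sigmoid branch; their elementwise product is the gated output
    return K.tanh(x[:, :, :num_filters // 2]) * K.sigmoid(x[:, :, num_filters // 2:])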
Example 2
def model(input_shape, optimizer):
    print('Building CONV LSTM RNN model ...')

    recurrent_layer = CuDNNGRU if gpu else GRU

    model = Sequential()

    # convolutional block 1
    model.add(ZeroPadding1D(1, input_shape=input_shape))
    model.add(Conv1D(128, 3))
    model.add(LeakyReLU())
    model.add(BatchNormalization())
    model.add(Dropout(0.4))

    # convolutional block 2
    model.add(ZeroPadding1D(1))
    model.add(Conv1D(64, 3))
    model.add(LeakyReLU())
    model.add(BatchNormalization())
    model.add(Dropout(0.4))

    # recurrent block 1
    model.add(recurrent_layer(64, return_sequences=True))
    model.add(LeakyReLU())
    model.add(Dropout(0.4))
    model.add(BatchNormalization())

    # recurrent block 2
    model.add(recurrent_layer(32, return_sequences=True))
    model.add(LeakyReLU())
    model.add(Dropout(0.4))

    # time-distributed classification head
    model.add(TimeDistributed(Dense(10)))
    model.add(LeakyReLU())
    model.add(BatchNormalization())
    model.add(Dropout(0.4))
    model.add(TimeDistributed(Dense(1, activation="sigmoid")))

    print("Compiling ...")
    model.compile(loss='binary_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    model.summary()

    return model
Example 3
    def f(x):
        if isinstance(dilation_rate, tuple):
            d1, d2 = dilation_rate
        else:
            d1 = d2 = dilation_rate

        # two causal convolutions (the first one being either type a or b, the second one being b)
        if padding == "b":  # access to the past and the present, but not the future
            xx = Conv1D(num_filters,
                        kernel_size,
                        padding="causal",
                        BN=BN,
                        use_bias=not BN,
                        activation=activation,
                        dropout=dropout,
                        dilation_rate=d1)(x)
        elif padding == "a":  # shift to the right so that you only have access to the past
            xx = ZeroPadding1D(padding=(2, 0))(x)
            xx = Conv1D(num_filters,
                        kernel_size,
                        BN=BN,
                        activation=activation,
                        use_bias=not BN,
                        dropout=dropout,
                        dilation_rate=d1)(xx)
            xx = Lambda(lambda x_: x_[:, :-2, :])(xx)
        else:
            raise ValueError("padding must be 'a' or 'b'")
        xx = Conv1D(num_filters * 2,
                    kernel_size,
                    padding="causal",
                    BN=BN,
                    use_bias=not BN,
                    activation=None,
                    dropout=dropout,
                    dilation_rate=d2)(xx)

        # gate
        xx = Lambda(gate, arguments={"num_filters": 2 * num_filters})(xx)

        xx = Conv1D(num_filters, 1, BN=BN, activation=activation)(xx)

        # create two outputs: out is passed to the next block, and skip is summed with the other skip connections at the end
        skip = xx
        out = xx

        if padding == "b":  # access to the past and the present, but not the future
            x_ = x
        elif padding == "a":  # shift to the right so that you only have access to the past
            x_ = ZeroPadding1D(padding=(1, 0))(x)
            x_ = Lambda(lambda x_: x_[:, :-1, :])(x_)
        else:
            raise ValueError("padding must be 'a' or 'b'")
        out = Add()([out, x_])

        return out, skip
Example 4
def model_Alexnet_single_channel(input_layer, data_length, number_of_classes,
                                 name):
    input_shape = (data_length, 1)

    x = Convolution1D(96,
                      11,
                      strides=4,
                      padding='valid',
                      name=name + 'conv1')(input_layer)
    x = Activation('relu', name=name + 'act1')(x)
    x = BatchNormalization(name=name + 'bn1')(x)
    x = MaxPooling1D(pool_size=2,
                     strides=2,
                     padding='valid',
                     name=name + 'pool1')(x)

    x = ZeroPadding1D(2, name=name + 'zp1')(x)
    x = Convolution1D(256, 5, strides=1, name=name + 'conv2')(x)
    x = Activation('relu', name=name + 'act2')(x)
    x = BatchNormalization(name=name + 'bn2')(x)
    x = MaxPooling1D(pool_size=3,
                     strides=2,
                     padding='valid',
                     name=name + 'pool2')(x)

    x = ZeroPadding1D(1, name=name + 'zp2')(x)
    x = Convolution1D(384, 3, strides=1, name=name + 'conv3')(x)
    x = Activation('relu', name=name + 'act3')(x)

    x = ZeroPadding1D(1, name=name + 'zp3')(x)
    x = Convolution1D(384, 3, strides=1, name=name + 'conv4')(x)
    x = Activation('relu', name=name + 'act4')(x)

    x = ZeroPadding1D(1, name=name + 'zp4')(x)
    x = Convolution1D(256, 3, strides=1, name=name + 'conv5')(x)
    x = Activation('relu', name=name + 'act5')(x)
    x = MaxPooling1D(pool_size=3,
                     strides=2,
                     padding='valid',
                     name=name + 'pool3')(x)

    x = Flatten(name=name + 'flat')(x)
    x = Dense(4096, name=name + 'dn1')(x)
    x = Activation('relu', name=name + 'act6')(x)
    x = Dropout(0.5, name=name + 'dr1')(x)

    x = Dense(4096, name=name + 'dn2')(x)
    x = Activation('relu', name=name + 'act7')(x)
    x = Dropout(0.5, name=name + 'dr2')(x)

    x = Dense(number_of_classes, activation='softmax', name=name + 'softmax')(x)
    return x
Example 5
    def f(x):
        y = ZeroPadding1D(padding=1,
                          name="padding{}{}_branch2a".format(
                              stage_char, block_char))(x)

        y = Conv1D(filters,
                   kernel_size,
                   strides=stride,
                   use_bias=False,
                   name="res{}{}_branch2a".format(stage_char, block_char))(y)

        y = BatchNormalization(epsilon=1e-5,
                               name="bn{}{}_branch2a".format(
                                   stage_char, block_char))(y)

        y = Activation("relu",
                       name="res{}{}_branch2a_relu".format(
                           stage_char, block_char))(y)

        y = ZeroPadding1D(padding=1,
                          name="padding{}{}_branch2b".format(
                              stage_char, block_char))(y)

        y = Conv1D(filters,
                   kernel_size,
                   use_bias=False,
                   name="res{}{}_branch2b".format(stage_char, block_char))(y)

        y = BatchNormalization(epsilon=1e-5,
                               name="bn{}{}_branch2b".format(
                                   stage_char, block_char))(y)

        if block == 0:
            shortcut = Conv1D(filters,
                              1,
                              strides=stride,
                              use_bias=False,
                              name="res{}{}_branch1".format(
                                  stage_char, block_char))(x)

            shortcut = BatchNormalization(epsilon=1e-5,
                                          name="bn{}{}_branch1".format(
                                              stage_char,
                                              block_char))(shortcut)
        else:
            shortcut = x

        y = Add(name="res{}{}".format(stage_char, block_char))([y, shortcut])

        y = Activation("relu",
                       name="res{}{}_relu".format(stage_char, block_char))(y)

        return y
Example 6
    def build(self):
        input_text = Input(shape=(self.max_len, ))

        embedding_layer = Embedding(
            self.word_embeddings.shape[0],
            self.word_embeddings.shape[1],
            weights=[self.word_embeddings],
            trainable=self.config.word_embed_trainable)(input_text)
        text_embed = SpatialDropout1D(0.2)(embedding_layer)

        # wide convolution
        zero_padded_1 = ZeroPadding1D((6, 6))(text_embed)
        conv_1 = Conv1D(filters=128, kernel_size=7, strides=1,
                        padding='valid')(zero_padded_1)
        # dynamic k-max pooling
        k_maxpool_1 = KMaxPooling(int(self.max_len / 3 * 2))(conv_1)
        # non-linear feature function
        non_linear_1 = ReLU()(k_maxpool_1)

        # wide convolution
        zero_padded_2 = ZeroPadding1D((4, 4))(non_linear_1)
        conv_2 = Conv1D(filters=128, kernel_size=5, strides=1,
                        padding='valid')(zero_padded_2)
        # dynamic k-max pooling
        k_maxpool_2 = KMaxPooling(int(self.max_len / 3 * 1))(conv_2)
        # non-linear feature function
        non_linear_2 = ReLU()(k_maxpool_2)

        # wide convolution
        zero_padded_3 = ZeroPadding1D((2, 2))(non_linear_2)
        conv_3 = Conv1D(filters=128, kernel_size=5, strides=1,
                        padding='valid')(zero_padded_3)
        # folding
        folded = Folding()(conv_3)
        # dynamic k-max pooling
        k_maxpool_3 = KMaxPooling(k=10)(folded)
        # non-linear feature function
        non_linear_3 = ReLU()(k_maxpool_3)

        sentence_embed = Flatten()(non_linear_3)

        dense_layer = Dense(256, activation='relu')(sentence_embed)
        if self.config.loss_function == 'binary_crossentropy':
            output = Dense(1, activation='sigmoid')(dense_layer)
        else:
            output = Dense(self.n_class, activation='softmax')(dense_layer)

        model = Model(input_text, output)
        model.compile(loss=self.config.loss_function,
                      metrics=['acc'],
                      optimizer=self.config.optimizer)
        return model
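`KMaxPooling` in the model above (and in Examples 8 and 20) is a project-specific layer whose definition is not shown. A minimal sketch, assuming it keeps the k largest activations per channel along the time axis (a real implementation may additionally preserve their temporal order):

import tensorflow as tf
from keras.layers import Layer

class KMaxPooling(Layer):
    def __init__(self, k=1, axis=1, **kwargs):
        super(KMaxPooling, self).__init__(**kwargs)
        self.k = k
        self.axis = axis  # assumed to be the time axis

    def compute_output_shape(self, input_shape):
        return (input_shape[0], self.k, input_shape[2])

    def call(self, inputs):
        # move time last, take the k largest values per channel, move time back
        swapped = tf.transpose(inputs, perm=[0, 2, 1])      # (batch, channels, time)
        top_k = tf.nn.top_k(swapped, k=self.k, sorted=True).values
        return tf.transpose(top_k, perm=[0, 2, 1])          # (batch, k, channels)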
Example 7
def setup_first_stage_layers(stage1_filters):
    layers_start = []
    layers_start.append(ZeroPadding1D(padding=3))
    layers_start.append(
        Conv1D(filters=stage1_filters,
               kernel_size=2,
               padding='valid',
               kernel_initializer='he_normal'))
    layers_start.append(BatchNormalization())
    layers_start.append(Activation('relu'))
    layers_start.append(ZeroPadding1D(padding=1))
    layers_start.append(MaxPooling1D(pool_size=3, strides=2))

    return layers_start
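For context, a hypothetical way to consume the layer list returned above with the functional API; the input shape and filter count below are illustrative assumptions, not values from the original project:

from keras.layers import Input
from keras.models import Model

inputs = Input(shape=(256, 12))  # assumed (timesteps, channels)
x = inputs
for layer in setup_first_stage_layers(stage1_filters=64):
    x = layer(x)
stem = Model(inputs, x)
stem.summary()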
Example 8
def cnn_model():
    """
    return : threshold_score,accuracy
    """
    # Loading data
    X_train, X_test, y_train, y_test = load_train_test_data()
    # defining the hyper-parameters
    max_input_length = 50
    vocabulary_size = 20000
    embedding_dim = 100
    # Configuring the neural network
    model = Sequential()
    model.add(Embedding(vocabulary_size, 100, input_length=max_input_length))
    model.add(ZeroPadding1D((49, 49)))
    model.add(Conv1D(64, 50, padding="same"))
    model.add(KMaxPooling(k=5, axis=1))
    model.add(Activation("relu"))
    model.add(ZeroPadding1D((24, 24)))
    model.add(Conv1D(64, 25, padding="same"))
    model.add(Folding())
    model.add(KMaxPooling(k=5, axis=1))
    model.add(Activation("relu"))
    model.add(Flatten())
    model.add(Dense(y_train.shape[1], activation="softmax"))
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])
    print("model fitting - CNN network")
    model.summary()
    # Training the model
    history = model.fit(X_train,
                        y_train,
                        validation_data=(X_test, y_test),
                        epochs=10)
    plot_accuracy_and_loss_curves(history, "categorical_crossentropy", "cnn")
    # Saving the model
    model.save("models/cnn.h5")
    plot_model(model,
               to_file='model_images/cnn_model.png',
               show_shapes=True,
               show_layer_names=True)
    # Calculating accuracy and threshold score
    test_accuracy = model.evaluate(X_test, y_test, verbose=0)
    train_accuracy = model.evaluate(X_train, y_train, verbose=0)
    # Making predictions
    prediction = model.predict_classes(X_test, batch_size=10, verbose=0)
    # Calculating accuracy and threshold score
    threshold = threshold_score(y_test.argmax(axis=-1), prediction)
    return train_accuracy[1] * 100, test_accuracy[1] * 100, threshold * 100
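`Folding` above (and in Example 20) is another custom layer that is not shown. A minimal sketch, assuming the Kalchbrenner-style DCNN folding operation in which adjacent pairs of feature maps are summed, halving the channel dimension:

from keras.layers import Layer

class Folding(Layer):
    def compute_output_shape(self, input_shape):
        return (input_shape[0], input_shape[1], input_shape[2] // 2)

    def call(self, inputs):
        # add even- and odd-indexed feature maps together
        return inputs[:, :, ::2] + inputs[:, :, 1::2]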
Example 9
def sc_lstm_decoder(text_idx, text_one_hot, dialogue_act, nclasses, sample_out_size, lstm_size, inputs, step):

    def remove_last_column(x):
        return x[:, :-1, :]

    padding = ZeroPadding1D(padding=(1, 0))(text_one_hot)
    previous_char_slice = Lambda(remove_last_column, output_shape=(sample_out_size, nclasses))(padding)

    temperature = 1 / step

    lstm = SC_LSTM(
        lstm_size,
        nclasses,
        softmax_temperature=None,
        return_da=True,
        return_state=False,
        use_bias=True,
        return_sequences=True,
        implementation=2,
        dropout=0.2,
        recurrent_dropout=0.2,
        sc_dropout=0.2
    )

    recurrent_component, da_t, da_history = lstm([previous_char_slice, dialogue_act])

    decoder = Model(inputs=inputs + [text_idx], outputs=[recurrent_component, da_t, da_history], name='decoder_{}'.format('train'))
    return decoder
Example 10
def sc_lstm_decoder(decoder_input, nclasses, sample_out_size, lstm_size,
                    text_idx, text_one_hot, dialogue_act, inputs, step):
    def remove_last_column(x):
        return x[:, :-1, :]

    padding = ZeroPadding1D(padding=(1, 0))(text_one_hot)
    previous_char_slice = Lambda(remove_last_column,
                                 output_shape=(sample_out_size,
                                               nclasses))(padding)

    temperature = 1 / step

    lstm = SC_LSTM(lstm_size,
                   nclasses,
                   softmax_temperature=temperature,
                   generation_only=True,
                   condition_on_ptm1=True,
                   semantic_condition=True,
                   return_da=False,
                   return_state=False,
                   use_bias=True,
                   return_sequences=True,
                   implementation=2,
                   dropout=0.2,
                   recurrent_dropout=0.2,
                   sc_dropout=0.2)

    recurrent_component = lstm([previous_char_slice, dialogue_act])

    decoder_train = Model(inputs=[decoder_input, text_idx] + inputs,
                          outputs=recurrent_component,
                          name='decoder_{}'.format('train'))
    #decoder_test = Model(inputs=[decoder_input, text_idx] + inputs, outputs=recurrent_component, name='decoder_{}'.format('test'))
    # decoder_train.summary()
    return decoder_train, decoder_train
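A small self-contained check of the shift-right idiom used in the two decoders above: ZeroPadding1D((1, 0)) followed by dropping the last timestep delays the one-hot sequence by one step, so the LSTM at position t only sees characters up to t-1. The toy shapes below are assumptions for illustration:

import numpy as np
from keras.layers import Input, Lambda, ZeroPadding1D
from keras.models import Model

seq_in = Input(shape=(4, 1))
shifted = ZeroPadding1D(padding=(1, 0))(seq_in)
shifted = Lambda(lambda t: t[:, :-1, :])(shifted)
demo = Model(seq_in, shifted)

x = np.arange(4, dtype="float32").reshape(1, 4, 1)
print(demo.predict(x)[:, :, 0])  # [[0. 0. 1. 2.]] -- a one-step delayed copy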
Example 11
def residual_block(x, s, i, activation, causal, nb_filters, kernel_size):
    original_x = x

    if causal:
        x = ZeroPadding1D(((2**i) // 2, 0))(x)
        conv = Conv1D(filters=nb_filters,
                      kernel_size=kernel_size,
                      dilation_rate=2**i,
                      padding='same',
                      name='dilated_conv_%d_tanh_s%d' % (2**i, s))(x)
        conv = Cropping1D((0, (2**i) // 2))(conv)
    else:
        conv = Conv1D(filters=nb_filters,
                      kernel_size=kernel_size,
                      dilation_rate=2**i,
                      padding='same',
                      name='dilated_conv_%d_tanh_s%d' % (2**i, s))(x)

    if activation == 'norm_relu':
        x = Activation('relu')(conv)
        x = Lambda(channel_normalization)(x)
    elif activation == 'wavenet':
        x = wave_net_activation(conv)
    else:
        x = Activation(activation)(conv)

    x = SpatialDropout1D(0.05)(x)

    # 1x1 conv.
    x = Conv1D(nb_filters, 1, padding='same')(x)
    res_x = Add()([original_x, x])
    return res_x, x
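The helpers channel_normalization and wave_net_activation are referenced above but not defined in the snippet. Hedged sketches, assuming the common TCN/WaveNet formulations (per-timestep normalization by the largest absolute channel value, and a gated tanh/sigmoid unit):

from keras import backend as K
from keras.layers import Activation, Multiply

def channel_normalization(x):
    # scale each timestep by its maximum absolute activation across channels
    max_values = K.max(K.abs(x), axis=2, keepdims=True) + 1e-5
    return x / max_values

def wave_net_activation(x):
    tanh_out = Activation('tanh')(x)
    sigm_out = Activation('sigmoid')(x)
    return Multiply()([tanh_out, sigm_out])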
Example 12
def LSTMCNN(input_size, lost):  # input_size: length of the input sequence (126 in this case); lost: the loss function to compile with.
    #
    input_signal = Input((input_size,2))
    #
    # our data is 1D; Keras 1D layers expect a (timesteps, channels) shape, so the second dimension carries the channels.
    x = ZeroPadding1D(1)(input_signal)  # this means you run it on signal
    x = Conv1D(32, 3, activation='relu', padding='same')(x)  # this means you run it on x, and so forth in the next lines.
    x = MaxPooling1D(2)(x)
    x = Conv1D(16, 3, activation='relu', padding='same')(x)
    x = MaxPooling1D(2)(x)
    x = Conv1D(8, 3, activation='relu', padding='same')(x)
    x = MaxPooling1D(2)(x)
    x = Conv1D(4, 3, activation='relu', padding='same')(x)
    x = LSTM(32)(x)
    #x = Flatten()(x)
    encoded = Dense(32, activation='tanh')(x)
    x = RepeatVector(32)(encoded)
    x = LSTM(2, return_sequences=True)(x)
    x = Reshape((32, 2))(x)
    x = Conv1D(8, 3, activation='relu', padding='same')(x)
    x = UpSampling1D(2)(x)
    x = Conv1D(16, 3, activation='relu', padding='same')(x)
    x = UpSampling1D((2))(x)
    decoded = Conv1D(32, 3, activation='relu', padding='same')(x)
    decoded = Conv1D(2, 3, activation='sigmoid')(decoded)  # this is the last layer. it is the same size as the input.
    autoencoder = Model(input_signal, decoded)
    # Adam = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, decay=0.0)
    autoencoder.compile(optimizer='adam', loss=lost)
    #
    encoder = Model(input_signal, encoded)
    encoder.compile(optimizer='adam', loss=lost)
    #
    return autoencoder, encoder, encoded  # returns the two compiled models plus the encoding tensor.
Example 13
def BuildCNNNetRaw(input_size, lost):  # input_size: length of the input sequence (126 in this case); lost: the loss function to compile with.
    
    input_signal = Input(shape=(input_size,2))

    # our data is 1D; Keras 1D layers expect a (timesteps, channels) shape, so the second dimension carries the channels.
    x = ZeroPadding1D(3)(input_signal)  # this means you run it on signal
    x = Conv1D(10, 5, activation='linear', padding='same')(x)  # this means you run it on x, and so forth in the next lines.
    x = Conv1D(20, 5, activation='relu', padding='same')(x)
    x = MaxPooling1D(2)(x)
    x = Conv1D(5, 5, activation='relu', padding='same')(x)
    x = MaxPooling1D(4)(x)
    x = Conv1D(2, 5, activation='relu', padding='same')(x)
    x = MaxPooling1D(2)(x)
    x = Conv1D(5, 5, activation='relu', padding='same')(x)
    x = MaxPooling1D(2)(x)
    x = Flatten()(x)
    encoded = Dense(32, activation='relu')(x)

    x = Dense(126, activation='relu')(encoded)
    x = Reshape((63, 2))(x)
    x = UpSampling1D(2)(x)
    x = Conv1D(5, 5, activation='relu', padding='same')(x)
    x = UpSampling1D(2)(x)
    decoded = Conv1D(2, 3, activation='linear')(x)
    
    autoencoder = Model(input_signal, decoded)
    #Adam = keras.optimizers.Adam(lr=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07, decay=0.0)
    autoencoder.compile(optimizer='adam', loss=lost)
    
    encoder = Model(input_signal, encoded)
    encoder.compile(optimizer='adam', loss=lost)
    
    return autoencoder, encoder  # returns the two different models, already compiled.
Example 14
def decoder_1d(Ne, dictionary_dim, num_conv):
    """
    Create decoder for conv 1d
    :param Ne: dimension of code
    :param dictionary_dim: dimension of dictionary
    :param num_conv: number of conv filters
    :return: decoder
    """
    # input
    input_signal = Input(shape=(Ne, num_conv),
                         name="input")  # Input placeholder
    # Zero-pad
    input_signal_padded = ZeroPadding1D(padding=(dictionary_dim - 1),
                                        name="zeropad")(input_signal)
    # build convolution
    decoded = Conv1D(
        filters=1,
        kernel_size=dictionary_dim,
        padding="valid",
        use_bias=False,
        activation=None,
        trainable=False,
        input_shape=(Ne, num_conv),
        name="decoder",
    )(input_signal_padded)
    # output Y = HZ
    decoder = Model(input_signal, decoded)
    return decoder
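Since the decoder's convolution is frozen (trainable=False), the dictionary H has to be loaded explicitly. A possible way to do that, with assumed example dimensions; the shape (dictionary_dim, num_conv, 1) follows Conv1D's kernel layout (kernel_size, input_channels, filters):

import numpy as np

dictionary_dim, num_conv, Ne = 8, 16, 100
decoder = decoder_1d(Ne, dictionary_dim, num_conv)
H = np.random.randn(dictionary_dim, num_conv, 1).astype("float32")  # stand-in dictionary
decoder.get_layer("decoder").set_weights([H])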
Example 15
def conv_model_species_embedding(shape=[50, 20], n_species=50, embed_dim=10):
    # Input should be [x0,x1] where the first x value is a sequence vector, and the second is an integer representing the bacterium_id
    # (See beginning of generate_pca_plot_from_data method for application of predictions)
    # NOTE: shuffling the data before training one of these models is VERY IMPORTANT (see the line x0,x1,y = shuffle(x0,x1,y) below)
    # If the data is not shuffled then the performance will be worse and the embeddings will be disrupted
    sequence_input = Input(shape=shape)
    zero_pad = ZeroPadding1D(5)(sequence_input)
    conv1 = Conv1D(64, kernel_size=5, strides=1, activation='relu')(zero_pad)
    max_pool_1 = MaxPooling1D(pool_size=2, strides=2)(conv1)
    conv2 = Conv1D(64, kernel_size=5, strides=1, activation='relu')(max_pool_1)
    max_pool_2 = MaxPooling1D(pool_size=2, strides=2)(conv2)
    flatten = Flatten()(max_pool_2)
    dropout = Dropout(0.5)(flatten)
    dense1 = Dense(100, activation='relu')(dropout)
    embed_input = Input(shape=[1])
    embedding_layer = Embedding(n_species, embed_dim, input_length=1)
    bacteria_embedding = embedding_layer(embed_input)
    flatten_embed = Flatten()(bacteria_embedding)
    concat_embed = concatenate([dense1, flatten_embed])
    dense2_embed = Dense(50, activation='relu')(concat_embed)
    output_embed = Dense(1)(dense2_embed)
    model_embed = Model(inputs=[sequence_input, embed_input],
                        outputs=output_embed)
    model_embed.compile(loss='mean_squared_error', optimizer='adam')
    return model_embed
Example 16
def recurrent_sequence_decoder(latent_dim,
                               seqlen,
                               ncell=512,
                               alphabet_size=21,
                               project_x=True,
                               upsample=False,
                               min_deconv_dim=42,
                               input_dropout=None,
                               intermediate_dim=63,
                               max_filters=336,
                               n_conditions=0,
                               cond_concat_each_timestep=False):

    latent_vector = Input((latent_dim, ))
    latent_v = latent_vector

    prot_oh = Input((seqlen, alphabet_size))
    input_x = ZeroPadding1D(padding=(1, 0))(prot_oh)
    input_x = Lambda(lambda x_: x_[:, :-1, :])(input_x)

    if input_dropout is not None:
        input_x = Dropout(input_dropout,
                          noise_shape=(None, seqlen, 1))(input_x)
    if project_x:
        input_x = Conv1D(alphabet_size,
                         1,
                         activation=None,
                         name='decoder_x_embed')(input_x)

    if n_conditions > 0:
        cond_inp = Input((n_conditions, ))
        conditions = cond_inp
        latent_v = Concatenate()([latent_v, conditions])

    rnn = GRU(ncell, return_sequences=True)
    if upsample:
        z_seq = upsampler(latent_v,
                          intermediate_dim,
                          min_deconv_dim=min_deconv_dim,
                          n_deconv=3,
                          activation='prelu',
                          max_filters=max_filters)
        if cond_concat_each_timestep:
            cond_seq = RepeatVector(seqlen)(conditions)
            z_seq = Concatenate(axis=-1)([z_seq, cond_seq])
    else:
        z_seq = RepeatVector(seqlen)(latent_v)

    xz_seq = Concatenate(axis=-1)([z_seq, input_x])
    rnn_out = rnn(xz_seq)

    processed_x = Conv1D(alphabet_size, 1, activation=None,
                         use_bias=True)(rnn_out)
    output = Activation('softmax')(processed_x)

    if n_conditions > 0:
        G = Model([latent_vector, cond_inp, prot_oh], output)
    else:
        G = Model([latent_vector, prot_oh], output)
    return G
Example 17
    def call(self, inputs, **kwargs):
        if self.normalize_signal:
            inputs = (inputs - K.mean(inputs, axis=(1, 2), keepdims=True)) / (
                K.std(inputs, axis=(1, 2), keepdims=True) + K.epsilon()
            )

        if self.length < self.nfft:
            inputs = ZeroPadding1D(padding=(0, self.nfft - self.length))(inputs)

        real_part = []
        imag_part = []
        for n in range(inputs.shape[-1]):
            real_part.append(
                K.conv1d(
                    K.expand_dims(inputs[:, :, n]),
                    kernel=self.real_kernel,
                    strides=self.shift,
                    padding="valid",
                )
            )
            imag_part.append(
                K.conv1d(
                    K.expand_dims(inputs[:, :, n]),
                    kernel=self.imag_kernel,
                    strides=self.shift,
                    padding="valid",
                )
            )

        real_part = K.stack(real_part, axis=-1)
        imag_part = K.stack(imag_part, axis=-1)

        # real_part = K.expand_dims(real_part)
        # imag_part = K.expand_dims(imag_part)
        if self.mode == "abs":
            fft = K.sqrt(K.square(real_part) + K.square(imag_part))
        if self.mode == "phase":
            fft = tf.atan(real_part / imag_part)
        elif self.mode == "real":
            fft = real_part
        elif self.mode == "imag":
            fft = imag_part
        elif self.mode == "complex":
            fft = K.concatenate((real_part, imag_part), axis=-1)
        elif self.mode == "log":
            fft = K.clip(
                K.sqrt(K.square(real_part) + K.square(imag_part)), K.epsilon(), None
            )
            fft = K.log(fft) / np.log(10)

        fft = K.permute_dimensions(fft, (0, 2, 1, 3))[:, : self.nfft // 2, :, :]
        if self.normalize_feature:
            if self.mode == "complex":
                warnings.warn(
                    'spectrum normalization will not be applied with mode == "complex"'
                )
            else:
                fft = (fft - K.mean(fft, axis=1, keepdims=True)) / (
                    K.std(fft, axis=1, keepdims=True) + K.epsilon()
                )

        return fft
Example 18
    def build_model(self):
        word_input = Input(shape=(self.maxlen,), dtype='int32', name='word_input')
        word_emb = Embedding(input_dim=len(self.emb), output_dim=100, input_length=self.maxlen, weights=[self.emb],
                            trainable=False)(word_input)
        # bilstm
        bilstm = Bidirectional(LSTM(32, return_sequences=True))(word_emb)
        bilstm_d = Dropout(0.1)(bilstm)

        # cnn
        half_window_size = 2
        padding_layer = ZeroPadding1D(padding=half_window_size)(word_emb)
        conv = Conv1D(filters=50, kernel_size=2 * half_window_size + 1, padding='valid')(padding_layer)
        conv_d = Dropout(0.1)(conv)
        dense_conv = TimeDistributed(Dense(50))(conv_d)

        # merge
        rnn_cnn_merge = concatenate([bilstm_d,dense_conv], axis=2)
        dense = TimeDistributed(Dense(self.label_size))(rnn_cnn_merge)

        outs = Dense(self.label_size, activation='softmax')(dense)
        # build model
        model = Model(inputs=[word_input], outputs=[outs])

        model.compile(optimizer='rmsprop',  # optimizer parameters can also be set via optimizer = optimizers.RMSprop(lr=0.001)
                      loss='sparse_categorical_crossentropy', metrics=['accuracy'])

        model.summary()
        self.model = model
        return model
Example 19
    def __createModel(self):
        # [NOTED] Kim used 2-channels input, static and non-static channels (Section 2, Page 2)
        model = Sequential()
        multiFilterSizeConvolution = []

        for h in [3, 4, 5]:
            submodel = Sequential()
            submodel.add(ZeroPadding1D(
                padding=1, 
                input_shape=(self.__numberOfWords, self.__sizeOfWordVectors)))
            submodel.add(Conv1D(
                    filters=self.__numberOfFilter, 
                    kernel_size=h, 
                    padding='valid', 
                    activation='relu', 
                    strides=1,
                    kernel_regularizer=regularizers.l2(LAMBDA)))
            submodel.add(GlobalMaxPooling1D())

            multiFilterSizeConvolution.append(submodel)

        #  UserWarning: The `Merge` layer is deprecated and will be removed after 08/2017. 
        #  Use instead layers from `keras.layers.merge`, e.g. `add`, `concatenate`, etc.
        model.add(Merge(multiFilterSizeConvolution, mode="concat"))
        model.add(Dropout(P_DROPOUT))
        model.add(Dense(1, input_shape=(300,)))
        model.add(Activation('sigmoid'))  # single-unit output: softmax over one unit would be constant
        print('Compiling model')
        model.compile(
            loss='mean_squared_error',
            optimizer='adadelta',
            metrics=['accuracy'])
        return model
Example 20
def build_dcnn():
    model_1 = Sequential([
        Embedding(max_features, embed_size),
        ZeroPadding1D((49, 49)),
        Conv1D(64, 50, padding="same"),
        KMaxPooling_non_flatten(k=5, axis=1),
        Activation("relu"),
        ZeroPadding1D((24, 24)),
        Conv1D(64, 25, padding="same"),
        Folding(),
        KMaxPooling_non_flatten(k=5, axis=1),
        Activation("relu"),
        Flatten(),
        Dense(num_classes, activation="softmax")
    ])
    return model_1
Example 21
def get_cnn_model(num_amino_acids, max_sequence_size, max_num_functions):
    logging.info('Building CNN model using functional API ...')
    logging.debug("Embedding dims = " + str(embedding_dims))

    # no need to mention the num_amino_acids when using functional API
    input = Input(shape=(max_sequence_size,))

    embedding = Embedding(num_amino_acids, embedding_dims, input_length=max_sequence_size)(input)

    x = Convolution1D(200, 3, activation='relu', strides=1)(embedding)
    x = Dropout(0.2)(x)

    z = Convolution1D(200, 3, activation='relu', strides=1)(x)
    z = Dropout(0.2)(z)
    x = ZeroPadding1D((0,4))(x)

    # residual connection
    x = Add()([z, x])


    x = GlobalMaxPooling1D()(x)

    x = Dense(max_num_functions)(x)
    x = BatchNormalization()(x) # can also try to do this after the activation (didn't work)
    output = Activation('sigmoid')(x)


    model = Model([input], output)
    model.summary()
    return model
Example 22
	def BiLSTM_CNN_CRF1(self,wordsids,classes,train,val,train_label,val_label,istrain =True):
		self.gpu_config()
		output_dim = 50
		lstm_cell = 50
		max_len = 100
		inputs = Input(shape = (None,))
		word_emd = Embedding(len(wordsids),output_dim)(inputs)
		bilstm = Bidirectional(LSTM(lstm_cell, return_sequences = True, dropout = 0.1, recurrent_dropout = 0.1))(word_emd)
		bilstm_d = Dropout(0.3)(bilstm)
		paddinglayer = ZeroPadding1D(2)(bilstm_d)
		conv1 = Conv1D(32, 2*2+1, padding = "valid")(paddinglayer)
		conv1_d = Dropout(0.1)(conv1)
		conv_dense = TimeDistributed(Dense(100))(conv1_d)
		crf = CRF(len(classes),sparse_target = True)
		crf_output = crf(conv_dense)
		model = Model(inputs,crf_output)
		model.compile(optimizer = keras.optimizers.Adam(1e-2),
					loss = crf.loss_function,
					metrics = [crf.accuracy])
		model.summary()		
		checkpoint = ModelCheckpoint("model/model_{}.h5".format(nowtime),monitor = "val_acc",verbose = 1,save_best_only = True,mode = "max")

		if istrain:
			history = model.fit(train,
								train_label,
								self.batch_size,
								epochs = self.epochs,
								callbacks = [checkpoint],
								validation_data = (val,val_label)
								)			

			return model,history
		else:
			return model
Example 23
def BiLSTM_CNN_CRF(
    input_length,
    input_dim,
    class_label_count,
    embedding_size,
    embedding_weights=None,
    is_train=True,
):
    word_input = Input(shape=(input_length, ),
                       dtype="int32",
                       name="word_input")

    if is_train:
        word_emb = Embedding(
            input_dim=input_dim,
            output_dim=embedding_size,
            input_length=input_length,
            weights=[embedding_weights],
            name="word_emb",
        )(word_input)
    else:
        word_emb = Embedding(
            input_dim=input_dim,
            output_dim=embedding_size,
            input_length=input_length,
            name="word_emb",
        )(word_input)

    # bilstm
    bilstm = Bidirectional(LSTM(64, return_sequences=True))(word_emb)
    bilstm_drop = Dropout(0.1)(bilstm)
    bilstm_dense = TimeDistributed(Dense(embedding_size))(bilstm_drop)

    # cnn
    half_window_size = 2
    filter_kernel_number = 64
    padding_layer = ZeroPadding1D(padding=half_window_size)(word_emb)
    conv = Conv1D(
        filters=filter_kernel_number,
        kernel_size=2 * half_window_size + 1,
        padding="valid",
    )(padding_layer)
    conv_drop = Dropout(0.1)(conv)
    conv_dense = TimeDistributed(Dense(filter_kernel_number))(conv_drop)

    # merge
    rnn_cnn_merge = Concatenate(axis=2)([bilstm_dense, conv_dense])
    dense = TimeDistributed(Dense(class_label_count))(rnn_cnn_merge)

    # crf
    crf = CRF(class_label_count, sparse_target=False)
    crf_output = crf(dense)

    # model
    model = Model(inputs=[word_input], outputs=crf_output)
    model.compile(loss=crf_loss,
                  optimizer="adam",
                  metrics=[crf_accuracy, crf_viterbi_accuracy])

    return model
Example 24
def resblock_body(x, num_filters, num_blocks, all_narrow=True):
    # ----------------------------------------------------------------#
    #   Use ZeroPadding1D and a stride-2 convolution block to downsample
    # ----------------------------------------------------------------#
    preconv1 = ZeroPadding1D((1, 0))(x)
    preconv1 = DarknetConv1D_BN_Mish(num_filters, 3, strides=2)(preconv1)

    # --------------------------------------------------------------------#
    #   Then build a large residual branch, shortconv, which bypasses the stacked residual blocks
    # --------------------------------------------------------------------#
    shortconv = DarknetConv1D_BN_Mish(num_filters // 2 if all_narrow else num_filters, 1)(preconv1)

    # ----------------------------------------------------------------#
    #   The main branch loops over num_blocks; each iteration is a residual block.
    # ----------------------------------------------------------------#
    mainconv = DarknetConv1D_BN_Mish(num_filters // 2 if all_narrow else num_filters, 1)(preconv1)
    for i in range(num_blocks):
        y = compose(
            DarknetConv1D_BN_Mish(num_filters // 2, 1),
            DarknetConv1D_BN_Mish(num_filters // 2 if all_narrow else num_filters, 3))(mainconv)
        mainconv = Add()([mainconv, y])
    postconv = DarknetConv1D_BN_Mish(num_filters // 2 if all_narrow else num_filters, 1)(mainconv)

    # ----------------------------------------------------------------#
    #   Concatenate the large residual branch back in
    # ----------------------------------------------------------------#
    route = Concatenate()([postconv, shortconv])

    # Finally, consolidate the number of channels with a 1x1 convolution
    return DarknetConv1D_BN_Mish(num_filters, 1)(route)
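resblock_body relies on two helpers that are not included in the snippet: compose, which chains layer applications left to right, and DarknetConv1D_BN_Mish, taken here to be Conv1D -> BatchNormalization -> Mish. The sketch below is an assumption modelled on the 2D YOLOv4 counterparts (stride-2 convolutions use 'valid' padding because the input is zero-padded explicitly, as above):

from functools import reduce
from keras import backend as K
from keras.layers import Activation, BatchNormalization, Conv1D

def compose(*funcs):
    # left-to-right composition: compose(f, g)(x) == g(f(x))
    return reduce(lambda f, g: lambda x: g(f(x)), funcs)

def mish(x):
    return x * K.tanh(K.softplus(x))

def DarknetConv1D_BN_Mish(filters, kernel_size, strides=1):
    padding = 'valid' if strides == 2 else 'same'
    def block(x):
        y = Conv1D(filters, kernel_size, strides=strides,
                   padding=padding, use_bias=False)(x)
        y = BatchNormalization()(y)
        return Activation(mish)(y)
    return block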
Example 25
def build_ds5_no_ctc_and_xfer_weights(loaded_model, input_dim=161, fc_size=1024, rnn_size=512, output_dim=29, initialization='glorot_uniform',
                  conv_layers=4):
    """ Pure CNN implementation"""


    K.set_learning_phase(0)
    for ind, i in enumerate(loaded_model.layers):
        print(ind, i)

    kernel_size = 11  #
    conv_depth_1 = 64  #
    conv_depth_2 = 256  #

    input_data = Input(shape=(None, input_dim), name='the_input') #batch x time x spectro size
    conv = ZeroPadding1D(padding=(0, 2048))(input_data) #pad on time dimension

    x = Conv1D(filters=128, name='conv_1', kernel_size=kernel_size, padding='valid', activation='relu', strides=2,
            weights = loaded_model.layers[2].get_weights())(conv)
    # x = Conv1D(filters=1024, name='conv_2', kernel_size=kernel_size, padding='valid', activation='relu', strides=2,
    #            weights=loaded_model.layers[3].get_weights())(x)


    # Last Layer 5+6 Time Dist Dense Layer & Softmax
    x = TimeDistributed(Dense(fc_size, activation='relu',
                              weights=loaded_model.layers[3].get_weights()))(x)
    y_pred = TimeDistributed(Dense(output_dim, name="y_pred", activation="softmax"))(x)

    model = Model(inputs=input_data, outputs=y_pred)

    return model
Example 26
    def build_discriminator(self):

        model = Sequential()

        model.add(
            Conv1D(32,
                   kernel_size=3,
                   strides=2,
                   input_shape=self.img_shape,
                   padding="same"))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv1D(64, kernel_size=3, strides=2, padding="same"))
        model.add(ZeroPadding1D(padding=(0, 1)))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv1D(128, kernel_size=3, strides=2, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Conv1D(256, kernel_size=3, strides=1, padding="same"))
        model.add(BatchNormalization(momentum=0.8))
        model.add(LeakyReLU(alpha=0.2))
        model.add(Dropout(0.25))
        model.add(Flatten())
        model.add(Dense(1, activation='sigmoid'))

        model.summary()

        img = Input(shape=self.img_shape)
        validity = model(img)

        return Model(img, validity)
Example 27
def conv_model():
    model = keras.models.Sequential()
    model.add(ZeroPadding1D(
        5, input_shape = (MAX_SEQUENCE_LENGTH, len(character_to_index) + 1)
    ))

    model.add(Conv1D(
        64,
        kernel_size = 5,
        strides = 1,
        activation = 'relu',
        #input_shape = (MAX_SEQUENCE_LENGTH, len(character_to_index) + 1)
    ))
    model.add(MaxPooling1D(pool_size=2, strides=2))
    #model.add(Dropout(0.5))
    model.add(Conv1D(64, 5, activation='relu'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Flatten())
    model.add(Dropout(0.5))
    model.add(Dense(100, activation='relu'))
    #model.add(Dense(100, activation='relu'))
    model.add(Dense(20, activation='relu'))
    model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer='adam')

    return model
Example 28
def fourier_plus_conv(conv_embed_length=len(CHARACTER_DICT),
                      fourier_embed_length=len(CHARACTER_DICT)):
    conv = keras.models.Sequential()
    conv.add(
        ZeroPadding1D(kernelsize,
                      input_shape=(MAX_SEQUENCE_LENGTH, conv_embed_length)))
    conv.add(Conv1D(
        64,
        kernel_size=kernelsize,
        strides=1,
        activation='relu',
    ))
    conv.add(MaxPooling1D(pool_size=2, strides=2))
    #model.add(Dropout(0.5))
    conv.add(Conv1D(64, kernelsize, activation='relu'))
    conv.add(MaxPooling1D(pool_size=2))
    conv.add(Flatten())
    conv.add(Dropout(0.5))
    conv.add(Dense(100, activation='relu'))

    fourier = keras.models.Sequential()
    fourier.add(Dense(256, activation='relu'))
    # model.add(BatchNormalization())
    fourier.add(Flatten())
    fourier.add(Dense(128, activation='relu'))
    fourier.add(Dropout(0.5))
    fourier.add(Dense(64, activation='relu'))
    fourier.add(Dense(64, activation='relu'))

    model = keras.models.Sequential()
    model.add(Merge([conv, fourier], mode='concat'))
    model.add(Dense(64, activation='relu'))
    model.add(Dense(64, activation='relu'))
    model.add(Dense(1))
    model.compile(loss='mean_squared_error', optimizer='adam')

    return model
Example 29
    def resnet(self, x, init_maxpool=False):
        """
        Get ResNet.

        :param x: Input.
        :param init_maxpool: Maxpool or Conv downsample.
        :returns: ResNet Architecture.
        """

        x = ZeroPadding1D(padding=3)(x)
        x = Conv1D(64, 7, strides=2, use_bias=False)(x)
        #x = VirtualBatchNormalization(
        #    virtual_batch_size=self.virtual_batch_size
        #)(x)
        x = BatchNormalization()(x)
        x = Activation("elu")(x)

        if init_maxpool:
            x = MaxPooling1D(3, strides=2, padding="same")(x)
        else:
            x = Conv1D(64, 3, strides=2, use_bias=False)(x)
            #x = VirtualBatchNormalization(
            #    virtual_batch_size=self.virtual_batch_size)(x)
            x = BatchNormalization()(x)
            x = Activation("elu")(x)
        for stage_id, iterations in enumerate(self.blocks):
            for block_id in range(iterations):
                x = self.resnet_block(self.features, stage_id, block_id)(x)
            self.features *= 2

        return x
Example 30
def conv_block(x, stage, branch, nb_filter, dropout_rate=None, weight_decay=1e-4):
    '''Apply BatchNorm, Relu, bottleneck 1x1 Conv1D, 3x3 Conv1D, and optional dropout
        # Arguments
            x: input tensor
            stage: index for dense block
            branch: layer index within each dense block
            nb_filter: number of filters
            dropout_rate: dropout rate
            weight_decay: weight decay factor
    '''
    eps = 1.1e-5
    concat_axis = 2
    conv_name_base = 'conv' + str(stage) + '_' + str(branch)
    relu_name_base = 'relu' + str(stage) + '_' + str(branch)

    # 1x1 Convolution (Bottleneck layer)
    inter_channel = nb_filter * 4
    x = BatchNormalization(epsilon=eps, axis=concat_axis, name=conv_name_base + '_x1_bn', scale=True)(x)
    #     x = Scale(axis=concat_axis, name=conv_name_base+'_x1_scale')(x)
    x = Activation('relu', name=relu_name_base + '_x1')(x)
    x = Conv1D(inter_channel, 1, name=conv_name_base + '_x1', use_bias=False)(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    # 3x3 Convolution
    x = BatchNormalization(epsilon=eps, axis=concat_axis, name=conv_name_base + '_x2_bn', scale=True)(x)
    x = Activation('relu', name=relu_name_base + '_x2')(x)
    x = ZeroPadding1D(1, name=conv_name_base + '_x2_zeropadding')(x)
    x = Conv1D(nb_filter, 3, name=conv_name_base + '_x2', use_bias=False)(x)

    if dropout_rate:
        x = Dropout(dropout_rate)(x)

    return x