Example #1
def without_fully_connected_cnn(input_shape, num_classes):
    model = Sequential()
    activation = 'relu'
    model.add(Convolution1D(2, 9, input_shape=input_shape, activation=activation))
    model.add(BatchNormalization())
    model.add(AveragePooling1D())

    model.add(Convolution1D(2, 7, activation=activation))
    model.add(BatchNormalization())
    model.add(AveragePooling1D())

    model.add(Convolution1D(4, 7, activation=activation))
    model.add(BatchNormalization())
    model.add(AveragePooling1D())

    model.add(Convolution1D(8, 5, activation=activation))
    model.add(BatchNormalization())
    model.add(AveragePooling1D())

    model.add(Convolution1D(12, 3, activation=activation))
    model.add(BatchNormalization())
    model.add(AveragePooling1D())

    model.add(Dropout(0.85, seed=23087))
    model.add(Convolution1D(num_classes, 1))
    model.add(BatchNormalization())
    model.add(GlobalAveragePooling1D())

    model.add(Activation('softmax', name='loss'))

    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

    print(model.summary())
    print("CNN Model created.")
    return model
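A minimal usage sketch for the builder above, assuming standard Keras imports; the (500, 1) input shape and 3 classes are illustrative values borrowed from the similar Example #23:

from keras.models import Sequential
from keras.layers import (Convolution1D, BatchNormalization, AveragePooling1D,
                          Dropout, GlobalAveragePooling1D, Activation)

model = without_fully_connected_cnn(input_shape=(500, 1), num_classes=3)
# model.fit(x_train, y_train, ...) would follow once data is prepared.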
Example #2
def discriminator_model(model_name="discriminator"):
    disc_input = Input(shape=(400, 1), name="discriminator_input")
    aux_input = Input(shape=(47, ), name="auxilary_input")

    # Conv Layer 1
    x = Conv1D(filters=100, kernel_size=13, padding='same')(disc_input)
    x = LeakyReLU(0.2)(x)  # output shape is 100 x 400
    x = AveragePooling1D(pool_size=20)(x)  # output shape is 100 x 20

    # Conv Layer 2
    x = Conv1D(filters=250, kernel_size=13, padding='same')(x)
    x = LeakyReLU(0.2)(x)  # output shape is 250 x 20
    x = AveragePooling1D(pool_size=5)(x)  # output shape is 250 x 4

    # Conv Layer 3
    x = Conv1D(filters=300, kernel_size=13, padding='same')(x)
    x = LeakyReLU(0.2)(x)  # output shape is 300 x 4
    x = Flatten()(x)  # output shape is 1200

    x = concatenate([x, aux_input], axis=-1)  # shape is 1247

    # Dense Layer 1
    x = Dense(200)(x)
    x = LeakyReLU(0.2)(x)  # output shape is 200

    # Dense Layer 2
    x = Dense(1)(x)
    x = Activation('sigmoid')(x)

    discriminator_model = Model(outputs=[x],
                                inputs=[disc_input, aux_input],
                                name=model_name)

    return discriminator_model
Example #3
def create_model(model_type, input_shape, loss_function):
    print('input_shape', input_shape)
    if model_type.startswith('cnn'):
        data_input = Input(shape=(input_shape[1], input_shape[2]))
        kernel_size = 7
        if input_shape[1] > 500:
            kernel_size = 71
        # Conv Block 1
        x = Conv1D(filters=32, kernel_size=kernel_size, padding='same', activation='relu')(data_input)
        x = Conv1D(filters=32, kernel_size=kernel_size, padding='same', activation='relu')(x)
        x = AveragePooling1D(pool_size=2, strides=2)(x)

        # residual block 1
        x1 = Conv1D(filters=32, kernel_size=kernel_size, padding='same', activation='relu')(x)
        c1 = concatenate([x, x1])
        x2 = Conv1D(filters=32, kernel_size=kernel_size, padding='same', activation='relu')(c1)
        x = concatenate([x, x1, x2])

        # Conv Block 2
        x = Conv1D(filters=24, kernel_size=kernel_size, padding='same', activation='relu')(x)
        x = AveragePooling1D(pool_size=2, strides=2)(x)

        # residual block 2
        x1 = Conv1D(filters=24, kernel_size=kernel_size, padding='same', activation='relu')(x)
        c1 = concatenate([x, x1])
        x2 = Conv1D(filters=24, kernel_size=kernel_size, padding='same', activation='relu')(c1)
        x = concatenate([x, x1, x2])

        # Conv Block 3
        x = Conv1D(filters=16, kernel_size=kernel_size, padding='same', activation='relu')(x)
        x = AveragePooling1D(pool_size=2, strides=2)(x)

        # residual block 3
        x1 = Conv1D(filters=16, kernel_size=kernel_size, padding='same', activation='relu')(x)
        c1 = concatenate([x, x1])
        x2 = Conv1D(filters=16, kernel_size=kernel_size, padding='same', activation='relu')(c1)
        x = concatenate([x, x1, x2])

        # Flatten
        x = Flatten()(x)
        x = Dense(16, activation='relu')(x)
        x = Dense(64, activation='relu')(x)
        y = Dense(1)(x)
        model = Model(inputs=data_input, outputs=y)
        model.compile(loss=losses.logcosh, optimizer=Adam(lr=args.learning_rate), metrics=[r2_keras])
        # model.compile(loss=losses.mean_squared_error, optimizer=Adam(lr=args.learning_rate), metrics=[r2_keras])
    else:
        model = Sequential()
        model.add(Dense(512, activation='relu', input_dim=input_shape))
        model.add(Dense(512, activation='relu'))
        model.add(Dense(24, activation='linear'))
        model.compile(loss=loss_function, optimizer='adam', metrics=[r2_score])

    return model
Example #4
def build_model():
    dropout = 0.3
    seq_input = Input(shape=(23, 4))
    seq_conv1 = Convolution1D(256, 5, kernel_initializer='glorot_uniform', name='seq_conv_1')(seq_input)
    seq_act1 = Activation('relu', name='seq_activation1')(seq_conv1)
    seq_pool1 = AveragePooling1D(2, name='seq_pooling_1')(seq_act1)
    seq_drop1 = Dropout(dropout)(seq_pool1)

    seq_conv2 = Convolution1D(256, 5, kernel_initializer='glorot_uniform', name='seq_conv_2')(seq_drop1)
    seq_act2 = Activation('relu', name='seq_activation_2')(seq_conv2)
    seq_pool2 = AveragePooling1D(2, name='seq_pooling_2')(seq_act2)
    seq_drop2 = Dropout(dropout)(seq_pool2)
    seq_flat = Flatten()(seq_drop2)

    seq_dense1 = Dense(256, activation='relu', name='seq_dense_1')(seq_flat)
    seq_drop3 = Dropout(dropout)(seq_dense1)
    seq_dense2 = Dense(128, activation='relu', name='seq_dense_2')(seq_drop3)
    seq_drop4 = Dropout(dropout)(seq_dense2)
    seq_dense3 = Dense(64, activation='relu', name='seq_dense_3')(seq_drop4)
    seq_drop5 = Dropout(dropout)(seq_dense3)
    seq_out = Dense(40, activation='relu', name='seq_dense_4')(seq_drop5)

    epi_input = Input(shape=(23, 4))
    epi_conv1 = Convolution1D(256, 5, kernel_initializer='glorot_uniform', name='epi_conv_1')(epi_input)
    epi_act1 = Activation('relu', name='epi_activation_1')(epi_conv1)
    epi_pool1 = AveragePooling1D(2, name='epi_pooling_1')(epi_act1)
    epi_drop1 = Dropout(dropout)(epi_pool1)

    epi_conv2 = Convolution1D(256, 5, kernel_initializer='glorot_uniform', name='epi_conv_2')(epi_drop1)
    epi_act2 = Activation('relu', name='epi_activation_2')(epi_conv2)
    epi_pool2 = AveragePooling1D(2, name='epi_pooling_2')(epi_act2)
    epi_drop2 = Dropout(dropout)(epi_pool2)
    epi_flat = Flatten()(epi_drop2)

    epi_dense1 = Dense(256, activation='relu', name='epi_dense_1')(epi_flat)
    epi_drop3 = Dropout(dropout)(epi_dense1)
    epi_dense2 = Dense(128, activation='relu', name='epi_dense_2')(epi_drop3)
    epi_drop4 = Dropout(dropout)(epi_dense2)
    epi_dense3 = Dense(64, activation='relu', name='epi_dense_3')(epi_drop4)
    epi_drop5 = Dropout(dropout)(epi_dense3)
    epi_out = Dense(40, activation='relu', name='epi_dense_4')(epi_drop5)

    merged = concatenate([seq_out, epi_out], axis=-1)

    pretrain_model = Model(inputs=[seq_input, epi_input], outputs=[merged])

    # Load weights for the model
    pretrain_model.load_weights("weights/weights.h5", by_name=True)

    prediction = Dense(1, activation='linear', name='prediction')(merged)
    model = Model([seq_input, epi_input], prediction)
    return merged, model
Example #5
def build_model(sequ_len=30):
    input = Input(shape=(sequ_len, 4))
    conv1 = Convolution1D(filters=32,
                          kernel_size=3,
                          padding="valid",
                          activation="relu",
                          strides=1,
                          kernel_initializer='glorot_normal',
                          name='conv1')(input)
    conv2 = Convolution1D(filters=32,
                          kernel_size=3,
                          padding="valid",
                          activation="relu",
                          strides=1,
                          kernel_initializer='glorot_normal',
                          name='conv2')(conv1)
    pool1 = AveragePooling1D(pool_size=2, strides=2)(conv2)
    # 	conv3 = Convolution1D(filters=64, kernel_size=3, padding="valid",
    # 		activation="relu",
    # 		strides=1,
    # 		kernel_initializer='glorot_normal', name='conv3')(pool1)
    # 	conv4 = Convolution1D(filters=64, kernel_size=3, padding="valid",
    # 		activation="relu",
    # 		strides=1,
    # 		kernel_initializer='glorot_normal', name='conv4')(conv3)
    # 	pool2 = AveragePooling1D(pool_size=1, strides=1)(conv4)

    mlp = Dense(32, activation='relu')(Flatten()(pool1))
    output = Dense(1, activation='sigmoid')(mlp)

    model = Model(inputs=input, outputs=output)
    model.compile(optimizer='adam',
                  loss='binary_crossentropy',
                  metrics=['acc'])
    return model
Example #6
    def build_seq_model(self):
        self.logger.info("Building models")
        Seq_deepCpf1_Input_SEQ = Input(shape=(34, 4))
        Seq_deepCpf1_C1 = Convolution1D(
            80, 5, activation='relu')(Seq_deepCpf1_Input_SEQ)
        Seq_deepCpf1_P1 = AveragePooling1D(2)(Seq_deepCpf1_C1)
        Seq_deepCpf1_F = Flatten()(Seq_deepCpf1_P1)
        Seq_deepCpf1_DO1 = Dropout(0.3)(Seq_deepCpf1_F)
        Seq_deepCpf1_D1 = Dense(80, activation='relu')(Seq_deepCpf1_DO1)
        Seq_deepCpf1_DO2 = Dropout(0.3)(Seq_deepCpf1_D1)
        Seq_deepCpf1_D2 = Dense(40, activation='relu')(Seq_deepCpf1_DO2)
        Seq_deepCpf1_DO3 = Dropout(0.3)(Seq_deepCpf1_D2)
        Seq_deepCpf1_D3 = Dense(40, activation='relu')(Seq_deepCpf1_DO3)
        Seq_deepCpf1_DO4 = Dropout(0.3)(Seq_deepCpf1_D3)
        Seq_deepCpf1_Output = Dense(1, activation='linear')(Seq_deepCpf1_DO4)
        Seq_deepCpf1 = Model(inputs=[Seq_deepCpf1_Input_SEQ],
                             outputs=[Seq_deepCpf1_Output])

        # Load scores from the data file into the model
        try:
            self.logger.info("Loading scores for the SEQ models")
            Seq_deepCpf1.load_weights(
                os.path.join(os.path.dirname(__file__),
                             'kimsong_seq_scores.h5'))  # Renamed 'Seq_deepCpf1_weights.h5' file
        except FileNotFoundError:
            raise Exception(
                "Could not find DeepCpf1 data files containing scores")

        return Seq_deepCpf1
Example #7
def test_conv():
    train_x, train_y, test_x, test_y = data_load()

    print("Building models")
    Seq_deepCpf1_Input_SEQ = Input(shape=(34, 4))  #(None, 34, 4)
    # Does this represent 80 5*5 convolution kernels?
    Seq_deepCpf1_C1 = Convolution1D(80,
                                    5)(Seq_deepCpf1_Input_SEQ)  #(None, 30, 80)

    Seq_deepCpf1_P1 = AveragePooling1D(2)(Seq_deepCpf1_C1)  #(None, 15, 80)
    # Flatten: squash down to 1-D
    Seq_deepCpf1_F = Flatten()(Seq_deepCpf1_P1)  #(None, 1200)
    Seq_deepCpf1_DO1 = Dropout(0.3)(Seq_deepCpf1_F)  #(None, 1200)
    # Dense: fully-connected layer
    Seq_deepCpf1_D1 = Dense(80,
                            activation='relu')(Seq_deepCpf1_DO1)  #(None, 80)
    Seq_deepCpf1_DO2 = Dropout(0.3)(Seq_deepCpf1_D1)  #(None, 80)
    Seq_deepCpf1_D2 = Dense(40,
                            activation='relu')(Seq_deepCpf1_DO2)  #(None, 40)
    Seq_deepCpf1_DO3 = Dropout(0.3)(Seq_deepCpf1_D2)  #(None, 40)
    Seq_deepCpf1_D3 = Dense(40,
                            activation='relu')(Seq_deepCpf1_DO3)  #(None, 40)
    Seq_deepCpf1_DO4 = Dropout(0.3)(Seq_deepCpf1_D3)  #(None, 40)
    Seq_deepCpf1_Output = Dense(1, activation='linear')(
        Seq_deepCpf1_DO4)  #(None, 1)
    Seq_deepCpf1 = Model(inputs=[Seq_deepCpf1_Input_SEQ],
                         outputs=[Seq_deepCpf1_Output])
    print(Seq_deepCpf1.summary())
    for layer in Seq_deepCpf1.layers:
        print(layer.output_shape)
    # with a Sequential model
    get_1_layer_output = K.function([Seq_deepCpf1.layers[0].input],
                                    [Seq_deepCpf1.layers[1].output])
    layer_output = get_1_layer_output([train_x])[0]
    print(layer_output)
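An equivalent, arguably cleaner way to probe intermediate activations than the K.function call above is a sub-model; a sketch reusing the names from this example:

# Wrap the first conv layer in its own Model and predict through it.
probe = Model(inputs=Seq_deepCpf1.input,
              outputs=Seq_deepCpf1.layers[1].output)
layer_output = probe.predict(train_x)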
Example #8
def create_cnn():
    # define our CNN for the "Player Data" half of the Network
    inputs = Input(shape=(209, 1))  # Conv1D needs a (length, channels) tuple, not a bare int
    layers = Conv1D(64, 208, activation="relu", padding="same")(inputs)  # 1-D kernel; the original (1, 208) 2-D size would not run
    layers = BatchNormalization()(layers)
    layers = AveragePooling1D(pool_size=2)(layers)
    layers = Conv1D(128, 104, activation="relu", padding="same")(layers)  # likewise reduced from (1, 104) to a 1-D kernel
    layers = BatchNormalization()(layers)
    layers = AveragePooling1D(pool_size=2)(layers)
    layers = Flatten()(layers)
    layers = Dense(52, activation="relu")(layers)
    layers = BatchNormalization()(layers)
    layers = Dropout(0.25)(layers)

    cnn = Model(inputs,layers)
    return cnn
Example #9
def ResneXt(type, input_shape, num_outputs, repetitions):
    input = Input(shape=input_shape)
    conv1 = Conv1D(filters=32, kernel_size=7, strides=2)(input)
    conv1 = _bn_relu(conv1)
    #print(conv1.shape)
    pool1 = MaxPooling1D(pool_size=3, strides=2, padding="same")(conv1)
    block = pool1
    #print('first',block.shape)
    filters = 32
    for i, r in enumerate(repetitions):
        block = _residual_block(type,
                                block,
                                filters=filters,
                                repetitions=r,
                                is_first_layer=(i == 0))
        #print(block.shape)
        filters *= 2

    # Last activation
    block = _bn_relu(block)

    # Classifier block
    block_shape = K.int_shape(block)
    pool2 = AveragePooling1D(pool_size=block_shape[1], strides=1)(block)
    flatten1 = Flatten()(pool2)
    dense = Dense(units=num_outputs,
                  kernel_initializer="he_normal",
                  activation="softmax")(flatten1)

    return input, dense
Example #10
def build_model():
    # Returns the model. (v4.2.2)

    inputs1 = Input(shape=INPUTSHAPE)
    # GRU
    inter = Bidirectional(GRU(512, return_sequences=True),
                          merge_mode='concat')(inputs1)
    # pooling
    avg_pool = AveragePooling1D(pool_size=3)(inter)
    avg_pool = time_distributed_layer(avg_pool)
    max_pool = MaxPooling1D(pool_size=3)(inter)
    max_pool = time_distributed_layer(max_pool)
    inter = concatenate([avg_pool, max_pool])
    # fully connected layers
    inter = layer(1024, inter)
    inter = layer(256, inter)
    inter = layer(64, inter)
    inter = Flatten()(inter)
    inter = layer(1024, inter)
    inter = Dense(64, kernel_constraint=max_norm(5.))(inter)
    inter = LeakyReLU()(inter)
    outputs = Dense(2, activation='softmax')(inter)

    model = Model(inputs=inputs1, outputs=outputs)

    optimizer = Adam(lr=0.001)
    model.compile(loss="binary_crossentropy",
                  optimizer=optimizer,
                  metrics=[custom_acc])

    return model
Example #11
    def Conv(input_dim, classes):
        if len(input_dim) == 1:
            input_dim = (input_dim[0], 1)
        check_input_dimensions(input_dim)

        # This gave 99% accuracy on the bat training set, with meanMFCCs
        # DO NOT TOUCH

        model = Sequential()
        model.add(Conv1D(15, 5, padding="same", input_shape=input_dim))
        # model.add(Activation("tanh"))
        model.add(LeakyReLU(alpha=0.3))
        # model.add(MaxPooling1D())
        model.add(AveragePooling1D(padding="same"))
        model.add(Conv1D(40, 5, padding="same"))
        model.add(Activation("tanh"))
        model.add(MaxPooling1D())
        model.add(Flatten())
        # model.add(Dense(100))
        # model.add(LeakyReLU(alpha=0.3))
        # model.add(Dropout(0.5))
        model.add(Dense(600))
        model.add(Activation("tanh"))
        # model.add(Dropout(0.5))
        # model.add(LeakyReLU(alpha=0.3))
        model.add(Dense(classes))
        model.add(Activation("softmax"))

        return model
Example #12
    def build_ca_model(self):
        DeepCpf1_Input_SEQ = Input(shape=(34, 4))
        DeepCpf1_C1 = Convolution1D(80, 5,
                                    activation='relu')(DeepCpf1_Input_SEQ)
        DeepCpf1_P1 = AveragePooling1D(2)(DeepCpf1_C1)
        DeepCpf1_F = Flatten()(DeepCpf1_P1)
        DeepCpf1_DO1 = Dropout(0.3)(DeepCpf1_F)
        DeepCpf1_D1 = Dense(80, activation='relu')(DeepCpf1_DO1)
        DeepCpf1_DO2 = Dropout(0.3)(DeepCpf1_D1)
        DeepCpf1_D2 = Dense(40, activation='relu')(DeepCpf1_DO2)
        DeepCpf1_DO3 = Dropout(0.3)(DeepCpf1_D2)
        DeepCpf1_D3_SEQ = Dense(40, activation='relu')(DeepCpf1_DO3)

        DeepCpf1_Input_CA = Input(shape=(1, ))
        DeepCpf1_D3_CA = Dense(40, activation='relu')(DeepCpf1_Input_CA)
        DeepCpf1_M = Multiply()([DeepCpf1_D3_SEQ, DeepCpf1_D3_CA])

        DeepCpf1_DO4 = Dropout(0.3)(DeepCpf1_M)
        DeepCpf1_Output = Dense(1, activation='linear')(DeepCpf1_DO4)
        DeepCpf1 = Model(inputs=[DeepCpf1_Input_SEQ, DeepCpf1_Input_CA],
                         outputs=[DeepCpf1_Output])

        # Load scores from the data file into the model
        try:
            self.logger.info("Loading scores for the CA model")
            DeepCpf1.load_weights(
                os.path.join(os.path.dirname(__file__), 'kimsong_ca_scores.h5')
            )  # Renamed 'DeepCpf1_weights.h5' file
        except FileNotFoundError:
            raise Exception(
                "Could not find DeepCpf1 data files containing scores")

        return DeepCpf1
Example #13
def _optimized_res_block_disc(input, dim):
    shortcut = AveragePooling1D(2)(input)
    shortcut = Conv1D(dim, kernel_size=1, padding="same")(shortcut)

    output = input
    output = Conv1D(dim,
                    kernel_size=3,
                    padding="same",
                    kernel_initializer="he_normal")(output)
    output = LeakyReLU()(output)
    output = Conv1D(dim,
                    kernel_size=3,
                    padding="same",
                    kernel_initializer="he_normal")(output)
    output = AveragePooling1D(2)(output)

    return add([shortcut, output])
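A hypothetical call site for the block above; the (64, 32) input shape is illustrative. Both branches halve the timesteps and end with dim channels, which is what makes the final add() valid:

inp = Input(shape=(64, 32))                   # assumed shape, for illustration only
out = _optimized_res_block_disc(inp, dim=32)  # -> (None, 32, 32)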
Example #14
def build_hcnn_model(opts,
                     vocab_size=0,
                     maxnum=50,
                     maxlen=50,
                     embedd_dim=50,
                     embedding_weights=None,
                     verbose=False):

    N = maxnum
    L = maxlen

    logger.info(
        "Model parameters: max_sentnum = %d, max_sentlen = %d, embedding dim = %s, nbfilters = %s, filter1_len = %s, filter2_len = %s, drop rate = %s, l2 = %s"
        % (N, L, embedd_dim, opts.nbfilters, opts.filter1_len,
           opts.filter2_len, opts.dropout, opts.l2_value))

    word_input = Input(shape=(N * L, ), dtype='int32', name='word_input')
    x = Embedding(output_dim=embedd_dim,
                  input_dim=vocab_size,
                  input_length=N * L,
                  weights=embedding_weights,
                  name='x')(word_input)
    drop_x = Dropout(opts.dropout, name='drop_x')(x)

    resh_W = Reshape((N, L, embedd_dim), name='resh_W')(drop_x)

    z = TimeDistributed(Convolution1D(opts.nbfilters,
                                      opts.filter1_len,
                                      border_mode='valid'),
                        name='z')(resh_W)

    avg_z = TimeDistributed(AveragePooling1D(pool_length=L - opts.filter1_len + 1),
                            name='avg_z')(z)  # shape = (N, 1, nbfilters)

    resh_z = Reshape((N, opts.nbfilters),
                     name='resh_z')(avg_z)  # shape(N, nbfilters)

    hz = Convolution1D(opts.nbfilters,
                       opts.filter2_len,
                       border_mode='valid',
                       name='hz')(resh_z)
    # avg_h = MeanOverTime(mask_zero=True, name='avg_h')(hz)

    avg_hz = GlobalAveragePooling1D(name='avg_hz')(hz)
    y = Dense(output_dim=1, activation='sigmoid', name='output')(avg_hz)

    model = Model(input=word_input, output=y)

    if verbose:
        model.summary()

    start_time = time.time()
    model.compile(loss='mse', optimizer='rmsprop')
    total_time = time.time() - start_time
    logger.info("Model compiled in %.4f s" % total_time)

    return model
Example #15
def imageFeature(inputs):
    features = Reshape(target_shape=(num_region, 512))(inputs)
    features = Dense(embedding_size, activation="tanh",
                     use_bias=False)(features)
    features_pooling = AveragePooling1D(pool_size=num_region,
                                        padding="same")(features)
    features_pooling = Lambda(lambda x: K.squeeze(x, axis=1))(features_pooling)

    return features, features_pooling
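num_region and embedding_size are globals in the source snippet; a sketch with assumed values makes the shapes concrete:

num_region, embedding_size = 49, 300  # assumed values, not given in the snippet
img_in = Input(shape=(num_region, 512))
features, features_pooling = imageFeature(img_in)
# features: (None, 49, 300); features_pooling: (None, 300) after the squeeze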
Example #16
def discriminator_model(model_name="discriminator"):
    disc_input = Input(shape=(400, 1), name="discriminator_input")
    aux_input = Input(shape=(47, ), name="auxilary_input")

    # Conv Layer 1
    x = Convolution1D(nb_filter=100,
                      filter_length=13,
                      border_mode='same',
                      subsample_length=1)(disc_input)
    x = LeakyReLU(0.2)(x)  # output shape is 100 x 400
    x = AveragePooling1D(pool_length=20)(x)  # output shape is 100 x 20

    # Conv Layer 2
    x = Convolution1D(nb_filter=250,
                      filter_length=13,
                      border_mode='same',
                      subsample_length=1)(x)
    x = LeakyReLU(0.2)(x)  # output shape is 250 x 20
    x = AveragePooling1D(pool_length=5)(x)  # output shape is 250 x 4

    # Conv Layer 3
    x = Convolution1D(nb_filter=300,
                      filter_length=13,
                      border_mode='same',
                      subsample_length=1)(x)
    x = LeakyReLU(0.2)(x)  # output shape is 300 x 4
    x = Flatten()(x)  # output shape is 1200

    x = merge([x, aux_input], mode="concat", concat_axis=-1)  # shape is 1247

    # Dense Layer 1
    x = Dense(200)(x)
    x = LeakyReLU(0.2)(x)  # output shape is 200

    # Dense Layer 2
    x = Dense(1)(x)
    #x = Activation('sigmoid')(x)
    x = Activation('linear')(x)  # output shape is 1

    discriminator_model = Model(input=[disc_input, aux_input],
                                output=[x],
                                name=model_name)

    return discriminator_model
Example #17
def build_model(sequ_len=300, num_output=NUM_OUTPUT, class_weights=None):
    if class_weights is None:
        class_weights = np.ones((num_output, 2))
    ## sequence input
    input = Input(shape=(sequ_len, 4), name='sequence_input')
    conv1 = Convolution1D(
        filters=150,
        kernel_size=6,
        padding="valid",
        activation="sigmoid",
        use_bias=False,
        strides=1,  #W_regularizer = regularizers.l1(0.001),
        kernel_initializer='glorot_normal',
        name='conv1')(input)
    pool1 = AveragePooling1D(pool_size=2, strides=2)(conv1)
    conv2 = Convolution1D(filters=200,
                          kernel_size=6,
                          padding="valid",
                          activation="relu",
                          strides=1,
                          kernel_initializer='glorot_normal',
                          name='conv2')(pool1)
    pool2 = MaxPooling1D(pool_size=2, strides=2)(conv2)
    conv3 = Convolution1D(filters=400,
                          kernel_size=6,
                          padding="valid",
                          activation="relu",
                          strides=1,
                          kernel_initializer='glorot_normal',
                          name='conv3')(pool2)
    #pool3 = MaxPooling1D(pool_size=4, strides=4)(conv3)

    # fully-connected layers
    #flat = Flatten()(pool2)
    #flat = BatchNormalization()(flat)
    #mlp1 = Dense(units=100, activation='relu')( flat )
    #mlp1 = Dropout(0.3)(mlp1)

    # global pooling layer
    mlp1 = GlobalMaxPooling1D()(conv3)
    mlp1 = Lambda(sparsek_vec)(mlp1)

    output = Dense(units=num_output, name='output', activation='sigmoid')(mlp1)
    # compile
    #sgd = SGD(lr=0.01, momentum=0.05, decay=0.99, nesterov=True)
    #adam = Adam(lr=0.005, decay=0.01)
    model = Model(inputs=input, outputs=output)
    model.compile(
        loss='binary_crossentropy',
        #loss=get_weighted_loss(class_weights),
        optimizer='adam',
        metrics=['acc', keras.metrics.top_k_categorical_accuracy])
    #keras.metrics.sparse_top_k_categorical_accuracy])

    return model
Example #18
def run_CRIP(parser):
    protein = parser.protein
    # model_dir = parser.model_dir
    batch_size = parser.batch_size
    hiddensize = parser.hiddensize
    n_epochs = parser.n_epochs
    nbfilter = parser.nbfilter
    trainXeval, test_X, trainYeval, test_y = dealwithdata(protein)
    test_y = test_y[:, 1]
    kf = KFold(len(trainYeval), n_folds=5)
    aucs = []
    for train_index, eval_index in kf:
        train_X = trainXeval[train_index]
        train_y = trainYeval[train_index]
        eval_X = trainXeval[eval_index]
        eval_y = trainYeval[eval_index]
        print('configure cnn network')
        model = Sequential()
        model.add(
            Convolution1D(input_dim=21,
                          input_length=99,
                          nb_filter=nbfilter,
                          filter_length=7,
                          border_mode="valid",
                          activation="relu",
                          subsample_length=1))
        model.add(AveragePooling1D(pool_size=5))
        model.add(Dropout(0.5))
        # model.add(LSTM(128, input_dim=102, input_length=31, return_sequences=True))
        model.add(Bidirectional(LSTM(hiddensize, return_sequences=True)))
        model.add(Flatten())
        model.add(Dense(nbfilter, activation='relu'))
        model.add(Dropout(0.25))
        model.add(Dense(2))
        model.add(Activation('softmax'))
        # sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
        model.compile(loss='categorical_crossentropy',
                      optimizer=Adam(lr=1e-4))  # 'rmsprop'
        print('model training')
        # checkpointer = ModelCheckpoint(filepath="models/" + protein + "_bestmodel.hdf5", verbose=0, save_best_only=True)
        earlystopper = EarlyStopping(monitor='val_loss', patience=5, verbose=0)

        model.fit(train_X,
                  train_y,
                  batch_size=batch_size,
                  nb_epoch=n_epochs,
                  verbose=0,
                  validation_data=(eval_X, eval_y),
                  callbacks=[earlystopper])
        predictions = model.predict_proba(test_X)[:, 1]
        auc = roc_auc_score(test_y, predictions)
        aucs.append(auc)
    print("acid AUC: %.4f " % np.mean(aucs), protein)
Example #19
def create_model():
    assert ((depth - 4) % 6 == 0)

    n = (depth - 4) // 6  # integer division so the block count stays an int under Python 3
    inputs = Input(shape=input_shape)

    n_stages = [16, 16 * k, 32 * k, 64 * k]

    conv1 = Convolution1D(
        nb_filter=n_stages[0],
        filter_length=3,
        subsample_length=1,
        border_mode="same",
        init=weight_init,
        W_regularizer=l2(weight_decay),
        bias=use_bias)(
            inputs)  # "One conv at the beginning (spatial size: 32x32)"

    # Add wide residual blocks
    block_fn = _wide_basic
    conv2 = _layer(block_fn,
                   n_input_plane=n_stages[0],
                   n_output_plane=n_stages[1],
                   n_block=1,
                   count=n,
                   stride=1)(conv1)  # "Stage 1 (spatial size: 32x32)"
    conv3 = _layer(block_fn,
                   n_input_plane=n_stages[1],
                   n_output_plane=n_stages[2],
                   n_block=2,
                   count=n,
                   stride=2)(conv2)  # "Stage 2 (spatial size: 16x16)"
    conv4 = _layer(block_fn,
                   n_input_plane=n_stages[2],
                   n_output_plane=n_stages[3],
                   n_block=3,
                   count=n,
                   stride=2)(conv3)  # "Stage 3 (spatial size: 8x8)"

    batch_norm = BatchNormalization(axis=channel_axis)(conv4)
    relu = Activation("relu")(batch_norm)

    # Classifier block
    pool = AveragePooling1D(pool_length=8, stride=1, border_mode="same")(relu)
    flatten = Flatten()(pool)
    predictions = Dense(output_dim=nb_classes,
                        init=weight_init,
                        bias=use_bias,
                        W_regularizer=l2(weight_decay),
                        activation="softmax")(flatten)

    model = Model(input=inputs, output=predictions)
    return model
Example #20
def get_layer(x, state):
    if state.Layer_type == 'dense':
        return Dense(**state.Layer_attributes)(x)

    elif state.Layer_type == 'sfc':
        return SeparableFC(**state.Layer_attributes)(x)

    elif state.Layer_type == 'input':
        return Input(**state.Layer_attributes)

    elif state.Layer_type == 'conv1d':
        return Conv1D(**state.Layer_attributes)(x)

    elif state.Layer_type == 'denovo':
        x = Lambda(lambda x: K.expand_dims(x))(x)
        x = Permute(dims=(2, 1, 3))(x)
        x = Layer_deNovo(**state.Layer_attributes)(x)
        x = Lambda(lambda x: K.squeeze(x, axis=1))(x)
        return x

    elif state.Layer_type == 'sparsek_vec':
        x = Lambda(sparsek_vec)(x)
        return x

    elif state.Layer_type == 'maxpool1d':
        return MaxPooling1D(**state.Layer_attributes)(x)

    elif state.Layer_type == 'avgpool1d':
        return AveragePooling1D(**state.Layer_attributes)(x)

    elif state.Layer_type == 'lstm':
        return LSTM(**state.Layer_attributes)(x)

    elif state.Layer_type == 'flatten':
        return Flatten()(x)

    elif state.Layer_type == 'globalavgpool1d':
        return GlobalAveragePooling1D()(x)

    elif state.Layer_type == 'globalmaxpool1d':
        return GlobalMaxPooling1D()(x)

    elif state.Layer_type == 'dropout':
        return Dropout(**state.Layer_attributes)(x)

    elif state.Layer_type == 'identity':
        return Lambda(lambda x: x)(x)

    else:
        raise Exception('Layer_type "%s" is not understood' % state.Layer_type)
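A hypothetical driver for the dispatcher, using a minimal stand-in for the state objects (this State class is an assumption, not part of the source):

class State:
    # Minimal stand-in exposing the two attributes get_layer reads.
    def __init__(self, layer_type, **attrs):
        self.Layer_type = layer_type
        self.Layer_attributes = attrs

x = get_layer(None, State('input', shape=(100, 4)))
x = get_layer(x, State('conv1d', filters=32, kernel_size=5, padding='same'))
x = get_layer(x, State('avgpool1d', pool_size=2))
x = get_layer(x, State('flatten'))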
Example #21
def Res_model():

    x_input = Input(shape=(20, 1))

    x = GaussianNoise(stddev=10)(x_input)
    y = Conv1D(filters=20, kernel_size=3, activation='tanh')(x)
    y = Conv1D(filters=20, kernel_size=3, activation='relu')(y)
    y = MaxPooling1D(pool_size=2)(y)
    y = Dropout(0.2)(y)

    x = Conv1D(filters=20, kernel_size=3, activation=None)(x)
    x = Conv1D(filters=20, kernel_size=3, activation=None)(x)
    x = AveragePooling1D(pool_size=2)(x)
    x = Add()([x, y])

    y = Conv1D(filters=20, kernel_size=3, activation='tanh')(x)
    y = Conv1D(filters=20, kernel_size=3, activation='relu')(y)
    y = Conv1D(filters=20, kernel_size=3, activation='relu')(y)
    y = MaxPooling1D(pool_size=2)(y)
    y = Dropout(0.2)(y)

    x = Conv1D(filters=20, kernel_size=3, activation=None)(x)
    x = Conv1D(filters=20, kernel_size=3, activation=None)(x)
    x = Conv1D(filters=20, kernel_size=3, activation=None)(x)
    x = AveragePooling1D(pool_size=2)(x)
    x = Add()([x, y])

    x = Flatten()(x)
    x = Dense(units=1)(x)

    Res1D = Model(inputs=x_input, outputs=x)
    Res1D.compile(loss='logcosh', optimizer=Adam(), metrics=['MSE'])

    return Res1D
Example #22
def lstm_memory_train(X_train_list, y_train, vocab_size):
    N = len(X_train_list)

    X_train_list = [
        sequence.pad_sequences(x_train, maxlen=MAX_LEN)
        for x_train in X_train_list
    ]

    input_list = []
    out_list = []
    for i in range(N):
        input, out = get_embedding_input_output('f%d' % i, vocab_size)
        input_list.append(input)
        out_list.append(out)

    x = merge(out_list, mode='concat')

    lstm_out = LSTM(HIDDEN_SIZE, return_sequences=True)(x)

    lstm_share = GRU(HIDDEN_SIZE, return_sequences=True)

    x = lstm_out
    for i in range(2):
        att = TimeDistributed(Dense(1))(x)
        att = Flatten()(att)
        att = Activation(activation="softmax")(att)
        att = RepeatVector(HIDDEN_SIZE)(att)
        att = Permute((2, 1))(att)

        mer = merge([att, lstm_out], "mul")
        mer = merge([mer, out_list[-1]], 'mul')

        z = merge([lstm_out, mer], 'sum')
        z = lstm_share(z)
        x = z

    hid = AveragePooling1D(pool_length=2)(x)
    hid = Flatten()(hid)

    #hid = merge([hid,out_list[-1]], mode='concat')

    main_loss = Dense(1, activation='sigmoid', name='main_output')(hid)

    model = Model(input=input_list, output=main_loss)

    model.compile(loss='binary_crossentropy', optimizer='rmsprop')
    model.fit(X_train_list, y_train, batch_size=BATCH_SIZE, nb_epoch=EPOCHS)

    return model
Example #23
def build_cnn_architecture():
    model = Sequential()
    activation = 'relu'
    model.add(Convolution1D(2, 9, input_shape=(500, 1), activation=activation))
    model.add(AveragePooling1D())
    model.add(BatchNormalization())

    model.add(Convolution1D(2, 7, activation=activation))
    model.add(AveragePooling1D())
    model.add(BatchNormalization())

    model.add(Convolution1D(4, 7, activation=activation))
    model.add(AveragePooling1D())
    model.add(BatchNormalization())

    model.add(Convolution1D(4, 5, activation=activation))
    model.add(AveragePooling1D())
    model.add(BatchNormalization())

    model.add(Convolution1D(8, 3, activation=activation))
    model.add(AveragePooling1D())
    model.add(BatchNormalization())

    model.add(Dropout(0.10))
    model.add(Convolution1D(3, 1))
    model.add(GlobalAveragePooling1D())

    model.add(Activation('softmax', name='loss'))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    print(model.summary())
    print("CNN Model created.")
    return model
Example #24
def block_inception_b(input, seq_length):
    branch_0 = conv1d_bn(input, 48, 20, seq_length)

    branch_1 = conv1d_bn(input, 32, 20, seq_length)
    branch_1 = conv1d_bn(branch_1, 48, 60, seq_length)

    branch_2 = conv1d_bn(input, 32, 20, seq_length)
    branch_2 = conv1d_bn(branch_2, 48, 60, seq_length)
    branch_2 = conv1d_bn(branch_2, 48, 60, seq_length)

    branch_3 = AveragePooling1D(60, strides=1, padding='same')(input)
    branch_3 = conv1d_bn(branch_3, 48, 20, seq_length)

    x = concatenate([branch_0, branch_1, branch_2, branch_3])

    return x
Example #25
def block_inception_a(input, nb_filter, filter_length, seq_length):
    branch_0 = conv1d_bn(input, nb_filter, filter_length, seq_length)

    branch_1 = conv1d_bn(input, 44, 1, seq_length)
    branch_1 = conv1d_bn(branch_1, 64, 3, seq_length)

    branch_2 = conv1d_bn(input, 44, 1, seq_length)
    branch_2 = conv1d_bn(branch_2, 64, 3, seq_length)
    branch_2 = conv1d_bn(branch_2, 64, 3, seq_length)

    branch_3 = AveragePooling1D(3, strides=1, padding='same')(input)
    branch_3 = conv1d_bn(branch_3, 64, 1, seq_length)

    x = concatenate([branch_0, branch_1, branch_2, branch_3])

    return x
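Both inception blocks call a conv1d_bn helper that is not shown here; a plausible minimal definition, assuming the usual Conv-BN-ReLU pattern (the seq_length argument is accepted but unused in this sketch):

def conv1d_bn(x, nb_filter, filter_length, seq_length, padding='same'):
    # Assumed helper: Conv1D followed by BatchNormalization and ReLU.
    x = Conv1D(nb_filter, filter_length, padding=padding)(x)
    x = BatchNormalization()(x)
    return Activation('relu')(x)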
Example #26
    def _lstm_model(self, shape):
        model = Sequential()
        model.add(
            LSTM(512, return_sequences=True, input_shape=(shape[1], shape[2])))
        model.add(Dropout(0.5))
        model.add(AveragePooling1D(shape[1]))
        model.add(Flatten())
        model.add(Dropout(0.5))
        model.add(Dense(128))
        model.add(Activation('relu'))
        model.add(Dense(15))
        model.add(Activation('softmax'))
        model.compile(loss='categorical_crossentropy',
                      optimizer='rmsprop',
                      metrics=['accuracy'])
        return model
Example #27
def CONV_net(window_size):
    input = Input(shape=(window_size, 30))
    conv = Conv1D(window_size, 3, activation='relu', padding='same')(input)
    mp = MaxPooling1D(2)(conv)
    ap = AveragePooling1D(2)(conv)
    conv = concatenate([mp, ap])
    conv = Conv1D(window_size, 3, activation='relu', padding='valid')(conv)
    fc = Flatten()(conv)
    fc = Dropout(0.5)(fc)
    output = Dense(1, activation='linear')(fc)

    model = Model(inputs=input, outputs=output)
    model.compile(optimizer='adam',
                  metrics=['mse'],
                  loss='mse')
    model.summary()
    return model
Example #28
def attention2(X_train, y_train, X_test, y_test, vocab_size):
    X_train = sequence.pad_sequences(X_train, maxlen=MAX_LEN)
    X_test = sequence.pad_sequences(X_test, maxlen=MAX_LEN)

    print('Build model...')

    input_ = Input(shape=(input_length, input_dim))
    lstm = GRU(HID_DIM,  # was self.HID_DIM; this is a plain function, so a module-level constant is assumed
               input_dim=input_dim,
               input_length=input_length,
               return_sequences=True)(input_)
    att = TimeDistributed(Dense(1))(lstm)
    att = Flatten()(att)
    att = Activation(activation="softmax")(att)
    att = RepeatVector(HID_DIM)(att)
    att = Permute((2, 1))(att)
    mer = merge([att, lstm], "mul")
    hid = AveragePooling1D(pool_length=input_length)(mer)
    hid = Flatten()(hid)
Example #29
def get_shallow_convnet(window_size=4096, channels=2, output_size=84):
    inputs = Input(shape=(window_size, channels))

    conv = ComplexConv1D(32, 512, strides=16, activation='relu')(inputs)
    pool = AveragePooling1D(pool_size=4, strides=2)(conv)

    pool = Permute([2, 1])(pool)
    flattened = Flatten()(pool)

    dense = ComplexDense(2048, activation='relu')(flattened)
    predictions = ComplexDense(output_size,
                               activation='sigmoid',
                               bias_initializer=Constant(value=-5))(dense)
    predictions = GetReal(predictions)
    model = Model(inputs=inputs, outputs=predictions)

    model.compile(optimizer=Adam(lr=1e-4),
                  loss='binary_crossentropy',
                  metrics=['accuracy'])
    return model
Example #30
    def build(input_shape, num_outputs, block_fn, repetitions):
        _handle_dim_ordering()
        if len(input_shape) != 2:
            raise Exception(
                "Input shape should be a tuple (nb_channels, nb_rows)")

        # Permute dimension order if necessary
        if K.image_dim_ordering() == 'tf':
            input_shape = (input_shape[1], input_shape[0])

        # Load function from str if needed.
        block_fn = _get_block(block_fn)

        input = Input(shape=input_shape)
        conv1 = _conv_bn_relu(filters=64, kernel_size=7, strides=2)(input)
        pool1 = MaxPooling1D(pool_size=3, strides=2, padding="same")(conv1)

        block = pool1
        # filters = 64
        filters = 32
        for i, r in enumerate(repetitions):
            block = _residual_block(block_fn,
                                    filters=filters,
                                    repetitions=r,
                                    is_first_layer=(i == 0))(block)
            filters *= 2

        # Last activation
        block = _bn_relu(block)

        # Classifier block
        block_shape = K.int_shape(block)
        pool2 = AveragePooling1D(pool_size=block_shape[ROW_AXIS],
                                 strides=1)(block)
        flatten1 = Flatten()(pool2)
        dense = Dense(units=num_outputs,
                      kernel_initializer="he_normal",
                      activation="softmax")(flatten1)

        model = Model(inputs=input, outputs=dense)
        return model
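Pooling over the full temporal extent and then flattening, as in the classifier block above, is equivalent to a single GlobalAveragePooling1D; a sketch of the more direct head (an alternative, not the source's code):

pooled = GlobalAveragePooling1D()(block)  # same result as full-width AveragePooling1D + Flatten
dense = Dense(units=num_outputs,
              kernel_initializer="he_normal",
              activation="softmax")(pooled)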