Example #1
def LSTMCNN(embeddingMatrix=None,
            embed_size=300,
            max_features=20000,
            maxlen=100,
            filter_sizes=(2, 3, 4, 5),
            use_fasttext=False,
            trainable=True,
            use_additive_emb=False):
    if use_fasttext:
        inp = Input(shape=(maxlen, embed_size))
        x = inp
    else:
        inp = Input(shape=(maxlen, ))
        x = Embedding(input_dim=max_features,
                      output_dim=embed_size,
                      weights=[embeddingMatrix],  # must be provided when use_fasttext is False
                      trainable=trainable)(inp)

    if use_additive_emb:
        x = AdditiveLayer()(x)
        x = Dropout(0.5)(x)

    x = Bidirectional(CuDNNLSTM(128, return_sequences=True))(x)

    conv_ops = []
    for filter_size in filter_sizes:
        conv = Conv1D(128, filter_size, activation='relu')(x)
        pool = MaxPool1D(5)(conv)
        conv_ops.append(pool)

    concat = Concatenate(axis=1)(conv_ops)
    concat = Dropout(0.5)(concat)
    # concat = BatchNormalization()(concat)

    conv_2 = Conv1D(128, 5, activation='relu')(concat)
    conv_2 = MaxPool1D(5)(conv_2)
    # conv_2 = BatchNormalization()(conv_2)
    conv_2 = Dropout(0.5)(conv_2)

    # conv_3 = Conv1D(128, 5, activation = 'relu')(conv_2)
    # conv_3 = MaxPool1D(5)(conv_3)
    # conv_3 = BatchNormalization()(conv_3)
    # conv_3 = Dropout(0.1)(conv_3)

    flat = Flatten()(conv_2)

    op = Dense(64, activation="relu")(flat)
    op = Dropout(0.5)(op)
    # op = BatchNormalization()(op)
    op = Dense(1, activation="sigmoid")(op)

    model = Model(inputs=inp, outputs=op)
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy', f1])
    return model
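
The compile step above references an f1 metric that the snippet assumes is defined elsewhere. A minimal sketch of the common Keras-backend version (computed per batch, so only an approximation of corpus-level F1):

from keras import backend as K

def f1(y_true, y_pred):
    # Threshold the sigmoid outputs at 0.5, then compute batch-wise
    # precision and recall; K.epsilon() guards against division by zero.
    y_pred = K.round(y_pred)
    tp = K.sum(y_true * y_pred)
    precision = tp / (K.sum(y_pred) + K.epsilon())
    recall = tp / (K.sum(y_true) + K.epsilon())
    return 2 * precision * recall / (precision + recall + K.epsilon())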
Example #2
def CNN():
    model = Sequential()
    model.add(Embedding(len(vocab) + 1, 256, input_length=20))

    # Convolutional model (3x conv, flatten, 2x dense)
    model.add(Convolution1D(256, 3, padding='same'))
    model.add(MaxPool1D(3, 3, padding='same'))
    model.add(Convolution1D(128, 3, padding='same'))
    model.add(MaxPool1D(3, 3, padding='same'))
    model.add(Convolution1D(64, 3, padding='same'))
    model.add(Flatten())
    model.add(Dropout(0.1))
    model.add(BatchNormalization())
    model.add(Dense(256, activation='relu'))
    model.add(Dropout(0.1))
    model.add(Dense(num_labels, activation='softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    model.summary()

    # Training hyperparameters
    EPOCHS = 12

    H = model.fit(x_train_padded_seqs,
                  y_train,
                  batch_size=32,
                  epochs=EPOCHS,
                  validation_data=(x_test_padded_seqs, y_test))

    # Show Result
    N = np.arange(0, EPOCHS)
    plt.style.use("ggplot")
    plt.figure()
    plt.plot(N, H.history["loss"], label="train_loss")
    plt.plot(N, H.history["val_loss"], label="val_loss")
    plt.plot(N, H.history["accuracy"], label="train_acc")
    plt.plot(N, H.history["val_accuracy"], label="val_acc")
    plt.title("Training Loss and Accuracy (CNN)")
    plt.xlabel("Epoch #")
    plt.ylabel("Loss/Accuracy")
    plt.legend()
    plt.savefig('./output/CNN2.0.png')

    loss, accuracy = model.evaluate(x_test_padded_seqs, y_test)
    print('\ntest loss: ', loss)
    print('\ntest accuracy: ', accuracy)

    loss, accuracy = model.evaluate(x_train_padded_seqs, y_train)
    print('\ntrain loss: ', loss)
    print('\ntrain accuracy: ', accuracy)

    model.save('./Model/CNNModel2.0.h5')
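
CNN() relies on several globals (vocab, num_labels, x_train_padded_seqs, x_test_padded_seqs, y_train, y_test). A hypothetical preparation sketch for the training side, assuming raw texts and integer labels; the names mirror the globals the snippet expects:

from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.utils import to_categorical

texts = ["first example document", "another short text"]  # placeholder corpus
labels = [0, 1]                                           # placeholder labels

tokenizer = Tokenizer()
tokenizer.fit_on_texts(texts)
vocab = tokenizer.word_index
num_labels = 2

# Pad every sequence to the input_length=20 used by the Embedding layer.
x_train_padded_seqs = pad_sequences(tokenizer.texts_to_sequences(texts), maxlen=20)
y_train = to_categorical(labels, num_classes=num_labels)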
Example #3
def create_model(init_mode='glorot_uniform',
                 activation='relu',
                 dropout_rate=0.5,
                 neurons=64,
                 optimizer='sgd',
                 filters=8):

    #print(init_mode, activation, dropout_rate, neurons, optimizer, filters )
    seed = 7
    np.random.seed(seed)
    model = Sequential()
    model.add(
        Conv1D(filters=filters,
               kernel_size=11,
               kernel_initializer=init_mode,
               activation=activation,
               input_shape=x_train_sm.shape[1:]))
    model.add(MaxPool1D(strides=4))
    model.add(BatchNormalization())
    model.add(
        Conv1D(filters=(2 * filters),
               kernel_size=11,
               kernel_initializer=init_mode,
               activation=activation))
    model.add(MaxPool1D(strides=4))
    model.add(BatchNormalization())
    model.add(
        Conv1D(filters=(4 * filters),
               kernel_size=11,
               kernel_initializer=init_mode,
               activation=activation))
    model.add(MaxPool1D(strides=4))
    model.add(BatchNormalization())
    model.add(
        Conv1D(filters=(8 * filters),
               kernel_size=11,
               kernel_initializer=init_mode,
               activation=activation))
    model.add(MaxPool1D(strides=4))
    model.add(Flatten())
    model.add(Dropout(dropout_rate))
    model.add(
        Dense(units=neurons,
              activation=activation,
              kernel_initializer=init_mode))
    model.add(Dropout(dropout_rate / 2))
    model.add(
        Dense(units=neurons,
              activation=activation,
              kernel_initializer=init_mode))
    model.add(Dense(1, activation='sigmoid', kernel_initializer=init_mode))
    #model.compile(optimizer="sgd", loss = 'binary_crossentropy', metrics=['accuracy'])
    #model.compile(optimizer=optimizer, loss = 'binary_crossentropy', metrics=['accuracy', f1_m,precision_m, recall_m])
    model.compile(optimizer=optimizer, loss='binary_crossentropy')
    return model
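
The keyword hyperparameters (init_mode, activation, optimizer, filters, ...) match the builder signature scikit-learn's Keras wrapper expects, which suggests create_model was written for a hyperparameter search. A sketch under that assumption; x_train_sm and y_train_sm are the training arrays the snippet references:

from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import GridSearchCV

clf = KerasClassifier(build_fn=create_model, epochs=5, batch_size=32, verbose=0)
param_grid = {'optimizer': ['sgd', 'adam'], 'filters': [8, 16]}
grid = GridSearchCV(estimator=clf, param_grid=param_grid, cv=3)
# grid_result = grid.fit(x_train_sm, y_train_sm)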
Example #4
def get_base_model():
    inp = Input(shape=(10, 24))
    img_1 = Convolution1D(16,
                          kernel_size=5,
                          activation=activations.relu,
                          padding="valid")(inp)
    img_1 = Convolution1D(16,
                          kernel_size=5,
                          activation=activations.relu,
                          padding="valid")(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = SpatialDropout1D(rate=0.01)(img_1)
    img_1 = Convolution1D(32,
                          kernel_size=3,
                          activation=activations.relu,
                          padding="valid")(img_1)
    img_1 = Convolution1D(32,
                          kernel_size=3,
                          activation=activations.relu,
                          padding="valid")(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = SpatialDropout1D(rate=0.01)(img_1)
    img_1 = Convolution1D(32,
                          kernel_size=3,
                          activation=activations.relu,
                          padding="valid")(img_1)
    img_1 = Convolution1D(32,
                          kernel_size=3,
                          activation=activations.relu,
                          padding="valid")(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = SpatialDropout1D(rate=0.01)(img_1)
    img_1 = Convolution1D(256,
                          kernel_size=3,
                          activation=activations.relu,
                          padding="valid")(img_1)
    img_1 = Convolution1D(256,
                          kernel_size=3,
                          activation=activations.relu,
                          padding="valid")(img_1)
    img_1 = GlobalMaxPool1D()(img_1)
    img_1 = Dropout(rate=0.01)(img_1)

    dense_1 = Dropout(0.01)(Dense(64,
                                  activation=activations.relu,
                                  name="dense_1")(img_1))

    base_model = models.Model(inputs=inp, outputs=dense_1)
    opt = optimizers.Adam(0.001)

    base_model.compile(optimizer=opt,
                       loss=losses.sparse_categorical_crossentropy,
                       metrics=['acc'])
    # base_model.summary()
    return base_model
Example #5
def get_model():
    nclass = len(list_labels)
    inp = Input(shape=(input_length, 1))
    img_1 = Convolution1D(16,
                          kernel_size=9,
                          activation="relu",
                          padding="valid")(inp)
    img_1 = Convolution1D(16,
                          kernel_size=9,
                          activation="relu",
                          padding="valid")(img_1)
    img_1 = MaxPool1D(pool_size=16)(img_1)
    img_1 = Dropout(rate=0.1)(img_1)
    img_1 = Convolution1D(32,
                          kernel_size=3,
                          activation="relu",
                          padding="valid")(img_1)
    img_1 = Convolution1D(32,
                          kernel_size=3,
                          activation="relu",
                          padding="valid")(img_1)
    img_1 = MaxPool1D(pool_size=4)(img_1)
    img_1 = Dropout(rate=0.1)(img_1)
    img_1 = Convolution1D(32,
                          kernel_size=3,
                          activation="relu",
                          padding="valid")(img_1)
    img_1 = Convolution1D(32,
                          kernel_size=3,
                          activation="relu",
                          padding="valid")(img_1)
    img_1 = MaxPool1D(pool_size=4)(img_1)
    img_1 = Dropout(rate=0.1)(img_1)
    img_1 = Convolution1D(256,
                          kernel_size=3,
                          activation="relu",
                          padding="valid")(img_1)
    img_1 = Convolution1D(256,
                          kernel_size=3,
                          activation="relu",
                          padding="valid")(img_1)
    img_1 = GlobalMaxPool1D()(img_1)
    img_1 = Dropout(rate=0.2)(img_1)

    dense_1 = Dense(64, activation="relu")(img_1)
    dense_1 = Dense(1028, activation="relu")(dense_1)
    dense_1 = Dense(nclass, activation="softmax")(dense_1)

    model = Model(inputs=inp, outputs=dense_1)

    model.compile(optimizer=Adam(0.001),
                  loss=sparse_categorical_crossentropy,
                  metrics=['acc'])
    model.summary()
    return model
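
get_model() reads the globals list_labels and input_length, and the sparse categorical loss expects integer class ids rather than one-hot vectors. A smoke-test sketch with hypothetical values, assuming the snippet's imports (Adam, sparse_categorical_crossentropy) are in scope:

import numpy as np

list_labels = ['yes', 'no', 'up', 'down']  # hypothetical label set
input_length = 16000                       # hypothetical 1 s clip at 16 kHz

model = get_model()
x = np.random.randn(8, input_length, 1).astype('float32')
y = np.random.randint(0, len(list_labels), size=(8,))
model.train_on_batch(x, y)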
Example #6
def VDCNN(embed_type, maxlen=250, filter_sizes=(2, 3, 4, 5)):
    embed_size = utils.get_embedding_dim(embed_type)
    inp = Input(shape=(maxlen, embed_size))
    x = inp

    conv_ops = []
    for filter_size in filter_sizes:
        conv = Conv1D(256, filter_size, activation='relu')(x)
        pool = MaxPool1D(5)(conv)
        conv_ops.append(pool)

    concat = Concatenate(axis=1)(conv_ops)
    # concat = Dropout(0.1)(concat)
    concat = BatchNormalization()(concat)

    conv_2_main = Conv1D(256, 5, activation='relu', padding='same')(concat)
    conv_2_main = BatchNormalization()(conv_2_main)
    conv_2_main = Conv1D(256, 5, activation='relu',
                         padding='same')(conv_2_main)
    conv_2_main = BatchNormalization()(conv_2_main)
    conv_2 = Add()([concat, conv_2_main])
    conv_2 = MaxPool1D(pool_size=2, strides=2)(conv_2)
    # conv_2 = BatchNormalization()(conv_2)
    # conv_2 = Dropout(0.1)(conv_2)

    conv_3_main = Conv1D(256, 5, activation='relu', padding='same')(conv_2)
    conv_3_main = BatchNormalization()(conv_3_main)
    conv_3_main = Conv1D(256, 5, activation='relu',
                         padding='same')(conv_3_main)
    conv_3_main = BatchNormalization()(conv_3_main)
    conv_3 = Add()([conv_2, conv_3_main])
    conv_3 = MaxPool1D(pool_size=2, strides=2)(conv_3)
    # conv_3 = BatchNormalization()(conv_3)
    # conv_3 = Dropout(0.1)(conv_3)

    flat = Flatten()(conv_3)

    op = Dense(256, activation="relu")(flat)
    op = Dropout(0.5)(op)
    op = BatchNormalization()(op)
    op = Dense(128, activation="relu")(op)
    op = Dropout(0.5)(op)
    op = BatchNormalization()(op)
    op = Dense(3, activation="softmax")(op)

    model = Model(inputs=inp, outputs=op)

    model.compile(loss='sparse_categorical_crossentropy',
                  optimizer='adam',
                  metrics=[
                      'sparse_categorical_accuracy',
                      km.sparse_categorical_f1_score()
                  ])
    return model, 'VDCNN_{}.hdf5'.format(embed_type)
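
The residual Add() calls work only because the inner convolutions use padding='same' (sequence length preserved), and the four 'valid' branches all pool to the same length before concatenation. A standalone sketch of that length arithmetic:

def conv_out_len(n, kernel, stride=1):  # 'valid' convolution
    return (n - kernel) // stride + 1

def pool_out_len(n, pool):              # MaxPool1D; stride defaults to pool size
    return (n - pool) // pool + 1

for k in (2, 3, 4, 5):
    print(k, pool_out_len(conv_out_len(250, k), 5))  # every branch -> 49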
Example #7
def build_cnn_model():
    '''
    The CNN adapts to the number of input channels: the channel dimension
    may vary without affecting the model definition.
    '''
    model = Sequential()
    model.add(
        Conv1D(filters=8,
               kernel_size=3,
               strides=1,
               padding="same",
               activation='relu'))
    model.add(
        Conv1D(filters=8,
               kernel_size=3,
               strides=1,
               padding="same",
               activation='relu'))
    model.add(MaxPool1D(strides=2))

    model.add(
        Conv1D(filters=16,
               kernel_size=3,
               strides=1,
               padding="same",
               activation='relu'))
    model.add(
        Conv1D(filters=16,
               kernel_size=3,
               strides=1,
               padding="same",
               activation='relu'))
    model.add(MaxPool1D(strides=2))

    model.add(
        Conv1D(filters=32,
               kernel_size=3,
               strides=1,
               padding="same",
               activation='relu'))
    model.add(
        Conv1D(filters=32,
               kernel_size=3,
               strides=1,
               padding="same",
               activation='relu'))

    model.add(GlobalAveragePooling1D())
    # model.add(Reshape((3, 4), input_shape=(12,)))
    model.add(Dense(units=8, activation='tanh'))
    model.add(Dense(units=1, activation='linear'))

    adam = optimizers.Adam(lr=0.001, clipvalue=0.05)
    model.compile(loss=MLE_loss, optimizer=adam)
    return model
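
Demonstrating the docstring's point: because no input_shape is given, the channel dimension is fixed only when the model is first built (here, on the first predict call), so the same definition serves any channel count. A sketch, assuming MLE_loss and the optimizers import are in scope as in the snippet:

import numpy as np

model = build_cnn_model()
print(model.predict(np.random.rand(2, 128, 6)).shape)  # (2, 1) for a six-channel series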
Example #8
def lstm_run(load_data):

    os.environ["CUDA_VISIBLE_DEVICES"] = "2"
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    sess = tf.Session(config=config)
    K.set_session(sess)

    max_features = 20000
    maxlen = 200  # cut texts after this number of words (among top max_features most common words)
    batch_size = 164

    print('Loading data...')
    (x_train, y_train), (x_test, y_test) = load_data
    print(len(x_train), 'train sequences')
    print(len(x_test), 'test sequences')

    print('Pad sequences (samples x time)')
    x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
    x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
    print('x_train shape:', x_train.shape)
    print('x_test shape:', x_test.shape)

    print('Build model...')
    model = Sequential()
    model.add(Embedding(max_features, 128))
    model.add(Conv1D(kernel_size=30, filters=2, activation="relu"))
    model.add(MaxPool1D(pool_size=2))
    model.add(Conv1D(kernel_size=30, filters=2, activation="relu"))
    model.add(MaxPool1D(pool_size=2))
    model.add(Conv1D(kernel_size=30, filters=2, activation="relu"))
    model.add(MaxPool1D(pool_size=2))
    model.add(LSTM(30))
    model.add(Dropout(0.5))
    model.add(Dense(10, activation='tanh'))
    model.add(Dropout(0.5))
    model.add(Dense(1, activation='sigmoid'))

    # try using different optimizers and different optimizer configs
    model.compile(loss='binary_crossentropy',
                  optimizer='adam',
                  metrics=['accuracy'])

    print('Train...')
    history_callback = model.fit(x_train,
                                 y_train,
                                 batch_size=batch_size,
                                 epochs=20,
                                 validation_data=(x_test, y_test))
    score, acc = model.evaluate(x_test, y_test, batch_size=batch_size)
    print('Test score:', score)
    print('Test accuracy:', acc)

    return (history_callback.history, acc, score, model)
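
The load_data argument has the same shape as the tuple returned by the Keras IMDB loader, so a plausible call is:

from keras.datasets import imdb

history, acc, score, model = lstm_run(imdb.load_data(num_words=20000))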
Example #9
def get_profile_neural_network(n_steps_out, n_steps_in):
    """
    Get A instance of the profile neural network
    :param n_steps_out: The number of prediction, the network should make (horizon)
    :return: A keras model
    """

    conv_input = Input(shape=(n_steps_in, ), name="hist_input")
    conv = Reshape((n_steps_in, 1))(conv_input)
    conv = Conv1D(4, [3], activation=elu, padding='same')(conv)
    conv = MaxPool1D(pool_size=2)(conv)
    conv = Conv1D(1, [7], activation=elu, padding='same')(conv)
    conv = MaxPool1D(pool_size=2)(conv)
    conv = Flatten()(conv)

    trend_input = Input(shape=(n_steps_out, 10), name="full_trend")
    trend = Dense(8, activation=elu)(trend_input)
    trend = Dense(4, activation=elu)(trend)
    trend = Conv1D(4, [5], activation=elu, padding='same')(trend)
    trend = Conv1D(1, [5], activation=elu, padding='same')(trend)

    dummy_input = Input(shape=(n_steps_out, 16), name="dummy_input")
    dummy = Conv1D(2, [7], activation=elu, padding='same')(dummy_input)
    dummy = Conv1D(1, [7], activation=elu, padding='same')(dummy)
    dummy = Flatten()(dummy)

    conv = Dense(n_steps_out)(conv)
    conv = Reshape((n_steps_out, 1))(conv)
    dummy = Reshape((n_steps_out, 1))(dummy)
    fc = concatenate([dummy, conv], axis=2)
    fc = Conv1D(16, [7], padding='same', activation=elu)(fc)
    fc = SpatialDropout1D(rate=0.3)(fc)
    fc = Conv1D(8, [7], padding='same', activation=elu)(fc)
    fc = SpatialDropout1D(rate=0.3)(fc)
    fc = Conv1D(1, [7], padding='same')(fc)

    profile_input = Input(shape=(n_steps_out, ), name="profile")
    profile = Reshape((n_steps_out, 1))(profile_input)

    out = concatenate([fc, profile, trend])
    out = Conv1D(1, [1],
                 padding='same',
                 use_bias=False,
                 activation=linear,
                 kernel_initializer=Constant(value=1 / 3))(out)
    pred = Flatten()(out)

    model = Model(inputs=[conv_input, trend_input, dummy_input, profile_input],
                  outputs=pred)
    model.compile(optimizer=optimizers.Adam(),
                  loss=sum_squared_error,
                  metrics=[root_mean_squared_error])
    return model
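
With four named Input layers, the model can be fed a dict keyed by input name. A sketch with random data and hypothetical window sizes, assuming the custom loss and metric used in compile are in scope:

import numpy as np

n_in, n_out = 96, 24  # hypothetical history/horizon lengths
model = get_profile_neural_network(n_out, n_in)
batch = {
    "hist_input": np.random.rand(4, n_in),
    "full_trend": np.random.rand(4, n_out, 10),
    "dummy_input": np.random.rand(4, n_out, 16),
    "profile": np.random.rand(4, n_out),
}
print(model.predict(batch).shape)  # (4, 24)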
Example #10
def get_1d_res_model(config):

    nclass = config.n_classes
    input_length = config.audio_length

    inp = Input(shape=(input_length, 1))
    x = conv1d_bn(inp, 16, 3, padding='same')
    x = conv1d_bn(x, 16, 3, padding='same')
    x = conv1d_bn(x, 16, 3, padding='same')

    x = MaxPool1D(16)(x)
    x = Dropout(rate=0.5)(x)

    for i in range(4):
        x = block_residual(x, 32, 3, i)
        #x = Dropout(rate=0.1)(x)

    x = MaxPool1D(8)(x)
    x = Dropout(rate=0.4)(x)

    for i in range(8):
        x = block_residual(x, 64, 3, i)
        #x = Dropout(rate=0.1)(x)

    x = MaxPool1D(16)(x)
    x = Dropout(rate=0.3)(x)

    for i in range(16):
        x = block_residual(x, 128, 3, i)
        #x = Dropout(rate=0.1)(x)

    x = GlobalMaxPool1D()(x)
    x = Dropout(rate=0.2)(x)

    x = Dense(1024, kernel_initializer='he_normal')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.1)(x)
    x = Dropout(rate=0.5)(x)

    x = Dense(256, kernel_initializer='he_normal')(x)
    x = BatchNormalization()(x)
    x = LeakyReLU(0.1)(x)
    x = Dropout(rate=0.2)(x)

    out = Dense(nclass, activation=softmax)(x)

    model = models.Model(inputs=inp, outputs=out)
    opt = optimizers.Adam(config.learning_rate)

    model.compile(optimizer=opt,
                  loss=losses.categorical_crossentropy,
                  metrics=['acc'])
    return model
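
The snippet assumes two helpers that are not shown, conv1d_bn and block_residual. A plausible sketch of the first (Conv1D followed by batch normalization and an activation); the original definition may differ:

from keras.layers import Conv1D, BatchNormalization, Activation

def conv1d_bn(x, filters, kernel_size, padding='same', activation='relu'):
    # Convolution without bias (the following BatchNormalization supplies
    # its own offset), then normalize and activate.
    x = Conv1D(filters, kernel_size, padding=padding, use_bias=False)(x)
    x = BatchNormalization()(x)
    return Activation(activation)(x)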
Example #11
 def test_fit_octave(self):
     inputs = Input(shape=(32, 3))
     high, low = OctaveConv1D(13, kernel_size=3, octave=4)(inputs)
     high, low = MaxPool1D()(high), MaxPool1D()(low)
     conv = OctaveConv1D(5, kernel_size=3, octave=4,
                         ratio_out=0.0)([high, low])
     flatten = Flatten()(conv)
     outputs = Dense(units=2, activation='softmax')(flatten)
     model = Model(inputs=inputs, outputs=outputs)
     model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
     model.summary(line_length=200)
     self._test_fit(model)
Example #12
    def predictions_1d_conv_double_deep_unif(self, config):

        nclass = config.n_classes
        input_length = config.audio_length

        inp = Input(shape=(input_length, 1))
        x = Convolution1D(16, 9, activation=relu, padding="valid")(inp)
        x = Convolution1D(16, 9, activation=relu, padding="valid")(x)
        x = Convolution1D(16, 9, activation=relu, padding="valid")(x)
        x = Convolution1D(16, 9, activation=relu, padding="valid")(x)
        x = Convolution1D(16, 9, activation=relu, padding="valid")(x)
        x = Convolution1D(16, 9, activation=relu, padding="valid")(x)
        x = MaxPool1D(16)(x)
        x = Dropout(rate=0.1)(x)

        x = Convolution1D(32, 3, activation=relu, padding="valid")(x)
        x = Convolution1D(32, 3, activation=relu, padding="valid")(x)
        x = Convolution1D(32, 3, activation=relu, padding="valid")(x)
        x = Convolution1D(32, 3, activation=relu, padding="valid")(x)
        x = Convolution1D(32, 3, activation=relu, padding="valid")(x)
        x = Convolution1D(32, 3, activation=relu, padding="valid")(x)
        x = MaxPool1D(4)(x)
        x = Dropout(rate=0.1)(x)

        x = Convolution1D(32, 3, activation=relu, padding="valid")(x)
        x = Convolution1D(32, 3, activation=relu, padding="valid")(x)
        x = Convolution1D(32, 3, activation=relu, padding="valid")(x)
        x = Convolution1D(32, 3, activation=relu, padding="valid")(x)
        x = Convolution1D(32, 3, activation=relu, padding="valid")(x)
        x = Convolution1D(32, 3, activation=relu, padding="valid")(x)
        x = MaxPool1D(4)(x)
        x = Dropout(rate=0.1)(x)

        x = Convolution1D(256, 3, activation=relu, padding="valid")(x)
        x = Convolution1D(256, 3, activation=relu, padding="valid")(x)
        x = Convolution1D(256, 3, activation=relu, padding="valid")(x)
        x = Convolution1D(256, 3, activation=relu, padding="valid")(x)
        x = Convolution1D(256, 3, activation=relu, padding="valid")(x)
        x = Convolution1D(256, 3, activation=relu, padding="valid")(x)
        x = GlobalMaxPool1D()(x)
        x = Dropout(rate=0.2)(x)

        x = Dense(64, activation=relu)(x)
        x = Dense(1028, activation=relu)(x)
        out = Dense(nclass, activation=softmax)(x)

        model = models.Model(inputs=inp, outputs=out)
        opt = optimizers.Adam(config.learning_rate)

        model.compile(optimizer=opt,
                      loss=losses.categorical_crossentropy,
                      metrics=['acc'])
        return model
Example #13
def model_basic(num_frame,num_sing):
	pos_anchor = Input(shape = (num_frame,128))

	# item model **audio**
	conv1 = Conv1D(128,4,padding='same',use_bias=True,kernel_regularizer=l2(1e-5),kernel_initializer='he_uniform')
	bn1 = BatchNormalization()
	activ1 = Activation('relu')
	MP1 = MaxPool1D(pool_size=4)
	conv2 = Conv1D(128,4,padding='same',use_bias=True,kernel_regularizer=l2(1e-5),kernel_initializer='he_uniform')
	bn2 = BatchNormalization()
	activ2 = Activation('relu')
	MP2 = MaxPool1D(pool_size=4)
	conv3 = Conv1D(128,4,padding='same',use_bias=True,kernel_regularizer=l2(1e-5),kernel_initializer='he_uniform')
	bn3 = BatchNormalization()
	activ3 = Activation('relu')
	MP3 = MaxPool1D(pool_size=4)
	conv4 = Conv1D(128,2,padding='same',use_bias=True,kernel_regularizer=l2(1e-5),kernel_initializer='he_uniform')
	bn4 = BatchNormalization()
	activ4 = Activation('relu')
	MP4 = MaxPool1D(pool_size=2)
	conv5 = Conv1D(256,1,padding='same',use_bias=True,kernel_regularizer=l2(1e-5),kernel_initializer='he_uniform')
	bn5 = BatchNormalization()
	activ5 = Activation('relu')
	drop1 = Dropout(0.5)

	item_sem = GlobalAvgPool1D()
	
	# pos anchor
	pos_anchor_conv1 = conv1(pos_anchor)
	pos_anchor_bn1 = bn1(pos_anchor_conv1)
	pos_anchor_activ1 = activ1(pos_anchor_bn1)
	pos_anchor_MP1 = MP1(pos_anchor_activ1)
	pos_anchor_conv2 = conv2(pos_anchor_MP1)
	pos_anchor_bn2 = bn2(pos_anchor_conv2)
	pos_anchor_activ2 = activ2(pos_anchor_bn2)
	pos_anchor_MP2 = MP2(pos_anchor_activ2)
	pos_anchor_conv3 = conv3(pos_anchor_MP2)
	pos_anchor_bn3 = bn3(pos_anchor_conv3)
	pos_anchor_activ3 = activ3(pos_anchor_bn3)
	pos_anchor_MP3 = MP3(pos_anchor_activ3)
	pos_anchor_conv4 = conv4(pos_anchor_MP3)
	pos_anchor_bn4 = bn4(pos_anchor_conv4)
	pos_anchor_activ4 = activ4(pos_anchor_bn4)
	pos_anchor_MP4 = MP4(pos_anchor_activ4)
	pos_anchor_conv5 = conv5(pos_anchor_MP4)
	pos_anchor_bn5 = bn5(pos_anchor_conv5)
	pos_anchor_activ5 = activ5(pos_anchor_bn5)
	pos_anchor_sem = item_sem(pos_anchor_activ5)

	output = Dense(num_sing, activation='softmax')(pos_anchor_sem)
	model = Model(inputs = pos_anchor, outputs = output)
	return model
Example #14
 def test_fit_octave_conv_low(self):
     inputs = Input(shape=(32, 3))
     conv = octave_conv_1d(inputs, filters=13, kernel_size=3)
     pool = octave_dual(conv, MaxPool1D())
     conv = octave_conv_1d(pool, filters=7, kernel_size=3, name='Mid')
     pool = octave_dual(conv, MaxPool1D())
     conv = octave_conv_1d(pool, filters=5, kernel_size=3, ratio_out=1.0)
     flatten = octave_dual(conv, Flatten())
     outputs = Dense(units=2, activation='softmax')(flatten)
     model = Model(inputs=inputs, outputs=outputs)
     model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
     model.summary(line_length=200)
     self._test_fit(model)
Example #15
def mod_conv(conv):
    from keras.layers import Conv1D, MaxPool1D, GlobalMaxPooling1D
    modelconv = Sequential()
    modelconv.add(Embedding(input_dim=MAX_NB_WORDS, output_dim=100, input_length=X.shape[1]))
    modelconv.add(Conv1D(conv,5,activation='relu'))
    modelconv.add(MaxPool1D(3))
    modelconv.add(Conv1D(conv,5,activation='relu'))
    modelconv.add(MaxPool1D(3))
    modelconv.add(Conv1D(conv,5,activation='relu'))
    modelconv.add(GlobalMaxPooling1D())
    modelconv.add(Dense(1, activation='sigmoid'))
    modelconv.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
    return modelconv
Example #16
def cnn():
    model = Sequential()
    model.add(Conv1D(32, 5, input_shape=(300, 34)))
    model.add(Dropout(0.5))
    model.add(MaxPool1D())
    model.add(Conv1D(64, (5, )))
    model.add(MaxPool1D())
    model.add(Flatten())
    model.add(Dense(64,activation='relu'))
    model.add(Dense(3,activation='softmax'))
    model.summary()
    # mean_squared_error is unusual with a softmax classifier; categorical_crossentropy is the standard pairing
    model.compile(loss='mean_squared_error', optimizer="adam")
    return model
Example #17
 def test_make_dual_layer(self):
     inputs = Input(shape=(32, 3))
     conv = OctaveConv1D(13, kernel_size=3)(inputs)
     pool = octave_dual(conv, MaxPool1D())
     conv = OctaveConv1D(7, kernel_size=3)(pool)
     pool = octave_dual(conv, MaxPool1D())
     conv = OctaveConv1D(5, kernel_size=3, ratio_out=0.0)(pool)
     flatten = octave_dual(conv, Flatten())
     outputs = Dense(units=2, activation='softmax')(flatten)
     model = Model(inputs=inputs, outputs=outputs)
     model.compile(optimizer='adam', loss='sparse_categorical_crossentropy')
     model.summary(line_length=200)
     self._test_fit(model)
Example #18
 def __buildModel__(self,modelParams):
     '''
     This function builds the model.
     '''
     
     #===model parameters===
     embed_dim,n_featMap,kernel_size,strides,d_r,l2_reg=\
         (modelParams['embed_dim'],
          modelParams['n_featMap'],
          modelParams['kernel_size'],
          modelParams['strides'],
          modelParams['d_r'],
          modelParams['l2_reg'])
     #===model parameters===
     
     #===build model===
     #-input layer-
     max_seq_len=self.tox.trainFeatureMat.shape[1]
     dict_size=np.max((self.tox.trainFeatureMat.max(),
                       self.tox.testFeatureMat.max()))
     inputs = Input(shape=(max_seq_len,))
     #-input layer-
     
     #-embedding layer-
     embed = Embedding(input_dim=dict_size+1,output_dim=embed_dim,
                       input_length=max_seq_len)(inputs)
     #-embedding layer-
     
     #-convolutional units-
     conv0=Conv1D(filters=n_featMap,kernel_size=kernel_size[0],
                     strides=strides[0],activation='relu')(embed)
     conv1=Conv1D(filters=n_featMap,kernel_size=kernel_size[1],
                     strides=strides[1],activation='relu')(embed)
     conv2=Conv1D(filters=n_featMap,kernel_size=kernel_size[2],
                     strides=strides[2],activation='relu')(embed)
     #-convolutional units-
     
     #-max pool over all words-
     pool0 = MaxPool1D(pool_size=int(conv0.get_shape()[1]))(conv0)
     pool1 = MaxPool1D(pool_size=int(conv1.get_shape()[1]))(conv1)
     pool2 = MaxPool1D(pool_size=int(conv2.get_shape()[1]))(conv2)
     #-max pool over all words-
     
     #-dropout and output-
     concat = concatenate([pool0,pool1,pool2])
     features = Dropout(d_r)(Flatten()(concat))
     out = Dense(6,activation='sigmoid',
                 kernel_regularizer=regularizers.l2(l2_reg))(features)
     #-dropout and output-
     
     self.model = Model(inputs=inputs,outputs=out)
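
The pool_size=int(conv.get_shape()[1]) trick max-pools over every time step, i.e. global max pooling; GlobalMaxPooling1D does the same and also drops the length-1 axis. A self-contained equivalence check:

import numpy as np
from keras.layers import Input, MaxPool1D, GlobalMaxPooling1D, Flatten
from keras.models import Model

x = Input(shape=(18, 4))
a = Flatten()(MaxPool1D(pool_size=18)(x))
b = GlobalMaxPooling1D()(x)
m = Model(inputs=x, outputs=[a, b])
va, vb = m.predict(np.random.rand(2, 18, 4))
assert np.allclose(va, vb)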
Example #19
def createNetwork(seq_len):
    
    # Function to add a convolution layer with batch normalization
    def addConv(network, features, kernel):
        network = BatchNormalization()(network)
        return Conv1D(features, kernel, padding='same', activation='relu')(network)
    
    # Function to add a dense layer with batch normalization and dropout
    def addDense(network, size):
        network = BatchNormalization()(network)
        network = Dropout(0.2)(network)
        return Dense(size, activation='relu')(network)
    
    
    # Input layer
    input = Input(shape=(seq_len, 2))
    network = input
    
    # Add 1D Convolution
    for features in [16, 24, 32]:
        network = addConv(network, features, 5)
    network = MaxPool1D(pool_size=5)(network)
    
    # Add 1D Convolution
    for features in [64, 96, 128]:
        network = addConv(network, features, 5)
    network = MaxPool1D(pool_size=5)(network)

    # Add 1D Convolution
    for features in [256, 384, 512]:
        network = addConv(network, features, 5)
    #network = MaxPool1D(pool_size=5)(network)

    # Flatten
    network = Flatten()(network)
    
    # Dense layer for combination
    for size in [128, 128]:
        network = addDense(network, size)
    
    # Output layer
    output = Dense(len(files), activation='softmax')(network)


    # Create and compile model
    model = Model(inputs = input, outputs = output)
#     model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['acc'])

#     # Display model
#     model.summary()
    return model
Example #20
    def build(input_shape, classes):
        model = Sequential()
        # CONV => RELU => POOL
        # First convolutional layer: 16 kernels of size 3, 'same' padding, relu activation; the input tensor shape is given here
        model.add(
            Convolution1D(16,
                          kernel_size=3,
                          padding="same",
                          input_shape=input_shape,
                          kernel_regularizer=l2(0.01),
                          bias_regularizer=l2(0.01)))
        model.add(Activation("relu"))
        # Pooling layer, pool size 2
        model.add(MaxPool1D(pool_size=2))
        # CONV => RELU => POOL
        model.add(
            Convolution1D(32,
                          kernel_size=4,
                          padding="same",
                          input_shape=input_shape,
                          kernel_regularizer=l2(0.01),
                          bias_regularizer=l2(0.01)))
        model.add(Activation("relu"))
        model.add(MaxPool1D(pool_size=2))
        # CONV => RELU => POOL
        model.add(
            Convolution1D(64,
                          kernel_size=5,
                          padding="same",
                          input_shape=input_shape,
                          kernel_regularizer=l2(0.01),
                          bias_regularizer=l2(0.01)))
        model.add(Activation("relu"))
        model.add(MaxPool1D(pool_size=2))
        # Flatten => RELU layers
        model.add(Dropout(0.5))
        # Flatten the feature maps for the fully connected layers
        model.add(Flatten())
        # Hidden layer: number of neurons and activation function
        model.add(Dense(128))
        model.add(Activation("relu"))
        model.add(Dropout(0.5))

        # a softmax classifier
        model.add(Dense(classes))
        # output layer
        model.add(Activation("softmax"))
        # visualize the model architecture
        plot_model(model, to_file="SQL_CNN.png", show_shapes=True)
        display.Image('SQL_CNN.png')
        return model
Example #21
def architecture_basic(X, nbclasses, nb_conv=1, nb_fc=1,
                       dropout_rate=0.5, conv_filters=64,
                       pool_kernel=3,
                       conv_kernel=3, nb_neurons=256,
                       weight_initializer='he_normal'):
    # input size
    width, height, depth = X.shape
    input_shape = (height, depth)

    # parameters of the architecture
    l1_l2_rate = 1.e-3
    activation = relu

    model = Sequential(name=str(nb_conv) +
                            'CONV_' + str(nb_fc) +
                            'k' + str(conv_kernel) +
                            'f' + str(conv_filters) +
                            '_FC_' + str(nb_neurons) +
                            'dropout' + str(dropout_rate) +
                            'batch_norm' +
                            'pool' + str(pool_kernel))

    model.add(Conv1D(input_shape=input_shape,
                     activation=activation,
                     kernel_regularizer=l1_l2(l1_l2_rate),
                     kernel_initializer=weight_initializer,
                     kernel_size=conv_kernel,
                     filters=conv_filters))
    model.add(MaxPool1D(pool_size=pool_kernel, padding='same'))
    model.add(BatchNormalization())

    # if more convolutional layers are defined in the parameters
    if nb_conv > 1:
        for _layer in range(nb_conv - 1):
            model.add(Conv1D(kernel_size=conv_kernel, filters=conv_filters,
                             kernel_regularizer=l1_l2(l1_l2_rate),
                             activation=activation))
            model.add(MaxPool1D(pool_size=pool_kernel, padding='same'))
            model.add(BatchNormalization())

    # Flatten + FC layers
    model.add(Flatten())
    for _layer in range(nb_fc):
        model.add(Dense(nb_neurons,
                        kernel_regularizer=l1_l2(l1_l2_rate),
                        activation=activation))
        model.add(Dropout(dropout_rate))

    model.add(Dense(nbclasses, activation=softmax))

    return model
Example #22
def construct_model(classe_nums):
    model = Sequential()

    model.add(
        Conv1D(filters=256,
               kernel_size=3,
               strides=1,
               activation='relu',
               input_shape=(99, 40),
               name='block1_conv1'))
    model.add(MaxPool1D(pool_size=2, name='block1_pool1'))
    model.add(BatchNormalization(momentum=0.9, epsilon=1e-5, axis=1))  # caution: axis=1 normalizes over time steps; the channel axis of (batch, steps, channels) inputs is -1

    model.add(
        Conv1D(filters=256,
               kernel_size=3,
               strides=1,
               activation='relu',
               name='block1_conv2'))
    model.add(MaxPool1D(pool_size=2, name='block1_pool2'))

    model.add(Flatten(name='block1_flat1'))
    model.add(Dropout(0.5, name='block1_drop1'))

    model.add(Dense(512, activation='relu', name='block2_dense2'))
    model.add(MaxoutDense(512, nb_feature=4, name="block2_maxout2"))  # MaxoutDense is Keras 1.x API (removed in Keras 2)
    model.add(Dropout(0.5, name='block2_drop2'))

    model.add(
        Dense(512,
              activation='relu',
              name='block2_dense3',
              kernel_regularizer=l2(1e-4)))
    model.add(MaxoutDense(512, nb_feature=4, name="block2_maxout3"))

    model.summary()

    model_input = Input(shape=(99, 40))
    features = model(model_input)
    extract_feature_model = Model(inputs=model_input, outputs=features)

    category_predict = Dense(classe_nums, activation='softmax',
                             name="predict")(features)

    sr_model = Model(inputs=model_input, outputs=category_predict)

    plot_model(sr_model,
               to_file='model.png',
               show_shapes=True,
               show_layer_names=False)
    return extract_feature_model, sr_model
Example #23
 def build_model(self):
     model = Sequential()
     model.add(Embedding(self.vocab_size, self.vector_size, input_length=self.seq_length))
     model.add(Conv1D(128, kernel_size=5, activation='relu', padding='same'))
     model.add(MaxPool1D())
     model.add(Conv1D(128, kernel_size=3, activation='relu', padding='same'))
     # model.add(MaxPool1D())
     # model.add(Conv1D(128, kernel_size=3, activation='relu'))
     model.add(MaxPool1D())
     model.add(Dense(128, activation='relu'))
     model.add(Flatten())
     model.add(Dense(self.output_size, activation='softmax'))
     model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
     return model
Example #24
def architecture_CONV9_FC_batch_norm_dropout_pool(X, nbclasses, nb_conv=1, nb_fc=1):
    '''
    dropout (0.5) + pooling + batch norm + l1_l2 norm; conv kernel size 9
    '''
    # input size
    width, height, depth = X.shape
    input_shape = (height, depth)

    # parameters of the architecture
    l1_l2_rate = 1.e-3
    dropout_rate = 0.5
    conv_kernel = 9
    conv_filters = 64
    pool_kernel = 3
    nbunits_fc = 128
    activation = relu

    model = Sequential(name=str(nb_conv) +
                            'CONV_' + str(nb_fc) +
                            'k' + str(conv_kernel) +
                            'f' + str(conv_filters) +
                            '_FC128_' +
                            'dropout' + str(dropout_rate) +
                            'batch_norm' + 'pool' +
                            str(pool_kernel)
                       )
    model.add(Conv1D(input_shape=input_shape,
                     kernel_size=conv_kernel, filters=conv_filters))
    model.add(MaxPool1D(pool_size=pool_kernel, padding='same'))
    model.add(BatchNormalization())

    # if more convolutional layers are defined in the parameters
    if nb_conv > 1:
        for _layer in range(nb_conv - 1):
            model.add(Conv1D(kernel_size=conv_kernel, filters=conv_filters,
                             kernel_regularizer=l1_l2(l1_l2_rate),
                             activation=activation))
            model.add(MaxPool1D(pool_size=pool_kernel, padding='same'))
            model.add(BatchNormalization())

    # Flatten + FC layers
    model.add(Flatten())
    for _layer in range(nb_fc):
        model.add(Dense(nbunits_fc, kernel_regularizer=l1_l2(l1_l2_rate),
                        activation=activation))
        model.add(Dropout(dropout_rate))

    model.add(Dense(nbclasses, activation=softmax))

    return model
Example #25
def architecture_CONV_FC_batch_norm_dropout2(X, nbclasses, nb_conv=1, nb_fc=1):
    '''
    dropout + pooling + batch norm + l1_l2 norm; kernel size reduced to 3
    '''
    # input size
    width, height, depth = X.shape
    input_shape = (height, depth)

    # parameters of the architecture
    l1_l2_rate = 1.e-3
    dropout_rate = 0.8
    conv_kernel = 3
    conv_filters = 64
    nbunits_fc = 128
    activation = relu
    pool_kernel = 2

    model = Sequential(name=str(nb_conv) +
                            'CONV_' + 'k' + str(conv_kernel) +
                            'POOL' + str(pool_kernel) +
                            str(nb_fc) +
                            '_FC128_bn_d' + str(dropout_rate))
    model.add(Conv1D(input_shape=input_shape,
                     kernel_regularizer=l1_l2(l1_l2_rate),
                     kernel_initializer='he_normal',
                     activation=activation,
                     kernel_size=conv_kernel, filters=conv_filters))
    model.add(BatchNormalization())
    model.add(MaxPool1D(pool_kernel))

    # if more convolutional layers are defined in the parameters
    if nb_conv > 1:
        for _layer in range(nb_conv - 1):
            model.add(Conv1D(kernel_size=conv_kernel, filters=conv_filters,
                             activation=activation,
                             kernel_regularizer=l1_l2(l1_l2_rate)))
            model.add(BatchNormalization())
            model.add(MaxPool1D(pool_kernel))

    # Flatten + FC layers
    model.add(Flatten())
    for _layer in range(nb_fc):
        model.add(Dense(nbunits_fc,
                        kernel_regularizer=l1_l2(l1_l2_rate),
                        activation=activation))
        model.add(Dropout(dropout_rate))

    model.add(Dense(nbclasses, activation=softmax))

    return model
Example #26
def fullyConnected(data):
    learning_rate = 0.001
    clip_norm = 2.0
    
    sqrtDim = int(np.sqrt(Dimension))

    im_shape = (sqrtDim,sqrtDim)
    
    x_train,x_test,y_train,y_test = tts(data,y["target"],test_size=0.15)
    
    
    x_train=np.array(x_train)
    x_test=np.array(x_test)
    
    x_train_img =np.reshape(x_train,(len(x_train),sqrtDim,sqrtDim))
    x_test_img =np.reshape(x_test,(len(x_test),sqrtDim,sqrtDim))
    
    print("Images succesfully created")
    
    #Remember to reshape the input
    model = Sequential()
    model.add(Conv1D(filters=32, kernel_size=(12), 
                     input_shape=im_shape, activation='relu'))
    model.add(Conv1D(filters=64, kernel_size=(8), activation="relu"))
    model.add(MaxPool1D(pool_size=(10), strides=(5), padding="same"))
    model.add(Conv1D(filters=128, kernel_size=(5), activation="relu"))
    model.add(Conv1D(filters=256, kernel_size=(5), activation="relu"))
    model.add(MaxPool1D(pool_size=(10), strides=(4), padding="same"))
    model.add(Dropout(0.5))
    model.add(Flatten())
    
    #model.add(Dense(Dimension,input_dim = Dimension, kernel_initializer='normal', activation='relu'))
    model.add(Dense(1024,kernel_initializer='normal', activation='tanh'))
    model.add(Dense(2048,kernel_initializer='normal', activation='sigmoid'))
    model.add(Dropout(0.5))
    model.add(Dense(2048,kernel_initializer='normal', activation='relu'))
    model.add(Dense(1024,kernel_initializer='normal',activation='tanh'))
    model.add(Dense(512,kernel_initializer='normal',activation='sigmoid'))
    model.add(Dropout(0.5))
    model.add(Dense(256,kernel_initializer='normal',activation='tanh'))
    model.add(Dense(128,kernel_initializer='normal',activation='relu'))
    model.add(Dense(1,kernel_initializer='normal',activation='relu'))
    #rmsprop = RMSprop(lr=learning_rate,clipnorm=clip_norm)
    adam = Adam(lr=learning_rate,clipnorm=clip_norm)
    model.compile(loss=root_mean_squared_error, optimizer=adam, metrics=['mse'])
    
    print(model.summary())
    model.fit(x_train_img, np.array(y_train), validation_data=(x_test_img, np.array(y_test)),
              epochs=100, batch_size=70, verbose=1)
    return model,[x_test_img,np.array(y_test)]
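
root_mean_squared_error is a custom loss the snippet assumes is defined elsewhere; a common Keras-backend definition is:

from keras import backend as K

def root_mean_squared_error(y_true, y_pred):
    return K.sqrt(K.mean(K.square(y_pred - y_true)))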
Example #27
def get_model():
    nclass = 5
    inp = Input(shape=(187, 1))
    img_1 = Convolution1D(16,
                          kernel_size=5,
                          activation=activations.relu,
                          padding="valid")(inp)
    img_1 = Convolution1D(16,
                          kernel_size=5,
                          activation=activations.relu,
                          padding="valid")(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = Dropout(rate=0.1)(img_1)

    img_1 = Dropout(rate=0.1)(img_1)
    img_1 = Convolution1D(32,
                          kernel_size=3,
                          activation=activations.relu,
                          padding="valid")(img_1)
    img_1 = Convolution1D(32,
                          kernel_size=3,
                          activation=activations.relu,
                          padding="valid")(img_1)
    img_1 = MaxPool1D(pool_size=2)(img_1)
    img_1 = Dropout(rate=0.1)(img_1)
    img_1 = Convolution1D(256,
                          kernel_size=3,
                          activation=activations.relu,
                          padding="valid")(img_1)
    img_1 = Convolution1D(256,
                          kernel_size=3,
                          activation=activations.relu,
                          padding="valid")(img_1)
    img_1 = GlobalMaxPool1D()(img_1)
    img_1 = Dropout(rate=0.2)(img_1)

    dense_1 = Dense(64, activation=activations.relu, name="dense_1")(img_1)
    dense_1 = Dense(64, activation=activations.relu, name="dense_2")(dense_1)
    dense_1 = Dense(nclass,
                    activation=activations.softmax,
                    name="dense_3_mitbih")(dense_1)

    model = models.Model(inputs=inp, outputs=dense_1)
    opt = optimizers.Adam(0.001)

    model.compile(optimizer=opt,
                  loss=losses.sparse_categorical_crossentropy,
                  metrics=['acc'])
    model.summary()
    return model
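
The (187, 1) input matches fixed-length heartbeat segments, and the sparse loss expects integer labels 0-4. A smoke-test sketch with random stand-in data, assuming the snippet's imports are in scope:

import numpy as np

model = get_model()
x = np.random.randn(16, 187, 1).astype('float32')
y = np.random.randint(0, 5, size=(16,))
model.train_on_batch(x, y)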
Example #28
def create_model(config):
    # network branch for the content (body) text
    content_input = Input(shape=(content_len, ), name='content')
    emb_c = Embedding(config.tokenizer.num_words + 1, 512)(content_input)
    pool_output = []
    kernel_sizes = [2, 3, 4, 5]
    for kernel_size in kernel_sizes:
        #         emb_c = Dropout(0.3)(emb_c)
        c = Conv1D(filters=64, kernel_size=kernel_size, strides=1)(emb_c)
        p = MaxPool1D(pool_size=int(c.shape[1]))(c)
        pool_output.append(p)
    pool_output = concatenate(pool_output)
    normal = BatchNormalization()(pool_output)
    act = Activation("relu")(normal)
    feature_content = Flatten()(act)
    if not config.title_name:
        feature = feature_content
    else:
        title_input = Input(shape=(title_len, ), name='title')
        emb_t = Embedding(config.tokenizer.num_words + 1, 512)(title_input)
        pool_output = []
        kernel_sizes = [2, 3, 4, 5]
        for kernel_size in kernel_sizes:
            #         emb_t = Dropout(0.3)(emb_t)
            c = Conv1D(filters=64, kernel_size=kernel_size, strides=1)(emb_t)
            p = MaxPool1D(pool_size=int(c.shape[1]))(c)
            pool_output.append(p)
        pool_output = concatenate(pool_output)
        normal = BatchNormalization()(pool_output)
        act = Activation("relu")(normal)
        feature_title = Flatten()(act)
        feature = concatenate([feature_title, feature_content])
    output1 = Dense(len(config.label1_id), activation='softmax',
                    name="dl")(feature)
    if config.label2_name:
        output2 = Dense(len(config.label2_id), activation='softmax',
                        name="xl")(feature)
        if config.title_name:
            model = Model(inputs=[title_input, content_input],
                          outputs=[output1, output2])
        else:
            model = Model(inputs=[content_input], outputs=[output1, output2])
    else:
        if config.title_name:
            model = Model(inputs=[title_input, content_input],
                          outputs=[output1])
        else:
            model = Model(inputs=[content_input], outputs=[output1])
    return model
Example #29
    def __build_ann_architecture(self):
        init_tensorflow()
        # Define the inputs
        embedding_input = Input(shape=(self.vec_size, 1),
                                dtype='float32',
                                name='comment_text')

        # Define convolutional layers
        conv = Conv1D(384, 5, activation='relu')(embedding_input)
        conv = SpatialDropout1D(0.1)(conv)
        conv = MaxPool1D(2, strides=2, padding='valid')(conv)

        conv = Conv1D(192, 2, activation='relu')(conv)
        conv = SpatialDropout1D(0.1)(conv)
        conv = MaxPool1D(2, strides=2, padding='valid')(conv)

        conv = Conv1D(96, 2, activation='relu')(conv)
        conv = SpatialDropout1D(0.1)(conv)
        conv = MaxPool1D(2, strides=2, padding='valid')(conv)

        conv = Conv1D(48, 2, activation='relu')(conv)
        conv = SpatialDropout1D(0.1)(conv)
        conv = MaxPool1D(2, strides=2, padding='valid')(conv)

        conv = Conv1D(32, 2, activation='relu')(conv)
        conv = SpatialDropout1D(0.1)(conv)
        conv = MaxPool1D(2, strides=2, padding='valid')(conv)

        conv_output = Flatten()(conv)

        # Define dense layers
        # minimize the dense layers - maybe add one of 64
        x = Dense(416, activation='tanh')(conv_output)
        x = Dropout(0.2)(x)
        x = Dense(208, activation='tanh')(x)
        x = Dropout(0.2)(x)

        # And finally make the predictions using the previous layer as input
        main_output = Dense(self.classes,
                            activation='softmax',
                            name='prediction')(x)

        ann_model = Model(inputs=embedding_input, outputs=main_output)
        optimizer = Adam(learning_rate=self.learning_rate)
        ann_model.compile(optimizer=optimizer,
                          loss='categorical_crossentropy',
                          metrics=['accuracy'])

        self.ann_model = ann_model
Example #30
def convModel():
    model = Sequential()
    model.add(Conv1D(32, 4, input_shape=(maxlength, 26)))
    model.add(MaxPool1D(4))
    model.add(Conv1D(64, 8))
    model.add(MaxPool1D(8))
    # model.add(Conv1D(64,16))
    # model.add(MaxPool1D(16))
    model.add(Flatten())
    model.add(Dense(32, activation='relu'))
    model.add(Dense(2, activation='softmax'))
    model.compile(loss="categorical_crossentropy",
                  optimizer=RMSprop(lr=0.001),
                  metrics=[categorical_accuracy])
    return model
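
convModel() reads a global maxlength and expects 26-dimensional feature rows per step (e.g. one-hot letters). A quick shape check with a hypothetical length, assuming the RMSprop and categorical_accuracy imports are in scope:

import numpy as np

maxlength = 100  # hypothetical sequence length
model = convModel()
print(model.predict(np.random.rand(2, maxlength, 26)).shape)  # (2, 2)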