Example #1
0
def build_mlp():
    """
    Multi-layer perceptron: flatten the shared input stack, then one
    l2-regularized dense layer wrapped in heavy dropout.

    :return: the model returned by ``build_single_output``
    """
    net = build_single_input()
    net.add(Flatten())
    # Dropout on both sides of the hidden layer to curb overfitting.
    for layer in (
            Dropout(0.5),
            Dense(embedding_size * 4, activation='relu', kernel_regularizer=l2(0.001)),
            Dropout(0.5),
    ):
        net.add(layer)
    return build_single_output(net)
Example #2
0
def build_fcn():
    """
    Fully convolutional network (FCN).

    NOTE(review): despite the name, no convolutional layers are added here —
    this is just the shared input stack piped to the shared output head;
    presumably the convolutions live in ``build_single_input`` — confirm.
    Takes no parameters (the original ``:param model:`` was incorrect).

    :return: the model returned by ``build_single_output``
    """
    model = build_single_input()
    return build_single_output(model)
Example #3
0
def build_text_cnn():
    """
    Text convolutional neural network (TextCNN).

    NOTE(review): no convolutional layers are added in this function —
    the model is the shared input stack piped straight to the shared
    output head; verify whether the CNN part was meant to go here.
    Takes no parameters (the original ``:param model:`` was incorrect).

    :return: the model returned by ``build_single_output``
    """
    model = build_single_input()
    return build_single_output(model)
Example #4
0
def build_le_net():
    """
    LeNet-style 1-D convolutional network: two conv/max-pool stages with
    growing channel counts, then a dropout-wrapped dense layer before the
    shared output head.

    :return: the model returned by ``build_single_output``
    """
    net = build_single_input()
    # Two convolution + pooling stages, widening the channel count.
    for width in (embedding_size * 2, embedding_size * 3):
        net.add(Conv1D(width, 2, activation='relu', kernel_regularizer=l2(0.001)))
        net.add(MaxPooling1D())
    net.add(Flatten())
    # Light dropout around the single dense layer.
    net.add(Dropout(0.2))
    net.add(Dense(embedding_size * 3, activation='relu', kernel_regularizer=l2(0.001)))
    net.add(Dropout(0.2))
    return build_single_output(net)
Example #5
0
def build_nin():
    """
    Network-in-Network (NiN) architecture.

    NOTE(review): the original author noted it trains poorly on this
    data, possibly because the hyper-parameters are unsuitable.

    :return: the model returned by ``build_single_output``
    """
    net = build_single_input()
    # (channels, kernel_size, strides, padding) for each NiN block,
    # each followed by max pooling.
    for block_args in ((embedding_size * 2, 3, 2, 'valid'),
                       (embedding_size * 3, 2, 1, 'same'),
                       (embedding_size * 4, 2, 1, 'same')):
        net.add(nin_block(*block_args))
        net.add(MaxPooling1D())
    net.add(Dropout(0.5))
    # Final NiN block squeezes to a single channel before global pooling.
    net.add(nin_block(1, 2, 1, 'same'))
    net.add(GlobalAveragePooling1D())
    return build_single_output(net)
Example #6
0
def build_conv1d():
    """
    Stack of paired 1-D convolutions (channel count doubling per pair,
    max pooling between pairs), finished with global max pooling and
    light dropout.

    :return: the model returned by ``build_single_output``
    """
    model = build_single_input()
    # Three blocks of two convolutions each; channels double per block.
    model.add(Conv1D(embedding_size * 2, 2, activation='relu', kernel_regularizer=l2(0.001)))
    model.add(Conv1D(embedding_size * 2, 2, activation='relu', kernel_regularizer=l2(0.001)))
    model.add(MaxPooling1D())
    model.add(Conv1D(embedding_size * 4, 2, activation='relu', kernel_regularizer=l2(0.001)))
    model.add(Conv1D(embedding_size * 4, 2, activation='relu', kernel_regularizer=l2(0.001)))
    model.add(MaxPooling1D())
    model.add(Conv1D(embedding_size * 8, 2, activation='relu', kernel_regularizer=l2(0.001)))
    model.add(Conv1D(embedding_size * 8, 2, activation='relu', kernel_regularizer=l2(0.001)))
    model.add(GlobalMaxPooling1D())
    model.add(Dropout(0.2))
    return build_single_output(model)
def build_vgg():
    """
    VGG-style network built from repeated convolutional blocks.

    :return: the model returned by ``build_single_output``
    """
    net = build_single_input()
    # (number of convolutions, channel count) for each VGG block.
    for num_convs, num_channels in ((2, 64), (2, 64), (2, 128), (2, 128)):
        net.add(vgg_block(num_convs, num_channels))
    net.add(Flatten())
    # Classifier head: two identical dropout-regularized dense layers.
    for _ in range(2):
        net.add(Dense(embedding_size * 2,
                      activation='relu',
                      kernel_regularizer=l2(0.001)))
        net.add(Dropout(0.5))
    return build_single_output(net)
Example #8
0
def build_alex_net():
    """
    AlexNet-style deep 1-D convolutional network: three stages of stacked
    convolutions with max pooling between stages, followed by two
    dropout-regularized dense layers.

    (The original docstring read "深度总面积神经网络" — presumably a typo
    for a deep convolutional network.)

    :return: the model returned by ``build_single_output``
    """
    net = build_single_input()
    # Each stage lists (filters, padding) per convolution and ends in
    # a max-pooling layer. 'valid' is Conv1D's default padding.
    stages = (
        ((embedding_size * 2, 'valid'), (embedding_size * 2, 'valid')),
        ((embedding_size * 4, 'same'), (embedding_size * 4, 'same')),
        ((embedding_size * 8, 'valid'), (embedding_size * 8, 'valid'),
         (embedding_size * 4, 'valid')),
    )
    for stage in stages:
        for filters, pad in stage:
            net.add(Conv1D(filters, kernel_size=2, padding=pad,
                           activation='relu', kernel_regularizer=l2(0.001)))
        net.add(MaxPooling1D())
    net.add(Flatten())
    # Classifier head: two identical dropout-regularized dense layers.
    for _ in range(2):
        net.add(Dense(embedding_size * 8, activation='relu',
                      kernel_regularizer=l2(0.001)))
        net.add(Dropout(0.5))
    return build_single_output(net)
Example #9
0
def build_global_max_pooling1d():
    """
    Minimal baseline: global max pooling applied directly to the shared
    input stack.

    :return: the model returned by ``build_single_output``
    """
    net = build_single_input()
    net.add(GlobalMaxPooling1D())
    return build_single_output(net)