# Imports assumed for this snippet (Keras 2.x standalone API; CuDNNGRU
# requires the TensorFlow backend and a CUDA-enabled GPU).
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from keras import initializers, optimizers
from keras.models import Sequential
from keras.layers import (InputLayer, Conv1D, MaxPooling1D, Flatten, Dense,
                          Dropout, Masking, LSTM, Embedding, CuDNNGRU)
from keras.optimizers import Adagrad


def neural_Conv1D(input_shape,
                  net_conv_num=[64, 64],
                  kernel_size=[5, 5],
                  pooling=True,
                  pooling_size=[5, 5],
                  net_dense_shape=[128, 64, 2],
                  optimizer_name='Adagrad',
                  lr=0.001):
    '''
    Build and compile a 1D-CNN classifier.

    :param input_shape: shape of a single input sample
    :param net_conv_num: number of filters for each Conv1D layer
    :param kernel_size: kernel size for each Conv1D layer
    :param pooling: whether to add MaxPooling1D after each Conv1D layer
    :param pooling_size: pool size for each MaxPooling1D layer
    :param net_dense_shape: units of the dense layers; the last entry is the
        number of output classes
    :param optimizer_name: optimizer name
    :param lr: learning rate
    :return: compiled Keras model
    '''
    model = Sequential()
    # Declare the input shape once, then stack the Conv1D blocks
    model.add(InputLayer(input_shape=input_shape))

    for n in range(len(net_conv_num)):
        model.add(
            Conv1D(
                filters=net_conv_num[n],  # number of filters
                kernel_size=kernel_size[n],  # kernel size
                strides=1,
                padding='same',
                activation='relu',
                kernel_initializer=initializers.normal(stddev=0.1),
                bias_initializer=initializers.normal(stddev=0.1),
                name='Conv1D_' + str(n)))
        if pooling:
            model.add(
                MaxPooling1D(
                    pool_size=pooling_size[n],  # pool size
                    strides=2,
                    padding='same',
                    name='MaxPooling1D_' + str(n)))
    # Flatten before the dense layers
    model.add(Flatten())
    # Fully connected hidden layers
    for n, units in enumerate(net_dense_shape[0:-1]):
        model.add(
            Dense(units=units,
                  activation='relu',
                  kernel_initializer=initializers.normal(stddev=0.1),
                  name='Dense_' + str(n)))
    # Final softmax layer
    model.add(
        Dense(units=net_dense_shape[-1],
              activation='softmax',
              kernel_initializer=initializers.normal(stddev=0.1),
              name='softmax'))

    # Resolve the optimizer class by name, e.g. optimizers.Adagrad(lr=lr)
    optimizer = getattr(optimizers, optimizer_name)(lr=lr)
    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
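

# Usage sketch for neural_Conv1D (hypothetical shapes, not from the
# original source): samples of 100 timesteps with 8 features each,
# classified into 2 classes.
conv_model = neural_Conv1D(input_shape=(100, 8),
                           net_conv_num=[64, 64],
                           kernel_size=[5, 5],
                           pooling_size=[5, 5],
                           net_dense_shape=[128, 64, 2])
conv_model.summary()
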
def add_Conv1D(model,
               filters=16,  # number of filters
               kernel_size=3,  # kernel size
               strides=1,
               padding='same',
               activation='relu',
               kernel_initializer=initializers.normal(stddev=0.1),
               bias_initializer=initializers.normal(stddev=0.1),
               **param):
    # Append a Conv1D layer to the model that is passed in
    model.add(Conv1D(filters=filters,
                     kernel_size=kernel_size,
                     strides=strides,
                     padding=padding,
                     activation=activation,
                     kernel_initializer=kernel_initializer,
                     bias_initializer=bias_initializer,
                     **param))


def add_Dense(model,
              units=16,
              activation='relu',
              kernel_initializer=initializers.normal(stddev=0.1),
              **param):
    # Append a Dense layer to the model that is passed in
    model.add(Dense(units=units,
                    activation=activation,
                    kernel_initializer=kernel_initializer,
                    **param))
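

# Usage sketch for the helpers above (hypothetical shapes, not from the
# original source); note the model is passed in explicitly:
helper_model = Sequential()
helper_model.add(InputLayer(input_shape=(100, 8)))
add_Conv1D(helper_model, filters=32, kernel_size=3)
add_Dense(helper_model, units=64)
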
def cnn1d(input_dim,
          input_length=100,
          output_dim=50,
          label_n=6,
          loss='categorical_crossentropy'):
    '''
    Build and compile a 1D-CNN text classifier on top of an embedding.

    :param input_dim: vocabulary size (i.e. the one-hot dimension)
    :param input_length: text length in tokens
    :param output_dim: word-vector dimension
    :param label_n: number of output labels
    :param loss: loss function
    :return: compiled Keras model
    '''
    model = Sequential()
    model.add(
        Embedding(input_dim=input_dim + 1,
                  input_length=input_length,
                  output_dim=output_dim,
                  mask_zero=False))  # masking off: Conv1D does not support it
    model.add(
        Conv1D(
            filters=64,  # number of filters
            kernel_size=5,  # kernel size
            strides=1,
            padding='same',
            kernel_initializer=initializers.normal(stddev=0.1),
            bias_initializer=initializers.normal(stddev=0.1),
            activation='relu'))
    model.add(MaxPooling1D(pool_size=2, strides=2, padding='valid'))
    model.add(
        Conv1D(
            filters=64,  # number of filters
            kernel_size=5,  # kernel size
            strides=1,
            padding='same',
            kernel_initializer=initializers.normal(stddev=0.1),
            bias_initializer=initializers.normal(stddev=0.1),
            activation='relu'))
    model.add(MaxPooling1D(pool_size=2, strides=2, padding='valid'))
    model.add(Flatten(name='Flatten'))
    model.add(Dense(units=64, activation='relu'))
    model.add(Dropout(0.25))
    # Sigmoid output: one independent probability per label
    model.add(Dense(units=label_n, activation='sigmoid'))
    optimizer = Adagrad(lr=0.01)
    model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])
    return model
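

# Usage sketch for cnn1d (hypothetical sizes, not from the original
# source): a 5000-token vocabulary, sequences padded to 100 tokens.
text_model = cnn1d(input_dim=5000, input_length=100, output_dim=50, label_n=6)
text_model.summary()
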
def neural_LSTM(input_shape,
                net_shape=[64, 64, 128, 2],
                optimizer_name='Adagrad',
                lr=0.001):
    '''
    Build and compile an LSTM classifier.

    :param input_shape: shape of a single input sample
    :param net_shape: layer sizes: [LSTM units, dense units..., output units]
    :param optimizer_name: optimizer name
    :param lr: learning rate
    :return: compiled Keras model
    '''
    model = Sequential()
    # Skip the zero padding added by the earlier truncate/pad preprocessing
    model.add(Masking(mask_value=0, input_shape=input_shape))
    # LSTM layer
    model.add(
        LSTM(units=net_shape[0],
             activation='tanh',
             recurrent_activation='hard_sigmoid',
             implementation=1,
             dropout=0.2,
             kernel_initializer=initializers.normal(stddev=0.1),
             name='LSTM'))
    # Fully connected hidden layers
    for n, units in enumerate(net_shape[1:-1]):
        model.add(
            Dense(units=units,
                  activation='relu',
                  kernel_initializer=initializers.normal(stddev=0.1),
                  name='Dense' + str(n)))
    # Final softmax layer
    model.add(
        Dense(units=net_shape[-1],
              activation='softmax',
              kernel_initializer=initializers.normal(stddev=0.1),
              name='softmax'))

    # Resolve the optimizer class by name, e.g. optimizers.Adagrad(lr=lr)
    optimizer = getattr(optimizers, optimizer_name)(lr=lr)
    model.compile(optimizer=optimizer,
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])

    return model
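

# Usage sketch for neural_LSTM (hypothetical shapes, not from the
# original source): 100 timesteps, 8 features, two output classes.
lstm_model = neural_LSTM(input_shape=(100, 8), net_shape=[64, 64, 128, 2])
lstm_model.summary()
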
def CuDNNGRU_drop(input_dim=100, input_length=100, output_dim=200, label_n=6):
    '''
    Build and compile a CuDNNGRU + Conv1D text classifier.

    :param input_dim: vocabulary size (i.e. the one-hot dimension)
    :param input_length: text length in tokens
    :param output_dim: word-vector dimension
    :param label_n: number of output labels
    :return: compiled Keras model
    '''
    model = Sequential()
    model.add(
        Embedding(input_dim=input_dim + 1,
                  input_length=input_length,
                  output_dim=output_dim,
                  mask_zero=False))  # masking off: CuDNNGRU does not support it
    model.add(CuDNNGRU(units=32, return_sequences=True))
    model.add(
        Conv1D(
            filters=64,  # number of filters
            kernel_size=5,  # kernel size
            strides=1,
            padding='same',
            kernel_initializer=initializers.normal(stddev=0.1),
            bias_initializer=initializers.normal(stddev=0.1),
            activation='relu'))
    model.add(MaxPooling1D(pool_size=2, strides=2, padding='valid'))
    model.add(Flatten())
    model.add(Dense(units=128, activation='tanh'))
    model.add(Dropout(0.20))
    model.add(Dense(units=label_n, activation='sigmoid'))
    optimizer = Adagrad(lr=0.001)
    model.compile(optimizer=optimizer,
                  loss=multiply_loss,  # custom loss, defined elsewhere in the source
                  metrics=['accuracy'])
    return model
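

# `multiply_loss` above is a custom loss defined elsewhere in the source.
# A hypothetical stand-in (NOT the original loss) plus a usage sketch,
# kept commented out because CuDNNGRU needs a CUDA-enabled GPU:
# from keras import backend as K
# def multiply_loss(y_true, y_pred):
#     return K.mean(K.binary_crossentropy(y_true, y_pred), axis=-1)
# gru_model = CuDNNGRU_drop(input_dim=5000, input_length=100,
#                           output_dim=200, label_n=6)
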
def neural_bulit(net_shape,
                 optimizer_name='Adagrad',
                 lr=0.001,
                 loss='categorical_crossentropy'):
    '''
    Build and compile a model from a list of layer-spec dicts.

    :param net_shape: network spec; one dict per layer, keyed by 'name'
    :param optimizer_name: optimizer name
    :param lr: learning rate
    :param loss: loss function
    :return: compiled Keras model
    '''
    model = Sequential()

    for n in range(len(net_shape)):

        if net_shape[n]['name'] == 'InputLayer':
            model.add(
                InputLayer(input_shape=net_shape[n]['input_shape'],
                           name='num_' + str(n) + '_InputLayer'))

        elif net_shape[n]['name'] == 'Dropout':
            if 'rate' not in net_shape[n]:
                net_shape[n].update({'rate': 0.2})
            model.add(
                Dropout(rate=net_shape[n]['rate'],
                        name='num_' + str(n) + '_Dropout'))
        elif net_shape[n]['name'] == 'Masking':
            model.add(Masking(mask_value=0))

        elif net_shape[n]['name'] == 'LSTM':
            if 'units' not in net_shape[n]:
                net_shape[n].update({'units': 16})
            if 'activation' not in net_shape[n]:
                net_shape[n].update({'activation': 'tanh'})
            if 'recurrent_activation' not in net_shape[n]:
                net_shape[n].update({'recurrent_activation': 'hard_sigmoid'})
            if 'dropout' not in net_shape[n]:
                net_shape[n].update({'dropout': 0.})
            if 'recurrent_dropout' not in net_shape[n]:
                net_shape[n].update({'recurrent_dropout': 0.})

            model.add(
                LSTM(units=net_shape[n]['units'],
                     activation=net_shape[n]['activation'],
                     recurrent_activation=net_shape[n]['recurrent_activation'],
                     implementation=1,
                     dropout=net_shape[n]['dropout'],
                     recurrent_dropout=net_shape[n]['recurrent_dropout'],
                     name='num_' + str(n) + '_LSTM'))

        elif net_shape[n]['name'] == 'Conv1D':
            if 'filters' not in net_shape[n]:
                net_shape[n].update({'filters': 16})
            if 'kernel_size' not in net_shape[n]:
                net_shape[n].update({'kernel_size': 3})
            if 'strides' not in net_shape[n]:
                net_shape[n].update({'strides': 1})
            if 'padding' not in net_shape[n]:
                net_shape[n].update({'padding': 'same'})

            model.add(
                Conv1D(
                    filters=net_shape[n]['filters'],  # number of filters
                    kernel_size=net_shape[n]['kernel_size'],  # kernel size
                    strides=net_shape[n]['strides'],
                    padding=net_shape[n]['padding'],
                    activation='relu',
                    kernel_initializer=initializers.normal(stddev=0.1),
                    bias_initializer=initializers.normal(stddev=0.1),
                    name='num_' + str(n) + '_Conv1D'))

        elif net_shape[n]['name'] == 'MaxPooling1D':
            if 'pool_size' not in net_shape[n]:
                net_shape[n].update({'pool_size': 3})
            if 'strides' not in net_shape[n]:
                net_shape[n].update({'strides': 1})
            if 'padding' not in net_shape[n]:
                net_shape[n].update({'padding': 'same'})
            model.add(
                MaxPooling1D(
                    pool_size=net_shape[n]['pool_size'],  # pool size
                    strides=net_shape[n]['strides'],
                    padding=net_shape[n]['padding'],
                    name='num_' + str(n) + '_MaxPooling1D'))

        elif net_shape[n]['name'] == 'Flatten':
            model.add(Flatten())

        elif net_shape[n]['name'] == 'Dense':
            if 'units' not in net_shape[n]:
                net_shape[n].update({'units': 16})
            model.add(
                Dense(units=net_shape[n]['units'],
                      activation='relu',
                      kernel_initializer=initializers.normal(stddev=0.1),
                      name='num_' + str(n) + '_Dense'))

        elif net_shape[n]['name'] == 'softmax':
            if 'units' not in net_shape[n]:
                net_shape[n].update({'units': 16})
            model.add(
                Dense(units=net_shape[n]['units'],
                      activation='softmax',
                      kernel_initializer=initializers.normal(stddev=0.1),
                      name='num_' + str(n) + '_softmax'))
    # Resolve the optimizer class by name, e.g. optimizers.Adagrad(lr=lr)
    optimizer = getattr(optimizers, optimizer_name)(lr=lr)
    model.compile(optimizer=optimizer, loss=loss, metrics=['accuracy'])

    return model
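

# Usage sketch for neural_bulit: each entry of net_shape is a dict whose
# 'name' key selects the layer type; omitted keys fall back to the defaults
# above. Shapes here are hypothetical, not from the original source.
spec = [{'name': 'InputLayer', 'input_shape': (100, 8)},
        {'name': 'Conv1D', 'filters': 32, 'kernel_size': 3},
        {'name': 'MaxPooling1D', 'pool_size': 2, 'strides': 2},
        {'name': 'Flatten'},
        {'name': 'Dense', 'units': 64},
        {'name': 'Dropout', 'rate': 0.2},
        {'name': 'softmax', 'units': 2}]
built_model = neural_bulit(spec)
built_model.summary()
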
# `total` and `data_transform` come from preprocessing not shown in this excerpt
label_transform = np.array(pd.get_dummies(total.loc[:, 'label']))
print(data_transform.shape)
# Split into training and test sets
train_data, test_data, train_label, test_label = train_test_split(data_transform,
                                                                  label_transform,
                                                                  test_size=0.33,
                                                                  random_state=42)

model = Sequential()
# Skip the zero padding added by the earlier truncate/pad preprocessing
model.add(Masking(mask_value=0, input_shape=data_transform.shape[-2:]))
model.add(LSTM(units=64,
               activation='relu',
               implementation=1,
               dropout=0.2,
               kernel_initializer=initializers.normal(stddev=0.1),
               name='LSTM'))
model.add(Dense(units=64,
                activation='relu',
                kernel_initializer=initializers.normal(stddev=0.1),
                name='Dense1'))
model.add(Dense(units=128,
                activation='relu',
                kernel_initializer=initializers.normal(stddev=0.1),
                name='Dense2'))
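
# The excerpt ends before the output layer and training step. A plausible
# continuation following the neural_LSTM pattern above (hypothetical, not
# from the original source):
# model.add(Dense(units=train_label.shape[-1], activation='softmax',
#                 name='softmax'))
# model.compile(optimizer=Adagrad(lr=0.001),
#               loss='categorical_crossentropy', metrics=['accuracy'])
# model.fit(train_data, train_label, epochs=10, batch_size=32,
#           validation_data=(test_data, test_label))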