Code example #1
import os

from keras.layers import (Activation, BatchNormalization, Concatenate, Conv1D,
                          Dense, Dropout, Input, LSTM, MaxPooling1D)
from keras.models import Model

# load_data and Metrics are project-local helpers (not shown in this example)


def main(data_path, output_path):

    X_trainS1, X_trainS2, X_trainS3, Y_train, X_valS1, X_valS2, X_valS3, Y_val = load_data(
        data_path)

    epochs = 100
    batch_size = 256
    kernel_size = 3
    pool_size = 2
    dropout_rate = 0.15
    n_classes = 6

    f_act = 'relu'

    # Inputs for the three sub-models
    main_input1 = Input(shape=(128, 3), name='main_input1')
    main_input2 = Input(shape=(128, 3), name='main_input2')
    main_input3 = Input(shape=(128, 3), name='main_input3')

    def cnn_lstm_cell(main_input):
        """
        Build a sub-model based on the DeepConvLSTM architecture.
        :param main_input: input tensor
        :return: output tensor of the sub-model
        """
        sub_model = Conv1D(512,
                           kernel_size,
                           activation=f_act,
                           padding='same')(main_input)  # shape comes from main_input
        sub_model = BatchNormalization()(sub_model)
        sub_model = MaxPooling1D(pool_size=pool_size)(sub_model)
        sub_model = Dropout(dropout_rate)(sub_model)
        sub_model = Conv1D(64, kernel_size, activation=f_act,
                           padding='same')(sub_model)
        sub_model = BatchNormalization()(sub_model)
        sub_model = MaxPooling1D(pool_size=pool_size)(sub_model)
        sub_model = Dropout(dropout_rate)(sub_model)
        sub_model = Conv1D(32, kernel_size, activation=f_act,
                           padding='same')(sub_model)
        sub_model = BatchNormalization()(sub_model)
        sub_model = MaxPooling1D(pool_size=pool_size)(sub_model)
        sub_model = LSTM(128, return_sequences=True)(sub_model)
        sub_model = LSTM(128, return_sequences=True)(sub_model)
        sub_model = LSTM(128)(sub_model)
        main_output = Dropout(dropout_rate)(sub_model)
        return main_output

    first_model = cnn_lstm_cell(main_input1)
    second_model = cnn_lstm_cell(main_input2)
    third_model = cnn_lstm_cell(main_input3)

    model = Concatenate()([first_model, second_model, third_model])  # merge the three sub-models
    model = Dropout(0.4)(model)
    model = Dense(n_classes)(model)
    model = BatchNormalization()(model)
    output = Activation('softmax', name="softmax")(model)

    model = Model([main_input1, main_input2, main_input3], output)
    model.compile(loss='categorical_crossentropy',
                  optimizer='rmsprop',
                  metrics=['accuracy'])

    #    graph_path = os.path.join(output_path, "merged_model.png")
    #    plot_model(model, to_file=graph_path, show_shapes=True)  # plot the model graph

    metrics = Metrics()  # project-local callback that reports FPR
    history = model.fit([X_trainS1, X_trainS2, X_trainS3],
                        Y_train,
                        batch_size=batch_size,
                        validation_data=([X_valS1, X_valS2, X_valS3], Y_val),
                        epochs=epochs,
                        callbacks=[metrics])  # adds FPR reporting

    model_path = os.path.join(output_path, "merged_dcl.h5")
    model.save(model_path)  # save the model
    print(history.history)
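
The Metrics callback above is project-local and not shown; since it is constructed with no arguments, it presumably reads the validation data internally. Below is a minimal sketch of what an FPR-reporting callback could look like, written to take the validation arrays explicitly; the class layout and the macro-averaged FPR are assumptions, not the original implementation.

import numpy as np
from keras.callbacks import Callback


class Metrics(Callback):
    """Hypothetical stand-in for the project's FPR callback."""

    def __init__(self, val_inputs, val_targets):
        super(Metrics, self).__init__()
        self.val_inputs = val_inputs    # list of the three sensor arrays
        self.val_targets = val_targets  # one-hot labels

    def on_epoch_end(self, epoch, logs=None):
        y_pred = np.argmax(self.model.predict(self.val_inputs), axis=1)
        y_true = np.argmax(self.val_targets, axis=1)
        fprs = []
        for c in range(self.val_targets.shape[1]):
            fp = np.sum((y_pred == c) & (y_true != c))  # false positives for class c
            tn = np.sum((y_pred != c) & (y_true != c))  # true negatives for class c
            fprs.append(float(fp) / max(fp + tn, 1))
        print(' - val_fpr (macro): %.4f' % np.mean(fprs))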
Code example #2
# (Fragment: x, y, ip, sc, dataset_train, X_train and y_train are defined
# earlier in the original script.)
y = GlobalAveragePooling1D()(y)

regressor = Concatenate(axis=-1)([x, y])

# Adding the output layer
regressor_final = Dense(units=2, activation='softmax')(regressor)
regressor = Model(inputs=ip, outputs=regressor_final)

# Compiling the RNN: with a 2-unit softmax output and one-hot labels,
# categorical cross-entropy is the appropriate loss, not mean_squared_error
regressor.compile(optimizer='adam', loss='categorical_crossentropy',
                  metrics=['accuracy'])

y_train = to_categorical(y_train, len(np.unique(y_train)))

# Fitting the RNN to the training set
regressor.fit(X_train, y_train, epochs=20, batch_size=32)

# Part 3 - Making the predictions and visualising the results

# Getting the real stock price of 2017
dataset_test = pd.read_csv('AAPL_test.csv')
real_stock_price = dataset_test.iloc[:, 1:2].values

# Building the test-set input windows (the softmax head above predicts a
# class per window, not a price)
dataset_total = pd.concat((dataset_train['Open'], dataset_test['Open']), axis=0)
inputs = dataset_total[len(dataset_total) - len(dataset_test) - 100:].values
inputs = inputs.reshape(-1, 1)
inputs = sc.transform(inputs)
X_test = []
for i in range(100, len(inputs)):  # one 100-step window per test row
    X_test.append(inputs[i - 100:i, 0])
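
The snippet breaks off before the windows are fed to the network. A minimal continuation under the snippet's own assumptions (it reuses regressor and np from the original script; with the softmax head, the output is a class per window rather than a price):

X_test = np.array(X_test)
X_test = np.reshape(X_test, (X_test.shape[0], X_test.shape[1], 1))
predicted = regressor.predict(X_test)           # per-window class probabilities
predicted_class = np.argmax(predicted, axis=1)  # hypothetical 0/1 direction label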
Code example #3
File: ner_utils.py  Project: cjer/NER
import numpy as np
import pandas as pd
from keras.callbacks import EarlyStopping
from keras.layers import (Bidirectional, Concatenate, Dense, Dropout,
                          Embedding, Input, LSTM, SpatialDropout1D,
                          TimeDistributed, concatenate)
from keras.models import Model
# CRF layer, loss and metric from the keras-contrib package
from keras_contrib.layers import CRF
from keras_contrib.losses import crf_loss
from keras_contrib.metrics import crf_accuracy


def create_model(words,
                 chars,
                 max_len,
                 n_words,
                 n_tags,
                 max_len_char,
                 n_pos,
                 n_chars,
                 embedding_mats,
                 use_word=True,
                 use_pos=False,
                 embedding_matrix=None,
                 embed_dim=70,
                 trainable=True,
                 input_dropout=False,
                 stack_lstm=1,
                 epochs=100,
                 early_stopping=True,
                 patience=20,
                 min_delta=0.0001,
                 use_char=False,
                 crf=False,
                 add_random_embedding=True,
                 pretrained_embed_dim=300,
                 stack_cross=False,
                 stack_double=False,
                 rec_dropout=0.1,
                 validation_split=0.1,
                 output_dropout=False,
                 optimizer='rmsprop',
                 pos_dropout=None,
                 char_dropout=False,
                 all_spatial_dropout=True,
                 print_summary=True,
                 verbose=2):
    X_tr, X_te, y_tr, y_te, pos_tr, pos_te = words
    X_char_tr, X_char_te, _, _ = chars
    all_input_embeds = []
    all_inputs = []
    train_data = []
    if use_word and not add_random_embedding and embedding_matrix is None:
        raise ValueError('Cannot use word without embedding')
    if use_word:
        word_input = Input(shape=(max_len, ))  # renamed to avoid shadowing input()
        if add_random_embedding:
            input_embed = Embedding(input_dim=n_words + 2,
                                    output_dim=embed_dim,
                                    input_length=max_len)(word_input)
            all_input_embeds.append(input_embed)
        if embedding_matrix is not None:
            input_embed = Embedding(input_dim=n_words + 2,
                                    output_dim=pretrained_embed_dim,
                                    input_length=max_len,
                                    weights=[embedding_mats[embedding_matrix]],
                                    trainable=trainable)(word_input)
            all_input_embeds.append(input_embed)
        all_inputs.append(word_input)
        train_data.append(X_tr)
    if use_pos:
        pos_input = Input(shape=(max_len, ))
        pos_embed = Embedding(input_dim=n_pos + 1,
                              output_dim=10,
                              input_length=max_len)(pos_input)
        if pos_dropout is not None:
            pos_embed = Dropout(pos_dropout)(pos_embed)
        all_input_embeds.append(pos_embed)
        all_inputs.append(pos_input)
        train_data.append(pos_tr)
    if use_char:
        # input and embeddings for characters
        char_in = Input(shape=(
            max_len,
            max_len_char,
        ))
        emb_char = TimeDistributed(
            Embedding(input_dim=n_chars + 2,
                      output_dim=20,
                      input_length=max_len_char))(char_in)
        # character LSTM to get word encodings by characters
        char_enc = TimeDistributed(
            Bidirectional(
                LSTM(units=10, return_sequences=False,
                     recurrent_dropout=0.5)))(emb_char)
        if char_dropout:
            char_enc = SpatialDropout1D(0.3)(char_enc)
        all_input_embeds.append(char_enc)
        all_inputs.append(char_in)
        train_data.append(
            np.array(X_char_tr).reshape(
                (len(X_char_tr), max_len, max_len_char)))
    if len(all_input_embeds) > 1:
        # concatenate the embedding list (not the input list), so random and
        # pretrained word embeddings are combined even with a single Input
        model = Concatenate()(all_input_embeds)
        if (use_char and all_spatial_dropout):
            model = SpatialDropout1D(0.3)(model)
    else:
        model = all_input_embeds[0]
    if len(all_inputs) == 1:
        all_inputs = all_inputs[0]
        train_data = train_data[0]

    if input_dropout:
        model = Dropout(0.1)(model)

    if stack_double:
        # NOTE: the forward stack below is built but never used; only the
        # backward stack reaches the output (kept as in the source)
        front = LSTM(units=100,
                     return_sequences=True,
                     recurrent_dropout=rec_dropout)(model)
        front = LSTM(units=100,
                     return_sequences=True,
                     recurrent_dropout=rec_dropout)(front)
        back = LSTM(units=100,
                    return_sequences=True,
                    recurrent_dropout=rec_dropout,
                    go_backwards=True)(model)
        model = LSTM(units=100,
                     return_sequences=True,
                     recurrent_dropout=rec_dropout,
                     go_backwards=True)(back)
    if stack_cross:
        front = LSTM(units=100,
                     return_sequences=True,
                     recurrent_dropout=rec_dropout)(model)
        front = LSTM(units=100,
                     return_sequences=True,
                     recurrent_dropout=rec_dropout)(front)
        back = LSTM(units=100,
                    return_sequences=True,
                    recurrent_dropout=rec_dropout,
                    go_backwards=True)(model)
        back = LSTM(units=100,
                    return_sequences=True,
                    recurrent_dropout=rec_dropout,
                    go_backwards=True)(back)
        model = concatenate([back, front])
    for i in range(stack_lstm):
        model = Bidirectional(
            LSTM(units=100,
                 return_sequences=True,
                 recurrent_dropout=rec_dropout))(model)

    if output_dropout:
        model = Dropout(0.1)(model)

    if crf:
        model = TimeDistributed(Dense(50, activation="relu"))(
            model)  # a dense layer as suggested by neuralNer
        crf_layer = CRF(n_tags + 1)  # renamed so it does not shadow the crf flag
        loss = crf_loss
        metric = crf_accuracy
        monitor = 'val_crf_accuracy'
        out = crf_layer(model)
    else:
        out = TimeDistributed(Dense(n_tags + 1, activation="softmax"))(
            model)  # softmax output layer
        loss = "categorical_crossentropy"
        metric = 'accuracy'
        monitor = 'val_acc'  # 'val_accuracy' on newer Keras versions

    model = Model(all_inputs, out)
    model.compile(optimizer=optimizer, loss=loss, metrics=[metric])
    if early_stopping:
        es = [
            EarlyStopping(monitor=monitor,
                          mode='max',
                          verbose=1,
                          patience=patience,
                          restore_best_weights=True,
                          min_delta=min_delta)
        ]
    else:
        es = None
    if print_summary:
        model.summary()  # summary() prints itself; wrapping it in print() adds "None"
    history = model.fit(train_data,
                        np.array(y_tr),
                        batch_size=32,
                        epochs=epochs,
                        validation_split=validation_split,
                        verbose=verbose,
                        callbacks=es)
    hist = pd.DataFrame(history.history)

    return model, hist
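
A hedged, self-contained sketch of how create_model might be driven with toy data; every shape and count below is an illustrative assumption, not a value from the cjer/NER project:

import numpy as np

# Toy dimensions; purely illustrative
max_len, max_len_char = 20, 10
n_words, n_tags, n_pos, n_chars = 100, 5, 12, 30
n_train, n_test = 64, 16

rng = np.random.RandomState(0)
X_tr = rng.randint(n_words, size=(n_train, max_len))
X_te = rng.randint(n_words, size=(n_test, max_len))
# one-hot tag sequences with n_tags + 1 classes, matching the output layer
y_tr = np.eye(n_tags + 1)[rng.randint(n_tags + 1, size=(n_train, max_len))]
y_te = np.eye(n_tags + 1)[rng.randint(n_tags + 1, size=(n_test, max_len))]
pos_tr = rng.randint(n_pos, size=(n_train, max_len))
pos_te = rng.randint(n_pos, size=(n_test, max_len))
X_char_tr = rng.randint(n_chars, size=(n_train, max_len, max_len_char))
X_char_te = rng.randint(n_chars, size=(n_test, max_len, max_len_char))

model, hist = create_model((X_tr, X_te, y_tr, y_te, pos_tr, pos_te),
                           (X_char_tr, X_char_te, None, None),
                           max_len, n_words, n_tags, max_len_char,
                           n_pos, n_chars, embedding_mats={},
                           use_char=True, epochs=2,
                           early_stopping=False, verbose=1)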