Example #1
def model_256_128_64_2_100Ep(FrameSize, X, X_train, X_test, y_train, y_test):
    model = Sequential()
    # model.add(Embedding(2, 50, input_length=None))
    # model.add(LSTM(256, return_sequences=True))

    model.add(
        LSTM(256,
             input_shape=(FrameSize, X[0].shape[1]),
             return_sequences=True,
             recurrent_dropout=0.3))
    model.add(SpatialDropout1D(0.2))
    model.add(LSTM(128, return_sequences=False, recurrent_dropout=0.3))
    model.add(Dropout(0.2))
    model.add(Dense(64))
    model.add(Dropout(0.2))
    model.add(Dense(2, activation='softmax'))

    model.compile(loss='categorical_crossentropy',
                  optimizer='Adam',
                  metrics=['accuracy'])

    history = model.fit(X_train,
                        y_train,
                        epochs=100,
                        batch_size=128,
                        shuffle=True,
                        verbose=2,
                        validation_data=(X_test, y_test))

    plot.plot(history, "One_256_128_64_2_100Ep")

    ROC_PR.ROC(model, X_test, y_test, "One_256_128_64_2_100Ep")
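Example #1 builds a plain two-layer LSTM classifier; the later examples reuse the same helpers. All snippets rely on Keras plus a few project modules (plot, ROC_PR, masked_loss_function, masked_accuracy) that are not shown here. A minimal sketch of the imports they appear to assume, with the project modules left out:

# Assumed imports for these snippets; plot, ROC_PR, masked_loss_function and
# masked_accuracy come from the original repository and are not part of Keras.
import numpy as np
from keras.models import Sequential
from keras.layers import (LSTM, Dense, Dropout, SpatialDropout1D, Conv1D,
                          MaxPooling1D, Flatten, TimeDistributed)
from keras.callbacks import ModelCheckpoint, EarlyStopping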
Example #2
def model_CNN_LSTM_limited_1(FrameSize, X, X_train, X_test, y_train, y_test,
                             epoch, earlyStopping, name):
    print(X.shape)
    print(FrameSize)
    model = Sequential()
    model.add(Dropout(0.43369355853937297))
    model.add(
        Conv1D(filters=5, kernel_size=4, activation='relu', padding='same'))
    model.add(MaxPooling1D(pool_size=4, padding='same'))
    model.add(
        Conv1D(filters=7, kernel_size=7, activation='relu', padding='same'))
    model.add(MaxPooling1D(pool_size=7, padding='same'))

    model.add(LSTM(398, return_sequences=True, recurrent_dropout=0.3))
    model.add(SpatialDropout1D(0.43369355853937297))
    model.add(LSTM(106, return_sequences=True, recurrent_dropout=0.3))
    model.add(SpatialDropout1D(0.43369355853937297))
    model.add(LSTM(475, return_sequences=True, recurrent_dropout=0.3))
    model.add(SpatialDropout1D(0.43369355853937297))
    model.add(LSTM(264, return_sequences=False, recurrent_dropout=0.3))
    model.add(Dropout(0.43369355853937297))

    model.add(Dense(352))
    model.add(Dropout(0.43369355853937297))
    model.add(Dense(378))
    model.add(Dropout(0.43369355853937297))

    model.add(Dense(7, activation='sigmoid'))

    model.compile(loss=masked_loss_function,
                  optimizer='Adam',
                  metrics=[masked_accuracy])

    history = model.fit(
        X_train,
        y_train,
        epochs=epoch,
        batch_size=128,
        # shuffle=True,
        verbose=2,
        validation_data=(X_test, y_test),
        callbacks=[
            earlyStopping,
            ModelCheckpoint('result/CNN_LSTM_limited_1.h5',
                            monitor='val_masked_accuracy',
                            mode='max',
                            save_best_only=True)
        ])

    # plot_model(model, to_file='model_plot.png', show_shapes=True)

    plot.plot(history, ("CNN_LSTM_limited_1" + name))

    score = ROC_PR.ROC(model,
                       X_test,
                       y_test, ("CNN_LSTM_limited_1" + name),
                       True,
                       limited=True)
    return score
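This example, like most of the ones that follow, trains with masked_loss_function and masked_accuracy, which are not defined in these snippets. A common way to implement such a pair is to ignore label positions marked with a sentinel value; the sketch below assumes missing labels are encoded as -1, which is an assumption, not necessarily the repository's actual convention:

# Hypothetical masked loss/metric; assumes unknown labels are stored as -1.
import keras.backend as K

MASK_VALUE = -1.0  # assumed sentinel for "label unknown"

def masked_loss_function(y_true, y_pred):
    # Zero out positions whose true label equals the sentinel so they do not
    # contribute to the binary cross-entropy.
    mask = K.cast(K.not_equal(y_true, MASK_VALUE), K.floatx())
    return K.binary_crossentropy(y_true * mask, y_pred * mask)

def masked_accuracy(y_true, y_pred):
    # Accuracy computed only over the unmasked label positions.
    mask = K.cast(K.not_equal(y_true, MASK_VALUE), K.floatx())
    correct = K.cast(K.equal(y_true, K.round(y_pred)), K.floatx()) * mask
    return K.sum(correct) / K.maximum(K.sum(mask), 1.0)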
Example #3
def model_CNN_LSTM_random_data(FrameSize, X, X_train, X_test, y_train, y_test,
                               epoch, earlyStopping, name):
    print(X.shape)
    print(FrameSize)
    model = Sequential()

    model.add(Dropout(0.3311428861138142))
    model.add(
        Conv1D(filters=4, kernel_size=6, activation='relu', padding='same'))
    model.add(MaxPooling1D(pool_size=4, padding='same'))
    model.add(Dropout(0.3311428861138142))
    model.add(
        Conv1D(filters=7, kernel_size=4, activation='relu', padding='same'))
    model.add(MaxPooling1D(pool_size=4, padding='same'))
    model.add(Dropout(0.3311428861138142))
    model.add(
        Conv1D(filters=6, kernel_size=6, activation='relu', padding='same'))
    model.add(MaxPooling1D(pool_size=4, padding='same'))
    model.add(Dropout(0.3311428861138142))
    model.add(
        Conv1D(filters=4, kernel_size=4, activation='relu', padding='same'))
    model.add(MaxPooling1D(pool_size=4, padding='same'))

    model.add(LSTM(425, return_sequences=True, recurrent_dropout=0.3))
    model.add(Dropout(0.3311428861138142))
    model.add(LSTM(189, return_sequences=True, recurrent_dropout=0.3))
    model.add(Dropout(0.3311428861138142))
    model.add(LSTM(283, return_sequences=True, recurrent_dropout=0.3))
    model.add(Dropout(0.3311428861138142))
    model.add(LSTM(333, return_sequences=False, recurrent_dropout=0.3))
    model.add(Dropout(0.3311428861138142))

    model.add(Dense(331))
    model.add(Dropout(0.3311428861138142))
    model.add(Dense(12, activation='sigmoid'))

    model.compile(loss=masked_loss_function,
                  optimizer='Adam',
                  metrics=[masked_accuracy])

    history = model.fit(X_train,
                        y_train,
                        epochs=epoch,
                        batch_size=128,
                        verbose=2,
                        validation_data=(X_test, y_test),
                        callbacks=[
                            earlyStopping,
                            ModelCheckpoint('result/CNN256_LSTM128_64_2.h5',
                                            monitor='val_masked_accuracy',
                                            mode='max',
                                            save_best_only=True)
                        ])

    plot.plot(history, ("LRCN" + name))

    score = ROC_PR.ROC(model, X_test, y_test, ("LRCN" + name), True)
    return score, ROC_PR.ROC_Score(model, X_train, y_train, limited=False)
Example #4
def model_CNN_LSTM_time(FrameSize, X, X_train, X_test, y_train, y_test, epoch,
                        earlyStopping, name):
    print(X.shape)
    print(X_train.shape)
    print(X_test.shape)
    print(FrameSize)
    print(y_train.shape)
    print(y_test.shape)
    X_train = X_train.reshape(X_train.shape[0], X_train.shape[1],
                              X_train.shape[2], 1)
    X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], X_test.shape[2],
                            1)
    # y_train = y_train.reshape(7060, 12, 1)
    # y_test = y_test.reshape(785, 12, 1)
    model = Sequential()
    model.add(Dropout(0.1))
    model.add(
        TimeDistributed(
            Conv1D(filters=8, kernel_size=3, activation='relu',
                   padding='same')))
    model.add(TimeDistributed(MaxPooling1D(pool_size=3, padding='same')))
    model.add(
        TimeDistributed(
            Conv1D(filters=8, kernel_size=3, activation='relu',
                   padding='same')))
    model.add(TimeDistributed(MaxPooling1D(pool_size=3, padding='same')))
    model.add(TimeDistributed(Flatten()))

    model.add(LSTM(518, return_sequences=False, recurrent_dropout=0.3))
    model.add(Dropout(0.1))

    model.add(Dense(64))
    model.add(Dropout(0.1))

    model.add(Dense(12, activation='sigmoid'))

    model.compile(loss=masked_loss_function,
                  optimizer='Adam',
                  metrics=[masked_accuracy])

    history = model.fit(X_train,
                        y_train,
                        epochs=epoch,
                        batch_size=128,
                        verbose=2,
                        validation_data=(X_test, y_test),
                        callbacks=[
                            earlyStopping,
                            ModelCheckpoint('result/CNN256_LSTM128_64_2.h5',
                                            monitor='val_masked_accuracy',
                                            mode='max',
                                            save_best_only=True)
                        ])

    plot.plot(history, ("LRCN" + name))

    score = ROC_PR.ROC(model, X_test, y_test, ("LRCN" + name), True)
    return score, ROC_PR.ROC_Score(model, X_train, y_train, limited=False)
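In this variant the Conv1D/pooling stack is wrapped in TimeDistributed, so it is applied to every frame separately; that is why the 3-D inputs get an extra channel axis above. A small shape check under the same (samples, FrameSize, features, 1) layout, with placeholder sizes:

# Illustrative shape check for the TimeDistributed CNN-LSTM input; the sizes
# here are placeholders, not the dataset's real dimensions.
import numpy as np

samples, frame_size, features = 32, 10, 20   # hypothetical sizes
X_demo = np.zeros((samples, frame_size, features))
X_demo = X_demo.reshape(samples, frame_size, features, 1)
print(X_demo.shape)  # (32, 10, 20, 1): one (features, 1) Conv1D input per frame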
Example #5
def model_256_128_64_2_StateFul(FrameSize, X, X_train, X_test, y_train, y_test,
                                epoch, earlyStopping):
    batch_size = 128
    # A stateful LSTM is built with a fixed batch size, so the train/test sets
    # are padded with blank samples until their lengths are multiples of 128.
    # Note that tmp and tmp2 below are views of X_train[0] and y_train[0], so
    # zeroing them also zeroes the first training sample in place.
    if len(X_train) // 128 != len(X_train) / 128:
        tmp = X_train[0]
        tmp2 = y_train[0]
        for j in range(0, len(tmp2)):
            tmp2[j] = 0
        for j in range(0, len(tmp)):
            for k in range(0, len(tmp[j])):
                if tmp[j][k] == 1:
                    tmp[j][k] = 0
        for j in range(0,
                       ((((len(X_train) // 128) + 1) * 128) - len(X_train))):
            X_train = np.append(X_train, np.expand_dims(tmp, axis=0), axis=0)
            y_train = np.append(y_train, np.expand_dims(tmp2, axis=0), axis=0)

        for j in range(0, ((((len(X_test) // 128) + 1) * 128) - len(X_test))):
            X_test = np.append(X_test, np.expand_dims(tmp, axis=0), axis=0)
            y_test = np.append(y_test, np.expand_dims(tmp2, axis=0), axis=0)

    model = Sequential()
    model.add(
        LSTM(256,
             batch_input_shape=(batch_size, FrameSize, X[0].shape[1]),
             return_sequences=True,
             stateful=True))
    model.add(SpatialDropout1D(0.2))
    model.add(LSTM(128, return_sequences=False, stateful=True))
    model.add(Dropout(0.2))
    model.add(Dense(64))
    model.add(Dropout(0.2))
    model.add(Dense(12, activation='sigmoid'))

    model.compile(loss=masked_loss_function,
                  optimizer='Adam',
                  metrics=[masked_accuracy])

    print(model.summary())

    history = model.fit(X_train,
                        y_train,
                        epochs=epoch,
                        batch_size=batch_size,
                        shuffle=True,
                        verbose=2,
                        validation_data=(X_test, y_test),
                        callbacks=[
                            earlyStopping,
                            ModelCheckpoint('result/256_128_64_2_StateFul.h5',
                                            monitor='val_masked_accuracy',
                                            mode='max',
                                            save_best_only=True)
                        ])

    plot.plot(history, "256_128_64_2_StateFul")
Example #6
def model_lrcn_simple(FrameSize,
                      X,
                      X_train,
                      X_test,
                      y_train,
                      y_test,
                      epoch,
                      earlyStopping,
                      name,
                      limited=False):
    print(X.shape)
    print(FrameSize)
    model = Sequential()
    model.add(Dropout(0.2))
    model.add(
        Conv1D(filters=5, kernel_size=5, activation='relu', padding='same'))
    model.add(MaxPooling1D(pool_size=3, padding='same'))
    model.add(LSTM(256, return_sequences=True, recurrent_dropout=0.3))
    model.add(SpatialDropout1D(0.2))
    model.add(LSTM(128, return_sequences=False, recurrent_dropout=0.3))
    model.add(Dropout(0.2))
    model.add(Dense(128))
    model.add(Dropout(0.2))
    if limited:
        model.add(Dense(7, activation='sigmoid'))
    else:
        model.add(Dense(12, activation='sigmoid'))

    model.compile(loss=masked_loss_function,
                  optimizer='Adam',
                  metrics=[masked_accuracy])

    history = model.fit(X_train,
                        y_train,
                        epochs=epoch,
                        batch_size=128,
                        verbose=2,
                        validation_data=(X_test, y_test),
                        callbacks=[
                            earlyStopping,
                            ModelCheckpoint('result/CNN256_LSTM128_64_2.h5',
                                            monitor='val_masked_accuracy',
                                            mode='max',
                                            save_best_only=True)
                        ])

    plot.plot(history, ("LRCN" + name))

    score = ROC_PR.ROC(model,
                       X_test,
                       y_test, ("LRCN" + name),
                       True,
                       limited=limited)
    return score
Example #7
def model_256_128_64_2BS(FrameSize, X, X_train, X_test, y_train, y_test,
                         epoch):
    model = Sequential()
    # model.add(Embedding(2, 50, input_length=None))
    # model.add(LSTM(256, return_sequences=True))

    model.add(
        LSTM(256,
             input_shape=(FrameSize, X[0].shape[1]),
             return_sequences=True,
             recurrent_dropout=0.3))
    model.add(SpatialDropout1D(0.2))
    model.add(LSTM(128, return_sequences=False, recurrent_dropout=0.3))
    model.add(Dropout(0.2))
    model.add(Dense(64))
    model.add(Dropout(0.2))
    model.add(Dense(2, activation='sigmoid'))

    model.compile(loss='binary_crossentropy',
                  optimizer='Adam',
                  metrics=['accuracy'])

    history = model.fit(X_train,
                        y_train,
                        epochs=epoch,
                        batch_size=128,
                        verbose=2,
                        shuffle=True,
                        validation_data=(X_test, y_test),
                        callbacks=[
                            ModelCheckpoint('result/One_256_128_64_2BS.h5',
                                            monitor='val_accuracy',
                                            mode='max',
                                            save_best_only=True)
                        ])

    # model.save_weights("result/One_256_128_64_2BS.h5")

    plot.plot(history, "One_256_128_64_2BS")

    ROC_PR.ROC(model, X_test, y_test, "One_256_128_64_2BS")
Example #8
def model_CNN256_LSTM128_64_2(FrameSize, X, X_train, X_test, y_train, y_test,
                              epoch):
    model = Sequential()
    model.add(Dropout(0.2))
    model.add(
        Conv1D(filters=5, kernel_size=3, activation='relu', padding='same'))
    model.add(MaxPooling1D(pool_size=3))
    model.add(LSTM(256, return_sequences=True, recurrent_dropout=0.3))
    model.add(SpatialDropout1D(0.2))
    model.add(LSTM(128, return_sequences=False, recurrent_dropout=0.3))
    model.add(Dropout(0.2))
    model.add(Dense(64))
    model.add(Dropout(0.2))
    model.add(Dense(2, activation='sigmoid'))

    model.compile(loss='categorical_crossentropy',
                  optimizer='Adam',
                  metrics=['accuracy'])

    history = model.fit(X_train,
                        y_train,
                        epochs=epoch,
                        batch_size=128,
                        shuffle=True,
                        verbose=2,
                        validation_data=(X_test, y_test),
                        callbacks=[
                            ModelCheckpoint('result/CNN256_LSTM128_64_2.h5',
                                            monitor='accuracy',
                                            mode='max',
                                            save_best_only=True)
                        ])

    # plot_model(model, to_file='model_plot.png', show_shapes=True)

    plot.plot(history, "One_CNN256_LSTM128_64_2")

    ROC_PR.ROC(model, X_test, y_test, "One_CNN256_LSTM128_64_2", False)
Example #9
def model_256_128_64_2(FrameSize, X, X_train, X_test, y_train, y_test, epoch,
                       earlyStopping):
    model = Sequential()
    model.add(
        LSTM(256,
             input_shape=(FrameSize, X[0].shape[1]),
             return_sequences=True,
             recurrent_dropout=0.3))
    model.add(SpatialDropout1D(0.2))
    model.add(LSTM(128, return_sequences=False, recurrent_dropout=0.3))
    model.add(Dropout(0.2))
    model.add(Dense(64))
    model.add(Dropout(0.2))
    model.add(Dense(12, activation='sigmoid'))

    model.compile(loss=masked_loss_function,
                  optimizer='Adam',
                  metrics=[masked_accuracy])

    print(model.summary())

    history = model.fit(X_train,
                        y_train,
                        epochs=epoch,
                        batch_size=128,
                        shuffle=True,
                        verbose=2,
                        validation_data=(X_test, y_test),
                        callbacks=[
                            earlyStopping,
                            ModelCheckpoint('result/256_128_64_2.h5',
                                            monitor='val_masked_accuracy',
                                            mode='max',
                                            save_best_only=True)
                        ])

    plot.plot(history, "256_128_64_2")
    ROC_PR.ROC(model, X_test, y_test, "256_128_64_2", True)
Example #10
def model_CNN256_LSTM128_64_2(FrameSize,
                              X,
                              X_train,
                              X_test,
                              y_train,
                              y_test,
                              epoch,
                              earlyStopping,
                              name,
                              dropout2_rate,
                              dense_1,
                              filterCNN,
                              kernelCNN,
                              LSTM1,
                              LSTM2,
                              recurrent_dropout,
                              limited=False):
    print(X.shape)
    print(FrameSize)
    model = Sequential()
    # model.add(TimeDistributed(Conv1D(filters=1, kernel_size=3, activation='relu', padding='same', input_shape=(FrameSize, X[0].shape[1], 1))))
    # model.add(TimeDistributed(MaxPooling1D(pool_size=3)))
    # model.add(TimeDistributed(Flatten()))
    model.add(Dropout(dropout2_rate))
    # model.add(Conv1D(filters=5, kernel_size=3, activation='relu', padding='same'))
    model.add(
        Conv1D(filters=filterCNN,
               kernel_size=kernelCNN,
               activation='relu',
               padding='same'))
    model.add(MaxPooling1D(pool_size=3, padding='same'))
    # model.add(TimeDistributed(Flatten()))
    # model.add(LSTM(256, return_sequences=True, recurrent_dropout=0.3))
    model.add(
        LSTM(LSTM1, return_sequences=True,
             recurrent_dropout=recurrent_dropout))
    model.add(SpatialDropout1D(dropout2_rate))
    # model.add(LSTM(128, return_sequences=False, recurrent_dropout=0.3))
    model.add(
        LSTM(LSTM2,
             return_sequences=False,
             recurrent_dropout=recurrent_dropout))
    model.add(Dropout(dropout2_rate))
    # model.add(Dense(64))
    model.add(Dense(dense_1))
    model.add(Dropout(dropout2_rate))
    if limited:
        model.add(Dense(7, activation='sigmoid'))
    else:
        model.add(Dense(12, activation='sigmoid'))

    model.compile(loss=masked_loss_function,
                  optimizer='Adam',
                  metrics=[masked_accuracy])

    history = model.fit(
        X_train,
        y_train,
        epochs=epoch,
        batch_size=128,
        # shuffle=True,
        verbose=2,
        validation_data=(X_test, y_test),
        callbacks=[
            earlyStopping,
            ModelCheckpoint('result/CNN256_LSTM128_64_2.h5',
                            monitor='val_masked_accuracy',
                            mode='max',
                            save_best_only=True)
        ])

    # plot_model(model, to_file='model_plot.png', show_shapes=True)

    plot.plot(history, ("LRCN" + name))

    score = ROC_PR.ROC(model,
                       X_test,
                       y_test, ("LRCN" + name),
                       True,
                       limited=limited)
    return score
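This version exposes the dropout rate, dense width, CNN filter/kernel sizes and LSTM widths as parameters, presumably for hyperparameter search. A hypothetical invocation with placeholder values (not the values actually tuned in the repository):

# Hypothetical call to the tunable CNN-LSTM builder; every value below is an
# illustrative placeholder rather than a tuned result.
score = model_CNN256_LSTM128_64_2(FrameSize, X, X_train, X_test, y_train,
                                  y_test,
                                  epoch=100,
                                  earlyStopping=earlyStopping,
                                  name="_demo",
                                  dropout2_rate=0.2,
                                  dense_1=64,
                                  filterCNN=5,
                                  kernelCNN=3,
                                  LSTM1=256,
                                  LSTM2=128,
                                  recurrent_dropout=0.3,
                                  limited=False)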
Example #11
def model_shuffle_index_0(FrameSize, X, X_train, X_test, y_train, y_test,
                          epoch, earlyStopping, name, index):
    print(X.shape)
    print(FrameSize)

    model = Sequential()

    if index == 0:
        model.add(Dropout(0.3975303416300372))
        model.add(
            Conv1D(filters=4, kernel_size=3, activation='relu',
                   padding='same'))
        model.add(MaxPooling1D(pool_size=3, padding='same'))
        model.add(Dropout(0.3975303416300372))
        model.add(
            Conv1D(filters=6, kernel_size=3, activation='relu',
                   padding='same'))
        model.add(MaxPooling1D(pool_size=3, padding='same'))
        model.add(Dropout(0.3975303416300372))
        model.add(
            Conv1D(filters=7, kernel_size=3, activation='relu',
                   padding='same'))
        model.add(MaxPooling1D(pool_size=3, padding='same'))

        model.add(LSTM(107, return_sequences=True, recurrent_dropout=0.3))
        model.add(Dropout(0.3975303416300372))
        model.add(LSTM(161, return_sequences=True, recurrent_dropout=0.3))
        model.add(Dropout(0.3975303416300372))
        model.add(LSTM(386, return_sequences=False, recurrent_dropout=0.3))
        model.add(Dropout(0.3975303416300372))

        model.add(Dense(90))
        model.add(Dropout(0.3975303416300372))
        model.add(Dense(503))
        model.add(Dropout(0.3975303416300372))
        model.add(Dense(319))
        model.add(Dropout(0.3975303416300372))
        model.add(Dense(151))
        model.add(Dropout(0.3975303416300372))

        model.add(Dense(12, activation='sigmoid'))
    elif index == 1:
        model.add(Dropout(0.1135579744586698))
        model.add(
            Conv1D(filters=4, kernel_size=5, activation='relu',
                   padding='same'))
        model.add(MaxPooling1D(pool_size=4, padding='same'))

        model.add(LSTM(420, return_sequences=True, recurrent_dropout=0.3))
        model.add(Dropout(0.1135579744586698))
        model.add(LSTM(67, return_sequences=True, recurrent_dropout=0.3))
        model.add(Dropout(0.1135579744586698))
        model.add(LSTM(231, return_sequences=False, recurrent_dropout=0.3))
        model.add(Dropout(0.1135579744586698))

        model.add(Dense(112))
        model.add(Dropout(0.1135579744586698))
        model.add(Dense(92))
        model.add(Dropout(0.1135579744586698))

        model.add(Dense(12, activation='sigmoid'))
    elif index == 2:
        model.add(Dropout(0.3211287914743064))
        model.add(
            Conv1D(filters=7, kernel_size=3, activation='relu',
                   padding='same'))
        model.add(MaxPooling1D(pool_size=3, padding='same'))
        model.add(Dropout(0.3211287914743064))
        model.add(
            Conv1D(filters=4, kernel_size=5, activation='relu',
                   padding='same'))
        model.add(MaxPooling1D(pool_size=4, padding='same'))
        model.add(Dropout(0.3211287914743064))
        model.add(
            Conv1D(filters=5, kernel_size=4, activation='relu',
                   padding='same'))
        model.add(MaxPooling1D(pool_size=4, padding='same'))

        model.add(LSTM(404, return_sequences=False, recurrent_dropout=0.3))
        model.add(Dropout(0.3211287914743064))

        model.add(Dense(69))
        model.add(Dropout(0.3211287914743064))
        model.add(Dense(70))
        model.add(Dropout(0.3211287914743064))
        model.add(Dense(171))
        model.add(Dropout(0.3211287914743064))
        model.add(Dense(453))
        model.add(Dropout(0.3211287914743064))

        model.add(Dense(12, activation='sigmoid'))
    elif index == 3:

        model.add(Dropout(0.3584182599371434))
        model.add(
            Conv1D(filters=8, kernel_size=5, activation='relu',
                   padding='same'))
        model.add(MaxPooling1D(pool_size=5, padding='same'))
        model.add(Dropout(0.3584182599371434))
        model.add(
            Conv1D(filters=7, kernel_size=5, activation='relu',
                   padding='same'))
        model.add(MaxPooling1D(pool_size=5, padding='same'))

        model.add(LSTM(438, return_sequences=True, recurrent_dropout=0.3))
        model.add(Dropout(0.3584182599371434))
        model.add(LSTM(500, return_sequences=False, recurrent_dropout=0.3))
        model.add(Dropout(0.3584182599371434))

        model.add(Dense(81))
        model.add(Dropout(0.3584182599371434))
        model.add(Dense(267))
        model.add(Dropout(0.3584182599371434))
        model.add(Dense(388))
        model.add(Dropout(0.3584182599371434))

        model.add(Dense(12, activation='sigmoid'))

    model.compile(loss=masked_loss_function,
                  optimizer='Adam',
                  metrics=[masked_accuracy])

    history = model.fit(X_train,
                        y_train,
                        epochs=epoch,
                        batch_size=128,
                        verbose=2,
                        validation_data=(X_test, y_test),
                        callbacks=[
                            earlyStopping,
                            ModelCheckpoint('result/CNN256_LSTM128_64_2.h5',
                                            monitor='val_masked_accuracy',
                                            mode='max',
                                            save_best_only=True)
                        ])

    plot.plot(history, ("LRCN" + name))

    score = ROC_PR.ROC(model, X_test, y_test, ("LRCN" + name), True)
    return score, ROC_PR.ROC_Score(model, X_train, y_train, limited=False)