Example #1
# Assumed imports (NestedLSTM as published in titu1994/Nested-LSTM):
from keras.layers import Input, Dense, Conv1D, LeakyReLU, Concatenate
from keras.models import Model
from keras.initializers import he_normal
from keras.optimizers import Adam
from nested_lstm import NestedLSTM

def build_multi_cr_lstm_model(ts, fea_dim):
    # define the inputs (batch size left dynamic)
    batch_size, timesteps, input_dim = None, ts, 1
    # inputs = Input(shape=(ts, fea_dim))
    inputs = Input(batch_shape=(batch_size, ts, fea_dim))
    # ########################################
    # CNN branch & NestedLSTM branch 1
    cnn_left_out1 = Conv1D(filters=32, kernel_size=3, strides=3, kernel_initializer=he_normal(seed=3))(inputs)
    act_left_out1 = LeakyReLU()(cnn_left_out1)

    lstm_left_out1 = NestedLSTM(32, depth=2, dropout=0, recurrent_dropout=0.0)(act_left_out1)

    # #########################################
    # CNN branch & NestedLSTM branch 2
    cnn_right_out1 = Conv1D(filters=32, kernel_size=5, strides=3, kernel_initializer=he_normal(seed=3))(inputs)
    act_right_out1 = LeakyReLU()(cnn_right_out1)

    lstm_right_out1 = NestedLSTM(32, depth=2, dropout=0, recurrent_dropout=0.0)(act_right_out1)

    # #########################################
    # CNN branch & NestedLSTM branch 3
    cnn_mid_out1 = Conv1D(filters=32, kernel_size=2, strides=3, kernel_initializer=he_normal(seed=3))(inputs)
    act_mid_out1 = LeakyReLU()(cnn_mid_out1)

    lstm_mid_out1 = NestedLSTM(32, depth=2, dropout=0, recurrent_dropout=0.0)(act_mid_out1)

    # ############################################
    # stack a new dense layer on top
    concat_output = Concatenate(axis=1)([lstm_left_out1, lstm_mid_out1, lstm_right_out1])
    outputs = Dense(1)(concat_output)
    model_func = Model(inputs=inputs, outputs=outputs)
    model_func.compile(loss='mse', optimizer=Adam(lr=0.02, decay=0.003), metrics=['mse'])
    return model_func
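A minimal usage sketch for build_multi_cr_lstm_model; the window length, feature count, and dummy arrays below are illustrative assumptions, not from the original source:

import numpy as np

ts, fea_dim = 30, 4                    # hypothetical window length and feature count
X = np.random.rand(500, ts, fea_dim)   # dummy training windows
y = np.random.rand(500, 1)             # dummy regression targets

model = build_multi_cr_lstm_model(ts, fea_dim)
model.fit(X, y, batch_size=32, epochs=10, verbose=2)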
Example #2
import numpy as np
from keras.models import Sequential
from keras.layers import Dense
from sklearn.metrics import accuracy_score, f1_score
from nested_lstm import NestedLSTM  # assumed import path for the custom layer

def Nestlstm_models():
    train_label1, train_data1, test_label1, test_data1 = load_data_old()
    model1 = Sequential()
    # model1.add(NestedLSTM(64, depth=2, dropout=0, recurrent_dropout=0.2,activation='relu',return_sequences=True))
    # model1.add(NestedLSTM(64, depth=2, dropout=0, recurrent_dropout=0,activation='relu',return_sequences=True))
    # model1.add(NestedLSTM(64, depth=2, dropout=0, recurrent_dropout=0,activation='relu',return_sequences=True))
    model1.add(NestedLSTM(64, depth=2, dropout=0, recurrent_dropout=0.2, activation='relu'))
    model1.add(Dense(2, activation='softmax'))
    model1.compile(loss='sparse_categorical_crossentropy', optimizer='Nadam', metrics=['accuracy'])
    model1.fit(train_data1, train_label1, batch_size=20, epochs=20, verbose=2)
    a = np.argmax(model1.predict(test_data1), axis=1)
    # print("Initial data classification results:\n",test_label1)
    print("NLSTM classification results:\n",a)
    NLSTM_AC = accuracy_score(test_label1, a)
    NLSTM_f1 = f1_score(test_label1, a, average='macro')
    # print("LSTM_AC,LSTM_f1",LSTM_AC,LSTM_f1)
    return NLSTM_AC,NLSTM_f1
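A one-line usage sketch for the routine above, assuming load_data_old() is available in scope:

acc, f1 = Nestlstm_models()
print("NLSTM accuracy: {:.4f}, macro-F1: {:.4f}".format(acc, f1))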
Example #3
from keras.layers import Input, Dense, Conv1D, MaxPooling1D, Flatten, concatenate
from keras.models import Model
from nested_lstm import NestedLSTM  # assumed import path for the custom layer

def multi_head_cnn_model(train_x, train_y):
    '''
    Define the multi-head CNN model: one CNN + NestedLSTM branch per input feature.
    '''
    # train_x, train_y = sliding_window(train, sw_width)
    n_timesteps, n_features, n_outputs = train_x.shape[1], train_x.shape[2], train_y.shape[1]
    in_layers, out_layers = [], []  # one CNN sub-model per feature sequence
    for i in range(n_features):
        inputs = Input(shape=(n_timesteps, 1))

        conv1 = Conv1D(filters=32, kernel_size=3, activation='relu')(inputs)
        conv2 = Conv1D(filters=32, kernel_size=3, activation='relu')(conv1)
        pool1 = MaxPooling1D(pool_size=2)(conv2)
        x = NestedLSTM(32, depth=2, dropout=0, recurrent_dropout=0.0)(pool1)
        flat = Flatten()(x)  # the NestedLSTM output is already 2D, so Flatten is a pass-through here
        in_layers.append(inputs)
        out_layers.append(flat)

    merged = concatenate(out_layers)  # merge the per-feature sub-models

    dense1 = Dense(200, activation='relu')(merged)  # dense layers interpret the merged features
    dense2 = Dense(100, activation='relu')(dense1)
    outputs = Dense(n_outputs)(dense2)
    model = Model(inputs=in_layers, outputs=outputs)

    model.compile(loss='mse', optimizer='adam', metrics=['accuracy'])
    model.summary()

    # plot_model(model, to_file='multi-head-cnn-energy-usage-prediction.png',
    #            show_shapes=True, show_layer_names=True, dpi=300)

    # input_data = [train_x[:, :, i].reshape((train_x.shape[0], n_timesteps, 1)) for i in range(n_features)]

    # Fitting here is only for demo convenience and for plotting the loss curve; it is not
    # recommended, since it effectively trains for twice the epochs. Either save the model
    # and reload it for prediction, or define the prediction function here to skip the
    # extra call.
    # model.fit(input_data, train_y, epochs=epochs, batch_size=batch_size, verbose=verbose)
    # history = model.fit(input_data, train_y, epochs=epochs, batch_size=batch_size, verbose=verbose)

    return model
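The commented-out input_data line above is the key usage detail: each feature column must be fed to its own head as a (samples, timesteps, 1) array. A minimal sketch with hypothetical shapes:

import numpy as np

train_x = np.random.rand(200, 14, 8)   # 200 samples, 14 timesteps, 8 features (dummy)
train_y = np.random.rand(200, 7)       # 7 outputs (dummy)

model = multi_head_cnn_model(train_x, train_y)
# split the feature axis into one (samples, timesteps, 1) array per head
input_data = [train_x[:, :, i].reshape((train_x.shape[0], train_x.shape[1], 1))
              for i in range(train_x.shape[2])]
model.fit(input_data, train_y, epochs=5, batch_size=16, verbose=2)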
Example #4
        model.add(TimeDistributed(Conv2D(32, (3, 3), padding='same')))
        model.add(Activation('relu'))
        model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2),padding='same')))

        model.add(TimeDistributed(Conv2D(64, (3, 3), padding='same')))
        model.add(Activation('relu'))
        model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2),padding='same')))

        model.add(TimeDistributed(Conv2D(128, (3, 3), padding='same')))
        model.add(Activation('relu'))
        model.add(TimeDistributed(MaxPooling2D(pool_size=(2, 2), padding='same')))

        model.add(TimeDistributed(Flatten()))

        model.add(NestedLSTM(800, depth=2))
        model.add(Dropout(0.2))
        model.add(Dense(link_num*pre_steps))

        model.summary()

        model.compile(optimizer=optimizers.RMSprop(lr=0.001), loss='mse', metrics=[mean_absolute_percentage_error])

        print('Train...')
        model.fit(x_train, y_train,
                  batch_size=32,
                  epochs=20, validation_data=(x_test, y_test),
                  callbacks=[ModelCheckpoint('./h5/cnn+nlstm_{}_{}.h5'.format(str(timesteps), str(pre_steps)),
                                             monitor='val_loss', save_best_only=True,
                                             save_weights_only=False, verbose=1)])

        print('evaluation.....')
        score = model.evaluate(x_test, y_test)
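Since NestedLSTM is a custom layer, reloading the checkpoint saved above requires passing it via custom_objects; a sketch, assuming the same import path as the other examples:

from keras.models import load_model
from nested_lstm import NestedLSTM  # assumed import path

best = load_model('./h5/cnn+nlstm_{}_{}.h5'.format(str(timesteps), str(pre_steps)),
                  custom_objects={'NestedLSTM': NestedLSTM})
score = best.evaluate(x_test, y_test)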
Example #5
    # input dim
    vocabulary_size = len(vocabulary_inv_train)
    # output dim
    embedding_vector_length = 220

    model = Sequential()
    model.add(Embedding(vocabulary_size, embedding_vector_length, input_length=max_review_length))
    model.add(Conv1D(filters=128, kernel_size=3, padding='same', activation='relu'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Conv1D(filters=96, kernel_size=3, padding='same', activation='relu'))
    model.add(MaxPooling1D(pool_size=2))
    model.add(Conv1D(filters=64, kernel_size=3, padding='same', activation='relu'))
    model.add(MaxPooling1D(pool_size=2))

    model.add(NestedLSTM(250, depth=5, return_sequences=True, name="NLSTM1"))
    model.add(Dropout(0.2))

    model.add(NestedLSTM(350, depth=5, return_sequences=True, name="NLSTM2"))
    model.add(Dropout(0.2))

    model.add(NestedLSTM(250, depth=3, name="NLSTM3"))
    model.add(Dropout(0.2))

    model.add(Dense(50, activation='softmax'))

    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])

    # file_path = "weights.best.hdf5"
    checkpoint = ModelCheckpoint('weights.{epoch:03d}-{val_acc:.4f}.hdf5', monitor='val_acc', verbose=1,
                                 save_best_only=True, mode='max')
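The checkpoint above is only constructed; it takes effect when passed to fit. A sketch with hypothetical array names standing in for the surrounding script's data:

model.fit(X_train, y_train,                 # hypothetical training arrays
          validation_data=(X_val, y_val),   # needed because the checkpoint monitors val_acc
          batch_size=64, epochs=10,
          callbacks=[checkpoint])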
Example #6
x_val = sequence.pad_sequences(x_val, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)

# stack an IndRNN front-end on a NestedLSTM classifier
print('Build model...')
model = Sequential()
model.add(Embedding(max_features, 128))
model.add(
    IndRNN(128,
           recurrent_clip_min=-1,
           recurrent_clip_max=-1,
           dropout=0.5,
           recurrent_dropout=0.0,
           return_sequences=True))
model.add(NestedLSTM(128, depth=2, dropout=0.0, recurrent_dropout=0.0))
model.add(Dense(10, activation='softmax'))

# try using different optimizers and different optimizer configs
model.compile(loss='categorical_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

model.summary()

print('Train...')
model.fit(x_train,
          y_train,
          batch_size=batch_size,
          epochs=1,
          validation_data=(x_val, y_val))
Example #7
print('Loading data...')
(x_train, y_train), (x_test, y_test) = imdb.load_data(num_words=max_features)
print(len(x_train), 'train sequences')
print(len(x_test), 'test sequences')

print('Pad sequences (samples x time)')
x_train = sequence.pad_sequences(x_train, maxlen=maxlen)
x_test = sequence.pad_sequences(x_test, maxlen=maxlen)
print('x_train shape:', x_train.shape)
print('x_test shape:', x_test.shape)

print('Build model...')
model = Sequential()
model.add(Embedding(max_features, 64))
model.add(NestedLSTM(
    600, depth=2, dropout=0.2,
    recurrent_dropout=0.2))  # configuration matches 4.47 Million parameters
model.add(Dense(1, activation='sigmoid'))

# try using different optimizers and different optimizer configs
model.compile(loss='binary_crossentropy',
              optimizer='adam',
              metrics=['accuracy'])

model.summary()

print('Train...')
model.fit(x_train,
          y_train,
          batch_size=batch_size,
          epochs=15,