# load data
print("loading data...")
ts = time.time()
fname = os.path.join(
    path_cache, 'BikeNYC_C{}_P{}_T{}.h5'.format(len_closeness, len_period,
                                                len_trend))
if os.path.exists(fname) and CACHEDATA:
    X_train, Y_train, X_test, Y_test, mmn, external_dim, timestamp_train, timestamp_test = read_cache(
        fname)
    print("load %s successfully" % fname)
else:
    X_train, Y_train, X_test, Y_test, mmn, external_dim, timestamp_train, timestamp_test = BikeNYC.load_data(
        T=T,
        nb_flow=nb_flow,
        len_closeness=len_closeness,
        len_period=len_period,
        len_trend=len_trend,
        len_test=len_test,
        preprocess_name='preprocessing_bikenyc.pkl',
        meta_data=True,
        datapath=DATAPATH)
    if CACHEDATA:
        cache(fname, X_train, Y_train, X_test, Y_test, external_dim,
              timestamp_train, timestamp_test)

print("\n days (test): ", [v[:8] for v in timestamp_test[0::T]])
print("\nelapsed time (loading data): %.3f seconds\n" % (time.time() - ts))

print('=' * 10)

# training-test-evaluation iterations
for i in range(0, 10):
    pass  # iteration body truncated in the original snippet


# Example #2

def main():
    # load data
    print("loading data...")
    X_train_ALL, X_test_ALL, X_train, Y_train, X_test, Y_test, mmn, external_dim, timestamp_train, timestamp_test = BikeNYC.load_data(
        T=T,
        nb_flow=nb_flow,
        len_closeness=len_closeness,
        len_period=len_period,
        len_trend=len_trend,
        len_test=len_test,
        preprocess_name='preprocessing.pkl',
        meta_data=True)

    print("\n days (test): ", [v[:8] for v in timestamp_test[0::T]])

    print('=' * 10)
    print("compiling model...")
    print("**the first time, it takes a few minutes to compile if you use [Theano] as the backend**")
    print('external_dim is:', external_dim)
    model = build_model(external_dim)
    hyperparams_name = 'binCNN_c{}.p{}.t{}.resunit{}.lr{}'.format(
        len_closeness, len_period, len_trend, nb_residual_unit, lr)
    fname_param = os.path.join('MODEL', '{}.best.h5'.format(hyperparams_name))

    early_stopping = EarlyStopping(monitor='val_rmse', patience=5, mode='min')
    model_checkpoint = ModelCheckpoint(fname_param,
                                       monitor='val_rmse',
                                       verbose=0,
                                       save_best_only=True,
                                       mode='min')

    print('=' * 10)
    print("training model...")

    print('X_train_ALL shape is:', X_train_ALL.shape)
    print('X_test_ALL shape is:', X_test_ALL.shape)

    history = model.fit(X_train_ALL,
                        Y_train,
                        nb_epoch=nb_epoch,
                        batch_size=batch_size,
                        validation_split=0.1,
                        callbacks=[early_stopping, model_checkpoint],
                        verbose=1)
    model.save_weights(os.path.join('MODEL', '{}.h5'.format(hyperparams_name)),
                       overwrite=True)
    pickle.dump((history.history),
                open(
                    os.path.join(path_result,
                                 '{}.history.pkl'.format(hyperparams_name)),
                    'wb'))

    print('=' * 10)
    print('evaluating using the model that has the best loss on the validation set')

    model.load_weights(fname_param)
    score = model.evaluate(X_train_ALL,
                           Y_train,
                           batch_size=Y_train.shape[0] // 48,
                           verbose=0)
    print('Train score: %.6f rmse (norm): %.6f rmse (real): %.6f' %
          (score[0], score[1], score[1] *
           (mmn._max - mmn._min) / 2. * m_factor))

    score = model.evaluate(X_test_ALL,
                           Y_test,
                           batch_size=Y_test.shape[0],
                           verbose=0)
    print('Test score: %.6f rmse (norm): %.6f rmse (real): %.6f' %
          (score[0], score[1], score[1] *
           (mmn._max - mmn._min) / 2. * m_factor))
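    # Note on the "real" RMSE above: mmn is the MinMaxNormalization object,
    # which scales flows into [-1, 1], so a normalized error converts back to
    # flow counts via err * (mmn._max - mmn._min) / 2.; m_factor is the
    # map-size correction factor defined elsewhere in these scripts.
    # Illustrative arithmetic with hypothetical numbers: if mmn._max = 267,
    # mmn._min = 0 and m_factor = 1, a normalized RMSE of 0.05 maps to
    # 0.05 * (267 - 0) / 2. = 6.675 flows.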

    print('=' * 10)
    print("training model (cont)...")
    fname_param = os.path.join('MODEL',
                               '{}.cont.best.h5'.format(hyperparams_name))
    model_checkpoint = ModelCheckpoint(fname_param,
                                       monitor='rmse',
                                       verbose=0,
                                       save_best_only=True,
                                       mode='min')
    history = model.fit(X_train_ALL,
                        Y_train,
                        nb_epoch=nb_epoch_cont,
                        verbose=1,
                        batch_size=batch_size,
                        callbacks=[model_checkpoint],
                        validation_data=(X_test_ALL, Y_test))
    pickle.dump(
        (history.history),
        open(
            os.path.join(path_result,
                         '{}.cont.history.pkl'.format(hyperparams_name)),
            'wb'))
    model.save_weights(os.path.join('MODEL',
                                    '{}_cont.h5'.format(hyperparams_name)),
                       overwrite=True)

    print('=' * 10)
    print('evaluating using the final model')
    score = model.evaluate(X_train_ALL,
                           Y_train,
                           batch_size=Y_train.shape[0] // 48,
                           verbose=0)
    print('Train score: %.6f rmse (norm): %.6f rmse (real): %.6f' %
          (score[0], score[1], score[1] *
           (mmn._max - mmn._min) / 2. * m_factor))

    score = model.evaluate(X_test_ALL,
                           Y_test,
                           batch_size=Y_test.shape[0],
                           verbose=0)
    print('Test score: %.6f rmse (norm): %.6f rmse (real): %.6f' %
          (score[0], score[1], score[1] *
           (mmn._max - mmn._min) / 2. * m_factor))

def bikenyc_evaluation():
    # parameters
    DATAPATH = '../data'  # data path; you may set your own with the global environment variable DATAPATH
    CACHEDATA = True  # cache data or NOT
    path_cache = os.path.join(DATAPATH, 'CACHE', 'ST-ResNet')  # cache path

    T = 24  # number of time intervals in one day
    lr = 0.0002  # learning rate
    len_closeness = 3  # length of closeness dependent sequence
    len_period = 4  # length of period dependent sequence
    len_trend = 4  # length of trend dependent sequence
    nb_residual_unit = 4  # paper says 4 for BikeNYC

    nb_flow = 2
    days_test = 10
    len_test = T * days_test
    map_height, map_width = 16, 8  # grid size

    if CACHEDATA and not os.path.isdir(path_cache):
        os.makedirs(path_cache)

    # load data
    print("loading data...")
    ts = time.time()
    fname = os.path.join(
        path_cache, 'BikeNYC_C{}_P{}_T{}.h5'.format(len_closeness, len_period,
                                                    len_trend))
    if os.path.exists(fname) and CACHEDATA:
        X_train, Y_train, X_test, Y_test, mmn, external_dim, timestamp_train, timestamp_test = read_cache(
            fname, 'preprocessing_bikenyc.pkl')
        print("load %s successfully" % fname)
    else:
        X_train, Y_train, X_test, Y_test, mmn, external_dim, timestamp_train, timestamp_test = BikeNYC.load_data(
            T=T,
            nb_flow=nb_flow,
            len_closeness=len_closeness,
            len_period=len_period,
            len_trend=len_trend,
            len_test=len_test,
            preprocess_name='preprocessing_bikenyc.pkl',
            meta_data=True,
            datapath=DATAPATH)
        if CACHEDATA:
            cache(fname, X_train, Y_train, X_test, Y_test, external_dim,
                  timestamp_train, timestamp_test)

    print("\n days (test): ", [v[:8] for v in timestamp_test[0::T]])
    print("\nelapsed time (loading data): %.3f seconds\n" % (time.time() - ts))

    # build model
    model = build_model(external_dim, nb_residual_unit, map_height, map_width,
                        len_closeness, len_period, len_trend)

    model_fname = 'BikeNYC.c3.p4.t4.resunit4.iter0.cont.best.h5'
    model.load_weights(os.path.join('../best_models', 'ST-ResNet',
                                    model_fname))

    # evaluate and save results
    dict_multi_score = multi_step_2D(model,
                                     X_test,
                                     Y_test,
                                     mmn,
                                     len_closeness,
                                     step=5)

    for i in range(len(dict_multi_score)):
        csv_name = os.path.join('results', f'bikenyc_step{i+1}.csv')
        save_to_csv(dict_multi_score[i], csv_name)

# Example #4

def main():
    # load data
    print("loading data...")
    X_train, Y_train, X_test, Y_test, mmn, external_dim, timestamp_train, timestamp_test = BikeNYC.load_data(
        T=T, nb_flow=nb_flow, len_closeness=len_closeness, len_period=len_period, len_trend=len_trend, len_test=len_test,
        preprocess_name='preprocessing.pkl', meta_data=True)

    # print("\n days (test): ", [v[:8] for v in timestamp_test[0::T]])
    print('=' * 10 + ' Build model and start training ' + '=' * 10)
    cost, optimizer, accuracy, model_output, XC, XP, XT, X_ext, Y = build_model(external_dim)
    # Initialize all the variables
    init = tf.global_variables_initializer()
    m = X_train[0].shape[0]
    seed = 0
    print("number of data points %i" % m)

    # Start the session to compute the tensorflow graph
    print_cost = True

    # timer
    import time
    start = time.time()

    # variables for early_stopping
    early_stopping_num = 0
    epoch_cost_prev = float('inf')
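    # (early_stopping_num counts consecutive epochs whose cost got worse;
    # training aborts once it exceeds 5, mirroring the patience=5 used with
    # Keras's EarlyStopping in the other examples)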

    # model saver
    saver = tf.train.Saver()

    # start training
    with tf.Session() as sess:
        # Initialize variables
        sess.run(init)
        for epoch in range(nb_epoch):
            epoch_cost = 0.
            # shuffle dataset and use random_mini_batches for training
            num_minibatches = int(m / batch_size)
            seed = seed + 1
            minibatches = random_mini_batches(X_train, Y_train, m, batch_size, seed)
            for minibatch in minibatches:
                # Select a minibatch
                (minibatch_X, minibatch_Y) = minibatch
                _, minibatch_cost = sess.run([optimizer, cost],
                                             feed_dict={XC: minibatch_X[0],
                                                        XP: minibatch_X[1],
                                                        XT: minibatch_X[2],
                                                        X_ext: minibatch_X[3],
                                                        Y: minibatch_Y})
                # update cost at current epoch
                epoch_cost += minibatch_cost / num_minibatches

            # early_stopping
            if epoch_cost > epoch_cost_prev:
                early_stopping_num += 1
            else:
                early_stopping_num = 0
            if early_stopping_num > 5:
                print("Training early stops at epcho %i" % epoch)
                print("Current training error is %f" % epoch_cost)
                break # break from iterations

            # update previous cost
            epoch_cost_prev = epoch_cost
            # print the cost every 5 epochs
            if print_cost and epoch % 5 == 0:
                print("Cost after epoch %i: %f" % (epoch, epoch_cost))

        # save model
        saver.save(sess, 'saved_model/model', global_step=epoch)

        # examine RMSE for both train and test
        print("="*10 + " Check model accuracy " + "="*10)
        print ("Train Accuracy: %f" % accuracy.eval({XC: X_train[0],
                                                 XP: X_train[1],
                                                 XT: X_train[2],
                                                 X_ext: X_train[3],
                                                 Y: Y_train}))
        print ("Test Accuracy: %f" % accuracy.eval({XC: X_test[0],
                                                XP: X_test[1],
                                                XT: X_test[2],
                                                X_ext: X_test[3],
                                                Y: Y_test}))
    print(" ")
    end = time.time()
    print("Running time of training and evaluation in seconds %f" % (end-start))

def main():
    # load the data needed for prediction
    print("loading data...")
    X_train, Y_train, X_test, Y_test, external_dim, timestamp_train, timestamp_test = BikeNYC.load_data(
        T=T,
        nb_flow=nb_flow,
        len_closeness=len_closeness,
        len_period=len_period,
        len_trend=len_trend,
        len_test=len_test,
        preprocess_name='preprocessing.pkl',
        meta_data=True)

    for _X in X_train:
        print('the shape:', _X.shape)

    model = build_model(external_dim)
    fname_param = '/home/fly/PycharmProjects/version2-baseline-4-28/DeepST-KDD_for_predict/scripts/AirPrediction/MODEL/c{len}.p{per}.t{trend}.resunit{res_num}.lr{learning_rate}.cont.best.h5'.format(
        len=len_closeness,
        per=len_period,
        trend=len_trend,
        res_num=nb_residual_unit,
        learning_rate=lr)
    model.load_weights(fname_param)
    # Start predicting with the model. This part is tricky: the model takes
    # several inputs (the three temporal branches plus the external features),
    # so they must be fed as a list.
    print('Y_train type:', type(Y_train))
    '''
    This confused me for a long time -- partly because my grasp of Keras and
    of how neural networks train was not thorough enough.
    First: since the model I built takes inputs with a batch dimension, the
    data fed in at prediction time must carry one too (Keras predicts,
    evaluates, and trains in batches, and predict() is no exception). So I
    pin down the dimensions and wrap a single sample into the required input
    form, matching the shape of X, to get the prediction for that one sample
    (see the commented sketch after the predict() call below).
    Second, on understanding batches (after discussing with a senior labmate):
    image data enters the network as a 4-D input (batch, channels, height,
    width), while an LSTM takes a 3-D input (batch, sequence length, ...).
    Training runs batch by batch, which effectively adds a dimension: a
    forward pass yields a loss for every sample in the batch, those losses
    are summed within the batch, and then one backward pass is performed. So
    a model trains on one batch of data at a time; at run time the batch
    shows up as an extra leading dimension that the samples live along.
    Note also that the model definition itself never considers batches --
    folding in batches is what Keras's fit() does; batch_size appears nowhere
    in the model.
    Third: unlike TF, Keras does not feed values by name. It follows the
    computation graph to its input nodes, so you pass all the data in input
    order, set the batch size in fit(), and the data is trained per batch.
    One remaining issue: for the external factors I only folded in a single
    one-dimensional holiday feature per day (the data corresponding to the
    following time step).
    '''
    # ========================      prediction      =========================== #
    # The pattern below is one way to do it; the idea is straightforward.
    the_predict_index = 199
    y_pred = model.predict([
        X_train[0][the_predict_index].reshape(1, len_closeness, 35, 12),
        X_train[1][the_predict_index].reshape(1, len_period, 35, 12),
        X_train[2][the_predict_index].reshape(1, len_trend, 35, 12),
        X_train[3][the_predict_index].reshape(1, external_dim)
    ],
                           batch_size=1)
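    # An equivalent way to add the batch axis (a sketch, assuming numpy is
    # imported as np): np.expand_dims avoids hard-coding the 35x12 grid size.
    #
    #     single_input = [np.expand_dims(x[the_predict_index], axis=0)
    #                     for x in X_train]
    #     y_pred = model.predict(single_input, batch_size=1)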
    print('Got a prediction for timestamp', timestamp_train[the_predict_index],
          'with shape:', y_pred.shape)
    the_last_list = []
    # Assemble the per-timestamp data into one 2-D list, ready to be saved
    # to a CSV file.
    # ========================      traverse and assemble      =========================== #
    # The loop below flattens the array's feature data into one list; naive
    # timestamp concatenation turned out wrong, but list-wise appending works.
    for i in range(len(y_pred[0])):
        for ii in y_pred[0][i]:
            # This has to be written in three steps (my discovery): otherwise
            # the list becomes None, since list.append() returns None.
            ii_list = ii.tolist()
            ii_list.append(timestamp_train[i])
            the_last_list.append(ii_list)
            #print(ii_list.append(timestamp_train[i][0]))
            #the_last_list.append()

    #print(y_pred[0][0][0].tolist().append('dsa'))
    #print('the_last_list  length:',the_last_list)
    pred_array = pd.DataFrame(the_last_list,
                              columns=['PM2.5', 'PM10', 'O3', 'utc_time'])
    pred_array.to_csv(
        '/home/fly/PycharmProjects/version2-baseline-4-28/DeepST-KDD_for_predict/for_submit_data/the_predict_data.csv',
        index=False)

# Example #6

def main():
    # Load data
    print("loading data...")
    X_train, Y_train, X_test, Y_test, X_timestamps, Y_timestamps, mmn = BikeNYC.load_sequence(
        seq_length=seq_length,
        T=24,
        test_percent=0.055,
        data_numbers=test_data_nums)
    print('X_train shape is', X_train.shape)
    print('Y_train shape is', Y_train.shape)
    print('X_test shape is', X_test.shape)
    print('Y_test shape is', Y_test.shape)

    # Train the network. To use reset_states(), we must set epochs=1 and
    # train for nb_epoch iterations in a loop.
    seq = build_model()

    hyperparams_name = 'b{}.Conv2DLSTM_layers{}.SeqLen{}.Conv2D_LSTM_BikeNYC.lr{}'.format(
        batch_size, num_layers, seq_length, lr)
    fname_param = os.path.join(PATH_MODEL,
                               '{}.best.h5'.format(hyperparams_name))
    early_stopping = EarlyStopping(monitor='val_rmse', patience=10, mode='min')
    model_checkpoint = ModelCheckpoint(fname_param,
                                       monitor='val_rmse',
                                       verbose=0,
                                       save_best_only=True,
                                       mode='min')
    # for e in range(nb_epoch):
    #     seq.fit(X_train, Y_train, batch_size=batch_size, epochs=1, validation_split=0.1)
    #     seq.reset_states()
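    # (The commented loop above is the stateful-RNN pattern: with
    # stateful=True, Keras carries LSTM state across batches, so you train one
    # epoch at a time and call reset_states() yourself between epochs. The
    # single fit() call below assumes the model is not stateful.)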

    history = seq.fit(X_train,
                      Y_train,
                      batch_size=batch_size,
                      epochs=nb_epoch,
                      validation_split=0.1,
                      callbacks=[early_stopping, model_checkpoint],
                      verbose=1)

    seq.save_weights(os.path.join(PATH_MODEL,
                                  '{}.h5'.format(hyperparams_name)),
                     overwrite=True)
    pickle.dump((history.history),
                open(
                    os.path.join(PATH_RESULT,
                                 '{}.history.pkl'.format(hyperparams_name)),
                    'wb'))

    print('=' * 10)
    print('evaluating using the model that has the best loss on the validation set')

    seq.load_weights(fname_param)
    score = seq.evaluate(X_train,
                         Y_train,
                         batch_size=Y_train.shape[0] // 48,
                         verbose=0)
    print('Train score: %.6f rmse (norm): %.6f rmse (real): %.6f' %
          (score[0], score[1], score[1] *
           (mmn._max - mmn._min) / 2. * m_factor))

    score = seq.evaluate(X_test, Y_test, batch_size=Y_test.shape[0], verbose=0)
    print('Test score: %.6f rmse (norm): %.6f rmse (real): %.6f' %
          (score[0], score[1], score[1] *
           (mmn._max - mmn._min) / 2. * m_factor))

    print('=' * 10)
    print("training model (cont)...")
    fname_param = os.path.join(PATH_MODEL,
                               '{}.cont.best.h5'.format(hyperparams_name))
    model_checkpoint = ModelCheckpoint(fname_param,
                                       monitor='rmse',
                                       verbose=0,
                                       save_best_only=True,
                                       mode='min')

    history = seq.fit(X_train,
                      Y_train,
                      epochs=nb_cont_epoch,
                      verbose=1,
                      batch_size=batch_size,
                      callbacks=[model_checkpoint],
                      validation_data=(X_test, Y_test))

    pickle.dump(
        (history.history),
        open(
            os.path.join(PATH_RESULT,
                         '{}.cont.history.pkl'.format(hyperparams_name)),
            'wb'))
    seq.save_weights(os.path.join(PATH_MODEL,
                                  '{}_cont.h5'.format(hyperparams_name)),
                     overwrite=True)

    print('=' * 10)
    print('evaluating using the final model')
    score = seq.evaluate(X_train,
                         Y_train,
                         batch_size=Y_train.shape[0] // 48,
                         verbose=0)
    print('Train score: %.6f rmse (norm): %.6f rmse (real): %.6f' %
          (score[0], score[1], score[1] *
           (mmn._max - mmn._min) / 2. * m_factor))

    score = seq.evaluate(X_test, Y_test, batch_size=Y_test.shape[0], verbose=0)
    print('Test score: %.6f rmse (norm): %.6f rmse (real): %.6f' %
          (score[0], score[1], score[1] *
           (mmn._max - mmn._min) / 2. * m_factor))

# Example #7

LSTM1 = np.load('LSTM1.npy')

TT1 = 1
FF1 = 128
input1_128 = Input(shape=[FF1])
cpt_conv1 = PReLU()(input1_128)
cpt_conv1 = BatchNormalization()(cpt_conv1)
cpt_conv1 = Dropout(0.2)(cpt_conv1)
cpt_conv1 = Dense(units=2, activation='tanh')(cpt_conv1)
model1 = Model(inputs=input1_128, outputs=cpt_conv1)
model1.compile(loss='mse', optimizer=Adam(lr), metrics=[metrics.rmse])


print("loading data...")
X_train, Y_train, X_test, Y_test, mmn, external_dim, timestamp_train, timestamp_test = BikeNYC.load_data(
        T=T, nb_flow=nb_flow, len_closeness=len_closeness, len_period=len_period, len_trend=len_trend, len_test=len_test,
        preprocess_name='preprocessing.pkl', meta_data=False)
print("\n days (test): ", [v[:8] for v in timestamp_test[0::T]])

XX_train = np.load('XX_train.npy')
XXp_test = np.load('XXp_test.npy')

for X in range(8):
    for Y in range(8):
        if X <= 2:
            continue
        if X == 3 and Y <= 4:
            continue

        XTRAIN = XX_train[:, :, X, Y]
        YTRAIN = Y_train[:, :, X, Y]

# Example #8

def main():
    model = build_model(external_dim=8)
    #plot_model(model, to_file= os.path.join(path_model,'testmodel.png'), show_shapes=True)

    print("loading data...")
    three_models = True  # if True, split Closeness, Period, and Trend into three separate sub-CNNs

    # data_numbers=None uses all the data, which can be very slow.
    # data_numbers=800 uses only 800 series, for quick trials on small data.
    X_train_ALL, X_test_ALL, X_train, Y_train, X_test, Y_test, mmn, external_dim, timestamp_train, timestamp_test = BikeNYC.load_data(
        T=T,
        nb_flow=nb_flow,
        len_closeness=len_closeness,
        len_period=len_period,
        len_trend=len_trend,
        len_test=len_test,
        preprocess_name='preprocessing.pkl',
        meta_data=True,
        data_numbers=None)

    print("\n days (test): ", [v[:8] for v in timestamp_test[0::T]])

    print('=' * 10)
    print("compiling model...")
    print("**the first time, it takes a few minutes to compile if you use [Theano] as the backend**")
    print('external_dim is:', external_dim)

    hyperparams_name = 'binCNN_CPT_c{}.p{}.t{}.resunit{}.lr{}'.format(
        len_closeness, len_period, len_trend, nb_residual_unit, lr)
    fname_param = os.path.join(path_model,
                               '{}.best.h5'.format(hyperparams_name))

    early_stopping = EarlyStopping(monitor='val_rmse', patience=5, mode='min')
    model_checkpoint = ModelCheckpoint(fname_param,
                                       monitor='val_rmse',
                                       verbose=0,
                                       save_best_only=True,
                                       mode='min')

    print('=' * 10)
    print("training model...")
    history = model.fit(X_train_ALL,
                        Y_train,
                        epochs=nb_epoch,
                        batch_size=batch_size,
                        validation_split=0.1,
                        callbacks=[early_stopping, model_checkpoint],
                        verbose=1)
    model.save_weights(os.path.join(path_model,
                                    '{}.h5'.format(hyperparams_name)),
                       overwrite=True)
    pickle.dump((history.history),
                open(
                    os.path.join(path_result,
                                 '{}.history.pkl'.format(hyperparams_name)),
                    'wb'))
    print('=' * 10)
    print('evaluating using the model that has the best loss on the validation set')

    model.load_weights(fname_param)
    score = model.evaluate(X_train_ALL,
                           Y_train,
                           batch_size=Y_train.shape[0] // 48,
                           verbose=0)
    print('Train score: %.6f rmse (norm): %.6f rmse (real): %.6f' %
          (score[0], score[1], score[1] *
           (mmn._max - mmn._min) / 2. * m_factor))

    score = model.evaluate(X_test_ALL,
                           Y_test,
                           batch_size=Y_test.shape[0],
                           verbose=0)
    print('Test score: %.6f rmse (norm): %.6f rmse (real): %.6f' %
          (score[0], score[1], score[1] *
           (mmn._max - mmn._min) / 2. * m_factor))

    print('=' * 10)
    print("training model (cont)...")
    fname_param = os.path.join(path_model,
                               '{}.cont.best.h5'.format(hyperparams_name))
    model_checkpoint = ModelCheckpoint(fname_param,
                                       monitor='rmse',
                                       verbose=0,
                                       save_best_only=True,
                                       mode='min')
    history = model.fit(X_train_ALL,
                        Y_train,
                        epochs=nb_epoch_cont,
                        verbose=1,
                        batch_size=batch_size,
                        callbacks=[model_checkpoint],
                        validation_data=(X_test_ALL, Y_test))
    pickle.dump(
        (history.history),
        open(
            os.path.join(path_result,
                         '{}.cont.history.pkl'.format(hyperparams_name)),
            'wb'))
    model.save_weights(os.path.join(path_model,
                                    '{}_cont.h5'.format(hyperparams_name)),
                       overwrite=True)

    print('=' * 10)
    print('evaluating using the final model')
    score = model.evaluate(X_train_ALL,
                           Y_train,
                           batch_size=Y_train.shape[0] // 48,
                           verbose=0)
    print('Train score: %.6f rmse (norm): %.6f rmse (real): %.6f' %
          (score[0], score[1], score[1] *
           (mmn._max - mmn._min) / 2. * m_factor))

    score = model.evaluate(X_test_ALL,
                           Y_test,
                           batch_size=Y_test.shape[0],
                           verbose=0)
    print('Test score: %.6f rmse (norm): %.6f rmse (real): %.6f' %
          (score[0], score[1], score[1] *
           (mmn._max - mmn._min) / 2. * m_factor))

def main():
    # ------------------------------------     configure model and data    -------------------------------------------- #
    # load data
    print("loading data...")
    # The parameters passed to load_data determine how the loaded data is split.
    X_train, Y_train, X_test, Y_test, external_dim, timestamp_train, timestamp_test = BikeNYC.load_data(
        T=T,
        nb_flow=nb_flow,
        len_closeness=len_closeness,
        len_period=len_period,
        len_trend=len_trend,
        len_test=len_test,
        preprocess_name='preprocessing.pkl',
        meta_data=True)

    print("\n days (test): ", [v[:8] for v in timestamp_test[0::T]])

    print('=' * 10)
    print("compiling model...")
    print("**the first time, it takes a few minutes to compile if you use [Theano] as the backend**")
    model = build_model(external_dim)

    # ------------------------------------     first training stage    -------------------------------------------- #
    hyperparams_name = 'c{}.p{}.t{}.resunit{}.lr{}'.format(
        len_closeness, len_period, len_trend, nb_residual_unit, lr)
    fname_param = os.path.join('MODEL', '{}.best.h5'.format(hyperparams_name))

    early_stopping = EarlyStopping(monitor='val_rmse', patience=5, mode='min')
    model_checkpoint = ModelCheckpoint(fname_param,
                                       monitor='val_rmse',
                                       verbose=0,
                                       save_best_only=True,
                                       mode='min')

    # ————————————  grid search for hyperparameters (disabled)  ————————— #
    # model1=build_model(external_dim)
    # batch_size = [10, 20]
    # epochs = [10, 50, 100]
    # param_grid = dict(batch_size=batch_size, nb_epoch=epochs)
    # kfold = StratifiedKFold(n_splits=10)
    # grid = GridSearchCV(estimator=model1, param_grid=param_grid, n_jobs=-1, scoring=smape, cv=kfold)
    # print('=' * 10)
    # print("training model...",Y_train.shape)
    # history = grid.fit(X_train, Y_train)
    # print("Best: %f using %s" % (history.best_score_, history.best_params_))
    # for params, mean_score, scores in history.grid_scores_:
    #     print("%f (%f) with: %r" % (scores.mean(), scores.std(), params))

    # ————————————————————————————————————#
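    # A hypothetical sketch of how the grid search above could actually run:
    # scikit-learn needs the Keras model wrapped in an estimator, e.g. the
    # keras.wrappers.scikit_learn.KerasRegressor wrapper:
    #
    #     from keras.wrappers.scikit_learn import KerasRegressor
    #     estimator = KerasRegressor(
    #         build_fn=lambda: build_model(external_dim), verbose=0)
    #     grid = GridSearchCV(estimator=estimator, param_grid=param_grid, cv=3)
    #     grid_result = grid.fit(X_train, Y_train)
    #
    # Caveat: GridSearchCV expects a single array-like X, so the multi-input
    # list X_train would have to be merged or wrapped before this works.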

    print('=' * 10)
    print("training model...", Y_train.shape)
    history = model.fit(X_train,
                        Y_train,
                        nb_epoch=nb_epoch,
                        batch_size=batch_size,
                        validation_split=0.1,
                        callbacks=[early_stopping, model_checkpoint],
                        verbose=1)
    '''
    TODO: add a grid search here to find the best hyperparameters
    (see the disabled block and sketch above for a starting point).
    '''
    model.save_weights(os.path.join('MODEL', '{}.h5'.format(hyperparams_name)),
                       overwrite=True)
    pickle.dump((history.history),
                open(
                    os.path.join(path_result,
                                 '{}.history.pkl'.format(hyperparams_name)),
                    'wb'))

    print('=' * 10)
    print('evaluating using the model that has the best loss on the validation set')

    model.load_weights(fname_param)
    score = model.evaluate(X_train,
                           Y_train,
                           batch_size=Y_train.shape[0] // 24,
                           verbose=0)
    print('Train score: %.6f rmse (norm): %.6f smape (norm): %.6f' %
          (score[0], score[1], score[2]))

    score = model.evaluate(X_test,
                           Y_test,
                           batch_size=Y_test.shape[0],
                           verbose=0)
    print('Test score: %.6f rmse (norm): %.6f smape (norm): %.6f' %
          (score[0], score[1], score[2]))

    showResult(
        os.path.join(path_result, '{}.history.pkl'.format(hyperparams_name)))
    # ---------------------------------------      second training stage        ----------------------------------------- #

    print('=' * 10)
    print("training model (cont)...")
    fname_param = os.path.join('MODEL',
                               '{}.cont.best.h5'.format(hyperparams_name))
    model_checkpoint = ModelCheckpoint(fname_param,
                                       monitor='rmse',
                                       verbose=0,
                                       save_best_only=True,
                                       mode='min')

    history = model.fit(X_train,
                        Y_train,
                        nb_epoch=nb_epoch_cont,
                        verbose=1,
                        batch_size=batch_size,
                        callbacks=[early_stopping, model_checkpoint],
                        validation_data=(X_test, Y_test))
    pickle.dump(
        (history.history),
        open(
            os.path.join(path_result,
                         '{}.cont.history.pkl'.format(hyperparams_name)),
            'wb'))
    model.save_weights(os.path.join('MODEL',
                                    '{}_cont.h5'.format(hyperparams_name)),
                       overwrite=True)

    print('=' * 10)
    print('evaluating using the final model')
    score = model.evaluate(X_train,
                           Y_train,
                           batch_size=Y_train.shape[0] // 24,
                           verbose=0)
    print('Train score: %.6f rmse (norm): %.6f smape (norm): %.6f' %
          (score[0], score[1], score[2]))

    score = model.evaluate(X_test,
                           Y_test,
                           batch_size=Y_test.shape[0],
                           verbose=1)
    print('Test score: %.6f rmse (norm): %.6f smape (norm): %.6f' %
          (score[0], score[1], score[2]))

    showResult(
        os.path.join(path_result,
                     '{}.cont.history.pkl'.format(hyperparams_name)))