import numpy as np  # FIX: np.random.seed was called below but numpy was never imported
import tensorflow as tf
from keras import backend
from keras.backend.tensorflow_backend import set_session

from deepst.config import Config
from deepst.datasets import ShenyangRegular, DalianRegular
from deepst.models.STResNet import stresnet
import deepst.metrics as metrics

np.random.seed(1337)  # for reproducibility

# Let the TF session grow GPU memory on demand instead of grabbing it all
# up front (TF 1.x API).
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
set_session(tf.Session(config=config))

# parameters
# Data path; you may set your own data path with the DATAPATH environment
# variable.
DATAPATH = Config().DATAPATH
nb_epoch = 500        # number of epochs at the training stage
nb_epoch_cont = 100   # number of epochs at the continued-training stage
batch_size = 32       # batch size
T = 48                # number of time intervals in one day
nbfilter = 64         # number of convolution filters
lr_arr = [0.002, 0.005]  # learning rates to sweep over
# lr_arr = [0.002]
len_closeness = 3     # length of closeness dependent sequence
len_period = 1        # length of period dependent sequence
len_trend = 1         # length of trend dependent sequence
nb_residual_unit = 4  # number of residual units
nb_flow = 2           # there are two types of flows: new-flow and end-flow
# Divide data into two subsets: Train & Test; the test set is the last 10 days.
import copy
import os
import sys  # FIX: sys.argv is used below but sys was never imported

import numpy as np  # FIX: np.random.seed was called below but numpy was never imported

from deepst.config import Config
from deepst.datasets import TaxiDD
from deepst.datasets import TaxiDD_3_frame
from deepst.models.STResNet import stresnet
import deepst.metrics as metrics

# Pin this job to GPU 1; must be set before TensorFlow initialises CUDA.
os.environ['CUDA_VISIBLE_DEVICES'] = "1"

np.random.seed(1337)  # for reproducibility

# parameters
# Data path; you may set your own data path with the DATAPATH environment
# variable.
DATAPATH = Config().DATAPATH
CACHEDATA = True  # cache data or NOT
path_cache = os.path.join(DATAPATH, 'CACHE')  # cache path
nb_epoch = 500       # number of epochs at the training stage
nb_epoch_cont = 100  # number of epochs at the continued-training stage
batch_size = 32      # batch size
T = 480              # number of time intervals in one day
lr = 0.0002          # learning rate
len_closeness = 3    # length of closeness dependent sequence
len_period = 1       # length of period dependent sequence
len_trend = 1        # length of trend dependent sequence
len_closeness_test = 5  # length of closeness dependent sequence (test)
len_period_test = 1     # length of period dependent sequence (test)
len_trend_test = 1      # length of trend dependent sequence (test)

if len(sys.argv) == 1:
    print(__doc__)
import numpy as np
import math
import pandas as pd
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping, ModelCheckpoint
from deepst.models.STResNet import stresnet
from deepst.config import Config
import deepst.metrics as metrics
from deepst.datasets import BikeNYC
from ma_util.offline_val import offline_score
# MinMaxScaler is convenient here: it provides both scaling and inverse scaling.
from sklearn.preprocessing import MinMaxScaler

np.random.seed(1337)  # for reproducibility

# parameters
# Data path; you may set your own data path with the DATAPATH environment
# variable.
DATAPATH = Config().DATAPATH  # configured environment
T = 24             # number of time intervals in one day
lr = 0.0001        # learning rate
len_closeness = 6  # length of closeness dependent sequence (adjacent steps considered)
len_period = 1     # length of period dependent sequence
len_trend = 4      # length of trend dependent sequence (previous 4 periods as trend)
nb_residual_unit = 6  # number of residual units
nb_flow = 1        # there are two types of flows: new-flow and end-flow
# Divide data into two subsets: Train & Test; the test set is the last 10 days.
days_test = 10
len_test = T * days_test  # number of test timestamps
map_height, map_width = 35, 12  # grid size of each flow map
# For NYC Bike data, there are 81 available grid-based areas, each of