Example #1
0
def test_cheb_polynomial1():
    """cheb_polynomial(L_tilde, 3) yields 3 matrices shaped like adj."""
    from lib.utils import (get_adjacency_matrix,
                           scaled_Laplacian, cheb_polynomial)
    adjacency = get_adjacency_matrix('data/PEMS04/distance.csv', 307)
    laplacian = scaled_Laplacian(adjacency)
    polynomials = cheb_polynomial(laplacian, 3)
    assert len(polynomials) == 3
    assert all(poly.shape == adjacency.shape for poly in polynomials)
Example #2
0
# --- Model switches read from the training config ---------------------------
# Config values are stored as "0"/"1" strings; int() then bool() decodes them.
ScaledSAt = bool(int(
    training_config['ScaledSAt']))  # whether use spatial self attention
SE = bool(int(training_config['SE']))  # whether use spatial embedding
smooth_layer_num = int(training_config['smooth_layer_num'])
aware_temporal_context = bool(int(training_config['aware_temporal_context']))
TE = bool(int(training_config['TE']))
use_LayerNorm = True
residual_connection = True

# direction = 1 means: if i connected to j, adj[i,j]=1;
# direction = 2 means: if i connected to j, then adj[i,j]=adj[j,i]=1
if direction == 2:
    adj_mx, distance_mx = get_adjacency_matrix_2direction(
        adj_filename, num_of_vertices, id_filename)
elif direction == 1:
    adj_mx, distance_mx = get_adjacency_matrix(adj_filename, num_of_vertices,
                                               id_filename)
else:
    # Fail fast on an invalid config value: without this, adj_mx and
    # distance_mx would be undefined and crash later with a NameError.
    raise ValueError('direction must be 1 or 2, got %r' % (direction,))

# Experiment folder name encodes the hyper-parameters of this run.
folder_dir = 'MAE_%s_h%dd%dw%d_layer%d_head%d_dm%d_channel%d_dir%d_drop%.2f_%.2e' % (
    model_name, num_of_hours, num_of_days, num_of_weeks, num_layers, nb_head,
    d_model, encoder_input_size, direction, dropout, learning_rate)

# Append a suffix for every optional component that is enabled.
if aware_temporal_context:
    folder_dir = folder_dir + 'Tcontext'
if ScaledSAt:
    folder_dir = folder_dir + 'ScaledSAt'
if SE:
    folder_dir = folder_dir + 'SE' + str(smooth_layer_num)
if TE:
    folder_dir = folder_dir + 'TE'

print('folder_dir:', folder_dir, flush=True)
params_path = os.path.join('../experiments', dataset_name, folder_dir)
Example #3
0
# ---- Experiment configuration for the Gated_STGCN run ----------------------
num_for_predict = 12  # prediction horizon (number of output steps)
num_of_weeks = 2      # weekly history segments fed to the model
num_of_days = 1       # daily history segments fed to the model
num_of_hours = 2      # hourly history segments fed to the model
num_of_vertices = FLAGS.num_point  # number of graph nodes (from CLI flags)
num_of_features = 3
merge = False
model_name = 'Gated_STGCN_%s' % f
params_dir = 'experiment_Gated_STGCN'
prediction_path = 'Gated_STGCN_prediction_0%s' % f
wdecay = 0.000  # weight decay is disabled

device = torch.device(FLAGS.device)

#read laplace matrix
# NOTE(review): `num_nodes` differs from `num_of_vertices` defined above —
# confirm both refer to the same node count.
adj = get_adjacency_matrix(adj_filename, num_nodes)
adjs = scaled_Laplacian(adj)
# Scaled Laplacian as a float32 tensor on the target device.
supports = (torch.tensor(adjs)).type(torch.float32).to(device)

print('Model is %s' % (model_name))

# Timestamp used to make the fallback params path unique per run.
timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
if params_dir != "None":
    params_path = os.path.join(params_dir, model_name)
else:
    params_path = 'params/%s_%s/' % (model_name, timestamp)

# check parameters file
# Refuse to clobber an existing experiment folder unless --force is given.
if os.path.exists(params_path) and not FLAGS.force:
    raise SystemExit("Params folder exists! Select a new params path please!")
else:
Example #4
0
def get_backbones(config_filename, adj_filename, ctx):
    """Build three two-layer backbone specs (week / day / hour inputs).

    Reads K and the history-segment counts from the config file, computes
    the Chebyshev polynomials of the scaled graph Laplacian as mxnet
    arrays on ``ctx``, and returns a list of three backbone spec lists.
    """
    config = configparser.ConfigParser()
    config.read(config_filename)

    training = config['Training']
    K = int(training['K'])
    num_of_weeks = int(training['num_of_weeks'])
    num_of_days = int(training['num_of_days'])
    num_of_hours = int(training['num_of_hours'])
    num_of_vertices = int(config['Data']['num_of_vertices'])

    adj_mx = get_adjacency_matrix(adj_filename, num_of_vertices)
    L_tilde = scaled_Laplacian(adj_mx)
    cheb_polynomials = [
        nd.array(poly, ctx=ctx) for poly in cheb_polynomial(L_tilde, K)
    ]

    def make_backbone(first_strides):
        # First layer strides over the history segments; second uses 1.
        return [{
            "K": K,
            "num_of_chev_filters": 64,
            "num_of_time_filters": 64,
            "time_conv_strides": strides,
            "cheb_polynomials": cheb_polynomials
        } for strides in (first_strides, 1)]

    return [
        make_backbone(num_of_weeks),
        make_backbone(num_of_days),
        make_backbone(num_of_hours),
    ]
Example #5
0
def get_backbones(config_filename, adj_filename, device):
    """Build three two-layer backbone specs (week / day / hour inputs).

    Reads hyper-parameters from the config file, converts the Chebyshev
    polynomials of the scaled graph Laplacian into one float32 tensor on
    ``device``, and returns a list of three backbone spec lists.
    """
    config = configparser.ConfigParser()
    config.read(config_filename)

    training = config['Training']
    K = int(training['K'])
    num_of_weeks = int(training['num_of_weeks'])
    num_of_days = int(training['num_of_days'])
    num_of_hours = int(training['num_of_hours'])
    num_of_vertices = int(config['Data']['num_of_vertices'])

    # Adjacency matrix -> scaled Laplacian -> Chebyshev polynomials up to
    # order K, held as a single float32 tensor on the target device.
    adj_mx = get_adjacency_matrix(adj_filename, num_of_vertices)
    L_tilde = scaled_Laplacian(adj_mx)
    cheb_polynomials = torch.tensor(cheb_polynomial(L_tilde, K),
                                    dtype=torch.float32).to(device)

    def two_layer_backbone(first_strides):
        # First layer's temporal stride matches the history-segment count;
        # the second layer always uses stride 1.
        return [{
            "K": K,
            "num_of_chev_filters": 64,  # number of Chebyshev filters
            "num_of_time_filters": 64,  # number of temporal filters
            "time_conv_strides": strides,
            "cheb_polynomials": cheb_polynomials
        } for strides in (first_strides, 1)]

    return [
        two_layer_backbone(num_of_weeks),
        two_layer_backbone(num_of_days),
        two_layer_backbone(num_of_hours),
    ]
Example #6
0
def test_scaled_Laplacian():
    """scaled_Laplacian must preserve the adjacency matrix's shape."""
    from lib.utils import get_adjacency_matrix, scaled_Laplacian
    adjacency = get_adjacency_matrix('data/PEMS04/distance.csv', 307)
    laplacian = scaled_Laplacian(adjacency)
    assert laplacian.shape == adjacency.shape
Example #7
0
def test_get_adjacency_matrix2():
    """PEMS08 adjacency matrix must be square with one row per vertex."""
    from lib.utils import get_adjacency_matrix
    n = 170
    adjacency = get_adjacency_matrix('data/PEMS08/distance.csv', n)
    assert adjacency.shape == (n, n)