Example #1
0
def make_model(DEVICE, nb_block, in_channels, K, nb_chev_filter,
               nb_time_filter, time_strides, adj_mx, num_for_predict,
               len_input, num_of_vertices):
    '''
    Build an ASTGCN submodule and initialise its parameters.

    :param DEVICE: torch device the Chebyshev polynomial tensors are moved to
    :param nb_block: number of ASTGCN blocks to stack
    :param in_channels: number of input features per vertex
    :param K: order of the Chebyshev polynomial expansion
    :param nb_chev_filter: number of Chebyshev (graph-conv) filters
    :param nb_time_filter: number of temporal convolution filters
    :param time_strides: stride of the temporal convolution
    :param adj_mx: adjacency matrix of the graph (numpy array, N x N)
    :param num_for_predict: number of future time steps to predict
    :param len_input: length of the input time window
    :param num_of_vertices: number of graph vertices N
    :return: the initialised ASTGCN_submodule
    '''
    # Precompute the K Chebyshev polynomials of the scaled Laplacian once,
    # as float tensors on the target device; the model reuses them.
    L_tilde = scaled_Laplacian(adj_mx)
    cheb_polynomials = [
        torch.from_numpy(i).type(torch.FloatTensor).to(DEVICE)
        for i in cheb_polynomial(L_tilde, K)
    ]
    model = ASTGCN_submodule(DEVICE, nb_block, in_channels, K, nb_chev_filter,
                             nb_time_filter, time_strides, cheb_polynomials,
                             num_for_predict, len_input, num_of_vertices)

    # Xavier init for weight matrices, plain uniform init for 1-D params.
    for p in model.parameters():
        if p.dim() > 1:
            nn.init.xavier_uniform_(p)
        else:
            nn.init.uniform_(p)

    return model
Example #2
0
 def forward(self, x, state=None, M=None):
     '''
     Chebyshev graph convolution operation.

     :param x: (batch_size, N, dim_in) input node features
     :param state: optional recurrent state; when given it is mapped through M
         and concatenated to x along the feature dimension
         (assumes state is (batch_size, nodes_of_M, dim) — TODO confirm)
     :param M: optional mapping matrix applied to the node dimension of state
     :return: (batch_size, N, dim_out)
     '''
     batch_size, num_of_vertices, in_channels = x.shape
     output = torch.zeros(batch_size, num_of_vertices, self.dim_out).to(
         self.DEVICE)  # (batch_size,N, dim_out)
     # NOTE(review): the scaled Laplacian and its Chebyshev polynomials are
     # recomputed on every forward call; if self.adj is static this could be
     # cached once (e.g. in __init__) — confirm before changing.
     L_tilde = scaled_Laplacian(self.adj)
     cheb_polynomials = [
         torch.from_numpy(i).type(torch.FloatTensor)
         for i in cheb_polynomial(L_tilde, self.order_K)
     ]
     if state is not None:
         # Map the state onto this graph's nodes, then append it as extra
         # input features.
         s = torch.einsum('ij,jkm->ikm', M,
                          state.permute(1, 0, 2)).permute(1, 0, 2)
         x = torch.cat((x, s), dim=-1)
     x0 = x  # kept (pre-dropout) for the residual connection below
     if self._in_drop != 0:
         # NOTE(review): torch.dropout takes the DROP probability, so passing
         # 1.0 - self._in_drop treats _in_drop as a KEEP probability; also
         # train=True applies dropout even in eval mode — confirm intended.
         x = torch.dropout(x, 1.0 - self._in_drop, train=True)
     # K-order Chebyshev expansion
     for k in range(self.order_K):
         # graph-filter x with the k-th Chebyshev polynomial (node dim moved
         # to the end and back), then mix channels with Theta[k]
         output = output + x.permute(0, 2, 1).matmul(cheb_polynomials[k].to(
             self.DEVICE)).permute(0, 2, 1).matmul(self.Theta[k])
     output = torch.matmul(output, self.weights)
     output = output + self.biases
     res = F.relu(output)
     if self._gcn_drop != 0.0:
         # Same keep-probability convention as the input dropout above.
         res = torch.dropout(res, 1.0 - self._gcn_drop, train=True)
     if self._residual:
         # Project the (possibly state-augmented) input and add it back.
         x0 = self.linear(x0)
         res = res + x0
     return res  # (batch_size,N, dim_out)
Example #3
0
def test_cheb_polynomial1():
    from lib.utils import (get_adjacency_matrix,
                           scaled_Laplacian, cheb_polynomial)
    # A Chebyshev expansion of order K must yield exactly K matrices,
    # each with the same shape as the adjacency matrix.
    order = 3
    adjacency = get_adjacency_matrix('data/PEMS04/distance.csv', 307)
    polynomials = cheb_polynomial(scaled_Laplacian(adjacency), order)
    assert len(polynomials) == order
    assert all(p.shape == adjacency.shape for p in polynomials)
Example #4
0
# --- experiment configuration -------------------------------------------
# Temporal context: how many weekly / daily / hourly segments feed the model.
num_of_weeks = 2
num_of_days = 1
num_of_hours = 2
num_of_vertices = FLAGS.num_point  # number of graph nodes, from CLI flags
num_of_features = 3
merge = False
# NOTE(review): `f` is defined earlier in the file (presumably a fold or run
# index) — it tags both the model name and the prediction output. Confirm.
model_name = 'Gated_STGCN_%s' % f
params_dir = 'experiment_Gated_STGCN'
prediction_path = 'Gated_STGCN_prediction_0%s' % f
wdecay = 0.000  # L2 weight decay (disabled)

device = torch.device(FLAGS.device)

#read laplace matrix
adj = get_adjacency_matrix(adj_filename, num_nodes)
adjs = scaled_Laplacian(adj)
# Single support matrix (the scaled Laplacian) as a float tensor on `device`.
supports = (torch.tensor(adjs)).type(torch.float32).to(device)

print('Model is %s' % (model_name))

# Checkpoints go under params_dir/model_name when a directory is configured,
# otherwise under a timestamped folder in params/.
timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
if params_dir != "None":
    params_path = os.path.join(params_dir, model_name)
else:
    params_path = 'params/%s_%s/' % (model_name, timestamp)
if os.path.exists(params_path) and not FLAGS.force:
    raise SystemExit("Params folder exists! Select a new params path please!")
else:
    if os.path.exists(params_path):
Example #5
0
def get_backbones(config_filename, adj_filename, ctx):
    '''
    Read training/data settings from a config file and build the ASTGCN
    backbone configuration for the week / day / hour components.

    :param config_filename: path to an INI file with [Training] and [Data]
        sections (K, num_of_weeks/days/hours, num_of_vertices)
    :param adj_filename: path to the adjacency (distance) csv
    :param ctx: mxnet context the Chebyshev polynomial arrays are placed on
    :return: [backbones_week, backbones_day, backbones_hour]; each is a list
        of two block-config dicts sharing the same Chebyshev polynomials
    '''
    config = configparser.ConfigParser()
    config.read(config_filename)

    K = int(config['Training']['K'])
    num_of_weeks = int(config['Training']['num_of_weeks'])
    num_of_days = int(config['Training']['num_of_days'])
    num_of_hours = int(config['Training']['num_of_hours'])
    num_of_vertices = int(config['Data']['num_of_vertices'])

    adj_mx = get_adjacency_matrix(adj_filename, num_of_vertices)
    L_tilde = scaled_Laplacian(adj_mx)
    # Chebyshev polynomials up to order K, on the target context; shared by
    # every block config below.
    cheb_polynomials = [
        nd.array(i, ctx=ctx) for i in cheb_polynomial(L_tilde, K)
    ]

    def _block(time_conv_strides):
        # One ASTGCN block config: 64 Chebyshev and 64 temporal filters.
        return {
            "K": K,
            "num_of_chev_filters": 64,
            "num_of_time_filters": 64,
            "time_conv_strides": time_conv_strides,
            "cheb_polynomials": cheb_polynomials,
        }

    # Each temporal component stacks two blocks: the first downsamples time
    # by its period length (weeks / days / hours), the second keeps stride 1.
    return [[_block(period), _block(1)]
            for period in (num_of_weeks, num_of_days, num_of_hours)]
Example #6
0
def get_backbones(config_filename, adj_filename, device):
    '''
    Read training/data settings from a config file and build the ASTGCN
    backbone configuration for the week / day / hour components.

    :param config_filename: path to an INI file with [Training] and [Data]
        sections (K, num_of_weeks/days/hours, num_of_vertices)
    :param adj_filename: path to the adjacency (distance) csv
    :param device: torch device the Chebyshev polynomial tensor is placed on
    :return: [backbones_week, backbones_day, backbones_hour]; each is a list
        of two block-config dicts sharing the same Chebyshev tensor
    '''
    config = configparser.ConfigParser()
    config.read(config_filename)

    K = int(config['Training']['K'])
    num_of_weeks = int(config['Training']['num_of_weeks'])
    num_of_days = int(config['Training']['num_of_days'])
    num_of_hours = int(config['Training']['num_of_hours'])
    num_of_vertices = int(config['Data']['num_of_vertices'])

    adj_mx = get_adjacency_matrix(adj_filename, num_of_vertices)
    L_tilde = scaled_Laplacian(adj_mx)
    # Chebyshev polynomials up to order K, stacked into a single float tensor
    # on the target device (unlike the list-of-tensors used elsewhere).
    cheb_polynomials = torch.tensor(cheb_polynomial(L_tilde, K),
                                    dtype=torch.float32).to(device)

    def _block(time_conv_strides):
        # One ASTGCN block config: 64 Chebyshev and 64 temporal filters.
        return {
            "K": K,
            "num_of_chev_filters": 64,
            "num_of_time_filters": 64,
            "time_conv_strides": time_conv_strides,
            "cheb_polynomials": cheb_polynomials,
        }

    # Each temporal component stacks two blocks: the first downsamples time
    # by its period length (weeks / days / hours), the second keeps stride 1.
    return [[_block(period), _block(1)]
            for period in (num_of_weeks, num_of_days, num_of_hours)]
Example #7
0
def get_backbones_traffic4cast(config_filename, adj_filename):
    '''
    Build the ASTGCN backbone configuration for the traffic4cast dataset.

    :param config_filename: path to an INI file with a [Training] section
        (K, num_of_weeks/days/hours)
    :param adj_filename: path to a scipy-sparse .npz adjacency matrix
    :return: (all_backbones, cheb_polynomials) where all_backbones is
        [[week_block], [day_block], [hour_block]] and cheb_polynomials is a
        list of CSR-sparse mxnet arrays (kept out of the block configs)
    '''
    config = configparser.ConfigParser()
    config.read(config_filename)

    K = int(config['Training']['K'])
    num_of_weeks = int(config['Training']['num_of_weeks'])
    num_of_days = int(config['Training']['num_of_days'])
    num_of_hours = int(config['Training']['num_of_hours'])

    # traffic4cast ships a sparse adjacency matrix (.npz) instead of a csv.
    adj_mx = sp.load_npz(adj_filename).toarray()
    # adj_mx = get_adjacency_matrix(adj_filename, num_of_vertices)
    L_tilde = scaled_Laplacian(adj_mx)
    # CSR-sparse Chebyshev polynomials, returned to the caller separately
    # rather than embedded in each block config.
    cheb_polynomials = [
        nd.array(i).tostype('csr') for i in cheb_polynomial(L_tilde, K)
    ]

    num_filters = 16

    def _block(time_conv_strides, **extra):
        # One ASTGCN block config; cheb polynomials deliberately NOT embedded.
        block = {
            "K": K,
            "num_of_chev_filters": num_filters,
            "num_of_time_filters": num_filters,
            "time_conv_strides": time_conv_strides,
        }
        block.update(extra)
        return block

    # One block per temporal component (the second stride-1 block used for
    # other datasets is disabled here). Only the hourly component declares
    # num_of_features=3 — traffic4cast inputs carry 3 channels.
    all_backbones = [
        [_block(num_of_weeks)],
        [_block(num_of_days)],
        [_block(num_of_hours, num_of_features=3)],
    ]

    return all_backbones, cheb_polynomials
Example #8
0
def test_scaled_Laplacian():
    from lib.utils import get_adjacency_matrix, scaled_Laplacian
    # The scaled Laplacian must preserve the adjacency matrix's square shape.
    adjacency = get_adjacency_matrix('data/PEMS04/distance.csv', 307)
    laplacian = scaled_Laplacian(adjacency)
    assert laplacian.shape == adjacency.shape