Example #1
    def get_date_adj_list(self, origin_base_path, start_idx, duration, sep='\t', normalize=False, row_norm=False, add_eye=False, data_type='tensor'):
        """Load the adjacency matrix of each graph snapshot in [start_idx, start_idx + duration)."""
        assert data_type in ['tensor', 'matrix']
        date_dir_list = sorted(os.listdir(origin_base_path))
        # print('adj list: ', date_dir_list)
        date_adj_list = []
        for i in range(start_idx, min(start_idx + duration, self.max_time_num)):
            original_graph_path = os.path.join(origin_base_path, date_dir_list[i])
            spmat = get_sp_adj_mat(original_graph_path, self.full_node_list, sep=sep)
            # spmat = sp.coo_matrix((np.exp(alpha * spmat.data), (spmat.row, spmat.col)), shape=(self.node_num, self.node_num))
            if add_eye:
                # add self-loops
                spmat = spmat + sp.eye(spmat.shape[0])
            if normalize:
                # row or symmetric degree normalization of the adjacency matrix
                spmat = get_normalized_adj(spmat, row_norm=row_norm)
            # convert to the requested data type
            if data_type == 'tensor':
                sptensor = sparse_mx_to_torch_sparse_tensor(spmat)
                date_adj_list.append(sptensor.cuda() if self.has_cuda else sptensor)
            else:  # data_type == 'matrix'
                date_adj_list.append(spmat)
        # print(len(date_adj_list))
        return date_adj_list
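The helpers called above (get_sp_adj_mat, get_normalized_adj, sparse_mx_to_torch_sparse_tensor) live elsewhere in the repository and are not part of this excerpt. A minimal sketch of plausible implementations for the two normalization/conversion helpers, with names and signatures taken from the call sites above and everything else an assumption:

import numpy as np
import scipy.sparse as sp
import torch

def get_normalized_adj(spmat, row_norm=False):
    """Sketch: D^-1 * A when row_norm=True, otherwise symmetric D^-1/2 * A * D^-1/2."""
    spmat = sp.coo_matrix(spmat)
    degree = np.array(spmat.sum(axis=1)).flatten()
    if row_norm:
        d_inv = np.power(degree, -1.0)
        d_inv[np.isinf(d_inv)] = 0.
        return sp.diags(d_inv).dot(spmat).tocoo()
    d_inv_sqrt = np.power(degree, -0.5)
    d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
    d_mat = sp.diags(d_inv_sqrt)
    return d_mat.dot(spmat).dot(d_mat).tocoo()

def sparse_mx_to_torch_sparse_tensor(sparse_mx):
    """Sketch: convert a scipy sparse matrix to a torch sparse COO tensor."""
    sparse_mx = sparse_mx.tocoo().astype(np.float32)
    indices = torch.from_numpy(np.vstack((sparse_mx.row, sparse_mx.col)).astype(np.int64))
    values = torch.from_numpy(sparse_mx.data)
    return torch.sparse_coo_tensor(indices, values, torch.Size(sparse_mx.shape))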
Example #2
        means=means,
        stds=stds)
    val_input, val_target, val_mean_t, val_std_t = generate_dataset(
        val_original_data,
        num_timesteps_input=num_timesteps_input,
        num_timesteps_output=num_timesteps_output,
        means=means,
        stds=stds)
    test_input, test_target, test_mean_t, test_std_t = generate_dataset(
        test_original_data,
        num_timesteps_input=num_timesteps_input,
        num_timesteps_output=num_timesteps_output,
        means=means,
        stds=stds)

    # Normalize the adjacency matrix and move it to the target device.
    A_wave = get_normalized_adj(A)
    A_wave = torch.from_numpy(A_wave)
    A_wave = A_wave.to(device=args.device)

    # arguments: number of nodes, number of input features per node, input/output horizon
    net = STGCN(A_wave.shape[0], training_input.shape[3], num_timesteps_input,
                num_timesteps_output).to(device=args.device)

    optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)
    loss_criterion = nn.MSELoss()

    training_losses = []
    validation_losses = []
    validation_maes = []
    for epoch in range(epochs):
        loss = train_epoch(training_input,
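The call to train_epoch is cut off at the end of this excerpt and its definition is not shown. A plausible minimal version, assuming it iterates over shuffled mini-batches along the first (sample) dimension and relies on the net, optimizer, loss_criterion, A_wave, batch_size and args names defined earlier in the script; the real signature and body may differ:

def train_epoch(training_input, training_target, batch_size):
    """Sketch: one pass over the training set in shuffled mini-batches; returns the mean loss."""
    permutation = torch.randperm(training_input.shape[0])
    epoch_losses = []
    for i in range(0, training_input.shape[0], batch_size):
        net.train()
        optimizer.zero_grad()
        indices = permutation[i:i + batch_size]
        X_batch = training_input[indices].to(device=args.device)
        y_batch = training_target[indices].to(device=args.device)
        out = net(A_wave, X_batch)  # assumed forward signature: net(adjacency, features)
        loss = loss_criterion(out, y_batch)
        loss.backward()
        optimizer.step()
        epoch_losses.append(loss.detach().cpu().item())
    return sum(epoch_losses) / len(epoch_losses)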
Example #3
        pin_memory=True)
    val_loader = Data.DataLoader(
        dataset=val_dataset,
        batch_size=batch_size,
        drop_last=True,
        num_workers=32,
        pin_memory=True)
    logger.info(f"finished loading dataset, dataset length: {len(dataset)}")

    #############################ele distance matrix###################
    # Build the electrical-distance matrix from the inverse bus admittance matrix.
    # Read from an untouched copy so entries are not overwritten while still needed.
    Z = np.load('Ybus_inv.npy')
    ele_A = np.empty_like(Z)
    for i in range(0, Z.shape[0]):
        for j in range(0, Z.shape[1]):
            ele_A[i, j] = Z[i, i] - 2 * Z[i, j] - Z[j, j]
    # print(ele_A)
    ele_A_wave = get_normalized_adj(ele_A)
    ele_A_wave = torch.from_numpy(ele_A_wave)
    ele_A_wave = ele_A_wave.to(device=args.device)
    ###################################################################

    #############################adjacency matrix###################
    A = pd.read_excel('AdjacencyMatrix.xls').values.astype(np.float32)
    A_wave = get_normalized_adj(A)
    A_wave = torch.from_numpy(A_wave)
    A_wave = A_wave.to(device=args.device)
    ###################################################################

    print("starting a new model")
    net = STGCN(A_wave.shape[0],
            feature_num,
            num_timesteps_input,
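As an aside on the electrical-distance block in this example: the same matrix can be computed with numpy broadcasting, reading only from the original Ybus inverse so no entry is overwritten before it is used. A short sketch that keeps the formula exactly as written above:

import numpy as np

Z = np.load('Ybus_inv.npy')  # inverse bus admittance matrix
diag = np.diag(Z)
# element-wise: ele_A[i, j] = Z[i, i] - 2 * Z[i, j] - Z[j, j]
ele_A = diag[:, None] - 2 * Z - diag[None, :]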