def convert_sparse_train_input1(adj, features):
    """Convert an adjacency matrix and feature index lists into torch tensors.

    Parameters
    ----------
    adj : sparse adjacency matrix, passed straight to ``preprocess_adj``
        (presumably a scipy sparse matrix -- TODO confirm against caller).
    features : iterable of integer index sequences; each is converted to a
        1-D ``torch.long`` tensor.

    Returns
    -------
    tuple
        ``(features, support)`` where ``features`` is a list of long tensors
        and ``support`` is a sparse float tensor built from the COO triple
        ``(coords, values, shape)`` returned by ``preprocess_adj``.

    NOTE(review): ``device`` is a module-level global; it is only referenced
    on the CUDA path, matching the rest of the script.
    """
    # preprocess_adj is assumed to return (coords, values, shape) in COO form.
    supports = preprocess_adj(adj)
    m = torch.from_numpy(supports[0]).long()
    n = torch.from_numpy(supports[1])

    # Hoist the availability check: the original called it once per feature
    # list and rebuilt the support tensor a second time on the GPU path.
    use_cuda = torch.cuda.is_available()
    if use_cuda:
        # Move the index/value tensors first so the sparse tensor is
        # assembled directly on the GPU instead of being built twice.
        m = m.to(device)
        n = n.to(device)

    # Built exactly once, on whichever device m and n now live on.
    support = torch.sparse.FloatTensor(m.t(), n, supports[2]).float()

    features = [
        torch.tensor(idxs, dtype=torch.long).to(device) if use_cuda
        else torch.tensor(idxs, dtype=torch.long)
        for idxs in features
    ]
    return features, support
# Fix the random seeds so runs are reproducible.
seed = 123
np.random.seed(seed)
torch.random.manual_seed(seed)

# Load the dataset selected on the command line.
adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask = load_data(
    args.dataset)
print('adj:', adj.shape)
print('features:', features.shape)
print('y:', y_train.shape, y_val.shape, y_test.shape)
print('mask:', train_mask.shape, val_mask.shape, test_mask.shape)

# D^-1 @ X -- normalize features and adjacency exactly ONCE.
# BUG FIX: the original ran preprocess_features/preprocess_adj twice in a
# row; applying the normalization a second time silently corrupts the data.
features = preprocess_features(features)  # [49216, 2], [49216], [2708, 1433]
supports = preprocess_adj(adj)

# Move data to the available device.  Falling back to CPU instead of
# hardcoding torch.device('cuda'), which crashes on CPU-only machines.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Labels arrive one-hot (2-D); keep the class count before collapsing
# them to class indices with argmax.
train_label = torch.from_numpy(y_train).long().to(device)
num_classes = train_label.shape[1]
train_label = train_label.argmax(dim=1)
# BUG FIX: np.int was removed in NumPy 1.20+; use the explicit np.int64.
train_mask = torch.from_numpy(train_mask.astype(np.int64)).to(device)
val_label = torch.from_numpy(y_val).long().to(device)
val_label = val_label.argmax(dim=1)
val_mask = torch.from_numpy(val_mask.astype(np.int64)).to(device)