Example #1
File: train.py Project: gyy1225/dblp_gcn
                    help='Number of hidden units.')
parser.add_argument('--dropout',
                    type=float,
                    default=0.5,
                    help='Dropout rate (1 - keep probability).')

args = parser.parse_args()
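# Use CUDA only when it is not disabled on the command line and a GPU is actually available.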
args.cuda = not args.no_cuda and torch.cuda.is_available()

np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

# Load data
adj, features, labels, idx_train, idx_val, idx_test = load_data()

# Model and optimizer
model = GCN(nfeat=features.shape[1],
            nhid=args.hidden,
            nclass=labels.max().item() + 1,
            dropout=args.dropout)
optimizer = optim.Adam(model.parameters(),
                       lr=args.lr,
                       weight_decay=args.weight_decay)
# optimizer = optim.Adam(model.parameters(),
#                        lr=args.lr, weight_decay=10)

if args.cuda:
    model.cuda()
    features = features.cuda()
Example #2
parser.add_argument('--hidden', type=int, default=16,
                    help='Number of hidden units.')
parser.add_argument('--dropout', type=float, default=0.5,
                    help='Dropout rate (1 - keep probability).')

args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
print(f"Argument list: {args}")

np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

# Load data
adj, features, labels, idx_train, idx_val, idx_test = load_data(
    path='./data/cora/')
print(adj.shape, features.shape, labels.shape)

# Model and optimizer
model = GCN(nfeat=features.shape[1],
            nhid=args.hidden,
            nclass=labels.max().item() + 1,
            dropout=args.dropout)
optimizer = optim.Adam(model.parameters(),
                       lr=args.lr, weight_decay=args.weight_decay)

if args.cuda:
    model.cuda()
    features = features.cuda()
    adj = adj.cuda()
    labels = labels.cuda()
Example #3
                    action='store_true',
                    default=False,
                    help='Using Batch Normalization')
#dataset = 'citeseer'
#dataset = 'pubmed'
args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()
torch.cuda.set_device(args.cuda_device)
dataset = args.dataset
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

# Load data
A, features, labels, idx_train, idx_val, idx_test, adj_mask = load_data(
    dataset)
idx_unlabel = torch.arange(idx_train.shape[0], labels.shape[0], dtype=torch.long)  # torch.range is deprecated; arange's end is exclusive, so the -1 is dropped

features = features.cuda()
adj_mask = adj_mask.cuda()
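# Debug check: print the shape of the features selected by the first adjacency mask, then halt.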
print(features[adj_mask[0]].shape)
assert False

# Model and optimizer
model = MLP(nfeat=features.shape[1],
            nhid=args.hidden,
            nclass=labels.max().item() + 1,
            input_droprate=args.input_droprate,
            hidden_droprate=args.hidden_droprate,
            use_bn=args.use_bn)
optimizer = optim.Adam(model.parameters(),
Example #4
parser.add_argument('--neg_sample_weight',  type=int, default=20,
                    help='Negative sample size')

parser.add_argument('--transfer', action='store_true', default=False,
                    help='Transfer learning - using smaller learning rate when transfering')

args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

# Load data
adj, adj_ds, neg_adj_list, train_edges, degrees, features, labels, idx_train, idx_val, idx_test = load_data(args.path, args.dataset)

# Model and optimizer
model = GCN(nfeat=features.shape[1],
            nhid=args.hidden,
            nclass=labels.max().item() + 1,
            dropout=args.dropout)
optimizer = optim.Adam(model.parameters(),
                       lr=args.lr, weight_decay=args.weight_decay)
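# Alternative optimizer for transfer learning: the first graph-convolution layer gets a 10x smaller learning rate.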
optimizer2 = optim.Adam(
    [
        {"params": model.gc1.parameters(), "lr": args.lr/10},
        {"params": model.gc2.parameters(), "lr": args.lr}
    ],
    lr=args.lr,
    weight_decay=args.weight_decay)
Example #5
class args_:
    def __init__(self):
        self.hidden=16
        self.no_cuda=False
        self.fastmode=False
        self.seed=42
        self.epochs=200
        self.lr=0.01
        self.weight_decay=5e-4
        self.dropout=0.5
# Hyperparameters are hard-coded in a small class instead of being parsed from the command line.
args = args_()
args.cuda = not args.no_cuda and torch.cuda.is_available()
# Load data
adj, features, labels, idx_train, idx_val, idx_test = load_data(path="/root/AGI/lazero/brainfuck/pygcn/data/cora/")
# idx_train / idx_val / idx_test are index tensors selecting the train, validation and test nodes.
# Model and optimizer
model = GCN(nfeat=features.shape[1],
            nhid=args.hidden,
            nclass=labels.max().item() + 1,
            dropout=args.dropout)
optimizer = optim.Adam(model.parameters(),
                       lr=args.lr, weight_decay=args.weight_decay)
Example #6
                    help='Weight decay (L2 loss on parameters).')
parser.add_argument('--hidden', type=int, default=16,
                    help='Number of hidden units.')
parser.add_argument('--dropout', type=float, default=0.5,
                    help='Dropout rate (1 - keep probability).')

args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

# Load data
adj, features, labels, idx_train, idx_val, idx_test = load_data(path="data/cora/", dataset="cora")

def test():
    model.eval()
    output = model(features, adj)
    loss_test = F.nll_loss(output[idx_test], labels[idx_test])
    acc_test = accuracy(output[idx_test], labels[idx_test])
    print("Test set results:",
        "loss= {:.4f}".format(loss_test.item()),
        "accuracy= {:.4f}".format(acc_test.item()))

def train(epoch):
    t = time.time()
    model.train()
    optimizer.zero_grad()
    output = model(features, adj)
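The excerpt above cuts off inside train(); a minimal sketch of how the rest of the body typically continues in pygcn-style training code, assuming the same F.nll_loss and accuracy helpers used in test():

    # ...continuation of train(epoch), sketch only:
    loss_train = F.nll_loss(output[idx_train], labels[idx_train])
    acc_train = accuracy(output[idx_train], labels[idx_train])
    loss_train.backward()
    optimizer.step()
    print("Epoch: {:04d}".format(epoch + 1),
          "loss_train: {:.4f}".format(loss_train.item()),
          "acc_train: {:.4f}".format(acc_train.item()),
          "time: {:.4f}s".format(time.time() - t))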
Example #7
        self.hidden = 16
        self.no_cuda = False
        self.fastmode = False
        self.seed = 42
        self.epochs = 200
        self.lr = 0.01
        self.weight_decay = 5e-4
        self.dropout = 0.5


# Hyperparameters are hard-coded in a small class instead of being parsed from the command line.
args = args_()
args.cuda = not args.no_cuda and torch.cuda.is_available()
# Load data
adj, features, labels, idx_train, idx_val, idx_test = load_data(
    path="pygcn/data/cora/")
# load_data returns the adjacency matrix, node features, labels, and the train/val/test index splits.
print(adj.shape, features.shape, labels.shape, idx_train.shape, idx_val.shape,
      idx_test.shape)
# torch.Size([2708, 2708]) torch.Size([2708, 1433]) torch.Size([2708]) torch.Size([140]) torch.Size([300]) torch.Size([1000])
print(type(adj), type(features), type(labels), type(idx_train), type(idx_val),
      type(idx_test))
# Commented-out debug helper: collect the tensors in a dict and print each name, shape and value.
# pdd = {"adj": adj, "features": features, "labels": labels,
#        "idx_train": idx_train, "idx_val": idx_val, "idx_test": idx_test}
# for x in pdd.keys():
#     print(x, pdd[x].shape)
#     print(pdd[x])
# Model and optimizer
Example #8
#os.environ["CUDA_VISIBLE_DEVICES"] = "5"
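# Pin all CUDA work to GPU 5 (an alternative to masking devices with CUDA_VISIBLE_DEVICES above).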
torch.cuda.set_device(5)

print('args.cuda', args.cuda)

np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

print(args.seed)
# Load data
dataset = 'cora'
#dataset = 'citeseer'
# dataset = 'pubmed'
adj, A, _, features, labels, idx_train, idx_val, idx_test, edges, adj_ = load_data(
    dataset)

model = NGCN(nfeat_1=features.shape[1],
             nfeat_2=features.shape[1],
             nhid_1=args.hid1,
             nhid_2=args.hid1,
             nclass=labels.max().item() + 1,
             dropout=args.dropout)
# dropout=0.5)

# center_loss_1 = CenterLoss(num_classes=labels.max().item() + 1, feat_dim=16, use_gpu=args.cuda)
# center_loss_2 = CenterLoss(num_classes=labels.max().item() + 1, feat_dim=16, use_gpu=args.cuda)

params = list(model.parameters())  # + list(center_loss_1.parameters()) + list(center_loss_2.parameters())
Example #9
args = parser.parse_args()

np.random.seed(args.seed)
torch.manual_seed(args.seed)

# Load data
dataset = []
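# Presumably exports the generated graphs (with the listed attribute columns) to .npy files that load_data reads below.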
graphs.save_numpy(
    r'C:\Users\Pasi\OneDrive\Documents\Uni\MSem. 4 - SS 20\MT - Master Thesis\Simulator and Models\MT_SimpleDataGenerator\pygcn\data\sdg_fraud\\',
    ['price', 'new_value', 'old_value'])

for graph in graphs.get_raw_list():
    adj, features, labels, idx_train, idx_val, idx_test = load_data(
        path="../pygcn/data/sdg_fraud/",
        dataset=graph.get_name(),
        train_size=len(graph) - 1,
        validation_size=0)

    dataset.append(
        Bunch(
            name=graph.get_name(),
            adj=adj,
            features=features,
            # labels_raw=labels,
            labels=readout_labels(labels, args.classification_mode),
            idx_train=idx_train,
            idx_val=idx_val,
            idx_test=idx_test))

random.shuffle(dataset)
Example #10
                    help='Number of hidden units.')
parser.add_argument('--dropout',
                    type=float,
                    default=0.5,
                    help='Dropout rate (1 - keep probability).')

args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

# Load data
adj, features, labels, idx_train, idx_val, idx_test = load_data(
    path="./data/cora/", dataset='cora', train_size=400, validation_size=50)

# Model and optimizer
model = GCN(nfeat=features.shape[1],
            nhid=args.hidden,
            nclass=labels.max().item() + 1,
            dropout=args.dropout)
optimizer = optim.Adam(model.parameters(),
                       lr=args.lr,
                       weight_decay=args.weight_decay)

if args.cuda:
    model.cuda()
    features = features.cuda()
    adj = adj.cuda()
    labels = labels.cuda()
Example #11
File: train.py Project: seanli3/pygcn
parser.add_argument('--dropout',
                    type=float,
                    default=0.5,
                    help='Dropout rate (1 - keep probability).')
parser.add_argument('--dataset', type=str, default='cora')

args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

# Load data
adj, features, labels, idx_train, idx_val, idx_test = load_data(
    dataset=args.dataset)

# Model and optimizer
model = GCN(nfeat=features.shape[1],
            nhid=args.hidden,
            nclass=labels.max().item() + 1,
            dropout=args.dropout)
optimizer = optim.Adam(model.parameters(),
                       lr=args.lr,
                       weight_decay=args.weight_decay)

if args.cuda:
    model.cuda()
    features = features.cuda()
    adj = adj.cuda()
    labels = labels.cuda()
Example #12
parser.add_argument('--dropout', type=float, default=0.5,
                    help='Dropout rate (1 - keep probability).')

args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

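# NOTE: this second call overrides the args.seed value that was set for NumPy above.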
np.random.seed(187348292)

# Load data
# adj, features, labels, idx_train, idx_val, idx_test = load_data()
adj, features, labels = load_data()
adj_csr = sp.csr_matrix(adj)
idxs = np.arange(adj.shape[0])
train_size = 500
val_size = 300
test_size = 1000
idx_train, idx_val, idx_test = training_split(idxs, train_size, val_size, test_size)


# Model and optimizer
model = GCN(nfeat=features.shape[1],
            nhid=args.hidden,
            nclass=labels.max().item() + 1,
            dropout=args.dropout)
optimizer = optim.Adam(model.parameters(),
                       lr=args.lr, weight_decay=args.weight_decay)
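training_split is project-specific and not shown in the excerpt; a minimal sketch of one plausible implementation (an assumption, not the project's actual helper), using the numpy and torch imports already present:

def training_split(idxs, train_size, val_size, test_size):
    # Hypothetical split: shuffle the node indices and carve out three disjoint blocks.
    shuffled = np.random.permutation(idxs)
    idx_train = torch.LongTensor(shuffled[:train_size])
    idx_val = torch.LongTensor(shuffled[train_size:train_size + val_size])
    idx_test = torch.LongTensor(shuffled[train_size + val_size:train_size + val_size + test_size])
    return idx_train, idx_val, idx_test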
Example #13
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

print("start")

# Load data
graph = np.load("D:/data/graph/graph_save.npy")
# Load data
node = torch.load("D:/data/node/node.pt")
with open("D:/data/node/nodelist.json", 'r') as fp:
    node_list = json.load(fp)
print("Data Load")

adj, features, labels, idx_train, idx_val, idx_test = load_data(graph[0], node, node_list)

# Model and optimizer
model = GCN(nfeat=features.shape[1],
            nhid=args.hidden,
            nclass=labels.max().item() + 1,
            dropout=args.dropout)
optimizer = optim.Adam(model.parameters(),
                       lr=args.lr, weight_decay=args.weight_decay)

if args.cuda:
    model.cuda()
    features = features.cuda()
    adj = adj.cuda()
    labels = labels.cuda()
    idx_train = idx_train.cuda()
Example #14
File: train.py Project: hoangdzung/pygcn
                    help='Hard assignment of gumbel softmax')
parser.add_argument('--beta',
                    type=float,
                    default=0,
                    help='Beta param of gumbel softmax, default=0')

args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

# Load data
adj, adj_ds, _, _, _, features, labels, idx_train, idx_val, idx_test = load_data(
    args.path, args.dataset)

# Model and optimizer
model = GCN(nfeat=features.shape[1],
            nhid=args.hidden,
            nclass=labels.max().item() + 1,
            dropout=args.dropout)
optimizer = optim.Adam(model.parameters(),
                       lr=args.lr,
                       weight_decay=args.weight_decay)
optimizer2 = optim.Adam([{
    "params": model.gc1.parameters(),
    "lr": args.lr / 10
}, {
    "params": model.gc2.parameters(),
    "lr": args.lr
Example #15
                    help='Dropout rate (1 - keep probability).')
parser.add_argument('--dataset', type=str, default="cora",
                    help='Dataset (cora...)')
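# Note: argparse's type=bool converts any non-empty string (even "False") to True, so this flag is hard to turn off from the command line.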
parser.add_argument('--normalize_features', type=bool, default=True,
                    help='normalize features')

args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

# Load data
adj, features, labels, idx_train, idx_val, idx_test = load_data("../data/%s/" % args.dataset, args.dataset, args.normalize_features)

# Model and optimizer
model = GCN(nfeat=features.shape[1],
            nhid=args.hidden,
            nclass=labels.max().item() + 1,
            dropout=args.dropout)
optimizer = optim.Adam(model.parameters(),
                       lr=args.lr, weight_decay=args.weight_decay)

if args.cuda:
    model.cuda()
    features = features.cuda()
    adj = adj.cuda()
    labels = labels.cuda()
    idx_train = idx_train.cuda()
Example #16
                    help='Dropout rate (1 - keep probability).')
parser.add_argument('--data_type',
                    type=str,
                    default="cora",
                    help="Type of dataset to train and eval.")

args = parser.parse_args()
args.cuda = not args.no_cuda and torch.cuda.is_available()

np.random.seed(args.seed)
torch.manual_seed(args.seed)
if args.cuda:
    torch.cuda.manual_seed(args.seed)

# Load data
adj, features, labels, idx_train, idx_val, idx_test = load_data(args.data_type)

# Model and optimizer
model = GCN(nfeat=features.shape[1],
            nhid=args.hidden,
            nclass=labels.max().item() + 1,
            dropout=args.dropout,
            data_type=args.data_type)
if args.data_type == "cora":
    loss = LabelSmoothLoss()
elif args.data_type.startswith("elliptic"):
    loss = nn.CrossEntropyLoss(weight=torch.Tensor([0.7, 0.3, 0.0]))
optimizer = optim.Adam(model.parameters(),
                       lr=args.lr,
                       weight_decay=args.weight_decay)
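LabelSmoothLoss is not defined in this excerpt; a minimal, hypothetical sketch of such a loss, assuming the model emits log-probabilities (as the log_softmax-based GCNs above do) and a default smoothing factor of 0.1:

class LabelSmoothLoss(nn.Module):
    """Sketch of a label-smoothing loss over log-probabilities (an assumption, not the project's code)."""

    def __init__(self, smoothing=0.1):
        super().__init__()
        self.smoothing = smoothing

    def forward(self, log_probs, target):
        n_classes = log_probs.size(-1)
        with torch.no_grad():
            # Smoothed one-hot targets: 1 - smoothing on the true class, the rest spread uniformly.
            true_dist = torch.full_like(log_probs, self.smoothing / (n_classes - 1))
            true_dist.scatter_(1, target.unsqueeze(1), 1.0 - self.smoothing)
        return torch.mean(torch.sum(-true_dist * log_probs, dim=-1))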