import numpy as np

# Minibatch, GraphSAINT, adj_norm, printf, and args_global are assumed to be
# provided by the repo's own modules (minibatch / models / utils / globals).


def prepare(train_data, train_params, arch_gcn):
    """
    Prepare the data structures and initialize the model / minibatch handler
    before the actual iterative training takes place.
    """
    adj_full, adj_train, feat_full, class_arr, role = train_data
    adj_full = adj_full.astype(np.int32)
    adj_train = adj_train.astype(np.int32)
    adj_full_norm = adj_norm(adj_full)
    num_classes = class_arr.shape[1]

    # Minibatch handler and model used during training.
    minibatch = Minibatch(adj_full_norm, adj_train, role, train_params)
    model = GraphSAINT(num_classes, arch_gcn, train_params, feat_full, class_arr)
    printf("TOTAL NUM OF PARAMS = {}".format(
        sum(p.numel() for p in model.parameters())), style="yellow")

    # Separate CPU copies for evaluation, since inference on the full graph
    # may not fit in GPU memory.
    minibatch_eval = Minibatch(adj_full_norm, adj_train, role, train_params,
                               cpu_eval=True)
    model_eval = GraphSAINT(num_classes, arch_gcn, train_params, feat_full,
                            class_arr, cpu_eval=True)

    if args_global.gpu >= 0:
        model = model.cuda()
    return model, minibatch, minibatch_eval, model_eval
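# A minimal sketch of how prepare() might be invoked from a training entry
# point. parse_n_prepare and train are assumed helpers (their names and exact
# signatures are hypothetical, not confirmed by this file): parse_n_prepare is
# assumed to load the config and dataset into the (train_params, train_phases,
# train_data, arch_gcn) tuple that prepare() expects, and train is assumed to
# run the iterative training loop over the returned handles.
if __name__ == '__main__':
    # Load hyperparameters, training phases, raw graph data, and the GCN
    # architecture spec from the command-line / config (assumed helper).
    train_params, train_phases, train_data, arch_gcn = parse_n_prepare(args_global)
    # Build the model plus minibatch handlers for training and CPU evaluation.
    model, minibatch, minibatch_eval, model_eval = prepare(
        train_data, train_params, arch_gcn)
    # Hand everything to the training driver (assumed helper).
    train(train_phases, model, minibatch, minibatch_eval, model_eval)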