Example No. 1
def train_gcn(args):
    exp_init(args.seed, gpu_id=args.gpu)
    # ! config
    cf = GraphSageConfig(args)
    cf.device = th.device("cuda:0" if args.gpu >= 0 else "cpu")

    # ! Load Graph
    g, features, n_feat, cf.n_class, labels, train_x, val_x, test_x = preprocess_data(cf.dataset, cf.train_percentage)
    features = features.to(cf.device)
    g = dgl.add_self_loop(g).to(cf.device)
    supervision = SimpleObject({'train_x': train_x, 'val_x': val_x, 'test_x': test_x, 'labels': labels})

    # ! Train Init
    print(f'{cf}\nStart training..')
    model = SAGE(n_feat, cf.n_hidden, cf.n_class, cf.n_layer, F.relu, cf.dropout, cf.aggregator)
    model.to(cf.device)
    print(model)
    optimizer = th.optim.Adam(
        model.parameters(), lr=cf.lr, weight_decay=cf.weight_decay)
    if cf.early_stop > 0:
        stopper = EarlyStopping(patience=cf.early_stop, path=cf.checkpoint_file)
    else:
        stopper = None

    # ! Train
    trainer = FullBatchTrainer(model=model, g=g, cf=cf, features=features,
                               sup=supervision, stopper=stopper, optimizer=optimizer,
                               loss_func=th.nn.CrossEntropyLoss())
    trainer.run()
    trainer.eval_and_save()

    return cf
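A minimal invocation sketch for train_gcn above. Only --gpu and --seed are read directly from args; the --dataset and --train_percentage flags (and every default value) are assumptions based on the GraphSageConfig fields used in the body.

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', type=int, default=0, help='GPU id; use -1 for CPU')
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--dataset', type=str, default='cora')          # assumed flag
    parser.add_argument('--train_percentage', type=int, default=10)     # assumed flag
    train_gcn(parser.parse_args())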
Example No. 2
	def forward(self, template, source, maxiter=10):
		template, source, template_mean, source_mean = data_utils.preprocess_data(template, source, 
																			 self.p0_zero_mean, self.p1_zero_mean)

		result = self.iclk(template, source, maxiter)
		result = data_utils.postprocess_data(result, template, source, template_mean, source_mean, 
											 self.p0_zero_mean, self.p1_zero_mean)
		return result
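A hedged usage sketch of the forward pass above: `model` stands for whatever registration module defines this method (an assumption), and the batched point-cloud shapes are illustrative only.

import torch

template = torch.rand(8, 1024, 3)  # hypothetical batch of template point clouds
source = torch.rand(8, 1024, 3)    # hypothetical batch of source point clouds
result = model(template, source, maxiter=10)  # preprocess -> iclk -> postprocess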
Example No. 3
    def test(self, N, train_set, test_set):

        # Generate "new" data
        train_data, test_data = generate_pair_sets(train_set, test_set, 0, N)
        _, _, _, x_test, y_test, _ = preprocess_data(train_data, test_data, 0,
                                                     N)

        # Compute the model's prediction
        test_output = self.forward(x_test)
        pred_test = torch.argmax(test_output, dim=0)

        # Compare the prediction with the real values
        correct_test = pred_test == y_test
        acc = np.mean(correct_test.cpu().numpy())

        return acc
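A standalone sketch of the accuracy computation inside test(), assuming the network output is laid out as [n_classes, N] with one column per sample (which is why the argmax runs over dim=0); the numbers are illustrative only.

import numpy as np
import torch

test_output = torch.tensor([[0.1, 0.8, 0.3],   # shape [n_classes=2, N=3]
                            [0.9, 0.2, 0.7]])
y_test = torch.tensor([1, 0, 1])
pred_test = torch.argmax(test_output, dim=0)         # predicted class per column
acc = np.mean((pred_test == y_test).cpu().numpy())   # all three correct -> 1.0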
Example No. 4
def main(args):
    """
    :return:
    """
    args.task_type = 'regression'
    args.model_name = 'dnn'
    warnings.filterwarnings('ignore')
    torch.set_default_dtype(torch.float64)

    filename = args.dataname + str(args.num) + '_' + args.model_name + '_' + str(args.run_number)
    args.n_properties = properties_map[args.dataname]
    args.standardise = False

    with open('results/{}/{}/{}/{}.txt'.format(args.dataname, args.task_type, args.model_name, filename), 'a') as f:
        # Write hyperparameters to file
        write_args(f, args)
        f.flush()
        args.file = f

        # These are matrices of dimension [N_train, n_descriptors + n_properties],
        # [N_test, n_descriptors + n_properties] respectively. NaNs are imputed for missing
        # values. We will use the ECFP4 fingerprint descriptor (1024 bits, radius = 3)
        x = np.load(args.directory + args.dataname + '_x' + str(args.num) +
                        '_train_fingerprints.npy', allow_pickle=True)
        x_test = np.load(args.directory + args.dataname + '_x' + str(args.num) +
                             '_test_fingerprints.npy', allow_pickle=True)

        # Preprocess the data: standardising, applying PCA transforms and selecting relevant descriptors if desired.
        # Also converts x and x_test from numpy matrices to torch tensors.
        x, x_test, args.means, args.stds = preprocess_data(x, x_test, args.n_properties, args.pca_components, args.task_type)
        args.in_dim = x.shape[1]

        if args.model_name == 'baseline':
            f.write('\n ... predictions from baseline model.')
            # The baseline corresponds to mean imputation (with predictive uncertainty equal to
            # the standard deviation of the training set)
            r2_scores, mlls, rmses = baseline_metrics_calculator(x, args.n_properties,
                                                                 means=args.means,
                                                                 stds=args.stds)
            write_metrics(r2_scores, mlls, rmses, file=f, set_type='train')

            r2_scores, mlls, rmses = baseline_metrics_calculator(x_test, args.n_properties,
                                                                 means=args.means,
                                                                 stds=args.stds)
            write_metrics(r2_scores, mlls, rmses, file=f, set_type='test')

        else:
            f.write('\n ... building {} model'.format(args.model_name))

            # Load model
            model = RegressionWrapper(network=ProbabilisticVanillaNN(in_dim=args.in_dim, out_dim=args.n_properties,
                                                                     hidden_dims=args.hidden_dims, restrict_var=False),
                                      batch_size=args.batch_size, lr=args.lr, file=args.file)
            file_path = os.path.dirname(f.name) + '/models/{}_{}_{}_{}.dat'.format(args.dataname, args.model_name,
                                                                                   args.run_number, args.task_type)

            # If model was saved after last training procedure, reload to continue training from where you left off
            if os.path.isfile(file_path):
                model.network.load_state_dict(torch.load(file_path))

            f.write('\n ... training model. \n')

            # Train model
            model.train_model(x=x, epochs=args.epochs, epoch_print_freq=50, means=args.means,
                              stds=args.stds)

            # Save model
            torch.save(model.network.state_dict(), file_path)

            # Calculate performance metrics on the train and test datasets
            r2_scores, mlls, rmses = model.metrics_calculator(x, save=False)
            write_metrics(r2_scores, mlls, rmses, file=f, set_type='train')

            r2_scores, mlls, rmses = model.metrics_calculator(x_test, save=True)
            write_metrics(r2_scores, mlls, rmses, file=f, set_type='test')
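A shape sketch for the matrices loaded above: each row is one molecule, fingerprint descriptors followed by property columns, with NaN marking a missing property value. Sizes and values are placeholders; only the layout follows the comments in the example.

import numpy as np

n_descriptors, n_properties = 1024, 3                   # assumed sizes
x = np.random.rand(100, n_descriptors + n_properties)   # [N_train, n_descriptors + n_properties]
x[5, n_descriptors + 1] = np.nan                         # a missing property value
# preprocess_data then standardises / PCA-transforms the descriptor block and
# returns torch tensors together with the training means and stds.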
Example No. 5
def train_GSR(args):
    # ! Init Environment
    exp_init(args.seed if hasattr(args, 'seed') else 0, args.gpu)

    # ! Import packages
    # Note that the assignment of GPU-ID must be specified before torch/dgl is imported.
    import torch as th
    import dgl
    from utils.data_utils import preprocess_data
    from utils.early_stopper import EarlyStopping

    from models.GSR.GSR import GSR_pretrain, GSR_finetune, para_copy
    from models.GSR.config import GSRConfig
    from models.GSR.data_utils import get_pretrain_loader, get_structural_feature
    from models.GSR.cl_utils import MemoryMoCo, moment_update, NCESoftmaxLoss
    from models.GSR.trainer import FullBatchTrainer
    from models.GSR.trainGSR import train_GSR
    from models.GSR.PolyLRDecay import PolynomialLRDecay

    # ! Config
    cf = GSRConfig(args)
    cf.device = th.device("cuda:0" if args.gpu >= 0 else "cpu")

    # ! Load Graph
    g, features, cf.n_feat, cf.n_class, labels, train_x, val_x, test_x = \
        preprocess_data(cf.dataset, cf.train_percentage)
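    # Two complementary views of every node: 'F' = raw node features, 'S' = structural
    # features derived from the graph topology via get_structural_feature.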
    feat = {'F': features, 'S': get_structural_feature(g, cf)}
    cf.feat_dim = {v: f.shape[1] for v, f in feat.items()}
    supervision = SimpleObject({
        'train_x': train_x,
        'val_x': val_x,
        'test_x': test_x,
        'labels': labels
    })
    # ! Train Init
    print(f'{cf}\nStart training..')
    p_model = GSR_pretrain(g, cf).to(cf.device)
    # print(p_model)
    # ! Train Phase 1: Pretrain
    if cf.p_epochs > 0:
        # os.remove(cf.pretrain_model_ckpt)  # Debug Only
        if os.path.exists(cf.pretrain_model_ckpt):

            p_model.load_state_dict(
                th.load(cf.pretrain_model_ckpt, map_location=cf.device))
            print(f'Pretrain embedding loaded from {cf.pretrain_model_ckpt}')
        else:
            print(
                f'>>>> PHASE 1 - Pretraining and Refining Graph Structure <<<<<'
            )
            views = ['F', 'S']
            optimizer = th.optim.Adam(p_model.parameters(),
                                      lr=cf.prt_lr,
                                      weight_decay=cf.weight_decay)
            if cf.p_schedule_step > 1:
                scheduler_poly_lr_decay = PolynomialLRDecay(
                    optimizer,
                    max_decay_steps=cf.p_schedule_step,
                    end_learning_rate=0.0001,
                    power=2.0)
            # Construct virtual relation triples
            p_model_ema = GSR_pretrain(g, cf).to(cf.device)
            moment_update(p_model, p_model_ema, 0)  # Copy
            moco_memories = {
                v: MemoryMoCo(
                    cf.n_hidden,
                    cf.nce_k,  # Single-view contrast
                    cf.nce_t,
                    device=cf.device).to(cf.device)
                for v in views
            }
            criterion = NCESoftmaxLoss(cf.device)
            pretrain_loader = get_pretrain_loader(g.cpu(), cf)

            for epoch_id in range(cf.p_epochs):
                for step, (input_nodes, edge_subgraph,
                           blocks) in enumerate(pretrain_loader):
                    t0 = time()
                    blocks = [b.to(cf.device) for b in blocks]
                    edge_subgraph = edge_subgraph.to(cf.device)
                    input_feature = {
                        v: feat[v][input_nodes].to(cf.device)
                        for v in views
                    }

                    # ===================Moco forward=====================
                    p_model.train()

                    q_emb = p_model(edge_subgraph,
                                    blocks,
                                    input_feature,
                                    mode='q')
                    std_dict = {
                        v: round(q_emb[v].std(dim=0).mean().item(), 4)
                        for v in ['F', 'S']
                    }
                    print(f"Std: {std_dict}")

                    if std_dict['F'] == 0 or std_dict['S'] == 0:
                        print(
                            f'\n\nWarning: embeddings collapsed (zero std) at Epoch={epoch_id}, Step={step}\n\n'
                        )
                        # q_emb = p_model(edge_subgraph, blocks, input_feature, mode='q')

                    with th.no_grad():
                        k_emb = p_model_ema(edge_subgraph,
                                            blocks,
                                            input_feature,
                                            mode='k')
                    intra_out, inter_out = [], []

                    for tgt_view, memory in moco_memories.items():
                        for src_view in views:
                            if src_view == tgt_view:
                                intra_out.append(
                                    memory(q_emb[f'{tgt_view}'],
                                           k_emb[f'{tgt_view}']))
                            else:
                                inter_out.append(
                                    memory(q_emb[f'{src_view}->{tgt_view}'],
                                           k_emb[f'{tgt_view}']))

                    # ===================backward=====================
                    # ! Self-Supervised Learning
                    intra_loss = th.stack(
                        [criterion(out_) for out_ in intra_out]).mean()
                    inter_loss = th.stack(
                        [criterion(out_) for out_ in inter_out]).mean()
                    # ! Loss Fusion
                    loss_tensor = th.stack([intra_loss, inter_loss])
                    intra_w = float(cf.intra_weight)
                    loss_weights = th.tensor([intra_w, 1 - intra_w],
                                             device=cf.device)
                    loss = th.dot(loss_weights, loss_tensor)
                    # ! Semi-Supervised Learning
                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()

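                    # EMA ("momentum") update of the key encoder p_model_ema toward
                    # p_model, weighted by cf.momentum_factor (the earlier call with
                    # factor 0 performed a full copy).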
                    moment_update(p_model, p_model_ema, cf.momentum_factor)
                    print_log({
                        'Epoch': epoch_id,
                        'Batch': step,
                        'Time': time() - t0,
                        'intra_loss': intra_loss.item(),
                        'inter_loss': inter_loss.item(),
                        'overall_loss': loss.item()
                    })

                    if cf.p_schedule_step > 1:
                        scheduler_poly_lr_decay.step()

                epochs_to_save = P_EPOCHS_SAVE_LIST + (
                    [1, 2, 3, 4] if args.dataset == 'arxiv' else [])
                if epoch_id + 1 in epochs_to_save:
                    # Convert from p_epochs to current p_epoch checkpoint
                    ckpt_name = cf.pretrain_model_ckpt.replace(
                        f'_pi{cf.p_epochs}', f'_pi{epoch_id + 1}')
                    th.save(p_model.state_dict(), ckpt_name)
                    print(f'Model checkpoint {ckpt_name} saved.')

            th.save(p_model.state_dict(), cf.pretrain_model_ckpt)

    # ! Train Phase 2: Graph Structure Refine
    print(f'>>>> PHASE 2 - Graph Structure Refine <<<<< ')

    if cf.p_epochs <= 0 or cf.add_ratio + cf.rm_ratio == 0:
        print('Use original graph!')
        g_new = g
    else:
        if os.path.exists(cf.refined_graph_file):
            print(f'Refined graph loaded from {cf.refined_graph_file}')
            g_new = dgl.load_graphs(cf.refined_graph_file)[0][0]
        else:
            g_new = p_model.refine_graph(g, feat)
            dgl.save_graphs(cf.refined_graph_file, [g_new])

    # ! Train Phase 3:  Node Classification
    f_model = GSR_finetune(cf).to(cf.device)
    print(f_model)
    # Copy parameters
    if cf.p_epochs > 0:
        para_copy(f_model,
                  p_model.encoder.F,
                  paras_to_copy=['conv1.weight', 'conv1.bias'])
    optimizer = th.optim.Adam(f_model.parameters(),
                              lr=cf.lr,
                              weight_decay=cf.weight_decay)
    stopper = EarlyStopping(patience=cf.early_stop,
                            path=cf.checkpoint_file) if cf.early_stop else None
    del g, feat, p_model
    th.cuda.empty_cache()

    print(f'>>>> PHASE 3 - Node Classification <<<<< ')
    trainer_func = FullBatchTrainer
    trainer = trainer_func(model=f_model,
                           g=g_new,
                           features=features,
                           sup=supervision,
                           cf=cf,
                           stopper=stopper,
                           optimizer=optimizer,
                           loss_func=th.nn.CrossEntropyLoss())
    trainer.run()
    trainer.eval_and_save()
    return cf
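A standalone sketch of the loss-fusion step in Phase 1 above: the overall pretraining loss is a convex combination of the intra-view and inter-view contrastive losses, weighted by cf.intra_weight. Plain floats stand in for the real values.

import torch as th

intra_loss, inter_loss = th.tensor(1.2), th.tensor(0.8)
intra_w = 0.5                                     # cf.intra_weight
loss_weights = th.tensor([intra_w, 1 - intra_w])
loss = th.dot(loss_weights, th.stack([intra_loss, inter_loss]))
# equivalent to intra_w * intra_loss + (1 - intra_w) * inter_loss -> 1.0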
Example No. 6
def main(args):
    """
    :return:
    """

    probe_file = 'Probes_fingerprints.npy'

    args.task_type = 'classification'
    args.model_name = 'dnn'
    warnings.filterwarnings('ignore')
    torch.set_default_dtype(torch.float64)

    filename = args.dataname + str(
        args.num) + '_' + args.model_name + '_' + str(
            args.run_number) + 'probes'
    args.n_properties = properties_map[args.dataname]
    args.standardise = False

    with open(
            'results/{}/{}/{}/{}.txt'.format(args.dataname, args.task_type,
                                             args.model_name, filename),
            'a') as f:
        # Write hyperparameters to file
        write_args(f, args)
        f.flush()
        args.file = f

        # These are matrices of dimension [N_train, n_descriptors + n_properties],
        # [N_test, n_descriptors + n_properties] respectively. NaNs are imputed for missing
        # values. We will use the ECFP4 fingerprint descriptor (1024 bits, radius = 3)

        x = np.load(args.directory + args.dataname + '_x' + str(args.num) +
                    '_train_fingerprints.npy',
                    allow_pickle=True)
        x_test = np.load(args.directory + probe_file, allow_pickle=True)

        y = np.empty((x_test.shape[0], args.n_properties))

        x_test = np.concatenate((x_test, y), axis=1)

        # Preprocess the data: standardising, applying PCA transforms and selecting relevant descriptors if desired.
        # Also converts x and x_test from numpy matrices to torch tensors.
        x, x_test, args.means, args.stds = preprocess_data(
            x, x_test, args.n_properties, args.pca_components, args.task_type)

        args.in_dim = x.shape[1]

        f.write('\n ... building {} model'.format(args.model_name))

        # Load model
        model = ClassificationWrapper(network=BinaryClassificationNN(
            in_dim=args.in_dim,
            out_dim=args.n_properties,
            hidden_dims=args.hidden_dims),
                                      batch_size=args.batch_size,
                                      lr=args.lr,
                                      file=args.file)

        file_path = os.path.dirname(
            args.file.name) + '/models/{}_{}_{}_{}.dat'.format(
                args.dataname, args.model_name, args.run_number,
                args.task_type)
        # If model was saved after last training procedure, reload parameters to continue training from where you left off
        if os.path.isfile(file_path):
            model.network.load_state_dict(torch.load(file_path))
        else:
            raise Exception('No model found.')

        # Make predictions

        predictions = model.predict(x_test, save=True, means=args.means)

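A small sketch of the probe-set padding step above: the probe fingerprints carry no property labels, so an uninitialised property block is appended before preprocessing so that x_test matches the [N, n_descriptors + n_properties] layout of the training matrix. Sizes are placeholders.

import numpy as np

n_descriptors, n_properties = 1024, 3              # assumed sizes
x_test = np.random.rand(50, n_descriptors)         # probe fingerprints only
y = np.empty((x_test.shape[0], n_properties))      # uninitialised label block
x_test = np.concatenate((x_test, y), axis=1)       # -> shape (50, 1027)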
Example No. 7
                        "--gpu",
                        default=0,
                        type=int,
                        help="GPU id to use.")
    parser.add_argument("-d", "--dataset", type=str, default=dataset)
    parser.add_argument("-b",
                        "--block_log",
                        action="store_true",
                        help="block log or not")
    parser.add_argument("-t", "--train_percentage", default=10, type=int)
    parser.add_argument("--seed", default=0)
    args = parser.parse_args()

    train_percentage = 10
    load_device = torch.device('cpu')
    graph, features, n_feat, n_class, labels, train_x, val_x, test_x = preprocess_data(
        dataset, train_percentage, load_device)
    graph = graph_normalization(graph, False)
    train_y, val_y, test_y = labels[train_x], labels[val_x], labels[test_x]

    K = 2
    new_graph = emb_generator(graph, features, K, args)

    # emb = new_graph.ndata['se'].numpy()
    # LR = LogisticRegression()
    # LR.fit(emb[train_x.numpy()], train_y.numpy())
    # val_pred = torch.tensor(LR.predict(emb[val_x.numpy()]))
    # test_pred = torch.tensor(LR.predict(emb[test_x.numpy()]))
    #
    # val_acc = accuracy(val_pred, val_y)
    # test_acc = accuracy(test_pred, test_y)
    #
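A hedged sketch that reinstates the commented-out logistic-regression probe above: the 'se' node-feature key and the fit/predict flow follow the comments, scikit-learn's LogisticRegression is assumed to be available, and the undefined accuracy helper is replaced by an explicit mean.

from sklearn.linear_model import LogisticRegression
import torch

emb = new_graph.ndata['se'].numpy()
LR = LogisticRegression(max_iter=1000)
LR.fit(emb[train_x.numpy()], train_y.numpy())
val_pred = torch.tensor(LR.predict(emb[val_x.numpy()]))
test_pred = torch.tensor(LR.predict(emb[test_x.numpy()]))
val_acc = (val_pred == val_y).float().mean().item()
test_acc = (test_pred == test_y).float().mean().item()
print(f'val_acc={val_acc:.4f}, test_acc={test_acc:.4f}')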
Example No. 8
FONT_SIZE = 18


def homo(graph, labels):
    graph = dgl.remove_self_loop(graph)
    src, dst = graph.edges()
    intra_num = (labels[src] == labels[dst]).long().sum().numpy()
    inter_num = (labels[src] != labels[dst]).long().sum().numpy()

    return int(intra_num), int(inter_num)


dataset = 'arxiv'
train_percentage = 0
g_ori, features, n_feat, n_class, labels, train_x, val_x, test_x = preprocess_data(
    dataset, train_percentage)

homo_dict = {}
homo_dict['intra'] = np.zeros((11, 11)).astype(int)
homo_dict['inter'] = np.zeros((11, 11)).astype(int)
homo_dict['homo_ratio'] = np.zeros((11, 11)).astype(float)
filebase = 'PR-_lr0.001_bsz1024_pi2_encGCN_dec-l2_hidden48-prt_intra_w-0.5_ncek16382_fanout5_10_prdo0_act_Elu_d256_GR-fsim_norm1_fsim_weight0.0_add0.0_rm0.25.bin'
filebase = filebase.split('fsim_weight')[0]

fsim_weight = 0.0
add_list = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0]
rm_list = [0.0, 0.05, 0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5]

for aid in range(len(add_list)):
    add = add_list[aid]
    for rid in range(len(rm_list)):
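The loop above is cut off, but the intent of the counters is clear from homo_dict: an edge-homophily ratio for every (add, rm) setting. A hedged sketch of that computation, shown here on the original graph g_ori; inside the truncated loop it would presumably be evaluated on each refined graph named by the filebase pattern.

intra, inter = homo(g_ori, labels)        # same-label vs. different-label edge counts
homo_ratio = intra / (intra + inter)      # fraction of edges joining same-label nodes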
Example No. 9
def full_train_test(optimizer,
                    N_train,
                    N_test,
                    n_iter,
                    n_epochs,
                    batch_size=1,
                    d1=200,
                    d2=200,
                    d3=200,
                    gamma=1,
                    alpha=1,
                    rho=1,
                    verbose=False):
    # Initialize metrics arrays
    accuracy_test_array = []
    accuracy_train_array = []

    tr_acc_evolution = []
    te_acc_evolution = []

    time_array = []

    # Generate the sets
    train_set, test_set = get_sets()

    # Generate train and test DataLoader
    train_data, test_data = generate_pair_sets(train_set, test_set, N_train,
                                               N_test, batch_size)

    d0 = 1 * 28 * 28

    for i in range(1, n_iter + 1):
        print("Iteration %d" % i)

        if (optimizer == "BCD"):
            train_input, train_target, y_train_1hot, test_input, test_target, y_test_1hot = preprocess_data(
                train_data, test_data, N_train, N_test)
            # Instantiate the model
            model = ModelBCD(d0, d1, d2, d3, 10, gamma, alpha, rho)

            start_time = time.time()

            # Train the model
            tr_acc, te_acc = model.train(n_epochs,
                                         train_input,
                                         train_target,
                                         y_train_1hot,
                                         test_input,
                                         test_target,
                                         y_test_1hot,
                                         verbose=verbose)
        else:
            # Instantiate the model
            model = ModelDFW.ModelDFW(d0, d1, d2, d3, 10)

            start_time = time.time()

            # Train the model
            tr_acc, te_acc = model.train(train_data,
                                         test_data,
                                         n_epochs,
                                         verbose=verbose)

        end_time = time.time()

        # Store the train and test accuracy for each epoch of the iteration
        tr_acc_evolution.append(tr_acc)
        te_acc_evolution.append(te_acc)

        # Computes test accuracy with "new" data
        if (optimizer == "BCD"):
            acc_test = model.test(N_test, train_set, test_set)
        else:
            _, test_data = generate_pair_sets(train_set, test_set, N_train,
                                              N_test, batch_size)
            acc_test = model.test(test_data, batch_size)

        # Store the test accuracies for each iteration
        accuracy_test_array.append(acc_test)

        # Store the train accuracies for each iteration
        accuracy_train_array.append(tr_acc[n_epochs - 1])

        # Store the duration of the iteration
        time_array.append(end_time - start_time)

    # Compute the mean and std of the test accuracies
    acc_mean, acc_std = extract_mean_std(accuracy_test_array)

    # Compute the mean and std of the duration of an iteration
    time_mean, time_std = extract_mean_std(time_array)

    print("Accuracy: %.3f +/- %.3f" % (acc_mean, acc_std))
    print("Iteration time:  %.3f +/- %.3f seconds" % (time_mean, time_std))

    return tr_acc_evolution, te_acc_evolution, accuracy_test_array, time_array
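A hedged sketch of the aggregation helper used above; extract_mean_std is assumed to be a thin wrapper over numpy's mean and std of the per-iteration values.

import numpy as np

def extract_mean_std(values):
    """Assumed helper: mean and standard deviation of per-iteration metrics."""
    values = np.asarray(values, dtype=float)
    return values.mean(), values.std()

acc_mean, acc_std = extract_mean_std([0.91, 0.93, 0.92])  # illustrative accuracies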