Example #1
File: DMGI.py Project: zhanghegui/DMGI
    def __init__(self, args):
        super(modeler, self).__init__()
        self.args = args
        self.gcn = nn.ModuleList([GCN(args.ft_size, args.hid_units, args.activation, args.drop_prob, args.isBias) for _ in range(args.nb_graphs)])

        self.disc = Discriminator(args.hid_units)
        self.H = nn.Parameter(torch.FloatTensor(1, args.nb_nodes, args.hid_units))
        self.readout_func = self.args.readout_func
        if args.isAttn:
            self.attn = nn.ModuleList([Attention(args) for _ in range(args.nheads)])

        if args.isSemi:
            self.logistic = LogReg(args.hid_units, args.nb_classes).to(args.device)

        self.init_weight()
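Every snippet on this page trains a LogReg classifier; the DGI/DMGI-style examples construct it as LogReg(hid_units, nb_classes) and fit it on frozen embeddings. A minimal sketch of such a module, assuming a single linear layer with Xavier-initialized weights (the actual class in those repositories may differ in details):

import torch
import torch.nn as nn

class LogReg(nn.Module):
    # Minimal logistic-regression head: one linear layer over fixed embeddings,
    # returning raw logits for use with nn.CrossEntropyLoss / nn.BCEWithLogitsLoss.
    def __init__(self, ft_in, nb_classes):
        super(LogReg, self).__init__()
        self.fc = nn.Linear(ft_in, nb_classes)
        for m in self.modules():
            if isinstance(m, nn.Linear):
                nn.init.xavier_uniform_(m.weight.data)
                if m.bias is not None:
                    m.bias.data.fill_(0.0)

    def forward(self, seq):
        return self.fc(seq)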
Example #2
def task(embeds):
    train_embs = embeds[0, idx_train]
    val_embs = embeds[0, idx_val]
    test_embs = embeds[0, idx_test]

    log = LogReg(hid_units, nb_classes)
    opt = torch.optim.Adam(log.parameters(), lr=0.01, weight_decay=0.0)
    if torch.cuda.is_available():
        log.cuda()

    for _ in range(100):
        log.train()
        opt.zero_grad()

        logits = log(train_embs)
        loss = xent(logits, train_lbls)

        loss.backward()
        opt.step()

    logits = log(test_embs)
    preds = torch.argmax(logits, dim=1)
    acc = torch.sum(preds == test_lbls).float() / test_lbls.shape[0]
    return acc.detach().cpu().numpy()
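The task() helper above reads hid_units, nb_classes, xent, the index tensors, and the label tensors from module scope. A hedged, self-contained sketch of the setup it expects, with placeholder sizes and random data standing in for the real encoder output and dataset (it assumes task() and a LogReg class are already defined; in the original script the real values come from the loader and model.embed(...), as in Example #3):

import torch
import torch.nn as nn
import torch.nn.functional as F

# Placeholder shapes and data; not the real dataset or encoder output.
nb_nodes, hid_units, nb_classes = 1200, 512, 7
embeds = torch.randn(1, nb_nodes, hid_units)
labels = F.one_hot(torch.randint(0, nb_classes, (nb_nodes,)), nb_classes).float().unsqueeze(0)

if torch.cuda.is_available():           # task() moves LogReg to the GPU, so keep devices consistent
    embeds, labels = embeds.cuda(), labels.cuda()

idx_train = torch.arange(0, 140)
idx_val = torch.arange(140, 640)
idx_test = torch.arange(640, nb_nodes)

xent = nn.CrossEntropyLoss()
train_lbls = torch.argmax(labels[0, idx_train], dim=1)
val_lbls = torch.argmax(labels[0, idx_val], dim=1)
test_lbls = torch.argmax(labels[0, idx_test], dim=1)

print(task(embeds))                      # trains a fresh LogReg head and returns test accuracy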
Example #3
embeds, _ = model.embed(features, sp_adj if sparse else adj, sparse, None)
train_embs = embeds[0, idx_train]
val_embs = embeds[0, idx_val]
test_embs = embeds[0, idx_test]

train_lbls = torch.argmax(labels[0, idx_train], dim=1)
val_lbls = torch.argmax(labels[0, idx_val], dim=1)
test_lbls = torch.argmax(labels[0, idx_test], dim=1)

tot = torch.zeros(1)
tot = tot.cuda()

accs = []

for _ in range(50):
    log = LogReg(hid_units, nb_classes)
    opt = torch.optim.Adam(log.parameters(), lr=0.01, weight_decay=0.0)
    log.cuda()

    pat_steps = 0
    best_acc = torch.zeros(1)
    best_acc = best_acc.cuda()
    for _ in range(100):
        log.train()
        opt.zero_grad()

        logits = log(train_embs)
        loss = xent(logits, train_lbls)

        loss.backward()
        opt.step()
Example #4
    args=parser.parse_args()

    n_classes = 10
    n_epochs = 200

    pre = Preprocessing('digits')
    pre.load_data(filename='train.csv', name='train')

    X_df = pre.get(name='train').drop(columns=['0'])
    y_df = pre.get(name='train')['0']

    dtype = torch.float
    device = torch.device("cpu")

    model_name = 'logreg_digits'
    model = LogReg(model_name, 256, n_classes)

    learning_rate = 0.0001
    batch_size = 32

    train_classifier = TrainClassifier(model, X_df, y_df)
    trained_model, optimizer, criterion, loss_hist, loss_val_hist, best_param = train_classifier.run_train(n_epochs=n_epochs, lr=learning_rate, batch_size=batch_size)
    pre.save_results(loss_hist, loss_val_hist, f'{model_name}')

    trained_model.load_state_dict(state_dict=best_param)
    trained_model.eval()

    if args.s_model:
        m_exporter = ModelExporter('digits')
        m_exporter.save_nn_model(trained_model, optimizer, 0, n_classes, n_epochs, trained_model.get_args())
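Examples #4, #5, #9 and #16 use a different LogReg variant that also takes a model name (Example #9 calls it as LogReg(name='logreg', d_in=n_features, d_out=n_classes), and the name is reused by ModelExporter). A hypothetical sketch of that variant, assuming it is still a single linear layer and that get_args() only echoes the constructor arguments; the real class in that project may differ:

import torch.nn as nn

class LogReg(nn.Module):
    # Hypothetical sketch of the named variant used with TrainClassifier / ModelExporter.
    def __init__(self, name, d_in, d_out):
        super(LogReg, self).__init__()
        self.name = name                      # kept so the exporter can label the saved model
        self.d_in, self.d_out = d_in, d_out
        self.linear = nn.Linear(d_in, d_out)  # softmax is left to nn.CrossEntropyLoss

    def forward(self, x):
        return self.linear(x)

    def get_args(self):
        # Assumed helper (called in Example #4): arguments needed to rebuild the model on import.
        return {'name': self.name, 'd_in': self.d_in, 'd_out': self.d_out}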
Example #5
    parser.add_argument('--n_feat', default=1000, help='number of features')
    parser.add_argument('--s_model', default=False, help='save trained model')

    args=parser.parse_args()

    pre = Preprocessing('IMDB')

    n_classes = 2
    n_features = int(args.n_feat)
    n_epochs = 100
    pre.load_data(filename=f'training_data_{n_features}.csv', name='training_data')

    X_df = pre.get(name='training_data').drop(columns=['target'])
    y_df = pre.get(name='training_data')['target']

    model = LogReg('log_reg', n_features, n_classes)

    train_classifier = TrainClassifier(model, X_df, y_df)
    trained_model, optimizer, criterion, loss_hist, loss_validate_hist = train_classifier.run_train(n_epochs = n_epochs)
    pre.save_results(loss_hist, loss_validate_hist, f'log_reg_{100}')

    m_exporter = ModelExporter('IMDB')
    m_exporter.save_nn_model(trained_model, optimizer, n_features, n_classes, n_epochs)

    ## test part
    pre.load_data(filename=f'test_data_{n_features}.csv', name='test_data')

    X_test_df = pre.get(name='test_data').drop(columns=['target'])
    y_test_df = pre.get(name='test_data')['target']

Example #6
def run_GCN(args,
            gpu_id=None,
            exp_name=None,
            number=0,
            return_model=False,
            return_time_series=False):
    random.seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    np.random.seed(args.seed)
    final_acc = 0
    best_acc = 0
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    running_device = "cpu" if gpu_id is None \
        else torch.device('cuda:{}'.format(gpu_id) if torch.cuda.is_available() else 'cpu')
    dataset_kwargs = {}

    train_d, adj_list, x_list = get_dataset(args, dataset_kwargs)

    lable = train_d.data.y
    A_I = adj_list[0]
    A_I_nomal = adj_list[1]

    nb_edges = train_d.data.num_edges
    nb_nodes = train_d.data.num_nodes
    nb_feature = train_d.data.num_features
    nb_classes = int(lable.max() - lable.min()) + 1

    lable_matrix = (lable.view(nb_nodes, 1).repeat(1, nb_nodes) == lable.view(
        1, nb_nodes).repeat(nb_nodes, 1)) + 0
    I = (torch.eye(nb_nodes).to(lable_matrix.device) == 1)
    lable_matrix[I] = 0
    zero_vec = 0.0 * torch.ones_like(A_I_nomal)
    if args.dataset_name in [
            'Photo', 'DBLP', 'Crocodile', 'CoraFull', 'WikiCS'
    ]:
        useA = True
    else:
        useA = False
    model = UGRL_GCN_test(nb_nodes,
                          nb_feature,
                          args.dim,
                          dim_x=args.dim_x,
                          useact=args.usingact,
                          liner=args.UsingLiner,
                          dropout=args.dropout,
                          useA=useA)

    optimiser = torch.optim.Adam(model.parameters(),
                                 lr=args.lr,
                                 weight_decay=0.0001)

    model.to(running_device)
    lable = lable.to(running_device)
    if args.dataset_name == 'WikiCS':
        train_lbls = lable[train_d.data.train_mask[:, args.NewATop]]  # capture
        test_lbls = lable[train_d.data.test_mask]
    elif args.dataset_name in ['Cora', 'CiteSeer', 'PubMed']:
        train_lbls = lable[train_d.data.train_mask]
        test_lbls = lable[train_d.data.test_mask]
    elif args.dataset_name in ['Photo', 'DBLP', 'Crocodile', 'CoraFull']:
        train_index = []
        test_index = []
        for j in range(lable.max().item() + 1):
            num = ((lable == j) + 0).sum().item()
            index = torch.arange(0, len(lable))[(lable == j)]
            x_list0 = random.sample(list(index), int(len(index) * 0.1))
            for x in x_list0:
                train_index.append(int(x))
        for c in range(len(lable)):
            if int(c) not in train_index:
                test_index.append(int(c))
        train_lbls = lable[train_index]
        test_lbls = lable[test_index]
        val_lbls = lable[train_index]

    A_I_nomal_dense = A_I_nomal
    I_input = torch.eye(A_I_nomal.shape[1])  # .to(A_I_nomal.device)
    if args.dataset_name in ['PubMed', 'CoraFull', 'DBLP']:
        pass
    elif args.dataset_name in ['Crocodile', 'Photo', 'WikiCS']:
        A_I_nomal_dense = A_I_nomal_dense.to(running_device)
    ######################sparse################
    if args.dataset_name in [
            'PubMed', 'Crocodile', 'CoraFull', 'DBLP', 'Photo', 'WikiCS'
    ]:
        A_I_nomal = A_I_nomal.to_sparse()
        model.sparse = True
        I_input = I_input.to_sparse()
    ######################sparse################
    A_I_nomal = A_I_nomal.to(running_device)
    I_input = I_input.to(A_I_nomal.device)
    mask_I = I.to(running_device)
    zero_vec = zero_vec.to(running_device)
    my_margin = args.margin1
    my_margin_2 = my_margin + args.margin2
    margin_loss = torch.nn.MarginRankingLoss(margin=my_margin, reduce=False)
    num_neg = args.NN
    for current_iter, epoch in enumerate(
            tqdm(range(args.start_epoch, args.start_epoch + args.epochs + 1))):
        model.train()
        optimiser.zero_grad()
        idx = np.random.permutation(nb_nodes)
        feature_X = x_list[0].to(running_device)
        lbl_z = torch.tensor([0.]).to(running_device)
        feature_a = feature_X
        feature_p = feature_X
        feature_n = []
        idx_list = []
        idx_lable = []
        for i in range(num_neg):
            idx_0 = np.random.permutation(nb_nodes)
            idx_list.append(idx_0)
            idx_lable.append(lable[idx_0])
            feature_temp = feature_X[idx_0]
            feature_n.append(feature_temp)
        h_a, h_p, h_n_list, h_a_0, h_p_0, h_n_0_list = model(feature_a,
                                                             feature_p,
                                                             feature_n,
                                                             A_I_nomal,
                                                             I=I_input)
        s_p = F.pairwise_distance(h_a, h_p)
        cos_0_list = []
        for h_n_0 in h_n_0_list:
            cos_0 = F.pairwise_distance(h_a_0, h_n_0)
            cos_0_list.append(cos_0)
        cos_0_stack = torch.stack(cos_0_list).detach()
        cos_0_min = cos_0_stack.min(dim=0)[0]
        cos_0_max = cos_0_stack.max(dim=0)[0]
        gap = cos_0_max - cos_0_min
        weight_list = []
        for i in range(cos_0_stack.size()[0]):
            weight_list.append((cos_0_stack[i] - cos_0_min) / gap)
        s_n_list = []
        s_n_cosin_list = []
        for h_n in h_n_list:
            if args.dataset_name in ['Cora', 'CiteSeer']:
                s_n_cosin_list.append(cosine_dist(h_a, h_n)[mask_I].detach())
            s_n = F.pairwise_distance(h_a, h_n)
            s_n_list.append(s_n)
        margin_label = -1 * torch.ones_like(s_p)
        loss_mar = 0
        mask_margin_N = 0
        i = 0
        for s_n in s_n_list:
            loss_mar += (margin_loss(s_p, s_n, margin_label) *
                         weight_list[i]).mean()
            mask_margin_N += torch.max((s_n - s_p.detach() - my_margin_2),
                                       lbl_z).sum()
            i += 1
        mask_margin_N = mask_margin_N / num_neg
        string_1 = " loss_1: {:.3f}||loss_2: {:.3f}||".format(
            loss_mar.item(), mask_margin_N.item())
        loss = loss_mar * args.w_loss1 + mask_margin_N * args.w_loss2 / nb_nodes
        if args.dataset_name in ['Cora']:
            loss = loss_mar * args.w_loss1 + mask_margin_N * args.w_loss2
        loss.backward()
        optimiser.step()

        model.eval()
        if args.dataset_name in ['Crocodile', 'WikiCS', 'Photo']:
            h_p_d = h_p.detach()
            S_new = cosine_dist(h_p_d, h_p_d)
            model.A = normalize_graph(torch.mul(S_new,
                                                A_I_nomal_dense)).to_sparse()
        elif args.dataset_name in ['Cora', 'CiteSeer']:
            h_a, h_p = model.embed(feature_a,
                                   feature_p,
                                   feature_n,
                                   A_I_nomal,
                                   I=I_input)
            s_a = cosine_dist(h_a, h_a).detach()
            S = (torch.stack(s_n_cosin_list).mean(dim=0).expand_as(A_I) -
                 s_a).detach()
            # zero_vec = -9e15 * torch.ones_like(S)
            one_vec = torch.ones_like(S)
            s_a = torch.where(A_I_nomal > 0, one_vec, zero_vec)
            attention = torch.where(S < 0, s_a, zero_vec)
            attention_N = normalize_graph(attention)
            attention[I] = 0
            model.A = attention_N

        if epoch % 50 == 0:
            model.eval()
            h_a, h_p = model.embed(feature_a,
                                   feature_p,
                                   feature_n,
                                   A_I_nomal,
                                   I=I_input)
            if args.useNewA:
                embs = h_p  #torch.cat((h_a,h_p),dim=1)
            else:
                embs = h_a
            if args.dataset_name in ['Cora', 'CiteSeer', 'PubMed']:
                embs = embs / embs.norm(dim=1)[:, None]

            if args.dataset_name == 'WikiCS':
                train_embs = embs[train_d.data.train_mask[:, args.NewATop]]
                test_embs = embs[train_d.data.test_mask]
            elif args.dataset_name in ['Cora', 'CiteSeer', 'PubMed']:
                train_embs = embs[train_d.data.train_mask]
                test_embs = embs[train_d.data.test_mask]
                # val_embs = embs[train_d.data.val_mask]
            elif args.dataset_name in [
                    'Photo', 'DBLP', 'Crocodile', 'CoraFull'
            ]:
                train_embs = embs[train_index]
                test_embs = embs[test_index]
                #val_embs = embs[train_index]

            accs = []
            accs_small = []
            xent = nn.CrossEntropyLoss()
            for _ in range(2):
                log = LogReg(args.dim, nb_classes)
                opt = torch.optim.Adam(log.parameters(),
                                       lr=1e-2,
                                       weight_decay=args.wd)
                log.to(running_device)
                for _ in range(args.num1):
                    log.train()
                    opt.zero_grad()
                    logits = log(train_embs)
                    loss = xent(logits, train_lbls)
                    loss.backward()
                    opt.step()
                logits = log(test_embs)
                preds = torch.argmax(logits, dim=1)
                acc = torch.sum(
                    preds == test_lbls).float() / test_lbls.shape[0]
                accs.append(acc * 100)
                ac = []
                for i in range(nb_classes):
                    acc_small = torch.sum(
                        preds[test_lbls == i] == test_lbls[test_lbls == i]
                    ).float() / test_lbls[test_lbls == i].shape[0]
                    ac.append(acc_small * 100)
                accs_small = ac
            accs = torch.stack(accs)
            string_3 = ""
            for i in range(nb_classes):
                string_3 = string_3 + "|{:.1f}".format(accs_small[i].item())
            string_2 = Fore.GREEN + " epoch: {},accs: {:.1f},std: {:.2f} ".format(
                epoch,
                accs.mean().item(),
                accs.std().item())
            tqdm.write(string_1 + string_2 + string_3)
            final_acc = accs.mean().item()
            best_acc = max(best_acc, final_acc)
    return final_acc, best_acc
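run_GCN pulls every hyperparameter off the args object (the real script builds it with argparse). A hedged sketch of a namespace that satisfies the attributes the snippet actually reads; the values below are placeholders, not the tuned settings from the paper or repository, and get_dataset may read additional fields:

from types import SimpleNamespace

# Placeholder values only; they cover the attributes run_GCN itself accesses.
args = SimpleNamespace(
    seed=0, dataset_name='Cora', dim=256, dim_x=64,
    usingact=True, UsingLiner=True, dropout=0.2,
    lr=0.001, margin1=0.8, margin2=0.2, NN=4,
    w_loss1=1.0, w_loss2=1.0, start_epoch=0, epochs=500,
    num1=100, wd=0.0, useNewA=True, NewATop=0,
)

final_acc, best_acc = run_GCN(args, gpu_id=0)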
Example #7
File: evaluate.py Project: zhanghegui/DMGI
def evaluate(embeds, idx_train, idx_val, idx_test, labels, device, isTest=True):
    hid_units = embeds.shape[2]
    nb_classes = labels.shape[2]
    xent = nn.CrossEntropyLoss()
    train_embs = embeds[0, idx_train]
    val_embs = embeds[0, idx_val]
    test_embs = embeds[0, idx_test]

    train_lbls = torch.argmax(labels[0, idx_train], dim=1)
    val_lbls = torch.argmax(labels[0, idx_val], dim=1)
    test_lbls = torch.argmax(labels[0, idx_test], dim=1)

    accs = []
    micro_f1s = []
    macro_f1s = []
    macro_f1s_val = [] ##
    for _ in range(50):
        log = LogReg(hid_units, nb_classes)
        opt = torch.optim.Adam(log.parameters(), lr=0.01, weight_decay=0.0)
        log.to(device)

        val_accs = []; test_accs = []
        val_micro_f1s = []; test_micro_f1s = []
        val_macro_f1s = []; test_macro_f1s = []
        for iter_ in range(50):
            # train
            log.train()
            opt.zero_grad()

            logits = log(train_embs)
            loss = xent(logits, train_lbls)

            loss.backward()
            opt.step()

            # val
            logits = log(val_embs)
            preds = torch.argmax(logits, dim=1)

            val_acc = torch.sum(preds == val_lbls).float() / val_lbls.shape[0]
            val_f1_macro = f1_score(val_lbls.cpu(), preds.cpu(), average='macro')
            val_f1_micro = f1_score(val_lbls.cpu(), preds.cpu(), average='micro')

            val_accs.append(val_acc.item())
            val_macro_f1s.append(val_f1_macro)
            val_micro_f1s.append(val_f1_micro)

            # test
            logits = log(test_embs)
            preds = torch.argmax(logits, dim=1)

            test_acc = torch.sum(preds == test_lbls).float() / test_lbls.shape[0]
            test_f1_macro = f1_score(test_lbls.cpu(), preds.cpu(), average='macro')
            test_f1_micro = f1_score(test_lbls.cpu(), preds.cpu(), average='micro')

            test_accs.append(test_acc.item())
            test_macro_f1s.append(test_f1_macro)
            test_micro_f1s.append(test_f1_micro)


        max_iter = val_accs.index(max(val_accs))
        accs.append(test_accs[max_iter])

        max_iter = val_macro_f1s.index(max(val_macro_f1s))
        macro_f1s.append(test_macro_f1s[max_iter])
        macro_f1s_val.append(val_macro_f1s[max_iter]) ###

        max_iter = val_micro_f1s.index(max(val_micro_f1s))
        micro_f1s.append(test_micro_f1s[max_iter])

    if isTest:
        print("\t[Classification] Macro-F1: {:.4f} ({:.4f}) | Micro-F1: {:.4f} ({:.4f})".format(np.mean(macro_f1s),
                                                                                                np.std(macro_f1s),
                                                                                                np.mean(micro_f1s),
                                                                                                np.std(micro_f1s)))
    else:
        return np.mean(macro_f1s_val), np.mean(macro_f1s)

    test_embs = np.array(test_embs.cpu())
    test_lbls = np.array(test_lbls.cpu())

    run_kmeans(test_embs, test_lbls, nb_classes)
    run_similarity_search(test_embs, test_lbls)
Example #8
def main():

    saved_graph = os.path.join('assets', 'saved_graphs', 'best_dgi.pickle')
    saved_logreg = os.path.join('assets', 'saved_graphs', 'best_logreg.pickle')

    dataset = 'cora'

    # training params
    batch_size = 1
    nb_epochs = 10000
    patience = 25
    lr = 0.001
    l2_coef = 0.0
    drop_prob = 0.0
    hid_units = 512
    sparse = True
    nonlinearity = 'prelu' # special name to separate parameters

    adj, features, labels, idx_train, idx_test, idx_val = process.load_data(dataset)

    features, _ = process.preprocess_features(features)

    nb_nodes = features.shape[0]
    ft_size = features.shape[1]
    nb_classes = labels.shape[1]

    adj = process.normalize_adj(adj + sp.eye(adj.shape[0]))

    if sparse:
        adj = process.sparse_mx_to_torch_sparse_tensor(adj)
    else:
        adj = (adj + sp.eye(adj.shape[0])).todense()

    features = torch.FloatTensor(features[np.newaxis])
    if not sparse:
        adj = torch.FloatTensor(adj[np.newaxis])
    labels = torch.FloatTensor(labels[np.newaxis])
    idx_train = torch.LongTensor(idx_train)
    idx_val = torch.LongTensor(idx_val)
    idx_test = torch.LongTensor(idx_test)

    print("Training Nodes: {}, Testing Nodes: {}, Validation Nodes: {}".format(len(idx_train), len(idx_test), len(idx_val)))

    model = DGI(ft_size, hid_units, nonlinearity)
    optimiser = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=l2_coef)

    if torch.cuda.is_available():
        print('Using CUDA')
        model.cuda()
        features = features.cuda()
        adj = adj.cuda()  # adj already holds the (sparse or dense) tensor built above
        labels = labels.cuda()
        idx_train = idx_train.cuda()
        idx_val = idx_val.cuda()
        idx_test = idx_test.cuda()

    b_xent = nn.BCEWithLogitsLoss()
    xent = nn.CrossEntropyLoss()
    cant_wait = 0
    best = 1e9
    best_t = 0

    if not os.path.exists(saved_graph):
        pbar = trange(nb_epochs)
        for epoch in pbar:
            model.train()
            optimiser.zero_grad()

            idx = np.random.permutation(nb_nodes)
            shuf_fts = features[:, idx, :]

            lbl_1 = torch.ones(batch_size, nb_nodes)
            lbl_2 = torch.zeros(batch_size, nb_nodes)
            lbl = torch.cat((lbl_1, lbl_2), 1)

            if torch.cuda.is_available():
                shuf_fts = shuf_fts.cuda()
                lbl = lbl.cuda()

            logits = model(features, shuf_fts, adj, sparse, None, None, None)

            loss = b_xent(logits, lbl)

            pbar.desc = 'Loss: {:.4f}'.format(loss)

            if loss < best:
                best = loss
                best_t = epoch
                cant_wait = 0
                torch.save(model.state_dict(), saved_graph)
            else:
                cant_wait += 1

            if cant_wait == patience:
                tqdm.write('Early stopping!')
                break

            loss.backward()
            optimiser.step()


    print('Loading {}th Epoch'.format(best_t) if best_t else 'Loading Existing Graph')
    model.load_state_dict(torch.load(saved_graph))

    embeds, _ = model.embed(features, adj, sparse, None)
    train_embs = embeds[0, idx_train]
    val_embs = embeds[0, idx_val]
    test_embs = embeds[0, idx_test]

    train_lbls = torch.argmax(labels[0, idx_train], dim=1)
    val_lbls = torch.argmax(labels[0, idx_val], dim=1)
    test_lbls = torch.argmax(labels[0, idx_test], dim=1)

    tot = torch.zeros(1)
    if torch.cuda.is_available():
        tot = tot.cuda()

    accs = []

    print("\nValidation:")
    pbar = trange(50)
    for _ in pbar:
        log = LogReg(hid_units, nb_classes)
        opt = torch.optim.Adam(log.parameters(), lr=0.01, weight_decay=0.0)

        pat_steps = 0
        best_acc = torch.zeros(1)
        if torch.cuda.is_available():
            log.cuda()
            best_acc = best_acc.cuda()
        for _ in range(100):
            log.train()
            opt.zero_grad()

            logits = log(train_embs)
            loss = xent(logits, train_lbls)

            loss.backward()
            opt.step()

        logits = log(test_embs)
        preds = torch.argmax(logits, dim=1)
        acc = torch.sum(preds == test_lbls).float() / test_lbls.shape[0]
        accs.append(acc * 100)
        pbar.desc = "Accuracy: {:.2f}%".format(100 * acc)
        tot += acc

    torch.save(log.state_dict(), saved_logreg)

    accs = torch.stack(accs)
    print('Average Accuracy: {:.2f}%'.format(accs.mean()))
    print('Standard Deviation: {:.3f}'.format(accs.std()))

    print("\nTesting")
    logits = log(val_embs)
    preds = torch.argmax(logits, dim=1)
    acc = torch.sum(preds == val_lbls).float() / val_lbls.shape[0]
    print("Accuracy: {:.2f}%".format(100 * acc))
Example #9
        pre.get('train')['0'],
        test_size=0.01)

    # transform to torch structures
    dtype = torch.float
    device = torch.device("cpu")

    X_train = torch.tensor(X_train_df.values, device=device, dtype=dtype)
    y_train = torch.tensor(y_train_df.values, device=device, dtype=dtype)

    # Softmax regression model

    n_features = X_train.size()[1]
    n_classes = len(np.unique(y_train.round().numpy()))

    model = LogReg(name='logreg', d_in=n_features, d_out=n_classes)

    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=0.001, momentum=0.9)

    size_batch = 100
    loss_hist = []

    # Train
    start = time.time()
    for t in range(5000):

        X_train_mini, y_train_mini = get_mini_batching(X_train, y_train,
                                                       size_batch)

        # Compute the prediction (forward step)
Example #10
File: execute.py Project: HekpoMaH/DGI
def process_inductive(dataset, gnn_type="GCNConv", K=None, random_init=False, runs=10):

    hyperparameters = get_hyperparameters()
    nb_epochs = hyperparameters["nb_epochs"]
    patience = hyperparameters["patience"]
    lr = hyperparameters["lr"]
    l2_coef = hyperparameters["l2_coef"]
    drop_prob = hyperparameters["drop_prob"]
    hid_units = hyperparameters["hid_units"]
    nonlinearity = hyperparameters["nonlinearity"]
    batch_size = hyperparameters["batch_size"]

    norm_features = torch_geometric.transforms.NormalizeFeatures()
    dataset_train = PPI(
        "./geometric_datasets/"+dataset,
        split="train",
        transform=norm_features,
    )
    print(dataset_train)
    dataset_val = PPI(
        "./geometric_datasets/"+dataset,
        split="val",
        transform=norm_features,
    )
    print(dataset_val)
    dataset_test = PPI(
        "./geometric_datasets/"+dataset,
        split="test",
        transform=norm_features,
    )
    data = []
    for d in dataset_train:
        data.append(d)
    for d in dataset_val:
        data.append(d)

    ft_size = dataset_train[0].x.shape[1]
    nb_classes = dataset_train[0].y.shape[1] # multilabel
    b_xent = nn.BCEWithLogitsLoss()

    loader_train = DataLoader(
        data,
        batch_size=hyperparameters["batch_size"],
        shuffle=True,
    )
    loader_test = DataLoader(
        dataset_test,
        batch_size=hyperparameters["batch_size"],
        shuffle=False
    )

    all_accs = []
    for _ in range(runs):
        model = DGI(ft_size, hid_units, nonlinearity, update_rule=gnn_type, batch_size=1, K=K)
        model_name = get_model_name(dataset, gnn_type, K, random_init=random_init)
        print(model)
        optimiser = torch.optim.Adam(model.parameters(), lr=lr, weight_decay=l2_coef)

        if torch.cuda.is_available():
            print('Using CUDA')
            model = model.cuda()
        model.train()

        torch.cuda.empty_cache()
        for epoch in range(20):
            if random_init:
                break
            total_loss = 0
            batch_id = 0
            model.train()
            loaded = list(loader_train)
            for batch in loaded:
                optimiser.zero_grad()
                if torch.cuda.is_available():
                    batch = batch.to('cuda')
                nb_nodes = batch.x.shape[0]
                features = batch.x
                labels = batch.y
                edge_index = batch.edge_index

                idx = np.random.randint(0, len(data))
                while idx == batch_id:
                    idx = np.random.randint(0, len(data))
                shuf_fts = torch.nn.functional.dropout(loaded[idx].x, drop_prob)
                edge_index2 = loaded[idx].edge_index

                lbl_1 = torch.ones(nb_nodes)
                lbl_2 = torch.zeros(shuf_fts.shape[0])
                lbl = torch.cat((lbl_1, lbl_2), 0)

                if torch.cuda.is_available():
                    shuf_fts = shuf_fts.cuda()
                    if edge_index2 is not None:
                        edge_index2 = edge_index2.cuda()
                    lbl = lbl.cuda()
                
                logits = model(features, shuf_fts, edge_index, batch=batch.batch, edge_index_alt=edge_index2)

                loss = b_xent(logits, lbl)
                loss.backward()
                optimiser.step()
                batch_id += 1
                total_loss += loss.item()


            print(epoch, 'Train Loss:', total_loss/(len(dataset_train)))

        torch.save(model.state_dict(), './trained_models/'+model_name)
        torch.cuda.empty_cache()

        print('Loading last epoch')
        if not random_init:
            model.load_state_dict(torch.load('./trained_models/'+model_name))
        model.eval()

        b_xent_reg = nn.BCEWithLogitsLoss(pos_weight=torch.tensor(2.25))
        train_embs, whole_train_data = preprocess_embeddings(model, dataset_train)
        val_embs, whole_val_data = preprocess_embeddings(model, dataset_val)
        test_embs, whole_test_data = preprocess_embeddings(model, dataset_test)

        for _ in range(50):
            log = LogReg(hid_units, nb_classes)
            opt = torch.optim.Adam(log.parameters(), lr=0.01, weight_decay=0.0)
            log.cuda()

            pat_steps = 0
            best = 1e9
            log.train()
            for _ in range(250):
                opt.zero_grad()

                logits = log(train_embs)
                loss = b_xent_reg(logits, whole_train_data.y)
                
                loss.backward()
                opt.step()

                log.eval()
                val_logits = log(val_embs) 
                loss = b_xent_reg(val_logits, whole_val_data.y)
                if loss.item() < best:
                    best = loss.item()
                    pat_steps = 0
                if pat_steps >= 5:
                    break

                pat_steps += 1


            log.eval()
            logits = log(test_embs)
            preds = torch.sigmoid(logits) > 0.5
            f1 = sklearn.metrics.f1_score(whole_test_data.y.cpu(), preds.long().cpu(), average='micro')
            all_accs.append(float(f1))
            print()
            print('Micro-averaged f1:', f1)

    all_accs = torch.tensor(all_accs)

    with open("./results/"+model_name[:-4]+"_results.txt", "w") as f:
        f.writelines([str(all_accs.mean().item())+'\n', str(all_accs.std().item())])
    print(all_accs.mean())
    print(all_accs.std())
Example #11
tot = torch.zeros(1)
if args.cuda:
    tot = tot.cuda()
tot_mac = 0
accs = []
mac_f1 = []

for _ in range(5):
    bad_counter = 0
    best = 10000
    loss_values = []
    best_epoch = 0
    train_patience = 50

    log = LogReg(nout, nb_classes)
    opt = torch.optim.Adam(log.parameters(), lr=0.01, weight_decay=0.0)
    if args.cuda:
        log.cuda()

    for epoch in range(100000):
        log.train()
        opt.zero_grad()
        logits = log(train_embs)
        loss = xent(logits, train_lbls)
        logits_val = log(val_embs)
        loss_val = xent(logits_val, val_lbls)
        loss_values.append(loss_val)
        #        print("train_loss: "+ str(loss) +"  "+"val_loss: "+ str(loss_val) )
        loss.backward()
        opt.step()
Example #12
File: execute.py Project: HekpoMaH/DGI
def process_transductive(dataset, gnn_type='GCNConv', K=None, random_init=False, runs=10, drop_sigma=False, just_plot=False):
    dataset_str = dataset
    norm_features = torch_geometric.transforms.NormalizeFeatures()
    dataset = Planetoid("./geometric_datasets"+'/'+dataset,
                        dataset,
                        transform=norm_features)[0]

    # training params
    batch_size = 1 # Transductive setting
    hyperparameters = get_hyperparameters()
    nb_epochs = hyperparameters["nb_epochs"]
    patience = hyperparameters["patience"]
    lr = hyperparameters["lr"]
    xent = nn.CrossEntropyLoss()
    l2_coef = hyperparameters["l2_coef"]
    drop_prob = hyperparameters["drop_prob"]
    hid_units = hyperparameters["hid_units"]
    nonlinearity = hyperparameters["nonlinearity"]

    nb_nodes = dataset.x.shape[0]
    ft_size = dataset.x.shape[1]
    nb_classes = torch.max(dataset.y).item()+1 # 0 based cnt
    features = dataset.x
    labels = dataset.y
    edge_index = dataset.edge_index
    edge_index, _ = torch_geometric.utils.add_remaining_self_loops(edge_index)

    mask_train = dataset.train_mask
    mask_val = dataset.val_mask
    mask_test = dataset.test_mask

    model_name = get_model_name(dataset_str, gnn_type, K, random_init=random_init, drop_sigma=drop_sigma)
    with open("./results/"+model_name[:-4]+"_results.txt", "w") as f:
        pass

    accs = []

    for i in range(runs): 
        model = DGI(ft_size, hid_units, nonlinearity, update_rule=gnn_type, K=K, drop_sigma=drop_sigma)
        print(model, model_name, drop_sigma)
        optimiser = torch.optim.Adam(model.parameters(), lr=lr)

        if torch.cuda.is_available():
            print('Using CUDA')
            features = features.cuda()
            labels = labels.cuda()
            edge_index = edge_index.cuda()
            mask_train = mask_train.cuda()
            mask_val = mask_val.cuda()
            mask_test = mask_test.cuda()
            model = model.cuda()

        best_t = train_transductive(dataset, dataset_str, edge_index, gnn_type, model_name, K=K, random_init=random_init, drop_sigma=drop_sigma)

        xent = nn.CrossEntropyLoss()
        print('Loading {}th epoch'.format(best_t))
        print(model, model_name)
        if not random_init:
            model.load_state_dict(torch.load('./trained_models/'+model_name))
        model.eval()

        embeds, _ = model.embed(features, edge_index, None, standardise=False)
        if just_plot:
            plot_tsne(embeds, labels, model_name)
            exit(0)
        train_embs = embeds[mask_train, :]
        val_embs = embeds[mask_val, :]
        test_embs = embeds[mask_test, :]

        train_lbls = labels[mask_train]
        val_lbls = labels[mask_val]
        test_lbls = labels[mask_test]

        tot = torch.zeros(1)
        tot = tot.cuda()

        for _ in range(50):
            log = LogReg(hid_units, nb_classes)
            opt = torch.optim.Adam(log.parameters(), lr=0.01, weight_decay=0.0)
            log.cuda()

            pat_steps = 0
            best_acc = torch.zeros(1)
            best_acc = best_acc.cuda()
            for _ in range(150):
                log.train()
                opt.zero_grad()

                logits = log(train_embs)
                loss = xent(logits, train_lbls)
                
                loss.backward()
                opt.step()

            logits = log(test_embs)
            preds = torch.argmax(logits, dim=1)
            acc = torch.sum(preds == test_lbls).float() / test_lbls.shape[0]
            accs.append(acc * 100)
            print(acc)
            tot += acc

        print('Average accuracy:', tot / 50)
        
    all_accs = torch.stack(accs, dim=0)
    with open("./results/"+model_name[:-4]+"_results.txt", "a+") as f:
        f.writelines([str(all_accs.mean().item())+'\n', str(all_accs.std().item())+'\n'])

    print(all_accs.mean())
    print(all_accs.std())
Example #13
def model_gen_fun():
    model = LogReg(input_dim=32 * 32, num_classes=1).eval()
    return model
Example #14
File: nmincut.py Project: hoangdzung/DGI
        print(epoch, loss.item(), torch.max(norm_cut), torch.min(norm_cut))
        embeddings = logits.detach()
        X_train = embeddings[idx_train]
        Y_train = labels[idx_train]
        X_test = embeddings[idx_test]
        Y_test = labels[idx_test]
        X_val = embeddings[idx_val]
        Y_val = labels[idx_val]
        tot = torch.zeros(1)
        tot = tot.cuda()
        totv = torch.zeros(1)
        totv = totv.cuda()
        accs = []
        accsv = []
        for _ in range(50):
            log = LogReg(args.embed_size, nb_classes)
            opt = torch.optim.Adam(log.parameters(), lr=0.01, weight_decay=0.0)
            log.cuda()

            pat_steps = 0
            best_acc = torch.zeros(1)
            best_acc = best_acc.cuda()
            for _ in range(100):
                log.train()
                opt.zero_grad()

                logits = log(X_train)
                loss = xent(logits, Y_train)

                loss.backward()
                opt.step()
Example #15
    n_classes = 10
    n_epochs = 200

    pre = Preprocessing('digits')
    pre.load_data(filename='train_encoded.csv', name='train')

    X_df = pre.get(name='train').drop(columns=['0'])
    y_df = pre.get(name='train')['0']

    dtype = torch.float
    device = torch.device("cpu")

    print(len(X_df.columns))

    model_name = 'logreg_digits_encoded'
    model = LogReg(model_name, len(X_df.columns), n_classes)

    learning_rate = 0.0001
    batch_size = 32

    train_classifier = TrainClassifier(model, X_df, y_df)
    trained_model, optimizer, criterion, loss_hist, loss_val_hist, best_param = train_classifier.run_train(
        n_epochs=n_epochs, lr=learning_rate, batch_size=batch_size)
    pre.save_results(loss_hist, loss_val_hist, f'{model_name}')

    trained_model.load_state_dict(state_dict=best_param)
    trained_model.eval()

    if args.s_model:
        m_exporter = ModelExporter('digits')
        m_exporter.save_nn_model(trained_model, optimizer, 0, n_classes,
Example #16
    y_val = torch.tensor(y_val_df.values, device=device, dtype=dtype)

    # Softmax regression model

    #n_features = X.size()[1]
    n_classes = 2

    H_0 = 150
    H_1 = 125
    n_iter = 500

    print('features: ', n_features)
    print('classes: ', n_classes)

    model = LogReg(
        'logreg', n_features,
        n_classes)  #IMDB_NN_Model('imdb', n_features, H_0, H_1, n_classes)

    criterion = nn.CrossEntropyLoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=0.001)

    size_batch = 256
    loss_hist = []
    loss_val_hist = []
    acc_train = []
    acc_val = []
    # Train
    start = time.time()

    for t in range(n_iter):
        X_mini, y_mini = get_mini_batching(X_train, y_train, size_batch)