Example #1
def visualize():
    # Load the dataset splits and rebuild the model with the trained weights.
    adj, features, y_train, y_val, y_test, train_mask, val_mask, test_mask = load_data(
        args.dataset)
    model = GCN(features.shape[1], args.hidden_dim, y_train.shape[1], 0)
    # Register a forward hook on the first GCN layer so its output is
    # captured into the module-level `gcn_layer1_output` during inference.
    gcn_layer1 = model.gcn_layer1
    gcn_layer1.register_forward_hook(hook_fn_forward)
    if args.checkpoint:
        model.load_state_dict(torch.load(args.checkpoint))
    model.eval()
    # A single forward pass triggers the hook.
    model(adj, features)

    x_all = gcn_layer1_output[0]
    # Convert the one-hot label matrices to class indices per split.
    y_train = np.argmax(y_train[train_mask, :].numpy(), axis=1)
    y_val = np.argmax(y_val[val_mask, :].numpy(), axis=1)
    y_test = np.argmax(y_test[test_mask, :].numpy(), axis=1)
    tsne_vis(x_all[train_mask], y_train, classnames[args.dataset], 'train_set',
             args.dataset)
    tsne_vis(x_all[val_mask], y_val, classnames[args.dataset], 'val_set',
             args.dataset)
    tsne_vis(x_all[test_mask], y_test, classnames[args.dataset], 'test_set',
             args.dataset)
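
This snippet relies on a forward hook and a module-level buffer defined elsewhere in the script; a minimal sketch of what they might look like (the names come from the snippet, the bodies are assumptions):

gcn_layer1_output = []

def hook_fn_forward(module, input, output):
    # Stash the hidden representation produced by the hooked layer,
    # detached so the t-SNE step sees a plain tensor.
    gcn_layer1_output.append(output.detach())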
Example #2
                                verbose=True,
                                seed=seed,
                                train_valid_split=0.8
                                )
        elif model_name == 'MLP64':
            model = MLP(name="MLP_lay2_chan64", cuda=cuda, dropout=True,
                        num_layer=2, channels=64, train_valid_split=0.8,
                        patience=30, lr=0.001)
        elif model_name == 'MLP64_lr4':
            model = MLP(name="MLP_lay2_chan64_lr.0.0001_nodropout", cuda=cuda,
                        dropout=False, num_layer=2, channels=64,
                        train_valid_split=0.8, patience=30, lr=0.0001)

        try:
            # print(x_train.shape,  y_train.shape, adj.shape)

            model.fit(x_train, y_train, adj=adj)

            with torch.no_grad():
                model.eval()
                y_hat = model.predict(x_test)
                y_hat = np.argmax(y_hat, axis=1)
                # auc = sklearn.metrics.roc_auc_score(y_test, np.asarray(y_hat).flatten(), multi_class='ovo')
                acc = sklearn.metrics.accuracy_score(y_test, np.asarray(y_hat).flatten())
                f1 = sklearn.metrics.f1_score(y_test, np.asarray(y_hat).flatten(), average='macro')

                experiment["model"] = model.name
                experiment["auc"] = 0
                experiment["acc"] = acc
                experiment["f1"] = f1
                experiment["num_genes"] = len(x_train.columns)

                experiment["time_elapsed"] = str(time.time() - start_time)
                results = record_result(results, experiment, filename)
                print(experiment)
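
Note that the ROC AUC line is commented out and experiment["auc"] is pinned to 0: sklearn's multi-class roc_auc_score needs per-class probability scores, while y_hat above has already been argmax'd into hard labels. A hedged sketch of how it could be re-enabled, assuming model.predict returns one raw score per class:

from scipy.special import softmax

scores = np.asarray(model.predict(x_test))   # assumed shape (n_samples, n_classes)
probs = softmax(scores, axis=1)              # roc_auc_score expects probabilities
auc = sklearn.metrics.roc_auc_score(y_test, probs, multi_class='ovo')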
Example #3
    min_loss = 1e18  # sentinel; lowered as better losses are seen

    trlog = {}
    trlog['train_loss'] = []
    trlog['val_loss'] = []
    trlog['min_loss'] = min_loss

    for epoch in range(1, args.max_epoch + 1):
        gcn.train()
        output_vectors = gcn(word_vectors)
        loss = mask_l2_loss(output_vectors, fc_vectors, tlist[:n_train])
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        gcn.eval()
        output_vectors = gcn(word_vectors)
        train_loss = mask_l2_loss(output_vectors, fc_vectors, tlist[:n_train]).item()
        # v_val (the validation fraction) and n_train are set earlier in the
        # script; with no held-out classes we monitor the training loss.
        if v_val > 0:
            val_loss = mask_l2_loss(output_vectors, fc_vectors, tlist[n_train:]).item()
            loss = val_loss
        else:
            val_loss = 0
            loss = train_loss
        print('epoch {}, train_loss={:.4f}, val_loss={:.4f}'
              .format(epoch, train_loss, val_loss))

        # Keep the best (lowest) monitored loss seen so far.
        min_loss = min(min_loss, loss)
        trlog['train_loss'].append(train_loss)
        trlog['val_loss'].append(val_loss)
        trlog['min_loss'] = min_loss
        torch.save(trlog, osp.join(save_path, 'trlog'))
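
mask_l2_loss, word_vectors, fc_vectors and tlist are defined in the surrounding script. A plausible sketch of the loss, assuming it compares predicted and target vectors only at the selected indices:

def mask_l2_loss(a, b, mask):
    # Halved mean squared distance over the masked rows.
    return ((a[mask] - b[mask]) ** 2).sum() / (2 * len(mask))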
Example #4
def main(training_file,
         dev_file,
         test_file,
         epochs=None,
         patience=None,
         num_heads=None,
         num_out_heads=None,
         num_layers=None,
         num_hidden=None,
         residual=None,
         in_drop=None,
         attn_drop=None,
         lr=None,
         weight_decay=None,
         alpha=None,
         batch_size=None,
         graph_type=None,
         net=None,
         freeze=None,
         cuda=None,
         fw=None):
    # number of training epochs
    if epochs is None:
        epochs = 400
    print('EPOCHS', epochs)
    # used for early stop
    if patience is None:
        patience = 15
    print('PATIENCE', patience)

    # number of hidden attention heads
    if num_heads is None:
        num_heads_ch = [4, 5, 6, 7]
    else:
        num_heads_ch = flattenList(num_heads)
    print('NUM HEADS', num_heads_ch)

    # number of output attention heads
    if num_out_heads is None:
        num_out_heads_ch = [4, 5, 6, 7]
    else:
        num_out_heads_ch = flattenList(num_out_heads)
    print('NUM OUT HEADS', num_out_heads_ch)

    # number of hidden layers
    if num_layers is None:
        num_layers_ch = [2, 3, 4, 5, 6]
    else:
        num_layers_ch = flattenList(num_layers)
    print('NUM LAYERS', num_layers_ch)
    # number of hidden units
    if num_hidden is None:
        num_hidden_ch = [32, 64, 96, 128, 256, 350, 512]
    else:
        num_hidden_ch = flattenList(num_hidden)
    print('NUM HIDDEN', num_hidden_ch)
    # use residual connection
    if residual is None:
        residual_ch = [True, False]
    else:
        residual_ch = flattenList(residual)
    print('RESIDUAL', residual_ch)
    # input feature dropout
    if in_drop is None:
        in_drop_ch = [0., 0.001, 0.0001, 0.00001]
    else:
        in_drop_ch = flattenList(in_drop)
    print('IN DROP', in_drop_ch)
    # attention dropout
    if attn_drop is None:
        attn_drop_ch = [0., 0.001, 0.0001, 0.00001]
    else:
        attn_drop_ch = flattenList(attn_drop)
    print('ATTENTION DROP', attn_drop_ch)
    # learning rate
    if lr is None:
        lr_ch = [0.0000005, 0.0000015, 0.00001, 0.00005, 0.0001]
    else:
        lr_ch = flattenList(lr)
    print('LEARNING RATE', lr_ch)
    # weight decay
    if weight_decay is None:
        weight_decay_ch = [0.0001, 0.001, 0.005]
    else:
        weight_decay_ch = flattenList(weight_decay)
    print('WEIGHT DECAY', weight_decay_ch)
    # the negative slop of leaky relu
    if alpha is None:
        alpha_ch = [0.1, 0.15, 0.2]
    else:
        alpha_ch = flattenList(alpha)
    print('ALPHA', alpha_ch)
    # batch size used for training, validation and test
    if batch_size is None:
        batch_size_ch = [175, 256, 350, 450, 512, 800, 1600]
    else:
        batch_size_ch = flattenList(batch_size)
    print('BATCH SIZE', batch_size_ch)
    # net type
    if net is None:
        net_ch = [GCN, GAT, RGCN, PGCN, PRGCN, GGN, PGAT, Custom_Net]
    else:
        net_ch_raw = flattenList(net)
        net_ch = []
        for ch in net_ch_raw:
            if ch.lower() == 'gcn':
                if fw == 'dgl':
                    net_ch.append(GCN)
                else:
                    net_ch.append(PGCN)
            elif ch.lower() == 'gat':
                if fw == 'dgl':
                    net_ch.append(GAT)
                else:
                    net_ch.append(PGAT)
            elif ch.lower() == 'rgcn':
                if fw == 'dgl':
                    net_ch.append(RGCN)
                else:
                    net_ch.append(PRGCN)
            elif ch.lower() == 'ggn':
                net_ch.append(GGN)
            elif ch.lower() == 'rgat':
                net_ch.append(PRGAT)
            elif ch.lower() == 'custom_net':
                net_ch.append(Custom_Net)
            else:
                print('Network type {} is not recognised.'.format(ch))
                exit(1)
    print('NET TYPE', net_ch)
    # graph type
    # Relational nets only support the 'relational' graph type; net_ch is a
    # list, so test its elements rather than the list itself.
    if all(n in [GCN, GAT, PGCN, GGN, PGAT, Custom_Net] for n in net_ch):
        if graph_type is None:
            graph_type_ch = ['raw', '1', '2', '3', '4', 'relational']
        else:
            graph_type_ch = flattenList(graph_type)
    else:
        if graph_type is None:
            graph_type_ch = ['relational']
        else:
            graph_type_ch = flattenList(graph_type)

    print('GRAPH TYPE', graph_type_ch)
    # Freeze input neurons?
    if freeze is None:
        freeze_ch = [True, False]
    else:
        freeze_ch = flattenList(freeze)
    print('FREEZE', freeze_ch)
    # CUDA?
    if cuda is None:
        device = torch.device("cpu")
    elif cuda:
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")
    print('DEVICE', device)
    # Keep the framework choices in their own list so the per-trial
    # random.choice below does not clobber it with a single string.
    if fw is None:
        fw_ch = ['dgl', 'pg']
    else:
        fw_ch = flattenList(fw)

    # define loss function
    loss_fcn = torch.nn.BCEWithLogitsLoss()
    #loss_fcn = torch.nn.MSELoss()
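    # BCEWithLogitsLoss fuses a sigmoid with binary cross-entropy, so the
    # models below must emit raw logits rather than probabilities, e.g.
    #   loss_fcn(torch.tensor([[1.2, -0.8]]), torch.tensor([[1., 0.]]))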

    for trial in range(10):
        trial_s = str(trial).zfill(6)
        num_heads = random.choice(num_heads_ch)
        num_out_heads = random.choice(num_out_heads_ch)
        num_layers = random.choice(num_layers_ch)
        num_hidden = random.choice(num_hidden_ch)
        residual = random.choice(residual_ch)
        in_drop = random.choice(in_drop_ch)
        attn_drop = random.choice(attn_drop_ch)
        lr = random.choice(lr_ch)
        weight_decay = random.choice(weight_decay_ch)
        alpha = random.choice(alpha_ch)
        batch_size = random.choice(batch_size_ch)
        graph_type = random.choice(graph_type_ch)
        net_class = random.choice(net_ch)
        freeze = random.choice(freeze_ch)
        fw = random.choice(fw_ch)
        # Map the boolean freeze choice onto a per-graph-type freeze value
        # (0 disables freezing).
        if not freeze:
            freeze = 0
        else:
            if graph_type in ('raw', '1', '2'):
                freeze = 4
            elif graph_type in ('3', '4'):
                freeze = 6
            elif graph_type == 'relational':
                freeze = 5
            else:
                exit(1)

        print('=========================')
        print('TRIAL', trial_s)
        print('HEADS', num_heads)
        print('OUT_HEADS', num_out_heads)
        print('LAYERS', num_layers)
        print('HIDDEN', num_hidden)
        print('RESIDUAL', residual)
        print('inDROP', in_drop)
        print('atDROP', attn_drop)
        print('LR', lr)
        print('DECAY', weight_decay)
        print('ALPHA', alpha)
        print('BATCH', batch_size)
        print('GRAPH_ALT', graph_type)
        print('ARCHITECTURE', net_class)
        print('FREEZE', freeze)
        print('FRAMEWORK', fw)
        print('=========================')

        # create the dataset
        print('Loading training set...')
        train_dataset = SocNavDataset(training_file,
                                      mode='train',
                                      alt=graph_type)
        print('Loading dev set...')
        valid_dataset = SocNavDataset(dev_file, mode='valid', alt=graph_type)
        print('Loading test set...')
        test_dataset = SocNavDataset(test_file, mode='test', alt=graph_type)
        print('Done loading files')
        train_dataloader = DataLoader(train_dataset,
                                      batch_size=batch_size,
                                      collate_fn=collate)
        valid_dataloader = DataLoader(valid_dataset,
                                      batch_size=batch_size,
                                      collate_fn=collate)
        test_dataloader = DataLoader(test_dataset,
                                     batch_size=batch_size,
                                     collate_fn=collate)
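        # `collate` is defined elsewhere in the script; a minimal DGL collate
        # for (graph, feats, labels) samples might look like (hypothetical):
        #     def collate(samples):
        #         graphs, feats, labels = map(list, zip(*samples))
        #         return dgl.batch(graphs), torch.cat(feats), torch.cat(labels)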

        num_rels = train_dataset.data[0].num_rels
        cur_step = 0
        best_loss = -1
        n_classes = train_dataset.labels.shape[1]
        print('Number of classes:  {}'.format(n_classes))
        num_feats = train_dataset.features.shape[1]
        print('Number of features: {}'.format(num_feats))
        g = train_dataset.graph
        # e.g. num_heads=4, num_layers=2, num_out_heads=6 -> heads == [4, 4, 6]
        heads = ([num_heads] * num_layers) + [num_out_heads]

        # define the model

        if fw == 'dgl':
            if net_class in [GCN]:
                model = GCN(g, num_feats, num_hidden, n_classes, num_layers,
                            F.elu, in_drop)
            elif net_class in [GAT]:
                model = net_class(g,
                                  num_layers,
                                  num_feats,
                                  num_hidden,
                                  n_classes,
                                  heads,
                                  F.elu,
                                  in_drop,
                                  attn_drop,
                                  alpha,
                                  residual,
                                  freeze=freeze)
            else:
                # def __init__(self, g, in_dim, h_dim, out_dim, num_rels, num_hidden_layers=1):
                model = RGCN(g,
                             in_dim=num_feats,
                             h_dim=num_hidden,
                             out_dim=n_classes,
                             num_rels=num_rels,
                             feat_drop=in_drop,
                             num_hidden_layers=num_layers,
                             freeze=freeze)
        else:

            if net_class in [PGCN]:
                model = PGCN(
                    num_feats,
                    n_classes,
                    num_hidden,
                    num_layers,
                    in_drop,
                    F.relu,
                    improved=True,  # Compute A-hat as A + 2I
                    bias=True)

            elif net_class in [PRGCN]:
                model = PRGCN(
                    num_feats,
                    n_classes,
                    num_rels,
                    num_rels,  # num_rels?   # TODO: Add variable
                    num_hidden,
                    num_layers,
                    in_drop,
                    F.relu,
                    bias=True)
            elif net_class in [PGAT]:
                model = PGAT(num_feats,
                             n_classes,
                             num_heads,
                             in_drop,
                             num_hidden,
                             num_layers,
                             F.relu,
                             concat=True,
                             neg_slope=alpha,
                             bias=True)
            elif net_class in [PRGAT]:
                model = PRGAT(
                    num_feats,
                    n_classes,
                    num_heads,
                    num_rels,
                    num_rels,  # num_rels?   # TODO: Add variable
                    num_hidden,
                    num_layers,
                    num_layers,
                    in_drop,
                    F.relu,
                    alpha,
                    bias=True)
            elif net_class in [Custom_Net]:
                model = Custom_Net(features=num_feats,
                                   hidden=num_layers,
                                   out_features=num_hidden,
                                   classes=n_classes)

            else:
                model = GGN(num_feats, num_layers, aggr='mean', bias=True)
        # Describe the model
        # describe_model(model)

        # define the optimizer
        optimizer = torch.optim.Adam(model.parameters(),
                                     lr=lr,
                                     weight_decay=weight_decay)
        # for name, param in model.named_parameters():
        # if param.requires_grad:
        # print(name, param.data.shape)
        model = model.to(device)

        for epoch in range(epochs):
            model.train()
            loss_list = []
            for batch, data in enumerate(train_dataloader):
                subgraph, feats, labels = data
                subgraph.set_n_initializer(dgl.init.zero_initializer)
                subgraph.set_e_initializer(dgl.init.zero_initializer)
                feats = feats.to(device)
                labels = labels.to(device)
                if fw == 'dgl':
                    model.g = subgraph
                    for layer in model.layers:
                        layer.g = subgraph
                    logits = model(feats.float())
                else:
                    if net_class in [PGCN, PGAT, GGN]:
                        data = Data(x=feats.float(),
                                    edge_index=torch.stack(
                                        subgraph.edges()).to(device))
                    else:
                        data = Data(
                            x=feats.float(),
                            edge_index=torch.stack(
                                subgraph.edges()).to(device),
                            edge_type=subgraph.edata['rel_type'].squeeze().to(
                                device))
                    logits = model(data)
                loss = loss_fcn(logits[getMaskForBatch(subgraph)],
                                labels.float())
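                # getMaskForBatch is defined elsewhere; presumably it yields
                # the row indices of the labelled node in each graph of the
                # batched subgraph, so only those outputs enter the loss.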
                optimizer.zero_grad()
                # Sanity check: snapshot one parameter tensor before and after
                # the optimizer step to confirm the weights actually change.
                a = list(model.parameters())[0].clone()
                loss.backward()
                optimizer.step()
                b = list(model.parameters())[0].clone()
                if torch.equal(a.data, b.data):
                    print('Not learning')
                    # sys.exit(1)
                loss_list.append(loss.item())
            loss_data = np.array(loss_list).mean()
            print('Loss: {}'.format(loss_data))
            if epoch % 5 == 0:
                print(
                    "Epoch {:05d} | Loss: {:.4f} | Patience: {} | ".format(
                        epoch, loss_data, cur_step),
                    end='')
                score_list = []
                val_loss_list = []
                for batch, valid_data in enumerate(valid_dataloader):
                    subgraph, feats, labels = valid_data
                    subgraph.set_n_initializer(dgl.init.zero_initializer)
                    subgraph.set_e_initializer(dgl.init.zero_initializer)
                    feats = feats.to(device)
                    labels = labels.to(device)
                    score, val_loss = evaluate(feats.float(), model, subgraph,
                                               labels.float(), loss_fcn, fw,
                                               net_class)
                    score_list.append(score)
                    val_loss_list.append(val_loss)
                mean_score = np.array(score_list).mean()
                mean_val_loss = np.array(val_loss_list).mean()
                print("Score: {:.4f} MEAN: {:.4f} BEST: {:.4f}".format(
                    mean_score, mean_val_loss, best_loss))
                # early stop
                if best_loss > mean_val_loss or best_loss < 0:
                    best_loss = mean_val_loss
                    # Save the model
                    # print('Writing to', trial_s)
                    torch.save(model.state_dict(), fw + str(net) + '.tch')
                    params = [
                        val_loss, graph_type,
                        str(net_class), g, num_layers, num_feats,
                        num_hidden, n_classes, heads, F.elu, in_drop,
                        attn_drop, alpha, residual, num_rels, freeze
                    ]
                    pickle.dump(params, open(fw + str(net) + '.prms', 'wb'))
                    cur_step = 0
                else:
                    cur_step += 1
                    if cur_step >= patience:
                        break
        torch.save(model, 'gattrial.pth')
        # Put the model in eval mode before scoring the test set.
        model.eval()
        test_score_list = []
        for batch, test_data in enumerate(test_dataloader):
            subgraph, feats, labels = test_data
            subgraph.set_n_initializer(dgl.init.zero_initializer)
            subgraph.set_e_initializer(dgl.init.zero_initializer)
            feats = feats.to(device)
            labels = labels.to(device)
            test_score_list.append(
                evaluate(feats.float(), model, subgraph, labels.float(),
                         loss_fcn, fw, net_class)[0])
        print("F1-Score: {:.4f}".format(np.array(test_score_list).mean()))
        return best_loss
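
The evaluate helper is not shown above; a hedged sketch consistent with how it is called (it must return a (score, loss) pair; every detail of the body is an assumption):

from sklearn.metrics import f1_score

def evaluate(feats, model, subgraph, labels, loss_fcn, fw, net_class):
    model.eval()
    with torch.no_grad():
        if fw == 'dgl':
            model.g = subgraph
            for layer in model.layers:
                layer.g = subgraph
            logits = model(feats)
        else:
            data = Data(x=feats, edge_index=torch.stack(subgraph.edges()))
            logits = model(data)
        logits = logits[getMaskForBatch(subgraph)]
        loss = loss_fcn(logits, labels)
        # Macro F1 over thresholded sigmoid outputs (assumption).
        preds = (torch.sigmoid(logits) > 0.5).int().cpu().numpy()
        score = f1_score(labels.cpu().numpy(), preds, average='macro')
    return score, loss.item()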