Ejemplo n.º 1
0
def main():
    """Benchmark SpGAT forward timing on CUDA vs. CPU across graph densities.

    Builds two identically-configured SpGAT models (one moved to the GPU,
    one kept on the CPU), times each via `time_densities` three times per
    density (repetition lets later runs benefit from warmed-up kernels and
    caches), and prints the CPU/CUDA speedup computed from the total time
    of the last repetition.

    NOTE(review): assumes `time_densities` appends per-run timing rows to
    the `ts` list, with the row's first element being the total time —
    confirm against its definition.
    """
    # Cora-like dimensions: 1433 input features, 7 output classes.
    modelcuda = SpGAT(nfeat=1433,
                      nhid=8,
                      nclass=7,
                      dropout=0.6,
                      nheads=8,
                      alpha=0.2)
    modelcuda.cuda()
    modelcpu = SpGAT(nfeat=1433,
                     nhid=8,
                     nclass=7,
                     dropout=0.6,
                     nheads=8,
                     alpha=0.2)
    for density in [0.001, 0.002, 0.003, 0.004, 0.005, 0.006]:
        print("###########\ndensity", density)

        tscuda = []
        # Three repetitions: the first warms up CUDA; the last is measured.
        for _ in range(3):
            time_densities(density, modelcuda, ts=tscuda)
        tscuda = np.array(tscuda)
        print("tscuda", tscuda)
        # Total time of the final (warmed-up) repetition.
        totcuda = tscuda[-1][0]

        tscpu = []
        for _ in range(3):
            time_densities(density, modelcpu, ts=tscpu, dv="cpu")
        tscpu = np.array(tscpu)
        print("tscpu", tscpu)
        totcpu = tscpu[-1][0]

        print("-----speedup ", totcpu / totcuda)
Ejemplo n.º 2
0
def named():
    """Benchmark SpGAT forward timing on CUDA vs. CPU over named graphs.

    Same protocol as the density sweep: two identically-configured SpGAT
    models (GPU and CPU), three timed repetitions each via `time_named`
    (repeats absorb warm-up cost), and a printed CPU/CUDA speedup taken
    from the last repetition's total time.

    NOTE(review): assumes `time_named` appends per-run timing rows to `ts`
    with the total time in the first column — confirm against its
    definition.
    """
    modelcuda = SpGAT(nfeat=100,
                      nhid=8,
                      nclass=7,
                      dropout=0.6,
                      nheads=8,
                      alpha=0.2)
    modelcuda.cuda()
    modelcpu = SpGAT(nfeat=100,
                     nhid=8,
                     nclass=7,
                     dropout=0.6,
                     nheads=8,
                     alpha=0.2)
    # Graph identifiers understood by `time_named`.
    for name in ["0.9", "protein", "spheres", "webbase"]:
        print("###########\nname", name)

        tscuda = []
        # Three repetitions: the first warms up CUDA; the last is measured.
        for _ in range(3):
            time_named(name, modelcuda, ts=tscuda)
        tscuda = np.array(tscuda)
        print("tscuda", tscuda)
        totcuda = tscuda[-1][0]

        tscpu = []
        for _ in range(3):
            time_named(name, modelcpu, ts=tscpu, dv="cpu")
        tscpu = np.array(tscpu)
        totcpu = tscpu[-1][0]
        print("tscpu", tscpu)
        print("-----speedup ", totcpu / totcuda)
Ejemplo n.º 3
0
def timecora():
    """Time (Sp)GAT test-set inference on the pickled Cora dataset, CUDA vs. CPU.

    Loads the preprocessed Cora split from ``cora.pk``, runs `compute_test`
    three times on a GPU model and three times on a CPU model (repeats
    absorb warm-up cost), and prints both timing arrays.
    """
    print("reading data...")
    with open("cora.pk", "rb") as f:
        (adj, features, labels, idx_train, idx_val, idx_test) = pk.load(f)
    print("done reading.")

    print("numnodes : ", features.shape)

    # Cora dimensions: 1433 input features, 7 classes.
    modelcuda = SpGAT(nfeat=1433,
                      nhid=8,
                      nclass=7,
                      dropout=0.6,
                      nheads=8,
                      alpha=0.2)
    modelcuda.cuda()
    modelcpu = SpGAT(nfeat=1433,
                     nhid=8,
                     nclass=7,
                     dropout=0.6,
                     nheads=8,
                     alpha=0.2)

    tscuda = []
    # Three repetitions: the first warms up CUDA; later ones are representative.
    for _ in range(3):
        compute_test(modelcuda, adj, features, labels, idx_test, ts=tscuda)
    tscuda = np.array(tscuda)
    print("tscuda", tscuda)

    tscpu = []
    for _ in range(3):
        compute_test(modelcpu, adj, features, labels, idx_test, ts=tscpu,
                     dv="cpu")
    tscpu = np.array(tscpu)
    print("tscpu", tscpu)
Ejemplo n.º 4
0
                  dropout=args.dropout,
                  nheads=args.nb_heads,
                  alpha=args.alpha)
else:
    model = GAT(nfeat=features.shape[1],
                nhid=args.hidden,
                nclass=int(labels.max()) + 1,
                dropout=args.dropout,
                nheads=args.nb_heads,
                alpha=args.alpha)
optimizer = optim.Adam(model.parameters(),
                       lr=args.lr,
                       weight_decay=args.weight_decay)

if args.cuda:
    model.cuda()
    features = features.cuda()
    adj = adj.cuda()
    labels = labels.cuda()
    idx_train = idx_train.cuda()
    idx_val = idx_val.cuda()
    idx_test = idx_test.cuda()

features, adj, labels = Variable(features), Variable(adj), Variable(labels)


def train(epoch):
    t = time.time()
    model.train()
    optimizer.zero_grad()
    output = model(features, adj)  # GAT模块
Ejemplo n.º 5
0
    def train_pipeline(self, adj, features, labels, idx_train, idx_val,
                       idx_test, *args):
        """Prepare the data, train a (Sp)GAT model with early stopping, and return it.

        Normalizes and densifies the adjacency, converts all inputs to torch
        tensors, seeds the RNGs, builds the model and Adam optimizer from
        ``self.args``, trains for up to ``self.args.epochs`` epochs with
        early stopping on validation loss (``self.args.patience``), restores
        the best checkpoint, and stores the prepared tensors and trained
        model on ``self``.

        NOTE(review): writes a ``{epoch}.pkl`` checkpoint into the current
        working directory every epoch and prunes non-best ones with a
        ``*.pkl`` glob; any unrelated ``.pkl`` file whose stem is not an
        integer would crash the cleanup — confirm the cwd is dedicated to
        these checkpoints.
        """

        # Add self-loops, then normalize the adjacency matrix.
        adj = normalize_adj(adj + sp.eye(adj.shape[0]))

        # torch.FloatTensor below requires dense input.
        if sp.issparse(adj):
            adj = adj.todense()

        if sp.issparse(features):
            features = features.todense()

        # With networkx, we no longer need to convert from one-hot encoding...
        # labels = np.where(labels)[1]

        adj = torch.FloatTensor(adj)
        features = torch.FloatTensor(features)
        labels = torch.LongTensor(labels)
        idx_train = torch.LongTensor(idx_train)
        idx_val = torch.LongTensor(idx_val)
        idx_test = torch.LongTensor(idx_test)

        # Seed every RNG in play for reproducible runs.
        random.seed(self.args.seed)
        np.random.seed(self.args.seed)
        torch.manual_seed(self.args.seed)
        if self.args.cuda:
            torch.cuda.manual_seed(self.args.seed)

        # Model and optimizer
        if self.args.sparse:
            model = SpGAT(
                nfeat=features.shape[1],
                nhid=self.args.hidden,
                nclass=int(labels.max()) + 1,
                dropout=self.args.dropout,
                nheads=self.args.nb_heads,
                alpha=self.args.alpha,
            )
        else:
            model = GAT(
                nfeat=features.shape[1],
                nhid=self.args.hidden,
                nclass=int(labels.max()) + 1,
                dropout=self.args.dropout,
                nheads=self.args.nb_heads,
                alpha=self.args.alpha,
            )
        optimizer = optim.Adam(model.parameters(),
                               lr=self.args.lr,
                               weight_decay=self.args.weight_decay)

        # Move the model and every tensor to the GPU when requested.
        if self.args.cuda:
            model.cuda()
            features = features.cuda()
            adj = adj.cuda()
            labels = labels.cuda()
            idx_train = idx_train.cuda()
            idx_val = idx_val.cuda()
            idx_test = idx_test.cuda()

        features, adj, labels = Variable(features), Variable(adj), Variable(
            labels)

        # TODO: Test if these lines could be written below line 41.
        # Expose the prepared data on the instance for later use.
        self.adj = adj
        self.features = features
        self.labels = labels
        self.idx_train = idx_train
        self.idx_val = idx_val
        self.idx_test = idx_test

        def train(epoch):
            # One training epoch; returns the validation loss used for
            # early stopping. Closes over model/optimizer/data above.
            t = time.time()
            model.train()
            optimizer.zero_grad()
            output = model(features, adj)
            loss_train = F.nll_loss(output[idx_train], labels[idx_train])
            acc_train = accuracy(output[idx_train], labels[idx_train])
            loss_train.backward()
            optimizer.step()

            if not self.args.fastmode:
                # Evaluate validation set performance separately,
                # deactivates dropout during validation run.
                model.eval()
                output = model(features, adj)

            loss_val = F.nll_loss(output[idx_val], labels[idx_val])
            acc_val = accuracy(output[idx_val], labels[idx_val])
            print(
                "Epoch: {:04d}".format(epoch + 1),
                "loss_train: {:.4f}".format(loss_train.data.item()),
                "acc_train: {:.4f}".format(acc_train.data.item()),
                "loss_val: {:.4f}".format(loss_val.data.item()),
                "acc_val: {:.4f}".format(acc_val.data.item()),
                "time: {:.4f}s".format(time.time() - t),
            )

            return loss_val.data.item()

        # Train model
        t_total = time.time()
        loss_values = []
        bad_counter = 0
        # Sentinel "best loss"; any real validation loss below it counts as
        # an improvement. NOTE(review): assumes losses stay below
        # epochs + 1 — confirm for very short runs.
        best = self.args.epochs + 1
        best_epoch = 0
        for epoch in range(self.args.epochs):
            loss_values.append(train(epoch))

            # Checkpoint every epoch; non-best checkpoints are pruned below.
            torch.save(model.state_dict(), "{}.pkl".format(epoch))
            if loss_values[-1] < best:
                best = loss_values[-1]
                best_epoch = epoch
                bad_counter = 0
            else:
                bad_counter += 1

            # Early stop after `patience` epochs without improvement.
            if bad_counter == self.args.patience:
                break

            # Drop checkpoints older than the current best.
            files = glob.glob("*.pkl")
            for file in files:
                epoch_nb = int(file.split(".")[0])
                if epoch_nb < best_epoch:
                    os.remove(file)

        # Drop checkpoints newer than the best (written after it was found).
        files = glob.glob("*.pkl")
        for file in files:
            epoch_nb = int(file.split(".")[0])
            if epoch_nb > best_epoch:
                os.remove(file)

        print("Optimization Finished!")
        print("Total time elapsed: {:.4f}s".format(time.time() - t_total))

        # Restore best model
        print("Loading {}th epoch".format(best_epoch))
        model.load_state_dict(torch.load("{}.pkl".format(best_epoch)))

        self.model = model

        return model
Ejemplo n.º 6
0
def main():
    """Train and evaluate a GAT-family model on each data split of a dataset.

    Resolves the data/output directories from the global ``args``, seeds all
    RNGs, then for each of ``args.N`` splits: loads the data, builds the
    model named by ``args.model_name``, optionally warm-starts it from
    ``args.load`` or from the previous split's trained model, moves
    everything to the GPU when ``CUDA`` is set, trains (unless
    ``args.evaluate``), and evaluates both the best and the final
    checkpoints.

    NOTE(review): mutates the global ``args`` (``data_dir``, ``output_dir``,
    ``use_cuda``, ``load``) — confirm no caller relies on the originals.
    """
    args.data_dir = os.path.join(args.data_dir, args.dataset)
    args.output_dir = os.path.join(args.output_dir, args.dataset)

    if os.path.exists(args.output_dir) and os.listdir(args.output_dir):
        print("Output directory ({}) already exists and is not empty.".format(
            args.output_dir))
    else:
        os.makedirs(args.output_dir, exist_ok=True)

    # Seed every RNG in play for reproducible runs.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    if CUDA:
        args.use_cuda = CUDA
        torch.cuda.manual_seed_all(args.seed)
    torch.backends.cudnn.deterministic = True
    print("args = ", args)

    # 'None' sentinels: no previous-split model yet; args.load not consumed.
    ori_model = 'None'
    ori_load = True

    for idx in range(args.N):
        data_idx = idx
        # Load data
        adj, features, labels, idx_train, idx_val, idx_test, test_sub_idx, ori_adj, ori_idx_train, ori_idx_valid = \
            load_data(args, data_idx, base_path=args.data_dir, dataset=args.dataset)

        # Encode the hyperparameters into the checkpoint directory name.
        file_name = (
            f"model_name_{args.model_name}_lr_{args.lr}"
            f"_epochs_{args.epochs}_k_factors_{args.k_factors}"
            f"_up_bound_{args.up_bound}_top_n_{args.top_n}"
            f"_att_lr_{args.att_lr}_hidden_{args.hidden}_w1_{args.w1}")

        if args.all_data:
            model_path = os.path.join(args.output_dir, file_name)
        else:
            model_path = os.path.join(args.output_dir, str(data_idx),
                                      file_name)
        os.makedirs(model_path, exist_ok=True)

        # Model and optimizer
        if args.model_name == "SpGAT":
            model = SpGAT(nfeat=features.shape[1],
                          nhid=args.hidden,
                          nclass=int(labels.max()) + 1,
                          dropout=args.dropout,
                          nheads=args.nb_heads,
                          alpha=args.alpha)
        elif args.model_name in ("SpGAT_2", "SpGAT2"):
            # Both spellings select the same model.
            model = SpGAT_2(nfeat=features.shape[1],
                            nclass=int(labels.max()) + 1,
                            config=args)
        else:
            model = GAT(nfeat=features.shape[1],
                        nhid=args.hidden,
                        nclass=int(labels.max()) + 1,
                        dropout=args.dropout,
                        nheads=args.nb_heads,
                        alpha=args.alpha)

        print("load path", args.load)
        # Warm-start from an explicit checkpoint, but only for the first split.
        if args.load != 'None' and ori_load:
            model = load_model(model, args.load)
            print("model loaded")
            ori_load = False

        # From the second split on, continue from the previous split's model.
        if ori_model != 'None':
            model = copy.deepcopy(ori_model)
            print("load model from", idx - 1)

        print(model.state_dict().keys())

        if CUDA:
            model.cuda()
            features = Variable(features.cuda())
            adj = Variable(adj.cuda())
            labels = Variable(labels.cuda())
            idx_train = idx_train.cuda()
            idx_val = idx_val.cuda()
            idx_test = idx_test.cuda()
            # The "_2" model variants also consume the original-graph tensors.
            if "_" in args.model_name and not args.all_data and data_idx > 0 and ori_adj is not None:
                ori_adj = Variable(ori_adj.cuda())
                ori_idx_train = ori_idx_train.cuda()
                ori_idx_valid = ori_idx_valid.cuda()

        loader = Corpus(features, adj, labels, idx_train, idx_val, idx_test,
                        ori_adj, ori_idx_train, ori_idx_valid)

        # Ensure every parameter is trainable (warm-started weights may
        # arrive frozen).
        for name, param in model.named_parameters():
            if not param.requires_grad:
                print("False", name)
                param.requires_grad = True

        best_epoch = 0
        if args.evaluate == 0:
            best_epoch = train(model, model_path, loader, data_idx)
            ori_model = copy.deepcopy(model)
        # Evaluate both the best-validation and the final checkpoints.
        evaluate(model,
                 model_path,
                 loader,
                 data_idx,
                 best_epoch=best_epoch,
                 test_sub_idx=test_sub_idx)
        evaluate(model,
                 model_path,
                 loader,
                 data_idx,
                 best_epoch=best_epoch,
                 test_sub_idx=test_sub_idx,
                 best_or_final='final')

        # Chain splits: the next run can resume from this split's final model.
        args.load = os.path.join(model_path, 'trained_final.pth')
Ejemplo n.º 7
0
    def train_pipeline(self, *args, custom_function=True, function=None):
        """Load data, train a (Sp)GAT model with early stopping, and return it.

        Seeds the RNGs from ``self.args``, loads the graph via
        ``new_load_data`` (optionally through a caller-supplied
        ``function``), builds the model and Adam optimizer, trains for up to
        ``self.args.epochs`` epochs with early stopping on validation loss
        (``self.args.patience``), restores the best checkpoint, and stores
        the prepared tensors and trained model on ``self``.

        NOTE(review): writes a ``{epoch}.pkl`` checkpoint into the current
        working directory every epoch and prunes non-best ones with a
        ``*.pkl`` glob; any unrelated ``.pkl`` file whose stem is not an
        integer would crash the cleanup — confirm the cwd is dedicated to
        these checkpoints.
        """
        # Seed every RNG in play for reproducible runs.
        random.seed(self.args.seed)
        np.random.seed(self.args.seed)
        torch.manual_seed(self.args.seed)
        if self.args.cuda:
            torch.cuda.manual_seed(self.args.seed)

        # Load data
        adj, features, labels, idx_train, idx_val, idx_test = new_load_data(
            *args, custom_function=custom_function, function=function)

        # Model and optimizer
        if self.args.sparse:
            model = SpGAT(nfeat=features.shape[1],
                          nhid=self.args.hidden,
                          nclass=int(labels.max()) + 1,
                          dropout=self.args.dropout,
                          nheads=self.args.nb_heads,
                          alpha=self.args.alpha)
        else:
            model = GAT(nfeat=features.shape[1],
                        nhid=self.args.hidden,
                        nclass=int(labels.max()) + 1,
                        dropout=self.args.dropout,
                        nheads=self.args.nb_heads,
                        alpha=self.args.alpha)
        optimizer = optim.Adam(model.parameters(),
                               lr=self.args.lr,
                               weight_decay=self.args.weight_decay)

        # Move the model and every tensor to the GPU when requested.
        if self.args.cuda:
            model.cuda()
            features = features.cuda()
            adj = adj.cuda()
            labels = labels.cuda()
            idx_train = idx_train.cuda()
            idx_val = idx_val.cuda()
            idx_test = idx_test.cuda()

        features, adj, labels = Variable(features), Variable(adj), Variable(
            labels)

        # TODO: Test if these lines could be written below line 41.
        # Expose the prepared data on the instance for later use.
        self.adj = adj
        self.features = features
        self.labels = labels
        self.idx_train = idx_train
        self.idx_val = idx_val
        self.idx_test = idx_test

        def train(epoch):
            # One training epoch; returns the validation loss used for
            # early stopping. Closes over model/optimizer/data above.
            t = time.time()
            model.train()
            optimizer.zero_grad()
            output = model(features, adj)
            loss_train = F.nll_loss(output[idx_train], labels[idx_train])
            acc_train = accuracy(output[idx_train], labels[idx_train])
            loss_train.backward()
            optimizer.step()

            if not self.args.fastmode:
                # Evaluate validation set performance separately,
                # deactivates dropout during validation run.
                model.eval()
                output = model(features, adj)

            loss_val = F.nll_loss(output[idx_val], labels[idx_val])
            acc_val = accuracy(output[idx_val], labels[idx_val])
            print('Epoch: {:04d}'.format(epoch + 1),
                  'loss_train: {:.4f}'.format(loss_train.data.item()),
                  'acc_train: {:.4f}'.format(acc_train.data.item()),
                  'loss_val: {:.4f}'.format(loss_val.data.item()),
                  'acc_val: {:.4f}'.format(acc_val.data.item()),
                  'time: {:.4f}s'.format(time.time() - t))

            return loss_val.data.item()

        # Train model
        t_total = time.time()
        loss_values = []
        bad_counter = 0
        # Sentinel "best loss"; any real validation loss below it counts as
        # an improvement. NOTE(review): assumes losses stay below
        # epochs + 1 — confirm for very short runs.
        best = self.args.epochs + 1
        best_epoch = 0
        for epoch in range(self.args.epochs):
            loss_values.append(train(epoch))

            # Checkpoint every epoch; non-best checkpoints are pruned below.
            torch.save(model.state_dict(), '{}.pkl'.format(epoch))
            if loss_values[-1] < best:
                best = loss_values[-1]
                best_epoch = epoch
                bad_counter = 0
            else:
                bad_counter += 1

            # Early stop after `patience` epochs without improvement.
            if bad_counter == self.args.patience:
                break

            # Drop checkpoints older than the current best.
            files = glob.glob('*.pkl')
            for file in files:
                epoch_nb = int(file.split('.')[0])
                if epoch_nb < best_epoch:
                    os.remove(file)

        # Drop checkpoints newer than the best (written after it was found).
        files = glob.glob('*.pkl')
        for file in files:
            epoch_nb = int(file.split('.')[0])
            if epoch_nb > best_epoch:
                os.remove(file)

        print("Optimization Finished!")
        print("Total time elapsed: {:.4f}s".format(time.time() - t_total))

        # Restore best model
        print('Loading {}th epoch'.format(best_epoch))
        model.load_state_dict(torch.load('{}.pkl'.format(best_epoch)))

        self.model = model

        return model