N1 = np.concatenate((N1, np.zeros((edges_2, 1), dtype=np.float32)), axis=1)  # pad N1 with an extra all-zero column

plot_graph(E1, N1)

E = np.concatenate((E, np.asarray(e2)), axis=0)  # stack the two edge lists
N_tot = np.eye(edges + edges_2, dtype=np.float32)  # identity matrix = one-hot feature per row
N_tot = np.concatenate(
    (N_tot, np.zeros((edges + edges_2, 1), dtype=np.float32)), axis=1
)  # append a zero padding column, matching N1 above

# Create input to the GNN

labels = np.random.randint(2, size=N_tot.shape[0])  # random binary node labels
# labels = np.eye(max(labels)+1, dtype=np.int32)[labels]  # one-hot encoding of labels

cfg = GNNWrapper.Config()
cfg.use_cuda = True
cfg.device = utils.prepare_device(n_gpu_use=1, gpu_id=0)
cfg.tensorboard = False
cfg.epochs = 500

cfg.activation = nn.Tanh()
cfg.state_transition_hidden_dims = [5]
cfg.output_function_hidden_dims = [5]
cfg.state_dim = 5
cfg.max_iterations = 50
cfg.convergence_threshold = 0.01
cfg.graph_based = False
cfg.log_interval = 10
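
# A minimal sketch (an assumption mirroring Example #2 below, not part of the
# original snippet) of wiring this config, the merged graph (E, N_tot), and the
# random labels into a training run:
model = GNNWrapper(cfg)
dset = dataloader.from_EN_to_GNN(E, N_tot, labels,
                                 aggregation_type="sum",
                                 sparse_matrix=True)
model(dset)  # dataset initialization into the GNN
for epoch in range(1, cfg.epochs + 1):
    model.train_step(epoch)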
Example #2
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch')
    parser.add_argument('--batch-size', type=int, default=64, metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size', type=int, default=100, metavar='N',
                        help='input batch size for testing (default: 100)')
    parser.add_argument('--epochs', type=int, default=100000, metavar='N',
                        help='number of epochs to train (default: 100000)')
    parser.add_argument('--lr', type=float, default=0.0001, metavar='LR',
                        help='learning rate (default: 0.0001)')
    parser.add_argument('--momentum', type=float, default=0.5, metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda', action='store_true', default=False,
                        help='disables CUDA training')
    parser.add_argument('--cuda_dev', type=int, default=0,
                        help='select specific CUDA device for training')
    parser.add_argument('--n_gpu_use', type=int, default=1,
                        help='number of CUDA devices to use for training')
    # parser.add_argument('--seed', type=int, default=1, metavar='S',
    #                     help='random seed (default: 1)')
    parser.add_argument('--log-interval', type=int, default=50, metavar='N',
                        help='logging training status cadence')
    parser.add_argument('--save-model', action='store_true', default=False,
                        help='for saving the current model')
    parser.add_argument('--tensorboard', action='store_true', default=True,
                        help='enable TensorBoard logging')  # NOTE: default=True makes this flag a no-op

    args = parser.parse_args()

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    if not use_cuda:
        args.n_gpu_use = 0

    device = utils.prepare_device(n_gpu_use=args.n_gpu_use, gpu_id=args.cuda_dev)
    # kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    # torch.manual_seed(args.seed)
    # # fix random seeds for reproducibility
    # SEED = 123
    # torch.manual_seed(SEED)
    # torch.backends.cudnn.deterministic = True
    # torch.backends.cudnn.benchmark = False
    # np.random.seed(SEED)

    # configurations
    cfg = GNNWrapper.Config()
    cfg.use_cuda = use_cuda
    cfg.device = device

    cfg.log_interval = args.log_interval
    cfg.tensorboard = args.tensorboard

    # cfg.batch_size = args.batch_size
    # cfg.test_batch_size = args.test_batch_size
    # cfg.momentum = args.momentum

    cfg.dataset_path = './data'
    cfg.epochs = args.epochs
    cfg.lrw = args.lr
    cfg.activation = nn.Tanh()
    cfg.state_transition_hidden_dims = [5]
    cfg.output_function_hidden_dims = [5]
    cfg.state_dim = 2
    cfg.max_iterations = 50
    cfg.convergence_threshold = 0.01
    cfg.graph_based = False
    cfg.log_interval = 10  # NOTE: overrides the --log-interval CLI value set above
    cfg.task_type = "semisupervised"

    cfg.lrw = 0.001  # NOTE: overrides the --lr CLI value set above

    # model creation
    model = SemiSupGNNWrapper(cfg)
    # dataset creation
    E, N, targets, mask_train, mask_test = dataloader.old_load_karate()
    dset = dataloader.from_EN_to_GNN(E, N, targets, aggregation_type="sum", sparse_matrix=True)  # generate the dataset
    dset.idx_train = mask_train
    dset.idx_test = mask_test
    model(dset)  # dataset initialization into the GNN

    # training code
    for epoch in range(1, args.epochs + 1):
        model.train_step(epoch)

        if epoch % 10 == 0:
            model.test_step(epoch)
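
# Hypothetical invocation (the script name is an assumption; the flags come
# from the argparse definitions above):
#   python main.py --epochs 500 --no-cuda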
Example #3
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch')
    parser.add_argument('--epochs',
                        type=int,
                        default=10000,
                        metavar='N',
                        help='number of epochs to train (default: 10000)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.0001,
                        metavar='LR',
                        help='learning rate (default: 0.0001)')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    parser.add_argument('--cuda_dev',
                        type=int,
                        default=0,
                        help='select specific CUDA device for training')
    parser.add_argument('--n_gpu_use',
                        type=int,
                        default=1,
                        help='number of CUDA devices to use for training')
    parser.add_argument('--log-interval',
                        type=int,
                        default=50,
                        metavar='N',
                        help='logging training status cadence')
    parser.add_argument('--tensorboard',
                        action='store_true',
                        default=True,
                        help='enable TensorBoard logging')  # NOTE: default=True makes this flag a no-op

    args = parser.parse_args()

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    if not use_cuda:
        args.n_gpu_use = 0

    device = utils.prepare_device(n_gpu_use=args.n_gpu_use,
                                  gpu_id=args.cuda_dev)
    # kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    # torch.manual_seed(args.seed)
    # # fix random seeds for reproducibility
    # SEED = 123
    # torch.manual_seed(SEED)
    # torch.backends.cudnn.deterministic = True
    # torch.backends.cudnn.benchmark = False
    # np.random.seed(SEED)

    # configurations
    cfg = GNNWrapper.Config()
    cfg.use_cuda = use_cuda
    cfg.device = device

    cfg.log_interval = args.log_interval
    cfg.tensorboard = args.tensorboard

    # cfg.batch_size = args.batch_size
    # cfg.test_batch_size = args.test_batch_size
    # cfg.momentum = args.momentum

    cfg.dataset_path = './data'
    cfg.epochs = args.epochs
    cfg.lrw = args.lr
    cfg.activation = nn.Sigmoid()
    cfg.state_transition_hidden_dims = [10]
    cfg.output_function_hidden_dims = [5]
    cfg.state_dim = 10
    cfg.max_iterations = 50
    cfg.convergence_threshold = 0.01
    cfg.graph_based = False
    cfg.log_interval = 10  # NOTE: overrides the --log-interval CLI value set above
    cfg.lrw = 0.01  # NOTE: overrides the --lr CLI value set above
    cfg.task_type = "multiclass"

    # model creation
    # model_tr = GNNWrapper(cfg)
    # model_val = GNNWrapper(cfg)
    # model_tst = GNNWrapper(cfg)

    cfg.dset_name = "sub_30_15_200"
    cfg.aggregation_type = "degreenorm"
    # dataset creation
    dset = dataloader.get_subgraph(set=cfg.dset_name,
                                   aggregation_type=cfg.aggregation_type,
                                   sparse_matrix=True)  # generate the dataset

    cfg.label_dim = dset["train"].node_label_dim

    state_nets = [
        net.StateTransition(cfg.state_dim,
                            cfg.label_dim,
                            mlp_hidden_dim=cfg.state_transition_hidden_dims,
                            activation_function=cfg.activation),
        net.GINTransition(cfg.state_dim,
                          cfg.label_dim,
                          mlp_hidden_dim=cfg.state_transition_hidden_dims,
                          activation_function=cfg.activation),
        net.GINPreTransition(cfg.state_dim,
                             cfg.label_dim,
                             mlp_hidden_dim=cfg.state_transition_hidden_dims,
                             activation_function=cfg.activation)
    ]
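    # The three candidates above pair the classic GNN state transition with two
    # GIN-style variants (names suggest Graph Isomorphism Network-style
    # aggregation; an assumption), compared under an otherwise identical config.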

    lrs = [0.05, 0.01, 0.001]

    hyperparameters = dict(lr=lrs, state_net=state_nets)
    hyperparameters_values = list(hyperparameters.values())

    start_0 = time.time()
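    # Walk the Cartesian product of the grid: 3 learning rates x 3
    # state-transition networks = 9 training runs.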
    for lr, state_net in product(*hyperparameters_values):
        cfg.lrw = lr
        cfg.state_net = state_net

        print(
            f"learning_rate:{lr}, state_dim:{cfg.state_dim}, aggregation function:{str(state_net).split('(')[0]} "
        )
        # model creation
        model_tr = GNNWrapper(cfg)
        model_val = GNNWrapper(cfg)
        model_tst = GNNWrapper(cfg)

        # 24.3.21 STOPPER
        early_stopper = utils.EarlyStopper(cfg)

        model_tr(dset["train"],
                 state_net=state_net)  # dataset initalization into the GNN
        model_val(dset["validation"],
                  state_net=model_tr.gnn.state_transition_function,
                  out_net=model_tr.gnn.output_function
                  )  # dataset initalization into the GNN
        model_tst(dset["test"],
                  state_net=model_tr.gnn.state_transition_function,
                  out_net=model_tr.gnn.output_function
                  )  # dataset initalization into the GNN
        # training code
        start = time.time()
        for epoch in range(1, args.epochs + 1):
            acc_train = model_tr.train_step(epoch)
            if epoch % 10 == 0:
                acc_tst = model_tst.test_step(epoch)
                acc_val = model_val.valid_step(epoch)
                stp = early_stopper(acc_train, acc_val, acc_tst, epoch)

                # a return value of -1 from the stopper signals early stopping
                if stp == -1:
                    print(
                        f"{early_stopper.best_epoch}, \t {early_stopper.best_train}, \t, {early_stopper.best_val}, \t {early_stopper.best_test}"
                    )
                    break
                # model_tst.test_step(epoch)

        time_sample = time.time() - start
        print(f"time taken for one set: {str(time_sample)} seconds")

    time_whole = time.time() - start_0
    print(f"time taken for the whole experiment: {str(time_whole)} seconds")
Example #4
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch')
    parser.add_argument('--epochs',
                        type=int,
                        default=100000,
                        metavar='N',
                        help='number of epochs to train (default: 100000)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.0001,
                        metavar='LR',
                        help='learning rate (default: 0.0001)')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    parser.add_argument('--cuda_dev',
                        type=int,
                        default=0,
                        help='select specific CUDA device for training')
    parser.add_argument('--n_gpu_use',
                        type=int,
                        default=1,
                        help='number of CUDA devices to use for training')
    parser.add_argument('--log-interval',
                        type=int,
                        default=50,
                        metavar='N',
                        help='logging training status cadence')
    parser.add_argument('--tensorboard',
                        action='store_true',
                        default=True,
                        help='enable TensorBoard logging')  # NOTE: default=True makes this flag a no-op

    args = parser.parse_args()

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    if not use_cuda:
        args.n_gpu_use = 0

    device = utils.prepare_device(n_gpu_use=args.n_gpu_use,
                                  gpu_id=args.cuda_dev)
    # kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    # torch.manual_seed(args.seed)
    # # fix random seeds for reproducibility
    # SEED = 123
    # torch.manual_seed(SEED)
    # torch.backends.cudnn.deterministic = True
    # torch.backends.cudnn.benchmark = False
    # np.random.seed(SEED)

    # configurations
    cfg = GNNWrapper.Config()
    cfg.use_cuda = use_cuda
    cfg.device = device

    cfg.log_interval = args.log_interval
    cfg.tensorboard = args.tensorboard

    # cfg.batch_size = args.batch_size
    # cfg.test_batch_size = args.test_batch_size
    # cfg.momentum = args.momentum

    cfg.dataset_path = './data'
    cfg.epochs = args.epochs
    cfg.lrw = args.lr
    cfg.activation = nn.Sigmoid()
    cfg.state_transition_hidden_dims = [10]
    cfg.output_function_hidden_dims = [5]
    cfg.state_dim = 10
    cfg.max_iterations = 50
    cfg.convergence_threshold = 0.01
    cfg.graph_based = False
    cfg.log_interval = 10  # NOTE: overrides the --log-interval CLI value set above
    cfg.lrw = 0.01  # NOTE: overrides the --lr CLI value set above
    cfg.task_type = "multiclass"

    # model creation
    model_tr = GNNWrapper(cfg)
    model_val = GNNWrapper(cfg)
    model_tst = GNNWrapper(cfg)
    # dataset creation
    dset = dataloader.get_subgraph(set="cli_15_7_200",
                                   aggregation_type="sum",
                                   sparse_matrix=True)  # generate the dataset
    model_tr(dset["train"])  # dataset initalization into the GNN
    model_val(dset["validation"],
              state_net=model_tr.gnn.state_transition_function,
              out_net=model_tr.gnn.output_function
              )  # dataset initalization into the GNN
    model_tst(dset["test"],
              state_net=model_tr.gnn.state_transition_function,
              out_net=model_tr.gnn.output_function
              )  # dataset initalization into the GNN

    # training code
    for epoch in range(1, args.epochs + 1):
        model_tr.train_step(epoch)
        if epoch % 10 == 0:
            model_tst.test_step(epoch)
            model_val.valid_step(epoch)
Example #5
def main():
    # Training settings
    parser = argparse.ArgumentParser(description='PyTorch')
    parser.add_argument('--batch-size',
                        type=int,
                        default=64,
                        metavar='N',
                        help='input batch size for training (default: 64)')
    parser.add_argument('--test-batch-size',
                        type=int,
                        default=100,
                        metavar='N',
                        help='input batch size for testing (default: 100)')
    parser.add_argument('--epochs',
                        type=int,
                        default=300,
                        metavar='N',
                        help='number of epochs to train (default: 300)')
    parser.add_argument('--lr',
                        type=float,
                        default=0.0001,
                        metavar='LR',
                        help='learning rate (default: 0.0001)')
    parser.add_argument('--momentum',
                        type=float,
                        default=0.5,
                        metavar='M',
                        help='SGD momentum (default: 0.5)')
    parser.add_argument('--no-cuda',
                        action='store_true',
                        default=False,
                        help='disables CUDA training')
    parser.add_argument('--cuda_dev',
                        type=int,
                        default=0,
                        help='select specific CUDA device for training')
    parser.add_argument('--n_gpu_use',
                        type=int,
                        default=1,
                        help='number of CUDA devices to use for training')
    # parser.add_argument('--seed', type=int, default=1, metavar='S',
    #                     help='random seed (default: 1)')
    parser.add_argument('--log-interval',
                        type=int,
                        default=50,
                        metavar='N',
                        help='logging training status cadence')
    parser.add_argument('--save-model',
                        action='store_true',
                        default=False,
                        help='for saving the current model')
    parser.add_argument('--tensorboard',
                        action='store_true',
                        default=True,
                        help='enable TensorBoard logging')  # NOTE: default=True makes this flag a no-op

    args = parser.parse_args()

    use_cuda = not args.no_cuda and torch.cuda.is_available()
    if not use_cuda:
        args.n_gpu_use = 0

    device = utils.prepare_device(n_gpu_use=args.n_gpu_use,
                                  gpu_id=args.cuda_dev)
    # kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}

    # torch.manual_seed(args.seed)
    # # fix random seeds for reproducibility
    # SEED = 123
    # torch.manual_seed(SEED)
    # torch.backends.cudnn.deterministic = True
    # torch.backends.cudnn.benchmark = False
    # np.random.seed(SEED)

    # configurations
    cfg = GNNWrapper.Config()
    cfg.use_cuda = use_cuda
    cfg.device = device

    cfg.log_interval = args.log_interval
    cfg.tensorboard = args.tensorboard

    # cfg.batch_size = args.batch_size
    # cfg.test_batch_size = args.test_batch_size
    # cfg.momentum = args.momentum

    cfg.dataset_path = './data'
    cfg.epochs = args.epochs
    cfg.lrw = args.lr
    cfg.activation = nn.Tanh()
    cfg.state_transition_hidden_dims = [4]
    cfg.output_function_hidden_dims = []
    cfg.state_dim = 2
    cfg.max_iterations = 50
    cfg.convergence_threshold = 0.001
    cfg.graph_based = False
    cfg.log_interval = 10  # NOTE: overrides the --log-interval CLI value set above
    cfg.task_type = "semisupervised"

    cfg.lrw = 0.01  # NOTE: overrides the --lr CLI value set above

    # model creation
    model = SemiSupGNNWrapper(cfg)
    # dataset creation
    dset = dataloader.get_karate(aggregation_type="sum",
                                 sparse_matrix=True)  # generate the dataset
    #dset = dataloader.get_twochainsSSE(aggregation_type="sum", percentage=0.1, sparse_matrix=True)  # generate the dataset
    model(dset)  # dataset initialization into the GNN

    # training code

    # plotting utilities
    all_states = []
    all_outs = []
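    # With cfg.state_dim = 2, each converged node state is a 2-D point; the
    # animation below uses these states directly as node plot positions.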
    for epoch in range(1, args.epochs + 1):
        out = model.train_step(epoch)
        all_states.append(model.gnn.converged_states.detach().to("cpu"))
        all_outs.append(out.detach().to("cpu"))

        if epoch % 10 == 0:
            model.test_step(epoch)
    # model.test_step()

    # if args.save_model:
    #     torch.save(model.gnn.state_dict(), "mnist_cnn.pt")

    import matplotlib.animation as animation
    import matplotlib.pyplot as plt
    import networkx as nx
    nx_G = nx.karate_club_graph().to_directed()

    def draw(i):
        clscolor = ['#FF0000', '#0000FF', '#FF00FF', '#00FF00']
        pos = {}
        colors = []
        for v in range(34):
            pos[v] = all_states[i][v].numpy()
            cls = all_outs[i][v].argmax(axis=-1)  # predicted class (unused; coloring uses ground truth)
            # colors.append(clscolor[cls])  # alternative: color by prediction
            colors.append(clscolor[dset.targets[v]])
        ax.cla()
        ax.axis('off')
        ax.set_title('Epoch: %d' % i)
        #     node_sha = ["o" for i in range(34)]
        #     for j in idx_train:
        #         node_sha[j] = "s"
        node_sizes = np.full(34, 200)
        node_sizes[dset.idx_train.detach().to("cpu").numpy()] = 350
        nx.draw_networkx(nx_G.to_undirected(),
                         pos,
                         node_color=colors,
                         with_labels=True,
                         node_size=node_sizes,
                         ax=ax)

    #     nx.draw_networkx(nx_G.to_undirected().subgraph(idx_train), pos, node_color=[colors[k] for k in idx_train], node_shape='s',
    #             with_labels=True, node_size=300, ax=ax)

    fig = plt.figure(dpi=150)
    fig.clf()
    ax = fig.subplots()
    draw(0)  # draw the prediction of the first epoch
    plt.close()  # suppress the static figure; the saved animation re-renders it

    ani = animation.FuncAnimation(fig,
                                  draw,
                                  frames=len(all_states),
                                  interval=200)
    ani.save('learning.mp4', fps=30, extra_args=['-vcodec', 'libx264'])
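
# Saving with extra_args=['-vcodec', 'libx264'] requires an FFmpeg build with
# libx264 available on the PATH. If FFmpeg is missing, a GIF via matplotlib's
# PillowWriter (requires Pillow) is a common fallback:
# ani.save('learning.gif', writer=animation.PillowWriter(fps=30))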