Example #1
import pathlib
import subprocess
from collections import Counter

from torch_geometric.datasets import TUDataset
from torch_geometric.transforms import OneHotDegree


def main(args):
    # Build the gkernel binary once if it is not already present.
    path = pathlib.Path('./src/gkernel')
    if not path.is_file():
        subprocess.call('make', cwd='./src', shell=True)
    dataset = TUDataset(root=f'{args.dir}/Pytorch_geometric/{args.dataset}', name=args.dataset)

    # Featureless datasets get one-hot node degrees as input features.
    if dataset.num_features == 0:
        max_degree = -1
        for data in dataset:
            # Count how often each node appears as an edge source, i.e. its degree.
            degrees = Counter(data.edge_index[0].tolist())
            max_degree = max(max_degree, max(degrees.values()))

        dataset.transform = OneHotDegree(max_degree=max_degree, cat=False)

    # Compute the kernel matrix with the external gkernel tool, unless it is cached.
    path = pathlib.Path(f'{args.dir}/GraphML/{args.dataset}/{args.dataset.lower()}_{args.kernel}.kernel')
    if not path.is_file():
        save_to_graphml(dataset, f'{args.dir}/GraphML/{args.dataset}')
        cmd = ['./src/gkernel', '-k', args.kernel]
        if args.parameter:
            cmd += ['-p', args.parameter]
        cmd += ['-i', f'{args.dir}/GraphML/{args.dataset}/{args.dataset.lower()}.list',
                '-g', f'{args.dir}/GraphML/{args.dataset}/data/',
                '-o', f'{args.dir}/GraphML/{args.dataset}/{args.dataset.lower()}_{args.kernel}.kernel']
        subprocess.call(cmd)

    # Evaluate a classifier on the precomputed kernel matrix.
    K = read_kernel_matrix(f'{args.dir}/GraphML/{args.dataset}/{args.dataset.lower()}_{args.kernel}.kernel')

    y = dataset.data.y.numpy()  # graph-level class labels

    ev = Evaluation(K, y, args, verbose=True)

    accs = ev.evaluate(dataset)
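
For context, a minimal sketch of the argparse setup this example assumes. Only the four fields read above (dir, dataset, kernel, parameter) are used; the flag defaults here are illustrative, not taken from the original project:

import argparse

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--dir', default='./data',
                        help='root directory for datasets and kernel files')
    parser.add_argument('--dataset', default='MUTAG',
                        help='name of the TUDataset to load')
    parser.add_argument('--kernel', default='sp',
                        help='graph kernel name passed to ./src/gkernel via -k')
    parser.add_argument('--parameter', default=None,
                        help='optional kernel parameter passed via -p')
    main(parser.parse_args())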
Example #2
# The original snippet begins mid-call: the lines below are the tail end of the
# call that builds `options`; the opening of that call is truncated in the source.
                      outputWidth=128,
                      outputHeight=128)
options['data']['dir'] = options['globals'][dataset.value]
datasetHC, datasetPC = get_datasets(options, dataset=dataset)
config = get_config(trainer=ConstrainedAAE,
                    options=options,
                    optimizer='ADAM',
                    intermediateResolutions=[16, 16],
                    dropout_rate=0.1,
                    dataset=datasetHC)

# Hyperparameters of the constrained adversarial objective
config.kappa = 1.0
config.scale = 10.0
config.rho = 1.0

# Create an instance of the model
model = ConstrainedAAE(tf.Session(),
                       config,
                       network=constrained_adversarial_autoencoder_Chen)

# Train it
model.train(datasetHC)

# Evaluate
Evaluation.evaluate(
    datasetPC,
    model,
    options,
    description=f"{type(datasetHC).__name__}-{options['threshold']}",
    epoch=str(options['train']['numEpochs']))
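
The snippet above uses the TensorFlow 1.x graph/session API (tf.Session()). If the surrounding project is run under a TensorFlow 2 installation, the usual way to keep such code working is the v1 compatibility shim; a minimal sketch, assuming the rest of the project is TF1-style graph code:

import tensorflow.compat.v1 as tf

tf.disable_v2_behavior()  # restore TF1 graph-and-session semantics

sess = tf.Session()  # the session handed to ConstrainedAAE above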
import numpy as np
import torch
from torchvision import transforms

# IntensiveCNN, BasicCNN, create_balanced_split_loaders, and Evaluation
# are project-local imports.


def main(model_name, model_path):
    conf = {'z_score': True}

    # Setup: initialize the hyperparameters/variables
    num_epochs = 5  # Number of full passes through the dataset
    batch_size = 128  # Number of samples in each minibatch
    learning_rate = 1e-5
    seed = 1  # Seed for the data split
    np.random.seed(seed)  # Seed the random number generator for reproducibility
    p_val = 0.1  # Percent of the overall dataset to reserve for validation
    p_test = 0.2  # Percent of the overall dataset to reserve for testing
    val_every_n = 100  # Run validation every n minibatches

    early_stop_counter = 0  # Early-stopping bookkeeping (unused in this evaluation-only function)
    early_stop_max = 7
    is_converged = False

    # Resize the images and convert them to tensors
    transform = transforms.Compose(
        [transforms.Resize(512), transforms.ToTensor()])

    # Check if your system supports CUDA
    use_cuda = torch.cuda.is_available()

    # Setup GPU optimization if CUDA is supported
    if use_cuda:
        computing_device = torch.device("cuda")
        extras = {"num_workers": 0, "pin_memory": True}
        print("CUDA is supported")
    else:  # Otherwise, run on the CPU
        computing_device = torch.device("cpu")
        extras = {}  # No DataLoader extras needed on CPU
        print("CUDA NOT supported")

    train_loader, val_loader, test_loader, _ = create_balanced_split_loaders(
        batch_size,
        seed,
        transform=transform,
        p_val=p_val,
        p_test=p_test,
        shuffle=True,
        show_sample=False,
        extras=extras,
        z_score=conf['z_score'])

    if model_name == 'intensive':
        model = IntensiveCNN()
        model = model.to(computing_device)
        model.load_state_dict(torch.load(model_path)['model_state_dict'])
    elif model_name == 'baseline':
        model = BasicCNN()
        model = model.to(computing_device)
        model.load_state_dict(torch.load(model_path))
    else:
        raise ValueError(f"Unknown model name: {model_name}")
    model.eval()

    # Collect labels and thresholded predictions over the test set
    labels_all = []
    predictions_all = []
    with torch.no_grad():
        for data in test_loader:
            images, labels = data
            images, labels = images.to(computing_device), labels.to(
                computing_device)
            labels_all.append(labels)
            output = model(images)
            predictions = output > 0.5
            predictions_all.append(predictions)

    labels = torch.cat(labels_all, 0)
    predictions = torch.cat(predictions_all, 0)

    evaluation = Evaluation(predictions.float(), labels)
    evaluation.evaluate()
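
Evaluation here is a project-local helper whose internals are not shown. Purely for reference, a hypothetical stand-in that computes the usual binary-classification metrics from the same stacked tensors; the function name and returned metrics are assumptions, not the project's actual class:

import torch

def evaluate_binary(predictions: torch.Tensor, labels: torch.Tensor) -> dict:
    # predictions and labels are tensors of 0/1 values with identical shape.
    tp = ((predictions == 1) & (labels == 1)).sum().item()
    fp = ((predictions == 1) & (labels == 0)).sum().item()
    fn = ((predictions == 0) & (labels == 1)).sum().item()
    tn = ((predictions == 0) & (labels == 0)).sum().item()
    precision = tp / (tp + fp) if tp + fp else 0.0
    recall = tp / (tp + fn) if tp + fn else 0.0
    f1 = 2 * precision * recall / (precision + recall) if precision + recall else 0.0
    accuracy = (tp + tn) / max(tp + fp + fn + tn, 1)
    return {'accuracy': accuracy, 'precision': precision, 'recall': recall, 'f1': f1}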