Example #1
def eval(name, cpu, test_data, train, arch, log_dir, model_path,
         output_filename):
    use_cuda = not cpu and torch.cuda.is_available()
    device = "cuda" if use_cuda else "cpu"
    print("Using device:", device)

    model = get_model(arch)
    model.to(device)
    model = load_model(
        model, device, model_path
    )  # modified to take model_path as an argument; may not be a good idea for normal use

    criterion = get_criterion(device, train["loss_reduction"])
    exp_logger = logger.Experiment(name)
    exp_logger.add_meters("test", metrics.make_meter_matching())

    gene_test = Generator("test", test_data)
    gene_test.load_dataset()
    test_loader = siamese_loader(gene_test, train["batch_size"],
                                 gene_test.constant_n_vertices)
    acc, loss = trainer.val_triplet(
        test_loader,
        model,
        criterion,
        exp_logger,
        device,
        epoch=0,
        eval_score=metrics.accuracy_linear_assignment,
        val_test="test",
    )
    key = create_key()
    filename_test = os.path.join(log_dir, output_filename)
    print("Saving result at: ", filename_test)
    save_to_json(key, acc, loss, filename_test)
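
For context, a minimal sketch of how this eval() entry point might be invoked. The config dictionaries are hypothetical placeholders: only the keys actually read above ("loss_reduction" and "batch_size" in train) are known from the snippet, and the contents of test_data and arch depend on Generator and get_model, which are not shown here.

# Hypothetical call site for the eval() example above (not from the original source).
train_cfg = {"loss_reduction": "mean", "batch_size": 32}  # only these two keys are read; values are illustrative
test_cfg = {}   # placeholder: passed straight to Generator("test", ...)
arch_cfg = {}   # placeholder: passed straight to get_model()

eval(
    name="qap-eval",
    cpu=False,  # falls back to CPU automatically if CUDA is unavailable
    test_data=test_cfg,
    train=train_cfg,
    arch=arch_cfg,
    log_dir="runs/qap",
    model_path="runs/qap/model_best.pth.tar",
    output_filename="test_results.json",
)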
Example #2
def eval(name, cpu, test_data, train, arch, log_dir, model_path,
         output_filename):
    use_cuda = not cpu and torch.cuda.is_available()
    device = 'cuda' if use_cuda else 'cpu'
    print('Using device:', device)

    model = get_model(arch)
    model.to(device)
    model = load_model(model, device)

    criterion = get_criterion(device, train['loss_reduction'])
    exp_logger = logger.Experiment(name)
    exp_logger.add_meters('test', metrics.make_meter_matching())

    gene_test = Generator('test', test_data)
    gene_test.load_dataset()
    test_loader = siamese_loader(gene_test, train['batch_size'],
                                 gene_test.constant_n_vertices)
    acc, loss = trainer.val_triplet(
        test_loader,
        model,
        criterion,
        exp_logger,
        device,
        epoch=0,
        eval_score=metrics.accuracy_linear_assignment,
        val_test='test')
    key = create_key()
    filename_test = os.path.join(log_dir, output_filename)
    print('Saving result at: ', filename_test)
    save_to_json(key, acc, loss, filename_test)
Example #3
def init_logger(name, _config, _run):
    # set loggers
    exp_logger = logger.Experiment(name, _config, run=_run)
    exp_logger.add_meters("train", metrics.make_meter_matching())
    exp_logger.add_meters("val", metrics.make_meter_matching())
    # exp_logger.add_meters('test', metrics.make_meter_matching())
    exp_logger.add_meters("hyperparams", {"learning_rate": metrics.ValueMeter()})
    return exp_logger
Example #4
def init_logger(args, model):
    # set loggers
    exp_name = args.name
    exp_logger = logger.Experiment(exp_name, args.__dict__)
    exp_logger.add_meters('train', metrics.make_meters(args.num_classes))
    exp_logger.add_meters('val', metrics.make_meters(args.num_classes))
    exp_logger.add_meters('hyperparams', {'learning_rate': metrics.ValueMeter()})
    return exp_logger
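
A minimal sketch of how this argparse-flavoured variant might be called. The attribute names follow what the function reads (args.name, args.num_classes, plus args.__dict__ forwarded as the experiment config); the model argument is accepted but not used in the body shown above, and all values are illustrative.

# Hypothetical call site for the init_logger(args, model) example above.
import argparse

args = argparse.Namespace(name="classif-run", num_classes=10, lr=1e-3)  # extra attributes end up in the logged config
exp_logger = init_logger(args, model=None)  # model is not used by this variant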
Example #5
def init_logger(name, _config, _run):
    # set loggers
    exp_logger = logger.Experiment(name, _config, run=_run)
    exp_logger.add_meters('train', metrics.make_meter_matching())
    exp_logger.add_meters('val', metrics.make_meter_matching())
    #exp_logger.add_meters('test', metrics.make_meter_matching())
    exp_logger.add_meters('hyperparams',
                          {'learning_rate': metrics.ValueMeter()})
    return exp_logger
Example #6
def init_logger(args):
    # set loggers
    exp_name = args['--name']
    exp_logger = logger.Experiment(exp_name, args)
    exp_logger.add_meters('train', metrics.make_meter_matching())
    exp_logger.add_meters('val', metrics.make_meter_matching())
    exp_logger.add_meters('hyperparams',
                          {'learning_rate': metrics.ValueMeter()})
    return exp_logger
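
For the docopt-style variant just above, a minimal sketch of how the returned logger is typically driven. The reset_meters/update_meter/get_meter calls mirror the ones used in the eval_spectral example below; the meter values are illustrative.

# Hypothetical driver for the logger returned by init_logger() above.
args = {"--name": "matching-run"}  # docopt-style CLI dict, read via args['--name']
exp_logger = init_logger(args)

exp_logger.reset_meters("train")
exp_logger.update_meter("train", "acc", 0.87, n=128)  # illustrative accuracy over one batch
acc = exp_logger.get_meter("train", "acc")
print("Train Acc {acc.avg:.3f} ({acc.val:.3f})".format(acc=acc))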
Example #7
def eval_spectral(name, load_data, train, test_data, log_dir, output_filename):
    exp_logger = logger.Experiment(name)
    exp_logger.add_meters("test", metrics.make_meter_matching())

    gene_test = Generator("test", test_data)
    if load_data:
        gene_test.load_dataset()
    else:
        gene_test.create_dataset()
    test_loader = label_loader(gene_test, train["batch_size"], gene_test.constant_n_vertices)

    exp_logger.reset_meters("test")

    print_freq = 10
    for i, (input, cluster_sizes) in enumerate(test_loader):
        acc, total_n_vertices = metrics.accuracy_spectral_cluster_kmeans(input, cluster_sizes)
        exp_logger.update_meter("test", "acc", acc, n=total_n_vertices)
        if i % print_freq == 0:
            acc = exp_logger.get_meter("test", "acc")
            print("Test set\t" "Acc {acc.avg:.3f} ({acc.val:.3f})".format(acc=acc))
Example #8
def eval(name, cpu, load_data, test_data, train, arch, log_dir, model_path, output_filename, return_result=False):
    use_cuda = not cpu and torch.cuda.is_available()
    device = "cuda" if use_cuda else "cpu"
    print("Using device:", device)

    model = get_model(arch)
    model.to(device)
    model = load_model(model, device)

    if arch["arch"] == "Simple_Node_Embedding":
        criterion = cluster_embedding_loss(device=device)
    elif arch["arch"] == "Similarity_Model":
        criterion = cluster_similarity_loss()
    else:
        raise ValueError("Unsupported arch: {}".format(arch["arch"]))
    exp_logger = logger.Experiment(name)
    exp_logger.add_meters("test", metrics.make_meter_matching())

    gene_test = Generator("test", test_data)
    if load_data:
        gene_test.load_dataset()
    else:
        gene_test.create_dataset()
    test_loader = label_loader(gene_test, train["batch_size"], gene_test.constant_n_vertices)
    acc, loss = trainer.val_cluster(
        test_loader,
        model,
        criterion,
        exp_logger,
        device,
        epoch=0,
        eval_score=metrics.accuracy_cluster_kmeans,
        val_test="test",
    )
    if not return_result:
        key = create_key()
        filename_test = os.path.join(log_dir, output_filename)
        print("Saving result at: ", filename_test)
        save_to_json(key, acc, loss, filename_test)
    return acc, loss
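
A minimal sketch of the two ways the clustering eval() above can be used: with the default return_result=False the metrics are written to os.path.join(log_dir, output_filename) via save_to_json, while return_result=True skips saving and simply returns (acc, loss). The config contents are hypothetical placeholders; only "batch_size" in train and "arch" in arch are read by this variant.

# Hypothetical call site for the clustering eval() example above.
acc, loss = eval(
    name="cluster-eval",
    cpu=False,
    load_data=True,                          # load an existing dataset instead of generating one
    test_data={},                            # placeholder: passed straight to Generator("test", ...)
    train={"batch_size": 32},                # only "batch_size" is read in this variant
    arch={"arch": "Simple_Node_Embedding"},  # selects cluster_embedding_loss as the criterion
    log_dir="runs/cluster",
    model_path="runs/cluster/model_best.pth.tar",  # accepted by the signature, unused in the body above
    output_filename="test_results.json",
    return_result=True,                      # return (acc, loss) instead of saving to JSON
)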