Code Example #1
File: test.py  Project: raikilon/geometry-recognition
def run_test(dataset=None, epoch=-1, phase="test"):
    print('Running Test')
    opt = TestOptions().parse()
    opt.serial_batches = True  # no shuffle
    if dataset is None:
        dataset = CreateDataset(opt)
        # be consistent with training
        dataset = torch.utils.data.Subset(dataset, range(len(dataset)))
        dataset = DataLoader(dataset, opt)
    else:
        opt.nclasses = len(dataset.dataset.dataset.classes)
        opt.input_nc = dataset.dataset.dataset.opt.input_nc
        dataset.dataset.dataset.opt.num_aug = 1
        # dataset.dataset.dataset.opt.is_train = False
    model = ClassifierModel(opt)
    writer = Writer(opt)
    # test
    writer.reset_counter()
    for i, data in enumerate(dataset):
        model.set_input(data, epoch)
        loss, (prec1, prec5), y_pred, y_true = model.test()
        writer.update_counter(loss, prec1, prec5, y_pred, y_true)
    if epoch == -1:
        writer.plot_summary("val", dataset.dataset.dataset.classes)
    else:
        writer.plot(epoch, phase, dataset.dataset.dataset.classes)

    return writer.statistics.top1.avg
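
The `dataset.dataset.dataset` chain in the `else` branch assumes the caller hands in a loader built exactly like the `dataset is None` branch does: a project DataLoader wrapping a torch.utils.data.Subset, which in turn wraps the underlying dataset. A minimal sketch of that assumed nesting, reusing the names from the snippet (the call site and the "val" phase are illustrative, not from the project):

# Assumed construction, mirroring the `dataset is None` branch above
base = CreateDataset(opt)                                 # exposes .classes and .opt
subset = torch.utils.data.Subset(base, range(len(base)))  # subset.dataset -> base
loader = DataLoader(subset, opt)                          # loader.dataset -> subset (assumption)

# From the caller's side, loader.dataset.dataset is the base dataset,
# which is where run_test reads nclasses and input_nc from.
acc = run_test(dataset=loader, epoch=5, phase="val")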
Code Example #2
File: test.py  Project: kimmctim/BrainSurfaceTK
def run_test(epoch=-1, is_val=True):
    print('Running Test')
    opt = TestOptions().parse()
    # No shuffling for test set
    opt.serial_batches = True
    opt.which_epoch = epoch

    # Set batch_size to 1
    opt.batch_size = 1
    # If we are running on the test set, switch the phase so the loader reads the test meshes
    if not is_val:
        opt.phase = "test"

    dataset = DataLoader(opt)
    if opt.verbose:
        print("DEBUG testpath: ", opt.dataroot)
        print("DEBUG dataset length ", len(dataset))
    model = create_model(opt)
    writer = Writer(opt)
    writer.reset_counter()
    for i, data in enumerate(dataset):
        model.set_input(data)
        ncorrect, nexamples = model.test(epoch, is_val)
        if opt.verbose:
            print("DEBUG test ncorrect, nexamples ", ncorrect, nexamples)
        writer.update_counter(ncorrect, nexamples)
    writer.print_acc(epoch, writer.acc)
    return writer.acc
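
Here `opt.which_epoch = epoch` ties the evaluation to a specific saved checkpoint, and `is_val` decides whether the loader stays on the validation split or is pointed at the test meshes via `opt.phase = "test"`. A short, illustrative call site (not taken from the project):

# Validate the checkpoint saved at a given epoch on the held-out set...
val_acc = run_test(epoch=30, is_val=True)
# ...then evaluate the latest model on the test meshes (opt.phase switched to "test"):
test_acc = run_test(is_val=False)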
Code Example #3
def run_test(epoch=-1):
    print('Running Test')
    opt = TestOptions().parse()
    opt.serial_batches = True  # no shuffle
    dataset = DataLoader(opt)
    model = create_model(opt)
    writer = Writer(opt)
    # test
    writer.reset_counter()
    for i, data in enumerate(dataset):
        model.set_input(data)
        ncorrect, nexamples = model.test()
        writer.update_counter(ncorrect, nexamples)
    writer.print_acc(epoch, writer.acc)
    return writer.acc
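
This is the baseline MeshCNN variant that the other examples extend. In that codebase, run_test(epoch) is typically called periodically from the training loop and the returned accuracy is logged. A sketch of that conventional call site; `opt.run_test_freq` and `writer.plot_acc` follow MeshCNN conventions and are not part of this snippet:

from test import run_test

# inside the training loop, after the epoch's checkpoint has been saved
if epoch % opt.run_test_freq == 0:
    acc = run_test(epoch)
    writer.plot_acc(acc, epoch)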
Code Example #4
File: test.py  Project: junhua-zhang/MeshGraph
def run_test(epoch=-1):
    print('Running Test')
    opt = test_options().parse()
    dataset = ModelNet(root=opt.datasets, name='40_graph', train=False,
                       pre_transform=FaceToGraph(remove_faces=True))
    loader = DataLoader(dataset, batch_size=opt.batch_size, shuffle=False)
    model = create_model(opt)
    writer = Writer(opt)
    writer.reset_counter()
    for i, data in enumerate(loader):
        # skip any batch that does not hold exactly 64 graphs (e.g. the incomplete final batch)
        if data.y.size(0) % 64 != 0:
            continue
        model.set_input_data(data)
        ncorrect, nexamples = model.test()
        writer.update_counter(ncorrect, nexamples)
    writer.print_acc(epoch, writer.acc)
    return writer.acc
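
The `data.y.size(0) % 64 != 0` check silently drops every batch that does not hold exactly 64 graphs, i.e. the incomplete final batch when `opt.batch_size` is 64. Assuming the DataLoader here is torch_geometric's (which forwards keyword arguments to torch.utils.data.DataLoader), the same effect can be achieved at construction time instead:

# Hypothetical equivalent: let the loader drop the incomplete final batch
loader = DataLoader(dataset, batch_size=opt.batch_size, shuffle=False, drop_last=True)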
Code Example #5
File: test_script.py  Project: s183983/MeshCNN_sparse
def run_test(epoch=-1):
    print('Running Test')
    opt = TestOptions().parse()
    opt.serial_batches = True  # no shuffle
    dataset = DataLoader(opt)
    model = create_model(opt)
    writer = Writer(opt)
    # test
    dice_sum = 0  # running sum of per-batch dice scores
    writer.reset_counter()
    loss_mat = []  # per-batch validation losses for this epoch
    for i, data in enumerate(dataset):
        model.set_input(data)
        ncorrect, nexamples, dice, loss = model.test(loss_bool=True)
        loss_mat.append(loss.cpu().data.numpy())
        dice_sum += dice
        writer.update_counter(ncorrect, nexamples)
    dice_sum /= len(dataset)
    writer.print_acc(epoch, writer.acc, dice_sum)
    writer.save_val_loss(loss_mat, epoch)
    return writer.acc
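
`loss_mat` collects one scalar per batch (via `loss.cpu().data.numpy()`) and `writer.save_val_loss` persists the whole list per epoch, while `dice_sum` is averaged over `len(dataset)`. If a single validation-loss figure is also wanted, the list can be reduced before saving; a minimal, illustrative sketch:

import numpy as np

# mean validation loss over all batches of this epoch (not part of the snippet)
mean_val_loss = float(np.mean(loss_mat))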
Code Example #6
def run_test(epoch=-1):
    print('Running Test')
    opt = TestOptions().parse()
    opt.serial_batches = True  # no shuffle
    dataset = DataLoader(opt)
    model = create_model(opt)
    writer = Writer(opt)
    # test
    writer.reset_counter()
    recon_test_loss = 0.0  # accumulated loss when testing a reconstruction model
    for i, data in enumerate(dataset):
        model.set_input(data)
        if opt.dataset_mode == 'reconstruction':
            recon_test_loss += model.test()
            continue
        ncorrect, nexamples = model.test()
        writer.update_counter(ncorrect, nexamples)
    recon_test_loss /= len(dataset)
    if opt.dataset_mode == 'reconstruction':
        writer.print_acc(epoch, recon_test_loss)
        return recon_test_loss
    writer.print_acc(epoch, writer.acc)
    return writer.acc
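
In 'reconstruction' mode, model.test() is assumed to return a scalar loss rather than (ncorrect, nexamples), so the function returns a mean reconstruction loss instead of an accuracy. The caller therefore has to know which metric it is getting back; an illustrative call site (not from the project):

metric = run_test(epoch)
if opt.dataset_mode == 'reconstruction':
    print('epoch %d: mean reconstruction loss %.4f' % (epoch, metric))
else:
    print('epoch %d: test accuracy %.4f' % (epoch, metric))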
Code Example #7
File: test.py  Project: jleesdev/meshcnn
def run_test(epoch=-1):
    print('Running Test')
    opt = TestOptions().parse()
    opt.serial_batches = True  # no shuffle
    dataset = DataLoader(opt)
    model = create_model(opt)
    writer = Writer(opt)
    # test
    writer.reset_counter()
    heappop_error_test = 0
    pred_classes = []
    label_classes = []
    for i, data in enumerate(dataset):
        model.set_input(data)
        if opt.dataset_mode == 'classification':
            try:
                ncorrect, nexamples, pred_class, label_class = model.test()
                pred_classes.append(pred_class.cpu().numpy())
                label_classes.append(label_class.cpu().numpy())
                #print(sklearn.metrics.classification_report(np.concatenate(label_classes, axis=None), np.concatenate(pred_classes, axis=None)))
                writer.update_counter(ncorrect, nexamples)
            except IndexError:
                heappop_error_test += 1
                print('(%d) IndexError occurred, skipping to the next sample' %
                      (heappop_error_test))
                pass
        else:
            ncorrect, nexamples, pred_class, label_class = model.test()
            writer.update_counter(ncorrect, nexamples)

    writer.print_acc(epoch, writer.acc)
    if opt.dataset_mode == 'classification':
        print(
            sklearn.metrics.classification_report(
                np.concatenate(label_classes, axis=None),
                np.concatenate(pred_classes, axis=None)))
    return writer.acc
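
This variant assumes module-level imports of NumPy and scikit-learn (import numpy as np, import sklearn.metrics); np.concatenate(..., axis=None) flattens the list of per-batch arrays into one 1-D vector before the per-class report is computed. A self-contained illustration of that final step, with dummy arrays standing in for the collected batches:

import numpy as np
import sklearn.metrics

# dummy per-batch predictions / labels in place of pred_classes / label_classes
pred_classes = [np.array([0, 1]), np.array([1, 1])]
label_classes = [np.array([0, 1]), np.array([1, 0])]

print(sklearn.metrics.classification_report(
    np.concatenate(label_classes, axis=None),
    np.concatenate(pred_classes, axis=None)))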