Example #1
0
def run_test(dataset=None, epoch=-1, phase="test"):
    """Run one evaluation pass and return the top-1 average accuracy.

    Args:
        dataset: optional pre-built loader. When provided, the code below
            reaches through ``dataset.dataset.dataset`` — i.e. it assumes a
            DataLoader wrapping a torch Subset wrapping the raw dataset
            (the same nesting built in the ``None`` branch) — TODO confirm
            against the caller. When None, a fresh loader is constructed.
        epoch: epoch index forwarded to the model and writer; -1 denotes a
            standalone run and selects the summary plot instead of the
            per-epoch plot.
        phase: label passed to ``writer.plot`` for per-epoch results.

    Returns:
        ``writer.statistics.top1.avg`` — the running top-1 accuracy average.
    """
    print('Running Test')
    opt = TestOptions().parse()
    opt.serial_batches = True  # no shuffle
    if dataset is None:
        dataset = CreateDataset(opt)
        # be consistent with training: wrap in a Subset over the full index
        # range (a no-op selection) so the nesting matches the training path
        dataset = torch.utils.data.Subset(dataset, range(len(dataset)))
        dataset = DataLoader(dataset, opt)
    else:
        # Mirror the provided dataset's class count / channel settings onto
        # this run's freshly parsed opt so the model is built to match.
        opt.nclasses = len(dataset.dataset.dataset.classes)
        opt.input_nc = dataset.dataset.dataset.opt.input_nc
        dataset.dataset.dataset.opt.num_aug = 1  # disable augmentation at test time
        # dataset.dataset.dataset.opt.is_train = False
    model = ClassifierModel(opt)
    writer = Writer(opt)
    # test
    writer.reset_counter()
    for i, data in enumerate(dataset):
        model.set_input(data, epoch)
        loss, (prec1, prec5), y_pred, y_true = model.test()
        writer.update_counter(loss, prec1, prec5, y_pred, y_true)
    if epoch == -1:
        writer.plot_summary("val", dataset.dataset.dataset.classes)
    else:
        writer.plot(epoch, phase, dataset.dataset.dataset.classes)

    return writer.statistics.top1.avg
Example #2
0
def run_test(epoch=-1, is_val=True):
    """Evaluate the model on the validation or test split.

    Args:
        epoch: checkpoint epoch to load (-1 = latest/standalone).
        is_val: when False, evaluate on the held-out test meshes
            instead of the validation split.

    Returns:
        The writer's accumulated accuracy for this pass.
    """
    print('Running Test')
    test_opt = TestOptions().parse()
    test_opt.serial_batches = True  # deterministic batch order for evaluation
    test_opt.which_epoch = epoch
    test_opt.batch_size = 1  # evaluate one mesh at a time

    if not is_val:
        # Redirect the loader to the folder holding the test meshes.
        test_opt.phase = "test"

    loader = DataLoader(test_opt)
    if test_opt.verbose:
        print("DEBUG testpath: ", test_opt.dataroot)
        print("DEBUG dataset length ", len(loader))
    net = create_model(test_opt)
    stats = Writer(test_opt)
    stats.reset_counter()
    for batch in loader:
        net.set_input(batch)
        ncorrect, nexamples = net.test(epoch, is_val)
        if test_opt.verbose:
            print("DEBUG test ncorrect, nexamples ", ncorrect, nexamples)
        stats.update_counter(ncorrect, nexamples)
    stats.print_acc(epoch, stats.acc)
    return stats.acc
Example #3
0
def run_test(epoch=-1):
    """Run one evaluation pass over the test loader and return accuracy."""
    print('Running Test')
    options = TestOptions().parse()
    options.serial_batches = True  # keep sample order fixed during evaluation
    loader = DataLoader(options)
    net = create_model(options)
    log = Writer(options)
    # Evaluate every batch, accumulating correct/total counts.
    log.reset_counter()
    for batch in loader:
        net.set_input(batch)
        correct, total = net.test()
        log.update_counter(correct, total)
    log.print_acc(epoch, log.acc)
    return log.acc
Example #4
0
def run_test(epoch=-1, batch_multiple=64):
    """Evaluate on the ModelNet40 graph dataset and return accuracy.

    Args:
        epoch: epoch index reported to the writer (-1 = standalone run).
        batch_multiple: batches whose label count is not a multiple of this
            value are skipped, dropping ragged final batches. Previously a
            hard-coded ``64``; parameterized (default preserved) so callers
            can pass ``opt.batch_size`` and keep the skip rule in sync with
            the actual batch size.

    Returns:
        The writer's accumulated accuracy over the evaluated batches.
    """
    print('Running Test')
    opt = test_options().parse()
    dataset = ModelNet(root=opt.datasets, name='40_graph', train=False,
                       pre_transform=FaceToGraph(remove_faces=True))
    loader = DataLoader(dataset, batch_size=opt.batch_size, shuffle=False)
    model = create_model(opt)
    writer = Writer(opt)
    writer.reset_counter()
    for data in loader:
        # Skip ragged batches so fixed-size model inputs stay valid.
        # NOTE(review): this silently discards the trailing partial batch;
        # pass batch_multiple=opt.batch_size if the two should agree.
        if data.y.size(0) % batch_multiple != 0:
            continue
        model.set_input_data(data)
        ncorrect, nexamples = model.test()
        writer.update_counter(ncorrect, nexamples)
    writer.print_acc(epoch, writer.acc)
    return writer.acc
Example #5
0
def run_test(epoch=-1):
    """Evaluate segmentation performance for one pass.

    Logs per-batch losses and the mean dice score to the writer and
    returns the accumulated accuracy.
    """
    print('Running Test')
    conf = TestOptions().parse()
    conf.serial_batches = True  # fixed ordering for evaluation
    loader = DataLoader(conf)
    net = create_model(conf)
    log = Writer(conf)
    log.reset_counter()
    dice_total = 0
    losses = []
    for batch in loader:
        net.set_input(batch)
        correct, total, dice, loss = net.test(loss_bool=True)
        losses.append(loss.cpu().data.numpy())
        dice_total += dice
        log.update_counter(correct, total)
    # Mean dice over the number of batches yielded by the loader.
    dice_total /= len(loader)
    log.print_acc(epoch, log.acc, dice_total)
    log.save_val_loss(losses, epoch)
    return log.acc
Example #6
0
def run_test(epoch=-1):
    """Evaluate the model for one pass.

    In 'reconstruction' mode the mean reconstruction loss is printed and
    returned; otherwise the accumulated classification accuracy is.
    """
    print('Running Test')
    cfg = TestOptions().parse()
    cfg.serial_batches = True  # evaluation order must be deterministic
    loader = DataLoader(cfg)
    net = create_model(cfg)
    log = Writer(cfg)
    log.reset_counter()
    recon_loss_total = 0.0
    for batch in loader:
        net.set_input(batch)
        if cfg.dataset_mode == 'reconstruction':
            # Reconstruction mode: test() yields a scalar loss, no counts.
            recon_loss_total += net.test()
        else:
            correct, total = net.test()
            log.update_counter(correct, total)
    # Harmless no-op (0.0 / n) outside reconstruction mode.
    recon_loss_total /= len(loader)
    if cfg.dataset_mode == 'reconstruction':
        log.print_acc(epoch, recon_loss_total)
        return recon_loss_total
    log.print_acc(epoch, log.acc)
    return log.acc
Example #7
0
File: test.py  Project: jleesdev/meshcnn
def run_test(epoch=-1):
    """Run one evaluation pass and return accuracy.

    In classification mode, batches that raise IndexError (e.g. from heap
    pops on degenerate meshes) are counted and skipped, and a sklearn
    classification report is printed at the end.
    """
    print('Running Test')
    conf = TestOptions().parse()
    conf.serial_batches = True  # no shuffling during evaluation
    loader = DataLoader(conf)
    net = create_model(conf)
    log = Writer(conf)
    log.reset_counter()
    skipped = 0
    all_preds = []
    all_labels = []
    for batch in loader:
        net.set_input(batch)
        if conf.dataset_mode != 'classification':
            # Predictions/labels are unpacked but not collected here.
            correct, total, _pred, _label = net.test()
            log.update_counter(correct, total)
            continue
        try:
            correct, total, pred, label = net.test()
            all_preds.append(pred.cpu().numpy())
            all_labels.append(label.cpu().numpy())
            log.update_counter(correct, total)
        except IndexError:
            # Count and skip batches that trip the pooling heap.
            skipped += 1
            print('(%d) IndexError occured, passed to next data' %
                  (skipped))

    log.print_acc(epoch, log.acc)
    if conf.dataset_mode == 'classification':
        print(
            sklearn.metrics.classification_report(
                np.concatenate(all_labels, axis=None),
                np.concatenate(all_preds, axis=None)))
    return log.acc
Example #8
0
File: train.py  Project: jleesdev/meshcnn
    writer = Writer(opt)
    total_steps = 0
    train_start_time = time.time()
    best_tst_acc = 0.0

    torch.manual_seed(1)
    cudnn.benchmark = False
    cudnn.deterministic = True

    for epoch in range(opt.epoch_count, opt.niter + opt.niter_decay + 1):
        epoch_start_time = time.time()
        iter_data_time = time.time()
        epoch_iter = 0
        heappop_error_train = 0
        logger.info('Epoch %d started ...', epoch)
        writer.reset_counter()

        for i, data in enumerate(dataset):
            iter_start_time = time.time()
            if total_steps % opt.print_freq == 0:
                t_data = iter_start_time - iter_data_time
            total_steps += opt.batch_size
            epoch_iter += opt.batch_size
            model.set_input(data)
            try:
                model.optimize_parameters(writer=writer, steps=total_steps)
            except IndexError:
                total_steps -= opt.batch_size
                epoch_iter -= opt.batch_size
                heappop_error_train += 1
                print(