Example #1
def test_net():
    ''' Evaluate the network '''
    # Make result directory and the result file.
    result_dir = os.path.join(cfg.DIR.OUT_PATH, cfg.TEST.EXP_NAME)
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)
    result_fn = os.path.join(result_dir, 'result.mat')

    print("Exp file will be written to: " + result_fn)

    # Make a network and load weights
    NetworkClass = load_model(cfg.CONST.NETWORK_CLASS)
    print('Network definition: \n')
    print(inspect.getsource(NetworkClass.network_definition))
    net = NetworkClass(compute_grad=False)
    net.load(cfg.CONST.WEIGHTS)
    solver = Solver(net)

    # set constants
    batch_size = cfg.CONST.BATCH_SIZE

    # set up testing data process. We make only one prefetching process. The
    # process will return one batch at a time.
    queue = Queue(cfg.QUEUE_SIZE)
    data_pair = category_model_id_pair(
        dataset_portion=cfg.TEST.DATASET_PORTION)
    processes = make_data_processes(queue,
                                    data_pair,
                                    1,
                                    repeat=False,
                                    train=False)
    num_data = len(processes[0].data_paths)
    num_batch = int(num_data / batch_size)

    # prepare result container
    results = {'cost': np.zeros(num_batch)}
    for thresh in cfg.TEST.VOXEL_THRESH:
        results[str(thresh)] = np.zeros((num_batch, batch_size, 5))

    # Get all test data
    batch_idx = 0
    for batch_img, batch_voxel in get_while_running(processes[0], queue):
        if batch_idx == num_batch:
            break

        pred, loss, activations = solver.test_output(batch_img, batch_voxel)
        print('%d/%d, cost is: %f' % (batch_idx, num_batch, loss))

        for i, thresh in enumerate(cfg.TEST.VOXEL_THRESH):
            for j in range(batch_size):
                r = evaluate_voxel_prediction(pred[j, ...],
                                              batch_voxel[j, ...], thresh)
                results[str(thresh)][batch_idx, j, :] = r

        # record result for the batch
        results['cost'][batch_idx] = float(loss)
        batch_idx += 1

    print('Total loss: %f' % np.mean(results['cost']))
    sio.savemat(result_fn, results)
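All three examples rely on evaluate_voxel_prediction, which is not shown on this page. Judging from the (num_batch, batch_size, 5) result arrays and the pred[j, 1] / batch_voxel[j, 1] indexing used for average precision in the later examples, each sample appears to be a two-channel voxel grid with occupancy in channel 1. A minimal sketch under those assumptions (the exact counts returned, and their order, are guesses) could look like this:

import numpy as np

def evaluate_voxel_prediction(pred, gt, thresh):
    """Hypothetical sketch, not the original implementation: binarize the
    predicted occupancy channel at `thresh` and count voxel-level agreement
    with the ground truth. Assumes a per-sample layout of (2, D, H, W) with
    channel 1 = occupied; the five returned counts and their order are
    assumptions chosen to fill the (num_batch, batch_size, 5) arrays above."""
    pred_occupy = pred[1, ...] >= thresh        # binarized predicted occupancy
    gt_occupy = gt[1, ...].astype(bool)         # ground-truth occupancy
    intersection = np.sum(np.logical_and(pred_occupy, gt_occupy))
    union = np.sum(np.logical_or(pred_occupy, gt_occupy))
    num_fp = np.sum(np.logical_and(pred_occupy, ~gt_occupy))   # false positives
    num_fn = np.sum(np.logical_and(~pred_occupy, gt_occupy))   # false negatives
    diff = num_fp + num_fn                      # total disagreeing voxels
    return np.array([diff, intersection, union, num_fp, num_fn])

Under this layout, intersection-over-union for a given threshold is simply intersection / union, which is how the stored counts are usually reduced afterwards.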
Example #2
    def val_net(self, val_loader=None, global_step=0):
        ''' Validate the network '''
        with torch.no_grad():
            self.net.eval()

            val_losses = 0
            val_num_iter = min(cfg.TRAIN.NUM_VALIDATION_ITERATIONS,
                               len(val_loader))
            val_loader_iter = iter(val_loader)

            num_data = val_num_iter
            num_batch = num_data  # only one sample is evaluated per validation batch

            # prepare result container
            results = {
                'cost': np.zeros(num_batch),
                'mAP': np.zeros((num_batch, 1))
            }
            # Save results for various thresholds
            for thresh in cfg.TEST.VOXEL_THRESH:
                results[str(thresh)] = np.zeros((num_batch, 1, 5))

            # Get all test data
            batch_idx = 0
            for _ in range(val_num_iter):
                if batch_idx == num_batch:
                    break
                # Use the next() builtin; newer PyTorch DataLoader iterators do not define .next().
                batch_img, batch_voxel = next(val_loader_iter)
                pred, loss, activations = self.test_output(
                    batch_img, batch_voxel)
                # Convert PyTorch tensors to NumPy arrays
                pred = pred.detach().cpu().numpy()
                loss = loss.detach().cpu().numpy()
                batch_voxel_np = batch_voxel.cpu().numpy()
                # Save IoU per thresh
                j = 0  # only the first sample of each validation batch is evaluated
                for thresh in cfg.TEST.VOXEL_THRESH:
                    r = evaluate_voxel_prediction(
                        pred[j, ...], batch_voxel_np[j, ...], thresh)
                    results[str(thresh)][batch_idx, j, :] = r
                # Compute AP
                precision = sklearn.metrics.average_precision_score(
                    batch_voxel_np[j, 1].flatten(), pred[j, 1].flatten())
                results['mAP'][batch_idx, j] = precision
                # record result for the batch
                results['cost'][batch_idx] = float(loss)

                batch_idx += 1

            print('Validation loss: %f' % np.mean(results['cost']))
            print('Validation mAP: %f' % np.mean(results['mAP']))

            self.net.tb_logger.add_scalar('Validation loss',
                                          np.mean(results['cost']),
                                          global_step)
            self.net.tb_logger.add_scalar('Validation mAP',
                                          np.mean(results['mAP']), global_step)
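Neither example reduces the per-threshold count arrays any further. A small hypothetical helper (not part of the original code) can turn them into a mean IoU per threshold, assuming the five columns are [diff, intersection, union, false_pos, false_neg] as in the sketch after Example #1:

import numpy as np

def summarize_iou(results, voxel_threshs):
    """Hypothetical helper: print the mean intersection-over-union per
    threshold from the raw counts collected above. Assumes column 1 holds
    the intersection and column 2 the union for each sample."""
    for thresh in voxel_threshs:
        counts = results[str(thresh)]                          # (num_batch, batch_size, 5)
        iou = counts[..., 1] / np.maximum(counts[..., 2], 1)   # intersection / union
        print('thresh %s: mean IoU %.4f' % (thresh, float(iou.mean())))

Called as summarize_iou(results, cfg.TEST.VOXEL_THRESH) right before the final prints and tensorboard logging, it reports the per-threshold IoU that the stored counts are meant to support.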
Example #3
def test_net():
    ''' Evaluate the network '''
    # Make result directory and the result file.
    result_dir = os.path.join(cfg.DIR.OUT_PATH, cfg.TEST.EXP_NAME)
    if not os.path.exists(result_dir):
        os.makedirs(result_dir)
    result_fn = os.path.join(result_dir, 'result.mat')

    print("Exp file will be written to: " + result_fn)

    # Make a network and load weights
    NetworkClass = load_model(cfg.CONST.NETWORK_CLASS)

    #print('Network definition: \n')
    #print(inspect.getsource(NetworkClass.network_definition))

    net = NetworkClass()
    net.cuda()

    net.eval()

    solver = Solver(net)
    solver.load(cfg.CONST.WEIGHTS)

    # set constants
    batch_size = cfg.CONST.BATCH_SIZE

    # Set up the test data loader. A single worker process prefetches the data
    # and returns one batch at a time.

    test_dataset = ShapeNetDataset(dataset_portion=cfg.TEST.DATASET_PORTION)
    test_collate_fn = ShapeNetCollateFn(train=False)
    test_loader = DataLoader(dataset=test_dataset,
                             batch_size=batch_size,
                             shuffle=False,
                             num_workers=1,
                             collate_fn=test_collate_fn,
                             pin_memory=True)

    num_data = len(test_dataset)
    num_batch = int(num_data / batch_size)

    # prepare result container
    results = {
        'cost': np.zeros(num_batch),
        'mAP': np.zeros((num_batch, batch_size))
    }
    # Save results for various thresholds
    for thresh in cfg.TEST.VOXEL_THRESH:
        results[str(thresh)] = np.zeros((num_batch, batch_size, 5))

    # Get all test data
    batch_idx = 0
    for batch_img, batch_voxel in test_loader:
        if batch_idx == num_batch:
            break

        #activations is a list of torch.cuda.FloatTensor
        pred, loss, activations = solver.test_output(batch_img, batch_voxel)

    # Convert PyTorch tensors to NumPy arrays
        pred = pred.detach().cpu().numpy()
        loss = loss.detach().cpu().numpy()
        batch_voxel_np = batch_voxel.cpu().numpy()

        for j in range(batch_size):
            # Save IoU per thresh
            for i, thresh in enumerate(cfg.TEST.VOXEL_THRESH):
                r = evaluate_voxel_prediction(pred[j, ...],
                                              batch_voxel_np[j, ...], thresh)
                results[str(thresh)][batch_idx, j, :] = r

            # Compute AP
            precision = sklearn.metrics.average_precision_score(
                batch_voxel_np[j, 1].flatten(), pred[j, 1].flatten())

            results['mAP'][batch_idx, j] = precision

        # record result for the batch
        results['cost'][batch_idx] = float(loss)
        print('%d/%d, costs: %f, mAP: %f' %
              (batch_idx, num_batch, loss, np.mean(results['mAP'][batch_idx])))
        batch_idx += 1

    print('Total loss: %f' % np.mean(results['cost']))
    print('Total mAP: %f' % np.mean(results['mAP']))
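Example #3 builds and prints result_fn, but the snippet ends before anything is written to it. If the intent mirrors Example #1, the missing final step is a single call (an assumption; the original snippet may simply have been truncated here):

    # Assumed final step, mirroring Example #1: persist the per-batch metrics.
    sio.savemat(result_fn, results)

The saved file can later be reloaded with scipy.io.loadmat(result_fn) for offline analysis of the per-threshold counts and mAP values.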