Example 1
def test_network(cfg, network, data_loader, checkpoint, result_set):
    _checkpoint = torch.load(checkpoint)
    _checkpoint = {k.replace('module.', ''): v for k, v in _checkpoint['rmnet'].items()}
    network.load_state_dict(_checkpoint)
    network.eval()

    checkpoint = os.path.basename(checkpoint)
    test_metrics = AverageMeter(Metrics.names())
    device, = list(set(p.device for p in network.parameters()))
    for idx, (video_name, n_objects, frames, masks, optical_flows) in enumerate(
            tqdm(data_loader,
                 leave=False,
                 desc='%s on GPU %d' % (checkpoint, device.index),
                 position=device.index)):
        with torch.no_grad():
            try:
                est_probs = network(frames, masks, optical_flows, n_objects,
                                    cfg.TEST.MEMORIZE_EVERY, device)
                est_probs = est_probs.permute(0, 2, 1, 3, 4)
                masks = torch.argmax(masks, dim=2)
                est_masks = torch.argmax(est_probs, dim=1)
            except Exception as ex:
                logging.warning('Error occurred during testing Checkpoint[Name=%s]: %s' %
                                (checkpoint, ex))
                continue

            metrics = Metrics.get(est_masks[0], masks[0])
            test_metrics.update(metrics, torch.max(n_objects[0]).item())

    jf_mean = test_metrics.avg(2)
    if jf_mean != 0:
        logging.info('Checkpoint[Name=%s] has been tested successfully, JF-Mean = %.4f.' %
                     (checkpoint, jf_mean))
    else:
        logging.warning('Exception occurred during testing Checkpoint[Name=%s]' % checkpoint)

    result_set['JF-Mean'] = jf_mean
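
Note on the checkpoint remapping at the top of this example: torch.nn.DataParallel prefixes every parameter name with 'module.', so checkpoints saved from a wrapped model must have that prefix stripped before loading into an unwrapped one. A minimal stand-alone sketch of the same idea (the helper name is illustrative; the 'rmnet' key matches the checkpoint layout used above):

import torch

def load_weights_without_dataparallel_prefix(network, checkpoint_path, key='rmnet'):
    # Checkpoints saved from a DataParallel-wrapped model store parameters as
    # 'module.<name>'; strip the prefix so an unwrapped model can load them.
    checkpoint = torch.load(checkpoint_path, map_location='cpu')
    state_dict = {k.replace('module.', '', 1): v for k, v in checkpoint[key].items()}
    network.load_state_dict(state_dict)
    network.eval()
    return network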
Example 2
def test_net(cfg,
             epoch_idx=-1,
             test_data_loader=None,
             test_writer=None,
             grnet=None):
    # Enable the inbuilt cudnn auto-tuner to find the best algorithm to use
    torch.backends.cudnn.benchmark = True

    if test_data_loader is None:
        # Set up data loader
        dataset_loader = utils.data_loaders.DATASET_LOADER_MAPPING[
            cfg.DATASET.TEST_DATASET](cfg)
        # Modify the dataset value here in data_loader.py
        test_data_loader = torch.utils.data.DataLoader(
            dataset=dataset_loader.get_dataset(
                utils.data_loaders.DatasetSubset.TEST),
            batch_size=1,
            num_workers=cfg.CONST.NUM_WORKERS,
            collate_fn=utils.data_loaders.collate_fn,
            pin_memory=True,
            shuffle=False)

    # Setup networks and initialize networks
    if grnet is None:
        grnet = GRNet(cfg)

        if torch.cuda.is_available():
            grnet = torch.nn.DataParallel(grnet).cuda()

        logging.info('Recovering from %s ...' % (cfg.CONST.WEIGHTS))
        checkpoint = torch.load(cfg.CONST.WEIGHTS)
        grnet.load_state_dict(checkpoint['grnet'])

    # Switch models to evaluation mode
    grnet.eval()

    # Set up loss functions
    chamfer_dist = ChamferDistance()
    gridding_loss = GriddingLoss(
        scales=cfg.NETWORK.GRIDDING_LOSS_SCALES,
        alphas=cfg.NETWORK.GRIDDING_LOSS_ALPHAS)  # lgtm [py/unused-import]

    # Testing loop
    n_samples = len(test_data_loader)
    test_losses = AverageMeter(['SparseLoss', 'DenseLoss'])
    # test_losses = AverageMeter(['GridLoss', 'DenseLoss'])
    test_metrics = AverageMeter(Metrics.names())  # F-score, CD
    category_metrics = dict()

    # Testing loop
    # Obtain sparse_ptcloud from data; data comes from test_data_loader

    tot_recall, tot_precision, tot_emd = 0.0, 0.0, 0.0
    tot_shapes = 0

    score_dict = {}

    for model_idx, (taxonomy_id, model_id,
                    data) in enumerate(test_data_loader):
        taxonomy_id = taxonomy_id[0] if isinstance(
            taxonomy_id[0], str) else taxonomy_id[0].item()
        model_id = model_id[0]

        with torch.no_grad():
            for k, v in data.items():
                data[k] = utils.helpers.var_or_cuda(v)

            sparse_ptcloud, dense_ptcloud = grnet(data)
            # print('--------dense: ', type(dense_ptcloud), dense_ptcloud.shape)
            # print('--------gt: ', type(data['gtcloud']), data['gtcloud'].shape)
            sparse_loss = chamfer_dist(sparse_ptcloud, data['gtcloud'])
            # grid_loss = gridding_loss(dense_ptcloud, data['gtcloud'])
            dense_loss = chamfer_dist(dense_ptcloud, data['gtcloud'])

            # F-score
            fscore_pred = o3d.geometry.PointCloud()
            # print(type(dense_ptcloud))
            # print(dense_ptcloud.shape)
            # print(data['gtcloud'].shape)
            # print(type(data['gtcloud']))
            fscore_pred.points = o3d.utility.Vector3dVector(
                np.array(dense_ptcloud.squeeze().cpu().detach().numpy()))
            fscore_gt = o3d.geometry.PointCloud()
            fscore_gt.points = o3d.utility.Vector3dVector(
                data['gtcloud'].squeeze().cpu().detach().numpy())

            dist1 = fscore_pred.compute_point_cloud_distance(fscore_gt)
            dist2 = fscore_gt.compute_point_cloud_distance(fscore_pred)

            th = 0.01
            recall = float(sum(d < th for d in dist2)) / float(len(dist2))
            precision = float(sum(d < th for d in dist1)) / float(len(dist1))
            tot_recall += recall
            tot_precision += precision

            # Compute EMD
            # dense_pts = np.array(dense_ptcloud.cpu())
            # num_points = dense_pts.shape[1]
            # EMD_loss = earth_mover_distance(dense_ptcloud, data['gtcloud'], transpose=False) / num_points
            # EMD_loss = EMD_loss.mean().item()
            # tot_emd += EMD_loss

            tot_shapes += 1

            # print('dense_pc: ', dense_ptcloud.shape,  type(dense_ptcloud))
            test_losses.update(
                [sparse_loss.item() * 1000,
                 dense_loss.item() * 1000])
            # test_losses.update([grid_loss.item() * 1000, dense_loss.item() * 1000])
            _metrics = Metrics.get(dense_ptcloud,
                                   data['gtcloud'])  # return: values
            test_metrics.update(_metrics)

            if taxonomy_id not in category_metrics:
                category_metrics[taxonomy_id] = AverageMeter(Metrics.names())
            category_metrics[taxonomy_id].update(_metrics)

            # No need to save outputs during training
            # Save npz files
            '''
            save_path = '/home2/wuruihai/GRNet_FILES/Results/Completion3D_grnet_chair_ep300_npz_16384d/'
            save_path2 = '/home2/wuruihai/GRNet_FILES/Results/Completion3D_grent_chair_ep300_npz_2048d/'

            part_name = 'part_7'

            # Only the final results (dense_ptcloud) are saved
            save_npz_path = save_path + part_name + '/'
            save_npz_path2 = save_path2 + part_name + '/'
            if not os.path.exists(save_npz_path):
                os.makedirs(save_npz_path)
            if not os.path.exists(save_npz_path2):
                os.makedirs(save_npz_path2)

            dense_pts = np.array(dense_ptcloud.cpu())
            dense_pts2 = rescale_pc_parts(dense_pts, 2048) # rescale
            dense_pts /= 0.45  # scale back up to our original size
            dense_pts2 /= 0.45
            np.savez(save_npz_path + '%s.npz' % model_id, pts = dense_pts)
            np.savez(save_npz_path2 + '%s.npz' % model_id, pts = dense_pts2)
            '''

            # Save npz (GRNet's data), Completion3D, without parts

            # save_path = '/home2/wuruihai/GRNet_FILES/Results/ShapeNet_grnet_pretrained_model_VAL_npz/'
            # if not os.path.exists(save_path):
            #     os.makedirs(save_path)
            # dense_pts = np.array(dense_ptcloud.cpu())
            # np.savez(save_path + '%s.npz' % model_id, pts=dense_pts)

            # Save scores to a txt file

            dense_pts = np.array(dense_ptcloud.cpu())
            CD_loss = dense_loss.item()

            num_points = dense_pts.shape[1]
            EMD_loss = earth_mover_distance(
                dense_ptcloud, data['gtcloud'], transpose=False) / num_points
            EMD_loss = EMD_loss.mean().item()

            fscore = 2 * recall * precision / (
                recall + precision) if recall + precision else 0

            score_dict[model_id] = (CD_loss, EMD_loss, precision, recall,
                                    fscore)
            # print(score_dict)

            # Save png images
            '''
            save_path = '/home2/wuruihai/GRNet_FILES/Results/Completion3D_GRNet_1003/'
            if not os.path.exists(save_path):
                os.makedirs(save_path)

            plt.figure()


            pc_ptcloud = data['partial_cloud'].squeeze().cpu().numpy()
            pc_ptcloud_img = utils.helpers.get_ptcloud_img(pc_ptcloud)
            matplotlib.image.imsave(save_path + '%s_1_pc.png' % model_id,
                                    pc_ptcloud_img)
        
            
            # sparse_ptcloud = sparse_ptcloud.squeeze().cpu().numpy()
            # sparse_ptcloud_img = utils.helpers.get_ptcloud_img(sparse_ptcloud)
            # matplotlib.image.imsave(save_path+'%s_sps.png' % model_id,
            #                         sparse_ptcloud_img)
            

            dense_ptcloud = dense_ptcloud.squeeze().cpu().numpy()
            dense_ptcloud_img = utils.helpers.get_ptcloud_img(dense_ptcloud)
            matplotlib.image.imsave(save_path + '%s_2_dns.png' % model_id,
                                    dense_ptcloud_img)

            
            gt_ptcloud = data['gtcloud'].squeeze().cpu().numpy()
            gt_ptcloud_img = utils.helpers.get_ptcloud_img(gt_ptcloud)
            matplotlib.image.imsave(save_path+'%s_3_gt.png' % model_id,
                                    gt_ptcloud_img)
            '''
            '''
            if model_idx in range(510, 600):

                now_num=model_idx-499
                # if test_writer is not None and model_idx < 3:
                # sparse_ptcloud = sparse_ptcloud.squeeze().cpu().numpy()
                sparse_ptcloud = sparse_ptcloud.squeeze().numpy()
                sparse_ptcloud_img = utils.helpers.get_ptcloud_img(sparse_ptcloud)
                matplotlib.image.imsave('/home2/wuruihai/GRNet_FILES/results2/%s_%s_sps.png'%(model_idx,model_id), sparse_ptcloud_img)

                # dense_ptcloud = dense_ptcloud.squeeze().cpu().numpy()
                dense_ptcloud = dense_ptcloud.squeeze().numpy()
                dense_ptcloud_img = utils.helpers.get_ptcloud_img(dense_ptcloud)
                matplotlib.image.imsave('/home2/wuruihai/GRNet_FILES/results2/%s_%s_dns.png' % (model_idx, model_id),
                                        dense_ptcloud_img)


                # gt_ptcloud = data['gtcloud'].squeeze().cpu().numpy()
                gt_ptcloud = data['gtcloud'].squeeze().numpy()
                gt_ptcloud_img = utils.helpers.get_ptcloud_img(gt_ptcloud)
                matplotlib.image.imsave('/home2/wuruihai/GRNet_FILES/results2/%s_%s_gt.png'%(model_idx,model_id), gt_ptcloud_img)

                cv.imwrite("/home2/wuruihai/GRNet_FILES/out3.png", sparse_ptcloud_img)
                im = Image.fromarray(sparse_ptcloud_img).convert('RGB')
                im.save("/home2/wuruihai/GRNet_FILES/out.jpeg")
            
                test_writer.add_image('Model%02d/SparseReconstruction' % model_idx, sparse_ptcloud_img, epoch_idx)
                dense_ptcloud = dense_ptcloud.squeeze().cpu().numpy()
                dense_ptcloud_img = utils.helpers.get_ptcloud_img(dense_ptcloud)
                test_writer.add_image('Model%02d/DenseReconstruction' % model_idx, dense_ptcloud_img, epoch_idx)

                gt_ptcloud = data['gtcloud'].squeeze().cpu().numpy()
                gt_ptcloud_img = utils.helpers.get_ptcloud_img(gt_ptcloud)
                test_writer.add_image('Model%02d/GroundTruth' % model_idx, gt_ptcloud_img, epoch_idx)
            '''

            logging.info(
                'Test[%d/%d] Taxonomy = %s Sample = %s Losses = %s Metrics = %s'
                %
                (model_idx + 1, n_samples, taxonomy_id, model_id,
                 ['%.4f' % l
                  for l in test_losses.val()], ['%.4f' % m for m in _metrics]))
    # Save before showing: calling savefig after plt.show() can write an empty figure.
    plt.savefig('/raid/wuruihai/GRNet_FILES/results.png')
    plt.show()
    # Print testing results
    print(
        '============================ TEST RESULTS ============================'
    )
    print('Taxonomy', end='\t')
    print('#Sample', end='\t')
    for metric in test_metrics.items:
        print(metric, end='\t')
    print()

    # Save CD and EMD to a txt file
    # print(score_dict)
    # fname = '/home2/wuruihai/GRNet_FILES/Results/ShapeNet_grnet_pretrained_model_VAL_scores.txt'
    # fw = open(fname, 'w')
    # # print(score_dict)
    # for idx in score_dict.keys():
    #     fw.write('%s\t%s\t%s\t%s\t%s\t%s\n' % (idx, score_dict[idx][0], score_dict[idx][1], score_dict[idx][2], score_dict[idx][3], score_dict[idx][4]))  # model_id \t CD \t EMD

    for taxonomy_id in category_metrics:
        print(taxonomy_id, end='\t')
        print(category_metrics[taxonomy_id].count(0), end='\t')
        for value in category_metrics[taxonomy_id].avg():
            print('%.4f' % value, end='\t')
        print()

    print('Overall', end='\t\t\t')
    for value in test_metrics.avg():
        print('%.4f' % value, end='\t')
    print('\n')

    print('recall: ', tot_recall / tot_shapes)
    print('precision: ', tot_precision / tot_shapes)
    # print('EMD: ', tot_emd / tot_shapes)

    # Add testing results to TensorBoard
    if test_writer is not None:
        # test_writer.add_scalar('Loss/Epoch/Sparse', test_losses.avg(0), epoch_idx)
        test_writer.add_scalar('Loss/Epoch/Grid', test_losses.avg(0),
                               epoch_idx)
        test_writer.add_scalar('Loss/Epoch/Dense', test_losses.avg(1),
                               epoch_idx)
        for i, metric in enumerate(test_metrics.items):
            test_writer.add_scalar('Metric/%s' % metric, test_metrics.avg(i),
                                   epoch_idx)

    return Metrics(cfg.TEST.METRIC_NAME, test_metrics.avg())
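
Example 2 derives the F-score from Open3D nearest-neighbour distances computed in both directions. A minimal sketch of that computation as a stand-alone helper, assuming (N, 3) NumPy arrays and the same 0.01 threshold used above:

import numpy as np
import open3d as o3d

def point_cloud_fscore(pred_pts, gt_pts, threshold=0.01):
    # pred_pts, gt_pts: (N, 3) NumPy arrays.
    pred = o3d.geometry.PointCloud()
    pred.points = o3d.utility.Vector3dVector(pred_pts)
    gt = o3d.geometry.PointCloud()
    gt.points = o3d.utility.Vector3dVector(gt_pts)

    dist_pred_to_gt = np.asarray(pred.compute_point_cloud_distance(gt))  # drives precision
    dist_gt_to_pred = np.asarray(gt.compute_point_cloud_distance(pred))  # drives recall

    precision = float(np.mean(dist_pred_to_gt < threshold))
    recall = float(np.mean(dist_gt_to_pred < threshold))
    fscore = 2 * precision * recall / (precision + recall) if precision + recall else 0.0
    return precision, recall, fscore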
Example 3
def test_net(cfg,
             epoch_idx=-1,
             test_data_loader=None,
             test_writer=None,
             grnet=None):
    # Enable the inbuilt cudnn auto-tuner to find the best algorithm to use
    torch.backends.cudnn.benchmark = True

    if test_data_loader is None:
        # Set up data loader
        dataset_loader = utils.data_loaders.DATASET_LOADER_MAPPING[
            cfg.DATASET.TEST_DATASET](cfg)
        test_data_loader = torch.utils.data.DataLoader(
            dataset=dataset_loader.get_dataset(
                utils.data_loaders.DatasetSubset.TEST),
            batch_size=1,
            num_workers=cfg.CONST.NUM_WORKERS,
            collate_fn=utils.data_loaders.collate_fn,
            pin_memory=True,
            shuffle=False)

    # Setup networks and initialize networks
    if grnet is None:
        grnet = GRNet(cfg)

        if torch.cuda.is_available():
            grnet = torch.nn.DataParallel(grnet).cuda()

        logging.info('Recovering from %s ...' % (cfg.CONST.WEIGHTS))
        checkpoint = torch.load(cfg.CONST.WEIGHTS)
        grnet.load_state_dict(checkpoint['grnet'])

    # Switch models to evaluation mode
    grnet.eval()

    # Set up loss functions
    chamfer_dist = ChamferDistance()
    gridding_loss = GriddingLoss(
        scales=cfg.NETWORK.GRIDDING_LOSS_SCALES,
        alphas=cfg.NETWORK.GRIDDING_LOSS_ALPHAS)  # lgtm [py/unused-import]

    # Testing loop
    n_samples = len(test_data_loader)
    test_losses = AverageMeter(['SparseLoss', 'DenseLoss'])
    test_metrics = AverageMeter(Metrics.names())
    category_metrics = dict()

    # Testing loop
    for model_idx, (taxonomy_id, model_id,
                    data) in enumerate(test_data_loader):
        taxonomy_id = taxonomy_id[0] if isinstance(
            taxonomy_id[0], str) else taxonomy_id[0].item()
        model_id = model_id[0]

        with torch.no_grad():
            for k, v in data.items():
                data[k] = utils.helpers.var_or_cuda(v)

            sparse_ptcloud, dense_ptcloud = grnet(data)
            sparse_loss = chamfer_dist(sparse_ptcloud, data['gtcloud'])
            dense_loss = chamfer_dist(dense_ptcloud, data['gtcloud'])
            test_losses.update(
                [sparse_loss.item() * 1000,
                 dense_loss.item() * 1000])
            _metrics = Metrics.get(dense_ptcloud, data['gtcloud'])
            test_metrics.update(_metrics)

            # save predicted point cloud
            if cfg.TEST.SAVE_PRED:
                if cfg.DATASET.TEST_DATASET == 'FrankaScan':
                    dirname, obj_idx = model_id.split('-')
                    out_ptcloud = dense_ptcloud[0].cpu()
                    IO.put(
                        cfg.DATASETS.FRANKASCAN.PREDICTION_PATH %
                        (dirname, obj_idx), out_ptcloud)

            if taxonomy_id not in category_metrics:
                category_metrics[taxonomy_id] = AverageMeter(Metrics.names())
            category_metrics[taxonomy_id].update(_metrics)

            if test_writer is not None and model_idx < 3:
                sparse_ptcloud = sparse_ptcloud.squeeze().cpu().numpy()
                sparse_ptcloud_img = utils.helpers.get_ptcloud_img(
                    sparse_ptcloud)
                test_writer.add_image(
                    'Model%02d/SparseReconstruction' % model_idx,
                    sparse_ptcloud_img, epoch_idx)
                dense_ptcloud = dense_ptcloud.squeeze().cpu().numpy()
                dense_ptcloud_img = utils.helpers.get_ptcloud_img(
                    dense_ptcloud)
                test_writer.add_image(
                    'Model%02d/DenseReconstruction' % model_idx,
                    dense_ptcloud_img, epoch_idx)
                gt_ptcloud = data['gtcloud'].squeeze().cpu().numpy()
                gt_ptcloud_img = utils.helpers.get_ptcloud_img(gt_ptcloud)
                test_writer.add_image('Model%02d/GroundTruth' % model_idx,
                                      gt_ptcloud_img, epoch_idx)

            logging.info(
                'Test[%d/%d] Taxonomy = %s Sample = %s Losses = %s Metrics = %s'
                %
                (model_idx + 1, n_samples, taxonomy_id, model_id,
                 ['%.4f' % l
                  for l in test_losses.val()], ['%.4f' % m for m in _metrics]))

    # Print testing results
    print(
        '============================ TEST RESULTS ============================'
    )
    print('Taxonomy', end='\t')
    print('#Sample', end='\t')
    for metric in test_metrics.items:
        print(metric, end='\t')
    print()

    for taxonomy_id in category_metrics:
        print(taxonomy_id, end='\t')
        print(category_metrics[taxonomy_id].count(0), end='\t')
        for value in category_metrics[taxonomy_id].avg():
            print('%.4f' % value, end='\t')
        print()

    print('Overall', end='\t\t\t')
    for value in test_metrics.avg():
        print('%.4f' % value, end='\t')
    print('\n')

    # Add testing results to TensorBoard
    if test_writer is not None:
        test_writer.add_scalar('Loss/Epoch/Sparse', test_losses.avg(0),
                               epoch_idx)
        test_writer.add_scalar('Loss/Epoch/Dense', test_losses.avg(1),
                               epoch_idx)
        for i, metric in enumerate(test_metrics.items):
            test_writer.add_scalar('Metric/%s' % metric, test_metrics.avg(i),
                                   epoch_idx)

    return Metrics(cfg.TEST.METRIC_NAME, test_metrics.avg())
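
When cfg.TEST.SAVE_PRED is enabled, this example writes the dense prediction through the project's IO.put helper and a path template from the config. A rough stand-in using plain NumPy, purely illustrative (the .npy format and helper name are assumptions, not the project's actual I/O layer):

import os
import numpy as np

def save_dense_prediction(dense_ptcloud, out_path):
    # Hypothetical replacement for IO.put: dump the first (and, with
    # batch_size=1, only) predicted cloud as an (N, 3) .npy file.
    os.makedirs(os.path.dirname(out_path), exist_ok=True)
    np.save(out_path, dense_ptcloud[0].detach().cpu().numpy())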
Example 4
File: test.py Project: wx-b/RMNet
def test_net(cfg,
             epoch_idx=-1,
             test_data_loader=None,
             test_writer=None,
             tflownet=None,
             rmnet=None):
    # Set up data loader
    if test_data_loader is None:
        # Set up data loader
        test_data_loader = torch.utils.data.DataLoader(
            dataset=utils.data_loaders.DatasetCollector.get_dataset(
                cfg, cfg.DATASET.TEST_DATASET,
                utils.data_loaders.DatasetSubset.VAL),
            batch_size=1,
            num_workers=cfg.CONST.N_WORKERS,
            pin_memory=True,
            shuffle=False)

    # Setup networks and initialize networks
    if rmnet is None:
        tflownet = TinyFlowNet(cfg)
        rmnet = RMNet(cfg)

        if torch.cuda.is_available():
            tflownet = torch.nn.DataParallel(tflownet).cuda()
            rmnet = torch.nn.DataParallel(rmnet).cuda()

        logging.info('Recovering from %s ...' % (cfg.CONST.WEIGHTS))
        checkpoint = torch.load(cfg.CONST.WEIGHTS)
        tflownet.load_state_dict(checkpoint['tflownet'])
        rmnet.load_state_dict(checkpoint['rmnet'])

    # Switch models to evaluation mode
    tflownet.eval()
    rmnet.eval()

    # Set up loss functions
    l1_loss = torch.nn.L1Loss()
    nll_loss = torch.nn.NLLLoss(ignore_index=cfg.CONST.IGNORE_IDX)
    lovasz_loss = LovaszLoss(ignore_index=cfg.CONST.IGNORE_IDX)

    # The testing loop
    n_videos = len(test_data_loader)
    test_losses = AverageMeter()
    test_metrics = AverageMeter(Metrics.names())

    for idx, (video_name, n_objects, frames, masks,
              optical_flows) in enumerate(test_data_loader):
        # Test only selected videos to accelerate the testing process
        if epoch_idx != -1 and idx not in cfg.TEST.TESTING_VIDEOS_INDEXES:
            continue

        with torch.no_grad():
            # Fix Assertion Error:  all(map(lambda i: i.is_cuda, inputs))
            if torch.cuda.device_count() > 1:
                frames = utils.helpers.var_or_cuda(frames)
                masks = utils.helpers.var_or_cuda(masks)
                optical_flows = utils.helpers.var_or_cuda(optical_flows)

            # Fix bugs: OOM error for large videos
            try:
                if epoch_idx == -1:
                    est_flows, est_probs = utils.helpers.multi_scale_inference(
                        cfg, tflownet, rmnet, frames, masks, n_objects)
                else:
                    est_flows = tflownet(frames)
                    est_probs = rmnet(frames, masks, est_flows, n_objects,
                                      cfg.TEST.MEMORIZE_EVERY)

                est_probs = est_probs.permute(0, 2, 1, 3, 4)
                masks = torch.argmax(masks, dim=2)
                est_masks = torch.argmax(est_probs, dim=1)

                if cfg.TRAIN.NETWORK == 'TinyFlowNet':
                    loss = l1_loss(est_flows, optical_flows)
                else:  # RMNet
                    loss = lovasz_loss(est_probs, masks) + nll_loss(
                        torch.log(est_probs), masks)

            except Exception as ex:
                logging.exception(ex)
                continue

            test_losses.update(loss.item())
            metrics = Metrics.get(est_masks[0], masks[0])
            test_metrics.update(metrics, torch.max(n_objects[0]).item())

            video_name = video_name[0]
            if test_writer is not None and idx < 3 and cfg.TEST.VISUALIZE_EVERY > 0:
                frames = frames[0]
                n_frames = est_masks.size(1)

                for i in tqdm(range(0, n_frames, cfg.TEST.VISUALIZE_EVERY),
                              leave=False,
                              desc=video_name):
                    est_segmentation = utils.helpers.get_segmentation(
                        frames[i], est_masks[0][i], {
                            'mean': cfg.CONST.DATASET_MEAN,
                            'std': cfg.CONST.DATASET_STD,
                        }, cfg.CONST.IGNORE_IDX)
                    gt_segmentation = utils.helpers.get_segmentation(
                        frames[i], masks[0][i], {
                            'mean': cfg.CONST.DATASET_MEAN,
                            'std': cfg.CONST.DATASET_STD,
                        }, cfg.CONST.IGNORE_IDX)
                    test_writer.add_image(
                        '%s/Frame%03d' % (video_name, i),
                        np.concatenate((est_segmentation, gt_segmentation),
                                       axis=0), epoch_idx)

            logging.info(
                'Test[%d/%d] VideoName = %s Loss = %.4f Metrics = %s' %
                (idx + 1, n_videos, video_name, loss,
                 ['%.4f' % m for m in metrics]))

    # Print testing results
    logging.info(
        '[Test Summary] Loss = %.4f Metrics = %s' %
        (test_losses.avg(), ['%.4f' % tm for tm in test_metrics.avg()]))

    # Add testing results to TensorBoard
    if test_writer is not None:
        test_writer.add_scalar('Loss/Epoch', test_losses.avg(), epoch_idx)
        for i, metric in enumerate(test_metrics.items):
            test_writer.add_scalar('Metric/%s' % metric, test_metrics.avg(i),
                                   epoch_idx)

    return Metrics(cfg.TEST.MAIN_METRIC_NAME, test_metrics.avg())
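
Both RMNet examples decode the network's per-object probability volume into label masks with a permute followed by an argmax over the object dimension. A tiny shape-level sketch of that step (the tensor sizes are illustrative only):

import torch

# Illustrative sizes: batch of 1, 4 frames, 3 object channels, 64x64 resolution.
est_probs = torch.rand(1, 4, 3, 64, 64)       # (B, T, K, H, W) as produced by the network
est_probs = est_probs.permute(0, 2, 1, 3, 4)  # -> (B, K, T, H, W)
est_masks = torch.argmax(est_probs, dim=1)    # -> (B, T, H, W) per-pixel object indices
print(est_masks.shape)                        # torch.Size([1, 4, 64, 64])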
Example 5
def test_net(cfg, epoch_idx=-1, test_data_loader=None, test_writer=None, grnet=None):
    # Enable the inbuilt cudnn auto-tuner to find the best algorithm to use
    torch.backends.cudnn.benchmark = True

    if test_data_loader is None:
        # Set up data loader
        dataset_loader = utils.data_loaders.DATASET_LOADER_MAPPING[cfg.DATASET.TEST_DATASET](cfg)
        # Modify the dataset value here in data_loader.py
        test_data_loader = torch.utils.data.DataLoader(dataset=dataset_loader.get_dataset(
            utils.data_loaders.DatasetSubset.VAL),
                                                       batch_size=1,
                                                       num_workers=cfg.CONST.NUM_WORKERS,
                                                       collate_fn=utils.data_loaders.collate_fn,
                                                       pin_memory=True,
                                                       shuffle=False)

    # Setup networks and initialize networks
    if grnet is None:
        grnet = GRNet(cfg)

        if torch.cuda.is_available():
            grnet = torch.nn.DataParallel(grnet).cuda()

        logging.info('Recovering from %s ...' % (cfg.CONST.WEIGHTS))
        checkpoint = torch.load(cfg.CONST.WEIGHTS)
        grnet.load_state_dict(checkpoint['grnet'])

    # Switch models to evaluation mode
    grnet.eval()

    # Set up loss functions
    chamfer_dist = ChamferDistance()
    gridding_loss = GriddingLoss(scales=cfg.NETWORK.GRIDDING_LOSS_SCALES,
                                 alphas=cfg.NETWORK.GRIDDING_LOSS_ALPHAS)    # lgtm [py/unused-import]

    # Testing loop
    n_samples = len(test_data_loader)
    test_losses = AverageMeter(['SparseLoss', 'DenseLoss'])
    # test_losses = AverageMeter(['GridLoss', 'DenseLoss'])
    test_metrics = AverageMeter(Metrics.names())  # F-score, CD
    category_metrics = dict()


    # Testing loop
    # Obtain sparse_ptcloud from data; data comes from test_data_loader

    '''
    gt_path = '/raid/wuruihai/GRNet_FILES/xkh/Completion3D/val/gt/03001627/'
    # pred_path = '/raid/wuruihai/GRNet_FILES/Results/Completion3D_grnet_data_ep300_npz_2048d/'  # 0.0033
    pred_path = '/raid/wuruihai/GRNet_FILES/Results/Completion3D_grnet_alldata_ep300_npz_small_2048d/'   # 0.0030
    n_points = 2048
    for root, dirs, files in os.walk(pred_path):
        len_files = len(files)
        pred_batch = np.zeros((1, n_points, 3))
        gt_batch = np.zeros((1, n_points, 3))
        idx = -1
        tot = 0

        for file in files:
            file_id = os.path.splitext(file)[0]
            idx += 1

            pred = np.load(pred_path + file)['pts']
            # pred = rescale_pc_parts(pred, num_points=n_points)
            # pred = pred.reshape(n_points, 3)
            pred_batch[0] = pred

            gt = h5py.File(gt_path + file_id + '.h5', 'r')['data'][:]  # Completion3D
            gt = np.array(gt).astype(np.float32)
            # gt = rescale_pc_parts(gt, num_points=n_points)
            # gt = gt.reshape(n_points, 3)
            gt_batch[0] = gt


            with torch.no_grad():
                cd = chamfer_dist(torch.tensor(pred_batch, dtype=torch.float32).cuda(), torch.tensor(gt_batch, dtype=torch.float32).cuda())
            print(cd)
            tot += cd
        print('avg: ', tot/len_files)

        return
    '''



    for model_idx, (taxonomy_id, model_id, data) in enumerate(test_data_loader):
        taxonomy_id = taxonomy_id[0] if isinstance(taxonomy_id[0], str) else taxonomy_id[0].item()
        model_id = model_id[0]

        with torch.no_grad():
            for k, v in data.items():
                data[k] = utils.helpers.var_or_cuda(v)



            sparse_ptcloud, dense_ptcloud = grnet(data)
            sparse_loss = chamfer_dist(sparse_ptcloud, data['gtcloud'])
            # grid_loss = gridding_loss(dense_ptcloud, data['gtcloud'])
            dense_loss = chamfer_dist(dense_ptcloud, data['gtcloud'])
            print(dense_ptcloud.shape, data['gtcloud'].shape)
            test_losses.update([sparse_loss.item() * 1000, dense_loss.item() * 1000])
            # test_losses.update([grid_loss.item() * 1000, dense_loss.item() * 1000])
            _metrics = Metrics.get(dense_ptcloud, data['gtcloud']) # return: values
            test_metrics.update(_metrics)

            if taxonomy_id not in category_metrics:
                category_metrics[taxonomy_id] = AverageMeter(Metrics.names())
            category_metrics[taxonomy_id].update(_metrics)


            # No need to save outputs during training
            # Save npz files

            '''
            save_path = '/home2/wuruihai/GRNet_FILES/Results/Completion3D_grnet_chair_ep300_npz_16384d/'
            save_path2 = '/home2/wuruihai/GRNet_FILES/Results/Completion3D_grent_chair_ep300_npz_2048d/'

            part_name = 'part_7'

            # Only the final results (dense_ptcloud) are saved
            save_npz_path = save_path + part_name + '/'
            save_npz_path2 = save_path2 + part_name + '/'
            if not os.path.exists(save_npz_path):
                os.makedirs(save_npz_path)
            if not os.path.exists(save_npz_path2):
                os.makedirs(save_npz_path2)

            dense_pts = np.array(dense_ptcloud.cpu())
            dense_pts2 = rescale_pc_parts(dense_pts, 2048) # rescale
            dense_pts /= 0.45  # scale back up to our original size
            dense_pts2 /= 0.45
            np.savez(save_npz_path + '%s.npz' % model_id, pts = dense_pts)
            np.savez(save_npz_path2 + '%s.npz' % model_id, pts = dense_pts2)
            '''



            # Save npz (GRNet's data), Completion3D, without parts
            # Comparing against GRNet's own dataset, so no need to scale up (/0.45)

            '''
            save_path = '/home2/wuruihai/GRNet_FILES/Results/Completion3D_grnet_alldata_ep300_npz_small_16384d/'
            save_path2 = '/home2/wuruihai/GRNet_FILES/Results/Completion3D_grnet_alldata_ep300_npz_small_2048d/'
            if not os.path.exists(save_path):
                os.makedirs(save_path)
            if not os.path.exists(save_path2):
                os.makedirs(save_path2)
            dense_pts = np.array(dense_ptcloud.cpu())
            dense_pts2 = rescale_pc_parts(dense_pts, 2048)  # rescale
            np.savez(save_path + '%s.npz' % model_id, pts=dense_pts)
            np.savez(save_path2 + '%s.npz' % model_id, pts = dense_pts2)
            '''




            # Save png images
            '''
            save_path = '/home2/wuruihai/GRNet_FILES/Results/ShapeNet_zy_chair_ep500_part0_16384d_png/'
            if not os.path.exists(save_path):
                os.makedirs(save_path)

            plt.figure()


            pc_ptcloud = data['partial_cloud'].squeeze().cpu().numpy()
            pc_ptcloud_img = utils.helpers.get_ptcloud_img(pc_ptcloud)
            matplotlib.image.imsave(save_path + '%s_1_pc.png' % model_id,
                                    pc_ptcloud_img)
        
            
            # sparse_ptcloud = sparse_ptcloud.squeeze().cpu().numpy()
            # sparse_ptcloud_img = utils.helpers.get_ptcloud_img(sparse_ptcloud)
            # matplotlib.image.imsave(save_path+'%s_sps.png' % model_id,
            #                         sparse_ptcloud_img)
            

            dense_ptcloud = dense_ptcloud.squeeze().cpu().numpy()
            dense_ptcloud_img = utils.helpers.get_ptcloud_img(dense_ptcloud)
            matplotlib.image.imsave(save_path+'%s_2_dns.png' % model_id,
                                    dense_ptcloud_img)

            
            gt_ptcloud = data['gtcloud'].squeeze().cpu().numpy()
            gt_ptcloud_img = utils.helpers.get_ptcloud_img(gt_ptcloud)
            matplotlib.image.imsave(save_path+'%s_3_gt.png' % model_id,
                                    gt_ptcloud_img)
            '''




            '''
            if model_idx in range(510, 600):

                now_num=model_idx-499
                # if test_writer is not None and model_idx < 3:
                # sparse_ptcloud = sparse_ptcloud.squeeze().cpu().numpy()
                sparse_ptcloud = sparse_ptcloud.squeeze().numpy()
                sparse_ptcloud_img = utils.helpers.get_ptcloud_img(sparse_ptcloud)
                matplotlib.image.imsave('/home2/wuruihai/GRNet_FILES/results2/%s_%s_sps.png'%(model_idx,model_id), sparse_ptcloud_img)

                # dense_ptcloud = dense_ptcloud.squeeze().cpu().numpy()
                dense_ptcloud = dense_ptcloud.squeeze().numpy()
                dense_ptcloud_img = utils.helpers.get_ptcloud_img(dense_ptcloud)
                matplotlib.image.imsave('/home2/wuruihai/GRNet_FILES/results2/%s_%s_dns.png' % (model_idx, model_id),
                                        dense_ptcloud_img)


                # gt_ptcloud = data['gtcloud'].squeeze().cpu().numpy()
                gt_ptcloud = data['gtcloud'].squeeze().numpy()
                gt_ptcloud_img = utils.helpers.get_ptcloud_img(gt_ptcloud)
                matplotlib.image.imsave('/home2/wuruihai/GRNet_FILES/results2/%s_%s_gt.png'%(model_idx,model_id), gt_ptcloud_img)

                cv.imwrite("/home2/wuruihai/GRNet_FILES/out3.png", sparse_ptcloud_img)
                im = Image.fromarray(sparse_ptcloud_img).convert('RGB')
                im.save("/home2/wuruihai/GRNet_FILES/out.jpeg")
            
                test_writer.add_image('Model%02d/SparseReconstruction' % model_idx, sparse_ptcloud_img, epoch_idx)
                dense_ptcloud = dense_ptcloud.squeeze().cpu().numpy()
                dense_ptcloud_img = utils.helpers.get_ptcloud_img(dense_ptcloud)
                test_writer.add_image('Model%02d/DenseReconstruction' % model_idx, dense_ptcloud_img, epoch_idx)

                gt_ptcloud = data['gtcloud'].squeeze().cpu().numpy()
                gt_ptcloud_img = utils.helpers.get_ptcloud_img(gt_ptcloud)
                test_writer.add_image('Model%02d/GroundTruth' % model_idx, gt_ptcloud_img, epoch_idx)
            '''

            logging.info('Test[%d/%d] Taxonomy = %s Sample = %s Losses = %s Metrics = %s' %
                         (model_idx + 1, n_samples, taxonomy_id, model_id,
                          ['%.4f' % l for l in test_losses.val()],
                          ['%.4f' % m for m in _metrics]))
    # Save before showing: calling savefig after plt.show() can write an empty figure.
    plt.savefig('/raid/wuruihai/GRNet_FILES/results.png')
    plt.show()
    # Print testing results
    print('============================ TEST RESULTS ============================')
    print('Taxonomy', end='\t')
    print('#Sample', end='\t')
    for metric in test_metrics.items:
        print(metric, end='\t')
    print()

    for taxonomy_id in category_metrics:
        print(taxonomy_id, end='\t')
        print(category_metrics[taxonomy_id].count(0), end='\t')
        for value in category_metrics[taxonomy_id].avg():
            print('%.4f' % value, end='\t')
        print()

    print('Overall', end='\t\t\t')
    for value in test_metrics.avg():
        print('%.4f' % value, end='\t')
    print('\n')

    # Add testing results to TensorBoard
    if test_writer is not None:
        # test_writer.add_scalar('Loss/Epoch/Sparse', test_losses.avg(0), epoch_idx)
        test_writer.add_scalar('Loss/Epoch/Grid', test_losses.avg(0), epoch_idx)
        test_writer.add_scalar('Loss/Epoch/Dense', test_losses.avg(1), epoch_idx)
        for i, metric in enumerate(test_metrics.items):
            test_writer.add_scalar('Metric/%s' % metric, test_metrics.avg(i), epoch_idx)

    return Metrics(cfg.TEST.METRIC_NAME, test_metrics.avg())