def get_pred(batch_data):
    # NOTE: relies on names from the enclosing scope
    # (cfg, gpu, segmentation_module, writer, pbar).
    # process data
    batch_data = batch_data[0]
    segSize = (batch_data['img_ori'].shape[0],
               batch_data['img_ori'].shape[1])
    img_resized_list = batch_data['img_data']

    with torch.no_grad():
        scores = torch.zeros(1, cfg.DATASET.num_class, segSize[0], segSize[1])
        scores = async_copy_to(scores, gpu)

        for img in img_resized_list:
            feed_dict = batch_data.copy()
            feed_dict['img_data'] = img
            del feed_dict['img_ori']
            del feed_dict['info']
            feed_dict = async_copy_to(feed_dict, gpu)

            # forward pass
            pred_tmp = segmentation_module(feed_dict, segSize=segSize)
            scores = scores + pred_tmp / len(cfg.DATASET.imgSizes)

        _, pred = torch.max(scores, dim=1)
        writer.write(pred)
        pred = as_numpy(pred.squeeze(0).cpu())
    pbar.update(1)
    return pred
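All of these examples share the same multi-scale test-time inference pattern: each resized copy of the input is pushed through the network, the per-class score maps (already upsampled to segSize) are averaged over the scales in cfg.DATASET.imgSizes, and the per-pixel argmax gives the final label map. A minimal, self-contained sketch of just that averaging step, with a stand-in model callable (names here are illustrative, not from the repo):

import torch

def average_multiscale_scores(model, resized_imgs, num_class, seg_size):
    # Accumulate class scores from each scale; model is assumed to return
    # a [1, num_class, H, W] tensor already upsampled to seg_size.
    scores = torch.zeros(1, num_class, seg_size[0], seg_size[1])
    for img in resized_imgs:
        scores += model(img, seg_size) / len(resized_imgs)
    # Per-pixel argmax over the class dimension yields the label map.
    return scores.argmax(dim=1)  # shape: [1, H, W]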
Example #2
def test(segmentation_module, loader, gpu):
    segmentation_module.eval()

    pbar = tqdm(total=len(loader))
    for batch_data in loader:
        # process data
        batch_data = batch_data[0]
        segSize = (batch_data['img_ori'].shape[0],
                   batch_data['img_ori'].shape[1])
        img_resized_list = batch_data['img_data']

        with torch.no_grad():
            scores = torch.zeros(1, cfg.DATASET.num_class, segSize[0],
                                 segSize[1])
            scores = async_copy_to(scores, gpu)

            for img in img_resized_list:
                feed_dict = batch_data.copy()
                feed_dict['img_data'] = img
                del feed_dict['img_ori']
                del feed_dict['info']
                feed_dict = async_copy_to(feed_dict, gpu)

                # forward pass
                pred_tmp = segmentation_module(feed_dict, segSize=segSize)
                scores = scores + pred_tmp / len(cfg.DATASET.imgSizes)

            _, pred = torch.max(scores, dim=1)
            pred = as_numpy(pred.squeeze(0).cpu())

        # visualization
        visualize_result((batch_data['img_ori'], batch_data['info']), pred,
                         cfg)

        pbar.update(1)
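Each loop body starts with batch_data = batch_data[0] because these loaders use user_scattered_collate, which hands the sample list back uncollated, so a batch_size=1 batch arrives as a one-element list. A minimal equivalent of that collate function, sketched from its observed behavior here rather than copied from the library:

def user_scattered_collate(batch):
    # Return the sample list unchanged instead of stacking tensors,
    # so each sample keeps its own list of resized images.
    return batch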
Example #3
def huawei_seg(imgs, segmentation_module):
    cfg.merge_from_file("/home/mind/model/config/ade20k-hrnetv2-huawei.yaml")
    imgs = [imgs]
    cfg.list_test = [{'fpath_img': x} for x in imgs]
    # Dataset and Loader
    dataset_test = InferDataset(cfg.list_test, cfg.DATASET)
    loader_test = torch.utils.data.DataLoader(
        dataset_test,
        batch_size=1,
        shuffle=False,
        collate_fn=user_scattered_collate,
        num_workers=5,
        drop_last=True)

    segmentation_module.cuda()
    loader = loader_test
    # Main loop
    segmentation_module.eval()
    pbar = tqdm(total=len(loader))
    for batch_data in loader:
        # process data
        batch_data = batch_data[0]
        segSize = (batch_data['img_ori'].shape[0],
                   batch_data['img_ori'].shape[1])
        img_resized_list = batch_data['img_data']

        with torch.no_grad():
            scores = torch.zeros(1, cfg.DATASET.num_class, segSize[0],
                                 segSize[1])
            scores = async_copy_to(scores, 0)

            for img in img_resized_list:
                feed_dict = batch_data.copy()
                feed_dict['img_data'] = img
                del feed_dict['img_ori']
                del feed_dict['info']
                feed_dict = async_copy_to(feed_dict, 0)

                # forward pass
                pred_tmp = segmentation_module(feed_dict, segSize=segSize)
                # pred_tmp shape: [1, num_class, H, W], e.g. torch.Size([1, 2, 1024, 1024])
                scores = scores + pred_tmp / len(cfg.DATASET.imgSizes)

            _, pred = torch.max(scores, dim=1)
            pred = as_numpy(pred.squeeze(0).cpu())
        # visualize_result((batch_data['img_ori'], batch_data['info']), pred, cfg)
        pbar.update(1)
    return pred
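A possible call site for this variant, assuming a segmentation_module already built as in Example #6 below; the image path is illustrative. The function returns the (H, W) integer label map of the last processed image:

mask = huawei_seg('/path/to/image.png', segmentation_module)
print(mask.shape)  # (H, W), one class index per pixel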
Example #4
def evaluate(segmentation_module, loader, cfg, gpu_id, result_queue):
    segmentation_module.eval()

    for index, batch_data in enumerate(loader):
        # process data
        batch_data = batch_data[0]
        seg_label = as_numpy(batch_data['seg_label'][0])
        img_resized_list = batch_data['img_data']

        with torch.no_grad():
            segSize = (seg_label.shape[0], seg_label.shape[1])
            scores = torch.zeros(1, cfg.DATASET.num_class, segSize[0],
                                 segSize[1])
            scores = async_copy_to(scores, gpu_id)

            for img in img_resized_list:
                feed_dict = batch_data.copy()
                feed_dict['img_data'] = img
                del feed_dict['img_ori']
                del feed_dict['info']
                feed_dict = async_copy_to(feed_dict, gpu_id)

                # forward pass
                scores_tmp = segmentation_module(feed_dict, segSize=segSize)
                scores = scores + scores_tmp / len(cfg.DATASET.imgSizes)

            _, pred = torch.max(scores, dim=1)
            pred = as_numpy(pred.squeeze(0).cpu())

        # calculate accuracy and send the results to the master process
        acc, pix = accuracy(pred, seg_label)
        intersection, union = intersectionAndUnion(pred, seg_label,
                                                   cfg.DATASET.num_class)
        result_queue.put_nowait((acc, pix, intersection, union))

        # visualization
        if cfg.VAL.visualize:
            visualize_result(
                (batch_data['img_ori'], seg_label, batch_data['info']), pred,
                os.path.join(cfg.DIR, 'result'))
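This worker variant does not aggregate metrics itself; it pushes per-image tuples onto result_queue for a master process to reduce. A hypothetical consumer (not part of this listing) that reproduces the summary math of Example #7:

import numpy as np

def collect_results(result_queue, num_samples, num_class):
    # Reduce the (acc, pix, intersection, union) tuples sent by evaluate().
    acc_sum, pix_sum = 0.0, 0
    inter_sum = np.zeros(num_class)
    union_sum = np.zeros(num_class)
    for _ in range(num_samples):
        acc, pix, intersection, union = result_queue.get()
        acc_sum += acc * pix          # pixel-weighted accuracy, as AverageMeter does
        pix_sum += pix
        inter_sum += intersection
        union_sum += union
    iou = inter_sum / (union_sum + 1e-10)   # per-class IoU (cf. Example #7)
    return iou.mean(), acc_sum / pix_sum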
Example #5
def test(segmentation_module, loader, device, names):
    segmentation_module.eval()
    pbar = tqdm(total=len(loader))
    re = []  # per-image results (note: shadows the stdlib re module if imported)
    for batch_data in loader:
        # process data
        batch_data = batch_data[0]
        segSize = (batch_data['img_ori'].shape[0],
                   batch_data['img_ori'].shape[1])
        img_resized_list = batch_data['img_data']

        with torch.no_grad():
            scores = torch.zeros(1, cfg_ss.DATASET.num_class, segSize[0],
                                 segSize[1])
            if torch.cuda.is_available():
                scores = async_copy_to(scores, device)

            for img in img_resized_list:
                feed_dict = batch_data.copy()
                feed_dict['img_data'] = img
                del feed_dict['img_ori']
                del feed_dict['info']
                if torch.cuda.is_available():
                    feed_dict = async_copy_to(feed_dict, device)

                # forward pass
                pred_tmp = segmentation_module(feed_dict, segSize=segSize)
                scores = scores + pred_tmp / len(cfg_ss.DATASET.imgSizes)

            _, pred = torch.max(scores, dim=1)
            pred = as_numpy(pred.squeeze(0).cpu())
            pred = np.int32(pred)
            pixs = pred.size

            uniques, counts = np.unique(pred, return_counts=True)
            pred_ratios = {}
            for idx in np.argsort(counts)[::-1]:
                name = names[uniques[idx] + 1]
                ratio = counts[idx] / pixs * 100
                if ratio > 0.1:
                    pred_ratios[name] = ratio

            re.append({
                "original_img": batch_data['img_ori'],
                "img_inf": batch_data['info'],
                "pred_result": pred,
                "pred_ratio": pred_ratios
            })

        pbar.update(1)
    return re
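The returned list makes this variant easy to post-process; one way to consume it, assuming a ready segmentation_module, loader, device, and names as above:

results = test(segmentation_module, loader, device, names)
for r in results:
    print(r['img_inf'])                        # per-image info string
    for name, ratio in r['pred_ratio'].items():
        print('  %s: %.1f%%' % (name, ratio))  # class coverage in percent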
Example #6
def huawei_seg(imgs):

    parser = argparse.ArgumentParser(
        description="PyTorch Semantic Segmentation Testing")
    parser.add_argument(
        "--cfg",
        default="config/ade20k-hrnetv2-huawei.yaml",
        metavar="FILE",
        help="path to config file",
        type=str,
    )
    parser.add_argument("--gpu",
                        default=0,
                        type=int,
                        help="gpu id for evaluation")

    args = parser.parse_args()

    cfg.merge_from_file(args.cfg)

    cfg.MODEL.arch_encoder = cfg.MODEL.arch_encoder.lower()
    cfg.MODEL.arch_decoder = cfg.MODEL.arch_decoder.lower()

    # absolute paths of model weights
    cfg.MODEL.weights_encoder = os.path.join(cfg.DIR,
                                             'encoder_' + cfg.TEST.checkpoint)
    cfg.MODEL.weights_decoder = os.path.join(cfg.DIR,
                                             'decoder_' + cfg.TEST.checkpoint)
    imgs = [imgs]
    cfg.list_test = [{'fpath_img': x} for x in imgs]

    torch.cuda.set_device(args.gpu)

    # Network Builders
    net_encoder = ModelBuilder.build_encoder(arch=cfg.MODEL.arch_encoder,
                                             fc_dim=cfg.MODEL.fc_dim,
                                             weights=cfg.MODEL.weights_encoder)
    net_decoder = ModelBuilder.build_decoder(arch=cfg.MODEL.arch_decoder,
                                             fc_dim=cfg.MODEL.fc_dim,
                                             num_class=cfg.DATASET.num_class,
                                             weights=cfg.MODEL.weights_decoder,
                                             use_softmax=True)

    crit = nn.NLLLoss(ignore_index=-1)

    segmentation_module = SegmentationModule(net_encoder, net_decoder, crit)

    # Dataset and Loader
    dataset_test = InferDataset(cfg.list_test, cfg.DATASET)
    loader_test = torch.utils.data.DataLoader(
        dataset_test,
        batch_size=1,
        shuffle=False,
        collate_fn=user_scattered_collate,
        num_workers=5,
        drop_last=True)

    segmentation_module.cuda()
    loader = loader_test
    # Main loop
    segmentation_module.eval()
    pbar = tqdm(total=len(loader))
    for batch_data in loader:
        # process data
        batch_data = batch_data[0]
        segSize = (batch_data['img_ori'].shape[0],
                   batch_data['img_ori'].shape[1])
        img_resized_list = batch_data['img_data']

        with torch.no_grad():
            scores = torch.zeros(1, cfg.DATASET.num_class, segSize[0],
                                 segSize[1])
            scores = async_copy_to(scores, args.gpu)

            for img in img_resized_list:
                feed_dict = batch_data.copy()
                feed_dict['img_data'] = img
                del feed_dict['img_ori']
                del feed_dict['info']
                feed_dict = async_copy_to(feed_dict, args.gpu)

                # forward pass
                pred_tmp = segmentation_module(feed_dict, segSize=segSize)
                # pred_tmp shape: [1, num_class, H, W], e.g. torch.Size([1, 2, 1024, 1024])
                scores = scores + pred_tmp / len(cfg.DATASET.imgSizes)

            _, pred = torch.max(scores, dim=1)
            pred = as_numpy(pred.squeeze(0).cpu())
        # visualization
        visualize_result((batch_data['img_ori'], batch_data['info']), pred,
                         cfg)
        pbar.update(1)
    return pred
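Note that this variant parses sys.argv internally, so any extra command-line flags must be understood by its own parser. A minimal driver sketch, with an illustrative image path:

mask = huawei_seg('/data/test/img_0001.png')
print(mask.shape, mask.dtype)  # (H, W) label map of the last image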
Example #7
def evaluate(segmentation_module, loader, cfg, gpu):
    acc_meter = AverageMeter()
    intersection_meter = AverageMeter()
    union_meter = AverageMeter()
    time_meter = AverageMeter()

    segmentation_module.eval()

    pbar = tqdm(total=len(loader))
    for batch_data in loader:
        # process data
        batch_data = batch_data[0]
        seg_label = as_numpy(batch_data['seg_label'][0])
        img_resized_list = batch_data['img_data']

        torch.cuda.synchronize()
        tic = time.perf_counter()
        with torch.no_grad():
            segSize = (seg_label.shape[0], seg_label.shape[1])
            scores = torch.zeros(1, cfg.DATASET.num_class, segSize[0], segSize[1])
            scores = async_copy_to(scores, gpu)

            for img in img_resized_list:
                feed_dict = batch_data.copy()
                feed_dict['img_data'] = img
                del feed_dict['img_ori']
                del feed_dict['info']
                feed_dict = async_copy_to(feed_dict, gpu)

                # forward pass
                scores_tmp = segmentation_module(feed_dict, segSize=segSize)
                scores = scores + scores_tmp / len(cfg.DATASET.imgSizes)

            _, pred = torch.max(scores, dim=1)
            pred = as_numpy(pred.squeeze(0).cpu())

        torch.cuda.synchronize()
        time_meter.update(time.perf_counter() - tic)

        # calculate accuracy
        acc, pix = accuracy(pred, seg_label)
        intersection, union = intersectionAndUnion(pred, seg_label, cfg.DATASET.num_class)
        acc_meter.update(acc, pix)
        intersection_meter.update(intersection)
        union_meter.update(union)

        # visualization
        if cfg.VAL.visualize:
            visualize_result(
                (batch_data['img_ori'], seg_label, batch_data['info']),
                pred,
                os.path.join(cfg.DIR, 'result')
            )

        pbar.update(1)

    # summary
    iou = intersection_meter.sum / (union_meter.sum + 1e-10)
    for i, _iou in enumerate(iou):
        print('class [{}], IoU: {:.4f}'.format(i, _iou))

    print('[Eval Summary]:')
    print('Mean IoU: {:.4f}, Accuracy: {:.2f}%, Inference Time: {:.4f}s'
          .format(iou.mean(), acc_meter.average()*100, time_meter.average()))
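The per-class IoU summary above depends on intersectionAndUnion accumulating pixel counts per class. A simplified sketch of that computation, assuming (as in mit_semseg) that unlabeled pixels carry a negative label which must not count against predictions; details may differ from the library code:

import numpy as np

def intersection_and_union(pred, label, num_class):
    pred = pred.astype(np.int64) + 1      # shift classes to 1..num_class
    label = label.astype(np.int64) + 1    # ignore label (-1) becomes 0
    pred = pred * (label > 0)             # drop predictions on unlabeled pixels
    inter = pred * (pred == label)        # pixels where prediction matches GT
    area_inter, _ = np.histogram(inter, bins=num_class, range=(1, num_class))
    area_pred, _ = np.histogram(pred, bins=num_class, range=(1, num_class))
    area_label, _ = np.histogram(label, bins=num_class, range=(1, num_class))
    return area_inter, area_pred + area_label - area_inter  # per-class I and U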
Example #8
def main():
    """Run main function"""

    OSS = OmniSemSeg(DATADIR, SAVEDIR)

    if VERBOSE:
        print('Semantic Segmentation')
        print("Saving results to %s" % SAVEDIR)

    print("Number of images:", len(OSS.list_img))

    if IMODE == "test":
        for elt in OSS.list_img:
            torch.cuda.synchronize()
            tic = time.perf_counter()

            pred_sphe, pred_persp = OSS.semseg_pred(elt)

            time_end = time.perf_counter() - tic
            print("Done for", str(elt), "in", time_end)
            OSS.save_simple(elt, pred_persp, pred_sphe)
            # OSS.save_all(elt, pred_persp, pred_sphe)

    elif IMODE == "eval":


        from mit_semseg.lib.utils import as_numpy

        semseg_metric_persp = semseg_metric()
        semseg_metric_sphe = semseg_metric()

        for elt in OSS.list_img[0:100]:


            semseg_gt_file = elt.replace("_0.png", "_2.png")
            semseg_gt = as_numpy(PIL.Image.open(semseg_gt_file).convert('RGB'))
            # Debug: per-channel histograms of the GT colors
            # for c in range(3):
            #     print(numpy.unique(semseg_gt[:, :, c], return_counts=True))
            # Optionally recover class ids by matching GT colors against OSS.colors:
            # semseg_gt_id = numpy.zeros(semseg_gt.shape[:2]) - 1
            # for idx, idy in numpy.ndindex(semseg_gt.shape[:2]):
            #     for idc, col in enumerate(OSS.colors):
            #         if not (semseg_gt[idx, idy] - col).all():
            #             semseg_gt_id[idx, idy] = idc
            #             break

            torch.cuda.synchronize()
            tic = time.perf_counter()

            print("Doing for", str(elt))
            pred_sphe, pred_persp = OSS.semseg_pred(elt)
            # OSS.save_all(elt, pred_persp, pred_sphe)
            OSS.save_all_2(elt, pred_persp, pred_sphe)

            pred_sphe_color = colorEncode(pred_sphe, OSS.colors).astype(numpy.uint8)
            pred_persp_color = colorEncode(pred_persp, OSS.colors).astype(numpy.uint8)

            semseg_metric_persp.update_metrics(pred_persp_color, semseg_gt, time.perf_counter() - tic)
            semseg_metric_sphe.update_metrics(pred_sphe_color, semseg_gt, time.perf_counter() - tic)

            # print("MIOU KERAS:", iou_mean(pred_sphe, semseg_gt_id, 150))

        semseg_metric_persp.show_metrics("PERSP")
        semseg_metric_sphe.show_metrics("SPHE")

    print("DONE")