Code example #1
File: engine.py Project: MrCrowbar/RoofDetection
def evaluate(model, data_loader, device):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    #cpu_device = torch.device('cuda')
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    for image, targets in metric_logger.log_every(data_loader, 100, header):
        image = list(img.to(device) for img in image)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(image)

        outputs = [{k: v.to(cpu_device)
                    for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time

        res = {
            target["image_id"].item(): output
            for target, output in zip(targets, outputs)
        }
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time,
                             evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
    return coco_evaluator
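Note: this example, like most below, relies on helpers from torchvision's detection references (references/detection) that the snippets do not show, in particular get_coco_api_from_dataset and _get_iou_types. For context, a sketch of _get_iou_types essentially as it appears in torchvision's references/detection/engine.py:

import torch
import torchvision


def _get_iou_types(model):
    # unwrap DistributedDataParallel so the isinstance checks see the real model
    model_without_ddp = model
    if isinstance(model, torch.nn.parallel.DistributedDataParallel):
        model_without_ddp = model.module
    iou_types = ["bbox"]
    if isinstance(model_without_ddp, torchvision.models.detection.MaskRCNN):
        iou_types.append("segm")
    if isinstance(model_without_ddp, torchvision.models.detection.KeypointRCNN):
        iou_types.append("keypoints")
    return iou_types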
Code example #2
def evaluate(model, data_loader, device):
    n_threads = torch.get_num_threads()
    torch.set_num_threads(1)
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    coco = data_loader.dataset.coco
    iou_types = ["bbox"]
    coco_evaluator = CocoEvaluator(coco, iou_types)

    for image, targets in metric_logger.log_every(data_loader, 100, header):
        image = list(img.to(device) for img in image)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(image)

        outputs = [{k: v for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time

        res = {target["image_id"].item(): output for target, output in zip(targets, outputs)}
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time, evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
    return coco_evaluator
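For reference, a minimal usage sketch for these evaluate functions, assuming a torchvision detection model and the reference utils.collate_fn. The dataset class, path, and batch size here are illustrative (PennFudanDataset and get_transform come from the torchvision detection tutorial, as used in code example #7 below):

import torch
import torchvision
import utils  # torchvision references/detection/utils.py

# hypothetical dataset; any detection dataset returning (image, target) pairs works
dataset_test = PennFudanDataset('data/PennFudanPed', get_transform(train=False))
data_loader_test = torch.utils.data.DataLoader(
    dataset_test, batch_size=2, shuffle=False,
    num_workers=2, collate_fn=utils.collate_fn)

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True)
model.to(device)

coco_evaluator = evaluate(model, data_loader_test, device=device)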
Code example #3
def evaluate(model, data_loader, device, box_threshold=0.001):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    coco = get_coco_api_from_dataset(data_loader.dataset, box_threshold)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    for images, targets in metric_logger.log_every(data_loader, 100, header):
        # images = list(img.to(device) for img in images)
        # targets = [{k: v.to(device) if torch.is_tensor(v) else v for k, v in t.items()} for t in targets]

        torch.cuda.synchronize()
        model_time = time.time()
        # if box_threshold is None:
        #     outputs = model(images)
        # else:
        #     outputs = model(images, box_threshold)

        outputs = model(images, targets)
        # outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]
        predictions = outputs['detections'].to(cpu_device)
        batch_predictions = []
        batch_size = predictions.shape[0]
        for i in range(batch_size):
            num_of_detections = len(
                torch.where(predictions[i][:, 4] > 0.0001)[0])
            batch_predictions.insert(
                i, {
                    'boxes': predictions[i][:num_of_detections, 0:4],
                    'scores': predictions[i][:num_of_detections, 4],
                    'labels': predictions[i][:num_of_detections, 5],
                })
            if num_of_detections > 0:
                try:
                    print("max score was [{}]".format(
                        batch_predictions[i]['scores'][0]))
                except Exception:
                    print("exception when using batch_predictions during eval")
                    print("batch_size [{}]".format(batch_size))
                    print(batch_predictions)

        model_time = time.time() - model_time

        # vis = visualize.Visualize('.', targets['img_size'][0][0])
        # num_of_detections = len(torch.where(targets['cls'][0] > -1)[0])
        # vis.show_image_data(images[0], targets['cls'][0,:num_of_detections].int(), None, targets['bbox'][0,:num_of_detections,[1,0,3,2]])

        # print("img ids: [{}]".format(targets['image_id'].to(cpu_device).tolist()))
        res = {
            image_id: output
            for image_id, output in zip(
                targets['image_id'].to(cpu_device).tolist(), batch_predictions)
        }  # ofekp: this used to be target["image_id"].item()
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time,
                             evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
    return coco_evaluator
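The example above assumes outputs['detections'] is a (batch, max_detections, 6) tensor whose columns are [x1, y1, x2, y2, score, label], with rows sorted by descending score. Under that assumption, the per-image unpacking could be written as a small helper; decode_detections and the boolean-mask variant are illustrative, not part of the original code:

def decode_detections(detections, score_thresh=0.0001):
    # split one image's (max_det, 6) tensor into the dict CocoEvaluator expects
    keep = detections[:, 4] > score_thresh
    return {
        'boxes': detections[keep, 0:4],
        'scores': detections[keep, 4],
        'labels': detections[keep, 5],
    }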
Code example #4
def evaluate(model, data_loader, device):

    hookF = {}
    for name, module in model.named_modules():
        if isinstance(module, torch.jit.ScriptModule):
            continue
        hookF[name] = statistics(module)
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    for image, targets in metric_logger.log_every(data_loader, 100, header):
        image = list(img.to(device) for img in image)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(image)

        outputs = [{k: v.to(cpu_device)
                    for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time

        res = {
            target["image_id"].item(): output
            for target, output in zip(targets, outputs)
        }
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time,
                             evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)

    stat = {}
    for key, statObj in hookF.items():

        stat[key] = [{
            "min_input": float(statObj.input_stat.min),
            "max_input": float(statObj.input_stat.max),
            "min_output": float(statObj.output_stat.min),
            "max_output": float(statObj.output_stat.max),
            "avg_min_input": float(statObj.input_stat.avg_min),
            "avg_max_input": float(statObj.input_stat.avg_max),
            "avg_min_output": float(statObj.output_stat.avg_min),
            "avg_max_output": float(statObj.output_stat.avg_max)
        }]
    del stat['']
    # save the dictionary as a JSON file (the original dumped the whole dict
    # once per key, producing invalid JSON; one dump is enough)
    with open('Pytorch_Obj_Det_Stat.json', 'w') as fp:
        json.dump(stat, fp, indent=0)

    return coco_evaluator
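The statistics hook object is not shown in this example. A minimal sketch of what it might look like, assuming it tracks running min/max of each module's inputs and outputs via a forward hook; the attribute names are taken from the JSON-export code above, everything else is an assumption:

import torch


class RunningMinMax:
    # overall extrema plus running averages of per-batch extrema
    def __init__(self):
        self.min, self.max = float('inf'), float('-inf')
        self.avg_min, self.avg_max = 0.0, 0.0
        self.count = 0

    def update(self, t):
        if not torch.is_tensor(t):
            return
        t_min, t_max = t.min().item(), t.max().item()
        self.min, self.max = min(self.min, t_min), max(self.max, t_max)
        self.count += 1
        self.avg_min += (t_min - self.avg_min) / self.count
        self.avg_max += (t_max - self.avg_max) / self.count


class statistics:
    # one instance per module: a forward hook feeds both trackers
    def __init__(self, module):
        self.input_stat = RunningMinMax()
        self.output_stat = RunningMinMax()
        module.register_forward_hook(self._hook)

    def _hook(self, module, inputs, output):
        self.input_stat.update(inputs[0] if inputs else None)
        self.output_stat.update(output)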
Code example #5
def evaluate_bin(model, data_loader, device, bin_folder):

    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)
    jsonPath = os.path.join(args.output_dir, 'images_shape.json')
    with open(jsonPath) as json_file:
        shape_dict = json.load(json_file)
    model.transform = IdentityTransform(model.transform.min_size,
                                        model.transform.max_size,
                                        model.transform.image_mean,
                                        model.transform.image_std)
    model.eval()
    for image, targets in metric_logger.log_every(data_loader, 100, header):
        image = list(img.to(device) for img in image)
        original_image_sizes = [img.shape[-2:] for img in image]

        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        img_id = targets[0]['image_id'].cpu().numpy()[0]
        path = os.path.join(bin_folder, str(img_id) + '.bin')
        with open(path, 'rb') as f:
            transformed_img = np.fromfile(f, np.float32)
        transformed_img = np.reshape(transformed_img,
                                     shape_dict[str(img_id)][0][0])

        image_sizes_not_divisible = np.asarray(shape_dict[str(img_id)][1][0])
        image_sizes_not_divisible = torch.from_numpy(image_sizes_not_divisible)

        transformed_img_T = torch.from_numpy(transformed_img)
        transformed_img_T = transformed_img_T.to(device)

        torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(transformed_img_T)

        outputs = [{k: v.to(cpu_device)
                    for k, v in t.items()} for t in outputs]
        outputs = model.transform.postprocess(outputs,
                                              [image_sizes_not_divisible],
                                              original_image_sizes)

        model_time = time.time() - model_time

        res = {
            target["image_id"].item(): output
            for target, output in zip(targets, outputs)
        }
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time,
                             evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
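IdentityTransform is a custom class not shown here. Since the .bin inputs were already resized and normalized offline, it presumably skips those steps while inheriting postprocess() from GeneralizedRCNNTransform. A speculative sketch under that assumption only:

import torch
from torchvision.models.detection.image_list import ImageList
from torchvision.models.detection.transform import GeneralizedRCNNTransform


class IdentityTransform(GeneralizedRCNNTransform):
    # assumed behavior: wrap the pre-transformed batch without resizing or
    # normalizing; postprocess() is inherited from the base class unchanged
    def forward(self, images, targets=None):
        if isinstance(images, (list, tuple)):
            images = torch.stack(images)
        image_sizes = [tuple(img.shape[-2:]) for img in images]
        return ImageList(images, image_sizes), targets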
Code example #6
File: engine.py Project: wdjang/cycif_maskrcnn
def evaluate(model,
             data_loader,
             device,
             is_vis=False,
             draw_bbox=False,
             vis_dir='./vis_results'):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    res_list = []
    for images, targets in metric_logger.log_every(data_loader, 100, header):
        images = list(img.to(device) for img in images)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(images)

        outputs = [{k: v.to(cpu_device)
                    for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time

        res = {
            target["image_id"].item(): output
            for target, output in zip(targets, outputs)
        }
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time,
                             evaluator_time=evaluator_time)
        res_list.append(res)

        # visualization
        if is_vis:
            os.makedirs(vis_dir, exist_ok=True)
            coco_evaluator.visualize(res, images, vis_dir, draw_bbox)

    os.makedirs(vis_dir, exist_ok=True)  # the pickle is written even when is_vis is False
    with open(os.path.join(vis_dir, 'pred_res.pkl'), 'wb') as pkl_file:
        pickle.dump(res_list, pkl_file)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    mAP_scores = coco_evaluator.summarize()
    torch.set_num_threads(n_threads)

    return mAP_scores
Code example #7
File: test.py Project: mmmuuuuua/maskrcnn
def main():
    # train on the GPU or on the CPU, if a GPU is not available
    device = torch.device(
        'cuda') if torch.cuda.is_available() else torch.device('cpu')

    # our dataset has two classes only - background and person
    num_classes = 2

    # get the model using our helper function
    model = get_model_instance_segmentation(num_classes)

    # load pretrain_dict
    pretrain_dict = torch.load(
        os.path.join("C:\\zhulei\\maskRcnn\\models", "_epoch-9.pth"))
    model.load_state_dict(pretrain_dict)

    # move model to the right device
    model.to(device)

    # use our dataset and defined transformations
    dataset_test = PennFudanDataset('C:\\zhulei\\maskRcnn\\data\\test',
                                    get_transform(train=False))
    data_loader_test = torch.utils.data.DataLoader(dataset_test,
                                                   batch_size=1,
                                                   shuffle=False,
                                                   num_workers=1,
                                                   collate_fn=utils.collate_fn)

    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    coco = get_coco_api_from_dataset(data_loader_test.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    for image, targets in metric_logger.log_every(data_loader_test, 100,
                                                  header):
        image = list(img.to(device) for img in image)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        torch.cuda.synchronize()
        model_time = time.time()

        outputs = model(image)

        instance_segmentation_api(image[0], outputs)

        # visualization
        # for img in image:
        #     Image.fromarray((img.mul(255).permute(1, 2, 0).byte().cpu().numpy())[0])
        # print(outputs[0]['masks'].shape)
        # for i in range(99):
        #     result = Image.fromarray(outputs[0]['masks'][i, 0].mul(255).byte().cpu().numpy())
        #     result.show()

        outputs = [{k: v.to(cpu_device)
                    for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time

        res = {
            target["image_id"].item(): output
            for target, output in zip(targets, outputs)
        }
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time,
                             evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
Code example #8
def evaluate(model, data_loader, device, epoch_num=None, check_num=200):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")

    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    idx = 0

    for images, targets in metric_logger.log_every(data_loader, 100, header):
        images = list(img.to(device) for img in images)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        torch.cuda.synchronize()
        model_time = time.time()
        outputs_set = model(images)  # output: a list with one entry per image in the validation batch

        outputs_list = [{k: v.to(cpu_device)
                         for k, v in t.items()}
                        for t in outputs_set]  # for each output in the minibatch

        # outputs_list holds dicts t of {'boxes','labels','scores','masks'}; each value is a tensor

        model_time = time.time() - model_time

        res = {
            target["image_id"].item(): output
            for target, output in zip(targets, outputs_list)
        }
        # build a dict keyed by target["image_id"].item(), i.e. the image id;
        # the value is the model's output t for that image, a {'boxes','labels','scores','masks'}
        # dict whose tensors each have length equal to the number of predicted objects

        idx += 1
        if idx % check_num == 0:  # log a visual check every check_num iterations
            if epoch_num is not None:
                coco_a_result_check(images, targets, res,
                                    'E' + str(epoch_num) + '_' + str(idx))
            else:
                coco_a_result_check(images, targets, res)
        '''
        for key in res:
            print(len(res[key]['boxes']))  # Mask R-CNN initially outputs 100 boxes (DETR: 200); once trained, the count drops sharply.
        '''

        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time,
                             evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
    return coco_evaluator
Code example #9
File: engine.py Project: proteanblank/craftassist
def evaluate(model, criterion, postprocessor, data_loader, base_ds, device, eval_bbox, eval_masks):
    model.eval()
    criterion.eval()

    metric_logger = utils.MetricLogger(delimiter="  ")
    metric_logger.add_meter("class_error", utils.SmoothedValue(window_size=1, fmt="{value:.2f}"))
    header = "Test:"

    iou_types = []
    if eval_masks:
        iou_types += ["segm"]
    if eval_bbox:
        iou_types += ["bbox"]
    iou_types = tuple(iou_types)
    if isinstance(base_ds, LVIS):
        coco_evaluator = LvisEvaluator(base_ds, iou_types) if eval_bbox or eval_masks else None
    else:
        coco_evaluator = CocoEvaluator(base_ds, iou_types) if eval_bbox or eval_masks else None
    # coco_evaluator.coco_eval[iou_types[0]].params.iouThrs = [0, 0.1, 0.5, 0.75]
    for samples, targets in metric_logger.log_every(data_loader, 10, header):
        samples = samples.to(device)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        outputs = model(samples)
        loss_dict = criterion(outputs, targets)
        weight_dict = criterion.weight_dict

        # reduce losses over all GPUs for logging purposes
        loss_dict_reduced = utils.reduce_dict(loss_dict)
        loss_dict_reduced_scaled = {
            k: v * weight_dict[k] for k, v in loss_dict_reduced.items() if k in weight_dict
        }
        loss_dict_reduced_unscaled = {f"{k}_unscaled": v for k, v in loss_dict_reduced.items()}
        metric_logger.update(
            loss=sum(loss_dict_reduced_scaled.values()),
            **loss_dict_reduced_scaled,
            **loss_dict_reduced_unscaled,
        )
        metric_logger.update(class_error=loss_dict_reduced["class_error"])

        results = postprocessor(outputs, targets)
        res = {target["image_id"].item(): output for target, output in zip(targets, results)}
        if coco_evaluator is not None:
            coco_evaluator.update(res)
    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    if coco_evaluator is not None:
        coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    if coco_evaluator is not None:
        coco_evaluator.accumulate()
        coco_evaluator.summarize()
    stats = {k: meter.global_avg for k, meter in metric_logger.meters.items()}
    if coco_evaluator is not None:
        if eval_bbox:
            stats["coco_eval_bbox"] = coco_evaluator.coco_eval["bbox"].stats.tolist()
        if eval_masks:
            stats["coco_eval_masks"] = coco_evaluator.coco_eval["segm"].stats.tolist()
    return stats, coco_evaluator
Code example #10
def evaluate_yolo_2017(model, data_loader, device):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)
    transform = GeneralizedRCNNTransform(416, 416, [0, 0, 0], [1, 1, 1])
    transform.eval()
    for image, targets in metric_logger.log_every(data_loader, 100, header):
        image = list(img.to(device) for img in image)

        original_image_sizes = [img.shape[-2:] for img in image]

        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        torch.cuda.synchronize()
        model_time = time.time()
        transformed_img = transform(image)
        transformed_shape = transformed_img[0].tensors.shape[-2:]
        inf_out, _ = model(transformed_img[0].tensors)
        # Run NMS
        output = non_max_suppression(inf_out, conf_thres=0.001, iou_thres=0.6)

        # Statistics per image
        predictions = []
        for si, pred in enumerate(output):
            prediction = {'boxes': [], 'labels': [], 'scores': []}
            if pred is None:
                continue
            # Append to text file
            # with open('test.txt', 'a') as file:
            #    [file.write('%11.5g' * 7 % tuple(x) + '\n') for x in pred]

            # Clip boxes to image bounds
            clip_coords(pred, transformed_shape)
            # Append to pycocotools JSON dictionary
            # [{"image_id": 42, "category_id": 18, "bbox": [258.15, 41.29, 348.26, 243.78], "score": 0.236}, ...
            image_id = int(targets[si]['image_id'])
            box = pred[:, :4].clone()  # xyxy
            # scale_coords(transformed_shape, box, shapes[si][0], shapes[si][1])  # to original shape
            # box = xyxy2xywh(box)  # xywh
            # box[:, :2] -= box[:, 2:] / 2  # xy center to top-left corner
            for di, d in enumerate(pred):
                box_T = [floatn(x, 3) for x in box[di]]
                label = coco91class[int(d[5])]
                score = floatn(d[4], 5)
                prediction['boxes'].append(box_T)
                prediction['labels'].append(label)
                prediction['scores'].append(score)
            prediction['boxes'] = torch.tensor(prediction['boxes'])
            prediction['labels'] = torch.tensor(prediction['labels'])
            prediction['scores'] = torch.tensor(prediction['scores'])
            predictions.append(prediction)

        outputs = transform.postprocess(predictions,
                                        transformed_img[0].image_sizes,
                                        original_image_sizes)

        outputs = [{k: v.to(cpu_device)
                    for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time

        res = {
            target["image_id"].item(): output
            for target, output in zip(targets, outputs)
        }
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time,
                             evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
    return coco_evaluator
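This example leans on helpers from the ultralytics YOLOv3 utilities (non_max_suppression, clip_coords, coco91class, floatn) that are not shown. Sketches of the two small ones, under the assumption that they match that codebase:

def floatn(x, n=3):
    # round a scalar tensor/float to n decimal places (assumed helper)
    return round(float(x), n)


# assumed mapping from the 80 contiguous detector classes to COCO's 91 category ids
coco91class = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 13, 14, 15, 16, 17, 18, 19,
               20, 21, 22, 23, 24, 25, 27, 28, 31, 32, 33, 34, 35, 36, 37, 38,
               39, 40, 41, 42, 43, 44, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
               56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 67, 70, 72, 73, 74, 75,
               76, 77, 78, 79, 80, 81, 82, 84, 85, 86, 87, 88, 89, 90]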
Code example #11
    data_loader = torch.utils.data.DataLoader(val_dataset,
                                              batch_size=1,
                                              shuffle=False,
                                              num_workers=4,
                                              collate_fn=utils.collate_fn)

    n_threads = torch.get_num_threads()

    torch.set_num_threads(1)
    cpu_device = torch.device('cpu')

    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)

    for i in range(0, 56):
        coco_evaluator = CocoEvaluator(coco, iou_types)

        model.to(device)

        print('load model {} parameters...'.format(i))
        model.load_state_dict(torch.load(
            'output/output_epoch_{}.pt'.format(i)))

        model.eval()

        data_iter = iter(data_loader)

        for data in data_iter:
            images, guide_images, targets = data

            images = list(image.to(device) for image in images)
Code example #12
def evaluate_bcnn(model, data_loader, device):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    coco = get_coco_api_from_dataset_bcnn(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    for images, targets in metric_logger.log_every(data_loader, 100, header):
        fine_targets = []
        for t in targets:
            fine_targets.append({
                'boxes': t['boxes'].to(device),
                'labels': torch.tensor([l[0] for l in t['labels']]).to(device),
                'image_id': t['image_id'].to(device),
                'area': t['area'].to(device),
                'iscrowd': t['iscrowd'].to(device)
            })
        targets = fine_targets
        print('targets', targets)

        images = list(img.to(device) for img in images)

        torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(images)
        print('outputs', outputs)

        outputs = [{k: v.to(cpu_device)
                    for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time

        res = {
            target["image_id"].item(): output
            for target, output in zip(targets, outputs)
        }
        print('res', res)
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time,
                             evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
    return coco_evaluator
Code example #13
def evaluate(model, data_loader, device, writer, epoch):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    for image, targets in metric_logger.log_every(data_loader, 100, header):
        image = list(img.to(device) for img in image)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(image)

        outputs = [{k: v.to(cpu_device)
                    for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time

        res = {
            target["image_id"].item(): output
            for target, output in zip(targets, outputs)
        }
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time,
                             evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)

    #   TB Logs (remember, when not specified MaxDet = 100)

    writer.add_scalar('mAP @ [.5:.05:.95] @ all_size',
                      coco_evaluator.coco_eval['bbox'].stats[0], epoch)
    writer.add_scalar('mAP @ 0.5 @ all_size',
                      coco_evaluator.coco_eval['bbox'].stats[1], epoch)
    writer.add_scalar('mAP @ 0.75 @ all_size',
                      coco_evaluator.coco_eval['bbox'].stats[2], epoch)
    writer.add_scalar('mAP @ [.5:.05:.95] @ SMALL',
                      coco_evaluator.coco_eval['bbox'].stats[3], epoch)
    writer.add_scalar('mAP @ [.5:.05:.95] @ MEDIUM',
                      coco_evaluator.coco_eval['bbox'].stats[4], epoch)
    writer.add_scalar('mAP @ [.5:.05:.95] @ LARGE',
                      coco_evaluator.coco_eval['bbox'].stats[5], epoch)

    writer.add_scalar('mAR @ [.5:.05:.95] @ all_size @ MaxDet=1',
                      coco_evaluator.coco_eval['bbox'].stats[6], epoch)
    writer.add_scalar('mAR @ [.5:.05:.95] @ all_size @ MaxDet=10',
                      coco_evaluator.coco_eval['bbox'].stats[7], epoch)
    writer.add_scalar('mAR @ [.5:.05:.95] @ all_size @ MaxDet=100',
                      coco_evaluator.coco_eval['bbox'].stats[8], epoch)
    writer.add_scalar('mAR @ [.5:.05:.95] @ SMALL',
                      coco_evaluator.coco_eval['bbox'].stats[9], epoch)
    writer.add_scalar('mAR @ [.5:.05:.95] @ MEDIUM',
                      coco_evaluator.coco_eval['bbox'].stats[10], epoch)
    writer.add_scalar('mAR @ [.5:.05:.95] @ LARGE',
                      coco_evaluator.coco_eval['bbox'].stats[11], epoch)

    return coco_evaluator, coco_evaluator.coco_eval['bbox'].stats[1]
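The stats indices logged above follow the fixed ordering of pycocotools' COCOeval.summarize(); for quick reference:

# pycocotools COCOeval.stats layout (IoU=0.50:0.95 means averaged over thresholds 0.50 to 0.95 in steps of 0.05)
# stats[0]  AP @ IoU=0.50:0.95 | area=all    | maxDets=100
# stats[1]  AP @ IoU=0.50      | area=all    | maxDets=100
# stats[2]  AP @ IoU=0.75      | area=all    | maxDets=100
# stats[3]  AP @ IoU=0.50:0.95 | area=small  | maxDets=100
# stats[4]  AP @ IoU=0.50:0.95 | area=medium | maxDets=100
# stats[5]  AP @ IoU=0.50:0.95 | area=large  | maxDets=100
# stats[6]  AR @ IoU=0.50:0.95 | area=all    | maxDets=1
# stats[7]  AR @ IoU=0.50:0.95 | area=all    | maxDets=10
# stats[8]  AR @ IoU=0.50:0.95 | area=all    | maxDets=100
# stats[9]  AR @ IoU=0.50:0.95 | area=small  | maxDets=100
# stats[10] AR @ IoU=0.50:0.95 | area=medium | maxDets=100
# stats[11] AR @ IoU=0.50:0.95 | area=large  | maxDets=100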
Code example #14
File: litmodel.py Project: JoOkuma/MO434_project
    def _get_coco_eval(self):
        coco = get_coco_api_from_dataset(self.dataset)
        iou_types = ['segm', 'bbox']
        return CocoEvaluator(coco, iou_types)

def evaluate(model, data_loader, device):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)
    count = 0
    for images, targets, valids, _ in metric_logger.log_every(
            data_loader, 100, header):
        if False in valids:
            continue
        images = list(img.to(device) for img in images)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(images)

        outputs = [{k: v.to(cpu_device)
                    for k, v in t.items()} for t in outputs]

        img = images[0].cpu().numpy().transpose(1, 2, 0)
        ax = plt.subplot(1, 1, 1)
        ax.set_axis_off()
        ax.imshow(img)
        count += 1
        mask_count = 3
        for box, mask_tensor in zip(
                outputs[0]['boxes'], outputs[0]['masks']
        ):  # Won't work when not using gt_boxes because we can have fewer boxes than masks
            box = box.int().tolist()
            # np.float was removed from NumPy; plain float keeps the same float64 dtype
            mask = (mask_tensor[0].cpu().numpy() >= 0.5).astype(float)

            full_mask = np.expand_dims(mask, axis=-1).repeat(4, axis=-1)
            full_mask[:, :, 0] = 0.
            full_mask[:, :, 1] = 1
            full_mask[:, :, 2] = 0.

            ax.imshow(full_mask, alpha=0.3)
            mask_count -= 1
            if mask_count == 0:
                break

        plt.savefig(f'data/{count}.png')
        plt.clf()

        model_time = time.time() - model_time

        res = {
            target["image_id"].item(): output
            for target, output in zip(targets, outputs)
        }
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time,
                             evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
    return coco_evaluator
Code example #16
File: vocREMIND.py Project: vishwa30/rodeo
def evaluate_withpq(model, data_loader, device):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    test_data_pkl = h5py.File(
        'resnet_imagenet_features/backbone.7.0_test_reconstructed.h5', 'r')
    #test_data_pkl = h5py.File('resnet_imagenet_features/backbone.7.0_test.h5', 'r')

    for images, targets in tqdm(data_loader, desc=header):
        images = list(image for image in images)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        image_id = targets[0]['image_id'].item()
        quantized_x = test_data_pkl[str(image_id)][()]
        quantized_x = torch.from_numpy(quantized_x)
        imagepq = quantized_x.to(device)

        torch.cuda.synchronize()
        model_time = time.time()

        #print ("----",image_id,"----",imagepq.shape)
        outputs = model(images, imagepq, targets)

        outputs = [{k: v.to(cpu_device)
                    for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time

        res = {
            target["image_id"].item(): output
            for target, output in zip(targets, outputs)
        }
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    coco_evaluator.summarize_per_category()
    torch.set_num_threads(n_threads)

    test_data_pkl.close()
    return coco_evaluator
Code example #17
def evaluate(model, data_loader, device):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    # changing these two lines a bit to have iteration number and to keep image tensor
    for i, (images, targets) in enumerate(metric_logger.log_every(data_loader, 100, header)):
        img = images[0]

        images = list(img.to(device) for img in images)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(images)

        # ### OUR CODE ###
        # let's track bounding box and labels predictions for the first 50 images
        # as we hardly want to track all validation images
        # but want to see how the predicted bounding boxes and labels are changing during the process
        if i < 50:
            # let's add tracking images with predicted bounding boxes
            logger.add_image_with_boxes(
                # adding pred_images tag to combine images in one subgroup
                "pred_images/PD-{}".format(i),
                # passing image tensor
                img,
                # passing predicted bounding boxes
                outputs[0]["boxes"].cpu(),
                # mapping & passing predicted labels
                labels=[
                    COCO_INSTANCE_CATEGORY_NAMES[i]
                    for i in outputs[0]["labels"].cpu().numpy()
                ],
            )
        # ### END OUR CODE ###
        outputs = [{k: v.to(cpu_device) for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time

        res = {target["image_id"].item(): output for target, output in zip(targets, outputs)}
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time, evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
    return coco_evaluator
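This example assumes a module-level logger and label list that the snippet does not define. A sketch of that assumed setup (the names and log directory are illustrative); add_image_with_boxes is provided by SummaryWriter in the PyTorch versions these examples target:

from torch.utils.tensorboard import SummaryWriter

logger = SummaryWriter(log_dir='runs/eval_tracking')

# the standard COCO label list from the torchvision tutorials, truncated here
COCO_INSTANCE_CATEGORY_NAMES = [
    '__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
    # ... remaining categories omitted
]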
Code example #18
def evaluate(model, data_loader, device):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    coco = get_coco_api_from_dataset2(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    for images, targets in data_loader:
        images = list(img.to(device) for img in images)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        torch.cuda.synchronize()
        model_time = time.time()
        predictions = model(images)

        predictions = [{k: v.to(cpu_device)
                        for k, v in t.items()}
                       for t in predictions]  # [{'boxes':, 'labels':, 'scores':}], batch_size = 1

        outputs = []
        for image in predictions:
            boxes = image['boxes']
            scores = image['scores']
            labels = image['labels']
            keep = torch.ops.torchvision.nms(boxes, scores, 0.1)

            # index with the kept indices directly; unlike torch.stack over a
            # list comprehension, this also works when NMS keeps no boxes
            outputs.append({
                'boxes': boxes[keep],
                'labels': labels[keep],
                'scores': scores[keep]
            })

        model_time = time.time() - model_time

        res = {
            target["image_id"].item(): output
            for target, output in zip(targets, outputs)
        }
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time,
                             evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
    return coco_evaluator
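The private torch.ops.torchvision.nms call above has a public, version-stable equivalent in torchvision.ops.nms. A sketch of the same filtering step using it (nms_filter is an illustrative name; the 0.1 threshold is taken from the example):

import torchvision


def nms_filter(prediction, iou_threshold=0.1):
    # keep indices returned by NMS and apply them to every field at once
    keep = torchvision.ops.nms(prediction['boxes'], prediction['scores'],
                               iou_threshold)
    return {k: v[keep] for k, v in prediction.items()}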
Code example #19
File: engine.py Project: dasari4321/MOT-1234
def evaluate(model, data_loader, device):
    n_threads = torch.get_num_threads()
    # FIXME remove this and make paste_masks_in_image run on the GPU
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    coco = get_coco_api_from_dataset(data_loader.dataset)
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)
    idx = 0
    results = []
    for image, targets in metric_logger.log_every(data_loader, 100, header):
        image = list(img.to(device) for img in image)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        torch.cuda.synchronize()
        model_time = time.time()
        outputs = model(image)
        ###########################################
        results = model.tracker.run(predicted=outputs,
                                    image=image,
                                    idx_frame=idx,
                                    results=results)
        ###########################################
        outputs = [{k: v.to(cpu_device)
                    for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time

        res = {
            target["image_id"].item(): output
            for target, output in zip(targets, outputs)
        }
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time,
                             evaluator_time=evaluator_time)
        img = transforms.ToPILImage()(image[0].detach().cpu()).convert("RGB")
        draw = ImageDraw.Draw(img)
        for k in range(min(len(targets[0]['boxes']),
                           len(outputs[0]['boxes']))):
            draw.rectangle(targets[0]['boxes'][k].detach().cpu().numpy(),
                           outline="red")
            draw.rectangle(outputs[0]['boxes'][k].detach().cpu().numpy(),
                           outline="blue")

        # write_results("./results/mot.txt", results, 'mot')
        # img.show()
        img.save("./results/%04d.png" % (idx + 1))  # zero-pad so filenames sort correctly
        idx += 1
        # del img
        # time.sleep(10)
        # import psutil
        # for proc in psutil.process_iter():
        #     if proc.name() == "display":
        #         proc.kill()

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    torch.set_num_threads(n_threads)
    return coco_evaluator
Code example #20
def inference(model, dataloader, datatype, args):
    batch_time = AverageMeter('Time', ':6.3f')
    batch_size = args.batch_size
    warmup_iters = args.warmup_iterations
    max_iters = args.max_iterations if dataloader is None else len(dataloader)
    model.eval()
    coco = get_coco_api_from_dataset(dataloader.dataset)
    iou_types = ["bbox"]
    iou_types.append("segm")
    coco_evaluator = CocoEvaluator(coco, iou_types)
    if args.ipex:
        import intel_extension_for_pytorch as ipex
        model = model.to(memory_format=torch.channels_last)
        model = ipex.optimize(model,
                              dtype=datatype,
                              level="O1",
                              conv_bn_folding=False,
                              replace_dropout_with_identity=False)
        model.backbone = ipex.optimize(model.backbone,
                                       dtype=datatype,
                                       level="O1")
    else:
        if args.jit:
            model = model.to(memory_format=torch.channels_last)
        else:
            from torch.utils import mkldnn as mkldnn_utils
            model = mkldnn_utils.to_mkldnn(model, dtype=datatype)
    if args.jit:
        x = torch.randn(batch_size, 3, 1200,
                        1200).to(memory_format=torch.channels_last)
        if args.precision == "bf16":
            with torch.cpu.amp.autocast(), torch.no_grad():
                model.backbone = torch.jit.trace(model.backbone,
                                                 x,
                                                 strict=False)
            model.backbone = torch.jit.freeze(model.backbone)
        else:
            with torch.no_grad():
                model.backbone = torch.jit.trace(model.backbone,
                                                 x,
                                                 strict=False)
            model.backbone = torch.jit.freeze(model.backbone)
    with torch.no_grad():
        if dataloader is None:
            print("Detection tasks need a real dataset; "
                  "please specify the COCO dataset.")
            exit(1)
        else:
            for i, batch in enumerate(dataloader):
                images = batch[0]
                if not args.ipex and not args.jit:
                    images = list(img.to(datatype) for img in images)
                if args.ipex and args.precision == "bf16":
                    with torch.cpu.amp.autocast():
                        if i == warmup_iters:
                            with profile(
                                    activities=[ProfilerActivity.CPU],
                                    record_shapes=True
                            ) as prof, record_function("model_inference"):
                                output = model(images)
                        else:
                            output = model(images)
                else:
                    if i == warmup_iters:
                        with profile(
                                activities=[ProfilerActivity.CPU],
                                record_shapes=True) as prof, record_function(
                                    "model_inference"):
                            output = model(images)
                    else:
                        output = model(images)
                if i > warmup_iters:
                    break
            for i, batch in enumerate(dataloader):
                images = batch[0]
                end = time.time()
                if not args.ipex and not args.jit:
                    images = list(img.to(datatype) for img in images)
                if args.ipex and args.precision == "bf16":
                    with torch.cpu.amp.autocast():
                        output = model(images)
                else:
                    output = model(images)
                batch_time.update(time.time() - end)
                output = [{k: v.to(torch.float32)
                           for k, v in t.items()} for t in output]
                res = {
                    target["image_id"].item(): output
                    for target, output in zip(batch[1], output)
                }
                coco_evaluator.update(res)
                if max_iters != -1 and i >= max_iters:
                    break
    print(prof.key_averages().table(sort_by="cpu_time_total", row_limit=-1))
    latency = batch_time.avg / batch_size * 1000
    perf = batch_size / batch_time.avg
    coco_evaluator.synchronize_between_processes()
    coco_evaluator.accumulate()
    coco_evaluator.summarize()
    print("Bbox AP: {:.5f} ".format(coco_evaluator.coco_eval['bbox'].stats[0]))
    print("Segm AP: {:.5f} ".format(coco_evaluator.coco_eval['segm'].stats[0]))
    print('Latency: %.3f ms' % latency)
    print("Throughput: {:.3f} fps".format(perf))
Code example #21
def evaluate(model, data_loader, device, epoch, writer=None):
    global best_mAp
    n_threads = torch.get_num_threads()
    torch.set_num_threads(1)
    cpu_device = torch.device("cpu")
    model.eval()
    metric_logger = utils.MetricLogger(delimiter="  ")
    header = 'Test:'

    coco = get_coco_api_from_dataset(data_loader.dataset)
    print("get coco dataset completed!")
    iou_types = _get_iou_types(model)
    coco_evaluator = CocoEvaluator(coco, iou_types)

    running_loss = 0
    running_num = 0

    for image, targets in metric_logger.log_every(data_loader, 100, header):
        image = list(img.to(device) for img in image)
        targets = [{k: v.to(device) for k, v in t.items()} for t in targets]

        torch.cuda.synchronize()
        model_time = time.time()
        outputs, loss_dict = model(image, targets)

        loss_dict_reduced = utils.reduce_dict(loss_dict)
        losses_reduced = sum(loss for loss in loss_dict_reduced.values())
        print('losses_reduced type:', type(losses_reduced))
        loss_value = losses_reduced

        running_loss += loss_value
        running_num += len(image)

        outputs = [{k: v.to(cpu_device)
                    for k, v in t.items()} for t in outputs]
        model_time = time.time() - model_time

        res = {
            target["image_id"].item(): output
            for target, output in zip(targets, outputs)
        }
        evaluator_time = time.time()
        coco_evaluator.update(res)
        evaluator_time = time.time() - evaluator_time
        metric_logger.update(model_time=model_time,
                             evaluator_time=evaluator_time)

    # gather the stats from all processes
    metric_logger.synchronize_between_processes()
    print("Averaged stats:", metric_logger)
    coco_evaluator.synchronize_between_processes()

    # accumulate predictions from all images
    coco_evaluator.accumulate()
    stats_dic = coco_evaluator.summarize()
    print('stats_dic', stats_dic)
    bbox_mAp = stats_dic['bbox'][0]
    torch.set_num_threads(n_threads)

    if writer is not None:
        writer.add_scalar('running_loss', running_loss / running_num, epoch)
        writer.add_scalar('test_mAP', bbox_mAp, epoch)
    return coco_evaluator, bbox_mAp