Example #1
import logging
import time

import torch

# post_process, untransform_bboxes and cxcywh_to_xywh are helpers from the
# surrounding project and are assumed to be importable at module level.


def run_detection(model, dataloader, device, conf_thres, nms_thres):
    results = []
    _detection_time_list = []

    logging.info('Performing object detection:')

    for batch_i, batch in enumerate(dataloader):
        file_names = batch[0]
        img_batch = batch[1].to(device)
        scales = batch[2].to(device)
        paddings = batch[3].to(device)

        # Get detections
        start_time = time.time()
        with torch.no_grad():
            detections = model(img_batch)
        detections = post_process(detections, True, conf_thres, nms_thres)

        for detection, scale, padding in zip(detections, scales, paddings):
            detection[..., :4] = untransform_bboxes(detection[..., :4], scale,
                                                    padding)
            cxcywh_to_xywh(detection)

        # Log progress
        end_time = time.time()
        inference_time_both = end_time - start_time
        # print("Total PP time: {:.1f}".format(inference_time_pp*1000))
        logging.info('Batch {}, '
                     'Total time: {}s, '.format(batch_i, inference_time_both))
        _detection_time_list.append(inference_time_both)

        results.extend(zip(file_names, detections, scales, paddings))

    _detection_time_tensor = torch.tensor(_detection_time_list)
    avg_time = torch.mean(_detection_time_tensor)
    time_std_dev = torch.std(_detection_time_tensor)
    logging.info('Average inference time (total) is {}s.'.format(
        float(avg_time)))
    logging.info('Std dev of inference time (total) is {}s.'.format(
        float(time_std_dev)))
    return results
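A minimal usage sketch for run_detection (not part of the original example): the build_model and build_dataloader factories and the threshold values below are hypothetical placeholders, and the project's post_process, untransform_bboxes and cxcywh_to_xywh helpers must be importable for the call to work.

import logging

import torch

logging.basicConfig(level=logging.INFO)

# Hypothetical setup: build_model() and build_dataloader() stand in for
# whatever the surrounding project uses to create the detector and its data.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = build_model().to(device).eval()
dataloader = build_dataloader()

results = run_detection(model, dataloader, device,
                        conf_thres=0.5, nms_thres=0.4)
for file_name, detection, scale, padding in results:
    print(file_name, detection)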
Example #2
    def inference(self, image):
        # Requires torch, PIL.Image and module-level constants/helpers from the
        # surrounding project (DEVICE, IMG_X_MAX, SCORE_THRESHOLD, IOU_THRESHOLD,
        # post_process, untransform_bboxes, cxcywh_to_xywh).
        if self.type == 'frcnn':
            # convert image to torch tensor
            input = self.transform(image)
            # send input data to GPU
            input = input.to(DEVICE)
            # process inference and get detections
            detections = self.detector([input])
            boxes = detections[0]['boxes']
            confidence = detections[0]['scores']
            class_id = detections[0]['labels']
            self.result = self.filter_detection(boxes, confidence, class_id)

        if self.type == 'yolo':
            # convert image to torch tensor
            im = Image.fromarray(image)
            # Image.ANTIALIAS was removed in Pillow 10; LANCZOS is the equivalent filter.
            input = self.transform(
                im.resize((IMG_X_MAX, IMG_X_MAX), Image.LANCZOS))
            input = input.unsqueeze(0)
            # send input data to GPU
            input = input.to(DEVICE)
            # process inference and get detections
            with torch.no_grad():
                detections = self.detector(input)
            detections = post_process(detections, True, SCORE_THRESHOLD,
                                      IOU_THRESHOLD)
            for detection in detections:
                detection[..., :4] = untransform_bboxes(detection[..., :4])
                cxcywh_to_xywh(detection)
            boxes = detections[0][..., :4]
            self.result = boxes.detach().cpu().numpy()

        if self.type == 'sinet':
            # convert image to torch tensor
            input = self.transform(image)
            # send input data to GPU
            input = input.to(DEVICE)
            # process inference and get detections
            detections = self.detector([input])
            boxes = detections[0]['boxes']
            confidence = detections[0]['scores']
            class_id = detections[0]['labels']
            self.result = self.filter_detection(boxes, confidence, class_id)  # as in the frcnn branch
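Both detection examples rely on cxcywh_to_xywh to convert boxes from (center-x, center-y, width, height) to (top-left-x, top-left-y, width, height) in place. The helper itself is not shown above; the following is a minimal sketch written for illustration, not the project's implementation.

import torch

def cxcywh_to_xywh_sketch(boxes):
    # In-place conversion: x_min = cx - w / 2, y_min = cy - h / 2;
    # the width and height columns are left untouched.
    boxes[..., 0] -= boxes[..., 2] / 2
    boxes[..., 1] -= boxes[..., 3] / 2
    return boxes

# A single box centred at (50, 50) with size 20x10:
print(cxcywh_to_xywh_sketch(torch.tensor([[50.0, 50.0, 20.0, 10.0]])))
# tensor([[40., 45., 20., 10.]])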
Example #3
import numpy as np

# post_process (presumably thresholding plus minimum-size filtering) is a
# helper from the surrounding project and is assumed to be importable.


def dice_multi_minsize(pred, mask, threshold, minsizes):
    # e.g. minsizes = np.arange(0, 4001, 200)
    pred = pred.detach().cpu().numpy()
    mask = mask.detach().cpu().numpy()
    assert pred.shape == mask.shape

    num_minsizes = minsizes.size
    num_classes = pred.shape[1]
    batch_size = pred.shape[0]

    dice = np.zeros((num_classes, num_minsizes))
    sigma = 1  # smoothing term to avoid division by zero

    for b in range(batch_size):
        for c in range(num_classes):
            pred_layer = pred[b][c]
            mask_layer = mask[b][c].astype(np.float32)
            for min_idx, minsize in enumerate(minsizes):
                pred_layer_post, _ = post_process(pred_layer, threshold, minsize)
                pred_layer_post = pred_layer_post.astype(np.float32)
                dice[c][min_idx] += (
                    (2 * (pred_layer_post * mask_layer).sum() + sigma)
                    / (pred_layer_post.sum() + mask_layer.sum() + sigma))
    return dice / batch_size
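A quick smoke test for dice_multi_minsize might look like the following; the shapes, threshold and minsizes are arbitrary, and the project's post_process helper must be importable for the call to succeed.

import numpy as np
import torch

# Random predictions and binary masks of shape (batch, classes, H, W).
pred = torch.rand(2, 3, 64, 64)
mask = (torch.rand(2, 3, 64, 64) > 0.5).float()

minsizes = np.arange(0, 401, 200)  # candidate minimum component sizes
dice = dice_multi_minsize(pred, mask, threshold=0.5, minsizes=minsizes)
print(dice.shape)  # (num_classes, num_minsizes) -> (3, 3)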