Example #1
def get_detection_ts(detection, target):
    # score the first image in the batch: keep positive-score predictions,
    # convert them to corner format, and compare against the ground truth
    detection = detection[0]
    true_index = detection['scores'] > 0.
    detection = detection['boxes'][true_index].cpu()

    if len(detection) == 0:
        return 0.

    pred_detection = []
    for i in range(len(detection)):
        min_x = detection[i][0].item()
        min_y = detection[i][1].item()
        max_x = detection[i][2].item()
        max_y = detection[i][3].item()

        # [xmin, ymin, xmax, ymax] -> [2, 4] corner format (x row, y row)
        pred_box = torch.tensor([[max_x, max_x, min_x, min_x],
                                 [max_y, min_y, max_y, min_y]])
        pred_detection.append(pred_box)

    pred_detection = torch.stack(pred_detection).unsqueeze(0)
    pred_detection[:, :, 0, :] = pred_detection[:, :, 0, :] * 2
    pred_detection[:, :, 1, :] = pred_detection[:, :, 1, :] * 2

    ats_bounding_boxes = compute_ats_bounding_boxes(pred_detection[0],
                                                    target[0]['boxes'])
    return ats_bounding_boxes
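
get_detection_ts above expects detection in the torchvision detection format (a list with one dict per image holding 'boxes' as [N, 4] [xmin, ymin, xmax, ymax] tensors and 'scores') and target as a list of dicts whose 'boxes' entry is already in the [M, 2, 4] corner format used by compute_ats_bounding_boxes. A minimal usage sketch with made-up toy values, assuming the competition's compute_ats_bounding_boxes helper is importable:

import torch

# toy detection in the torchvision format; the box is chosen so that, after
# the *2 rescaling above, it matches the ground truth exactly
detection = [{
    'boxes': torch.tensor([[10.0, 12.0, 14.0, 16.0]]),    # [xmin, ymin, xmax, ymax]
    'scores': torch.tensor([0.9]),
}]
target = [{
    'boxes': torch.tensor([[[28.0, 28.0, 20.0, 20.0],     # x corners
                            [32.0, 24.0, 32.0, 24.0]]])   # y corners
}]
print(get_detection_ts(detection, target))
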
Example #2
    def validate(self, predictions, inputs, conf_thresh=0.5):
        """compute average threat score"""

        ats = 0

        batch_size = predictions.shape[0]

        for element in range(batch_size):
            target = inputs[element]['bounding_box']
            predict = self.decode(predictions[element],
                                  conf_thresh=conf_thresh)

            ats += compute_ats_bounding_boxes(target.cpu(), predict.cpu())

        return ats
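
Note that validate accumulates the per-sample threat scores without dividing by the batch size, so the return value is a sum. A caller that wants the batch average can divide by the number of samples (evaluator here is just a placeholder for whatever object defines the method):

total_ats = evaluator.validate(predictions, inputs, conf_thresh=0.5)
avg_ats = total_ats / predictions.shape[0]
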
Example #3
                                               num_workers=0)

model_loader = ModelLoader()

total = 0
total_ats_bounding_boxes = 0
total_ts_road_map = 0
with torch.no_grad():
    for i, data in enumerate(dataloader_task1):
        total += 1
        sample, target, road_image = data
        sample = sample.cuda()

        predicted_bounding_boxes = model_loader.get_bounding_boxes(
            sample)[0].cpu()
        ats_bounding_boxes, iou_max = compute_ats_bounding_boxes(
            predicted_bounding_boxes, target['bounding_box'][0])
        total_ats_bounding_boxes += ats_bounding_boxes

        if opt.verbose:
            print(f'{i} - Bounding Box Score: {ats_bounding_boxes:.4}')

    for i, data in enumerate(dataloader_task2):
        sample, target, road_image = data
        sample = sample.cuda()

        predicted_road_map = model_loader.get_binary_road_map(sample).cpu()
        ts_road_map = compute_ts_road_map(predicted_road_map, road_image)
        total_ts_road_map += ts_road_map

        if opt.verbose:
            print(f'{i} - Road Map Score: {ts_road_map:.4}')
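
The excerpt above only accumulates the totals; a natural continuation (not shown in the excerpt, and assuming both dataloaders yield the same number of samples so that total applies to both) reports the per-sample averages:

print(f'Bounding Box Score: {total_ats_bounding_boxes / total:.4}')
print(f'Road Map Score: {total_ts_road_map / total:.4}')
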
Example #4
def batched_coor_threat(anchor_boxes,
                        gt_offsets,
                        target,
                        gt_classes,
                        batch_sz,
                        nms_threshold=0.1,
                        plot=False):
    # decode the offsets into corner coordinates and accumulate the
    # threat score for every sample in the batch
    batch_coor = []
    batched_threat_sum = 0

    for i in range(batch_sz):
        # NOTE: the input tensors are overwritten here, so iterations after
        # i == 0 index into already-sliced tensors; the *_updated variant
        # below keeps copies of the originals instead.
        anchor_boxes = anchor_boxes[i]
        gt_offsets = gt_offsets[i]
        if i == 0:
            target = torch.from_numpy(target[0])
        #target = torch.from_numpy(target[i])
        gt_classes = gt_classes[i]

        # keep only anchors assigned to a non-background class
        inds = (gt_classes != 0)
        anchor_boxes = anchor_boxes[inds]
        gt_offsets = gt_offsets[inds]
        gt_classes = gt_classes[inds]

        # target offset
        delta_x = gt_offsets[:, 0]
        delta_y = gt_offsets[:, 1]
        delta_scaleX = gt_offsets[:, 2]
        delta_scaleY = gt_offsets[:, 3]
        gt_widths = anchor_boxes[:, 2] - anchor_boxes[:, 0]
        gt_heights = anchor_boxes[:, 3] - anchor_boxes[:, 1]
        gt_center_x = anchor_boxes[:, 0] + 0.5 * gt_widths
        gt_center_y = anchor_boxes[:, 1] + 0.5 * gt_heights

        ex_width = gt_widths / torch.exp(delta_scaleX)
        ex_height = gt_heights / torch.exp(delta_scaleY)
        ex_center_x = gt_center_x - delta_x * ex_width
        ex_center_y = gt_center_y - delta_y * ex_height

        ex1 = ex_center_x - 0.5 * ex_width
        ex2 = ex_center_x + 0.5 * ex_width
        ey1 = ex_center_y - 0.5 * ex_height
        ey2 = ex_center_y + 0.5 * ex_height
        #print(ex1.shape)

        pred_boxes = torch.cat([
            ex1.unsqueeze(0),
            ey1.unsqueeze(0),
            ex2.unsqueeze(0),
            ey2.unsqueeze(0)
        ],
                               dim=0).permute(1, 0)
        pred_boxes = pred_boxes.type(torch.float32)
        gt_classes = gt_classes.type(torch.float32)
        target = target.type(torch.float32)
        # pred_boxes: [N, 4], gt_classes: [N]
        inds = nms(pred_boxes, gt_classes, nms_threshold)
        pred_boxes = pred_boxes[inds]
        coordinate_list = []

        # convert each [x1, y1, x2, y2] box into the [2, 4] corner format
        # expected by compute_ats_bounding_boxes
        for box in pred_boxes:
            x1, y1, x2, y2 = box[0], box[1], box[2], box[3]
            coordinate_list.append(
                torch.tensor([x2, x2, x1, x1, y2, y1, y2, y1]).view(-1, 4))

        coordinate_list = torch.stack(coordinate_list)
        batched_threat_sum += compute_ats_bounding_boxes(
            coordinate_list, target)

        #print(coordinate_list.shape)
        batch_coor.append(coordinate_list)
        #print(len(batch_coor))
    #final_coor = torch.stack(batch_coor)

    # if plot:
    #   fig,ax = plt.subplots(1)
    #   a = torch.zeros(800,800)
    #   ax.imshow(a)
    #   for box in pred_boxes:
    #     x1, y1, x2, y2 = box[0], box[1], box[2], box[3]
    #     rect = patches.Rectangle((x1,y1),abs(x1 - x2),abs(y1 - y2),linewidth=1,edgecolor='r',facecolor='none')
    #     ax.add_patch(rect)

    # plt.show()

    return batch_coor, batched_threat_sum
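
Examples #1, #4, #5 and #8 all rebuild the same [2, 4] corner tensor from an axis-aligned [x1, y1, x2, y2] box inline. A small helper that factors this out (the function name is made up, it is not defined in any of these examples):

import torch

def xyxy_to_corners(box):
    # box: [x1, y1, x2, y2] -> [2, 4] tensor with an x row and a y row,
    # columns ordered the same way as in the loops above
    x1, y1, x2, y2 = box[0], box[1], box[2], box[3]
    return torch.tensor([[x2, x2, x1, x1],
                         [y2, y1, y2, y1]])

# e.g. corners = torch.stack([xyxy_to_corners(b) for b in pred_boxes])  # [N, 2, 4]
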
Example #5
def batched_coor_threat_updated(ite,
                                predicted_offsets,
                                anchor_boxes,
                                target,
                                gt_classes,
                                batch_sz,
                                nms_threshold=0.1,
                                plot=False):
    # Same decoding as batched_coor_threat above, but the original inputs are
    # preserved across iterations and the boxes are converted from pixels to
    # meters before scoring.
    batch_coor = []
    batched_threat_sum = 0
    original_anchor = anchor_boxes.clone()
    original_predicted_offsets = predicted_offsets.clone()
    original_gt_classes = gt_classes.clone()

    for i in range(batch_sz):
        anchor_boxes = original_anchor
        predicted_offsets = original_predicted_offsets[i]
        if i == 0:
            cur_target = torch.from_numpy(target[0])
        else:
            cur_target = torch.from_numpy(target[1])  # assumes a batch size of 2

        gt_classes = original_gt_classes[i]

        inds = (gt_classes != 0)
        anchor_boxes = anchor_boxes[inds]
        predicted_offsets = predicted_offsets[inds]
        gt_classes = gt_classes[inds]

        delta_x = predicted_offsets[:, 0]
        delta_y = predicted_offsets[:, 1]
        delta_scaleX = predicted_offsets[:, 2]
        delta_scaleY = predicted_offsets[:, 3]

        gt_widths = anchor_boxes[:, 2] - anchor_boxes[:, 0]
        gt_heights = anchor_boxes[:, 3] - anchor_boxes[:, 1]
        gt_center_x = anchor_boxes[:, 0] + 0.5 * gt_widths
        gt_center_y = anchor_boxes[:, 1] + 0.5 * gt_heights

        ex_width = gt_widths / torch.exp(delta_scaleX)
        ex_height = gt_heights / torch.exp(delta_scaleY)
        ex_center_x = gt_center_x - delta_x * ex_width
        ex_center_y = gt_center_y - delta_y * ex_height

        ex1 = ex_center_x - 0.5 * ex_width
        ex2 = ex_center_x + 0.5 * ex_width
        ey1 = ex_center_y - 0.5 * ex_height
        ey2 = ex_center_y + 0.5 * ex_height

        pred_boxes = torch.cat([
            ex1.unsqueeze(0),
            ey1.unsqueeze(0),
            ex2.unsqueeze(0),
            ey2.unsqueeze(0)
        ],
                               dim=0).permute(1, 0)
        pred_boxes = pred_boxes.type(torch.float32)
        gt_classes = gt_classes.type(torch.float32)
        cur_target = cur_target.type(torch.float32)

        inds = nms(pred_boxes, gt_classes, nms_threshold)
        pred_boxes = pred_boxes[inds]
        coordinate_list = []

        # fig,ax = plt.subplots(1)
        # a = torch.zeros(800,800)
        # ax.imshow(a)
        # for box in pred_boxes:
        #   x1, y1, x2, y2 = box[0], box[1], box[2], box[3]
        #   rect = patches.Rectangle((x1,y1),abs(x1 - x2),abs(y1 - y2),linewidth=1,edgecolor='r',facecolor='none')
        #   ax.add_patch(rect)

        # plt.savefig('/content/drive/My Drive/self_dl/predict_{}_{}.png'.format(ite, i))

        # shift by the 400-pixel image center and divide by 10 pixels per
        # meter (y axis flipped) to go from pixel indices to meters, then
        # build the [2, 4] corner format expected by compute_ats_bounding_boxes
        for box in pred_boxes:
            x1, y1, x2, y2 = box[0], box[1], box[2], box[3]
            x1 = (x1 - 400) / 10
            x2 = (x2 - 400) / 10
            y1 = (y1 - 400) / -10
            y2 = (y2 - 400) / -10
            coordinate_list.append(
                torch.tensor([x2, x2, x1, x1, y2, y1, y2, y1]).view(-1, 4))

        coordinate_list = torch.stack(coordinate_list)
        batched_threat_sum += compute_ats_bounding_boxes(
            coordinate_list, cur_target)
        batch_coor.append(coordinate_list)
        #visActual(cur_target, ite,i)
    return batch_coor, batched_threat_sum
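
Examples #5 and #8 convert box coordinates from indices in the 800x800 top-down image to meters by shifting by the 400-pixel center and dividing by 10 pixels per meter, with the y axis flipped. The same mapping as a vectorised helper (a sketch with a made-up name, mirroring the per-corner arithmetic above):

import torch

def pixels_to_meters(boxes, center=400.0, pixels_per_meter=10.0):
    # boxes: [N, 4] tensor of [x1, y1, x2, y2] in pixel coordinates
    x = (boxes[:, [0, 2]] - center) / pixels_per_meter
    y = (boxes[:, [1, 3]] - center) / -pixels_per_meter   # y axis points the other way
    return torch.stack([x[:, 0], y[:, 0], x[:, 1], y[:, 1]], dim=1)
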
Example #6
    def evaluate(self, debug=False):
        """
        Evaluation model on task.
        
        ---------------------
        Returns:
        Average validation loss
        """
        # puts model in evaluation mode
        self.model.eval()

        avg_road_ts = 0
        avg_box_ats = 0

        pred_boxes = []
        pred_roads = []

        road_ts = []
        box_ats = []

        # stop gradient tracking
        with torch.no_grad():
            for i, batch in enumerate(
                    tqdm(self.val_dataloader,
                         desc="Validation",
                         mininterval=30)):
                inputs = self.pack_input(batch)
                # {"images": batch[0],
                #  "box_targets": batch[1],
                #  "road_targets": batch[2]}

                out = self.model(**inputs)
                # out[0]: loss
                # out[1]: predicted bounding boxes (during evaluation; otherwise an empty list)
                # out[2]: predicted road map
                # out[3]: road_loss
                # out[4]: box_loss (during training; otherwise 0)

                pred_boxes = out[1]
                pred_roads = out[2]

                for road_map1, road_map2 in zip(pred_roads,
                                                inputs['road_targets']):
                    road_ts.append(
                        helper.compute_ts_road_map(road_map1,
                                                   road_map2).item())

                for boxes1, boxes2 in zip(pred_boxes, inputs['box_targets']):
                    box_ats.append(
                        helper.compute_ats_bounding_boxes(
                            boxes1, boxes2['bounding_box']).item())

                if i == 0 and debug:
                    log.info('Debug')
                    break

        if debug:
            log.info(f"Predicted roads is {pred_roads}")
            log.info(f"True roads is {inputs['road_targets']}")
            log.info(f"Road ts array is {road_ts}")
            log.info(f"Predicted roads is {pred_boxes}")
            log.info(f"True roads is {inputs['box_targets']}")
            log.info(f"Box ats array is {box_ats}")

        if len(road_ts) > 0:
            avg_road_ts = mean(road_ts)

        if len(box_ats) > 0:
            avg_box_ats = mean(box_ats)

        return avg_road_ts, avg_box_ats
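
helper.compute_ts_road_map itself is not shown in these examples. Conceptually it is the threat score of two binary road maps, TP / (TP + FP + FN), which reduces to an intersection-over-union of the maps; a rough sketch of that computation (an assumption about the helper, not its exact implementation):

import torch

def ts_road_map(pred_map, true_map):
    # pred_map and true_map are binary {0, 1} tensors of the same shape
    tp = (pred_map * true_map).sum()
    return tp / (pred_map.sum() + true_map.sum() - tp)
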
Example #7
        val_losses = []
        threat_score = []
        for i, (sample, target, road_img) in enumerate(valloader):

            sample = torch.stack(sample).to(device)

            with torch.no_grad():
                pred_map, loss = model(sample.to(device),
                                       process_target(target).to(device))
                predicted_bounding_boxes = model.get_bounding_boxes(
                    sample.to(device))
                val_losses.append(loss.item())

                for j in range(batch_size):
                    score = compute_ats_bounding_boxes(
                        predicted_bounding_boxes[0][j].to(device),
                        target[j]['bounding_box'].to(device))
                    threat_score.append(score.item())

            if i % 50 == 0:
                print(
                    'Val Epoch: {} [{}/{} ({:.0f}%)]\tAverage Loss So Far: {:.6f}'
                    .format(ep, i * len(sample), len(valloader.dataset),
                            100. * i / len(valloader), np.mean(val_losses)))

        print("Average Validation Epoch Loss: ", np.mean(val_losses))
        print("Average Threat Score: {} ".format(np.mean(threat_score)))
        if np.mean(val_losses) < best_val_loss:
            best_val_loss = np.mean(val_losses)
            torch.save(model.state_dict(), 'test.pt')
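
Example #7 checkpoints on the best validation loss; since the competition metric is the threat score, a variant (not what the example does) would keep the weights with the highest mean threat score instead:

mean_ts = np.mean(threat_score)
if mean_ts > best_threat_score:   # best_threat_score initialised to 0 beforehand
    best_threat_score = mean_ts
    torch.save(model.state_dict(), 'best_ts.pt')
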
Example #8
def batched_coor_threat_updated_retina(ite,
                                       encoder,
                                       predicted_offsets,
                                       target,
                                       gt_classes,
                                       batch_sz,
                                       nms_threshold=0.1,
                                       plot=False):
    # decode RetinaNet-style offsets with the provided encoder, convert the
    # boxes to meters, and accumulate the threat score over the batch
    h = 256
    w = 206
    batch_coor = []
    batched_threat_sum = 0
    original_predicted_offsets = predicted_offsets.clone()
    original_gt_classes = gt_classes.clone()

    for i in range(batch_sz):
        predicted_offsets = original_predicted_offsets[i]
        if i == 0:
            cur_target = torch.from_numpy(target[0])
        else:
            cur_target = torch.from_numpy(target[1])  # assumes a batch size of 2

        gt_classes = original_gt_classes[i]

        # decode the offsets into boxes (no filtering applied)
        boxes, labels = encoder.decode(predicted_offsets.data.squeeze(),
                                       gt_classes.data.squeeze(), (w, h))

        # decoded box columns: ex1, ex2, ey1, ey2
        ex1 = boxes[:, 0]
        ex2 = boxes[:, 1]
        ey1 = boxes[:, 2]
        ey2 = boxes[:, 3]

        pred_boxes = torch.cat([
            ex1.unsqueeze(0),
            ey1.unsqueeze(0),
            ex2.unsqueeze(0),
            ey2.unsqueeze(0)
        ],
                               dim=0).permute(1, 0)
        pred_boxes = pred_boxes.type(torch.float32)
        gt_classes = gt_classes.type(torch.float32)
        cur_target = cur_target.type(torch.float32)

        coordinate_list = []

        # shift by the 400-pixel image center and divide by 10 pixels per
        # meter (y axis flipped), then build the [2, 4] corner format
        for box in pred_boxes:
            x1, y1, x2, y2 = box[0], box[1], box[2], box[3]
            x1 = (x1 - 400) / 10
            x2 = (x2 - 400) / 10
            y1 = (y1 - 400) / -10
            y2 = (y2 - 400) / -10
            coordinate_list.append(
                torch.tensor([x2, x2, x1, x1, y2, y1, y2, y1]).view(-1, 4))

        coordinate_list = torch.stack(coordinate_list)
        batched_threat_sum += compute_ats_bounding_boxes(
            coordinate_list, cur_target)
        batch_coor.append(coordinate_list)

    return batch_coor, batched_threat_sum
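
None of the examples include compute_ats_bounding_boxes itself. As a rough sketch of the underlying metric (an assumption about the helper, not its actual code): at a single IoU threshold a predicted box counts as a true positive if its best IoU with any ground-truth box exceeds the threshold, the threat score is TP / (TP + FP + FN), and the 'average' in the name presumably refers to averaging this over several thresholds.

import torch

def threat_score_at_threshold(iou_matrix, threshold=0.5):
    # iou_matrix: [num_pred, num_gt] pairwise IoU values, however they are computed
    num_pred, num_gt = iou_matrix.shape
    tp = (iou_matrix.max(dim=1)[0] > threshold).sum().item()
    # FP = num_pred - tp and FN = num_gt - tp, so TP + FP + FN = num_pred + num_gt - tp
    return tp / (num_pred + num_gt - tp)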