def measure_occlusion(bbox, trackedObjects, cropSize=CROP_SIZE, cropPad=CROP_PAD):
    """Return the fraction of the padded crop around bbox covered by objects.

    Args:
        bbox: [x1, y1, x2, y2] box of interest.
        trackedObjects: iterable of objects exposing get_bbox() in the same
            coordinate frame as bbox.
        cropSize: side length in pixels of the square occupancy mask.
        cropPad: padding factor applied to bbox before measuring.

    Returns:
        float in [0, 1]: occupied pixels / total pixels of the mask.
    """
    # Occupancy mask over the padded crop.
    # BUGFIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin bool is the documented replacement dtype.
    image = np.zeros((int(cropSize), int(cropSize)), dtype=bool)
    fullBBox = scale_bbox(bbox, cropPad)
    fullBBoxXYWH = xyxy_to_xywh(fullBBox)
    # Clamp width/height to >= 1 so the divisions below never hit zero.
    fullBBoxXYWH[[2, 3]] = np.maximum(fullBBoxXYWH[[2, 3]], 1)
    # Rasterize every sufficiently-overlapping object into the mask.
    for obj in trackedObjects:
        boxPos = obj.get_bbox()
        # Skip objects that barely touch the padded crop.
        if IOU(boxPos, fullBBox) < 0.001:
            continue
        # Object box relative to the crop origin, clipped to the crop
        # extent, then scaled into mask-pixel coordinates.
        cropCoords = np.clip(boxPos - fullBBox[[0, 1, 0, 1]], 0,
                             fullBBoxXYWH[[2, 3, 2, 3]])
        cropCoords *= cropSize * 1.0 / fullBBoxXYWH[[2, 3, 2, 3]]
        cropCoords = np.clip(np.round(cropCoords), 0, cropSize).astype(int)
        # Degenerate (sub-pixel) boxes are widened by one pixel per side so
        # they still mark at least one mask cell.
        if (cropCoords[2] - cropCoords[0] < 1 or
                cropCoords[3] - cropCoords[1] < 1):
            cropCoords[[0, 1]] = np.clip(cropCoords[[0, 1]] - 1, 0,
                                         cropSize).astype(int)
            cropCoords[[2, 3]] = np.clip(cropCoords[[2, 3]] + 1, 0,
                                         cropSize).astype(int)
        image[cropCoords[1]:cropCoords[3], cropCoords[0]:cropCoords[2]] = True
    return np.count_nonzero(image) * 1.0 / image.size
def fix_bbox_intersection(self, bbox, gtBox, imageWidth, imageHeight):
    """Pull bbox toward gtBox until its padded version covers enough of gtBox.

    imageWidth and imageHeight are accepted for interface compatibility but
    are not used by this implementation.
    """
    if type(bbox) == list:
        bbox = np.array(bbox)
    if type(gtBox) == list:
        gtBox = np.array(gtBox)
    gtWidth = gtBox[2] - gtBox[0]
    gtHeight = gtBox[3] - gtBox[1]
    gtBoxArea = float(gtHeight * gtWidth)
    # Blend bbox 90/10 toward the ground truth until the CROP_PAD-scaled
    # box overlaps at least AREA_CUTOFF of the ground-truth area.
    while True:
        bboxLarge = bb_util.scale_bbox(bbox, CROP_PAD)
        coverage = IOU.intersection(bboxLarge, gtBox) / gtBoxArea
        if coverage >= AREA_CUTOFF:
            break
        bbox = bbox * 0.9 + gtBox * 0.1
    return bbox
def fix_bbox_intersection(bbox, gtBox):
    """Blend bbox toward gtBox until the padded bbox overlaps enough of it."""
    if type(bbox) == list:
        bbox = np.array(bbox)
    if type(gtBox) == list:
        gtBox = np.array(gtBox)
    # Work on a private copy so the caller's array is never touched.
    bbox = bbox.copy()
    gtBoxArea = float((gtBox[3] - gtBox[1]) * (gtBox[2] - gtBox[0]))
    bboxLarge = bb_util.scale_bbox(bbox, CROP_PAD)
    # Each pass moves bbox 10% of the way toward the ground-truth box.
    while IOU.intersection(bboxLarge, gtBox) / gtBoxArea < AREA_CUTOFF:
        bbox = 0.9 * bbox + 0.1 * gtBox
        bboxLarge = bb_util.scale_bbox(bbox, CROP_PAD)
    return bbox
def run_frame(im_on, gt_on):
    """Track one frame and update the global evaluation statistics.

    Args:
        im_on: index into the global imageNames list (path when DISPLAY,
            otherwise passed to the tracker directly).
        gt_on: row index into the global gt array. Columns 0-3 are the
            ground-truth box; columns 4-6 appear to identify the sequence
            and frame number -- inferred from the break check below, confirm
            against the data loader.

    Returns:
        results dict with gt_on, meanIou, robustness, lostTarget; when
        DISPLAY is set, the tuple (imPlots, titles, results) instead.
    """
    # Necessary for linking up global variables.
    global tracker
    global totalIou, numFrames
    global initialize
    global imageNames
    global ignoreFrames, initializeFrames, lostTarget
    titles = []
    # A new sequence begins when the identity columns (4, 5) change or the
    # frame counter (column 6) is not consecutive with the previous row.
    if gt_on == 0 or not (gt[gt_on, 4] == gt[gt_on - 1, 4] and
                          gt[gt_on, 5] == gt[gt_on - 1, 5] and
                          gt[gt_on, 6] - 1 == gt[gt_on - 1, 6]):
        print("beginning sequence", gt[gt_on, [5, 6]])
        # Clear the state if a new sequence has started.
        initialize = True
        ignoreFrames = 0
        initializeFrames = 0
    iou = 1
    robustness = 1
    gtBox = gt[gt_on, :4].copy()
    if DISPLAY:
        inputImageBGR = cv2.imread(imageNames[im_on])
        # cv2 loads BGR; the tracker is fed RGB.
        inputImage = inputImageBGR[:, :, ::-1]
        imageToDraw = inputImageBGR.copy()
        # Ground truth drawn in green.
        drawRect(imageToDraw, gtBox, PADDING * 2, [0, 255, 0])
    else:
        inputImage = imageNames[im_on]
    if ignoreFrames > 0:
        # Still skipping frames after a tracking loss.
        ignoreFrames -= 1
    else:
        if initialize:
            # (Re)start the tracker with the ground-truth box.
            outputBox = tracker.track("test_track", inputImage, gtBox)
            initialize = False
        else:
            outputBox = tracker.track("test_track", inputImage)
        if DISPLAY:
            # Tracker output drawn in red.
            drawRect(imageToDraw, outputBox, PADDING, [0, 0, 255])
        if initializeFrames == 0:
            iou = IOU(outputBox, gtBox)
            totalIou += iou
            if iou == 0:
                # Target lost: skip 5 frames, then re-init and exclude the
                # next 10 frames from scoring (VOT-style protocol).
                ignoreFrames = 5
                initializeFrames = 10
                lostTarget += 1
                initialize = True
            numFrames += 1
            robustness = np.exp(-30.0 * lostTarget / numFrames)
        else:
            # Re-initialization grace period: tracked but not scored.
            initializeFrames -= 1
    # max() guards the very first scored frame (numFrames may still be 0).
    meanIou = totalIou * 1.0 / max(numFrames, 1)
    if DISPLAY:
        # Touch two pixels to pin the display's normalization range.
        imageToDraw[0, 0] = 255
        imageToDraw[0, 1] = 0
        titles.append(
            "Frame %d, IOU %.2f, Mean IOU %.2f, Robustness %.2f, Dropped %d"
            % (gt_on, iou, meanIou, robustness, lostTarget))
        imPlots = [imageToDraw]
    results = {
        "gt_on": gt_on,
        "meanIou": meanIou,
        "robustness": robustness,
        "lostTarget": lostTarget,
    }
    if DISPLAY:
        return imPlots, titles, results
    else:
        return results
def is_occluded(self):
    """Return True if any occluder box overlaps this object's bounded bbox."""
    boxes = [occluder.get_bbox() for occluder in self.occluder_boxes]
    occluders = np.array(boxes)
    # No occluders at all -> trivially not occluded.
    if len(occluders) == 0:
        return False
    overlapCount = IOU.count_overlapping_boxes(occluders, self.get_bounded_bbox())
    return overlapCount > 0
def runFrame(self, imOn, gtOn):
    """Track one frame and update this evaluator's running statistics.

    Args:
        imOn: index into self.imageNames (image path when self.display,
            otherwise handed to the tracker as-is).
        gtOn: row index into self.gt. Columns 0-3 are the ground-truth box;
            columns 4-6 appear to identify the sequence and frame number --
            inferred from the break check below, confirm against the loader.

    Returns:
        results dict with gtOn, meanIou, robustness, lostTarget; when
        self.display is set, the tuple (imPlots, titles, results) instead.
    """
    titles = []
    # A new sequence begins when the identity columns (4, 5) change or the
    # frame counter (column 6) is not consecutive with the previous row.
    if (gtOn == 0 or not (
            self.gt[gtOn, 4] == self.gt[gtOn - 1, 4] and
            self.gt[gtOn, 5] == self.gt[gtOn - 1, 5] and
            self.gt[gtOn, 6] - 1 == self.gt[gtOn - 1, 6])):
        if PRINT:
            print('beginning sequence', self.gt[gtOn, [5, 6]])
        # Clear the state if a new sequence has started.
        self.initialize = True
        self.ignoreFrames = 0
        self.initializeFrames = 0
    iou = 1
    robustness = 1
    gtBox = self.gt[gtOn, :4].copy()
    if self.display:
        inputImageBGR = cv2.imread(self.imageNames[imOn])
        # cv2 loads BGR; the tracker is fed RGB.
        inputImage = inputImageBGR[:,:,::-1]
        imageToDraw = inputImageBGR.copy()
        # Ground truth drawn in green.
        drawRect(imageToDraw, gtBox, PADDING * 2, [0, 255, 0])
    else:
        inputImage = self.imageNames[imOn]
    if self.ignoreFrames > 0:
        # Still skipping frames after a tracking loss.
        self.ignoreFrames -= 1
    else:
        if self.initialize:
            # (Re)start the tracker with the ground-truth box.
            outputBox = self.tracker.track('test_track', inputImage, gtBox)
            self.initialize = False
        else:
            outputBox = self.tracker.track('test_track', inputImage)
        if self.display:
            # Tracker output drawn in red.
            drawRect(imageToDraw, outputBox, PADDING, [0, 0, 255])
        if self.initializeFrames == 0:
            iou = IOU(outputBox, gtBox)
            self.totalIou += iou
            if iou == 0:
                # Target lost: skip 5 frames, then re-init and exclude the
                # next 10 frames from scoring (VOT-style protocol).
                self.ignoreFrames = 5
                self.initializeFrames = 10
                self.lostTarget += 1
                self.initialize = True
            self.numFrames += 1
            robustness = np.exp(-30.0 * self.lostTarget / self.numFrames)
        else:
            # Re-initialization grace period: tracked but not scored.
            self.initializeFrames -= 1
    # max() guards the very first scored frame (numFrames may still be 0).
    meanIou = self.totalIou * 1.0 / max(self.numFrames, 1)
    if self.display:
        # Touch two pixels to pin the display's normalization range.
        imageToDraw[0,0] = 255
        imageToDraw[0,1] = 0
        titles.append(
            'Frame %d, IOU %.2f, Mean IOU %.2f, Robustness %.2f, Dropped %d' %
            (gtOn, iou, meanIou, robustness, self.lostTarget))
        imPlots = [imageToDraw]
    results = {
        'gtOn' : gtOn,
        'meanIou' : meanIou,
        'robustness' : robustness,
        'lostTarget' : self.lostTarget,
    }
    if self.display:
        return (imPlots, titles, results)
    else:
        return results
def render_patch(bbox, background, trackedObjects, cropSize=CROP_SIZE, cropPad=CROP_PAD):
    """Render a cropSize x cropSize synthetic RGB patch centered on bbox.

    The background image is stretched to fill the padded crop, then every
    tracked object whose box overlaps the crop is pasted on top with
    feathered (alpha-blended) edges.

    Args:
        bbox: [x1, y1, x2, y2] box of interest.
        background: HxWx3 uint8 image filling the whole scene.
        trackedObjects: objects exposing get_bbox() and a .texture image.
        cropSize: output patch side length in pixels.
        cropPad: padding factor applied around bbox.

    Returns:
        cropSize x cropSize x 3 uint8 image.
    """
    bboxXYWH = xyxy_to_xywh(bbox)
    image = np.zeros((int(cropSize), int(cropSize), 3), dtype=np.uint8)
    # Pad the box by scaling its width/height around the same center.
    fullBBoxXYWH = bboxXYWH.copy()
    fullBBoxXYWH[[2, 3]] *= cropPad
    fullBBox = xywh_to_xyxy(fullBBoxXYWH)
    fullBBoxXYWH = fullBBoxXYWH  # NOTE(review): no-op self-assignment, kept as-is.
    # Clamp width/height to >= 1 so the divisions below never hit zero.
    fullBBoxXYWH[[2, 3]] = np.maximum(fullBBoxXYWH[[2, 3]], 1)
    # First do background
    # Treat the whole background image as one box covering the scene.
    boxPos = np.array([0, 0, background.shape[1], background.shape[0]])
    boxPosXYWH = xyxy_to_xywh(boxPos)
    # Destination rectangle in patch pixels: scene box relative to the crop,
    # clipped to the crop, scaled to cropSize.
    cropCoords = np.clip(boxPos - fullBBox[[0, 1, 0, 1]], 0,
                         fullBBoxXYWH[[2, 3, 2, 3]])
    cropCoords *= (cropSize) * 1.0 / fullBBoxXYWH[[2, 3, 2, 3]]
    cropCoords = np.clip(np.round(cropCoords), 0, cropSize).astype(int)
    # Source rectangle in background pixels: the part of the background
    # visible inside the padded crop.
    textureCrop = np.zeros(4)
    textureCrop[0] = int(
        max(fullBBox[0] - boxPos[0], 0) * background.shape[1] * 1.0 / boxPosXYWH[2])
    textureCrop[1] = int(
        max(fullBBox[1] - boxPos[1], 0) * background.shape[0] * 1.0 / boxPosXYWH[3])
    textureCrop[2] = int(
        min((fullBBox[2] - boxPos[0]) * 1.0 / boxPosXYWH[2], 1) * background.shape[1])
    textureCrop[3] = int(
        min((fullBBox[3] - boxPos[1]) * 1.0 / boxPosXYWH[3], 1) * background.shape[0])
    # Degenerate source: fall back to a 1x1 sample.
    if (textureCrop[2] - textureCrop[0] < 1 or
            textureCrop[3] - textureCrop[1] < 1):
        textureCrop = [0, 0, 1, 1]
    textureCrop = np.round(textureCrop).astype(int)
    textureCrop[[0, 2]] = np.clip(textureCrop[[0, 2]], 0, background.shape[1])
    textureCrop[[1, 3]] = np.clip(textureCrop[[1, 3]], 0, background.shape[0])
    # Only paint when the destination is at least ~2 pixels in each axis.
    if cropCoords[3] > cropCoords[1] + 1 and cropCoords[2] > cropCoords[0] + 1:
        image[cropCoords[1]:cropCoords[3],
              cropCoords[0]:cropCoords[2], :] = (cv2.resize(
                  background[textureCrop[1]:textureCrop[3],
                             textureCrop[0]:textureCrop[2], :],
                  (cropCoords[2] - cropCoords[0],
                   cropCoords[3] - cropCoords[1])))
    # Now do all objects
    for obj in trackedObjects:
        boxPos = obj.get_bbox()
        boxPosXYWH = xyxy_to_xywh(boxPos)
        # Skip objects that barely touch the padded crop.
        if IOU(boxPos, fullBBox) < 0.001:
            continue
        cropCoords = np.zeros(4)
        # Destination rectangle for this object inside the patch.
        cropCoords = np.clip(boxPos - fullBBox[[0, 1, 0, 1]], 0,
                             fullBBoxXYWH[[2, 3, 2, 3]])
        cropCoords *= cropSize * 1.0 / fullBBoxXYWH[[2, 3, 2, 3]]
        cropCoords = np.clip(np.round(cropCoords), 0, cropSize).astype(int)
        # Widen degenerate (sub-pixel) destinations by one pixel per side.
        if (cropCoords[2] - cropCoords[0] < 1 or
                cropCoords[3] - cropCoords[1] < 1):
            cropCoords[[0, 1]] = np.clip(cropCoords[[0, 1]] - 1, 0,
                                         cropSize).astype(int)
            cropCoords[[2, 3]] = np.clip(cropCoords[[2, 3]] + 1, 0,
                                         cropSize).astype(int)
        # Source rectangle in the object's texture image: the portion of the
        # object visible inside the padded crop.
        textureCrop = np.zeros(4, dtype=int)
        textureCrop[0] = int(
            max(fullBBox[0] - boxPos[0], 0) * obj.texture.shape[1] * 1.0 / boxPosXYWH[2])
        textureCrop[1] = int(
            max(fullBBox[1] - boxPos[1], 0) * obj.texture.shape[0] * 1.0 / boxPosXYWH[3])
        textureCrop[2] = int(
            min((fullBBox[2] - boxPos[0]) * 1.0 / boxPosXYWH[2], 1) * obj.texture.shape[1])
        textureCrop[3] = int(
            min((fullBBox[3] - boxPos[1]) * 1.0 / boxPosXYWH[3], 1) * obj.texture.shape[0])
        # Degenerate source: fall back to a 2x2 sample.
        if (textureCrop[2] - textureCrop[0] < 1 or
                textureCrop[3] - textureCrop[1] < 1):
            textureCrop = [0, 0, 2, 2]
        textureCrop = np.round(textureCrop).astype(int)
        textureCrop[[0, 2]] = np.clip(textureCrop[[0, 2]], 0, obj.texture.shape[1])
        textureCrop[[1, 3]] = np.clip(textureCrop[[1, 3]], 0, obj.texture.shape[0])
        # Feathering
        # Blend the object texture over what is already rendered, weighted
        # by a resized slice of the global feather mask.
        currentIm = image[cropCoords[1]:cropCoords[3],
                          cropCoords[0]:cropCoords[2], :].astype(np.float32)
        newIm = cv2.resize(
            obj.texture[textureCrop[1]:textureCrop[3],
                        textureCrop[0]:textureCrop[2], :],
            (cropCoords[2] - cropCoords[0], cropCoords[3] - cropCoords[1]),
        ).astype(np.float32)
        if (cropCoords[2] - cropCoords[0] < 1 or
                cropCoords[3] - cropCoords[1] < 1):
            # Nothing meaningful to blend; keep the existing pixels.
            featherWeightOn = 0
        else:
            # Same visible-portion mapping as textureCrop, but into the
            # feather weight array's coordinates.
            featherCrop = np.zeros(4)
            featherCrop[0] = int(
                max(fullBBox[0] - boxPos[0], 0) * FEATHER_WEIGHT_ARRAY.shape[1] * 1.0 /
                boxPosXYWH[2])
            featherCrop[1] = int(
                max(fullBBox[1] - boxPos[1], 0) * FEATHER_WEIGHT_ARRAY.shape[0] * 1.0 /
                boxPosXYWH[3])
            featherCrop[2] = int(
                min((fullBBox[2] - boxPos[0]) * 1.0 / boxPosXYWH[2], 1) *
                FEATHER_WEIGHT_ARRAY.shape[1])
            featherCrop[3] = int(
                min((fullBBox[3] - boxPos[1]) * 1.0 / boxPosXYWH[3], 1) *
                FEATHER_WEIGHT_ARRAY.shape[0])
            # Degenerate source: sample the center of the feather mask.
            if (featherCrop[2] - featherCrop[0] < 1 or
                    featherCrop[3] - featherCrop[1] < 1):
                featherCrop = [
                    int(CROP_SIZE / 2 - 1),
                    int(CROP_SIZE / 2 - 1),
                    int(CROP_SIZE / 2),
                    int(CROP_SIZE / 2)
                ]
            featherCrop = np.round(featherCrop).astype(int)
            featherCrop[[0, 2]] = np.clip(featherCrop[[0, 2]], 0,
                                          FEATHER_WEIGHT_ARRAY.shape[1])
            featherCrop[[1, 3]] = np.clip(featherCrop[[1, 3]], 0,
                                          FEATHER_WEIGHT_ARRAY.shape[0])
            # Normalize the uint8 mask into [0, 1] blend weights.
            featherWeightOn = cv2.resize(
                FEATHER_WEIGHT_ARRAY[featherCrop[1]:featherCrop[3],
                                     featherCrop[0]:featherCrop[2], :],
                (cropCoords[2] - cropCoords[0],
                 cropCoords[3] - cropCoords[1])).astype(np.float32) / 255.0
        # Alpha-composite the object over the current patch contents.
        image[cropCoords[1]:cropCoords[3], cropCoords[0]:cropCoords[2], :] = (
            (newIm * featherWeightOn +
             currentIm * (1 - featherWeightOn)).astype(np.uint8))
    return image