Example #1
    def tracking(self, verbose=True, video_path=None):
        poses = []
        init_frame = cv2.imread(self.frame_list[0])
        #print(init_frame.shape)
        init_gt = np.array(self.init_gt)
        x1, y1, w, h = init_gt
        init_gt = tuple(init_gt)
        self.tracker.init(init_frame, init_gt)
        writer = None
        if verbose is True and video_path is not None:
            writer = cv2.VideoWriter(
                video_path, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 30,
                (init_frame.shape[1], init_frame.shape[0]))

        for idx in range(len(self.frame_list)):
            if idx != 0:
                current_frame = cv2.imread(self.frame_list[idx])
                height, width = current_frame.shape[:2]
                start = time.time()
                bbox = self.tracker.update(current_frame, vis=verbose)
                print('track time', time.time() - start, current_frame.shape)
                x1, y1, w, h = bbox
                if verbose is True:
                    if len(current_frame.shape) == 2:
                        current_frame = cv2.cvtColor(current_frame,
                                                     cv2.COLOR_GRAY2BGR)
                    score = self.tracker.score
                    apce = APCE(score)
                    psr = PSR(score)
                    F_max = np.max(score)
                    size = self.tracker.crop_size
                    score = cv2.resize(score, size)
                    score -= score.min()
                    score = score / score.max()
                    score = (score * 255).astype(np.uint8)
                    # score = 255 - score
                    score = cv2.applyColorMap(score, cv2.COLORMAP_JET)
                    center = (int(x1 + w / 2), int(y1 + h / 2))
                    x0, y0 = center
                    x0 = np.clip(x0, 0, width - 1)
                    y0 = np.clip(y0, 0, height - 1)
                    center = (x0, y0)
                    xmin = int(center[0]) - size[0] // 2
                    xmax = int(center[0]) + size[0] // 2 + size[0] % 2
                    ymin = int(center[1]) - size[1] // 2
                    ymax = int(center[1]) + size[1] // 2 + size[1] % 2
                    left = abs(xmin) if xmin < 0 else 0
                    xmin = 0 if xmin < 0 else xmin
                    right = width - xmax
                    xmax = width if right < 0 else xmax
                    right = size[0] + right if right < 0 else size[0]
                    top = abs(ymin) if ymin < 0 else 0
                    ymin = 0 if ymin < 0 else ymin
                    down = height - ymax
                    ymax = height if down < 0 else ymax
                    down = size[1] + down if down < 0 else size[1]
                    score = score[top:down, left:right]
                    crop_img = current_frame[ymin:ymax, xmin:xmax]
                    score_map = cv2.addWeighted(crop_img, 0.6, score, 0.4, 0)
                    current_frame[ymin:ymax, xmin:xmax] = score_map
                    show_frame = cv2.rectangle(current_frame,
                                               (int(x1), int(y1)),
                                               (int(x1 + w), int(y1 + h)),
                                               (255, 0, 0), 1)
                    """
                    cv2.putText(show_frame, 'APCE:' + str(apce)[:5], (0, 250), cv2.FONT_HERSHEY_COMPLEX, 2,
                                (0, 0, 255), 5)
                    cv2.putText(show_frame, 'PSR:' + str(psr)[:5], (0, 300), cv2.FONT_HERSHEY_COMPLEX, 2,
                                (255, 0, 0), 5)
                    cv2.putText(show_frame, 'Fmax:' + str(F_max)[:5], (0, 350), cv2.FONT_HERSHEY_COMPLEX, 2,
                                (255, 0, 0), 5)
                    """

                    cv2.imshow('demo', show_frame)
                    if writer is not None:
                        writer.write(show_frame)
                    cv2.waitKey(1)

            poses.append(np.array([int(x1), int(y1), int(w), int(h)]))
        return np.array(poses)
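
All three examples evaluate the tracker's response map with APCE(score) and PSR(score), but those helpers are not part of the excerpts. The sketch below assumes the usual definitions (average peak-to-correlation energy and peak-to-sidelobe ratio); the actual implementations in these projects may differ, for example in the size of the excluded peak window or the normalization.

import numpy as np


def APCE(response):
    # Average peak-to-correlation energy:
    # |F_max - F_min|^2 / mean((F - F_min)^2).
    f_max, f_min = response.max(), response.min()
    return (f_max - f_min) ** 2 / (np.mean((response - f_min) ** 2) + 1e-8)


def PSR(response, exclude=5):
    # Peak-to-sidelobe ratio: peak height relative to the mean and standard
    # deviation of the sidelobe (everything outside a window around the peak).
    h, w = response.shape[:2]
    py, px = np.unravel_index(np.argmax(response), (h, w))
    mask = np.ones((h, w), dtype=bool)
    mask[max(0, py - exclude):py + exclude + 1,
         max(0, px - exclude):px + exclude + 1] = False
    sidelobe = response[mask]
    return (response.max() - sidelobe.mean()) / (sidelobe.std() + 1e-8)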
Example #2
File: main.py  Project: quasus/csrt-py
    def tracking(self, verbose=True, video_path=None):
        poses = []
        init_frame = cv2.imread(self.frame_list[0])
        init_gt = np.array(self.init_gt)
        x1, y1, w, h = init_gt
        init_gt = tuple(init_gt)
        self.tracker.init(init_frame, init_gt)
        writer = None
        if verbose is True and video_path is not None:
            writer = cv2.VideoWriter(
                video_path, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 30,
                (init_frame.shape[1], init_frame.shape[0]))

        fpss = []
        ious = []

        for i, (frame_name,
                gt) in enumerate(zip(self.frame_list[1:], self.gts[1:])):
            timer = cv2.getTickCount()
            current_frame = cv2.imread(frame_name)
            height, width = current_frame.shape[:2]
            bbox = self.tracker.update(current_frame, vis=verbose)

            if gt is not None:
                iou = IOU(bbox, gt)
                if iou < self.IOUmin:
                    break
                ious.append(iou)
                if i == 100:
                    print("Mean IOU (100 frames): {}".format(
                        sum(ious) / len(ious)))

            x1, y1, w, h = bbox
            if verbose:
                if len(current_frame.shape) == 2:
                    current_frame = cv2.cvtColor(current_frame,
                                                 cv2.COLOR_GRAY2BGR)
                score = self.tracker.score
                apce = APCE(score)
                psr = PSR(score)
                F_max = np.max(score)
                size = self.tracker.template_size
                score = cv2.resize(score, size)
                score -= score.min()
                score = score / score.max()
                score = (score * 255).astype(np.uint8)
                # score = 255 - score
                score = cv2.applyColorMap(score, cv2.COLORMAP_JET)
                center = (int(x1 + w / 2), int(y1 + h / 2))
                x0, y0 = center
                x0 = np.clip(x0, 0, width - 1)
                y0 = np.clip(y0, 0, height - 1)
                center = (x0, y0)
                xmin = int(center[0]) - size[0] // 2
                xmax = int(center[0]) + size[0] // 2 + size[0] % 2
                ymin = int(center[1]) - size[1] // 2
                ymax = int(center[1]) + size[1] // 2 + size[1] % 2
                left = abs(xmin) if xmin < 0 else 0
                xmin = 0 if xmin < 0 else xmin
                right = width - xmax
                xmax = width if right < 0 else xmax
                right = size[0] + right if right < 0 else size[0]
                top = abs(ymin) if ymin < 0 else 0
                ymin = 0 if ymin < 0 else ymin
                down = height - ymax
                ymax = height if down < 0 else ymax
                down = size[1] + down if down < 0 else size[1]
                score = score[top:down, left:right]
                crop_img = current_frame[ymin:ymax, xmin:xmax]
                score_map = cv2.addWeighted(crop_img, 0.6, score, 0.4, 0)
                current_frame[ymin:ymax, xmin:xmax] = score_map
                show_frame = cv2.rectangle(current_frame, (int(x1), int(y1)),
                                           (int(x1 + w), int(y1 + h)),
                                           (255, 0, 0), 1)
                if gt is not None:
                    # Draw the ground-truth box in green alongside the tracker box.
                    x0, y0, w0, h0 = gt
                    show_frame = cv2.rectangle(show_frame, (int(x0), int(y0)),
                                               (int(x0 + w0), int(y0 + h0)),
                                               (0, 255, 0), 1)
                fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)
                fpss.append(fps)
                cv2.putText(show_frame, "FPS : " + str(int(fps)), (100, 50),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
                """
                cv2.putText(show_frame, 'APCE:' + str(apce)[:5], (0, 250), cv2.FONT_HERSHEY_COMPLEX, 2,
                            (0, 0, 255), 5)
                cv2.putText(show_frame, 'PSR:' + str(psr)[:5], (0, 300), cv2.FONT_HERSHEY_COMPLEX, 2,
                            (255, 0, 0), 5)
                cv2.putText(show_frame, 'Fmax:' + str(F_max)[:5], (0, 350), cv2.FONT_HERSHEY_COMPLEX, 2,
                            (255, 0, 0), 5)
                """

                cv2.imshow('demo', show_frame)
                if writer is not None:
                    writer.write(show_frame)
                cv2.waitKey(1)

            # Record the estimated box for this frame.
            poses.append(np.array([int(x1), int(y1), int(w), int(h)]))
        print("FPS: " + str(sum(fpss) / len(fpss)))
        print("Lost after {} frames".format(len(ious)))
        return np.array(poses)
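
Example #2 additionally compares each predicted box with the ground truth via IOU(bbox, gt) and stops once the overlap drops below self.IOUmin. The helper is not shown in the excerpt; here is a minimal sketch, assuming both boxes use the same (x, y, w, h) convention as the rest of the code.

def IOU(bbox, gt):
    # Intersection over union of two axis-aligned (x, y, w, h) boxes.
    x1, y1, w1, h1 = bbox
    x2, y2, w2, h2 = gt
    iw = max(0, min(x1 + w1, x2 + w2) - max(x1, x2))
    ih = max(0, min(y1 + h1, y2 + h2) - max(y1, y2))
    inter = iw * ih
    union = w1 * h1 + w2 * h2 - inter
    return inter / union if union > 0 else 0.0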
Example #3
    def tracking(self, verbose=True, video_path=None):
        poses = []

        # Read the first frame of the sequence.
        init_frame = cv2.imread(self.frame_list[0])

        # print(init_frame.shape)
        init_gt = np.array(self.init_gt)

        # Top-left corner, width and height of the initial box.
        x1, y1, w, h = init_gt
        init_gt = tuple(init_gt)

        # Initialize the tracker on the first frame with the ground-truth box.
        self.tracker.init(init_frame, init_gt)
        writer = None

        # Write the visualization to a video file if a path was given.
        if verbose is True and video_path is not None:
            writer = cv2.VideoWriter(
                video_path, cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'), 30,
                (init_frame.shape[1], init_frame.shape[0]))

        # Iterate over the frame list (index 0 was used for initialization).
        for idx in range(len(self.frame_list)):
            if idx != 0:
                # Read the frame at the current index.
                current_frame = cv2.imread(self.frame_list[idx])
                height, width = current_frame.shape[:2]

                # Call the tracker's update method: given the current frame
                # (and a visualization flag) it returns the estimated bounding
                # box (x, y, w, h) for this frame.
                bbox = self.tracker.update(current_frame, vis=verbose)
                x1, y1, w, h = bbox

                if verbose is True:
                    if len(current_frame.shape) == 2:
                        # Convert grayscale frames to BGR so color drawing works.
                        current_frame = cv2.cvtColor(current_frame,
                                                     cv2.COLOR_GRAY2BGR)
                    score = self.tracker.score
                    apce = APCE(score)
                    psr = PSR(score)
                    F_max = np.max(score)
                    size = self.tracker.crop_size
                    score = cv2.resize(score, size)
                    score -= score.min()
                    score = score / score.max()
                    score = (score * 255).astype(np.uint8)
                    # score = 255 - score
                    score = cv2.applyColorMap(score, cv2.COLORMAP_JET)
                    center = (int(x1 + w / 2), int(y1 + h / 2))
                    x0, y0 = center
                    x0 = np.clip(x0, 0, width - 1)
                    y0 = np.clip(y0, 0, height - 1)
                    center = (x0, y0)
                    xmin = int(center[0]) - size[0] // 2
                    xmax = int(center[0]) + size[0] // 2 + size[0] % 2
                    ymin = int(center[1]) - size[1] // 2
                    ymax = int(center[1]) + size[1] // 2 + size[1] % 2
                    left = abs(xmin) if xmin < 0 else 0
                    xmin = 0 if xmin < 0 else xmin
                    right = width - xmax
                    xmax = width if right < 0 else xmax
                    right = size[0] + right if right < 0 else size[0]
                    top = abs(ymin) if ymin < 0 else 0
                    ymin = 0 if ymin < 0 else ymin
                    down = height - ymax
                    ymax = height if down < 0 else ymax
                    down = size[1] + down if down < 0 else size[1]
                    score = score[top:down, left:right]
                    crop_img = current_frame[ymin:ymax, xmin:xmax]
                    score_map = cv2.addWeighted(crop_img, 0.6, score, 0.4, 0)
                    current_frame[ymin:ymax, xmin:xmax] = score_map
                    show_frame = cv2.rectangle(current_frame,
                                               (int(x1), int(y1)),
                                               (int(x1 + w), int(y1 + h)),
                                               (255, 0, 0), 1)
                    """
                    cv2.putText(show_frame, 'APCE:' + str(apce)[:5], (0, 250), cv2.FONT_HERSHEY_COMPLEX, 2,
                                (0, 0, 255), 5)
                    cv2.putText(show_frame, 'PSR:' + str(psr)[:5], (0, 300), cv2.FONT_HERSHEY_COMPLEX, 2,
                                (255, 0, 0), 5)
                    cv2.putText(show_frame, 'Fmax:' + str(F_max)[:5], (0, 350), cv2.FONT_HERSHEY_COMPLEX, 2,
                                (255, 0, 0), 5)
                    """

                    cv2.imshow('demo', show_frame)
                    if writer is not None:
                        writer.write(show_frame)
                    cv2.waitKey(1)

            poses.append(np.array([int(x1), int(y1), int(w), int(h)]))
        return np.array(poses)
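
The xmin/xmax/left/right arithmetic repeated in every example exists only to paste the colour-mapped response onto the frame: it centres a window of the crop/template size on the target, clips it to the image borders, and crops the heat map by the same amount. The helper below is a compact equivalent, offered only as a sketch of that step (not a drop-in replacement for the code above).

import cv2


def paste_heatmap(frame, heatmap, center, alpha=0.4):
    # Blend a colour-mapped response (H_s x W_s x 3, uint8) onto `frame`,
    # centred at `center`, clipping both to the frame borders.
    H, W = frame.shape[:2]
    hs, ws = heatmap.shape[:2]
    cx, cy = center
    xmin, ymin = cx - ws // 2, cy - hs // 2
    xmax, ymax = xmin + ws, ymin + hs
    # How far the window sticks out of the frame on each side.
    left, top = max(0, -xmin), max(0, -ymin)
    right, down = ws - max(0, xmax - W), hs - max(0, ymax - H)
    xmin, ymin = max(0, xmin), max(0, ymin)
    xmax, ymax = min(W, xmax), min(H, ymax)
    crop = frame[ymin:ymax, xmin:xmax]
    frame[ymin:ymax, xmin:xmax] = cv2.addWeighted(
        crop, 1 - alpha, heatmap[top:down, left:right], alpha, 0)
    return frame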