Example no. 1
    def run(self):
        self.check_validity_input()
        self.set_capture_object()
        self.logger.info("running the program")
        with open(self._filename, 'w') as file:
            # Select boxes
            bboxes = []
            self.logger.info(HOW_TO)

            # OpenCV's selectROI function doesn't work for selecting multiple objects in Python
            # So we will call this function in a loop till we are done selecting all objects
            while True:
                # draw bounding boxes over objects
                # selectROI's default behaviour is to draw box starting from the center
                # when fromCenter is set to false, you can draw box starting from top left corner
                bbox = cv2.selectROI('MultiTracker', self._frame)
                bboxes.append(bbox)
                k = cv2.waitKey(0) & 0xFF
                if k == 113:  # q is pressed
                    if len(bboxes) > 2:
                        break
                    else:
                        self.logger.error("\n--At least three objects have to be marked to continue--\n")
            cv2.destroyAllWindows()
            if len(bboxes) % 2 == 0:
                self.logger.error("\n--Illegal input. Please try again.--\n")
                sys.exit(1)

            self.logger.info('Selected bounding boxes {}'.format(bboxes))

            # Create MultiTracker object
            multiTracker = cv2.MultiTracker_create()

            # Initialize MultiTracker
            for bbox in bboxes:
                multiTracker.add(cv2.TrackerCSRT_create(), self._frame, bbox)

            # Write the number of bounding boxes the user chose
            file.write(str(len(bboxes)) + "\n")
            firstFrame = True
            tic = time.perf_counter()
            # Process video and track objects
            while self._cap.isOpened():
                success, frame = self._cap.read()
                if not success:
                    break

                # get updated location of objects in subsequent frames
                success, boxes = multiTracker.update(frame)
                # draw tracked objects
                for i, newbox in enumerate(boxes):
                    center = ((int(newbox[0] + (int(newbox[2]) / 2))) - SHIFTING.X_SHIFTING,
                              (int(newbox[1] + (int(newbox[3]) / 2))) - SHIFTING.Y_SHIFTING)
                    file.write(str(center) + "\n")

                # some information on processing single frame
                if firstFrame:
                    firstFrame = False
                    toc = time.perf_counter()
                    totalFrames = int(self._cap.get(cv2.CAP_PROP_FRAME_COUNT))
                    self.logger.info("single frame took %.4f seconds" % (toc - tic))
                    self.logger.info(f"{totalFrames} total frames in video")
                    self.logger.info("Estimated time to finish: {}".format(
                        time.strftime('%H:%M:%S', time.gmtime((toc - tic) * totalFrames))))

        # (the file is closed automatically when the 'with' block above exits)
        toc = time.perf_counter()
        self.logger.info("Finish detection in {}".format(time.strftime('%H:%M:%S', time.gmtime((toc - tic)))))
        tryModel.tryModel(self._filename, self._outputPath)
        toc = time.perf_counter()
        self.logger.info("Finish model in {}".format(time.strftime('%H:%M:%S', time.gmtime((toc - tic)))))
Example no. 2
def grab_proc(url, rate, camera_id):
    '''
    Frame-grabbing worker process
    :param url: video stream URL
    :param rate: frame rate of the stream
    :param camera_id: camera identifier
    :return:
    '''
    logger = Log('grab-proc' + str(os.getpid()), 'logs/')
    logger.info('initializing SeaweedFS')
    master = WeedClient(config.get('weed', 'host'),
                        config.getint('weed', 'port'))
    logger.info('initializing Kafka')
    kafka = Kafka(bootstrap_servers=config.get('kafka', 'boot_servers'))
    topic = config.get('camera', 'topic')
    face_tool = Face(config.get('api', 'face_server'))
    detect_count = 0  # counter used to pace how often detection runs
    frame_internal = track_internal * rate
    trackable = False

    # start the frame-grabbing thread
    q = queue.Queue(maxsize=100)
    t = GrabJob(q, camera_id, url, rate,
                Log('grab-proc' + str(os.getpid()) + '-thread', 'logs/'),
                config)
    t.start()

    while True:
        try:
            img = q.get(timeout=20)
            if detect_count % frame_internal == 0:
                detect_count = 0
                b64 = mat_to_base64(img)
                t1 = time.time()
                detect_result = face_tool.detect(b64)
                logger.info('detect cost time: ',
                            round((time.time() - t1) * 1000), 'ms')
                if detect_result['error_message'] != '601':
                    logger.warning('verifier detector error, error_message:',
                                   detect_result['error_message'])
                    continue
                tracker = cv2.MultiTracker_create()
                latest_imgs = []
                timestamp = round(time.time())
                for face_num in range(detect_result['detect_nums']):
                    tmp = detect_result['detect'][face_num]
                    bbox = (tmp['left'], tmp['top'], tmp['width'],
                            tmp['height'])
                    tracker.add(cv2.TrackerKCF_create(), img, bbox)
                    face_b64 = face_tool.crop(bbox[0], bbox[1], bbox[2],
                                              bbox[3], b64, True)
                    latest_img = {
                        'image_base64': face_b64,
                        'bbox': bbox,
                        'landmark':
                        detect_result['detect'][face_num]['landmark'],
                        'time': timestamp
                    }
                    # face-quality filter: keep frontal, good-quality, high-score faces only
                    if tmp['sideFace'] == 0 and tmp[
                            'quality'] == 1 and tmp['score'] > 0.95:
                        latest_imgs.append(latest_img)
                if len(latest_imgs) > 0:
                    trackable = True
                else:
                    trackable = False

            elif trackable:
                # start tracking
                ok, bboxs = tracker.update(img)
                if ok and detect_count < frame_internal - 1:
                    if detect_count % 10 == 0:
                        logger.info('tracking..., detect_count = %d' %
                                    detect_count)
                    detect_count += 1
                    continue
                else:
                    # use the faces captured at detection time
                    logger.info('tracking over! detect_count = %d' %
                                detect_count)
                    for latest in latest_imgs:
                        logger.info([camera_id], 'track person success!')
                        face_b64 = latest['image_base64']

                        # save img to seaweed fs
                        logger.info([camera_id],
                                    'save grabbed detect_result to seaweed fs')
                        assign = master.assign()
                        logger.info([camera_id], 'assign result:', assign)

                        ret = master.upload(assign['url'], assign['fid'],
                                            base64_to_bytes(face_b64),
                                            assign['fid'] + '.jpg')
                        logger.info([camera_id], 'upload result:', ret)

                        # send to Kafka
                        url = 'http' + ':' + '//' + assign[
                            'url'] + '/' + assign['fid']
                        logger.info('[', camera_id, ']', 'img url:', url)
                        msg = json.dumps({
                            'url': url,
                            'time': latest['time'],
                            'camera_id': camera_id,
                            'landmark': latest['landmark']
                        })
                        logger.info([camera_id], 'send to kafka: ', msg)
                        kafka.send(topic, msg)
                    # go back to detection
                    detect_count = 0
                    trackable = False
                    logger.info('restart detection')
            else:
                if detect_count % 10 == 0:
                    logger.info('detect 0 detect_result, do not track',
                                'detect count= ', detect_count)
                detect_count += 1
                continue
        except queue.Empty:
            logger.error('grab queue empty error, exit')
            break
        detect_count += 1
    logger.info('frame-grabbing process terminated')
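The snippet above relies on two helpers, mat_to_base64 and base64_to_bytes, that are not shown. A plausible sketch of what they might look like (an assumption, not the project's actual code):

import base64
import cv2

def mat_to_base64(img):
    # encode the frame as JPEG, then base64-encode the bytes
    ok, buf = cv2.imencode('.jpg', img)
    if not ok:
        raise ValueError('JPEG encoding failed')
    return base64.b64encode(buf.tobytes()).decode('ascii')

def base64_to_bytes(b64_string):
    # inverse of mat_to_base64: recover the raw JPEG bytes
    return base64.b64decode(b64_string)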
Example no. 3
    def track(self):

        self.countimage = 0

        cap = cv2.VideoCapture(self.videopath)
        cap.set(1, self.firstFrame)
        success, frame = cap.read()
        self.video_length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

        if not success:
            self.logger.error('Failed to read video')
            sys.exit(1)

        self.logger.debug(
            f'{len(self.bboxes)} tracking boxes have been created')
        trackerType = "CSRT"
        multiTracker = cv2.MultiTracker_create()  # Create MultiTracker object

        for bbox in self.bboxes:  # Initialize MultiTracker
            multiTracker.add(cv2.TrackerCSRT_create(), frame, bbox)
        self.logger.info(
            f'Starting the tracking of {len(self.bboxes)} particles')

        while cap.isOpened():

            success, frame = cap.read()
            if not success or self.countimage == self.lastFrame:
                # cv2.imwrite('img_path{}_lastframe.jpg'.format(self.countimage), previous_frame)
                # self.logger.info("\n Last Image save in your working directory !")
                break

            previous_frame = frame
            success, boxes = multiTracker.update(
                frame)  # get updated location of objects in subsequent frames

            if self.countimage < 1:  # initialize the per-box position lists
                for i, newbox in enumerate(boxes):
                    self.positions[i] = []

            # draw tracked objects
            for i, newbox in enumerate(boxes):
                # coordinates of the tracking box
                p1 = (newbox[0], newbox[1])
                p2 = (newbox[0] + newbox[2], newbox[1] + newbox[3])
                Pr1 = (int(newbox[0]), int(newbox[1]))
                Pr2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))
                cv2.rectangle(frame, Pr1, Pr2, (255, 0, 0), 2, 1)
                # append the center of the box to dict "self.positions[index of box]" according to refresh rate
                if self.countimage:
                    xcenter = (p1[0] + p2[0]) / 2
                    ycenter = (p1[1] + p2[1]) / 2
                    self.positions[i].append((xcenter, ycenter))

                for pos in self.positions[i]:
                    xcenter = pos[0]
                    ycenter = pos[1]
                    cv2.circle(frame, (int(xcenter), int(ycenter)), 1,
                               self.circle_color, -1)

            self.countimage += 1
            cv2.putText(frame, f"Frame no{self.countimage}", (10, 37),
                        cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 0), 1)

            self.current_frame = frame

            yield frame

            # quit on ESC button and save last frame
            if cv2.waitKey(1) & 0xFF == 27:  # Esc pressed
                break
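Since track() is a generator that yields each annotated frame, driver code would typically consume it like this (a hypothetical usage sketch; the ParticleTracker name, constructor arguments and example boxes are assumptions, not part of the original class):

import cv2

initial_boxes = [(10, 10, 40, 40), (120, 80, 40, 40)]                # example ROIs
tracker = ParticleTracker("particles.mp4", bboxes=initial_boxes)     # hypothetical constructor
for frame in tracker.track():
    cv2.imshow("tracking", frame)
    if cv2.waitKey(1) & 0xFF == 27:  # Esc stops the preview early
        break
cv2.destroyAllWindows()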
Example no. 4
    def d_t_with_cv_ta(self, stop_thread, format='bgr'):
        """The low-level method to provide detecting and tracking objects with using OpenCV's tracking API.

        Args:
                stop_thread:            Stop flag of the thread, used to terminate it from outside the function's loop.
                format:       	        Color space format.
        """
        if self.record:
            self.recorder.start("track")

        tracked_boxes = []  # becomes a NumPy array later, because update() overwrites it
        names = []

        multi_tracker = cv2.MultiTracker_create()

        rgb, detected_boxes = self.detect_initiate()

        found_count = 0
        d_t_failure_count = 0
        use_detection = 0

        for frame in self.camera.capture_continuous(self.raw_capture,
                                                    format=format,
                                                    use_video_port=True):

            if len(detected_boxes) > len(tracked_boxes):

                if not self.no_recognize:
                    names = self.recognize_things(rgb, detected_boxes)
                else:
                    names = None

                self.current_frame = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)

                # Create MultiTracker object
                multi_tracker = cv2.MultiTracker_create()

                # Initialize MultiTracker
                for box in detected_boxes:
                    # box[3] is x,
                    # box[0] is y,
                    # box[1] is x + w,
                    # box[2] is y + h.
                    reworked_box = box[3], box[0], box[1] - box[3], box[2] - box[0]

                    multi_tracker.add(self.create_tracker_by_name(),
                                      self.current_frame, reworked_box)
                found_count += 1

            # grab the raw NumPy array representing the image, then initialize the timestamp and occupied/unoccupied text
            frame = frame.array
            self.current_frame = frame.copy()  # keep a copy that can be overwritten safely

            if use_detection >= 3:
                rgb, detected_boxes = self.detect_things(self.current_frame)
                use_detection = 0

            use_detection += 1

            # Start timer
            timer = cv2.getTickCount()

            # get updated location of objects in subsequent frames
            is_tracking_success, tracked_boxes = multi_tracker.update(
                self.current_frame)

            if len(detected_boxes) < len(tracked_boxes):
                d_t_failure_count += 1
            else:
                d_t_failure_count = 0

            # Calculate Frames per second (FPS)
            fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

            if is_tracking_success and d_t_failure_count < 5:

                self.track(self.current_frame, tracked_boxes, names)

            elif not is_tracking_success or d_t_failure_count >= 5:
                # Tracking failure
                cv2.putText(self.current_frame, "Tracking failure detected",
                            (100, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.75,
                            (0, 0, 255), 2)
                tracked_boxes = []  # for clearing tracked_boxes list.

            # # Display tracker type on frame
            # cv2.putText(frame, self.tracker_type + " Tracker", (100, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
            #
            # # Display FPS on frame
            # cv2.putText(frame, "FPS : " + str(int(fps)), (100, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

            self.show_frame(self.current_frame)
            self.truncate_stream()

            if self.check_loop_ended(stop_thread):
                break
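create_tracker_by_name is called above but not listed; judging from the commented-out self.tracker_type overlay, it presumably maps a tracker-type string to the matching OpenCV constructor. A hedged sketch of such a method of the same class (the supported types are an assumption):

    def create_tracker_by_name(self):
        # map the configured tracker type to its OpenCV factory function
        factories = {
            "CSRT": cv2.TrackerCSRT_create,
            "KCF": cv2.TrackerKCF_create,
            "MIL": cv2.TrackerMIL_create,
            "MOSSE": cv2.TrackerMOSSE_create,
        }
        return factories[self.tracker_type]()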
Example no. 5
def main(paths,
         day=2,
         camera=3,
         initial_frame=0,
         num_frames=36000,
         N_pruning=0):

    final_frame = initial_frame + num_frames - 1
    if num_frames < 1:
        logging.info('Number of frames needs to be higher than 0\n')
        sys.exit()
    if final_frame > 35999:
        if initial_frame > 35999:
            logging.info('Invalid initial frame (0-35999)\n')
            sys.exit()
        else:
            final_frame = 35999
            num_frames = final_frame + 1 - initial_frame

    data_path = paths['data_path']
    videos_path = paths['videos_path']
    output_path = paths['output_path']
    if not os.path.isdir(output_path):
        logging.info('Output directory does not exist\n')
        sys.exit()

    # Video to load
    video_file = '30min_day' + str(day) + '_cam' + str(
        camera) + '_20fps_960x540.MP4'
    video_name = 'Day ' + str(day) + ' Camera ' + str(camera)

    cap = cv2.VideoCapture(videos_path +
                           video_file)  # Capture object to read video
    if not cap.isOpened():  # Exit if video not opened
        logging.info('Could not open the video\n')
        sys.exit()
    cap.set(cv2.CAP_PROP_POS_FRAMES,
            initial_frame)  # Set the first frame to read
    ret, frame = cap.read()  # Read first frame
    if not ret:  # Exit if reading failure
        logging.info('Unable to read the video file\n')
        sys.exit()
    logging.info(f'Selected video: {video_name}\n')

    # Read annotations
    logging.info('Getting annotations ...\n')
    det_full, cam_full, lost_full = read.read_annotations(
        path=data_path,
        day=day,
        initial_frame=initial_frame,
        num_frames=num_frames)

    # Read annotations from first frame
    frame_index = initial_frame
    annotations0, num_part0 = read_detections(det=det_full,
                                              cam=cam_full,
                                              lost=lost_full,
                                              camera=camera,
                                              frame_index=frame_index -
                                              initial_frame)
    logging.info(
        f'{num_part0} participants annotated on the frame {frame_index}\n')

    # Object MultiTracker (Primary Trackers)
    multi_tracker = cv2.MultiTracker_create()
    boxes, targets_tracked = change_det_boxes(
        init_boxes=annotations0)  # From (x1,y1,x2,y2) to (x1,y1,width,height)
    for box in boxes:  # Initialization of primary trackers for each target
        multi_tracker.add(cv2.TrackerKCF_create(), frame, box)
        multi_tracker.add(cv2.TrackerMedianFlow_create(), frame, box)
        multi_tracker.add(cv2.TrackerMIL_create(), frame, box)

    # MHT
    tracking_params = {
        'N_pruning': N_pruning,  # Index for pruning
        'distance_threshold': 100,  # Distance threshold for hypothesis formation
        'distance_threshold2': 75,  # Distance threshold for updating the primary trackers
        'MIL_weight': 0.2,  # MIL Tracker weight on the track scoring
        'MF_weight': 0.35,  # MF Tracker weight on the track scoring
        'KCF_weight': 0.45,  # KCF Tracker weight on the track scoring
        'color_score_threshold': 0.20,  # Bhattacharyya distance threshold between color histograms for Re-ID
        'color_score_weight': 0.75,  # Color histogram weight on the lost-track scoring
        'lost_time_threshold': 25,  # Time-of-loss threshold for Re-ID
        'lost_time_weight': 0.25,  # Time-of-loss weight on the lost-track scoring
        'color_hist_bins': 4,  # Number of bins per histogram
    }
    mht = MHT(tracking_params)  # Object MHT initialized
    logging.info('Running MHT ...\n')
    ti = time.time()  # Start timer
    logging.info(f'Frame: {frame_index} ...')
    mht.init(frame=frame, detections=annotations0)  # Run MHT on first frame.

    ids = []  # ID's of targets tracked at each frame
    fps_acc = 0  # Processing speed

    trk_path = output_path + 'tracker/'
    if not os.path.isdir(trk_path):
        os.mkdir(trk_path)

    res_file = trk_path + 'Results_day' + str(day) + '_cam' + str(
        camera) + '_' + str(tracking_params['N_pruning'])
    speed_file = trk_path + 'Speed_day' + str(day) + '_cam' + str(
        camera) + '_' + str(tracking_params['N_pruning'])
    runtime_file = trk_path + 'Time_day' + str(day) + '_cam' + str(
        camera) + '_' + str(tracking_params['N_pruning'])

    frame_index += 1
    #########################################
    frame_print = set(np.arange(initial_frame - 1, final_frame,
                                100))  # To print frame every 100 frames
    frame_save = set(np.arange(initial_frame - 1, final_frame,
                               1000))  # To save results every 1000 frames

    # Process video and track objects
    while cap.isOpened() and (frame_index <= final_frame) and (frame_index <=
                                                               35999):
        ret, frame = cap.read()  # Read new frame
        if not ret:  # Exit if reading failure
            break

        if frame_index in frame_print:
            logging.info(f'Frame: {frame_index} ...')

        timer = cv2.getTickCount()  # Start timer to get FPS
        # Read annotations
        annotations, num_part = read_detections(det=det_full,
                                                cam=cam_full,
                                                lost=lost_full,
                                                camera=camera,
                                                frame_index=frame_index -
                                                initial_frame)

        if num_part != num_part0:
            logging.info(
                f'Number of annotations changed from {num_part0} to {num_part} on frame {frame_index}\n'
            )
            num_part0 = num_part

        # Update primary trackers for the current frame
        ret, multitracker_results = multi_tracker.update(frame)
        # Turn results from primary trackers into a dictionary for MHT
        trackers_results = change_track_boxes(init_boxes=multitracker_results,
                                              indexes=targets_tracked)

        # Run MHT with annotations (detections) and tracker results
        solution_coord, track_ids, new_tracks = mht.run(
            frame=frame,
            detections=annotations,
            trackers_results=trackers_results)

        # Update primary trackers when they're lost or there are new targets
        if len(new_tracks) != 0:
            for key in new_tracks.keys():
                if key in ids:  # If an existing tracker needs to be updated
                    i = targets_tracked.index(key)
                    targets_tracked[i] = random.randint(50, 500)  # Change ID
                targets_tracked.append(key)  # Append corresponding ID

                # Append new trackers for the target
                box = new_tracks[key]
                new_box = (box[0], box[1], box[2] - box[0], box[3] - box[1])
                multi_tracker.add(cv2.TrackerKCF_create(), frame, new_box)
                multi_tracker.add(cv2.TrackerMedianFlow_create(), frame,
                                  new_box)
                multi_tracker.add(cv2.TrackerMIL_create(), frame, new_box)

        ids = track_ids  # Update track ID's

        fps = cv2.getTickFrequency() / (
            cv2.getTickCount() - timer
        )  # Compute frames per second (FPS) of the processing

        fps_acc += fps

        if frame_index in frame_save:  # Save results
            tf = time.time()  # End timer
            t_tot = tf - ti
            fps_mean = fps_acc / (frame_index - initial_frame + 1
                                  )  # Average FPS
            logging.info(f'Elapsed time: {tf-ti} seconds\n')
            logging.info(
                f'{frame_index-initial_frame+1} frames processed ({initial_frame}-{frame_index})\n'
            )

            # Save results to a .CSV file
            #results_file = res_file+'_'+str(initial_frame)+'-'+str(frame_index)+'.csv'
            #write_csv(file_name=results_file, solution_coordinates=solution_coord)

            fps_file = speed_file + '_' + str(initial_frame) + '-' + str(
                frame_index) + '.csv'
            np.savetxt(fps_file, [fps_mean], delimiter=',')
            time_file = runtime_file + '_' + str(initial_frame) + '-' + str(
                frame_index) + '.csv'
            np.savetxt(time_file, [t_tot], delimiter=',')

        frame_index += 1

    tf = time.time()  # End timer
    t_tot = tf - ti

    fps_mean = fps_acc / (frame_index - initial_frame)  # Average FPS

    logging.info('MHT completed\n')
    logging.info(f'Elapsed time: {t_tot} seconds\n')
    logging.info(
        f'{frame_index-initial_frame} frames processed ({initial_frame}-{frame_index-1})\n'
    )

    # Save results to a .CSV file
    results_file = res_file + '_' + str(initial_frame) + '-' + str(
        frame_index - 1) + '.csv'
    write_csv(file_name=results_file, solution_coordinates=solution_coord)

    fps_file = speed_file + '_' + str(initial_frame) + '-' + str(frame_index -
                                                                 1) + '.csv'
    np.savetxt(fps_file, [fps_mean], delimiter=',')
    time_file = runtime_file + '_' + str(initial_frame) + '-' + str(
        frame_index - 1) + '.csv'
    np.savetxt(time_file, [t_tot], delimiter=',')
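change_det_boxes is referenced but not listed; its inline comment says it converts (x1, y1, x2, y2) boxes into (x1, y1, width, height). A possible sketch, assuming the annotations map a target ID to its corner-format box (the data layout is an assumption):

def change_det_boxes(init_boxes):
    # convert corner-format detections to the (x, y, w, h) format the trackers expect
    boxes, targets_tracked = [], []
    for target_id, (x1, y1, x2, y2) in init_boxes.items():
        boxes.append((x1, y1, x2 - x1, y2 - y1))
        targets_tracked.append(target_id)
    return boxes, targets_tracked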
Example no. 6
def detect(input_video, output_video, interval, track_number, csv_path,
           mode_path, debuglog):
    """
    input_video: path of the input video
    output_video: path of the output result video
    interval: run detection once every `interval` frames
    track_number: number of targets to track
    csv_path: path of the output CSV file
    mode_path: path of the segmentation model to load
    debuglog: logging callback for progress messages
    """
    # prompt the user to select the tracking targets
    debuglog('Select {} tracking targets'.format(track_number))

    cv2.namedWindow("tracking")
    # open the video to analyse
    camera = cv2.VideoCapture(input_video)
    tracker = cv2.MultiTracker_create()
    init_once = False
    model = load_model(mode_path)

    ok, image = camera.read()
    # set up the writer that saves the result video
    fourcc = cv2.VideoWriter_fourcc(*'MJPG')
    out = cv2.VideoWriter(output_video, fourcc, 20,
                          (image.shape[1], image.shape[0]))

    if not ok:
        debuglog('Failed to read video')
        exit()

    # collect one tracking box per target in a list
    bboxes = []
    for t in range(track_number):
        bbox = cv2.selectROI('tracking', image)  # the first box is used to read the time stamp
        bboxes.append(bbox)

    i = 1

    # list collecting the per-frame stomatal-area records
    aliens = []
    while camera.isOpened():
        ok, image = camera.read()
        if not ok:
            debuglog('no image to read')
            camera.release()
            cv2.destroyAllWindows()
            break

        # run semantic segmentation only every `interval` frames
        if i % interval == 0:

            if not init_once:
                for bbox in bboxes:
                    ok = tracker.add(cv2.TrackerBoosting_create(), image, bbox)

                init_once = True

            ok, boxes = tracker.update(image)
            # print(ok, boxes)

            j = 0
            # collect the stomatal areas of this frame in a list
            stoma_area = []
            for newbox in boxes:

                # if this is the first box, read the time stamp from it
                if j == 0:
                    p1 = (int(newbox[0]), int(newbox[1]))
                    p2 = (int(newbox[0] + newbox[2]),
                          int(newbox[1] + newbox[3]))
                    cut_img = cut(image, newbox)
                    time_text = recognition_number(cut_img)

                # otherwise, run semantic segmentation on this box
                else:
                    p1 = (int(newbox[0]), int(newbox[1]))
                    p2 = (int(newbox[0] + newbox[2]),
                          int(newbox[1] + newbox[3]))
                    cut_img = cut(image, newbox)
                    # print(cut_img.shape)
                    seg_img = segment_img(cut_img, model)
                    # print(seg_img.shape)
                    # blend the segmentation result with the original crop
                    dst = cv2.addWeighted(cut_img, 0.5, seg_img, 0.5, 0)
                    # paste the blended result back onto the original frame
                    image[int(newbox[1]):int(newbox[1] + newbox[3]),
                          int(newbox[0]):int(newbox[0] + newbox[2])] = dst
                    # draw the bounding rectangle
                    cv2.rectangle(image, p1, p2, (200, 0, 0))
                    # display the area value
                    # convert the mask to grayscale
                    gray_img = cv2.cvtColor(seg_img, cv2.COLOR_BGR2GRAY)
                    area_number = calculate_area(gray_img)
                    stoma_area.append(area_number)
                    text = '{}'.format(area_number)
                    cv2.putText(image, text,
                                (int(newbox[0]), int(newbox[1] - 5)),
                                cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 255),
                                2)

                j = j + 1

            # turn stoma_area into a dict
            dict_keys = []
            dict_values = stoma_area
            for s in range(len(dict_values)):
                # build the list of keys
                dict_keys.append('stomat{}'.format(s))

            # dict of stomatal areas
            new_dict1 = dict(zip(dict_keys, dict_values))
            # dict with the frame number and time stamp
            new_dict2 = {'image_number': i, 'time_date': time_text}

            # merge the two dicts
            new_aliens = dict(new_dict1, **new_dict2)

            aliens.append(new_aliens)
            cv2.imshow('tracking', image)
            out.write(image)
            debuglog('This is frame {}!'.format(i))
        k = cv2.waitKey(1)
        if k & 0xFF in (27, ord('q')):  # press q (or Esc) to quit
            break
        i = i + 1
    out.release()

    # convert the list of dicts to a pandas DataFrame
    df = pd.DataFrame(aliens)
    # save as a CSV file
    df.to_csv(csv_path)
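cut and calculate_area are not part of the listing; plausible minimal versions are sketched below (assumptions about their behaviour, taking the segmentation mask to be non-zero on stomatal pixels):

import cv2

def cut(image, box):
    # crop the (x, y, w, h) box out of the frame
    x, y, w, h = [int(v) for v in box]
    return image[y:y + h, x:x + w]

def calculate_area(gray_mask):
    # area in pixels = number of non-zero pixels in the segmentation mask
    return int(cv2.countNonZero(gray_mask))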
Example no. 7
_, frame = cap.read()  # grab the first frame
bb = []  # empty list that will hold the coordinates of the selected boxes

out = cv2.VideoWriter('teste_out.mp4', fourcc, 30.0, (frame.shape[1], frame.shape[0]))  # output file name, frame rate and resolution

while True:

  roi = cv2.selectROI('Frame', frame)  # ROI selection
  #print(roi)
  bb.append(roi)  # append the ROI box coordinates to the list

  k = cv2.waitKey(0)
  if k == ord('q'):
    break

multiTracker = cv2.MultiTracker_create()  # create the MultiTracker object

for bbox in bb:
  multiTracker.add(get_tracker(), frame, bbox)  # initialise a tracker for each selected ROI

while True:

    old_frame = frame

    ret, frame = cap.read()  # grab a frame
    if not ret:  # stop when the video ends
        exit()

    _, bxs = multiTracker.update(frame)  # update the trackers to each ROI's new position

    for ID, box in enumerate(bxs):
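The snippet is cut off inside this last loop; a typical body would draw each updated box and write the annotated frame to the output file, roughly like this (a sketch under assumptions, not the original code):

    for ID, box in enumerate(bxs):
        x, y, w, h = [int(v) for v in box]
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)   # draw the tracked box
        cv2.putText(frame, str(ID), (x, y - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 255, 0), 2)

    out.write(frame)                       # write the annotated frame to the output video
    if cv2.waitKey(1) & 0xFF == ord('q'):  # press q to stop
        break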
Example no. 8
def mot_detect(args):
    def nothing(emp):
        pass

    print("----------> create the trackers")
    # extract the OpenCV version info
    (major, minor) = cv2.__version__.split(".")[:2]
    # if we are using OpenCV 3.2 OR BEFORE, we can use a special factory
    # function to create our object tracker
    if int(major) == 3 and int(minor) < 3:
        tracker = cv2.Tracker_create(args["tracker"].upper())
    # otherwise, for OpenCV 3.3 OR NEWER, we need to explicitly call the
    # appropriate object tracker constructor:
    else:
        # initialize a dictionary that maps strings to their corresponding
        # OpenCV object tracker implementations
        OPENCV_OBJECT_TRACKERS = {
            "csrt": cv2.TrackerCSRT_create,
            "kcf": cv2.TrackerKCF_create,
            "boosting": cv2.TrackerBoosting_create,
            "mil": cv2.TrackerMIL_create,
            "tld": cv2.TrackerTLD_create,
            "medianflow": cv2.TrackerMedianFlow_create,
            "mosse": cv2.TrackerMOSSE_create
        }
        # grab the appropriate object tracker using our dictionary of
        # OpenCV object tracker objects
        trackers = cv2.MultiTracker_create()
    # tracker = OPENCV_OBJECT_TRACKERS[args["tracker"]]()
    # initialize the bounding box coordinates of the object we are going
    # to track
    initBB = None
    tclass = []
    start_time = time.time()
    failure_flag = False

    # if a video path was not supplied, grab the reference to the web cam
    if not args.get("video", False):
        print("[INFO] starting video stream...")
        vs = VideoStream(src=0).start()
        time.sleep(1.0)
    # otherwise, grab a reference to the video file
    else:
        vs = cv2.VideoCapture(args["video"])
    # initialize the FPS throughput estimator
    fps = None

    frames = int(vs.get(cv2.CAP_PROP_FRAME_COUNT))
    loop_flag = 0
    pos = 0

    print("----------> read the frames")
    cv2.namedWindow('Frame')
    cv2.createTrackbar('time', 'Frame', 0, frames, nothing)

    # loop over frames from the video stream
    while True:
        # process-bar setting
        if loop_flag == pos:
            loop_flag = loop_flag + 1
            cv2.setTrackbarPos('time', 'Frame', loop_flag)
        else:
            pos = cv2.getTrackbarPos('time', 'Frame')
            loop_flag = pos
            vs.set(cv2.CAP_PROP_POS_FRAMES, pos)

        # grab the current frame, then handle if we are using a
        # VideoStream or VideoCapture object
        frame = vs.read()
        frame = frame[1] if args.get("video", False) else frame
        # check to see if we have reached the end of the stream
        if frame is None:
            break
        # resize the frame (so we can process it faster) and grab the
        # frame dimensions
        frame = imutils.resize(frame,
                               width=int(args["set_width"]),
                               height=int(args["set_height"]))
        frame0 = frame.copy()
        (H, W) = frame.shape[:2]

        # check to see if we are currently tracking an object
        if initBB is not None:
            print("----------> update the trackers' roi area")
            # grab the new bounding box coordinates of the object
            (success, boxes) = trackers.update(frame)
            print("[INFO] success / box num", success, len(boxes))

            # check to see if the tracking was a success
            if success:
                tpos = []
                for num, box in enumerate(boxes):
                    (x, y, w, h) = [int(v) for v in box]
                    new_pos = [x, y, x + w, y + h]
                    tpos.append(new_pos)
                    cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0),
                                  2)
                    cv2.putText(frame, tclass[num], (x, y - 10),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
                now_time = time.time()
                if now_time - start_time > float(
                        args["interval"]) and len(boxes) > 0:
                    start_time = now_time
                    pic_name = str(time.time()) + ".jpg"
                    pic_fullpath = Path(args["pic_dir"]).joinpath(pic_name)
                    print("[INFO] save new pic:", pic_fullpath)
                    r = Image.fromarray(frame0[:, :, 2]).convert('L')
                    g = Image.fromarray(frame0[:, :, 1]).convert('L')
                    b = Image.fromarray(frame0[:, :, 0]).convert('L')
                    img = Image.merge("RGB", (r, g, b))
                    print("----------> save the pic and annotation xml")
                    img.save(pic_fullpath)
                    annotation_single_img(args["pic_dir"], pic_name,
                                          args["xml_dir"], tclass, tpos)
                # update the FPS counter
                fps.update()
                fps.stop()
                # initialize the set of information we'll be displaying on
                # the frame
                info = [
                    ("Tracker", args["tracker"]),
                    ("Success", "Yes" if success else "No"),
                    ("FPS", "{:.2f}".format(fps.fps())),
                ]
                # loop over the info tuples and draw them on our frame
                for (i, (k, v)) in enumerate(info):
                    text = "{}: {}".format(k, v)
                    cv2.putText(frame, text, (10, H - ((i * 20) + 20)),
                                cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
            else:
                trackers.clear()
                trackers = cv2.MultiTracker_create()
                failure_flag = True
                initBB = None
                tclass = []
                cv2.putText(frame, lost_warning, (10, 100),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.6, (0, 0, 255), 2)
        # show the output frame
        cv2.imshow("Frame", frame)
        if failure_flag:
            cv2.waitKey(5000) & 0xFF
            failure_flag = False
        else:
            key = cv2.waitKey(100) & 0xFF
        # if the 's' key is selected, we are going to "select" a bounding
        # box to track
        if key == ord("s"):
            print("----------> select roi by mouse")
            cv2.putText(frame, input_cmd, (10, 20), cv2.FONT_HERSHEY_SIMPLEX,
                        0.6, (0, 0, 255), 2)
            cv2.imshow("Frame", frame)
            # show the classes choice
            ckey = cv2.waitKey(0) & 0xFF
            if int(ckey - 48) > len(select_classes) or int(ckey - 48) <= 0:
                continue
            cname = select_classes[int(ckey - 48)]
            print("[INFO] choose type to label:", cname)
            # select the bounding box of the object we want to track (make
            # sure you press ENTER or SPACE after selecting the ROI)
            initBB = cv2.selectROIs("Frame",
                                    frame,
                                    fromCenter=False,
                                    showCrosshair=True)

            # start OpenCV object tracker using the supplied bounding box
            # coordinates, then start the FPS throughput estimator as well
            initBB = tuple(map(tuple, initBB))
            if str(initBB) == '()':
                print("[WARNING] There is no select ROIs!")
                # initBB==None
            else:
                for bb in initBB:
                    tracker = OPENCV_OBJECT_TRACKERS[args["tracker"]]()
                    trackers.add(tracker, frame, bb)
                    tclass.append(cname)

            fps = FPS().start()

        # if the `q` key was pressed, break from the loop
        elif key == ord("q"):
            print("----------> quit all process")
            break

        elif key == ord("r"):
            print("----------> clear all roi trackers")
            tclass = []
            trackers.clear()
            trackers = cv2.MultiTracker_create()
            cv2.putText(frame, input_cmd, (10, 20), cv2.FONT_HERSHEY_SIMPLEX,
                        0.6, (0, 0, 255), 2)
            cv2.imshow("Frame", frame)
            # show the classes choice
            ckey = cv2.waitKey(0) & 0xFF
            cname = select_classes[int(ckey - 48)]
            print("[INFO]You have chosen the class:%s" % cname)
            initBB = cv2.selectROIs("Frame",
                                    frame,
                                    fromCenter=False,
                                    showCrosshair=True)
            initBB = tuple(map(tuple, initBB))
            if str(initBB) == '()':
                print("[WARNING] There is no select ROIs!")
                # initBB==None
            else:
                print("---------->add new roi trackers")
                for bb in initBB:
                    tracker = OPENCV_OBJECT_TRACKERS[args["tracker"]]()
                    trackers.add(tracker, frame, bb)
                    tclass.append(cname)
        # else:
        #     continue

    # if we are using a webcam, release the pointer
    if not args.get("video", False):
        vs.stop()
    # otherwise, release the file pointer
    else:
        vs.release()
    # close all windows
    cv2.destroyAllWindows()
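Note that the constructors used in OPENCV_OBJECT_TRACKERS above were removed from the top-level cv2 namespace in opencv-contrib-python 4.5.1; on newer builds the equivalent factory table lives under cv2.legacy (a compatibility sketch, assuming the contrib package is installed):

import cv2

OPENCV_OBJECT_TRACKERS = {
    "csrt": cv2.legacy.TrackerCSRT_create,
    "kcf": cv2.legacy.TrackerKCF_create,
    "boosting": cv2.legacy.TrackerBoosting_create,
    "mil": cv2.legacy.TrackerMIL_create,
    "tld": cv2.legacy.TrackerTLD_create,
    "medianflow": cv2.legacy.TrackerMedianFlow_create,
    "mosse": cv2.legacy.TrackerMOSSE_create,
}
trackers = cv2.legacy.MultiTracker_create()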
Example no. 9
    def gen_keypoint(self):
        params = self.set_params()

        multiTracker = cv2.MultiTracker_create()
        bboxes = []
        colors = [(255, 0, 0), (0, 255, 0)]

        try:
            # Starting OpenPose
            opWrapper = self.op.WrapperPython()
            opWrapper.configure(params)
            opWrapper.start()

            stream = cv2.VideoCapture(self.input_video)
            #stream.set(cv2.CAP_PROP_FPS, 2)
            frame_width = int(stream.get(3))
            frame_height = int(stream.get(4))
            out = cv2.VideoWriter(self.output_vdieo,
                                  cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
                                  10, (frame_width, frame_height))
            # Process Video
            poseKeypoints_data_video = []
            map_bone_video = []
            while True:
                datum = self.op.Datum()
                ret, img = stream.read()
                if not ret:
                    break

                datum.cvInputData = img
                opWrapper.emplaceAndPop([datum])

                # Display Image
                #print("Body keypoints: \n" + str(datum.poseKeypoints) + str(type(datum.poseKeypoints)))

                poseKeypoints_data = []
                show_result = datum.cvOutputData

                if len(datum.poseKeypoints.shape) > 1:
                    for poseKeypoints in datum.poseKeypoints:
                        bone_struct = dict()
                        for i in range(len(poseKeypoints)):
                            keypoint = dict()
                            keypoint['x'] = poseKeypoints[i][0]
                            keypoint['y'] = poseKeypoints[i][1]
                            #keypoint['c'] = str(poseKeypoints[i][2])
                            bone_struct[i] = keypoint
                        #self.resize_bone(bone_struct,self.config.fixed_bone_size)
                        poseKeypoints_data.append(bone_struct)
                        print("cal_size: ", cal_size_bone(bone_struct))
                        #draw_bone(bone_struct,cv2,show_result)
                    if len(bboxes) == 0:
                        for bst in poseKeypoints_data:
                            bbox = box_from_bonestruct(bst)
                            bboxes.append(bbox)
                            colors.append((randint(0, 255), randint(0, 255),
                                           randint(0, 255)))

                        for bbox in bboxes:
                            multiTracker.add(cv2.TrackerCSRT_create(), img,
                                             bbox)

                    poseKeypoints_data_video.append(poseKeypoints_data)
                    print(len(poseKeypoints_data_video))
                    # if len(poseKeypoints_data_video) == 5:
                    #     print("IMPM")
                    #     multiTracker.add(cv2.TrackerCSRT_create(), img, (100, 300, 500, 700))
                    #     colors.append((randint(0, 255), randint(0, 255), randint(0, 255)))
                success, boxes = multiTracker.update(img)
                map_bone = map_bone_struct_to_bbox(poseKeypoints_data, boxes)
                map_bone_video.append(map_bone)

                for i in range(len(poseKeypoints_data)):

                    if map_bone[i] == -1 and check_bone_struct(
                            poseKeypoints_data[i]):
                        bbox = box_from_bonestruct(poseKeypoints_data[i])
                        bboxes.append(bbox)
                        color = (randint(0, 255), randint(0, 255), randint(0, 255))
                        colors.append(color)
                        multiTracker.add(cv2.TrackerCSRT_create(), img, bbox)
                        map_bone[i] = len(colors) - 1
                    draw_bone_with_color(poseKeypoints_data[i], cv2,
                                         show_result, colors[int(map_bone[i])])

                for i, newbox in enumerate(boxes):
                    p1 = (int(newbox[0]), int(newbox[1]))
                    p2 = (int(newbox[0] + newbox[2]),
                          int(newbox[1] + newbox[3]))
                    print(p1, p2)
                    cv2.rectangle(show_result, p1, p2, colors[i], 2, 1)

                cv2.imshow("OpenPose OUPUT VIDEO", show_result)
                out.write(show_result)

                print("YYYYYYYYYYYYYYYYYYYYYYYYYYEEEEE")
                key = cv2.waitKey(1)
                if key == ord('q'):
                    break
            print("XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX")

            stream.release()
            out.release()
            cv2.destroyAllWindows()

            return [poseKeypoints_data_video, map_bone_video]
        except Exception as e:
            print(e)
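box_from_bonestruct is used above but not shown; a plausible sketch that derives an (x, y, w, h) box from the keypoint dictionary built in gen_keypoint, skipping keypoints OpenPose left at (0, 0) (this is an assumption about its behaviour):

def box_from_bonestruct(bone_struct):
    # keep only keypoints that OpenPose actually detected
    points = [(kp['x'], kp['y']) for kp in bone_struct.values()
              if kp['x'] > 0 or kp['y'] > 0]
    xs = [p[0] for p in points]
    ys = [p[1] for p in points]
    x_min, y_min = min(xs), min(ys)
    return (x_min, y_min, max(xs) - x_min, max(ys) - y_min)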
Example no. 10
def process(args):

    objects_detected = dict()

    #ToDo: Put this in intermediate_detection
    tracker_types = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN']
    tracker_type = tracker_types[2]
    tracker = None
    """
    if tracker_type == 'BOOSTING':
        tracker = cv.TrackerBoosting_create()
    if tracker_type == 'MIL':
        tracker = cv.TrackerMIL_create()
    if tracker_type == 'KCF':
        tracker = cv.TrackerKCF_create()
    if tracker_type == 'TLD':
        tracker = cv.TrackerTLD_create()
    if tracker_type == 'MEDIANFLOW':
        tracker = cv.TrackerMedianFlow_create()
    if tracker_type == 'GOTURN':
        tracker = cv.TrackerGOTURN_create()
    """
    predictor = object_detector(args.model, args.config)
    multi_tracker = cv.MultiTracker_create()
    stream = cv.VideoCapture(args.input if args.input else 0)
    window_name = "Tracking in progress"
    cv.namedWindow(window_name, cv.WINDOW_NORMAL)
    cv.setWindowProperty(window_name, cv.WND_PROP_AUTOSIZE, cv.WINDOW_AUTOSIZE)
    cv.moveWindow(window_name, 10, 10)

    if args.output:
        _, test_frame = stream.read()
        height = test_frame.shape[0]
        width = test_frame.shape[1]
        fourcc = cv.VideoWriter_fourcc(*'XVID')
        #out = cv.VideoWriter(args.output,fourcc, 20.0, (640,480))
        out = cv.VideoWriter(args.output, fourcc, 20.0, (width, height))
        failTolerance = 0

    if args.classes:
        with open(args.classes, 'rt') as f:
            classes = f.read().rstrip('\n').split('\n')
    else:
        classes = list(np.arange(0, 100))

    stream, objects_detected, objects_list, multi_tracker = intermediate_detections(
        stream, predictor, multi_tracker, tracker, args.thr, classes)

    while stream.isOpened():

        grabbed, frame = stream.read()

        if not grabbed:
            break

        timer = cv.getTickCount()

        #Even when multitracker fails,  bboxes will have old values
        #But ok will be false
        if len(objects_list) > 0:
            ok, bboxes = multi_tracker.update(frame)
        #bboxes = multi_tracker.getObjects()
        #ok = multi_tracker.empty()

        fps = cv.getTickFrequency() / (cv.getTickCount() - timer)

        print(bboxes, ' --- ', ok)

        if ok and len(bboxes) > 0:
            drawPred(frame, bboxes, objects_detected)
            # Display FPS on frame
            cv.putText(frame, "FPS : " + str(int(fps)), (100, 50),
                       cv.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

        else:
            cv.putText(frame,
                       'Tracking Failure. Trying to detect more objects',
                       (50, 80), cv.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
            stream, objects_detected, objects_list, multi_tracker = intermediate_detections(
                stream, predictor, multi_tracker, tracker, args.thr, classes)

        # Display result
        #If resolution is too big, resize the video
        if frame.shape[1] > 1240:
            cv.imshow(window_name, cv.resize(frame, (1240, 960)))
        else:
            cv.imshow(window_name, frame)

        #Write to output file
        if args.output:
            out.write(frame)
        k = cv.waitKey(1) & 0xff

        #Force detect new objects if 'q' is pressed
        if k == ord('q'):
            print('Refreshing. Detecting New objects')
            cv.putText(frame, 'Refreshing. Detecting New objects', (100, 80),
                       cv.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
            stream, objects_detected, objects_list, multi_tracker = intermediate_detections(
                stream, predictor, multi_tracker, tracker, args.thr, classes)

        # Exit if ESC pressed
        if k == 27: break

    stream.release()
    if args.output:
        out.release()
    cv.destroyAllWindows()
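process() reads its settings from an args object with model, config, input, output, classes and thr attributes; an argparse setup matching those names might look like this (attribute names are taken from the code above, the defaults and help strings are assumptions):

import argparse

def build_arg_parser():
    parser = argparse.ArgumentParser(description="Detect and track objects with OpenCV's MultiTracker")
    parser.add_argument('--model', required=True, help='path to the detection model weights')
    parser.add_argument('--config', required=True, help='path to the model configuration file')
    parser.add_argument('--input', default=None, help='input video file (default: webcam 0)')
    parser.add_argument('--output', default=None, help='optional output video path')
    parser.add_argument('--classes', default=None, help='optional text file with class names')
    parser.add_argument('--thr', type=float, default=0.5, help='detection confidence threshold')
    return parser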
Example no. 11
    def __d_t_with_cv_ta(self, stop_thread, format='bgr'):
        """Method to provide detecting and tracking objects with using OpenCV's tracking API.

        Args:
                stop_thread:            Stop flag of the thread, used to terminate it from outside the function's loop.
                format:       	        Color space format.
        """

        tracked_boxes = []  # becomes a NumPy array later, because update() overwrites it
        names = []

        multi_tracker = cv2.MultiTracker_create()

        rgb, detected_boxes = self.__detect_initiate()

        found_count = 0
        d_t_failure_count = 0
        use_detection = 0

        last_frame = np.zeros(shape=(self.frame_height, self.frame_width))

        while True:
            if last_frame.any() != self.current_frame.any():

                last_frame = self.current_frame

                if len(detected_boxes) > len(tracked_boxes):

                    if not self.no_recognize:
                        names = self.__recognize_things(rgb, detected_boxes)
                    else:
                        names = None

                    last_frame = cv2.cvtColor(rgb, cv2.COLOR_RGB2BGR)

                    # Create MultiTracker object
                    multi_tracker = cv2.MultiTracker_create()

                    # Initialize MultiTracker
                    for box in detected_boxes:
                        # box[3] is x,
                        # box[0] is y,
                        # box[1] is x + w,
                        # box[2] is y + h.
                        reworked_box = box[3], box[0], box[1] - box[3], box[2] - box[0]

                        multi_tracker.add(self.__create_tracker_by_name(), last_frame, reworked_box)
                    found_count += 1

                if use_detection >= 3:
                    rgb, detected_boxes = self.detect_things(last_frame)
                    use_detection = 0

                use_detection += 1

                # Start timer
                timer = cv2.getTickCount()

                # get updated location of objects in subsequent frames
                is_tracking_success, tracked_boxes = multi_tracker.update(last_frame)

                if len(detected_boxes) < len(tracked_boxes):
                    d_t_failure_count += 1
                else:
                    d_t_failure_count = 0

                # Calculate Frames per second (FPS)
                fps = cv2.getTickFrequency() / (cv2.getTickCount() - timer)

                if is_tracking_success and d_t_failure_count < 5:

                    self.track(last_frame, tracked_boxes, names)

                elif not is_tracking_success or d_t_failure_count >= 5:
                    # Tracking failure
                    cv2.putText(last_frame, "Tracking failure detected", (100, 80), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (0, 0, 255), 2)
                    tracked_boxes = []  # for clearing tracked_boxes list.

                # # Display tracker type on frame
                # cv2.putText(frame, self.tracker_type + " Tracker", (100, 20), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)
                #
                # # Display FPS on frame
                # cv2.putText(frame, "FPS : " + str(int(fps)), (100, 50), cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

                self.__show_frame(last_frame)
                # self.__truncate_stream()

            if self.__check_loop_ended(stop_thread):
                break
Example no. 12
def multi_tracking(yolo, video_path):
    global percentResize
    accum_time = 0
    curr_fps = 0
    fps = "FPS: ??"
    prev_time = timer()
    global multi_tracking_enable
    global arima_predict
    multi_tracking_enable = True
    tracker = cv2.MultiTracker_create()
    camera = cv2.VideoCapture(video_path)
    ok, image = camera.read()
    if not ok:
        print('Failed to read video')
        exit()
    boxes = []

    frameCount = 1
    startFrame = 1
    skipFrames = 25
    consecutiveframes = 1

    initialHistoryCount = 11
    skipHistory = 5

    extrapolate = 3
    dof = 0

    yoloCount = 0
    countCond = 0
    xhistory = []
    yhistory = []
    depth_history = []
    while(True):
        if(frameCount == startFrame):
            frame = Image.fromarray(image)
            frame, boxes = yolo.detect_image(frame)
            #yolo.close_session()
            break
        ok, image = camera.read()
        frameCount += 1
    #np.set_printoptions(suppress = True)
    boxes = np.asarray(boxes)
    boxes[:, 2] = boxes[:, 2] - boxes[:, 0]
    boxes[:, 3] = boxes[:, 3] - boxes[:, 1]
    boxes = np.ndarray.tolist(boxes)
    prevBoxes = len(boxes)
    curBoxes = prevBoxes
    #print(boxes)
    #np.savetxt('boxes.txt', boxes, fmt = "%i")
    #return boxes
    #boxes = []
    manualBox = 0
    for i in range(manualBox):
        box = cv2.selectROI('tracking', image)
        boxes.append(box)

    for eachBox in boxes:
        eachBox = tuple(eachBox)
        xhistory.append([int(eachBox[0] + eachBox[2] / 2)])
        yhistory.append([int(eachBox[1] + eachBox[3] / 2)])
        ok = tracker.add(cv2.TrackerMedianFlow_create(), image, eachBox)

    while(True):
        ok, image=camera.read()
        if not ok:
            break
        orig_image = image.copy()

        if(prevBoxes != curBoxes):
            countCond += 1
        if(frameCount % skipFrames == 0):
            #print(consecutiveframes)
            consecutiveframes = 1
            tracker = cv2.MultiTracker_create()
            frame = Image.fromarray(image)
            boxes = []
            frame, boxes = yolo.detect_image(frame)
            yoloCount += 1
            boxes = np.asarray(boxes)
            boxes[:, 2] = boxes[:, 2] - boxes[:, 0]
            boxes[:, 3] = boxes[:, 3] - boxes[:, 1]
            boxes = np.ndarray.tolist(boxes)
            prevBoxes = len(boxes)
            curBoxes = None
            xhistory = []
            yhistory = []
            depth_history = []
            for eachBox in boxes:
                eachBox = tuple(eachBox)
                xhistory.append([int(eachBox[0] + eachBox[2] / 2)])
                yhistory.append([int(eachBox[1] + eachBox[3] / 2)])
                ok = tracker.add(cv2.TrackerMedianFlow_create(), image, eachBox)
            #frameCount += 1
            #continue

        ok, boxes = tracker.update(image)
        for i in range(len(boxes)):
            xhistory[i].append(int(boxes[i][0] + boxes[i][2] / 2))
            yhistory[i].append(int(boxes[i][1] + boxes[i][3] / 2))
        if(arima_predict and len(xhistory[0]) > initialHistoryCount):

            #if(len(xhistory[i]) > 27): dof = 5
            #print(xhistory[0])

            for i in range(len(boxes)):
                history = xhistory[i].copy()
                history = [xhistory[i][t] for t in range(0, len(xhistory[i]), skipHistory)]
                xmin = min(history)
                history[:] = [x - xmin for x in history]
                xmax = max(history)
                if(xmax == 0): xmax = 1
                history[:] = [x / xmax for x in history]
                #print('xh', len(history))
                for j in range(extrapolate):
                    xmodel = ARIMA(history, order = (dof, 1, 0))
                    xmodel_fit = xmodel.fit(disp = 0, maxiter=200)
                    xoutput = xmodel_fit.forecast()
                    history.append(xoutput[0])
                xhat = int((xoutput[0] * xmax) + xmin)
                #xhat = xoutput[0]
                history = yhistory[i].copy()
                history = [yhistory[i][t] for t in range(0, len(yhistory[i]), skipHistory)]
                ymin = min(history)
                history[:] = [y - ymin for y in history]
                #history = [yhistory[i][0], yhistory[i][int(len(yhistory[i]) / 2)], yhistory[i][len(yhistory[i]) - 1]]
                ymax= max(history)
                if(ymax == 0): ymax = 1
                history[:] = [y / ymax for y in history]
                #print('yh', len(history))
                for j in range(extrapolate):
                    ymodel = ARIMA(history, order = (dof, 1, 0))
                    ymodel_fit = ymodel.fit(disp = 0, maxiter=200)
                    youtput = ymodel_fit.forecast()
                    history.append(youtput[0])
                yhat = int((youtput[0] * ymax) + ymin)
                #yhat = youtput[0]
                cp1 = int(boxes[i][0] + boxes[i][2] / 2)
                cp2 = int(boxes[i][1] + boxes[i][3] / 2)
                cv2.arrowedLine(image, (int(xhistory[i][0]),int(yhistory[i][0])), (cp1, cp2), (0, 255, 0), 2)
                cv2.arrowedLine(image, (cp1, cp2), (xhat, yhat), (0, 0, 255), 2)
                #slope = math.abs(math.atan((yhat - cp2) / (xhat - cp1)))
                #speed = math.sqrt((yhat - cp2) * (yhat - cp2) + (xhat - cp1) * (xhat - cp1))
                #percentChange = 0.0
                #if(yhat >= cp2):

                p1 = (int(xhat - boxes[i][2] / 2), int(yhat - boxes[i][3] / 2))
                p2 = (int(xhat + boxes[i][2] / 2), int(yhat + boxes[i][3] / 2))
                cv2.rectangle(image, p1, p2, (255, 255, 255), 1)
        for newbox in boxes:
            p1 = (int(newbox[0]), int(newbox[1]))
            p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))
            cv2.rectangle(image, p1, p2, (200,0,0), 2)

        if(depthMapEstimation):
            depth_est = image_depth(orig_image)
            dof = 0
            current_depth_est = depth_est.copy()
            pred_depth_est = depth_est.copy()
            pd = 'OFF'
            for i in range(len(boxes)):
                p1 = (int(boxes[i][0]), int(boxes[i][1]))
                p2 = (int(boxes[i][0] + boxes[i][2]), int(boxes[i][1] + boxes[i][3]))
                current_depth = cal_depth_box(depth_est, p1, p2)
                if(len(depth_history) < len(boxes)):
                    depth_history.append([current_depth])
                else:
                    depth_history[i].append(current_depth)
                if(math.isnan(current_depth)):
                    continue
                if(len(depth_history[i]) > initialHistoryCount):
                    pd = 'ON'
                    history = depth_history[i].copy()
                    history = np.nan_to_num(history)
                    history = [history[t] for t in range(0, len(history), skipHistory)]
                    dmin = min(history)
                    history[:] = [d - dmin for d in history]
                    dmax = max(history)
                    if(dmax == 0): dmax = 1
                    history[:] = [d / dmax for d in history]
                    for j in range(extrapolate):
                        dmodel = ARIMA(history, order = (0, 1, 0))
                        dmodel_fit = dmodel.fit(disp = 0, maxiter=200)
                        doutput = dmodel_fit.forecast()
                        history.append(doutput[0])
                    #print(doutput[0])
                    if(not math.isnan(doutput[0])):
                        dhat = int((doutput[0] * dmax) + dmin)
                    else:
                        dhat = current_depth

                    current_depth_est = set_depth(current_depth_est, p1, p2, current_depth)
                    if(math.isnan(current_depth)):
                        print("wtf just happened")
                    cv2.putText(current_depth_est,text=str(int(current_depth)), org=(p1[0], p1[1]), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                                fontScale=0.50, color=(0, 0, 255), thickness=1)
                    pred_depth_est = set_depth(pred_depth_est, p1, p2, dhat)
                    cv2.putText(pred_depth_est,text=str(int(dhat)), org=(p1[0], p1[1]), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                                fontScale=0.50, color=(0, 0, 255), thickness=1)

            cv2.putText(pred_depth_est, text=pd, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale=0.50, color=(0, 0, 255), thickness=2)
            #cv2.namedWindow("curdepth", cv2.WINDOW_NORMAL)
            current_depth_est = cv2.resize(current_depth_est, (0,0), fx=percentResize, fy=percentResize)
            cv2.imshow('curdepth', current_depth_est)
            #cv2.namedWindow("predepth", cv2.WINDOW_NORMAL)
            pred_depth_est = cv2.resize(pred_depth_est, (0,0), fx=percentResize, fy=percentResize)
            cv2.imshow('predepth', pred_depth_est)
        curr_time = timer()
        exec_time = curr_time - prev_time
        prev_time = curr_time
        accum_time = accum_time + exec_time
        curr_fps = curr_fps + 1
        if accum_time > 1:
            accum_time = accum_time - 1
            fps = "FPS: " + str(curr_fps)
            curr_fps = 0
        cv2.putText(image, text=fps, org=(3, 15), fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                    fontScale=0.50, color=(255, 0, 0), thickness=2)
        #cv2.namedWindow("tracking", cv2.WINDOW_NORMAL)
        image = cv2.resize(image, (0,0), fx=percentResize, fy=percentResize)
        cv2.imshow('tracking', image)
        frameCount += 1
        consecutiveframes += 1
        k = cv2.waitKey(1)
        if k == 27 : break # esc pressed
    print(yoloCount)
    print(countCond)
    yolo.close_session()
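
The normalise / ARIMA-fit / forecast block above is repeated for the x, y and depth histories; a minimal sketch of the shared pattern, assuming the same pre-0.13 statsmodels ARIMA API the snippet uses (forecast_center is a hypothetical helper name):

from statsmodels.tsa.arima_model import ARIMA  # pre-0.13 statsmodels API, matching the snippet

def forecast_center(history, dof=1, skip=1, steps=1):
    # Thin the history, scale it to [0, 1], fit ARIMA(dof, 1, 0) repeatedly,
    # roll the forecast forward `steps` values, then undo the scaling.
    series = list(history[::skip])
    lo = min(series)
    series = [v - lo for v in series]
    hi = max(series) or 1
    series = [v / hi for v in series]
    nxt = series[-1]
    for _ in range(steps):
        fit = ARIMA(series, order=(dof, 1, 0)).fit(disp=0, maxiter=200)
        pred, _stderr, _conf = fit.forecast()   # one-step-ahead forecast
        nxt = float(pred[0])
        series.append(nxt)
    return int(nxt * hi + lo)

# e.g. xhat = forecast_center(xhistory[i], dof, skipHistory, extrapolate)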
Esempio n. 13
0
def vid_main(vid1, vid2, fps):
    # Open video file for read.
    cap1 = cv2.VideoCapture(vid1)
    cap2 = cv2.VideoCapture(vid2)

    # Prepare dataset and recognition models
    FRmodel, ml_model, encoder = rs.set_up(align_method, ml_method)
    print("Successfully set up recongnition system.")

    # Determine the homography transformation once for all scenes if the cameras are stationary.
    if SC:
        H = homo_stationary_cameras(vid2, vid1, ratio, reprojThresh,
                                    reprojThresh2)

    # Initialize.
    count = 0
    new_scene = False
    new_scene_count = 0
    ppl_counts = [0, 0, 0, 0]  # number of people prediction in each sub-frame
    names_0, names_1, names_2, names_3 = (
        [], [], [], [])  # list of name prediction in each sub-frame
    object_trackers = []
    all_names = []
    names_colors = {
    }  # each person who has a pre-collected dataset and is detected in the frame is marked with a unique colour.
    sub_frames = []  # len(sub_frames)==4 if FP==True, else 1

    while cap1.isOpened() and cap2.isOpened(
    ):  #take minimum length of the two videos
        print(count)  # Current frame number
        ret1, frame1 = cap1.read()
        ret2, frame2 = cap2.read()
        if ret1 and ret2:
            # At first frame, obtain input frame dimension and create VideoWriter object.
            if count == 0:
                height1, width1, layers1 = frame1.shape
                height2, width2, layers2 = frame2.shape
                new_height = int(height1 + height2 * outputSizeTimes)
                new_width = int(width1 + width2 * outputSizeTimes)
                # Upper-left coordinate so that frame1 with (height1, width1)
                # is placed at the centre of (new_height, new_width).
                new_coord = [
                    int((new_width - width1) / 2.),
                    int((new_height - height1) / 2.)
                ]

                # Initialize the dimension of output video format.
                frame_width, frame_height = display.output_shape(
                    PB, FP, new_width, new_height, maxWidth, maxHeight)
                out = cv2.VideoWriter(output_file_name,
                                      cv2.VideoWriter_fourcc(*'MP4V'), fps,
                                      (frame_width, frame_height))

                # Initialize sub-frames.
                mask_img = np.zeros((new_height, new_width)).astype(np.uint8)
                mask_img[new_coord[1]:new_coord[1] + height1,
                         new_coord[0]:new_coord[0] + width1].fill(255)
                canvas = np.zeros(
                    (new_height, new_width,
                     3)).astype(np.uint8)  # dimension of sub-frame
                if canvas_color != (0, 0, 0):
                    canvas[:, :, :] = canvas_color

            # SCENE DETECTION:
            # Check video scene change, assuming the two cameras cut at the same time.
            # Otherwise the corresponding frames do not match.
                pre_frame1 = frame1  #count == 0
            new_frame1_norm_gray = cv2.cvtColor(
                (frame1 * 255.0 / frame1.max()).astype('uint8'),
                cv2.COLOR_BGR2GRAY)
            pre_frame1_norm_gray = cv2.cvtColor(
                (pre_frame1 * 255.0 / pre_frame1.max()).astype('uint8'),
                cv2.COLOR_BGR2GRAY)
            curr_err = mse_maxpool(new_frame1_norm_gray, pre_frame1_norm_gray)
            if count == 0:
                pre_err = curr_err
            curr_err_diff = abs(pre_err - curr_err)
            if count == 0:
                pre_err_diff = curr_err_diff
            new_scene = identify_scene(pre_err_diff, curr_err_diff, pre_err,
                                       curr_err, cutThreshold,
                                       aroundCutThreshold)
            # Update for the next *recognition* process.
            if new_scene or count == 0:
                new_scene_count = cutTextDuration
                object_trackers = []
                all_names = []
                ppl_counts = [0, 0, 0, 0]
#            pre_frame1 = frame1###

# PANORAMA FEATURE DETECTION:
# Use the last calculated frame's homography transformation.
            if count % perFrame == 0:
                # Create PANORAMA of the two frame.
                # Create describer.
                (kps1, features1) = detectAndDescribe(frame1)
                (kps2, features2) = detectAndDescribe(frame2)
                # Matches.
                M = matchKeypoints(kps2, kps1, features2, features1, ratio,
                                   reprojThresh)

            # If the match is None, there aren't enough matched keypoints to create a panorama.
            # Assume the views do not overlap and display both videos by simply pasting them together as a default.
            if M is None and not SC:
                result_img = display.homo_not_match_subframe(
                    PR, canvas, new_width, new_height, new_coord, frame1,
                    frame2)
                common_frame = canvas.copy()
                common_frame[:, :, :] = (0, 0, 0)
            else:
                if not SC:  # Evaluate individual frame's homography transformation when the cameras are not stationary.
                    (matches, H, status) = M
                # Create panorama.
                (panorama, frame2_trans,
                 pano_coord) = warp_images(frame1, frame2, H)

                # Coordinates for transferring the panorama result to a properly sized canvas,
                # with frame1 at the centre.
                pano_bounds, new_bounds = display.pano_trans_bounds(
                    pano_coord, new_coord, new_width, new_height, width1,
                    height1, panorama.shape)
                pano_left_bound, pano_right_bound, pano_up_bound, pano_down_bound = pano_bounds
                new_left_bound, new_right_bound, new_up_bound, new_down_bound = new_bounds

                # frame2 after homography transformation, used for Poisson blending and to determine the common region.
                target_img = canvas.copy()
                target_img[new_up_bound:new_down_bound, new_left_bound:new_right_bound, :] =\
                    frame2_trans[pano_up_bound:pano_down_bound, pano_left_bound:pano_right_bound, :]

                # POISSON BLENDING:
                if PB:
                    # Input images for poisson blending.
                    source_img = canvas.copy()
                    source_img[new_coord[1]:new_coord[1] + frame1.shape[0],
                               new_coord[0]:new_coord[0] +
                               frame1.shape[1], :] = frame1
                    # Resize the images.
                    source_img = cv2.resize(source_img,
                                            (reduce_width, reduce_height))
                    target_img = cv2.resize(target_img,
                                            (reduce_width, reduce_height))
                    mask_img = cv2.resize(mask_img,
                                          (reduce_width, reduce_height))
                    # Apply Poisson blending.
                    result_img = poisson_blending(source_img, target_img,
                                                  mask_img)
                else:
                    result_img = canvas.copy()
                    result_img[new_up_bound:new_down_bound, new_left_bound:new_right_bound, :] =\
                        panorama[pano_up_bound:pano_down_bound, pano_left_bound:pano_right_bound, :]

                # Mask indicating the common area of the two frames.
                common_mask = canvas.copy()
                common_mask[:, :, :] = (0, 0, 0)
                common_mask[target_img == 0] = 255
                common_mask[mask_img == 0] = 255
                # Apply to output frame.
                common_frame = result_img.copy()
                common_frame[common_mask == 255] = 0

            # Find and label faces in each sub-frame.
            if FP:
                sub_frames = [panorama, common_frame, frame1, frame2]
#                cv2.imwrite("sub1.png", panorama) ## Save INDIVIDUAL FRAMES for test purpose
#                cv2.imwrite("sub2.png", common_frame)
#                cv2.imwrite("sub3.png", frame1)
#                cv2.imwrite("sub4.png", frame2)
            else:
                sub_frames = [panorama]

            for i, sub_img in enumerate(sub_frames):
                # Re-perform detection and recognition when a video scene change is detected,
                # then identify people by tracking.
                if new_scene or count == 0:
                    ### Replaced by recognition: >>>
                    #            # FACE DETECTION:
                    #            faces, eyes = mtcnn_detect(new_frame)
                    ### <<<
                    # FACE RECOGNITION with DETECTION
                    names, patches, faces = rec.predict(
                        sub_img, ml_model, encoder, align_method, FRmodel)
                    ppl_counts[i] = len(names)
                    for name in names:
                        if name not in names_colors:
                            names_colors[name] = (randint(0, 255),
                                                  randint(0, 255),
                                                  randint(0, 255))

                    # OBJECT TRACKING:
                    # Create MultiTracker for tracking multiple objects.
                    curr_multiTracker = cv2.MultiTracker_create()
                    for (x, y, w, h) in faces:
                        curr_multiTracker.add(createTrackerByName(trackerType),
                                              sub_img, (x, y, w, h))
                    object_trackers.append(curr_multiTracker)
                    all_names.append(names)
                    new_scene = False
                else:  # not new scene: previous detected scene = current frame scene
                    # Update face tracking using the previously initialized multi-object tracker.
                    success, faces = object_trackers[i].update(sub_img)

                display.label_faces(sub_img, faces, all_names[i], names_colors)
                # Panorama /main sub-frame
                if i == 0:
                    result_img[new_up_bound:new_down_bound, new_left_bound:new_right_bound, :] =\
                    sub_img[pano_up_bound:pano_down_bound, pano_left_bound:pano_right_bound, :] #update labels
#                    cv2.imwrite("sub5.png", result_img) ## Save LABELED MAIN FRAME for test purpose

# Define new frame.
            new_frame = display.format_output_frame(FP, canvas, new_coord,
                                                    result_img, common_frame,
                                                    frame1, frame2)

            # Add text annotations:
            # Display the "New scene" sign in new_scene_count frame, shortly after new cut detection.
            if new_scene_count >= 0:
                new_frame = display.display_new_scene_text(
                    new_frame, new_width, new_height)
            # Display text for number of people.
            new_frame = display.display_ppl_num_text_ul(
                new_frame, new_width, ppl_counts[0], ppl_counts[1],
                ppl_counts[2], ppl_counts[3])  # Final count: up-left image.
            if FP:
                new_frame = display.display_ppl_num_text_ur(
                    new_frame, new_width, ppl_counts[1])  # up-right image.
                new_frame = display.display_ppl_num_text_bl(
                    new_frame, new_width, new_height,
                    ppl_counts[2])  # bottom-left image.
                new_frame = display.display_ppl_num_text_br(
                    new_frame, new_width, new_height,
                    ppl_counts[3])  # bottom-right image.


#            if count==0:
#                cv2.imwrite("test.png", new_frame) ## Save FIRST FRAME of output video for test purpose.

# Write to output video.
            out.write(new_frame)

            # Update for the next *frame*.
            pre_frame1_norm_gray = new_frame1_norm_gray.copy()
            pre_err = curr_err
            pre_err_diff = curr_err_diff
            new_scene_count -= 1
            names_0, names_1, names_2, names_3 = ([], [], [], [])
            sub_frames = []
            count += 1

        elif cv2.waitKey(1) & 0xFF == ord('q'):  # quit when 'q' is pressed
            break
        else:
            break

    # Release everything if job is finished
    cap1.release()
    cap2.release()
    print("Video analysis complete!")
    out.release()
    cv2.destroyAllWindows()  # destroy all the opened windows
    return
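
mse_maxpool and identify_scene are defined outside this snippet; a much simpler stand-in for the scene-cut test, using the mean absolute difference of consecutive grayscale frames, could look like this (the threshold is a placeholder):

import cv2
import numpy as np

def is_scene_cut(prev_gray, curr_gray, threshold=30.0):
    # A cut is flagged when the mean absolute difference between consecutive
    # grayscale frames is large; the threshold value is an arbitrary placeholder.
    diff = cv2.absdiff(prev_gray, curr_gray)
    return float(np.mean(diff)) > threshold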
    def track(self):
        if self.videopath == -1:
            self.logger.error('Videopath error')
            return("VideopathError")

        num_of_img = 0
        countimage = 0

        cap = cv2.VideoCapture(self.videopath)
        success, frame = cap.read()# Read first frame
        self.video_length = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))

        if not success:
            self.logger.error('Failed to read video')
            sys.exit(1)

        bboxes = []
        colors = []

        a = 2
        self.bboxes = []
        for circle in self.circles:
            x = circle[0]
            y = circle[1]
            radius = circle[2]
            width = radius * 2
            height = width
            self.bboxes.append( (x-(radius+a),y-(radius+a), width, height))

        self.logger.debug(f'{len(self.bboxes)} tracking boxes have been created')

        trackerType = "CSRT"
        multiTracker = cv2.MultiTracker_create()# Create MultiTracker object

        for bbox in self.bboxes: # Initialize MultiTracker
            multiTracker.add(cv2.TrackerCSRT_create(), frame, bbox)

        self.logger.info(f'Starting the tracking of {len(self.bboxes)} particles')
        while cap.isOpened():

            success, frame = cap.read()
            if not success:
                cv2.imwrite('img_path{}_lastframe.jpg'.format(num_of_img), lastframe)
                num_of_img += 1
                self.logger.info("\n Last image saved in your working directory!")
                break

            lastframe = frame
            success, boxes = multiTracker.update(frame)  # get updated location of objects in subsequent frames

            if countimage < 1:  # initialize the position lists
                for i, newbox in enumerate(boxes):
                    self.positions[i] = []

            # draw tracked objects
            for i, newbox in enumerate(boxes):
                # coordinates of the tracking box
                p1 = (int(newbox[0]), int(newbox[1]))
                p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))
                cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
                # append the center of the box to dict "self.positions[index of box]" according to refresh rate
                if countimage:
                    xcenter = (p1[0] + p2[0]) / 2
                    ycenter = (p1[1] + p2[1]) / 2
                    self.positions[i].append((xcenter, ycenter))

                for pos in self.positions[i]:
                    xcenter = pos[0]
                    ycenter = pos[1]
                    cv2.circle(frame, (int(xcenter), int(ycenter)), 1, self.circle_color, -1)

            countimage += 1
            cv2.putText(frame, f"Frame no{countimage}", (10, 37), cv2.FONT_HERSHEY_PLAIN, 1, (0, 0, 0), 1)
            cv2.imshow('Analysis', frame)

            # quit on ESC button and save last frame
            if cv2.waitKey(1) & 0xFF == 27:  # Esc pressed
                cv2.imwrite('img_path{}_lastframe.jpg'.format(num_of_img), frame)
                num_of_img += 1
                self.logger.info("\n Last Image save in your working directory !")
                break
            print_progress(countimage, self.video_length)
        return(self.positions)
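
The circle-to-box conversion done inline above (box offset by radius + a, side equal to the diameter) can be factored into a small helper; a sketch mirroring the snippet's layout:

def circle_to_bbox(circle, margin=2):
    # circle is (x_center, y_center, radius); returns a top-left based (x, y, w, h).
    x, y, radius = circle[:3]
    side = 2 * radius            # width == height == diameter, as in the loop above
    return (x - (radius + margin), y - (radius + margin), side, side)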
Esempio n. 15
0
import numpy as np
import cv2 as cv
import sys

# if len(sys.argv) != 2:
#     print('Input video name is missing')
#     exit()

print('Select 3 tracking targets')

cv.namedWindow("tracking")
camera = cv.VideoCapture(0)
tracker = cv.MultiTracker_create()
init_once = False

ok, image = camera.read()
if not ok:
    print('Failed to read video')
    exit()

bbox1 = cv.selectROI('tracking', image)
bbox2 = cv.selectROI('tracking', image)
# bbox3 = cv.selectROI('tracking', image)

while camera.isOpened():
    ok, image = camera.read()
    if not ok:
        print('no image to read')
        break

    if not init_once:
    def get_features(self):  # type: () -> List[Feature]
        _ = self.image.value
        rospy.sleep(2)

        trackers = {col: cv2.MultiTracker_create() for col in self.masks.keys()}
        # Wait for image to warm up
        image = self.image.wait_for_n_messages(10)
        features = {}

        for col, mask in self._split_image_into_masks(image).items():
            boxes = self._find_boxes_to_track(mask)
            track_image = cv2.cvtColor(mask.astype(np.uint8) * 255, cv2.COLOR_GRAY2BGR)
            # print('ADDING BOXES: ', boxes)
            for box in boxes:
                trackers[col].add(make_tracker('Boosting'), track_image, box)
            # print(boxes)
            features[col] = [[] for _ in boxes]

        duration = 2
        rate = rospy.Rate(100)
        start_time = rospy.get_time()
        while not rospy.is_shutdown() and rospy.get_time() - start_time < duration:
            image = self.image.value
            show_image = image
            for col, mask in self._split_image_into_masks(image).items():
                try:
                    track_image = cv2.cvtColor(mask.astype(np.uint8) * 255, cv2.COLOR_GRAY2BGR)
                    success, boxes = trackers[col].update(track_image)

                    boxes = [(box[0] - 10, box[1] - 10, box[2] + 20, box[3] + 20) for box in boxes]

                    # print(success, boxes)
                    for box in boxes:
                        x, y, w, h = (int(v) for v in box)
                        show_image = cv2.rectangle(show_image, (x, y), (x+w, y+h), (255, 0, 0), 5)

                    features_found = self._find_features(mask, boxes, col)
                    for index, feature in enumerate(features_found):
                        features[col][index].append(feature)

                    for feature in features_found:
                        if feature is not None:
                            show_image = cv2.drawContours(show_image, [feature.contour], -1, (255, 255, 0), 5)

                            col = self.col_name_to_rgb(feature.colour)
                            # print(col)
                            show_image = cv2.putText(
                                show_image,
                                '{}'.format(feature.shape),
                                tuple(int(x) for x in feature.centroid),
                                cv2.FONT_HERSHEY_SIMPLEX,
                                3.0,
                                col,
                            )
                except Exception as err:
                    print(err)
            self.gui_image_pub.publish(self.bridge.cv2_to_imgmsg(show_image))

            rate.sleep()

        # cv2.waitKey(0)
        # print(features)
        features = sum(([self._combine_features(f) for f in features[col]] for col in self.masks.keys()), [])
        return [f for f in features if f is not None]
Esempio n. 17
0
    def trackMobject(self):
        trackerTypes = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN', 'MOSSE', 'CSRT']

        def createTrackerByName(trackerType):
            # Create a tracker based on tracker name
            if trackerType == trackerTypes[0]:
                tracker = cv2.TrackerBoosting_create()
            elif trackerType == trackerTypes[1]:
                tracker = cv2.TrackerMIL_create()
            elif trackerType == trackerTypes[2]:
                tracker = cv2.TrackerKCF_create()
            elif trackerType == trackerTypes[3]:
                tracker = cv2.TrackerTLD_create()
            elif trackerType == trackerTypes[4]:
                tracker = cv2.TrackerMedianFlow_create()
            elif trackerType == trackerTypes[5]:
                tracker = cv2.TrackerGOTURN_create()
            elif trackerType == trackerTypes[6]:
                tracker = cv2.TrackerMOSSE_create()
            elif trackerType == trackerTypes[7]:
                tracker = cv2.TrackerCSRT_create()
            else:
                tracker = None
                print('Incorrect tracker name')
                print('Available trackers are:')
                for t in trackerTypes:
                    print(t)

            return tracker

        if __name__ == '__main__':

            print("Default tracking algoritm is CSRT \n"
                  "Available tracking algorithms are:\n")
            for t in trackerTypes:
                print(t)

            trackerType = "CSRT"

            # Set video to load
            self.video_track = self.ids['video_track']
            videofile = self.video_track.text
            #videoPath = 'videofile'

            # Create a video capture object to read videos
            cap = cv2.VideoCapture(videofile)

            # Read first frame
            success, frame = cap.read()
            # quit if unable to read the video file
            if not success:
                print('Failed to read video')
                sys.exit(1)

            ## Select boxes
            bboxes = []
            colors = []

            # OpenCV's selectROI function doesn't work for selecting multiple objects in Python
            # So we will call this function in a loop till we are done selecting all objects
            while True:
                # draw bounding boxes over objects
                # selectROI's default behaviour is to draw box starting from the center
                # when fromCenter is set to false, you can draw box starting from top left corner
                bbox = cv2.selectROI('multiTracker/MultiTracker', frame)
                bboxes.append(bbox)
                colors.append((randint(64, 255), randint(64, 255), randint(64, 255)))
                print("Press q to quit selecting boxes and start tracking")
                print("Press any other key to select next object")
                k = cv2.waitKey(0) & 0xFF
                if (k == 113):  # q is pressed
                    break

            print('Selected bounding boxes {}'.format(bboxes))

            ## Initialize MultiTracker
            # There are two ways you can initialize multitracker
            # 1. tracker = cv2.MultiTracker("CSRT")
            # All the trackers added to this multitracker
            # will use CSRT algorithm as default
            # 2. tracker = cv2.MultiTracker()
            # No default algorithm specified

            # Initialize MultiTracker with tracking algo
            # Specify tracker type

            # Create MultiTracker object
            multiTracker = cv2.MultiTracker_create()

            # Initialize MultiTracker
            for bbox in bboxes:
                multiTracker.add(createTrackerByName(trackerType), frame, bbox)

            # Process video and track objects
            while cap.isOpened():
                success, frame = cap.read()
                if not success:
                    break

                # get updated location of objects in subsequent frames
                success, boxes = multiTracker.update(frame)

                # draw tracked objects
                for i, newbox in enumerate(boxes):
                    p1 = (int(newbox[0]), int(newbox[1]))
                    p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))
                    cv2.rectangle(frame, p1, p2, colors[i], 2, 1)

                # show frame
                cv2.imshow('multiTracker/MultiTracker', frame)

                # quit on ESC button
                k = cv2.waitKey(5) & 0xFF
                if k == 27:
                    break
def main():
    camera, net = initializer(VIDEO, PROTO, MODEL)

    init_once = False
    tracker = None
    boxes = tuple()

    ok, frame = camera.read()
    if not ok:
        print('Failed to read video')
        exit()
    fr_h, fr_w = frame.shape[:2]

    cnt = 0
    while camera.isOpened():
        ok, frame = camera.read()
        if not ok:
            print('no frame to read')
            break

        # get prediction on current frame
        detections = forward_net(frame, net)

        # Display objects count
        for j, (obj, obj_cnt) in enumerate(gen_obj_cnt(detections)):
            cv2.putText(img=frame,
                        text='{}: {}'.format(obj, obj_cnt),
                        org=(5, 40 * (j + 1)),
                        fontFace=cv2.FONT_HERSHEY_SIMPLEX,
                        fontScale=1,
                        color=(0, 0, 255),
                        thickness=2)

        if cnt % 5 == 0:
            del tracker
            # concatenate a new box. Ref:
            # https://stackoverflow.com/a/22732845/8809592
            # boxes = np.vstack([boxes, np.array([[770, 590, 90, 80]])]) if len(boxes) else np.array(
            #     [[580, 570, 100, 90]])
            boxes = get_trk_box(detections, fr_h, fr_w)
            print(boxes)
            for box in boxes:
                box[0] = box[0] - cnt * 2
                box[1] = box[1] - cnt * 2
            tracker = cv2.MultiTracker_create()
            init_once = False

        if not init_once:
            for i, box in enumerate(boxes):
                if not (0 < box[0] < fr_w) & (0 < box[1] < fr_h) & (box[0] + box[2] < fr_w) & (box[1] + box[3] < fr_h) \
                       & (box[2] != 0) & (box[3] != 0):
                    boxes = np.delete(boxes,
                                      np.where(np.all(boxes == box, axis=1)),
                                      0)
                else:
                    tracker.add(cv2.TrackerMIL_create(), frame, tuple(box))
            init_once = True

        ok, boxes = tracker.update(frame)

        for tkr_box in list(boxes):
            p1 = (int(tkr_box[0]), int(tkr_box[1]))
            p2 = (int(tkr_box[0] + tkr_box[2]), int(tkr_box[1] + tkr_box[3]))
            cv2.rectangle(frame, p1, p2, (200, 0, 0))

        cv2.imshow('tracking', frame)
        k = cv2.waitKey(1)
        if k == 27:
            break  # esc pressed

        cnt += 1
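
The validity check used before tracker.add above can be isolated into a predicate; a sketch, assuming each box is an (x, y, w, h) tuple:

def box_in_frame(box, fr_w, fr_h):
    # box is (x, y, w, h); reject empty boxes and boxes touching or leaving the frame.
    x, y, w, h = box
    return (0 < x < fr_w and 0 < y < fr_h
            and x + w < fr_w and y + h < fr_h
            and w > 0 and h > 0)

# e.g. boxes = [b for b in boxes if box_in_frame(b, fr_w, fr_h)]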
def run(video, pts_src, sport):
    Player_Marked = False

    # otherwise, grab a reference to the video file
    i = 0
    FPS_SMOOTHING = 0.9

    fps = 0.0
    prev = 0

    distance = 0
    total_distance = 0
    speed_list = []
    max_speed = 0
    min_speed = 0
    pointer = 0
    t = 3
    frames_count = 0

    if (sport == 1):
        per_frame = 90
        t = 3
    else:
        per_frame = 60
        t = 2
    # loop over frames from the video stream
    vs = cv2.VideoCapture(video)
    #change_res(vs,854,480)

    trackers = cv2.MultiTracker_create()

    while True:
        frames_count = frames_count + 1
        # grab the current frame, then handle if we are using a
        # VideoStream or VideoCapture object
        frame = vs.read()
        frame = frame[1]
        # now = time.time()
        # fps = (fps*FPS_SMOOTHING + (1/(now - prev))*(1.0 - FPS_SMOOTHING))
        # prev = now
        # fpstext = 'FPS = ' + str(int(fps))

        ##real_time_stats_on_screen
        real_time(frame, distance, total_distance, max_speed, min_speed)

        # check to see if we have reached the end of the stream
        if frame is None:
            #print(corr)
            average_speed = sum(speed_list) / len(speed_list)

            return xs, ys, frames, total_distance, max_speed, min_speed, average_speed

        # resize the frame (so we can process it faster)
        #frame = imutils.resize(frame, width=1920,height=1080)
        # grab the updated bounding box coordinates (if any) for each
        # object that is being tracked
        (success, boxes) = trackers.update(frame)

        # loop over the bounding boxes and draw then on the frame
        for box in boxes:

            (x, y, w, h) = [int(v) for v in box]
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 255), 2)

            # print(str(x+int(w/2))+','+str(y+int(h/2)))

            if (i % per_frame == 0):
                # xs.append(x+int(w/2))
                # ys.append(y+int(h))
                x_map, y_map = map.map([x + int(w / 2), y + int(h)], pts_src,
                                       sport)
                xs.append(x_map)
                ys.append(y_map)
                frames.append(frames_count)

                points.append(x_map)
                points.append(y_map)

                if (len(points) >= 4):
                    #(x1,y1,x2,y2)
                    #distance = mt.distance(points[pointer-3],points[pointer-2],points[pointer-1],points[pointer])/100
                    distance = dis.calculateDistance(points[pointer - 3],
                                                     points[pointer - 2],
                                                     points[pointer - 1],
                                                     points[pointer], sport)
                    total_distance = total_distance + distance
                    speed = speedFunction.calculate_Speed(distance, t)
                    speed_list.append(speed)
                    max_speed = max(speed_list)
                    min_speed = min(speed_list)

                pointer += 2

            cv2.circle(frame, (x + int(w / 2), y + int(h)), 5, red, -1)

            i += 1
        cv2.imshow("Frame", frame)
        key = cv2.waitKey(1) & 0xFF

        # if the 's' key is selected, we are going to "select" a bounding
        # box to track
        if (Player_Marked == False):

            if key == ord("s"):
                # select the bounding box of the object we want to track (make
                # sure you press ENTER or SPACE after selecting the ROI)
                # mt.mappingMatch()
                # mt.mappingPlayer()

                box = cv2.selectROI("Frame",
                                    frame,
                                    fromCenter=False,
                                    showCrosshair=True)

                # create a new object tracker for the bounding box and add it
                # to our multi-object tracker
                tracker = OPENCV_OBJECT_TRACKERS['csrt']()
                trackers.add(tracker, frame, box)
                Player_Marked = True

                # if the `q` key was pressed, break from the loop
        if key == ord("q"):
            break

        if key == ord("p"):
            cv2.waitKey()

    # otherwise, release the file pointer
    else:
        vs.release()

    # close all windows
    cv2.destroyAllWindows()
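
dis.calculateDistance and speedFunction.calculate_Speed are external to this snippet; assuming they reduce to a Euclidean distance between consecutive mapped points and distance over elapsed time, a minimal version would be:

import math

def segment_distance(x1, y1, x2, y2, scale=1.0):
    # Euclidean distance between two mapped positions; `scale` converts
    # map units to metres and is a placeholder assumption.
    return math.hypot(x2 - x1, y2 - y1) * scale

def segment_speed(distance, seconds):
    # Average speed over the sampled interval (t seconds in the snippet above).
    return distance / seconds if seconds else 0.0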
def flow (videoPath, trackerType, labelName, savefolder, waitingTime) :

    if __name__ == '__main__' :
        print("\n\n\nrun module separately \n\n\n")
        from BBoxToDataset.CreateTracker import createTrackerByName
        from BBoxToDataset.BboxObject import BboxObject
        from BBoxToDataset.BboxObject import FileData

    else :
        print("\n\n\ncalled by\n", __name__, "\n\n")
        from .BBoxToDataset.CreateTracker import createTrackerByName
        from .BBoxToDataset.BboxObject import BboxObject
        from .BBoxToDataset.BboxObject import FileData


    # Create a video capture object to read videos
    cap = cv2.VideoCapture(videoPath)

    # Read first frame
    success, frame = cap.read()

    # quit if unable to read the video file
    if not success:
        print('Failed to read video')
        sys.exit(1)


    # namedWindow: displays a window with the given string name.
    # imshow: displays an image in the specified window.
    cv2.namedWindow('Selecting RoI')
    frame = cv2.resize(frame, dsize=(416, 416), interpolation=cv2.INTER_AREA)
    cv2.imshow('Selecting RoI', frame)

    print("size is : ", np.size(frame, 0), np.size(frame, 1))

    ## Select boxes
    bboxes = []
    colors = []


    # OpenCV's selectROI function doesn't work for selecting multiple objects in Python
    # So we will call this function in a loop till we are done selecting all objects
    count = 0
    while True:
        # draw bounding boxes over objects
        # selectROI's default behaviour is to draw box starting from the center
        # when fromCenter is set to false, you can draw box starting from top left corner
        print("\n\n현재 저장된 boundary box 개수 : {}".format(count))
        bbox = cv2.selectROI('Selecting RoI', frame)
        print("\n\n")


        print("object 선택을 마치고 tracking 을 시작하려면 q를 누르세요.")
        print("다음 object 를 선택하려면 아무 키나 누르세요.")
        bboxes.append(bbox)
        colors.append((randint(0, 255), randint(0, 255), randint(0, 255)))
        count += 1
        k = cv2.waitKey(0) & 0xFF
        if (k == 113):  # q is pressed
            break

    print('Selected bounding boxes : {}'.format(bboxes))



    # Create MultiTracker object
    multiTracker = cv2.MultiTracker_create()

    # Initialize MultiTracker
    for bbox in bboxes:
        multiTracker.add(createTrackerByName(trackerType), frame, bbox)


    #----------------------------------------#
    #Show original boundingbox with Subwindow#
    #----------------------------------------#
    frameandBbox = frame.copy()  # copy so the drawn rectangles do not modify the original frame
    for bbox, color in zip(bboxes, colors):
        p1 = (int(bbox[0]), int(bbox[1]))
        p2 = (int(bbox[0] + bbox[2]), int(bbox[1] + bbox[3]))
        cv2.rectangle(frameandBbox, pt1 = p1, pt2 = p2, color = color, thickness= 2)
        print(p1, p2, color)
    cv2.namedWindow('Selecting RoI')
    cv2.imshow('Selecting RoI', frameandBbox)




    #---------------------#
    #Movie Saving Settings#
    #---------------------#
    w = np.size(frame, 1)  # width: number of columns
    h = np.size(frame, 0)  # height: number of rows
    output_size = (int(w), int(h))
    print(output_size)
    codec = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
    outconfig = cv2.VideoWriter('%s_output.mp4' % (videoPath.split('.')[0]), codec, cap.get(cv2.CAP_PROP_FPS), output_size)
    # cap.get(cv2.CAP_PROP_FPS): read the source video's frames per second
    # output_size: the output dimensions set above




    #-----------------------------------------#
    #Imagefile and Annotation Saving Settings#
    #-----------------------------------------#

        # Holds the BboxObject instances:
        # if there are 4 bboxes, 4 BboxObject instances are created.
    bboxobjects = []
    for i in range(0,len(bboxes),1) :
        a = BboxObject((0,0), (0,0), labelName)
        bboxobjects.append(a)

    if not os.path.isdir(savefolder) :
        # if the save folder does not exist...
        os.makedirs(savefolder)
        print("폴더가 존재하지 않아, 새로운 폴더를 생성합니다.")
        # 폴더를 생성한다.

    #class init parameters
    filefullpath = videoPath
    savefolder = savefolder


    #-------------------------------#
    #Process video and track objects#
    #-------------------------------#
    font = cv2.FONT_HERSHEY_SIMPLEX #just font setting
    save_image_per_n_milliseconds = waitingTime
    framecount = 0
    while cap.isOpened():
        success, frame = cap.read()

        if not success:
            break
        framecount += 1

        frame = cv2.resize(frame, dsize=(416, 416), interpolation=cv2.INTER_AREA)
        originalframe = frame.copy()
        filedata = FileData(filefullpath, framecount, frame, savefolder, labelName)

        # get updated location of objects in subsequent frames
        success, boxes = multiTracker.update(frame)

        # just to see what elements the boxes contain...
        # print(boxes)


        # draw tracked objects
        # if there are 4 objects, this loop runs 4 times
        for i, newbox in enumerate(boxes):
            p1 = (int(newbox[0]), int(newbox[1]))
            p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))
            cv2.rectangle(frame, p1, p2, colors[i], 2, 1)
            # Set Font and put Text on BBox
            cv2.putText(frame, labelName,
                        (int(newbox[0]), int(newbox[1] - 4)),
                        font,
                        fontScale=0.3,
                        color=colors[i],
                        thickness=1,
                        lineType=cv2.LINE_AA)

            if framecount % save_image_per_n_milliseconds == 0 :
                # Every save interval (waitingTime), reinitialise the BboxObject's data.
                # The reinitialised data must then be stored in the FileData object.
                bboxobjects[i].__init__(p1, p2, labelName)
                filedata.setObject(bboxobjects[i])

            cv2.rectangle(frame,
                          (p1[0]-7,p1[1]-7),
                          (p2[0]+7,p2[1]+7),
                          colors[i], 1, 1)

        # show frame
        cv2.imshow('MultiTracker', frame)
        outconfig.write(frame)

        # Every save interval, save that frame's data and image.
        if framecount % save_image_per_n_milliseconds == 0 :
            filedata.writeAndSave(originalframe)
            framecount = 0  # reset to prevent int overflow

        # quit on ESC button
        if cv2.waitKey(1) & 0xFF == 27:  # Esc pressed
            break

    print("session end")
Esempio n. 21
0
# main variable -------------------------------------------------------------
inVideo = cv2.VideoCapture('origin.avi')
success, img = inVideo.read()
height, width = img.shape[:2]
imgSet = []  # set of image for write video
imgCount = 0  # number of processed imgs
avgTime = 0  # to measure the average per-image processing time
font = cv2.FONT_HERSHEY_SIMPLEX  # font for the safety / warning text
prevDist = 0
GPIO.setmode(GPIO.BOARD)
GPIO.setup(BUZZER_PIN, GPIO.OUT)
buzzer = GPIO.PWM(BUZZER_PIN, 882)

# shoes variable ------------------------------------------------------------
tracker = cv2.MultiTracker_create()  # init tracker for tracking shoes
x1, y1, w1, h1 = 720, 870, 940 - 720, 1080 - 870  # first shoe's location
x2, y2, w2, h2 = 1050, 840, 1250 - 1050, 1025 - 840  # second shoe's location
box1 = (x1, y1, w1, h1)
box2 = (x2, y2, w2, h2)
tracker.add(cv2.TrackerBoosting_create(), img,
            box1)  # start tracking first shoe
tracker.add(cv2.TrackerBoosting_create(), img,
            box2)  # start tracking second shoe

# stairs variable -----------------------------------------------------------
numOfStairs = 2  # init stair's variables
numOfTester = int(height / 10)
stairInterval = height / numOfStairs
edgeArea = int(stairInterval / numOfStairs)  # area of stair's edge
voteStair = []  # for voting stair's edge, save the voting set of stairs
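
The buzzer is only created in this excerpt; with RPi.GPIO's PWM object a warning beep would typically be driven like this (duty cycle and duration are placeholder values):

import time

def beep(buzzer, duration=0.2, duty_cycle=50):
    # Start the PWM signal, hold it for `duration` seconds, then stop it.
    buzzer.start(duty_cycle)
    time.sleep(duration)
    buzzer.stop()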
Esempio n. 22
0
def main(cam1_path, cam2_path):
    #Read video object
    cam1_video = cv2.VideoCapture(cam1_path)
    cam2_video = cv2.VideoCapture(cam2_path)

    # Exit if video not opened.
    if not cam1_video.isOpened():
        print("Could not open cam1_video")
        sys.exit()
    if not cam2_video.isOpened():
        print("Could not open cam2_video")
        sys.exit()

    #Read first frame.
    ok, cam1_image = cam1_video.read()
    if not ok:
        print('Cannot read cam1_video file')
        sys.exit()

    ok, cam2_image = cam2_video.read()
    if not ok:
        print('Cannot read cam2_video file')
        sys.exit()

    #initialize camera 1, 2 trackers here
    cam1_tracker = cv2.MultiTracker_create()
    cam2_tracker = cv2.MultiTracker_create()

    cam1_personsTracked = []
    cam2_personsTracked = []
    #cam1_tracker.add(cam1_image, cam1_personsTracked);
    #cam2_tracker.add(cam2_image, cam2_personsTracked);

    #For each frame call individual trackers and then the centralServer
    frameNumber = 0
    personDetectionFrameThres = 10
    personDetectionFlag = False
    while True:
        frameNumber = frameNumber + 1
        print('*************************************')
        print('frameNumber: %d' % frameNumber)
        ok, cam1_image = cam1_video.read()
        if not ok:
            print('cam1_image not read!')
            break
        ok, cam2_image = cam2_video.read()
        if not ok:
            print('cam2_image not read!')
            break

        if (frameNumber == 1 or frameNumber % personDetectionFrameThres == 0):
            personDetectionFlag = True
        else:
            personDetectionFlag = False

        #cam1_personsTracked = cam_personTracking(cam1_image, cam1_tracker, personDetectionFlag)#True)#personDetectionFlag)
        #cam2_personsTracked = cam_personTracking(cam2_image, cam2_tracker, personDetectionFlag)#True)#personDetectionFlag)
        cam1_personsTracked, new_flag_1 = cam_personTracking(
            cam1_image, cam1_tracker,
            personDetectionFlag)  #True)#personDetectionFlag)
        cam2_personsTracked, new_flag_2 = cam_personTracking(
            cam2_image, cam2_tracker,
            personDetectionFlag)  #True)#personDetectionFlag)

        print('CAMERA 1 INFO: ')
        print(cam1_personsTracked)
        print('Possible new person added this frame :', new_flag_1)

        print('CAMERA 2 INFO: ')
        print(cam2_personsTracked)
        print('Possible new person added this frame :', new_flag_2)

        #aggregatedPersonsList = centralServer(cam1_personsTracked, cam2_personsTracked, cam1_image, cam2_image, new_flag_1, new_flag_2)
        aggregatedPersonsList = []
        if (len(aggregatedPersonsList)):
            #Draw tracks and bbox for individual camera outputs and aggregated outputs
            print('-------------------------------------------------------')
        print('*************************************')
        cam1_image_copy = cam1_image.copy()  # real copies; slicing an ndarray only creates a view
        cam2_image_copy = cam2_image.copy()
        '''
		index = -1
		for newbox in cam1_personsTracked:
			index = index + 1
			p1 = (int(newbox[0]), int(newbox[1]))
			p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))
			cv2.rectangle(cam1_image, p1, p2, (255,0,0), 2, 1)
			cv2.putText(cam1_image, "ID:"+str(index), p1, cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50,170,50),2);
		
		index = -1
		for newbox in cam2_personsTracked:
			index = index + 1
			p1 = (int(newbox[0]), int(newbox[1]))
			p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))
			cv2.rectangle(cam2_image, p1, p2, (255,0,0), 2, 1)
			cv2.putText(cam2_image, "ID:"+str(index), p1, cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50,170,50),2);
		'''
        for person in aggregatedPersonsList:
            if (person.vis1):
                newbox = person.box1
                p1 = (int(newbox[0]), int(newbox[1]))
                p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))
                cv2.rectangle(cam1_image_copy, p1, p2, (255, 0, 0), 2, 1)
                cv2.putText(cam1_image_copy, "ID:" + str(person.ID), p1,
                            cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

            if (person.vis2):
                newbox = person.box2
                p1 = (int(newbox[0]), int(newbox[1]))
                p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))
                cv2.rectangle(cam2_image_copy, p1, p2, (255, 0, 0), 2, 1)
                cv2.putText(cam2_image_copy, "ID:" + str(person.ID), p1,
                            cv2.FONT_HERSHEY_SIMPLEX, 0.75, (50, 170, 50), 2)

        cv2.imshow("Camera1", cam1_image)
        cv2.imshow("Camera2", cam2_image)

        cv2.imshow("Camera1_Central", cam1_image_copy)
        cv2.imshow("Camera2_Central", cam2_image_copy)

        keyPressed = cv2.waitKey(10)
        if (keyPressed == 27):
            break
Esempio n. 23
0
    def __init__(self, object_registry):
        self._object_registry = object_registry
        self._tracker_created = set()

        self._multitracker = cv2.MultiTracker_create()
Esempio n. 24
0
 def __init__(self, parent=None):
     super().__init__(parent)
     self.resize(640, 360)
     self.tracking = 0
     self.motion_detector = MotionDetector()
     self.multiTracker = cv2.MultiTracker_create()
Esempio n. 25
0
 def __init__(self, initial_bboxes, initial_frame):
     self.multiTracker = cv2.MultiTracker_create()
     formated_bboxes = voc_bboxes_to_coco(initial_bboxes.copy())
     for bbox in formated_bboxes:
         self.multiTracker.add(cv2.TrackerMedianFlow_create(),
                               initial_frame, bbox)
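
voc_bboxes_to_coco is imported from elsewhere; assuming VOC boxes are (xmin, ymin, xmax, ymax) and the tracker expects (x, y, w, h), the per-box conversion reduces to:

def voc_to_coco(box):
    # (xmin, ymin, xmax, ymax) -> (x, y, width, height)
    xmin, ymin, xmax, ymax = box
    return (xmin, ymin, xmax - xmin, ymax - ymin)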
Esempio n. 26
0
def yolo():
    parser = argparse.ArgumentParser()

    parser.add_argument('-m',
                        '--model-path',
                        type=str,
                        default='./yolov3-coco/',
                        help='The directory where the model weights and \
        configuration files are.')

    parser.add_argument('-w',
                        '--weights',
                        type=str,
                        default='./yolov3-coco/yolov3.weights',
                        help='Path to the file which contains the weights \
        for YOLOv3.')

    parser.add_argument(
        '-cfg',
        '--config',
        type=str,
        default='./yolov3-coco/yolov3.cfg',
        help='Path to the configuration file for the YOLOv3 model.')

    parser.add_argument('-vo',
                        '--video-output-path',
                        type=str,
                        default='./output.avi',
                        help='The path of the output video file')

    parser.add_argument('-l',
                        '--labels',
                        type=str,
                        default='./yolov3-coco/coco-labels',
                        help='Path to the file having the \
          labels in a new-line separated way.')

    parser.add_argument('-c',
                        '--confidence',
                        type=float,
                        default=0.5,
                        help='The model will reject boundaries which have a \
        probability less than the confidence value. \
        default: 0.5')

    parser.add_argument('-th',
                        '--threshold',
                        type=float,
                        default=0.3,
                        help='The threshold to use when applying the \
        Non-Max Suppression')

    parser.add_argument(
        '--download-model',
        type=bool,
        default=False,
        help='Set to True, if the model weights and configurations \
        are not present on your local machine.')

    parser.add_argument('-t',
                        '--show-time',
                        type=bool,
                        default=False,
                        help='Show the time taken to infer each image.')

    FLAGS, unparsed = parser.parse_known_args()
    #print(FLAGS)

    # Get the labels
    labels = open(FLAGS.labels).read().strip().split('\n')

    # Initializing colors to represent each label uniquely
    colors = np.random.randint(0, 255, size=(len(labels), 3), dtype='uint8')

    # Load the weights and configuration to form the pretrained YOLOv3 model
    net = cv.dnn.readNetFromDarknet(FLAGS.config, FLAGS.weights)

    # Get the output layer names of the model
    layer_names = net.getLayerNames()
    layer_names = [
        layer_names[i[0] - 1] for i in net.getUnconnectedOutLayers()
    ]

    ################################

    height, width = frame.shape[:2]

    img, bboxes, _, classid, _ = infer_image(net, layer_names, height, width,
                                             frame, colors, labels, FLAGS)

    boxes = []  #It's a list now

    j = 0
    for i in classid:
        if i == 0:
            print("persons bounding box is: ", bboxes[j])
            boxes.append(bboxes[j].copy())
            #print(boxes[i])
        j = j + 1

    print(boxes)  #all faces

    ############################temp ###########33
    #for index,value in enumerate(boxes):
    global itr
    for i in range(len(boxes)):
        itr = itr + 1
        # Matching part
        labels = {}

        #Matching part end

        y = boxes[i][1]
        x = boxes[i][0]
        h = boxes[i][3]
        w = boxes[i][2]
        crop_img = img[y:y + h, x:x + w]
        #cv.imwrite(name,crop_img)

        detector = MTCNN()
        print("I am a detector phewww !")
        print(detector.detect_faces(crop_img))
        face_cropped = detector.detect_faces(crop_img)
        if (len(face_cropped) > 0):

            boxes_face = (face_cropped[0]['box'])
            y1 = boxes_face[1]
            x1 = boxes_face[0]
            h1 = boxes_face[3]
            w1 = boxes_face[2]
            crop_img_2 = crop_img[y1:y1 + h1, x1:x1 + w1]
            name = 'dataset/' + str("face") + str(itr) + '.jpg'
            #cv.imwrite(name,crop_img_2)

        #Matching Part
        path = 'dataset/Face' + str(itr)
        os.mkdir(path)
        name = 'dataset/Face' + str(itr) + '/' + str("person") + str(
            itr) + ".jpg"
        cv.imwrite(name, crop_img)
        name = 'dataset/Face' + str(itr) + '/' + str("face") + str(
            itr) + ".jpg"
        try:
            cv.imwrite(name, crop_img_2)
        except:
            pass
        #matching

        #Faces_Train.py

        os.system('faces_train.py')

        #Faces_Train.py

        with open("labels.pickle", 'rb') as f:
            og_labels = pickle.load(f)
            labels = {v: k
                      for k, v in og_labels.items()
                      }  # invert the mapping (key/value swap)
        face_cascade = cv2.CascadeClassifier(
            'cascade/data/haarcascade_frontalface_alt2.xml'
        )  #only front of a face
        recognizer = cv2.face.LBPHFaceRecognizer_create()
        recognizer.read("trainer.yml")

        gray = cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY)

        faces = face_cascade.detectMultiScale(gray,
                                              scaleFactor=1.5,
                                              minNeighbors=5)
        # try adjusting scaleFactor up and down
        print("Faces", faces)

        for (x, y, w, h) in faces:
            print(x, y, w, h)
            print("Amir")
            roi_gray = gray[y:y + h, x:x + w]  #roi -> Region of Interest
            roi_color = crop_img[y:y + h, x:x + w]
            #Recognition part
            id_, conf = recognizer.predict(
                roi_gray)  #id's and confidence level
            print(id_, conf, "moz")
            if (conf >= 25 and conf <= 85):
                print(id_)
                print("he")
                print(labels[id_])
                font = cv2.FONT_HERSHEY_SIMPLEX
                name = labels[id_]
                color = (255, 255, 255)
                stroke = 2
                cv2.putText(frame, name, (x, y), font, 1, color, stroke,
                            cv2.LINE_AA)

    ##########################temp done#########33

    my_tuple = []

    for i in bboxes:
        my_tuple.append(tuple(i))

    #print(my_tuple)

    # Create MultiTracker object
    multiTracker = cv2.MultiTracker_create()

    # Initialize MultiTracker
    colors_multi = []
    for bbox in my_tuple:
        multiTracker.add(createTrackerByName(trackerType), frame, bbox)
        colors_multi.append((randint(64, 255), randint(64,
                                                       255), randint(64, 255)))

    return multiTracker, colors_multi
    print("Please press s to select cell(s) to track")
    print("Once you are done, press q to go to next date")
    # initialize a dictionary that maps strings to their corresponding
    # OpenCV object tracker implementations
    OPENCV_OBJECT_TRACKERS = {
        "csrt": cv2.TrackerCSRT_create,
        "kcf": cv2.TrackerKCF_create,
        "boosting": cv2.TrackerBoosting_create,
        "mil": cv2.TrackerMIL_create,
        "tld": cv2.TrackerTLD_create,
        "medianflow": cv2.TrackerMedianFlow_create,
        "mosse": cv2.TrackerMOSSE_create,
    }

    # initialize OpenCV's special multi-object tracker
    trackers = cv2.MultiTracker_create()

    vs = cv2.VideoCapture(
        "C:/Users/CoeFamily/Documents/python_awsradar_gui-master/kbox_new/" +
        file)
    global numpts
    numpts = 0
    # loop over frames from the video stream
    try:
        while vs.isOpened():
            # grab the current frame, then handle if we are using a
            # VideoStream or VideoCapture object
            frame = vs.read()
            frame = frame[1]

            # check to see if we have reached the end of the stream
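The example above calls createTrackerByName(trackerType) but never shows it. A minimal sketch, assuming the helper simply maps a tracker name to the corresponding constructor from the OpenCV 3.x contrib API (the same constructors listed in the OPENCV_OBJECT_TRACKERS dictionary):

import cv2

def createTrackerByName(trackerType):
    # Map a tracker name to its OpenCV constructor and instantiate it
    # (OpenCV 3.x / opencv-contrib legacy tracker API, as used above).
    constructors = {
        "CSRT": cv2.TrackerCSRT_create,
        "KCF": cv2.TrackerKCF_create,
        "MIL": cv2.TrackerMIL_create,
        "MOSSE": cv2.TrackerMOSSE_create,
        "MEDIANFLOW": cv2.TrackerMedianFlow_create,
    }
    key = trackerType.upper()
    if key not in constructors:
        raise ValueError("Unknown tracker type: {}".format(trackerType))
    return constructors[key]()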
Esempio n. 28
0
        R_avg = np.average(region1[:, :, 2])

    (score, diff) = compare_ssim(ref_region, region, full=True)
    scored = 0
    if score < 0.40 and np.average(region1[:, :, 2]) > R_avg + 8:
        time.sleep(0.3)
        scored = 1
    return R_avg, scored


if __name__ == "__main__":
    score = 0
    not_detect = 0
    cam = cv2.VideoCapture('123.mp4')
    flag = 7
    tracker1 = cv2.MultiTracker_create()
    count = 1
    detectFreq = 10
    
    hog = cv2.HOGDescriptor()
    hog.setSVMDetector(cv2.HOGDescriptor_getDefaultPeopleDetector())
    colour = (238, 130, 238)
    while cam.isOpened():
        count -= 1
        ret, frame = cam.read()
        if not ret:
            break
        frame = np.asarray(frame)
        frame = cv2.resize(frame, (1200, 720))
        frame1 = frame.copy()
        if count == 0:
            rects, frame = PedestrianDetection.PedestrianDetection(hog, frame)
            if len(rects) == 0:
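The snippet above relies on a PedestrianDetection helper module that is not shown. A hypothetical sketch, assuming it wraps OpenCV's default HOG + SVM people detector and draws the detections onto the frame:

import cv2

def PedestrianDetection(hog, frame):
    # Detect people with the default HOG + SVM detector and draw their boxes.
    rects, _weights = hog.detectMultiScale(frame, winStride=(4, 4),
                                           padding=(8, 8), scale=1.05)
    for (x, y, w, h) in rects:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (238, 130, 238), 2)
    return rects, frame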
Esempio n. 29
0
video = FileVideoStream(videoPath, resolution=(640, 368))
video.start()
frame = video.read()
frame = cv2.resize(frame, (640, 368))

cv2.namedWindow("Foreground", cv2.WINDOW_AUTOSIZE)

## Select boxes
bboxes = []
colors = []

# Specify the tracker type
trackerType = "CSRT"

# Create MultiTracker object
multiTracker = cv2.MultiTracker_create()

selectRegions(frame)
frameHeight, frameWidth, _ = frame.shape

while video.running():
    originalFrame = frame.copy()

    # get updated location of objects in subsequent frames
    success, positives = multiTracker.update(frame)

    # draw tracked objects
    for i, newbox in enumerate(positives):
        p1 = (int(newbox[0]), int(newbox[1]))
        p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))
        cv2.rectangle(frame, p1, p2, colors[i], 2, 1)
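selectRegions(frame) is called above but not defined in the snippet. A possible sketch, assuming it loops over cv2.selectROI to collect regions, picks a random colour per box, and registers a CSRT tracker for each one (bboxes, colors and multiTracker are the module-level objects created above):

import cv2
from random import randint

def selectRegions(frame):
    while True:
        bbox = cv2.selectROI("Foreground", frame, fromCenter=False)
        if bbox == (0, 0, 0, 0):  # selection was cancelled
            break
        bboxes.append(bbox)
        colors.append((randint(64, 255), randint(64, 255), randint(64, 255)))
        multiTracker.add(cv2.TrackerCSRT_create(), frame, bbox)
        print("Press q to finish selecting, any other key to add another box")
        if cv2.waitKey(0) & 0xFF == ord('q'):
            break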
Esempio n. 30
0
    def test(self):

        # object tracker
        OPENCV_OBJECT_TRACKERS = {
            "csrt": cv2.TrackerCSRT_create,
            "kcf": cv2.TrackerKCF_create,
            "boosting": cv2.TrackerBoosting_create,
            "mil": cv2.TrackerMIL_create,
            "tld": cv2.TrackerTLD_create,
            "medianflow": cv2.TrackerMedianFlow_create,
            "mosse": cv2.TrackerMOSSE_create
        }
        initBB = False
        # grab the appropriate object tracker using our dictionary of tracker constructors

        # video capture
        capture = cv2.VideoCapture(self.clip_path)
        fps = capture.get(cv2.CAP_PROP_FPS)
        # used to deal with frame rate corrections
        frame_gap = int(round(fps / self.config.target_fps))
        frame_num = 0
        frame_height = int(capture.get(cv2.CAP_PROP_FRAME_HEIGHT))
        frame_width = int(capture.get(cv2.CAP_PROP_FRAME_WIDTH))

        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter('IR_example.avi', fourcc, fps,
                              (frame_width, frame_height))

        # initialize the frame fifo
        curr_fifo = VideoFIFO(self.config, self.config.n_timesteps,
                              frame_width, frame_height)

        # initialize OpenCV's special multi-object tracker
        trackers = cv2.MultiTracker_create()

        detections = []
        predictions = []
        fusions = []

        flag = 1
        # read video frames
        while True:
            flag, frame = capture.read()
            # end of movie
            if flag == 0:
                break

            # use FASTER for detection as long as the tracker is not initialized
            if not initBB:
                # run faster to detect region every frame
                res = predict(self.faster_pred, frame)

                # get segments block of previous 13 frames - bb interpolation
                human_bb = filter_bb(res)

                # overriding the detector output with zeros means the manual ROI
                # branch below always runs
                human_bb = [0, 0, 0, 0]
                # if there is no detection, fall back to manual ROI selection
                if np.all([x == 0 for x in human_bb]):
                    # select the bounding box of the object we want to track (make
                    # sure you press ENTER or SPACE after selecting the ROI)
                    human_bb = cv2.selectROI("Frame",
                                             frame,
                                             fromCenter=False,
                                             showCrosshair=True)

                    # OpenCV object tracker objects
                    tracker = OPENCV_OBJECT_TRACKERS["csrt"]()

                    # start OpenCV object tracker using the supplied bounding box

                    trackers.add(tracker, frame, human_bb)
                    human_detection_det = (human_bb[0], human_bb[1],
                                           human_bb[2] + human_bb[0],
                                           human_bb[3] + human_bb[1])
                    detections.append(copy.copy(human_detection_det))
                    predictions.append(0)
                    fusions.append(0)

                else:
                    for det in human_bb:
                        # OpenCV object tracker objects
                        tracker = OPENCV_OBJECT_TRACKERS["csrt"]()

                        # start OpenCV object tracker using the supplied bounding box
                        # coordinates, then start the FPS throughput estimator as well
                        human_detection_tup = (det[0], det[1], det[2] - det[0],
                                               det[3] - det[1])

                        trackers.add(tracker, frame, human_detection_tup)

                        detections.append(copy.copy(det))
                        predictions.append(0)
                        fusions.append(0)

                initBB = True

            else:

                (success, human_detection_tups) = trackers.update(frame)
                for i, human_detection_tuple in enumerate(
                        human_detection_tups):
                    # grab the new bounding box coordinates of the object
                    (x, y, w, h) = [int(v) for v in human_detection_tuple]
                    human_detection = [x, y, x + w, y + h]
                    detections[i] = human_detection

            if frame_num % frame_gap == 0:
                # add to fifo according to the correct frame rate
                curr_fifo.add_frame(frame)

                for i, human_det in enumerate(detections):
                    curr_block = curr_fifo.recover_block(
                        human_det, self.config.new_frame_size[0],
                        self.config.new_frame_size[1], frame_num)

                    # run model to get predictions
                    predictions_add, predictions_mul, fus_mul = self.test_step(
                        curr_block)
                    predictions[i] = predictions_mul
                    fusions[i] = fus_mul
                    # draw predictions
                    print(self.label_dict_inv[int(predictions_add)])
                    print(self.label_dict_inv[int(predictions_mul)])

            frame_rec = frame.copy()

            for i, h in enumerate(detections):
                cv2.rectangle(frame_rec, (h[0], h[1]), (h[2], h[3]),
                              color=(0, 0, 255),
                              thickness=1)

                cv2.putText(frame_rec,
                            self.label_dict_inv[int(predictions[i])],
                            (h[0], h[1]), cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                            (0, 0, 255), 1)


                probs1 = (self.label_dict_inv[0] + '=' + str(fusions[i][0][0]) + ',' +
                          self.label_dict_inv[1] + '=' + str(fusions[i][0][1]) + ',' +
                          self.label_dict_inv[2] + '=' + str(fusions[i][0][2]))
                probs2 = (self.label_dict_inv[3] + '=' + str(fusions[i][0][3]) + ',' +
                          self.label_dict_inv[4] + '=' + str(fusions[i][0][4]))

                cv2.putText(frame_rec, probs1, (10, 40),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 255), 1)

                cv2.putText(frame_rec, probs2, (10, 50),
                            cv2.FONT_HERSHEY_SIMPLEX, 0.3, (0, 0, 255), 1)

            out.write(frame_rec)

            # plot frame
            cv2.imshow('vid', frame_rec)
            cv2.waitKey(100)
            #cv2.destroyAllWindows()
            frame_num += 1

        # When everything done, release the capture
        capture.release()
        out.release()
        cv2.destroyAllWindows()
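The last example converts between two bounding-box conventions: the detector's corner format (x1, y1, x2, y2) and the (x, y, w, h) format expected by MultiTracker.add and returned by MultiTracker.update. Two small helpers make that conversion explicit (the names are illustrative, not part of the original code):

def corners_to_xywh(box):
    # (x1, y1, x2, y2) -> (x, y, w, h), as done before trackers.add(...)
    x1, y1, x2, y2 = box
    return (x1, y1, x2 - x1, y2 - y1)

def xywh_to_corners(box):
    # (x, y, w, h) -> (x1, y1, x2, y2), as done after trackers.update(...)
    x, y, w, h = box
    return (x, y, x + w, y + h)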