def __init__(self, model_path, use_cuda, map_location_flag, bad_time, left_time):
    self.min_confidence = 0.3
    self.nms_max_overlap = 1.0
    self.extractor = Extractor(model_path, use_cuda=use_cuda,
                               map_location_flag=map_location_flag)
    # Violation time.
    self.bad_time = bad_time
    max_cosine_distance = 0.2
    nn_budget = 100
    metric = NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
    self.tracker = Tracker(metric, left_time=left_time)
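
A minimal usage sketch for this variant. The checkpoint path and argument
values below are hypothetical; `DeepSort` is the enclosing class (named in
Example #3 via `super(DeepSort, self).__init__()`):

    tracker = DeepSort(
        'default_checkpoints/ckpt.t7',  # re-ID weights (path is an assumption)
        use_cuda=True,
        map_location_flag='cpu',        # assumed torch.load map_location hint
        bad_time=5.0,                   # assumed: seconds before a violation is flagged
        left_time=3.0,                  # assumed: grace period before a lost track is dropped
    )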
Example #2
def __init__(self, model_path):
    self.min_confidence = 0.3
    self.nms_max_overlap = 1.0

    self.extractor = Extractor(model_path, use_cuda=True)

    max_cosine_distance = 0.2
    nn_budget = 100
    metric = NearestNeighborDistanceMetric("cosine", max_cosine_distance, nn_budget)
    self.tracker = Tracker(metric)
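
As a side note, the "cosine" metric above gates matches on cosine distance.
A sketch of that distance (in the reference deep_sort implementation, a
track's score is the minimum distance over its feature gallery):

    import numpy as np

    def cosine_distance(a, b):
        # a: (n, d) query features, b: (m, d) gallery features.
        a = a / np.linalg.norm(a, axis=1, keepdims=True)
        b = b / np.linalg.norm(b, axis=1, keepdims=True)
        return 1.0 - a @ b.T  # (n, m) pairwise cosine distances

    # A candidate match is rejected when its distance exceeds max_cosine_distance.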
Example #3
def __init__(self, model_path):
    super(DeepSort, self).__init__()
    # Filter detection boxes by confidence: boxes (and their features) whose
    # confidence is not high enough are discarded.
    self.min_confidence = 0.3
    # Non-maximum suppression threshold, to remove duplicate boxes on one target.
    self.nms_max_overlap = 1.0
    # Extracts the positions of the current frame's detection boxes and the
    # deep appearance features of each box's image patch.
    self.extractor = Extractor(model_path, use_cuda=True)
    max_cosine_distance = 0.2
    nn_budget = 100
    metric = NearestNeighborDistanceMetric("cosine", max_cosine_distance,
                                           nn_budget)
    self.tracker = Tracker(metric)
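
A minimal sketch of the filtering those comments describe (not the repo's
actual update(); the helper name is hypothetical): detections below
min_confidence are dropped, then greedy NMS prunes overlapping boxes.

    import numpy as np

    def filter_and_suppress(boxes, scores, min_confidence=0.3, nms_max_overlap=1.0):
        # boxes: (N, 4) array of (x, y, w, h); scores: (N,) confidences.
        keep = scores >= min_confidence
        boxes, scores = boxes[keep], scores[keep]
        order = scores.argsort()[::-1]  # highest confidence first
        selected = []
        while order.size > 0:
            i = order[0]
            selected.append(i)
            # Intersection of box i with the remaining candidates.
            xx1 = np.maximum(boxes[i, 0], boxes[order[1:], 0])
            yy1 = np.maximum(boxes[i, 1], boxes[order[1:], 1])
            xx2 = np.minimum(boxes[i, 0] + boxes[i, 2],
                             boxes[order[1:], 0] + boxes[order[1:], 2])
            yy2 = np.minimum(boxes[i, 1] + boxes[i, 3],
                             boxes[order[1:], 1] + boxes[order[1:], 3])
            inter = np.maximum(0.0, xx2 - xx1) * np.maximum(0.0, yy2 - yy1)
            overlap = inter / (boxes[order[1:], 2] * boxes[order[1:], 3])
            # With nms_max_overlap == 1.0 nothing is suppressed, matching
            # the comment in Example #4 below.
            order = order[1:][overlap <= nms_max_overlap]
        return boxes[selected], scores[selected]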
Example #4
def __init__(self, model_path):
    # Detection confidence threshold: detections scoring below
    # self.min_confidence are ignored when building the detections list.
    self.min_confidence = 0.25
    # Non-maximum suppression threshold (original value 1.0). With
    # nms_max_overlap == 1.0, the NMS step keeps all detections.
    self.nms_max_overlap = 1.0
    self.extractor = Extractor(model_path, use_cuda=True)

    # Cosine-distance gating threshold; tuning it can reduce ID switches.
    max_cosine_distance = 0.2
    # Each track keeps a list of appearance features from its past
    # appearances; nn_budget caps that list's size. If it were 10, only the
    # features from the track's last 10 appearances would be stored.
    nn_budget = 100
    metric = NearestNeighborDistanceMetric("cosine", max_cosine_distance,
                                           nn_budget)
    self.tracker = Tracker(metric)
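
A small sketch of the nn_budget bookkeeping described above (illustrative
only; in the reference deep_sort code this trimming lives inside
NearestNeighborDistanceMetric):

    from collections import defaultdict

    class FeatureGallery:
        """Per-track feature history capped at nn_budget entries."""

        def __init__(self, budget=100):
            self.budget = budget
            self.samples = defaultdict(list)  # track_id -> feature vectors

        def partial_fit(self, features, targets):
            for feature, target in zip(features, targets):
                self.samples[target].append(feature)
                # Keep only the most recent `budget` features per track.
                self.samples[target] = self.samples[target][-self.budget:]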
Example #5
import time

import cv2
import numpy as np
import pandas as pd

# Assumed importable from this repo: infer_fast, extract_keypoints,
# group_keypoints, Pose, propagate_ids, Extractor, Find_assault.


def run_demo(net, image_provider, height_size, cpu, track_ids):

    stride = 8
    upsample_ratio = 4
    num_keypoints = Pose.num_kpts  # 18 keypoints
    previous_poses = []
    c = 0
    ttt = 0
    idxx = 0
    csv_dict = {
        'frame_number': [],
        'driver_index': [],
        'is_driver_flag': []
    }
    driver_find_flag = False
    weird_state_flag = False
    # Appearance feature extractor backing the Find_assault helper.
    extractor = Extractor('default_checkpoints/ckpt.t7', True)
    find_class = Find_assault(extractor)

    for img in image_provider:
        is_driver_flag = False
        t5 = time.time()
        orig_img = img.copy()
        heatmaps, pafs, scale, pad = infer_fast(net, img, height_size, stride,
                                                upsample_ratio, cpu)

        total_keypoints_num = 0
        all_keypoints_by_type = []
        for kpt_idx in range(num_keypoints):  # the 19th channel (background) is skipped
            total_keypoints_num += extract_keypoints(heatmaps[:, :, kpt_idx],
                                                     all_keypoints_by_type,
                                                     total_keypoints_num)

        pose_entries, all_keypoints = group_keypoints(all_keypoints_by_type,
                                                      pafs,
                                                      demo=True)

        for kpt_id in range(all_keypoints.shape[0]):  # rescale keypoints to original image coordinates
            all_keypoints[kpt_id, 0] = (all_keypoints[kpt_id, 0] * stride /
                                        upsample_ratio - pad[1]) / scale
            all_keypoints[kpt_id, 1] = (all_keypoints[kpt_id, 1] * stride /
                                        upsample_ratio - pad[0]) / scale
        current_poses = []

        for n in range(len(pose_entries)):
            if len(pose_entries[n]) == 0:
                continue
            pose_keypoints = np.ones((num_keypoints, 2), dtype=np.int32) * -1
            for kpt_id in range(num_keypoints):
                if pose_entries[n][kpt_id] != -1.0:  # keypoint was found
                    pose_keypoints[kpt_id, 0] = int(
                        all_keypoints[int(pose_entries[n][kpt_id]), 0])
                    pose_keypoints[kpt_id, 1] = int(
                        all_keypoints[int(pose_entries[n][kpt_id]), 1])
            pose = Pose(pose_keypoints, pose_entries[n][18])
            current_poses.append(pose)
            pose.draw(img)
        img = cv2.addWeighted(orig_img, 0.6, img, 0.4, 0)

        # If the driver has not been found yet, run find_driver.
        if driver_find_flag is False:
            driver_find_flag, find_driver_count, find_state = find_class.find_driver(
                current_poses, orig_img)
            cv2.putText(img, "Driver_find_count : " + str(find_driver_count),
                        (0, 20), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0))
            cv2.putText(img, "State : Find_Driver", (0, 50),
                        cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0))
        else:
            # print("idxx : ", idxx)
            is_driver_flag, driver_index, weird_state_count, weird_state_flag = find_class.is_driver(
                current_poses, orig_img)
            cv2.putText(img, "Weird_State_Count : " + str(weird_state_count),
                        (0, 20), cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0))
            if weird_state_flag:
                cv2.putText(img, 'State : ABNORMAL', (0, 50),
                            cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255))
            else:
                cv2.putText(img, "State : Driver_Found", (0, 50),
                            cv2.FONT_HERSHEY_COMPLEX, 1, (0, 255, 0))

            # print("Driver_index :", driver_index)
            # print("Driver_Flag : ", is_driver_flag)
            # csv_dict['frame_number'].append(idxx)
            # csv_dict['driver_index'].append(driver_index)
            # csv_dict['is_driver_flag'].append(is_driver_flag)
            # csv_dict['state'].append(state)

        if track_ids:  # track poses across frames
            propagate_ids(previous_poses, current_poses)
            previous_poses = current_poses
            index_counter = 0
            for pose in current_poses:
                cv2.rectangle(
                    img, (pose.bbox[0], pose.bbox[1]),
                    (pose.bbox[0] + pose.bbox[2], pose.bbox[1] + pose.bbox[3]),
                    (0, 255, 0))
                cv2.putText(img, 'id: {}'.format(pose.id),
                            (pose.bbox[0], pose.bbox[1] - 16),
                            cv2.FONT_HERSHEY_COMPLEX, 0.5, (0, 0, 255))
                if is_driver_flag and index_counter == driver_index:
                    cv2.putText(img, 'DRIVER',
                                (pose.bbox[0] + 100, pose.bbox[1] - 16),
                                cv2.FONT_HERSHEY_COMPLEX, 1, (0, 0, 255))
                index_counter += 1

        tt = time.time()
        # Note: the first frame's fps is meaningless because ttt starts at 0.
        fps = 1 / (tt - ttt)
        print('fps=', fps)
        ttt = tt

        str_ = "FPS : %0.2f" % fps
        cv2.putText(img, str_, (0, 100), cv2.FONT_HERSHEY_SIMPLEX, 1,
                    (0, 255, 0))
        cv2.imshow('Lightweight Human Pose Estimation Python Demo', img)
        # cv2_imshow(img)  # alternative for Colab notebooks

        # Assumes the output/two_2/ directory already exists.
        cv2.imwrite('output/two_2/' + str(idxx) + '.png', img)
        idxx += 1

        key = cv2.waitKey(1)

        if key == 27:  # esc
            return
    df = pd.DataFrame(csv_dict)  # empty unless the csv_dict appends above are uncommented
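
A hypothetical invocation of run_demo. The frame generator, video path, and
parameter values below are assumptions, not part of the snippet; `net` is the
pose network loaded elsewhere in the repo:

    def video_frames(path):
        # Yield frames one at a time so run_demo can iterate over them.
        cap = cv2.VideoCapture(path)
        while True:
            ok, frame = cap.read()
            if not ok:
                break
            yield frame
        cap.release()

    run_demo(net, video_frames('input.mp4'), height_size=256, cpu=False,
             track_ids=True)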