Example 1
 def update(self):
     # keep looping infinitely
     while True:
         # if the thread indicator variable is set, stop the
         # thread
         if self.stopped:
             if self.save_video:
                 self.stream.release()
             return
         # otherwise, ensure the queue is not empty
         if not self.Q.empty():
             (boxes, scores, hm_data, pt1, pt2, orig_img,
              im_name) = self.Q.get()
             orig_img = np.array(orig_img, dtype=np.uint8)
             if boxes is None:
                 if opt.save_img or opt.save_video or opt.vis:
                     img = orig_img
                     if opt.vis:
                         cv2.imshow("AlphaPose Demo", img)
                         cv2.waitKey(30)
                     if opt.save_img:
                         cv2.imwrite(
                             os.path.join(opt.outputpath, 'vis', im_name),
                             img)
                     if opt.save_video:
                         self.stream.write(img)
             else:
                 # location prediction (n, kp, 2) | score prediction (n, kp, 1)
                 if opt.matching:
                     preds = getMultiPeakPrediction(hm_data, pt1.numpy(),
                                                    pt2.numpy(),
                                                    opt.inputResH,
                                                    opt.inputResW,
                                                    opt.outputResH,
                                                    opt.outputResW)
                     result = matching(boxes, scores.numpy(), preds)
                 else:
                     preds_hm, preds_img, preds_scores = getPrediction(
                         hm_data, pt1, pt2, opt.inputResH, opt.inputResW,
                         opt.outputResH, opt.outputResW)
                     result = pose_nms(boxes, scores, preds_img,
                                       preds_scores)
                 result = {'imgname': im_name, 'result': result}
                 self.final_result.append(result)
                 if opt.save_img or opt.save_video or opt.vis:
                     img = vis_frame(orig_img, result)
                     if opt.vis:
                         cv2.imshow("AlphaPose Demo", img)
                         cv2.waitKey(30)
                     if opt.save_img:
                         cv2.imwrite(
                             os.path.join(opt.outputpath, 'vis', im_name),
                             img)
                     if opt.save_video:
                         self.stream.write(img)
         else:
             time.sleep(0.1)
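
These writer loops assume a producer thread that fills `self.Q` with `(boxes, scores, hm_data, pt1, pt2, orig_img, im_name)` tuples and a `stopped` flag. A minimal sketch of that wiring is shown below; the tuple layout comes from the example above, while the class name and the `start()`/`save()`/`stop()` helpers are assumptions for illustration, not the library's API.

# Minimal sketch of the producer/consumer wiring the update() loops assume.
import threading
from queue import Queue

class DataWriter:
    def __init__(self, save_video=False, queue_size=1024):
        self.Q = Queue(maxsize=queue_size)
        self.stopped = False
        self.save_video = save_video
        self.final_result = []

    def start(self):
        # run update() (defined as in the example above) in a daemon thread
        threading.Thread(target=self.update, daemon=True).start()
        return self

    def save(self, boxes, scores, hm_data, pt1, pt2, orig_img, im_name):
        # producer side: enqueue one frame's detections for the writer thread
        self.Q.put((boxes, scores, hm_data, pt1, pt2, orig_img, im_name))

    def stop(self):
        # signal the loop in update() to flush and exit
        self.stopped = True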
Example 2
    def predict(self, image, bboxs, bboxs_scores):
        inps, pt1, pt2 = crop_dets(image, bboxs, self.inp_h, self.inp_w)
        pose_hm = self.model(inps.to(self.device)).cpu().data

        # Cut eyes and ears.
        pose_hm = torch.cat([pose_hm[:, :1, ...], pose_hm[:, 5:, ...]], dim=1)

        xy_hm, xy_img, scores = getPrediction(pose_hm, pt1, pt2, self.inp_h, self.inp_w,
                                              pose_hm.shape[-2], pose_hm.shape[-1])
        result = pose_nms(bboxs, bboxs_scores, xy_img, scores)
        return result
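
A hedged usage sketch for `predict()` above: only its signature and return value come from the example; the `PoseEstimator` wrapper, the box format, and the detector step are assumptions for illustration.

# Hypothetical call site for predict() above.
import cv2
import torch

estimator = PoseEstimator()                      # hypothetical wrapper exposing predict()
image = cv2.imread('frame.jpg')
bboxs = torch.tensor([[50., 40., 200., 400.]])   # one person box (format assumed)
bboxs_scores = torch.tensor([[0.98]])

with torch.no_grad():
    result = estimator.predict(image, bboxs, bboxs_scores)
for person in result:
    print(person['keypoints'].shape, person['kp_score'].shape)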
Example 3
    def update(self):
        # keep looping infinitely
        while True:
            # if the thread indicator variable is set, stop the
            # thread
            if self.stopped:
                if self.save_video:
                    self.stream.release()
                return
            # otherwise, ensure the queue is not empty
            if not self.Q.empty():
                (boxes, scores, hm_data, pt1, pt2, orig_img, im_name) = self.Q.get()
                orig_img = np.array(orig_img, dtype=np.uint8)
                if boxes is None:
                    if opt.save_img or opt.save_video or opt.vis:
                        img = orig_img
                        if opt.vis:
                            cv2.imshow("AlphaPose Demo", img)
                            cv2.waitKey(30)
                        if opt.save_img:
                            cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
                        if opt.save_video:
                            self.stream.write(img)
                else:
                    # location prediction (n, kp, 2) | score prediction (n, kp, 1)
                    
                    preds_hm, preds_img, preds_scores = getPrediction(
                        hm_data, pt1, pt2, opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW, opt.nClasses)

                    # print('dataloader:',boxes.shape, scores.shape, preds_img.shape, preds_scores.shape)
                    result = pose_nms(boxes, scores, preds_img, preds_scores)
                    result = {
                        'imgname': im_name,
                        'result': result,
                        'boxes':boxes
                    }
                    # print('dataloader.py:result:',result)
                    # print('dataloader.py:hm_data.shape:',hm_data.shape)
                    # print('dataloader.py:preds_hm.shape:',preds_hm.shape)
                    # self.count+=1
                    # print('dataloader.py:count:',self.count)

                    self.final_result.append(result)
                    if opt.save_img or opt.save_video or opt.vis:
                        img = vis_frame(orig_img, result)
                        if opt.vis and len(img)!=0:
                            cv2.imshow("AlphaPose Demo", img)
                            cv2.waitKey(30)
                        if opt.save_img and len(img)!=0:
                            cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
                        if opt.save_video and len(img)!=0:
                            self.stream.write(img)
            else:
                time.sleep(0.1)
Example 4
    def update(boxes, scores, hm_data, pt1, pt2, orig_img, i):
        
        orig_img = np.array(orig_img, dtype=np.uint8)

        preds_hm, preds_img, preds_scores = getPrediction(
            hm_data, pt1, pt2, opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW)
        result = pose_nms(
            boxes, scores, preds_img, preds_scores)
        result = {
            'imgname': str(i),
            'result': result
        }
        img = vis_frame(orig_img, result)
        return img, result
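
Because this variant is stateless, it can be driven directly from a frame loop. A minimal sketch follows; `cv2.VideoCapture` usage is standard, while `detect()` and `pose_forward()` are hypothetical stand-ins for the detection and SPPE forward passes that produce the arguments `update()` expects.

# Hypothetical frame loop driving the stateless update() above.
import cv2

cap = cv2.VideoCapture('video.mp4')
i = 0
while True:
    ok, frame = cap.read()
    if not ok:
        break
    boxes, scores, inps, pt1, pt2 = detect(frame)   # hypothetical detector step
    hm_data = pose_forward(inps)                    # hypothetical SPPE step
    img, result = update(boxes, scores, hm_data, pt1, pt2, frame, i)
    cv2.imshow('pose', img)
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
    i += 1
cap.release()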
Example 5
def fetch_result(boxes, scores, hm_data, pt1, pt2, ori_im, im_name):
    ori_im = np.array(ori_im, dtype=np.uint8)
    if boxes is None:
        return None
    preds_hm, preds_img, preds_scores = getPrediction(hm_data, pt1, pt2,
                                                      opt.inputResH,
                                                      opt.inputResW,
                                                      opt.outputResH,
                                                      opt.outputResW)
    result = pose_nms(boxes, scores, preds_img, preds_scores)
    result = {
        'imgname': im_name,
        'result': result,
        'bbox': boxes,
    }
    return result
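
A hedged sketch of serialising `fetch_result()` output: the key names ('imgname', 'result', 'keypoints', 'kp_score') come from the examples, while the tensor-to-list conversion and the output file name are assumptions.

# Minimal sketch: dump a list of fetch_result() dicts to JSON.
import json

def results_to_json(results, path='alphapose-results.json'):
    out = []
    for frame in results:
        if frame is None:
            continue
        for person in frame['result']:
            out.append({
                'image_id': frame['imgname'],
                'keypoints': person['keypoints'].flatten().tolist(),
                'score': float(person['kp_score'].mean()),
            })
    with open(path, 'w') as f:
        json.dump(out, f)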
Example 6
    def update(self):
        # keep looping infinitely
        while True:
            # if the thread indicator variable is set, stop the
            # thread
            if self.stopped:
                if self.save_video:
                    self.stream.release()
                return
            # otherwise, ensure the queue is not empty
            if not self.Q.empty():
                (boxes, scores, hm_data, pt1, pt2, orig_img,
                 im_name) = self.Q.get()
                if boxes is None:
                    if opt.save_img or opt.save_video:
                        #img = display_frame(orig_img, result, opt.outputpath)
                        img = orig_img
                        if opt.save_img:
                            cv2.imwrite(
                                os.path.join(opt.outputpath, 'vis', im_name),
                                img)
                        if opt.save_video:
                            self.stream.write(img)
                else:
                    # location prediction (n, kp, 2) | score prediction (n, kp, 1)
                    preds_hm, preds_img, preds_scores = getPrediction(
                        hm_data, pt1, pt2, opt.inputResH, opt.inputResW,
                        opt.outputResH, opt.outputResW)

                    result = pose_nms(boxes, scores, preds_img, preds_scores)
                    result = {'imgname': im_name, 'result': result}
                    self.final_result.append(result)
                    if opt.save_img or opt.save_video:
                        #img = display_frame(orig_img, result, opt.outputpath)
                        img = vis_frame(orig_img, result)
                        if opt.save_img:
                            cv2.imwrite(
                                os.path.join(opt.outputpath, 'vis', im_name),
                                img)
                        if opt.save_video:
                            self.stream.write(img)
            else:
                time.sleep(0.01)
Example 7
 def process(self, boxes, scores, hm_data, pt1, pt2, orig_img, im_name):
     orig_img = np.array(orig_img, dtype=np.uint8)
     if boxes is None:
         return orig_img, []
     else:
         # location prediction (n, kp, 2) | score prediction (n, kp, 1)
         preds_hm, preds_img, preds_scores = getPrediction(
             hm_data, pt1, pt2, opt.inputResH, opt.inputResW,
             opt.outputResH, opt.outputResW)
         self.result = pose_nms(boxes, scores, preds_img, preds_scores)
         if self.result:
             result = self.locate()
             # result = self.result
             result = {'imgname': im_name, 'result': result}
             self.final_result.append(result)
             img_black, pred_black = vis_frame_black(orig_img, result)
             self.img = img_black
             self.skeleton = pred_black
             img, pred = vis_frame(orig_img, result)
             return img, self.skeleton, self.img
         else:
             return orig_img, [], orig_img
Example 8
    def gen_pose(self):
        with torch.no_grad():
            (inps, orig_img, boxes, scores, pt1,
             pt2) = self.human_detect_result
            if boxes is None or boxes.nelement() == 0:
                self.det_human_num = 0
                self.out_img = orig_img
                self.result = None
                self.resultNew = None
                return
            #print(inps, 'inps')
            #print(type(boxes), 'boxes')
            #
            inps = inps.cuda()

            hm = self.pose_model(inps)
            hm = [hm]
            hm = torch.cat(hm)
            hm = hm.cpu()
            hm_data = hm
            orig_img = np.array(orig_img, dtype=np.uint8)
            if boxes is None:
                self.det_human_num = 0
                self.out_img = orig_img
                self.result = None
                self.resultNew = None
                return
            else:
                preds_hm, preds_img, preds_scores = getPrediction(
                    hm_data, pt1, pt2, opt.inputResH, opt.inputResW,
                    opt.outputResH, opt.outputResW)

                result = pose_nms(boxes, scores, preds_img, preds_scores)
                self.result = result
                self.resultNew = conserv_result_format_to_list(self.result)
                resultDict = {'imgname': 'zhjs', 'result': result}
                self.out_img = vis_frame(orig_img, resultDict)
                self.det_human_num = len(result)
Example 9
    def update(self):
        # keep looping infinitely
        while True:
            # if the thread indicator variable is set, stop the
            # thread

            if self.stopped:
                if self.save_video:
                    self.stream.release()
                return

            # otherwise, ensure the queue is not empty
            if not self.Q.empty():
                (boxes, scores, hm_data, pt1, pt2, orig_img,
                 im_name) = self.Q.get()

                orig_img = np.array(orig_img, dtype=np.uint8)
                if boxes is None:
                    if opt.save_img or opt.save_video or opt.vis:
                        img = orig_img
                        if opt.vis:
                            cv2.imshow("AlphaPose Demo", img)
                            cv2.waitKey(30)
                        if opt.save_img:
                            cv2.imwrite(
                                os.path.join(self.outputpath, self.dir_folder,
                                             im_name), img)
                        if opt.save_video:
                            self.stream.write(img)
                else:
                    # location prediction (n, kp, 2) | score prediction (n, kp, 1)

                    # if opt.matching:
                    #     preds = getMultiPeakPrediction(
                    #         hm_data, pt1.numpy(), pt2.numpy(), opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW)
                    #     result = matching(boxes, scores.numpy(), preds)
                    # else:

                    preds_hm, preds_img, preds_scores = getPrediction(
                        hm_data, pt1, pt2, opt.inputResH, opt.inputResW,
                        opt.outputResH, opt.outputResW)
                    # (hm_data, pt1, pt2, 320, 256, 80, 64)
                    result = pose_nms(boxes, scores, preds_img, preds_scores)
                    # (bbox location list, bbox score list, pose location list, pose score list)
                    # result[ {'keypoints', 'kp_score', 'proposal_score'}, {---}, ...], pPose_nms.py line-114

                    result = {'imgname': im_name, 'result': result}

                    # self.final_result.append(result)

                    self.show_img = vis_frame(orig_img, result)
                    cv2.imwrite(
                        os.path.join(self.outputpath, self.dir_folder,
                                     im_name), self.show_img)

                    # if opt.save_img or opt.save_video or opt.vis:
                    #     img = vis_frame(orig_img, result)
                    #     if opt.vis:
                    #         cv2.imshow("AlphaPose Demo", img)
                    #         cv2.waitKey(30)
                    #     if opt.save_img:
                    #         cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
                    #     if opt.save_video:
                    #         self.stream.write(img)
            else:
                # time.sleep(0.1)
                pass
Example 10
    def update(self):
        next_id = 0
        car_next_id = 0
        bbox_dets_list_list = []
        keypoints_list_list = []
        car_dets_list_list = []

        while True:
            # if the thread indicator variable is set, stop the
            # thread
            if self.stopped:
                if self.save_video:
                    self.stream.release()
                return
            # otherwise, ensure the queue is not empty

            if not self.Q.empty():
                start_time = getTime()

                (boxes, scores, hm_data, pt1, pt2, orig_img, img_id, CAR) = self.Q.get()

                orig_img = np.array(orig_img, dtype=np.uint8)
                if boxes is not None:
                    boxes = boxes.astype(np.int32)

                img = orig_img

                # text_filled2(img,(5,200),str(img_id),LIGHT_GREEN,2,2)

                bbox_dets_list = []  # keyframe: start from empty
                keypoints_list = []  # keyframe: start from empty
                # print(boxes)
                if boxes is None:  # No person detection
                    pass
                    # bbox_det_dict = {"img_id": img_id,
                    #                  "det_id": 0,
                    #                  "track_id": None,
                    #                  "bbox": [0, 0, 2, 2]}
                    # bbox_dets_list.append(bbox_det_dict)
                    #
                    # keypoints_dict = {"img_id": img_id,
                    #                   "det_id": 0,
                    #                   "track_id": None,
                    #                   "keypoints": []}
                    # keypoints_list.append(keypoints_dict)


                else:
                    if opt.matching:
                        preds = getMultiPeakPrediction(
                            hm_data, pt1.numpy(), pt2.numpy(), opt.inputResH, opt.inputResW, opt.outputResH,
                            opt.outputResW)
                        result = matching(boxes, scores.numpy(), preds)
                    else:

                        preds_hm, preds_img, preds_scores = getPrediction(hm_data, pt1, pt2, opt.inputResH,
                                                                          opt.inputResW, opt.outputResH,
                                                                          opt.outputResW)

                        # print('number of result', preds_hm,  preds_scores )
                        result = pose_nms(boxes, scores, preds_img, preds_scores)  # list type
                        # result = {  'keypoints': ,  'kp_score': , 'proposal_score': ,  'bbox' }

                    if img_id > 0:  # First frame does not have previous frame
                        bbox_list_prev_frame = bbox_dets_list_list[img_id - 1].copy()
                        keypoints_list_prev_frame = keypoints_list_list[img_id - 1].copy()
                    else:
                        bbox_list_prev_frame = []
                        keypoints_list_prev_frame = []

                    # boxes.size(0)
                    num_dets = len(result)

                    for bbox in boxes:
                        x, y, w, h = bbox.astype(np.uint32)
                        cv2.rectangle(orig_img, (x, y), (x + w, y + h), (253, 222, 111), 1)

                    for det_id in range(num_dets):  # IOU tracking for detections in current frame.
                        # detections for current frame
                        # obtain bbox position and track id

                        result_box = result[det_id]
                        kp_score = result_box['kp_score']
                        proposal_score = result_box['proposal_score'].numpy()[0]
                        if proposal_score < 1.3:
                            continue

                        keypoints = result_box['keypoints']  # torch, (17,2)
                        keypoints_pf = np.zeros((15, 2))

                        idx_list = [16, 14, 12, 11, 13, 15, 10, 8, 6, 5, 7, 9, 0, 0, 0]
                        for i, idx in enumerate(idx_list):
                            keypoints_pf[i] = keypoints[idx]
                        keypoints_pf[12] = (keypoints[5] + keypoints[6]) / 2  # neck

                        # COCO-order {0-nose    1-Leye    2-Reye    3-Lear    4Rear    5-Lsho    6-Rsho    7-Lelb    8-Relb    9-Lwri    10-Rwri    11-Lhip    12-Rhip    13-Lkne    14-Rkne    15-Lank    16-Rank} 
                        # PoseFLow order  #{0-Rank    1-Rkne    2-Rhip    3-Lhip    4-Lkne    5-Lank    6-Rwri    7-Relb    8-Rsho    9-Lsho   10-Lelb    11-Lwri    12-neck  13-nose 14-TopHead}

                        bbox_det = bbox_from_keypoints(keypoints)  # xxyy

                        # bbox_in_xywh = enlarge_bbox(bbox_det, enlarge_scale)
                        # bbox_det = x1y1x2y2_to_xywh(bbox_in_xywh)

                        # Keyframe: use provided bbox
                        # if bbox_invalid(bbox_det):
                        #     track_id = None  # this id means null
                        #     keypoints = []
                        #     bbox_det = [0, 0, 2, 2]
                        #     # update current frame bbox
                        #     bbox_det_dict = {"img_id": img_id,
                        #                      "det_id": det_id,
                        #                      "track_id": track_id,
                        #                      "bbox": bbox_det}
                        #     bbox_dets_list.append(bbox_det_dict)
                        #     # update current frame keypoints
                        #     keypoints_dict = {"img_id": img_id,
                        #                       "det_id": det_id,
                        #                       "track_id": track_id,
                        #                       "keypoints": keypoints}
                        #     keypoints_list.append(keypoints_dict)
                        #     continue

                        # # update current frame bbox

                        if img_id == 0:  # First frame, all ids are assigned automatically
                            track_id = next_id
                            next_id += 1
                        else:
                            track_id, match_index = get_track_id_SpatialConsistency(bbox_det, bbox_list_prev_frame)
                            # print('track' ,track_id, match_index)

                            if track_id != -1:  # if candidate from prev frame matched, prevent it from matching another
                                del bbox_list_prev_frame[match_index]
                                del keypoints_list_prev_frame[match_index]

                        # update current frame bbox
                        bbox_det_dict = {"img_id": img_id,
                                         "det_id": det_id,
                                         "track_id": track_id,
                                         "bbox": bbox_det}

                        # update current frame keypoints
                        keypoints_dict = {"img_id": img_id,
                                          "det_id": det_id,
                                          "track_id": track_id,
                                          "keypoints": keypoints,
                                          'kp_poseflow': keypoints_pf,
                                          'kp_score': kp_score,
                                          'bbox': bbox_det,
                                          'proposal_score': proposal_score}

                        bbox_dets_list.append(bbox_det_dict)
                        keypoints_list.append(keypoints_dict)

                    num_dets = len(bbox_dets_list)
                    for det_id in range(num_dets):  # if IOU tracking failed, run pose matching tracking.
                        bbox_det_dict = bbox_dets_list[det_id]
                        keypoints_dict = keypoints_list[det_id]

                        # assert (det_id == bbox_det_dict["det_id"])
                        # assert (det_id == keypoints_dict["det_id"])

                        if bbox_det_dict["track_id"] == -1:  # this id means matching not found yet
                            # track_id = bbox_det_dict["track_id"]
                            track_id, match_index = get_track_id_SGCN(bbox_det_dict["bbox"], bbox_list_prev_frame,
                                                                      keypoints_dict["kp_poseflow"],
                                                                      keypoints_list_prev_frame)

                            if track_id != -1:  # if candidate from prev frame matched, prevent it from matching another
                                del bbox_list_prev_frame[match_index]
                                del keypoints_list_prev_frame[match_index]
                                bbox_det_dict["track_id"] = track_id
                                keypoints_dict["track_id"] = track_id

                            # if still can not find a match from previous frame, then assign a new id
                            # if track_id == -1 and not bbox_invalid(bbox_det_dict["bbox"]):
                            if track_id == -1:
                                bbox_det_dict["track_id"] = next_id
                                keypoints_dict["track_id"] = next_id
                                next_id += 1

                    # update frame
                    # print('keypoint list', len(keypoints_list))
                    vis_frame(img, keypoints_list)

                """
                Car
                """

                if CAR is not None:
                    car_np = CAR
                    new_car_bboxs = car_np[:, 0:4].astype(np.uint32)  # b/  x y w h c / cls_conf, cls_idx
                    new_car_score = car_np[:, 4]
                    cls_conf = car_np[:, 4]

                    # print("id: ", img_id , " ------------ " , new_car_bboxs, new_car_score)
                    # cls_conf = car_np[:, 6]
                    car_dest_list = []

                    if img_id > 1:  # First frame does not have previous frame
                        car_bbox_list_prev_frame = car_dets_list_list[img_id - 1].copy()
                    else:
                        car_bbox_list_prev_frame = []

                    # print('car bbox list prev frame ', len(car_bbox_list_prev_frame))
                    for c, score, conf in zip(new_car_bboxs, new_car_score, cls_conf):
                        # car_bbox_det = c
                        # car_bbox_det = x1y1x2y2_to_xywh(c)
                        bbox_det = c
                        # bbox_in_xywh = enlarge_bbox(car_bbox_det, enlarge_scale)
                        # bbox_det = x1y1x2y2_to_xywh(bbox_in_xywh)

                        if img_id == 0:  # First frame, all ids are assigned automatically
                            car_track_id = car_next_id
                            car_next_id += 1
                        else:
                            car_track_id, match_index = get_track_id_SpatialConsistency(bbox_det,
                                                                                        car_bbox_list_prev_frame)
                            # print(car_track_id, match_index)
                            if car_track_id != -1:  # if candidate from prev frame matched, prevent it from matching another
                                del car_bbox_list_prev_frame[match_index]

                        bbox_det_dict = {"img_id": img_id,
                                         "track_id": car_track_id,
                                         "bbox": bbox_det,
                                         "score": score,
                                         "conf": conf}
                        car_dest_list.append(bbox_det_dict)

                    for car_bbox_det_dict in car_dest_list:  # detections for current frame
                        if car_bbox_det_dict["track_id"] == -1:  # this id means matching not found yet
                            car_bbox_det_dict["track_id"] = car_next_id
                            car_next_id += 1

                    self.tracking(car_dest_list)
                    car_dets_list_list.append(car_dest_list)

                else:
                    car_dest_list = []
                    bbox_det_dict = {"img_id": img_id,
                                     "det_id": 0,
                                     "track_id": None,
                                     "bbox": [0, 0, 2, 2],
                                     "score": 0,
                                     "conf": 0}
                    car_dest_list.append(bbox_det_dict)
                    car_dets_list_list.append(car_dest_list)

                bbox_dets_list_list.append(bbox_dets_list)
                keypoints_list_list.append(keypoints_list)

                if img_id != 0:
                    self.car_person_detection(car_dest_list, bbox_dets_list, img)
                    self.car_parking_detection(car_dest_list, img, img_id)

                ckpt_time, det_time = getTime(start_time)
                cv2.putText(img, str(1 / det_time), (5, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 1)
                if opt.vis:
                    cv2.imshow("AlphaPose Demo", img)
                    cv2.waitKey(33)
                if opt.save_video:
                    self.stream.write(img)
            else:
                time.sleep(0.1)
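
The tracking step above first tries `get_track_id_SpatialConsistency()` against the previous frame's boxes. Its internals are not shown in the example, so the sketch below is only a plausible stand-in: an IoU-based matcher over `{"bbox", "track_id"}` dicts, assuming boxes in (x1, y1, x2, y2) order.

# Hedged stand-in for an IoU-based spatial-consistency matcher.
def iou(a, b):
    x1, y1 = max(a[0], b[0]), max(a[1], b[1])
    x2, y2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0, x2 - x1) * max(0, y2 - y1)
    area_a = (a[2] - a[0]) * (a[3] - a[1])
    area_b = (b[2] - b[0]) * (b[3] - b[1])
    return inter / max(area_a + area_b - inter, 1e-6)

def match_by_iou(bbox_det, dets_prev_frame, thresh=0.3):
    # returns (track_id, match_index); (-1, -1) when nothing overlaps enough
    best_iou, best_idx = 0.0, -1
    for idx, prev in enumerate(dets_prev_frame):
        overlap = iou(bbox_det, prev['bbox'])
        if overlap > best_iou:
            best_iou, best_idx = overlap, idx
    if best_iou >= thresh:
        return dets_prev_frame[best_idx]['track_id'], best_idx
    return -1, -1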
Example 11
    def update(self):

        # keep looping infinitely
        while True:
            sys.stdout.flush()
            print("generator len : " + str(self.Q.qsize()))

            # if the thread indicator variable is set, stop the
            # thread
            # if self.stopped:
            #     cv2.destroyAllWindows()
            #     if self.save_video:
            #         self.stream.release()
            #     return
            # otherwise, ensure the queue is not empty
            if not self.det_processor.Q.empty():

                with torch.no_grad():
                    (inps, orig_img, im_name, boxes, scores, pt1,
                     pt2) = self.det_processor.read()

                    if orig_img is None:
                        sys.stdout.flush()
                        print(f'{im_name} image read None: handle_video')
                        break

                    orig_img = np.array(orig_img, dtype=np.uint8)
                    if boxes is None or boxes.nelement() == 0:
                        (boxes, scores, hm_data, pt1, pt2, orig_img,
                         im_name) = (None, None, None, None, None, orig_img,
                                     im_name.split('/')[-1])

                        res = {'keypoints': -1, 'image': orig_img}
                        self.Q.put(res)  #TODO

                        # cv2.imwrite("/home/hrs/Desktop/dd/now.jpg", orig_img)

                        # img = orig_img
                        # cv2.imshow("AlphaPose Demo", img)
                        # cv2.waitKey(30)
                        ######################################################################################
                        # self.image = self.ax_in.imshow(orig_img, aspect='equal')
                        # self.image.set_data(orig_img)
                        # plt.draw()
                        # plt.pause(0.000000000000000001)
                        ######################################################################################

                        # if opt.save_img or opt.save_video or opt.vis:
                        #     img = orig_img
                        #     if opt.vis:
                        #         cv2.imshow("AlphaPose Demo", img)
                        #         cv2.waitKey(30)
                        #     if opt.save_img:
                        #         cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
                        #     if opt.save_video:
                        #         self.stream.write(img)
                    else:
                        # location prediction (n, kp, 2) | score prediction (n, kp, 1)

                        datalen = inps.size(0)
                        batchSize = 20  #args.posebatch()
                        leftover = 0
                        if datalen % batchSize:
                            leftover = 1
                        num_batches = datalen // batchSize + leftover
                        hm = []

                        # sys.stdout.flush()
                        # print("hhhh")

                        for j in range(num_batches):
                            inps_j = inps[j * batchSize:min(
                                (j + 1) * batchSize, datalen)].cuda()
                            hm_j = self.pose_model(inps_j)
                            hm.append(hm_j)

                        # time1 = time.time()
                        hm = torch.cat(hm)
                        hm = hm.cpu().data

                        (boxes, scores, hm_data, pt1, pt2, orig_img,
                         im_name) = (boxes, scores, hm, pt1, pt2, orig_img,
                                     im_name.split('/')[-1])

                        if opt.matching:
                            preds = getMultiPeakPrediction(
                                hm_data, pt1.numpy(), pt2.numpy(),
                                opt.inputResH, opt.inputResW, opt.outputResH,
                                opt.outputResW)
                            result = matching(boxes, scores.numpy(), preds)
                        else:
                            preds_hm, preds_img, preds_scores = getPrediction(
                                hm_data, pt1, pt2, opt.inputResH,
                                opt.inputResW, opt.outputResH, opt.outputResW)
                            result = pose_nms(boxes, scores, preds_img,
                                              preds_scores)
                        result = {'imgname': im_name, 'result': result}
                        self.final_result.append(result)

                        # time2 = time.time()
                        # print(time2-time1)
                        ######################################################################################
                        # img = vis_frame(orig_img, result)

                        # cv2.imshow("AlphaPose Demo", img)
                        # cv2.imwrite("/home/hrs/Desktop/dd/now.jpg", img)
                        # cv2.waitKey(30)
                        ########################################################################
                        # self.point.set_offsets(keypoints[self.i])

                        # self.image = self.ax_in.imshow(orig_img, aspect='equal')
                        # self.image.set_data(orig_img)
                        # plt.draw()
                        # plt.pause(0.000000000000000001)
                        ##########################################################################
                        if not result['result']:  # No people
                            res = {'keypoints': -1, 'image': orig_img}
                            self.Q.put(res)  #TODO
                        else:
                            kpt = max(
                                result['result'],
                                key=lambda x: x['proposal_score'].data[0] *
                                calculate_area(x['keypoints']),
                            )['keypoints']

                            res = {'keypoints': kpt, 'image': orig_img}

                            self.Q.put(res)

                            # kpt_np = kpt.numpy()
                            # n = kpt_np.shape[0]
                            # print(kpt_np.shape)
                            # point_list = [(kpt_np[m, 0], kpt_np[m, 1]) for m in range(17)]
                            # for point in point_list:
                            #     cv2.circle(pose_img, point, 1, (0, 43, 32), 4)

                        # cv2.imshow(self.window, pose_img)
                        # cv2.waitKey()

                        # if opt.save_img or opt.save_video or opt.vis:
                        #     img = vis_frame(orig_img, result)
                        #     if opt.vis:
                        #         cv2.imshow("AlphaPose Demo", img)
                        #         cv2.waitKey(30)
                        #     if opt.save_img:
                        #         cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
                        #     if opt.save_video:
                        #         self.stream.write(img)
            else:
                time.sleep(0.1)
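
The heatmap forward pass above is run in chunks so large person counts do not exhaust GPU memory. A minimal sketch of that batching, factored into a helper, is shown below; `batch_size=20` and the `.cuda()` placement mirror the example, the helper name is an assumption.

# Batched SPPE inference, as in the loop above.
import torch

def batched_heatmaps(pose_model, inps, batch_size=20):
    datalen = inps.size(0)
    num_batches = (datalen + batch_size - 1) // batch_size  # ceil division
    hm = []
    with torch.no_grad():
        for j in range(num_batches):
            inps_j = inps[j * batch_size:min((j + 1) * batch_size, datalen)].cuda()
            hm.append(pose_model(inps_j))
    return torch.cat(hm).cpu().data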
Example 12
    def update(self):
        # keep looping infinitely
        temp_kps = []
        while True:
            # if the thread indicator variable is set, stop the
            # thread
            if self.stopped:
                if self.save_video:
                    self.stream.release()
                return
            # otherwise, ensure the queue is not empty
            if not self.Q.empty():
                (boxes, scores, hm_data, pt1, pt2, orig_img,
                 im_name) = self.Q.get()
                orig_img = np.array(orig_img, dtype=np.uint8)
                if boxes is None:
                    if opt.save_img or opt.save_video or opt.vis:
                        img = orig_img
                        if opt.vis:
                            h, w, c = img.shape
                            img = cv2.resize(img, (int(w / 2), int(h / 2)),
                                             interpolation=cv2.INTER_CUBIC)
                            cv2.imshow("AlphaPose Demo", img)
                            cv2.waitKey(30)
                        if opt.save_img:
                            cv2.imwrite(
                                os.path.join(opt.outputpath, 'vis', im_name),
                                img)
                        if opt.save_video:
                            self.stream.write(img)
                else:
                    # location prediction (n, kp, 2) | score prediction (n, kp, 1)

                    preds_hm, preds_img, preds_scores = getPrediction(
                        hm_data, pt1, pt2, opt.inputResH, opt.inputResW,
                        opt.outputResH, opt.outputResW)

                    result = pose_nms(boxes, scores, preds_img, preds_scores)
                    #print(len(result))
                    #print(boxes.shape)
                    result = []
                    if preds_img.shape[0] > 0:
                        for re in range(preds_img.shape[0]):
                            pos = preds_img[re].unsqueeze(0).numpy()
                            pos = self.aligner.align_points(pos)[0]
                            pos = (pos[..., :2] - 129) / 255
                            pos = torch.FloatTensor(pos)

                            kp = torch.cat(
                                (pos, preds_scores[re].unsqueeze(1)), 1)
                            kp = kp.unsqueeze(0)
                            kp = kp.reshape([1, -1]).cuda()
                            kp = kp.repeat(9, 1).reshape(1, -1)
                            outputs = self.pos_reg_model(kp)
                            _, preds = torch.max(outputs, 1)
                            classidx = preds.cpu()
                            result.append({
                                'class': str(float(classidx)),
                                'keypoints': preds_img[re],
                                'kp_score': preds_scores[re].unsqueeze(1),
                                'bbox': boxes[re]
                            })
                        # print(preds)
                        result = {'imgname': im_name, 'result': result}
                        # if len(result)>0:
                        #     for re in range(len(result)):
                        #         pos = result[re]['keypoints'].unsqueeze(0).numpy()
                        #         pos = self.aligner.align_points(pos)[0]
                        #         pos = (pos[..., :2] - 129) / 255
                        #         pos = torch.FloatTensor(pos)
                        #         kp = torch.cat((pos, result[0]['kp_score']), 1)
                        #         kp = kp.unsqueeze(0)
                        #         kp = kp.reshape([1, -1]).cuda()
                        #         kp = kp.repeat(9, 1).reshape(1, -1)
                        #         outputs = self.pos_reg_model(kp)
                        #         _, preds = torch.max(outputs, 1)
                        #         classidx = preds.cpu()
                        #         result[re]['class'] = str(float(classidx))
                        #     # print(preds)
                        #     result = {
                        #         'imgname': im_name,
                        #         'result': result
                        #     }
                        self.result_Q.put(result)

                        self.final_result.append(result)
                        if opt.save_img or opt.save_video or opt.vis:
                            img = vis_frame(orig_img, result)
                            if opt.vis:
                                h, w, c = img.shape
                                img = cv2.resize(img, (int(w / 2), int(h / 2)),
                                                 interpolation=cv2.INTER_CUBIC)
                                cv2.imshow("AlphaPose Demo", img)
                                cv2.waitKey(30)
                            if opt.save_img:
                                cv2.imwrite(
                                    os.path.join(opt.outputpath, 'vis',
                                                 im_name), img)
                            if opt.save_video:
                                self.stream.write(img)

                        # send the image to the TCP client
                        img = vis_frame(orig_img, result)
                        h, w, c = img.shape
                        img = cv2.resize(img, (int(w / 2), int(h / 2)),
                                         interpolation=cv2.INTER_CUBIC)
                        self.tcp_client.send_img(img)
            else:
                time.sleep(0.1)
Example 13
    def update(self):
        count = 0
#         filepath = '/home/yurik/Documents/Program/Alphapose_zed_video/testdata/20191014/walkstraightly/walkstraightly.svo'
#         init = sl.InitParameters(svo_input_filename=filepath,svo_real_time_mode=False)
#         init.depth_mode = sl.DEPTH_MODE.DEPTH_MODE_QUALITY
#         cam = sl.Camera()
#         runtime = sl.RuntimeParameters()
#         status = cam.open(init)
#         mat = sl.Mat()
#         zeroarr = np.zeros((720,1280,3))
        # keep looping infinitely
        while True:
            # if the thread indicator variable is set, stop the
            # thread
            if self.stopped:
                if self.save_video:
                    self.stream.release()
                return
            # otherwise, ensure the queue is not empty
            if not self.Q.empty():
                (boxes, scores, hm_data, pt1, pt2, orig_img, im_name) = self.Q.get()
                orig_img = np.array(orig_img, dtype=np.uint8)
                if boxes is None:
                    if opt.save_img or opt.save_video or opt.vis:
                        img = orig_img
                        if opt.vis:
                            cv2.imshow("AlphaPose Demo", img)
                            cv2.waitKey(30)
                        if opt.save_img:
                            cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
                        if opt.save_video:
                            self.stream.write(img)
                else:
                    # location prediction (n, kp, 2) | score prediction (n, kp, 1)
                    if opt.matching:
                        preds = getMultiPeakPrediction(
                            hm_data, pt1.numpy(), pt2.numpy(), opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW)
                        result = matching(boxes, scores.numpy(), preds)
                    else:
                        preds_hm, preds_img, preds_scores = getPrediction(
                            hm_data, pt1, pt2, opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW)
                        result = pose_nms(
                            boxes, scores, preds_img, preds_scores)
                    result = {
                        'imgname': im_name,
                        'result': result
                    }

                    # 3D coordinates computation
                    ppl = result['result']
                    ppl_num = len(ppl)
                    self.coordinates_u, self.coordinates_v, self.truex, self.truey, self.dists = fl.people_3d_coord(ppl, ppl_num,
                                                                             self.video_mode, self.camMtx1, orig_img)

                    self.final_result.append(result)
                    if opt.save_img or opt.save_video or opt.vis:
                        img = vis_frame(orig_img, result)
#                         err = cam.grab(runtime)
#                         if err == sl.ERROR_CODE.SUCCESS:
#                             cam.retrieve_image(mat, sl.VIEW.VIEW_DEPTH)
#                             depthmap = mat.get_data()
#                             if img.shape[2] == 3:
#                                 depthmap = cv2.cvtColor(depthmap, cv2.COLOR_RGBA2RGB)
#                             depthmap = cv2.resize(depthmap, (int(img.shape[1]/2), img.shape[0]))
#                             depthmap = cv2.applyColorMap(depthmap, cv2.COLORMAP_JET)
#                             depthmap = np.hstack((depthmap, zeroarr))
#                             depthmap = depthmap.astype(np.uint8)
#                             img = cv2.addWeighted(img, 0.5, depthmap, 0.5, 3)
                        if len(self.coordinates_v) > 0 and len(self.coordinates_u) > 0:
                            for i in range(len(self.coordinates_v)):
#                                 cv2.putText(img, 'z:' + str(round((self.dists[i] / 10), 1)),
#                                             (int(self.coordinates_u[i]), int(self.coordinates_v[i]) - 15),
#                                             cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 3, 8)
                                cv2.putText(img, str(round((self.truex[i] / 10), 1)),
                                            (int(self.coordinates_u[i]), int(self.coordinates_v[i]) - 15),
                                            cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 3, 8)
                                cv2.putText(img, str(round((self.truey[i] / 10), 1)),
                                            (int(self.coordinates_u[i]) + 200, int(self.coordinates_v[i]) - 15),
                                            cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 3, 8)
                                cv2.putText(img, str(round((self.dists[i] / 10), 1)),
                                            (int(self.coordinates_u[i]) + 400, int(self.coordinates_v[i]) - 15),
                                            cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 255), 3, 8)
                                cv2.putText(img, 'frames: ' + str(count), (620, 620), cv2.FONT_HERSHEY_PLAIN, 2, (0,100,90), 3, 8)
                        else:
                            cv2.putText(img, '[N/A]',
                                        (40, 620), cv2.FONT_HERSHEY_PLAIN, 2, (0, 100, 90), 3, 8)
                        if opt.vis:
                            cv2.imshow("AlphaPose Demo", img)
                            cv2.waitKey(30)
                        if opt.save_img:
                            cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
                        if opt.save_video:
                            self.stream.write(img)
            else:
                time.sleep(0.1)
            count = count + 1
Example 14
    def update(self):
        # keep looping infinitely
        while True:
            sys.stdout.flush()
            print("generator len : " + str(self.Q.qsize()))

            # if the thread indicator variable is set, stop the
            # thread
            if self.stopped:
                if self.save_video:
                    self.stream.release()
                return
            # otherwise, ensure the queue is not empty
            if not self.det_processor.Q.empty():

                with torch.no_grad():
                    (inps, orig_img, im_name, boxes, scores, pt1,
                     pt2) = self.det_processor.read()

                    if orig_img is None:
                        print(f'{im_name} image read None: handle_video')
                        break

                    orig_img = np.array(orig_img, dtype=np.uint8)
                    if boxes is None or boxes.nelement() == 0:
                        (boxes, scores, hm_data, pt1, pt2, orig_img,
                         im_name) = (None, None, None, None, None, orig_img,
                                     im_name.split('/')[-1])

                        # if opt.save_img or opt.save_video or opt.vis:
                        #     img = orig_img
                        #     if opt.vis:
                        #         cv2.imshow("AlphaPose Demo", img)
                        #         cv2.waitKey(30)
                        #     if opt.save_img:
                        #         cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
                        #     if opt.save_video:
                        #         self.stream.write(img)
                    else:
                        # location prediction (n, kp, 2) | score prediction (n, kp, 1)

                        datalen = inps.size(0)
                        batchSize = 10  #args.posebatch()
                        leftover = 0
                        if datalen % batchSize:
                            leftover = 1
                        num_batches = datalen // batchSize + leftover
                        hm = []

                        sys.stdout.flush()
                        print("hhhh")

                        for j in range(num_batches):
                            inps_j = inps[j * batchSize:min(
                                (j + 1) * batchSize, datalen)]  #.cuda()
                            hm_j = self.pose_model(inps_j)
                            hm.append(hm_j)

                        hm = torch.cat(hm)
                        hm = hm.cpu().data

                        (boxes, scores, hm_data, pt1, pt2, orig_img,
                         im_name) = (boxes, scores, hm, pt1, pt2, orig_img,
                                     im_name.split('/')[-1])

                        if opt.matching:
                            preds = getMultiPeakPrediction(
                                hm_data, pt1.numpy(), pt2.numpy(),
                                opt.inputResH, opt.inputResW, opt.outputResH,
                                opt.outputResW)
                            result = matching(boxes, scores.numpy(), preds)
                        else:
                            preds_hm, preds_img, preds_scores = getPrediction(
                                hm_data, pt1, pt2, opt.inputResH,
                                opt.inputResW, opt.outputResH, opt.outputResW)
                            result = pose_nms(boxes, scores, preds_img,
                                              preds_scores)
                        result = {'imgname': im_name, 'result': result}
                        self.final_result.append(result)

                        kpts = []
                        no_person = []
                        if not result['result']:  # No people
                            self.Q.put(None)  #TODO
                        else:
                            self.Q.put(result)

                        # if opt.save_img or opt.save_video or opt.vis:
                        #     img = vis_frame(orig_img, result)
                        #     if opt.vis:
                        #         cv2.imshow("AlphaPose Demo", img)
                        #         cv2.waitKey(30)
                        #     if opt.save_img:
                        #         cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
                        #     if opt.save_video:
                        #         self.stream.write(img)
            else:
                time.sleep(0.1)
Example 15
    def update(self):
        # keep looping infinitely
        while True:
            # if the thread indicator variable is set, stop the
            # thread
            if self.stopped:
                if self.save_video:
                    self.stream.release()
                return
            # otherwise, ensure the queue is not empty
            if not self.Q.empty():
                (boxes, scores, hm_data, pt1, pt2, orig_img,
                 im_name) = self.Q.get()
                orig_img = np.array(orig_img, dtype=np.uint8)
                if boxes is None:
                    if opt.save_img or opt.save_video or opt.vis:
                        img = orig_img
                        if opt.vis:
                            cv2.imshow("AlphaPose Demo", img)
                            cv2.waitKey(30)
                        if opt.save_img:
                            cv2.imwrite(
                                os.path.join(opt.outputpath, 'vis', im_name),
                                img)
                        if opt.save_video:
                            self.stream.write(img)
                else:
                    # location prediction (n, kp, 2) | score prediction (n, kp, 1)

                    preds_hm, preds_img, preds_scores = getPrediction(
                        hm_data, pt1, pt2, opt.inputResH, opt.inputResW,
                        opt.outputResH, opt.outputResW)

                    result = pose_nms(boxes, scores, preds_img, preds_scores)
                    result = {
                        'imgname': im_name,
                        'result': result
                    }  # append imgname here.
                    # result here includes imgname, bbox, kps, kp_score, proposal_score
                    # Critical, run pnp algorithm here to get 6d pose.
                    # embed()
                    if result['result']:
                        kp_2d = np.array(result['result'][0]['keypoints'])
                        kp_3d = np.array(self.kp_3d)
                        R, t = pnp(kp_3d, kp_2d, self.cam_K)
                        result.update({'cam_R': R, 'cam_t': t})
                    else:
                        result.update({'cam_R': [], 'cam_t': []})
                    self.final_result.append(result)
                    if opt.save_img or opt.save_video or opt.vis:
                        img = vis_frame(orig_img, result)
                        if opt.vis:
                            cv2.imshow("AlphaPose Demo", img)
                            cv2.waitKey(30)
                        if opt.save_img:
                            cv2.imwrite(
                                os.path.join(opt.outputpath, 'vis', im_name),
                                img)
                        if opt.save_video:
                            self.stream.write(img)
            else:
                time.sleep(0.1)
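
The `pnp()` helper used above is not shown in the example, so the sketch below is only one plausible implementation based on OpenCV's `solvePnP`: the 2D/3D keypoint ordering, the zero-distortion camera, and the helper's exact return convention are assumptions.

# Hedged sketch of a PnP step: 3D keypoints + 2D detections + intrinsics -> R, t.
import cv2
import numpy as np

def pnp(kp_3d, kp_2d, cam_K, dist=None):
    if dist is None:
        dist = np.zeros(4)  # assume no lens distortion
    ok, rvec, tvec = cv2.solvePnP(
        np.asarray(kp_3d, dtype=np.float64),
        np.asarray(kp_2d, dtype=np.float64),
        np.asarray(cam_K, dtype=np.float64),
        dist,
        flags=cv2.SOLVEPNP_ITERATIVE)
    if not ok:
        return np.eye(3), np.zeros(3)
    R, _ = cv2.Rodrigues(rvec)   # rotation vector -> 3x3 rotation matrix
    return R, tvec.reshape(3)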
Example 16
    def update(self):
        # keep looping infinitely
        temp_kps=[]
        while True:
            # if the thread indicator variable is set, stop the
            # thread
            if self.stopped:
                if self.save_video:
                    self.stream.release()
                return
            # otherwise, ensure the queue is not empty
            if not self.Q.empty():
                (boxes, scores, hm_data, pt1, pt2, orig_img, im_name) = self.Q.get()
                orig_img = np.array(orig_img, dtype=np.uint8)
                if boxes is None:
                    if opt.save_img or opt.save_video or opt.vis:
                        img = orig_img
                        if opt.vis:
                            cv2.imshow("AlphaPose Demo", img)
                            cv2.waitKey(30)
                        if opt.save_img:
                            cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
                        if opt.save_video:
                            self.stream.write(img)
                else:
                    # location prediction (n, kp, 2) | score prediction (n, kp, 1)
                    
                    preds_hm, preds_img, preds_scores = getPrediction(
                        hm_data, pt1, pt2, opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW)

                    result = pose_nms(boxes, scores, preds_img, preds_scores)

                    pos = result[0]['keypoints'].unsqueeze(0).numpy()
                    pos = self.aligner.align_points(pos)[0]
                    pos = (pos[..., :2] - 129) / 255
                    pos = torch.FloatTensor(pos)
                    kp = torch.cat((pos, result[0]['kp_score']), 1)
                    kp = kp.unsqueeze(0)
                    if len(temp_kps) < 9:
                        kp = kp.reshape([1, -1]).cuda()
                        temp_kps.append(kp)
                        kp = kp.repeat(9, 1).reshape(1, -1)
                        outputs = self.pos_reg_model(kp)
                        _, preds = torch.max(outputs, 1)
                        classidx = preds.cpu()
                        result[0]['class'] = str(float(classidx))
                    else:
                        kp = kp.cuda().reshape(1,-1)
                        temp_kps.append(kp)
                        temp_kps.pop(0)
                        _temp_kps = torch.cat(temp_kps)
                        _temp_kps = _temp_kps.cuda()
                        _temp_kps = _temp_kps.reshape([1, -1])
                        outputs = self.pos_reg_model(_temp_kps)
                        _, preds = torch.max(outputs, 1)
                        classidx = preds.cpu()
                        result[0]['class'] = str(float(classidx))
                    # print(preds)
                    result = {
                        'imgname': im_name,
                        'result': result
                    }
                    self.result_Q.put((boxes, classidx))

                    self.final_result.append(result)
                    if opt.save_img or opt.save_video or opt.vis:
                        img = vis_frame(orig_img, result)
                        if opt.vis:
                            cv2.imshow("AlphaPose Demo", img)
                            cv2.waitKey(30)
                        if opt.save_img:
                            cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
                        if opt.save_video:
                            self.stream.write(img)
            else:
                time.sleep(0.1)
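This example assumes a pos_reg_model that consumes a flattened window of 9 frames, each contributing 17 joints with (x, y, score), i.e. a 9 x 17 x 3 = 459-dimensional vector. A minimal sketch of such a classifier (the layer sizes and number of action classes are assumptions, not taken from the original code):

import torch
import torch.nn as nn

class PoseWindowClassifier(nn.Module):
    """Hypothetical action classifier over a flattened 9-frame keypoint window."""

    def __init__(self, num_joints=17, window=9, num_classes=5):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(window * num_joints * 3, 256),
            nn.ReLU(),
            nn.Linear(256, num_classes),
        )

    def forward(self, x):  # x: (batch, window * num_joints * 3)
        return self.net(x)

# Usage sketch mirroring the loop above:
# window = torch.randn(1, 9 * 17 * 3)
# _, class_idx = torch.max(PoseWindowClassifier()(window), 1)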
Ejemplo n.º 17
0
    def update(self):
        # keep looping infinitely
        while True:
            # if the thread indicator variable is set, stop the
            # thread
            
            if self.stopped:
                if self.save_video:
                    self.stream.release()
                return
#            num = 1
            if not self.Q.empty():
                
                (boxes, scores, hm_data, pt1, pt2, orig_img, im_name) = self.Q.get()

                # Fetch the matching frame of the reference (trainer) video for side-by-side display.
                img_T = get_frame(self.iter, self.Cap_T)
                self.iter += 1
                orig_img = np.array(orig_img, dtype=np.uint8)
               
                if boxes is None:
                    if opt.save_img or opt.save_video or opt.vis:
                        if self.iter < self.total_len:
                            # Show the raw frame next to the matching reference frame.
                            img = orig_img
                            img_T = cv2.resize(img_T, (img.shape[1], img.shape[0]), interpolation=cv2.INTER_AREA)
                            img = np.hstack((img, img_T))
                            cv2.imshow("Action_Trainer Demo", img)
                            cv2.waitKey(60)
                            if opt.save_img:
                                cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
                            if opt.save_video:
                                self.stream.write(img)
                        else:
                            # Reference video exhausted: tell the user and restart from frame 0.
                            img = orig_img
                            cv2.putText(img, "The movie repeats, press ctrl+c key to terminate", (50, 100),
                                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)
                            cv2.imshow("Action_Trainer Demo", img)
                            cv2.waitKey(300)
                            self.iter = 0
                            with self.Q.mutex:
                                self.Q.queue.clear()
                else:
                    preds_hm, preds_img, preds_scores = getPrediction(
                        hm_data, pt1, pt2, opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW)

                    result = pose_nms(boxes, scores, preds_img, preds_scores)
                    result2 = {
                        'imgname': im_name,
                        'result': result
                    }
                    self.final_result.append(result2)
                    try:
                        if self.iter < self.total_len:
                            # Compare the detected pose with the reference pose for this frame:
                            # convert to the shared joint layout, align both torsos, and overlay the
                            # per-joint differences on the user's frame.
                            k = self.pose_id_[self.iter]
                            if k == 0:
                                continue
                            Old_pose = np.array(result[0]['keypoints'])
                            poseU = old2new_joint(Old_pose)
                            poseU_align = align_torso(poseU)
                            poseT = self.T_pose[k]
                            poseT_align = align_torso(poseT)
                            theta = get_diff(poseT_align, poseU_align)
                            img = draw_pose_final(poseT, poseU, orig_img, theta)
                            # img_T = draw_pos(poseT, img_T)
                            img_T = cv2.resize(img_T, (img.shape[1], img.shape[0]), interpolation=cv2.INTER_AREA)

                            img = np.hstack((img, img_T))
                            # cv2.putText(img, str(self.iter), (900, 150), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 3)
                            cv2.imshow("Action_Trainer Demo", img)
                            cv2.waitKey(30)
                        else:
                            img = orig_img
                            cv2.putText(img, "The movie repeats, press ctrl+c key to terminate", (50, 100),
                                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)
                            cv2.imshow("Action_Trainer Demo", img)
                            cv2.waitKey(300)

                            self.iter = 0
                            with self.Q.mutex:
                                self.Q.queue.clear()
                    except Exception:
                        # A frame without a valid detection or reference pose is skipped.
                        print("***********", self.iter)
            else:
                time.sleep(0.1)
Ejemplo n.º 18
0
def detect_main(args, im_names, yolo_model, pose_net):
    # Load input images
    data_loader = ImageLoader(im_names, batchSize=args.detbatch, format='yolo').start()

    # Load detection loader
    det_loader = DetectionLoader(data_loader, model=yolo_model, batchSize=args.detbatch).start()
    det_processor = DetectionProcessor(det_loader).start()

    runtime_profile = {
        'dt': [],
        'pt': [],
        'pn': []
    }

    # Init data writer
    # writer = DataWriter(args.save_video).start()

    data_len = data_loader.length()
    fall_res_all = []
    batchSize = args.posebatch
    for i in range(data_len):
        start_time = getTime()
        with torch.no_grad():
            (inps, orig_img, im_name, boxes, scores, pt1, pt2) = det_processor.read()
            if boxes is None or boxes.nelement() == 0:
                # writer.save(None, None, None, None, None, orig_img, im_name.split('/')[-1])
                continue

            ckpt_time, det_time = getTime(start_time)
            runtime_profile['dt'].append(det_time)
            # Pose Estimation
            # print(im_name)
            datalen = inps.size(0)
            leftover = 0
            if (datalen) % batchSize:
                leftover = 1
            num_batches = datalen // batchSize + leftover
            hm = []
            for j in range(num_batches):
                inps_j = inps[j * batchSize:min((j + 1) * batchSize, datalen)].cuda()
                hm_j = pose_net(inps_j)
                hm.append(hm_j)
            hm = torch.cat(hm)
            ckpt_time, pose_time = getTime(ckpt_time)
            runtime_profile['pt'].append(pose_time)
            hm = hm.cpu()
            # writer.save(boxes, scores, hm, pt1, pt2, orig_img, im_name.split('/')[-1])
            fall_res = []
            fall_res.append(im_name.split('/')[-1])
            if boxes is None:  # unreachable here (handled above); kept for safety
                cv2.imwrite(opt.outputpath + '/' + im_name.split('/')[-1], orig_img)
            else:
                if opt.matching:
                    preds = getMultiPeakPrediction(
                        hm, pt1.numpy(), pt2.numpy(), opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW)
                    result = matching(boxes, scores.numpy(), preds)
                else:
                    preds_hm, preds_img, preds_scores = getPrediction(hm, pt1, pt2, opt.inputResH, opt.inputResW,
                                                                      opt.outputResH, opt.outputResW)
                    result = pose_nms(boxes, scores, preds_img, preds_scores)
                # Wrap the result regardless of which branch produced it, so vis_frame gets the same format.
                result = {'imgname': im_name, 'result': result}
                img = vis_frame(orig_img, result)
               
                for human in result['result']:
                    keypoint = human['keypoints']
                    keypoint = keypoint.numpy()
                    xmax = max(keypoint[:, 0])
                    xmin = min(keypoint[:, 0])
                    ymax = max(keypoint[:, 1])
                    ymin = min(keypoint[:, 1])
                    w = xmax - xmin
                    h = ymax - ymin
                    distance = abs((keypoint[15][1] + keypoint[16][1]) / 2 - (keypoint[11][1] + keypoint[12][1]) / 2)
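                    # Simple fall heuristics: a person whose bounding box is wider than it is tall
                    # (w / h >= 0.95), or whose ankles end up vertically close to the hips
                    # (distance < 55 px), is flagged as a possible fall.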
                    if w / h >= 0.95:
                        cv2.rectangle(img, (int(xmin), int(ymin)), (int(xmax), int(ymax)), (0, 0, 255), 2)
                        font = cv2.FONT_HERSHEY_SIMPLEX
                        cv2.putText(img, 'Warning!Fall', (int(xmin + 10), int(ymax - 10)), font, 1, (0, 0, 255), 2)
                        fall_res.append(1)
                        fall_res.append([xmin, ymin, xmax, ymax])
                        '''
                        print('1 location:[%f,' % (xmin) + '%f]' % (ymin) + ' [%f,' % (xmax) + '%f]' % (
                            ymin) + ' [%f,' % (
                                  xmin) + '%f]' % (ymax) + ' [%f,' % (xmax) + '%f]' % (ymax))
                        '''
                    else:
                        if distance < 55:
                            cv2.rectangle(img, (int(xmin), int(ymin)), (int(xmax), int(ymax)), (0, 255, 0), 2)
                            font = cv2.FONT_HERSHEY_SIMPLEX
                            cv2.putText(img, 'Warning!Fall!', (int(xmin + 10), int(ymax - 10)), font, 1, (0, 255, 0), 2)
                            fall_res.append(1)
                            fall_res.append([xmin, ymin, xmax, ymax])
                            '''
                            print('1 location:[%f,' % (xmin) + '%f]' % (ymin) + ' [%f,' % (xmax) + '%f]' % (
                                ymin) + ' [%f,' % (
                                      xmin) + '%f]' % (ymax) + ' [%f,' % (xmax) + '%f]' % (ymax))
                            '''
                        else:
                            cv2.rectangle(img, (int(xmin), int(ymin)), (int(xmax), int(ymax)), (255, 0, 0), 2)
                #cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
                print(fall_res)
                cv2.imwrite(opt.outputpath + '/' + im_name.split('/')[-1], img)
            
            ckpt_time, post_time = getTime(ckpt_time)
            runtime_profile['pn'].append(post_time)
            fall_res_all.append(fall_res)
    return fall_res_all
Ejemplo n.º 19
0
    def update(self):

        time1 = time.time()

        _, frame = self.stream.read()
        # frame = cv2.resize(frame, (frame.shape[1]//2,frame.shape[0]//2))

        #TODO TESTING
        # frame[:,:200,:]=0
        # frame[:,450:,:]=0


        img_k, self.orig_img, im_dim_list_k = prep_frame(frame, self.inp_dim)
        
        img = [img_k]
        im_name = ["im_name"]
        im_dim_list = [im_dim_list_k] 

        img = torch.cat(img)
        im_dim_list = torch.FloatTensor(im_dim_list).repeat(1, 2)

        time2 = time.time()


        with torch.no_grad():
            ### detector 
            #########################
            # Human Detection
            img = img.cuda()
            prediction = self.det_model(img, CUDA=True)
            # NMS process
            dets = dynamic_write_results(prediction, opt.confidence,
                                        opt.num_classes, nms=True, nms_conf=opt.nms_thesh)
            if isinstance(dets, int) or dets.shape[0] == 0:   
                self.visualize2dnoperson()
                return None
                
            
            dets = dets.cpu()
            im_dim_list = torch.index_select(im_dim_list, 0, dets[:, 0].long())
            scaling_factor = torch.min(self.det_inp_dim / im_dim_list, 1)[0].view(-1, 1)

            # coordinate transfer
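            # Undo the detector's letterbox padding and rescaling so the boxes map back to the
            # original image resolution before person patches are cropped for pose estimation.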
            dets[:, [1, 3]] -= (self.det_inp_dim - scaling_factor * im_dim_list[:, 0].view(-1, 1)) / 2
            dets[:, [2, 4]] -= (self.det_inp_dim - scaling_factor * im_dim_list[:, 1].view(-1, 1)) / 2

            dets[:, 1:5] /= scaling_factor
            for j in range(dets.shape[0]):
                dets[j, [1, 3]] = torch.clamp(dets[j, [1, 3]], 0.0, im_dim_list[j, 0])
                dets[j, [2, 4]] = torch.clamp(dets[j, [2, 4]], 0.0, im_dim_list[j, 1])
            boxes = dets[:, 1:5]
            scores = dets[:, 5:6]

            boxes_k = boxes[dets[:, 0] == 0]
            if isinstance(boxes_k, int) or boxes_k.shape[0] == 0:
                # No person among the detections for this frame.
                self.visualize2dnoperson()
                return None
            inps = torch.zeros(boxes_k.size(0), 3, opt.inputResH, opt.inputResW)
            pt1 = torch.zeros(boxes_k.size(0), 2)
            pt2 = torch.zeros(boxes_k.size(0), 2)

            time3 = time.time()


            ### processor 
            #########################
            inp = im_to_torch(cv2.cvtColor(self.orig_img, cv2.COLOR_BGR2RGB))
            inps, pt1, pt2 = self.crop_from_dets(inp, boxes, inps, pt1, pt2)

            ### generator
            #########################            
            self.orig_img = np.array(self.orig_img, dtype=np.uint8)
            # location prediction (n, kp, 2) | score prediction (n, kp, 1)

            datalen = inps.size(0)
            batchSize = 20 #args.posebatch()
            leftover = 0
            if datalen % batchSize:
                leftover = 1
            num_batches = datalen // batchSize + leftover
            hm = []

            time4 = time.time()

            for j in range(num_batches):
                inps_j = inps[j * batchSize:min((j + 1) * batchSize, datalen)].cuda()
                hm_j = self.pose_model(inps_j)
                hm.append(hm_j)
            
            
            hm = torch.cat(hm)
            hm = hm.cpu().data

            preds_hm, preds_img, preds_scores = getPrediction(
                hm, pt1, pt2, opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW)
            result = pose_nms(
                boxes, scores, preds_img, preds_scores)

            time5 = time.time() 
            
                    
            if not result:  # No people found by pose NMS.
                self.visualize2dnoperson()
                return None

            # Keep the keypoints of the most confident, largest person.
            self.kpt = max(
                result,
                key=lambda x: x['proposal_score'].data[0] * calculate_area(x['keypoints']))['keypoints']
            self.visualize2d()

            time6 = time.time()
            print("process time : {} ".format(time6 - time5))
            return self.kpt
Ejemplo n.º 20
0
    def update(self):
        # keep looping infinitely

        frame_prev = -1
        frame_cur = 0
        img_id = -1
        next_id = 0
        bbox_dets_list_list = []
        keypoints_list_list = []
        car_dets_list_list = []

        car_next_id = 0

        while True:
            # if the thread indicator variable is set, stop the
            # thread
            if self.stopped:
                if self.save_video:
                    self.stream.release()
                return
            # otherwise, ensure the queue is not empty

            if not self.Q.empty():

                (boxes, scores, hm_data, pt1, pt2, orig_img, img_id,
                 CAR) = self.Q.get()
                # print(img_id)
                orig_img = np.array(orig_img, dtype=np.uint8)
                img = orig_img

                bbox_dets_list = []  # keyframe: start from empty
                keypoints_list = []  # keyframe: start from empty
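                # Tracking runs in two passes: detections are first matched to the previous frame
                # by spatial (IoU) consistency; anything still unmatched afterwards gets a new id.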

                if boxes is None:  # No person detection
                    bbox_det_dict = {
                        "img_id": img_id,
                        "det_id": 0,
                        "track_id": None,
                        "bbox": [0, 0, 2, 2]
                    }
                    bbox_dets_list.append(bbox_det_dict)

                    keypoints_dict = {
                        "img_id": img_id,
                        "det_id": 0,
                        "track_id": None,
                        "keypoints": []
                    }
                    keypoints_list.append(keypoints_dict)

                    bbox_dets_list_list.append(bbox_dets_list)
                    keypoints_list_list.append(keypoints_list)

                else:
                    if opt.matching:
                        preds = getMultiPeakPrediction(hm_data, pt1.numpy(),
                                                       pt2.numpy(),
                                                       opt.inputResH,
                                                       opt.inputResW,
                                                       opt.outputResH,
                                                       opt.outputResW)
                        result = matching(boxes, scores.numpy(), preds)
                    else:

                        preds_hm, preds_img, preds_scores = getPrediction(
                            hm_data, pt1, pt2, opt.inputResH, opt.inputResW,
                            opt.outputResH, opt.outputResW)
                        result = pose_nms(boxes, scores, preds_img,
                                          preds_scores)  # list type

                        # 'keypoints':
                        # 'kp_score':
                        # 'proposal_score':
                        # 'bbox'
                    #
                    # print('boexes', boxes.size(), boxes)
                    # for aa in result:
                    #     keys = aa['keypoints']
                    #     bbox2  = aa['bbox']
                    #     print('pose nms keys', keys.size())
                    #     print('pose nms, box', bbox2.size(), bbox2)
                    #
                    # _result = {
                    #     'imgname': img_id,
                    #     'result': result,
                    #     'pt1': pt1,
                    #     'pt2': pt2
                    # }

                    if img_id > 0:  # First frame does not have previous frame
                        bbox_list_prev_frame = bbox_dets_list_list[img_id -
                                                                   1].copy()
                        keypoints_list_prev_frame = keypoints_list_list[
                            img_id - 1].copy()
                    else:
                        bbox_list_prev_frame = []
                        keypoints_list_prev_frame = []

                    # boxes.size(0)
                    num_dets = len(result)
                    for det_id in range(
                            num_dets):  # detections for current frame
                        # obtain bbox position and track id

                        result_box = result[det_id]

                        kp_score = result_box['kp_score']
                        proposal_score = result_box['proposal_score'].numpy()[0]
                        if proposal_score < 1.3:
                            continue

                        keypoints = result_box['keypoints']
                        bbox_det = bbox_from_keypoints(keypoints)  # xxyy

                        # enlarge bbox by 20% with same center position
                        # bbox_x1y1x2y2 = xywh_to_x1y1x2y2(bbox_det)
                        bbox_in_xywh = enlarge_bbox(bbox_det, enlarge_scale)
                        # print('enlared', bbox_in_xywh)
                        bbox_det = x1y1x2y2_to_xywh(bbox_in_xywh)
                        # print('converted', bbox_det)

                        # Keyframe: use provided bbox
                        # if bbox_invalid(bbox_det):
                        #     track_id = None  # this id means null
                        #     keypoints = []
                        #     bbox_det = [0, 0, 2, 2]
                        #     # update current frame bbox
                        #     bbox_det_dict = {"img_id": img_id,
                        #                      "det_id": det_id,
                        #                      "track_id": track_id,
                        #                      "bbox": bbox_det}
                        #     bbox_dets_list.append(bbox_det_dict)
                        #     # update current frame keypoints
                        #     keypoints_dict = {"img_id": img_id,
                        #                       "det_id": det_id,
                        #                       "track_id": track_id,
                        #                       "keypoints": keypoints}
                        #     keypoints_list.append(keypoints_dict)
                        #     continue

                        # # update current frame bbox

                        # obtain keypoints for each bbox position in the keyframe

                        # print('img id ', img_id)

                        if img_id == 0:  # First frame, all ids are assigned automatically
                            track_id = next_id
                            next_id += 1

                        else:
                            track_id, match_index = get_track_id_SpatialConsistency(
                                bbox_det, bbox_list_prev_frame)
                            # print('track' ,track_id, match_index)

                            if track_id != -1:  # if candidate from prev frame matched, prevent it from matching another
                                del bbox_list_prev_frame[match_index]
                                del keypoints_list_prev_frame[match_index]

                        # update current frame bbox
                        bbox_det_dict = {
                            "img_id": img_id,
                            "det_id": det_id,
                            "track_id": track_id,
                            "bbox": bbox_det
                        }
                        bbox_dets_list.append(bbox_det_dict)

                        # update current frame keypoints
                        keypoints_dict = {
                            "img_id": img_id,
                            "det_id": det_id,
                            "track_id": track_id,
                            "keypoints": keypoints,
                            'kp_score': kp_score,
                            'bbox': bbox_det,
                            'proposal_score': proposal_score
                        }
                        keypoints_list.append(keypoints_dict)

                    num_dets = len(bbox_dets_list)
                    for det_id in range(
                            num_dets):  # detections for current frame
                        bbox_det_dict = bbox_dets_list[det_id]
                        keypoints_dict = keypoints_list[det_id]
                        # assert (det_id == bbox_det_dict["det_id"])
                        # assert (det_id == keypoints_dict["det_id"])

                        if bbox_det_dict[
                                "track_id"] == -1:  # this id means matching not found yet
                            track_id = bbox_det_dict["track_id"]
                            # track_id, match_index = get_track_id_SGCN(bbox_det_dict["bbox"], bbox_list_prev_frame,
                            #                                           keypoints_dict["keypoints"],
                            #                                           keypoints_list_prev_frame)

                            if track_id != -1:  # never true here: the SGCN matcher above is commented out, so track_id stays -1
                                del bbox_list_prev_frame[match_index]
                                del keypoints_list_prev_frame[match_index]
                                bbox_det_dict["track_id"] = track_id
                                keypoints_dict["track_id"] = track_id

                            # if still can not find a match from previous frame, then assign a new id
                            if track_id == -1 and not bbox_invalid(
                                    bbox_det_dict["bbox"]):
                                bbox_det_dict["track_id"] = next_id
                                keypoints_dict["track_id"] = next_id
                                next_id += 1

                    # update frame

                    bbox_dets_list_list.append(bbox_dets_list)
                    keypoints_list_list.append(keypoints_list)

                    # draw keypoints

                    vis_frame(img, keypoints_list)
                    # _pt1, _pt2 = _result['pt1'].numpy(), _result['pt2'].numpy()
                    # pt1 = _pt1.astype(np.uint32)
                    # pt2 = _pt2.astype(np.uint32)
                    # for p1, p2 in zip(pt1, pt2):
                    #     cv2.rectangle(img, (p1[0], p1[1]), (p2[0], p2[1]), (34, 154, 11), 1)

                if CAR is not None:  # Car detections are available for this frame
                    car_track_id = 0
                    car_np = CAR
                    new_car_bboxs = car_np[:, 0:4].astype(np.uint32)
                    new_car_score = car_np[:, 4]
                    car_dest_list = []

                    if img_id > 1:  # First frame does not have previous frame
                        car_bbox_list_prev_frame = car_dets_list_list[
                            img_id - 1].copy()
                    else:
                        car_bbox_list_prev_frame = []

                    # print('car bbox list prev frame ', len(car_bbox_list_prev_frame))
                    for c, score in zip(new_car_bboxs, new_car_score):
                        car_bbox_det = c
                        bbox_in_xywh = enlarge_bbox(car_bbox_det,
                                                    enlarge_scale)
                        bbox_det = x1y1x2y2_to_xywh(bbox_in_xywh)

                        # obtain keypoints for each bbox position in the keyframe

                        # print('img id ', img_id)

                        if img_id == 0:  # First frame, all ids are assigned automatically
                            car_track_id = car_next_id
                            car_next_id += 1
                            # print('if img id zero' , car_next_id)

                        else:
                            car_track_id, match_index = get_track_id_SpatialConsistency(
                                bbox_det, car_bbox_list_prev_frame)
                            # print(car_track_id, match_index)
                            if car_track_id != -1:  # if candidate from prev frame matched, prevent it from matching another
                                del car_bbox_list_prev_frame[match_index]

                        bbox_det_dict = {
                            "img_id": img_id,
                            "track_id": car_track_id,
                            "bbox": bbox_det
                        }
                        car_dest_list.append(bbox_det_dict)

                    # print()
                    num_dets = len(car_dest_list)
                    for det_id in range(
                            num_dets):  # detections for current frame
                        car_bbox_det_dict = car_dest_list[det_id]
                        # assert (det_id == bbox_det_dict["det_id"])
                        # assert (det_id == keypoints_dict["det_id"])
                        # print(Pose_matchercar_bbox_det_dict["track_id"])
                        if car_bbox_det_dict[
                                "track_id"] == -1:  # this id means matching not found yet
                            car_bbox_det_dict["track_id"] = car_next_id
                            car_next_id += 1
                            # print('car net id ', car_next_id)

                    self.tracking(car_dest_list, img_id)

                    for car in car_dest_list:
                        x, y, w, h = car['bbox']
                        track_id = car['track_id']

                        tracker = self.track_dict[track_id]
                        history = tracker['history']
                        moved = np.sum(history[-10:])
                        last_moved = np.sum(history[-60:])

                        COLOR_MOVING = (0, 255, 0)
                        COLOR_RED = (0, 0, 255)

                        COLOR_INACTIVE = (255, 0, 0)

                        cv2.rectangle(img, (x, y), (x + w, y + h),
                                      COLOR_INACTIVE, 1)
                        text_filled(img, (x, y), f'{track_id} Inactive',
                                    COLOR_INACTIVE)

                        # if moved:
                        #     cv2.rectangle(img, (x, y), (x + w, y + h), COLOR_MOVING, 1)
                        #     text_filled(img, (x, y), f'CAR {track_id} Active', COLOR_MOVING)
                        # else:
                        #
                        #     if last_moved:
                        #         cv2.rectangle(img, (x, y), (x + w, y + h), COLOR_RED, 1)
                        #         text_filled(img, (x, y), f'CAR {track_id} Standstill', COLOR_RED)
                        #
                        #         cropped = img[y:y+h, x:x+w,:]
                        #         filter = np.zeros(cropped.shape,dtype=img.dtype)
                        #         # print(cropped.shape, filter.shape)
                        #         filter[:,:,2] = 255
                        #         # print(overlay.shape)
                        #         # cv2.rectangle(overlay, (0, 0), (w, h), COLOR_RED, -1)
                        #         overlayed = cv2.addWeighted(cropped,0.8,filter,0.2,0)
                        #         img[y:y+h, x:x+w,:] = overlayed[:,:,:]
                        #     else:
                        #         cv2.rectangle(img, (x, y), (x + w, y + h), COLOR_INACTIVE, 1)
                        #         text_filled(img, (x, y), f'{track_id} Inactive', COLOR_INACTIVE)

                    car_dets_list_list.append(car_dest_list)

                else:
                    car_dest_list = []
                    bbox_det_dict = {
                        "img_id": img_id,
                        "det_id": 0,
                        "track_id": None,
                        "bbox": [0, 0, 2, 2]
                    }
                    car_dest_list.append(bbox_det_dict)
                    car_dets_list_list.append(car_dest_list)

                # if img_id != 0:
                #     for car in car_dets_list_list[-1]:
                #         car_track_id = car['track_id']
                #         if car_track_id is None:
                #             continue
                #
                #         car_bbox = car['bbox']
                #         for human in bbox_dets_list_list[-1]:
                #             human_track_id = human['track_id']
                #             if human_track_id is None:
                #                 continue
                #             hum_bbox = human['bbox']
                #             boxa = xywh_to_x1y1x2y2(hum_bbox)
                #             boxb = xywh_to_x1y1x2y2(car_bbox)
                #             x,y,w,h = x1y1x2y2_to_xywh(boxa)
                #             area = iou(boxa,boxb)
                #
                #             if area > 0.02:
                #                 cropped = img[y:y+h, x:x+w,:]
                #                 filter = np.zeros(cropped.shape,dtype=img.dtype)
                #                 filter[:,:,2] = 255
                #                 overlayed = cv2.addWeighted(cropped,0.9,filter,0.1,0)
                #                 img[y:y+h, x:x+w,:] = overlayed[:,:,:]

                if opt.vis:
                    cv2.imshow("AlphaPose Demo", img)
                    cv2.waitKey(1)
                if opt.save_video:
                    self.stream.write(img)
            else:
                time.sleep(0.1)
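The tracking examples above and below call a get_track_id_SpatialConsistency(bbox_det, dets_prev_frame) helper that is not reproduced here. A minimal sketch of such an IoU-based matcher, assuming [x, y, w, h] boxes, previous-frame entries that are dicts with 'bbox' and 'track_id' keys, and an assumed IoU threshold of 0.3:

def iou_xywh(a, b):
    """Intersection over union of two [x, y, w, h] boxes."""
    ax1, ay1, ax2, ay2 = a[0], a[1], a[0] + a[2], a[1] + a[3]
    bx1, by1, bx2, by2 = b[0], b[1], b[0] + b[2], b[1] + b[3]
    iw = max(0.0, min(ax2, bx2) - max(ax1, bx1))
    ih = max(0.0, min(ay2, by2) - max(ay1, by1))
    inter = iw * ih
    union = a[2] * a[3] + b[2] * b[3] - inter
    return inter / union if union > 0 else 0.0


def get_track_id_SpatialConsistency(bbox_det, dets_prev_frame, iou_thresh=0.3):
    """Sketch: return (track_id, index) of the best-overlapping previous detection, else (-1, -1)."""
    best_iou, best_idx = 0.0, -1
    for idx, prev in enumerate(dets_prev_frame):
        overlap = iou_xywh(bbox_det, prev['bbox'])
        if overlap > best_iou:
            best_iou, best_idx = overlap, idx
    if best_idx >= 0 and best_iou >= iou_thresh:
        return dets_prev_frame[best_idx]['track_id'], best_idx
    return -1, -1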
Ejemplo n.º 21
0
    def person_tracking(self, boxes, scores, hm_data, pt1, pt2, img_id):

        person_list = []

        if boxes is None:
            self.person_list_list.append([])
            return person_list

        if opt.matching:  # TODO Check the difference,
            preds = getMultiPeakPrediction(hm_data, pt1.numpy(), pt2.numpy(),
                                           opt.inputResH, opt.inputResW,
                                           opt.outputResH, opt.outputResW)
            # result = matching(boxes, scores.numpy(), preds)
            result = matching(boxes, scores, preds)
        else:
            preds_hm, preds_img, preds_scores = getPrediction(
                hm_data, pt1, pt2, opt.inputResH, opt.inputResW,
                opt.outputResH, opt.outputResW)
            result = pose_nms(boxes, scores, preds_img,
                              preds_scores)  # list type
            # result = {  'keypoints': ,  'kp_score': , 'proposal_score': ,  'bbox' }

        to_final_result = {'imgname': img_id, 'result': result, 'boxes': boxes}

        self.final_result.append(to_final_result)

        if img_id > 0:  # First frame does not have previous frame
            person_list_prev_frame = self.person_list_list[img_id - 1].copy()
        else:
            person_list_prev_frame = []
        # print(result)

        num_dets = len(result)
        for det_id in range(
                num_dets):  # IOU tracking for detections in current frame.
            # detections for current frame, obtain bbox position and track id

            result_box = result[det_id]
            kp_score = result_box['kp_score']
            if opt.matching:
                proposal_score = result_box['proposal_score']
            else:
                proposal_score = result_box['proposal_score'].numpy()[0]

            if proposal_score < 0.2:  # TODO check person proposal threshold
                continue

            if isnan(proposal_score):
                continue

            keypoints = result_box['keypoints']  # torch, (17,2)
            keypoints_pf = np.zeros((15, 2))

            idx_list = [16, 14, 12, 11, 13, 15, 10, 8, 6, 5, 7, 9, 0, 0, 0]
            for i, idx in enumerate(idx_list):
                keypoints_pf[i] = keypoints[idx]
            keypoints_pf[12] = (keypoints[5] + keypoints[6]) / 2  # neck
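            # The normalization below centers the PoseFlow-ordered joints on the neck,
            # making the representation translation-invariant before further matching.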

            keypoints_norm = keypoints_pf - keypoints_pf[12]

            # COCO-order {0-nose    1-Leye    2-Reye    3-Lear    4Rear    5-Lsho    6-Rsho    7-Lelb    8-Relb    9-Lwri    10-Rwri    11-Lhip    12-Rhip    13-Lkne    14-Rkne    15-Lank    16-Rank}
            # PoseFLow order  #{0-Rank    1-Rkne    2-Rhip    3-Lhip    4-Lkne    5-Lank    6-Rwri    7-Relb    8-Rsho    9-Lsho   10-Lelb    11-Lwri    12-neck  13-nose 14-TopHead}
            bbox_det = bbox_from_keypoints(keypoints)  # xxyy

            # enlarge bbox by 20% with same center position
            bbox_in_xywh = enlarge_bbox(bbox_det, enlarge_scale)
            bbox_det = x1y1x2y2_to_xywh(bbox_in_xywh)

            # # update current frame bbox
            if img_id == 0:  # First frame, all ids are assigned automatically
                track_id = self.person_next_id
                self.person_next_id += 1
            else:
                track_id, match_index = get_track_id_SpatialConsistency(
                    bbox_det, person_list_prev_frame)
                if track_id != -1:  # if candidate from prev frame matched, prevent it from matching another
                    del person_list_prev_frame[match_index]

            person_det_dict = {
                "img_id": img_id,
                "det_id": det_id,
                "track_id": track_id,
                "bbox": bbox_det,
                "keypoints": keypoints,
                'kp_norm': keypoints_norm,
                'kp_poseflow': keypoints_pf,
                'kp_score': kp_score,
                'proposal_score': proposal_score
            }

            person_list.append(person_det_dict)

        num_dets = len(person_list)
        for det_id in range(
                num_dets
        ):  # if IOU tracking failed, run pose matching tracking.
            person_dict = person_list[det_id]

            if person_dict[
                    "track_id"] == -1:  # this id means matching not found yet
                # track_id = bbox_det_dict["track_id"]
                track_id, match_index = get_track_id_SGCN(
                    person_dict["bbox"], person_list_prev_frame,
                    person_dict["kp_poseflow"])

                if track_id != -1:  # if candidate from prev frame matched, prevent it from matching another
                    del person_list_prev_frame[match_index]
                    person_dict["track_id"] = track_id
                else:
                    # if still can not find a match from previous frame, then assign a new id
                    # if track_id == -1 and not bbox_invalid(bbox_det_dict["bbox"]):
                    person_dict["track_id"] = self.person_next_id
                    self.person_next_id += 1

        self.person_list_list.append(person_list)
        return person_list
Ejemplo n.º 22
0
    def update(self):
        print(f'DataWriter_update_thread: {threading.current_thread().name}')
        # keep looping infinitely
        temp_kps = []
        while True:
            # if the thread indicator variable is set, stop the
            # thread
            if self.stopped:
                if self.save_video:
                    self.stream.release()
                return
            # otherwise, ensure the queue is not empty
            if not self.Q.empty():
                (boxes, scores, hm_data, pt1, pt2, orig_img,
                 im_name) = self.Q.get()
                orig_img = np.array(orig_img, dtype=np.uint8)
                if boxes is None:
                    if opt.save_img or opt.save_video or opt.vis:
                        img = orig_img
                        if opt.vis:
                            h, w, c = img.shape
                            img = cv2.resize(img, (int(w / 2), int(h / 2)),
                                             interpolation=cv2.INTER_CUBIC)
                            cv2.imshow("AlphaPose Demo", img)
                            cv2.waitKey(30)
                        if opt.save_img:
                            cv2.imwrite(
                                os.path.join(opt.outputpath, 'vis', im_name),
                                img)
                        if opt.save_video:
                            self.stream.write(img)

                    # Send the frame to the TCP client
                    img = orig_img
                    h, w, c = img.shape
                    img = cv2.resize(img, (int(w / 2), int(h / 2)),
                                     interpolation=cv2.INTER_CUBIC)
                    # Draw the bed-position rectangle (disabled)
                    # cv2.rectangle(img, (conf.Urls.bed_min_x, conf.Urls.bed_min_y),
                    #               (conf.Urls.bed_max_x, conf.Urls.bed_max_y), (0, 255, 0), 1)
                    self.tcp_client.send_img(img)
                else:
                    # location prediction (n, kp, 2) | score prediction (n, kp, 1)

                    preds_hm, preds_img, preds_scores = getPrediction(
                        hm_data, pt1, pt2, opt.inputResH, opt.inputResW,
                        opt.outputResH, opt.outputResW)

                    result = pose_nms(boxes, scores, preds_img, preds_scores)

                    if len(result) > 0:
                        pos = result[0]['keypoints'].unsqueeze(0).numpy()
                        pos = self.aligner.align_points(pos)[0]
                        pos = (pos[..., :2] - 129) / 255
                        pos = torch.FloatTensor(pos)
                        kp = torch.cat((pos, result[0]['kp_score']), 1)
                        kp = kp.unsqueeze(0)
                        if len(temp_kps) < 9:
                            kp = kp.reshape([1, -1]).cuda()
                            temp_kps.append(kp)
                            kp = kp.repeat(9, 1).reshape(1, -1)
                            outputs = self.pos_reg_model(kp)
                            _, preds = torch.max(outputs, 1)
                            classidx = preds.cpu()
                            result[0]['class'] = str(float(classidx))
                            result[0]['bbox'] = boxes[0]
                        else:
                            kp = kp.cuda().reshape(1, -1)
                            temp_kps.append(kp)
                            temp_kps.pop(0)
                            _temp_kps = torch.cat(temp_kps)
                            _temp_kps = _temp_kps.cuda()
                            _temp_kps = _temp_kps.reshape([1, -1])
                            outputs = self.pos_reg_model(_temp_kps)
                            _, preds = torch.max(outputs, 1)
                            classidx = preds.cpu()
                            result[0]['class'] = str(float(classidx))
                            result[0]['bbox'] = boxes[0]
                        # print(preds)
                        result = {'imgname': im_name, 'result': result}
                        self.result_Q.put(result)

                        self.final_result.append(result)

                        # Send the annotated frame to the TCP client
                        img = vis_frame(orig_img, result)
                        # h, w, c = img.shape
                        # img = cv2.resize(img, (int(w / 2), int(h / 2)), interpolation=cv2.INTER_CUBIC)
                        # Draw the bed-position rectangle (disabled)
                        # cv2.rectangle(img, (conf.Urls.bed_min_x, conf.Urls.bed_min_y), (conf.Urls.bed_max_x, conf.Urls.bed_max_y), (0, 255, 0), 1)
                        self.tcp_client.send_img(img)

                        if opt.save_img or opt.save_video or opt.vis:
                            # img = vis_frame(orig_img, result)
                            if opt.vis:

                                h, w, c = img.shape
                                #img = cv2.resize(img, (int(w / 4), int(h / 4)), interpolation=cv2.INTER_CUBIC)
                                cv2.imshow("AlphaPose Demo", img)
                                cv2.waitKey(30)
                            if opt.save_img:
                                cv2.imwrite(
                                    os.path.join(opt.outputpath, 'vis',
                                                 im_name), img)
                            if opt.save_video:
                                self.stream.write(img)
            else:
                time.sleep(0.1)