def update(self):
    # keep looping infinitely
    while True:
        # if the thread indicator variable is set, stop the thread
        if self.stopped:
            if self.save_video:
                self.stream.release()
            return
        # num = 1
        if not self.Q.empty():
            (boxes, scores, hm_data, pt1, pt2, orig_img, im_name) = self.Q.get()
            img_T = get_frame(self.iter, self.Cap_T)
            self.iter += 1
            orig_img = np.array(orig_img, dtype=np.uint8)
            if boxes is None:
                if opt.save_img or opt.save_video or opt.vis:
                    if self.iter < self.total_len:
                        img = orig_img
                        if opt.save_img or opt.save_video or opt.vis:
                            img_T = cv2.resize(img_T, (img.shape[1], img.shape[0]),
                                               interpolation=cv2.INTER_AREA)
                            img = np.hstack((img, img_T))
                            cv2.imshow("Action_Trainer Demo", img)
                            cv2.waitKey(60)
                        if opt.save_img:
                            cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
                        if opt.save_video:
                            self.stream.write(img)
                    else:
                        img = orig_img  # was commented out; `img` is otherwise undefined here
                        cv2.putText(img, "The movie repeats, press ctrl+c key to terminate",
                                    (50, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)
                        cv2.imshow("Action_Trainer Demo", img)
                        cv2.waitKey(300)
                        # restart the reference clip and drop any queued frames
                        self.iter = 0
                        with self.Q.mutex:
                            self.Q.queue.clear()
            else:
                preds_hm, preds_img, preds_scores = getPrediction(
                    hm_data, pt1, pt2, opt.inputResH, opt.inputResW,
                    opt.outputResH, opt.outputResW)
                result = pose_nms(boxes, scores, preds_img, preds_scores)
                result2 = {
                    'imgname': im_name,
                    'result': result
                }
                self.final_result.append(result2)
                try:
                    if self.iter < self.total_len:
                        k = self.pose_id_[self.iter]
                        if k == 0:
                            continue
                        Old_pose = np.array(result[0]['keypoints'])
                        poseU = old2new_joint(Old_pose)
                        poseU_align = align_torso(poseU)
                        poseT = self.T_pose[k]
                        poseT_align = align_torso(poseT)
                        theta = get_diff(poseT_align, poseU_align)
                        img = draw_pose_final(poseT, poseU, orig_img, theta)
                        # img_T = draw_pos(poseT, img_T)
                        img_T = cv2.resize(img_T, (img.shape[1], img.shape[0]),
                                           interpolation=cv2.INTER_AREA)
                        img = np.hstack((img, img_T))
                        # cv2.putText(img, str(self.iter), (900, 150), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 0), 3)
                        cv2.imshow("Action_Trainer Demo", img)
                        cv2.waitKey(30)
                    else:
                        cv2.putText(img, "The movie repeats, press ctrl+c key to terminate",
                                    (50, 100), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 3)
                        cv2.imshow("Action_Trainer Demo", img)
                        cv2.waitKey(300)
                        self.iter = 0
                        with self.Q.mutex:
                            self.Q.queue.clear()
                except Exception:  # the original `except:` was commented out, leaving a dangling try
                    print("***********", self.iter)
        else:
            time.sleep(0.1)
def update(self):
    count = 0
    # filepath = '/home/yurik/Documents/Program/Alphapose_zed_video/testdata/20191014/walkstraightly/walkstraightly.svo'
    # init = sl.InitParameters(svo_input_filename=filepath, svo_real_time_mode=False)
    # init.depth_mode = sl.DEPTH_MODE.DEPTH_MODE_QUALITY
    # cam = sl.Camera()
    # runtime = sl.RuntimeParameters()
    # status = cam.open(init)
    # mat = sl.Mat()
    # zeroarr = np.zeros((720, 1280, 3))
    # keep looping infinitely
    while True:
        # if the thread indicator variable is set, stop the thread
        if self.stopped:
            if self.save_video:
                self.stream.release()
            return
        # otherwise, ensure the queue is not empty
        if not self.Q.empty():
            (boxes, scores, hm_data, pt1, pt2, orig_img, im_name) = self.Q.get()
            orig_img = np.array(orig_img, dtype=np.uint8)
            if boxes is None:
                if opt.save_img or opt.save_video or opt.vis:
                    img = orig_img
                    if opt.vis:
                        cv2.imshow("AlphaPose Demo", img)
                        cv2.waitKey(30)
                    if opt.save_img:
                        cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
                    if opt.save_video:
                        self.stream.write(img)
            else:
                # location prediction (n, kp, 2) | score prediction (n, kp, 1)
                if opt.matching:
                    preds = getMultiPeakPrediction(
                        hm_data, pt1.numpy(), pt2.numpy(), opt.inputResH,
                        opt.inputResW, opt.outputResH, opt.outputResW)
                    result = matching(boxes, scores.numpy(), preds)
                else:
                    preds_hm, preds_img, preds_scores = getPrediction(
                        hm_data, pt1, pt2, opt.inputResH, opt.inputResW,
                        opt.outputResH, opt.outputResW)
                    result = pose_nms(boxes, scores, preds_img, preds_scores)
                result = {
                    'imgname': im_name,
                    'result': result
                }
                # 3D coordinates computation
                ppl = result['result']
                ppl_num = len(ppl)
                self.coordinates_u, self.coordinates_v, self.truex, self.truey, self.dists = \
                    fl.people_3d_coord(ppl, ppl_num, self.video_mode, self.camMtx1, orig_img)
                self.final_result.append(result)
                if opt.save_img or opt.save_video or opt.vis:
                    img = vis_frame(orig_img, result)
                    # err = cam.grab(runtime)
                    # if err == sl.ERROR_CODE.SUCCESS:
                    #     cam.retrieve_image(mat, sl.VIEW.VIEW_DEPTH)
                    #     depthmap = mat.get_data()
                    #     if img.shape[2] == 3:
                    #         depthmap = cv2.cvtColor(depthmap, cv2.COLOR_RGBA2RGB)
                    #     depthmap = cv2.resize(depthmap, (int(img.shape[1]/2), img.shape[0]))
                    #     depthmap = cv2.applyColorMap(depthmap, cv2.COLORMAP_JET)
                    #     depthmap = np.hstack((depthmap, zeroarr))
                    #     depthmap = depthmap.astype(np.uint8)
                    #     img = cv2.addWeighted(img, 0.5, depthmap, 0.5, 3)
                    if len(self.coordinates_v) > 0 and len(self.coordinates_u) > 0:
                        for i in range(len(self.coordinates_v)):
                            # cv2.putText(img, 'z:' + str(round((self.dists[i] / 10), 1)),
                            #             (int(self.coordinates_u[i]), int(self.coordinates_v[i]) - 15),
                            #             cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 3, 8)
                            cv2.putText(img, str(round((self.truex[i] / 10), 1)),
                                        (int(self.coordinates_u[i]), int(self.coordinates_v[i]) - 15),
                                        cv2.FONT_HERSHEY_PLAIN, 2, (0, 255, 0), 3, 8)
                            cv2.putText(img, str(round((self.truey[i] / 10), 1)),
                                        (int(self.coordinates_u[i]) + 200, int(self.coordinates_v[i]) - 15),
                                        cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 3, 8)
                            cv2.putText(img, str(round((self.dists[i] / 10), 1)),
                                        (int(self.coordinates_u[i]) + 400, int(self.coordinates_v[i]) - 15),
                                        cv2.FONT_HERSHEY_PLAIN, 2, (0, 0, 255), 3, 8)
                            cv2.putText(img, 'frames: ' + str(count), (620, 620),
                                        cv2.FONT_HERSHEY_PLAIN, 2, (0, 100, 90), 3, 8)
                    else:
                        cv2.putText(img, '[N/A]', (40, 620),
                                    cv2.FONT_HERSHEY_PLAIN, 2, (0, 100, 90), 3, 8)
                    if opt.vis:
                        cv2.imshow("AlphaPose Demo", img)
                        cv2.waitKey(30)
                    if opt.save_img:
                        cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
                    if opt.save_video:
                        self.stream.write(img)
        else:
            time.sleep(0.1)
        count = count + 1
def update(self):
    # keep looping infinitely
    temp_kps = []
    while True:
        # if the thread indicator variable is set, stop the thread
        if self.stopped:
            if self.save_video:
                self.stream.release()
            return
        # otherwise, ensure the queue is not empty
        if not self.Q.empty():
            (boxes, scores, hm_data, pt1, pt2, orig_img, im_name) = self.Q.get()
            orig_img = np.array(orig_img, dtype=np.uint8)
            if boxes is None:
                if opt.save_img or opt.save_video or opt.vis:
                    img = orig_img
                    if opt.vis:
                        cv2.imshow("AlphaPose Demo", img)
                        cv2.waitKey(30)
                    if opt.save_img:
                        cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
                    if opt.save_video:
                        self.stream.write(img)
            else:
                # location prediction (n, kp, 2) | score prediction (n, kp, 1)
                preds_hm, preds_img, preds_scores = getPrediction(
                    hm_data, pt1, pt2, opt.inputResH, opt.inputResW,
                    opt.outputResH, opt.outputResW)
                result = pose_nms(boxes, scores, preds_img, preds_scores)
                # normalize keypoints and stack them with their confidence scores
                pos = result[0]['keypoints'].unsqueeze(0).numpy()
                pos = self.aligner.align_points(pos)[0]
                pos = (pos[..., :2] - 129) / 255
                pos = torch.FloatTensor(pos)
                kp = torch.cat((pos, result[0]['kp_score']), 1)
                kp = kp.unsqueeze(0)
                if len(temp_kps) < 9:
                    # window not yet full: pad it by repeating the current frame
                    kp = kp.reshape([1, -1]).cuda()
                    temp_kps.append(kp)
                    kp = kp.repeat(9, 1).reshape(1, -1)
                    outputs = self.pos_reg_model(kp)
                    _, preds = torch.max(outputs, 1)
                    classidx = preds.cpu()
                    result[0]['class'] = str(float(classidx))
                else:
                    # slide the 9-frame window forward by one frame
                    kp = kp.cuda().reshape(1, -1)
                    temp_kps.append(kp)
                    temp_kps.pop(0)
                    _temp_kps = torch.cat(temp_kps)  # the bare `.cuda()` call here was a no-op: it is not in-place, and the entries are already on GPU
                    _temp_kps = _temp_kps.reshape([1, -1])
                    outputs = self.pos_reg_model(_temp_kps)
                    _, preds = torch.max(outputs, 1)
                    classidx = preds.cpu()
                    result[0]['class'] = str(float(classidx))
                # print(preds)
                result = {
                    'imgname': im_name,
                    'result': result
                }
                self.result_Q.put((boxes, classidx))
                self.final_result.append(result)
                if opt.save_img or opt.save_video or opt.vis:
                    img = vis_frame(orig_img, result)
                    if opt.vis:
                        cv2.imshow("AlphaPose Demo", img)
                        cv2.waitKey(30)
                    if opt.save_img:
                        cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
                    if opt.save_video:
                        self.stream.write(img)
        else:
            time.sleep(0.1)
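# A minimal, self-contained sketch of the 9-frame sliding-window classification used
# in update() above: each frame contributes normalized keypoints plus scores flattened
# to one (1, 17*3) vector, the last 9 frames form the feature window, and a classifier
# maps it to an action class. `WindowClassifier` and its layer sizes are hypothetical
# stand-ins for `pos_reg_model`, which this snippet does not define.
import torch
import torch.nn as nn

class WindowClassifier(nn.Module):
    # assumed shape: 9 frames * 17 keypoints * (x, y, score) -> n_classes logits
    def __init__(self, n_frames=9, n_kps=17, n_classes=5):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(n_frames * n_kps * 3, 256),
            nn.ReLU(),
            nn.Linear(256, n_classes),
        )

    def forward(self, x):
        return self.net(x)

def classify_window(model, temp_kps, kp, n_frames=9):
    """Mirror of the buffering above: repeat the current frame until the window
    is full, then slide. `kp` is a flattened (1, 17*3) tensor."""
    if len(temp_kps) < n_frames:
        temp_kps.append(kp)
        window = kp.repeat(n_frames, 1).reshape(1, -1)  # pad by repetition
    else:
        temp_kps.append(kp)
        temp_kps.pop(0)  # drop the oldest frame
        window = torch.cat(temp_kps).reshape(1, -1)
    logits = model(window)
    return int(logits.argmax(dim=1))  # predicted class index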
def detect_main(im_name, orig_img, det_model, pose_model, opt):
    args = opt
    mode = args.mode
    inp_dim = int(opt.inp_dim)
    dim = orig_img.shape[1], orig_img.shape[0]
    img_ = (letterbox_image(orig_img, (inp_dim, inp_dim)))
    img_ = img_[:, :, ::-1].transpose((2, 0, 1)).copy()
    img = torch.from_numpy(img_).float().div(255.0).unsqueeze(0)

    img = [img]
    orig_img = [orig_img]
    im_name = [im_name]
    im_dim_list = [dim]
    # img.append(img_k)
    # orig_img.append(orig_img_k)
    # im_name.append(im_name_k)
    # im_dim_list.append(im_dim_list_k)

    with torch.no_grad():
        # Human Detection
        img = torch.cat(img)
        im_dim_list = torch.FloatTensor(im_dim_list).repeat(1, 2)
        # im_dim_list_ = im_dim_list

    # DetectionLoader
    det_inp_dim = int(det_model.net_info['height'])
    assert det_inp_dim % 32 == 0
    assert det_inp_dim > 32
    # res_n = 0
    with torch.no_grad():
        img = img.cuda()
        prediction = det_model(img, CUDA=True)  # a tensor
        boxes_chair = get_box(prediction, det_inp_dim, im_dim_list, opt.confidence, opt.num_classes, 56)
        # boxes_sofa = get_box(prediction, det_inp_dim, im_dim_list, opt.confidence, opt.num_classes, 57)
        # boxes_bed = get_box(prediction, det_inp_dim, im_dim_list, opt.confidence, opt.num_classes, 59)
        dets = dynamic_write_results(prediction, opt.confidence, opt.num_classes, 0,
                                     nms=True, nms_conf=opt.nms_thesh)
        if isinstance(dets, int) or dets.shape[0] == 0:
            # cv2.imwrite('err_result/no_person/'+im_name[0][0:-4]+'_re.jpg', orig_img[0])
            return []
        dets = dets.cpu()
        im_dim_list = torch.index_select(im_dim_list, 0, dets[:, 0].long())
        scaling_factor = torch.min(det_inp_dim / im_dim_list, 1)[0].view(-1, 1)
        # coordinate transfer
        dets[:, [1, 3]] -= (det_inp_dim - scaling_factor * im_dim_list[:, 0].view(-1, 1)) / 2
        dets[:, [2, 4]] -= (det_inp_dim - scaling_factor * im_dim_list[:, 1].view(-1, 1)) / 2
        dets[:, 1:5] /= scaling_factor
        for j in range(dets.shape[0]):
            dets[j, [1, 3]] = torch.clamp(dets[j, [1, 3]], 0.0, im_dim_list[j, 0])
            dets[j, [2, 4]] = torch.clamp(dets[j, [2, 4]], 0.0, im_dim_list[j, 1])
        boxes = dets[:, 1:5]
        scores = dets[:, 5:6]
        boxes_k = boxes[dets[:, 0] == 0]
        if isinstance(boxes_k, int) or boxes_k.shape[0] == 0:
            boxes = None
            scores = None
            inps = None
            pt1 = None
            pt2 = None
        else:
            inps = torch.zeros(boxes_k.size(0), 3, opt.inputResH, opt.inputResW)
            pt1 = torch.zeros(boxes_k.size(0), 2)
            pt2 = torch.zeros(boxes_k.size(0), 2)
            boxes = boxes_k
            scores = scores[dets[:, 0] == 0]
        orig_img = orig_img[0]
        im_name = im_name[0]
        # orig_img[k], im_name[k], boxes_k, scores[dets[:, 0] == k], inps, pt1, pt2

    # DetectionProcess
    with torch.no_grad():
        if boxes is None or boxes.nelement() == 0:
            pass
        else:
            inp = im_to_torch(cv2.cvtColor(orig_img, cv2.COLOR_BGR2RGB))
            inps, pt1, pt2 = crop_from_dets(inp, boxes, inps, pt1, pt2)
            # self.Q.put((inps, orig_img, im_name, boxes, scores, pt1, pt2))

    batchSize = args.posebatch
    # fall_res_all = []
    keypoint_res = []  # initialized here so the final return works when no person is found
    for i in range(1):
        with torch.no_grad():
            if boxes is None or boxes.nelement() == 0:
                # writer.save(None, None, None, None, None, orig_img, im_name.split('/')[-1])
                # res_n = 0
                continue
            # Pose Estimation
            datalen = inps.size(0)
            leftover = 0
            if (datalen) % batchSize:
                leftover = 1
            num_batches = datalen // batchSize + leftover
            hm = []
            for j in range(num_batches):
                inps_j = inps[j * batchSize:min((j + 1) * batchSize, datalen)].cuda()
                hm_j = pose_model(inps_j)
                hm.append(hm_j)
            hm = torch.cat(hm)
            hm = hm.cpu()
            # writer.save(boxes, scores, hm, pt1, pt2, orig_img, im_name.split('/')[-1])

            fall_res = []
            # fall_res.append(im_name.split('/')[-1])
            if opt.matching:
                preds = getMultiPeakPrediction(
                    hm, pt1.numpy(), pt2.numpy(), opt.inputResH,
                    opt.inputResW, opt.outputResH, opt.outputResW)
                result = matching(boxes, scores.numpy(), preds)
            else:
                preds_hm, preds_img, preds_scores = getPrediction(
                    hm, pt1, pt2, opt.inputResH, opt.inputResW,
                    opt.outputResH, opt.outputResW)
                result = pose_nms(boxes, scores, preds_img, preds_scores)
            result = {'imgname': im_name, 'result': result}
            # img = orig_img
            img = vis_frame(orig_img, result)
            for human in result['result']:
                keypoint = human['keypoints']
                kp_scores = human['kp_score']
                keypoint = keypoint.numpy()
                xmax = max(keypoint[:, 0])
                xmin = min(keypoint[:, 0])
                ymax = max(keypoint[:, 1])
                ymin = min(keypoint[:, 1])
                box_hm = [xmin, ymin, xmax, ymax]
                kp_num = 0
                for n in range(len(kp_scores)):
                    if kp_scores[n] > 0.05:
                        kp_num += 1
                if kp_num < 10:
                    # too few confident keypoints to judge
                    # cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (255, 0, 0), 2)
                    fall_res.append([False, xmin, ymin, xmax, ymax])
                    # print("kp_num:" + str(kp_num))
                    continue
                overlap = []
                for box in boxes_chair:
                    overlap.append(compute_overlap(box_hm, box))
                # for box in boxes_sofa:
                #     overlap.append(compute_overlap(box_hm, box))
                # for box in boxes_bed:
                #     overlap.append(compute_overlap(box_hm, box))
                if len(overlap) > 0 and max(overlap) >= 0.6:
                    # person largely overlaps a chair: not a fall
                    # res_n = 0
                    fall_res.append([False, xmin, ymin, xmax, ymax])
                    keypoint_res.append(keypoint)
                    # cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (255, 0, 0), 2)
                    # print("overlap:" + str(overlap))
                    continue
                w = xmax - xmin
                h = ymax - ymin
                # distance = abs((keypoint[15][1] + keypoint[16][1]) / 2 - (keypoint[11][1] + keypoint[12][1]) / 2)
                # head center from keypoints 1-4; the original summed keypoint[2] twice while dividing by 4
                xhead = (keypoint[1][0] + keypoint[2][0] + keypoint[3][0] + keypoint[4][0]) / 4
                yhead = (keypoint[1][1] + keypoint[2][1] + keypoint[3][1] + keypoint[4][1]) / 4
                xfeet = (keypoint[15][0] + keypoint[16][0]) / 2
                yfeet = (keypoint[15][1] + keypoint[16][1]) / 2
                d_ear = (abs(keypoint[3][0] - keypoint[4][0]) ** 2 +
                         abs(keypoint[3][1] - keypoint[4][1]) ** 2) ** 0.5
                r = (w ** 2 + h ** 2) ** 0.5 / d_ear
                if kp_scores[3] > 0.05 and kp_scores[4] > 0.05 and r < 4:
                    # body diagonal small relative to ear distance: person is close to the camera
                    fall_res.append([False, xmin, ymin, xmax, ymax])
                    # cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (255, 0, 0), 2)
                    # print("r<4")
                    continue
                # xhead_foot = abs(xfeet - xhead)
                # yhead_foot = abs(yfeet - yhead)
                # dhead_foot = (xhead_foot ** 2 + yhead_foot ** 2) ** 0.5
                # ratio = yhead_foot / dhead_foot
                if min(kp_scores[3], kp_scores[4], kp_scores[15], kp_scores[16]) > 0.05 and \
                        yfeet < (keypoint[3][1] + keypoint[4][1]) / 2:
                    # feet above the ears: fall
                    # cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2)
                    # font = cv2.FONT_HERSHEY_SIMPLEX
                    # cv2.putText(img, 'Warning!Fall!', (int(xmin + 10), int(ymax - 10)), font, 1, (0, 255, 0), 2)
                    fall_res.append([True, xmin, ymin, xmax, ymax])
                    # res_n = 2
                elif w / h >= 1.0:
                    # bounding box wider than tall: fall
                    # cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (0, 0, 255), 2)
                    # font = cv2.FONT_HERSHEY_SIMPLEX
                    # cv2.putText(img, 'Warning!Fall', (int(xmin + 10), int(ymax - 10)), font, 1, (0, 0, 255), 2)
                    fall_res.append([True, xmin, ymin, xmax, ymax])
                    # res_n = 1
                else:
                    # cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (255, 0, 0), 2)
                    # print("normal")
                    fall_res.append([False, xmin, ymin, xmax, ymax])
                    # res_n = 0
            # cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
            '''
            for box in boxes_chair:
                cv2.rectangle(img, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255, 255, 0), 2)
            for box in boxes_sofa:
                cv2.rectangle(img, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (0, 255, 255), 2)
            for box in boxes_bed:
                cv2.rectangle(img, (int(box[0]), int(box[1])), (int(box[2]), int(box[3])), (255, 0, 255), 2)
            cv2.imwrite('err_result/false/'+im_name[0:-4]+'_re.jpg', img)
            '''
    return keypoint_res
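# `compute_overlap` is used above but not defined in this snippet. A plausible
# minimal implementation, assuming it returns the fraction of the person box
# (box_hm, in [xmin, ymin, xmax, ymax] order) covered by a furniture box; the
# exact definition in the original project may differ.
def compute_overlap(box_hm, box):
    ix1 = max(box_hm[0], box[0])
    iy1 = max(box_hm[1], box[1])
    ix2 = min(box_hm[2], box[2])
    iy2 = min(box_hm[3], box[3])
    iw = max(0.0, ix2 - ix1)
    ih = max(0.0, iy2 - iy1)
    inter = iw * ih
    area_hm = max(1e-6, (box_hm[2] - box_hm[0]) * (box_hm[3] - box_hm[1]))
    return inter / area_hm  # share of the person box overlapped by the chair box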
def update(self):
    # keep looping infinitely
    while True:
        sys.stdout.flush()
        print("generator len : " + str(self.Q.qsize()))
        # if the thread indicator variable is set, stop the thread
        if self.stopped:
            if self.save_video:
                self.stream.release()
            return
        # otherwise, ensure the queue is not empty
        if not self.det_processor.Q.empty():
            with torch.no_grad():
                (inps, orig_img, im_name, boxes, scores, pt1, pt2) = self.det_processor.read()
                if orig_img is None:
                    print(f'{im_name} image read None: handle_video')
                    break
                orig_img = np.array(orig_img, dtype=np.uint8)
                if boxes is None or boxes.nelement() == 0:
                    (boxes, scores, hm_data, pt1, pt2, orig_img, im_name) = (
                        None, None, None, None, None, orig_img, im_name.split('/')[-1])
                    # if opt.save_img or opt.save_video or opt.vis:
                    #     img = orig_img
                    #     if opt.vis:
                    #         cv2.imshow("AlphaPose Demo", img)
                    #         cv2.waitKey(30)
                    #     if opt.save_img:
                    #         cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
                    #     if opt.save_video:
                    #         self.stream.write(img)
                else:
                    # location prediction (n, kp, 2) | score prediction (n, kp, 1)
                    datalen = inps.size(0)
                    batchSize = 10  # args.posebatch
                    leftover = 0
                    if datalen % batchSize:
                        leftover = 1
                    num_batches = datalen // batchSize + leftover
                    hm = []
                    sys.stdout.flush()
                    print("hhhh")
                    for j in range(num_batches):
                        inps_j = inps[j * batchSize:min((j + 1) * batchSize, datalen)]  # .cuda()
                        hm_j = self.pose_model(inps_j)
                        hm.append(hm_j)
                    hm = torch.cat(hm)
                    hm = hm.cpu().data
                    (boxes, scores, hm_data, pt1, pt2, orig_img, im_name) = (
                        boxes, scores, hm, pt1, pt2, orig_img, im_name.split('/')[-1])
                    if opt.matching:
                        preds = getMultiPeakPrediction(
                            hm_data, pt1.numpy(), pt2.numpy(), opt.inputResH,
                            opt.inputResW, opt.outputResH, opt.outputResW)
                        result = matching(boxes, scores.numpy(), preds)
                    else:
                        preds_hm, preds_img, preds_scores = getPrediction(
                            hm_data, pt1, pt2, opt.inputResH, opt.inputResW,
                            opt.outputResH, opt.outputResW)
                        result = pose_nms(boxes, scores, preds_img, preds_scores)
                    result = {'imgname': im_name, 'result': result}
                    self.final_result.append(result)
                    kpts = []
                    no_person = []
                    if not result['result']:  # No people
                        self.Q.put(None)  # TODO
                    else:
                        self.Q.put(result)
                    # if opt.save_img or opt.save_video or opt.vis:
                    #     img = vis_frame(orig_img, result)
                    #     if opt.vis:
                    #         cv2.imshow("AlphaPose Demo", img)
                    #         cv2.waitKey(30)
                    #     if opt.save_img:
                    #         cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
                    #     if opt.save_video:
                    #         self.stream.write(img)
        else:
            time.sleep(0.1)
def update(self):
    # keep looping infinitely
    while True:
        # if the thread indicator variable is set, stop the thread
        if self.stopped:
            if self.save_video:
                self.stream.release()
            return
        # otherwise, ensure the queue is not empty
        if not self.Q.empty():
            (boxes, scores, hm_data, pt1, pt2, orig_img, im_name) = self.Q.get()
            orig_img = np.array(orig_img, dtype=np.uint8)
            if boxes is None:
                if opt.save_img or opt.save_video or opt.vis:
                    img = orig_img
                    if opt.vis:
                        cv2.imshow("AlphaPose Demo", img)
                        cv2.waitKey(30)
                    if opt.save_img:
                        cv2.imwrite(os.path.join(self.outputpath, self.dir_folder, im_name), img)
                    if opt.save_video:
                        self.stream.write(img)
            else:
                # location prediction (n, kp, 2) | score prediction (n, kp, 1)
                # if opt.matching:
                #     preds = getMultiPeakPrediction(
                #         hm_data, pt1.numpy(), pt2.numpy(), opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW)
                #     result = matching(boxes, scores.numpy(), preds)
                # else:
                preds_hm, preds_img, preds_scores = getPrediction(
                    hm_data, pt1, pt2, opt.inputResH, opt.inputResW,
                    opt.outputResH, opt.outputResW)
                # (hm_data, pt1, pt2, 320, 256, 80, 64)
                result = pose_nms(boxes, scores, preds_img, preds_scores)
                # pose_nms(bbox list, bbox score list, pose location list, pose score list)
                # result: [{'keypoints', 'kp_score', 'proposal_score'}, ...], see pPose_nms.py line 114
                result = {'imgname': im_name, 'result': result}
                # self.final_result.append(result)
                self.show_img = vis_frame(orig_img, result)
                cv2.imwrite(os.path.join(self.outputpath, self.dir_folder, im_name),
                            self.show_img)  # the original wrote `img`, which is undefined in this branch
                # if opt.save_img or opt.save_video or opt.vis:
                #     img = vis_frame(orig_img, result)
                #     if opt.vis:
                #         cv2.imshow("AlphaPose Demo", img)
                #         cv2.waitKey(30)
                #     if opt.save_img:
                #         cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
                #     if opt.save_video:
                #         self.stream.write(img)
        else:
            # time.sleep(0.1)
            pass
def update(self):
    # keep looping infinitely
    while True:
        sys.stdout.flush()
        print("generator len : " + str(self.Q.qsize()))
        # if the thread indicator variable is set, stop the thread
        # if self.stopped:
        #     cv2.destroyAllWindows()
        #     if self.save_video:
        #         self.stream.release()
        #     return
        # otherwise, ensure the queue is not empty
        if not self.det_processor.Q.empty():
            with torch.no_grad():
                (inps, orig_img, im_name, boxes, scores, pt1, pt2) = self.det_processor.read()
                if orig_img is None:
                    sys.stdout.flush()
                    print(f'{im_name} image read None: handle_video')
                    break
                orig_img = np.array(orig_img, dtype=np.uint8)
                if boxes is None or boxes.nelement() == 0:
                    (boxes, scores, hm_data, pt1, pt2, orig_img, im_name) = (
                        None, None, None, None, None, orig_img, im_name.split('/')[-1])
                    res = {'keypoints': -1, 'image': orig_img}
                    self.Q.put(res)  # TODO
                    # cv2.imwrite("/home/hrs/Desktop/dd/now.jpg", orig_img)
                    # img = orig_img
                    # cv2.imshow("AlphaPose Demo", img)
                    # cv2.waitKey(30)
                    # self.image = self.ax_in.imshow(orig_img, aspect='equal')
                    # self.image.set_data(orig_img)
                    # plt.draw()
                    # plt.pause(0.000000000000000001)
                    # if opt.save_img or opt.save_video or opt.vis:
                    #     img = orig_img
                    #     if opt.vis:
                    #         cv2.imshow("AlphaPose Demo", img)
                    #         cv2.waitKey(30)
                    #     if opt.save_img:
                    #         cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
                    #     if opt.save_video:
                    #         self.stream.write(img)
                else:
                    # location prediction (n, kp, 2) | score prediction (n, kp, 1)
                    datalen = inps.size(0)
                    batchSize = 20  # args.posebatch
                    leftover = 0
                    if datalen % batchSize:
                        leftover = 1
                    num_batches = datalen // batchSize + leftover
                    hm = []
                    # sys.stdout.flush()
                    # print("hhhh")
                    for j in range(num_batches):
                        inps_j = inps[j * batchSize:min((j + 1) * batchSize, datalen)].cuda()
                        hm_j = self.pose_model(inps_j)
                        hm.append(hm_j)
                    # time1 = time.time()
                    hm = torch.cat(hm)
                    hm = hm.cpu().data
                    (boxes, scores, hm_data, pt1, pt2, orig_img, im_name) = (
                        boxes, scores, hm, pt1, pt2, orig_img, im_name.split('/')[-1])
                    if opt.matching:
                        preds = getMultiPeakPrediction(
                            hm_data, pt1.numpy(), pt2.numpy(), opt.inputResH,
                            opt.inputResW, opt.outputResH, opt.outputResW)
                        result = matching(boxes, scores.numpy(), preds)
                    else:
                        preds_hm, preds_img, preds_scores = getPrediction(
                            hm_data, pt1, pt2, opt.inputResH, opt.inputResW,
                            opt.outputResH, opt.outputResW)
                        result = pose_nms(boxes, scores, preds_img, preds_scores)
                    result = {'imgname': im_name, 'result': result}
                    self.final_result.append(result)
                    # time2 = time.time()
                    # print(time2 - time1)
                    # img = vis_frame(orig_img, result)
                    # cv2.imshow("AlphaPose Demo", img)
                    # cv2.imwrite("/home/hrs/Desktop/dd/now.jpg", img)
                    # cv2.waitKey(30)
                    # self.point.set_offsets(keypoints[self.i])
                    # self.image = self.ax_in.imshow(orig_img, aspect='equal')
                    # self.image.set_data(orig_img)
                    # plt.draw()
                    # plt.pause(0.000000000000000001)
                    if not result['result']:  # No people
                        res = {'keypoints': -1, 'image': orig_img}
                        self.Q.put(res)  # TODO
                    else:
                        # keep the person with the highest proposal_score * bbox area
                        kpt = max(
                            result['result'],
                            key=lambda x: x['proposal_score'].data[0] * calculate_area(x['keypoints']),
                        )['keypoints']
                        res = {'keypoints': kpt, 'image': orig_img}
                        self.Q.put(res)
                    # kpt_np = kpt.numpy()
                    # n = kpt_np.shape[0]
                    # print(kpt_np.shape)
                    # point_list = [(kpt_np[m, 0], kpt_np[m, 1]) for m in range(17)]
                    # for point in point_list:
                    #     cv2.circle(pose_img, point, 1, (0, 43, 32), 4)
                    # cv2.imshow(self.window, pose_img)
                    # cv2.waitKey()
                    # if opt.save_img or opt.save_video or opt.vis:
                    #     img = vis_frame(orig_img, result)
                    #     if opt.vis:
                    #         cv2.imshow("AlphaPose Demo", img)
                    #         cv2.waitKey(30)
                    #     if opt.save_img:
                    #         cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
                    #     if opt.save_video:
                    #         self.stream.write(img)
        else:
            time.sleep(0.1)
def run(self, folder_or_imglist, sample_rate):
    time_run_start = time.time()
    if isinstance(folder_or_imglist, str):  # the original compared `type(...) == 'str'`, which is always False
        inputpath = folder_or_imglist
        print(inputpath)
        args.inputpath = inputpath
        # Load input images
        im_names = [img for img in sorted(os.listdir(inputpath)) if img.endswith('jpg')]
        N = len(im_names)
        dataset = Image_loader(im_names, format='yolo')
    else:
        N = len(folder_or_imglist)
        imglist = [img for i, img in enumerate(folder_or_imglist) if i % sample_rate == 0]
        dataset = Image_loader_from_images(imglist, format='yolo')
    # Load detection loader
    test_loader = DetectionLoader(dataset, self.det_model, self.cuda_id).start()
    skeleton_result_list = []
    for i in range(dataset.__len__()):
        with torch.no_grad():
            (inp, orig_img, im_name, boxes, scores) = test_loader.read()
            if boxes is None or boxes.nelement() == 0:
                skeleton_result = None
            else:
                # Pose Estimation
                time_det_start = time.time()
                inps, pt1, pt2 = crop_from_dets(inp, boxes)
                inps = Variable(inps.cuda(self.cuda_id))
                hm = self.pose_model(inps)
                hm_data = hm.cpu().data
                preds_hm, preds_img, preds_scores = getPrediction(
                    hm_data, pt1, pt2, args.inputResH, args.inputResW,
                    args.outputResH, args.outputResW)
                skeleton_result = pose_nms(boxes, scores, preds_img, preds_scores)
                self.time_det += (time.time() - time_det_start)
            skeleton_result_list.append(skeleton_result)

    # map the sampled results back onto the full frame index range
    skeleton_list = []
    j = 0
    skeleton_result = skeleton_result_list[0]  # frame 0 is always sampled; without this the first iterations read an undefined name
    for i in range(N):
        im_name = 'image_{:05d}.jpg'.format(i + 1)
        if (i == sample_rate * (1 + j)):
            j += 1
            skeleton_result = skeleton_result_list[j]
        skeleton_list.append([im_name.split('/')[-1]])
        if skeleton_result is not None:
            for human in skeleton_result:
                kp_preds = human['keypoints']
                kp_scores = human['kp_score']
                # ## remove small hand
                # if float(kp_scores[9]) < 0.2 and float(kp_scores[10]) < 0.2:
                #     continue
                ## extend LWrist
                kp_preds[9, 0] = self.__extend_wrist(kp_preds[7, 0], kp_preds[9, 0])
                kp_preds[9, 1] = self.__extend_wrist(kp_preds[7, 1], kp_preds[9, 1])
                ## extend RWrist
                kp_preds[10, 0] = self.__extend_wrist(kp_preds[8, 0], kp_preds[10, 0])
                kp_preds[10, 1] = self.__extend_wrist(kp_preds[8, 1], kp_preds[10, 1])
                for n in range(kp_scores.shape[0]):
                    skeleton_list[-1].append(int(kp_preds[n, 0]))
                    skeleton_list[-1].append(int(kp_preds[n, 1]))
                    skeleton_list[-1].append(round(float(kp_scores[n]), 2))
    self.time_run += (time.time() - time_run_start)
    return skeleton_list
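# `__extend_wrist` is called above but not shown. Judging from the call sites
# (elbow coordinate first, wrist coordinate second, applied per axis), it most
# likely pushes the wrist point outward along the elbow->wrist direction so the
# hand region is better covered. The function name and extension factor below
# are hypothetical; the original helper may use a different factor.
def extend_wrist_sketch(elbow_coord, wrist_coord, factor=0.33):
    # new_wrist = wrist + factor * (wrist - elbow), applied to one coordinate at a time
    return wrist_coord + factor * (wrist_coord - elbow_coord)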
def update(self):
    # keep looping infinitely
    frame_prev = -1
    frame_cur = 0
    img_id = -1
    next_id = 0
    bbox_dets_list_list = []
    keypoints_list_list = []
    car_dets_list_list = []
    car_next_id = 0
    while True:
        # if the thread indicator variable is set, stop the thread
        if self.stopped:
            if self.save_video:
                self.stream.release()
            return
        # otherwise, ensure the queue is not empty
        if not self.Q.empty():
            (boxes, scores, hm_data, pt1, pt2, orig_img, img_id, CAR) = self.Q.get()
            # print(img_id)
            orig_img = np.array(orig_img, dtype=np.uint8)
            img = orig_img
            bbox_dets_list = []  # keyframe: start from empty
            keypoints_list = []  # keyframe: start from empty
            if boxes is None:  # No person detection
                bbox_det_dict = {"img_id": img_id,
                                 "det_id": 0,
                                 "track_id": None,
                                 "bbox": [0, 0, 2, 2]}
                bbox_dets_list.append(bbox_det_dict)
                keypoints_dict = {"img_id": img_id,
                                  "det_id": 0,
                                  "track_id": None,
                                  "keypoints": []}
                keypoints_list.append(keypoints_dict)
                bbox_dets_list_list.append(bbox_dets_list)
                keypoints_list_list.append(keypoints_list)
            else:
                if opt.matching:
                    preds = getMultiPeakPrediction(
                        hm_data, pt1.numpy(), pt2.numpy(), opt.inputResH,
                        opt.inputResW, opt.outputResH, opt.outputResW)
                    result = matching(boxes, scores.numpy(), preds)
                else:
                    preds_hm, preds_img, preds_scores = getPrediction(
                        hm_data, pt1, pt2, opt.inputResH, opt.inputResW,
                        opt.outputResH, opt.outputResW)
                    result = pose_nms(boxes, scores, preds_img, preds_scores)  # list type
                    # each entry: 'keypoints', 'kp_score', 'proposal_score', 'bbox'
                # print('boxes', boxes.size(), boxes)
                # for aa in result:
                #     keys = aa['keypoints']
                #     bbox2 = aa['bbox']
                #     print('pose nms keys', keys.size())
                #     print('pose nms, box', bbox2.size(), bbox2)
                # _result = {
                #     'imgname': img_id,
                #     'result': result,
                #     'pt1': pt1,
                #     'pt2': pt2
                # }
                if img_id > 0:  # First frame does not have previous frame
                    bbox_list_prev_frame = bbox_dets_list_list[img_id - 1].copy()
                    keypoints_list_prev_frame = keypoints_list_list[img_id - 1].copy()
                else:
                    bbox_list_prev_frame = []
                    keypoints_list_prev_frame = []
                # boxes.size(0)
                num_dets = len(result)
                for det_id in range(num_dets):  # detections for current frame
                    # obtain bbox position and track id
                    result_box = result[det_id]
                    kp_score = result_box['kp_score']
                    proposal_score = result_box['proposal_score'].numpy()[0]
                    if proposal_score < 1.3:
                        continue
                    keypoints = result_box['keypoints']
                    bbox_det = bbox_from_keypoints(keypoints)  # xxyy
                    # enlarge bbox by 20% with same center position
                    # bbox_x1y1x2y2 = xywh_to_x1y1x2y2(bbox_det)
                    bbox_in_xywh = enlarge_bbox(bbox_det, enlarge_scale)
                    # print('enlarged', bbox_in_xywh)
                    bbox_det = x1y1x2y2_to_xywh(bbox_in_xywh)
                    # print('converted', bbox_det)
                    # Keyframe: use provided bbox
                    # if bbox_invalid(bbox_det):
                    #     track_id = None  # this id means null
                    #     keypoints = []
                    #     bbox_det = [0, 0, 2, 2]
                    #     # update current frame bbox
                    #     bbox_det_dict = {"img_id": img_id,
                    #                      "det_id": det_id,
                    #                      "track_id": track_id,
                    #                      "bbox": bbox_det}
                    #     bbox_dets_list.append(bbox_det_dict)
                    #     # update current frame keypoints
                    #     keypoints_dict = {"img_id": img_id,
                    #                       "det_id": det_id,
                    #                       "track_id": track_id,
                    #                       "keypoints": keypoints}
                    #     keypoints_list.append(keypoints_dict)
                    #     continue
                    # obtain keypoints for each bbox position in the keyframe
                    # print('img id ', img_id)
                    if img_id == 0:  # First frame, all ids are assigned automatically
                        track_id = next_id
                        next_id += 1
                    else:
                        track_id, match_index = get_track_id_SpatialConsistency(
                            bbox_det, bbox_list_prev_frame)
                        # print('track', track_id, match_index)
                        if track_id != -1:
                            # if candidate from prev frame matched, prevent it from matching another
                            del bbox_list_prev_frame[match_index]
                            del keypoints_list_prev_frame[match_index]
                    # update current frame bbox
                    bbox_det_dict = {"img_id": img_id,
                                     "det_id": det_id,
                                     "track_id": track_id,
                                     "bbox": bbox_det}
                    bbox_dets_list.append(bbox_det_dict)
                    # update current frame keypoints
                    keypoints_dict = {"img_id": img_id,
                                      "det_id": det_id,
                                      "track_id": track_id,
                                      "keypoints": keypoints,
                                      'kp_score': kp_score,
                                      'bbox': bbox_det,
                                      'proposal_score': proposal_score}
                    keypoints_list.append(keypoints_dict)

                num_dets = len(bbox_dets_list)
                for det_id in range(num_dets):  # detections for current frame
                    bbox_det_dict = bbox_dets_list[det_id]
                    keypoints_dict = keypoints_list[det_id]
                    # assert (det_id == bbox_det_dict["det_id"])
                    # assert (det_id == keypoints_dict["det_id"])
                    if bbox_det_dict["track_id"] == -1:  # this id means matching not found yet
                        track_id = bbox_det_dict["track_id"]
                        # NOTE: the SGCN re-matching below is commented out, so track_id
                        # stays -1 here and the new-id branch is what actually runs.
                        # track_id, match_index = get_track_id_SGCN(bbox_det_dict["bbox"], bbox_list_prev_frame,
                        #                                           keypoints_dict["keypoints"],
                        #                                           keypoints_list_prev_frame)
                        if track_id != -1:
                            # if candidate from prev frame matched, prevent it from matching another
                            del bbox_list_prev_frame[match_index]
                            del keypoints_list_prev_frame[match_index]
                            bbox_det_dict["track_id"] = track_id
                            keypoints_dict["track_id"] = track_id
                        # if still can not find a match from previous frame, then assign a new id
                        if track_id == -1 and not bbox_invalid(bbox_det_dict["bbox"]):
                            bbox_det_dict["track_id"] = next_id
                            keypoints_dict["track_id"] = next_id
                            next_id += 1
                # update frame
                bbox_dets_list_list.append(bbox_dets_list)
                keypoints_list_list.append(keypoints_list)
                # draw keypoints
                vis_frame(img, keypoints_list)
                # _pt1, _pt2 = _result['pt1'].numpy(), _result['pt2'].numpy()
                # pt1 = _pt1.astype(np.uint32)
                # pt2 = _pt2.astype(np.uint32)
                # for p1, p2 in zip(pt1, pt2):
                #     cv2.rectangle(img, (p1[0], p1[1]), (p2[0], p2[1]), (34, 154, 11), 1)

            if CAR is not None:  # No car detection otherwise
                car_track_id = 0
                car_np = CAR
                new_car_bboxs = car_np[:, 0:4].astype(np.uint32)
                new_car_score = car_np[:, 4]
                car_dest_list = []
                if img_id > 1:  # First frame does not have previous frame
                    car_bbox_list_prev_frame = car_dets_list_list[img_id - 1].copy()
                else:
                    car_bbox_list_prev_frame = []
                # print('car bbox list prev frame ', len(car_bbox_list_prev_frame))
                for c, score in zip(new_car_bboxs, new_car_score):
                    car_bbox_det = c
                    bbox_in_xywh = enlarge_bbox(car_bbox_det, enlarge_scale)
                    bbox_det = x1y1x2y2_to_xywh(bbox_in_xywh)
                    # obtain keypoints for each bbox position in the keyframe
                    # print('img id ', img_id)
                    if img_id == 0:  # First frame, all ids are assigned automatically
                        car_track_id = car_next_id
                        car_next_id += 1
                        # print('if img id zero', car_next_id)
                    else:
                        car_track_id, match_index = get_track_id_SpatialConsistency(
                            bbox_det, car_bbox_list_prev_frame)
                        # print(car_track_id, match_index)
                        if car_track_id != -1:
                            # if candidate from prev frame matched, prevent it from matching another
                            del car_bbox_list_prev_frame[match_index]
                    bbox_det_dict = {"img_id": img_id,
                                     "track_id": car_track_id,
                                     "bbox": bbox_det}
                    car_dest_list.append(bbox_det_dict)
                # print()
                num_dets = len(car_dest_list)
                for det_id in range(num_dets):  # detections for current frame
                    car_bbox_det_dict = car_dest_list[det_id]
                    # assert (det_id == bbox_det_dict["det_id"])
                    # assert (det_id == keypoints_dict["det_id"])
                    # print(car_bbox_det_dict["track_id"])
                    if car_bbox_det_dict["track_id"] == -1:  # this id means matching not found yet
                        car_bbox_det_dict["track_id"] = car_next_id
                        car_next_id += 1
                        # print('car next id ', car_next_id)
                self.tracking(car_dest_list, img_id)
                for car in car_dest_list:
                    x, y, w, h = car['bbox']
                    track_id = car['track_id']
                    tracker = self.track_dict[track_id]
                    history = tracker['history']
                    moved = np.sum(history[-10:])
                    last_moved = np.sum(history[-60:])
                    COLOR_MOVING = (0, 255, 0)
                    COLOR_RED = (0, 0, 255)
                    COLOR_INACTIVE = (255, 0, 0)
                    cv2.rectangle(img, (x, y), (x + w, y + h), COLOR_INACTIVE, 1)
                    text_filled(img, (x, y), f'{track_id} Inactive', COLOR_INACTIVE)
                    # if moved:
                    #     cv2.rectangle(img, (x, y), (x + w, y + h), COLOR_MOVING, 1)
                    #     text_filled(img, (x, y), f'CAR {track_id} Active', COLOR_MOVING)
                    # else:
                    #     if last_moved:
                    #         cv2.rectangle(img, (x, y), (x + w, y + h), COLOR_RED, 1)
                    #         text_filled(img, (x, y), f'CAR {track_id} Standstill', COLOR_RED)
                    #         cropped = img[y:y+h, x:x+w, :]
                    #         filter = np.zeros(cropped.shape, dtype=img.dtype)
                    #         # print(cropped.shape, filter.shape)
                    #         filter[:, :, 2] = 255
                    #         # print(overlay.shape)
                    #         # cv2.rectangle(overlay, (0, 0), (w, h), COLOR_RED, -1)
                    #         overlayed = cv2.addWeighted(cropped, 0.8, filter, 0.2, 0)
                    #         img[y:y+h, x:x+w, :] = overlayed[:, :, :]
                    #     else:
                    #         cv2.rectangle(img, (x, y), (x + w, y + h), COLOR_INACTIVE, 1)
                    #         text_filled(img, (x, y), f'{track_id} Inactive', COLOR_INACTIVE)
                car_dets_list_list.append(car_dest_list)
            else:
                car_dest_list = []
                bbox_det_dict = {"img_id": img_id,
                                 "det_id": 0,
                                 "track_id": None,
                                 "bbox": [0, 0, 2, 2]}
                car_dest_list.append(bbox_det_dict)
                car_dets_list_list.append(car_dest_list)

            # if img_id != 0:
            #     for car in car_dets_list_list[-1]:
            #         car_track_id = car['track_id']
            #         if car_track_id is None:
            #             continue
            #         car_bbox = car['bbox']
            #         for human in bbox_dets_list_list[-1]:
            #             human_track_id = human['track_id']
            #             if human_track_id is None:
            #                 continue
            #             hum_bbox = human['bbox']
            #             boxa = xywh_to_x1y1x2y2(hum_bbox)
            #             boxb = xywh_to_x1y1x2y2(car_bbox)
            #             x, y, w, h = x1y1x2y2_to_xywh(boxa)
            #             area = iou(boxa, boxb)
            #             if area > 0.02:
            #                 cropped = img[y:y+h, x:x+w, :]
            #                 filter = np.zeros(cropped.shape, dtype=img.dtype)
            #                 filter[:, :, 2] = 255
            #                 overlayed = cv2.addWeighted(cropped, 0.9, filter, 0.1, 0)
            #                 img[y:y+h, x:x+w, :] = overlayed[:, :, :]

            if opt.vis:
                cv2.imshow("AlphaPose Demo", img)
                cv2.waitKey(1)
            if opt.save_video:
                self.stream.write(img)
        else:
            time.sleep(0.1)
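# `self.tracking` and `self.track_dict` are external to this snippet. A minimal
# sketch of what the per-track motion history used above could look like: each
# track keeps a list of 0/1 "moved this frame" flags, so np.sum(history[-10:]) and
# np.sum(history[-60:]) answer "did it move recently". The function name, the
# IoU-based motion test, and the threshold are all assumptions, not the original.
import numpy as np

def update_tracking_sketch(track_dict, car_dest_list, iou_fn, still_iou=0.9):
    # iou_fn(box_a, box_b) is assumed to accept the same bbox format stored above
    for car in car_dest_list:
        track_id = car['track_id']
        tracker = track_dict.setdefault(track_id, {'history': [], 'last_bbox': None})
        last = tracker['last_bbox']
        # a box nearly identical to last frame's counts as "not moved"
        moved = 1 if (last is None or iou_fn(car['bbox'], last) < still_iou) else 0
        tracker['history'].append(moved)
        tracker['last_bbox'] = car['bbox']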
def update(self):
    print(f'DataWriter_update_thread: {threading.currentThread().name}')
    # keep looping infinitely
    temp_kps = []
    while True:
        # if the thread indicator variable is set, stop the thread
        if self.stopped:
            if self.save_video:
                self.stream.release()
            return
        # otherwise, ensure the queue is not empty
        if not self.Q.empty():
            (boxes, scores, hm_data, pt1, pt2, orig_img, im_name) = self.Q.get()
            orig_img = np.array(orig_img, dtype=np.uint8)
            if boxes is None:
                if opt.save_img or opt.save_video or opt.vis:
                    img = orig_img
                    if opt.vis:
                        h, w, c = img.shape
                        img = cv2.resize(img, (int(w / 2), int(h / 2)), interpolation=cv2.INTER_CUBIC)
                        cv2.imshow("AlphaPose Demo", img)
                        cv2.waitKey(30)
                    if opt.save_img:
                        cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
                    if opt.save_video:
                        self.stream.write(img)
                # send the frame over TCP
                img = orig_img
                h, w, c = img.shape
                img = cv2.resize(img, (int(w / 2), int(h / 2)), interpolation=cv2.INTER_CUBIC)
                # draw the bed-region rectangle
                # cv2.rectangle(img, (conf.Urls.bed_min_x, conf.Urls.bed_min_y),
                #               (conf.Urls.bed_max_x, conf.Urls.bed_max_y), (0, 255, 0), 1)
                self.tcp_client.send_img(img)
            else:
                # location prediction (n, kp, 2) | score prediction (n, kp, 1)
                preds_hm, preds_img, preds_scores = getPrediction(
                    hm_data, pt1, pt2, opt.inputResH, opt.inputResW,
                    opt.outputResH, opt.outputResW)
                result = pose_nms(boxes, scores, preds_img, preds_scores)
                if len(result) > 0:
                    pos = result[0]['keypoints'].unsqueeze(0).numpy()
                    pos = self.aligner.align_points(pos)[0]
                    pos = (pos[..., :2] - 129) / 255
                    pos = torch.FloatTensor(pos)
                    kp = torch.cat((pos, result[0]['kp_score']), 1)
                    kp = kp.unsqueeze(0)
                    if len(temp_kps) < 9:
                        kp = kp.reshape([1, -1]).cuda()
                        temp_kps.append(kp)
                        kp = kp.repeat(9, 1).reshape(1, -1)
                        outputs = self.pos_reg_model(kp)
                        _, preds = torch.max(outputs, 1)
                        classidx = preds.cpu()
                        result[0]['class'] = str(float(classidx))
                        result[0]['bbox'] = boxes[0]
                    else:
                        kp = kp.cuda().reshape(1, -1)
                        temp_kps.append(kp)
                        temp_kps.pop(0)
                        _temp_kps = torch.cat(temp_kps)  # entries are already on GPU; the original bare `.cuda()` call was a no-op
                        _temp_kps = _temp_kps.reshape([1, -1])
                        outputs = self.pos_reg_model(_temp_kps)
                        _, preds = torch.max(outputs, 1)
                        classidx = preds.cpu()
                        result[0]['class'] = str(float(classidx))
                        result[0]['bbox'] = boxes[0]
                    # print(preds)
                result = {'imgname': im_name, 'result': result}
                self.result_Q.put(result)
                self.final_result.append(result)
                # send the rendered frame over TCP
                img = vis_frame(orig_img, result)
                # h, w, c = img.shape
                # img = cv2.resize(img, (int(w / 2), int(h / 2)), interpolation=cv2.INTER_CUBIC)
                # draw the bed-region rectangle
                # cv2.rectangle(img, (conf.Urls.bed_min_x, conf.Urls.bed_min_y),
                #               (conf.Urls.bed_max_x, conf.Urls.bed_max_y), (0, 255, 0), 1)
                self.tcp_client.send_img(img)
                if opt.save_img or opt.save_video or opt.vis:
                    # img = vis_frame(orig_img, result)
                    if opt.vis:
                        h, w, c = img.shape
                        # img = cv2.resize(img, (int(w / 4), int(h / 4)), interpolation=cv2.INTER_CUBIC)
                        cv2.imshow("AlphaPose Demo", img)
                        cv2.waitKey(30)
                    if opt.save_img:
                        cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
                    if opt.save_video:
                        self.stream.write(img)
        else:
            time.sleep(0.1)
def detect_main(args, im_names, yolo_model, pose_net):
    # Load input images
    data_loader = ImageLoader(im_names, batchSize=args.detbatch, format='yolo').start()
    # Load detection loader
    det_loader = DetectionLoader(data_loader, model=yolo_model, batchSize=args.detbatch).start()
    det_processor = DetectionProcessor(det_loader).start()
    runtime_profile = {
        'dt': [],
        'pt': [],
        'pn': []
    }
    # Init data writer
    # writer = DataWriter(args.save_video).start()
    data_len = data_loader.length()
    fall_res_all = []
    batchSize = args.posebatch
    for i in range(data_len):
        start_time = getTime()
        with torch.no_grad():
            (inps, orig_img, im_name, boxes, scores, pt1, pt2) = det_processor.read()
            if boxes is None or boxes.nelement() == 0:
                # writer.save(None, None, None, None, None, orig_img, im_name.split('/')[-1])
                continue
            ckpt_time, det_time = getTime(start_time)
            runtime_profile['dt'].append(det_time)
            # Pose Estimation
            # print(im_name)
            datalen = inps.size(0)
            leftover = 0
            if (datalen) % batchSize:
                leftover = 1
            num_batches = datalen // batchSize + leftover
            hm = []
            for j in range(num_batches):
                inps_j = inps[j * batchSize:min((j + 1) * batchSize, datalen)].cuda()
                hm_j = pose_net(inps_j)
                hm.append(hm_j)
            hm = torch.cat(hm)
            ckpt_time, pose_time = getTime(ckpt_time)
            runtime_profile['pt'].append(pose_time)
            hm = hm.cpu()
            # writer.save(boxes, scores, hm, pt1, pt2, orig_img, im_name.split('/')[-1])
            fall_res = []
            fall_res.append(im_name.split('/')[-1])
            if boxes is None:
                # unreachable: the None case was handled above; kept from the original,
                # with the undefined `img` replaced by `orig_img`
                cv2.imwrite(opt.outputpath + '/' + im_name.split('/')[-1], orig_img)
            else:
                if opt.matching:
                    preds = getMultiPeakPrediction(
                        hm, pt1.numpy(), pt2.numpy(), opt.inputResH,
                        opt.inputResW, opt.outputResH, opt.outputResW)
                    result = matching(boxes, scores.numpy(), preds)
                else:
                    preds_hm, preds_img, preds_scores = getPrediction(
                        hm, pt1, pt2, opt.inputResH, opt.inputResW,
                        opt.outputResH, opt.outputResW)
                    result = pose_nms(boxes, scores, preds_img, preds_scores)
                result = {'imgname': im_name, 'result': result}
                img = vis_frame(orig_img, result)
                for human in result['result']:
                    keypoint = human['keypoints']
                    keypoint = keypoint.numpy()
                    xmax = max(keypoint[:, 0])
                    xmin = min(keypoint[:, 0])
                    ymax = max(keypoint[:, 1])
                    ymin = min(keypoint[:, 1])
                    w = xmax - xmin
                    h = ymax - ymin
                    # vertical ankle-to-hip distance
                    distance = abs((keypoint[15][1] + keypoint[16][1]) / 2 -
                                   (keypoint[11][1] + keypoint[12][1]) / 2)
                    if w / h >= 0.95:
                        # bounding box nearly wider than tall: fall
                        cv2.rectangle(img, (int(xmin), int(ymin)), (int(xmax), int(ymax)), (0, 0, 255), 2)
                        font = cv2.FONT_HERSHEY_SIMPLEX
                        cv2.putText(img, 'Warning!Fall', (int(xmin + 10), int(ymax - 10)),
                                    font, 1, (0, 0, 255), 2)
                        fall_res.append([xmin, ymin, xmax, ymax])
                        '''
                        print('1 location:[%f,' % (xmin) + '%f]' % (ymin) + ' [%f,' % (xmax) + '%f]' % (
                            ymin) + ' [%f,' % (xmin) + '%f]' % (ymax) + ' [%f,' % (xmax) + '%f]' % (ymax))
                        '''
                    else:
                        if distance < 55:
                            # hips nearly level with ankles: fall
                            cv2.rectangle(img, (int(xmin), int(ymin)), (int(xmax), int(ymax)), (0, 255, 0), 2)
                            font = cv2.FONT_HERSHEY_SIMPLEX
                            cv2.putText(img, 'Warning!Fall!', (int(xmin + 10), int(ymax - 10)),
                                        font, 1, (0, 255, 0), 2)
                            fall_res.append(1)
                            fall_res.append([xmin, ymin, xmax, ymax])
                            '''
                            print('1 location:[%f,' % (xmin) + '%f]' % (ymin) + ' [%f,' % (xmax) + '%f]' % (
                                ymin) + ' [%f,' % (xmin) + '%f]' % (ymax) + ' [%f,' % (xmax) + '%f]' % (ymax))
                            '''
                        else:
                            cv2.rectangle(img, (int(xmin), int(ymin)), (int(xmax), int(ymax)), (255, 0, 0), 2)
            # cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
            print(fall_res)
            cv2.imwrite(opt.outputpath + '/' + im_name.split('/')[-1], img)
            ckpt_time, post_time = getTime(ckpt_time)
            runtime_profile['pn'].append(post_time)
            fall_res_all.append(fall_res)
    return fall_res_all
def update(self):
    while True:
        (img, orig_img, im_name, im_dim_list) = self.dataloder.getitem()
        with self.dataloder.Q.mutex:
            self.dataloder.Q.queue.clear()
        with torch.no_grad():
            # Human Detection
            img = img.cuda()
            prediction = self.det_model(img, CUDA=True)
            # im_dim_list = im_dim_list.cuda()
            frame_id = int(im_name.split('.')[0])
            # NMS process
            dets = dynamic_write_results(prediction, opt.confidence, opt.num_classes,
                                         nms=True, nms_conf=opt.nms_thesh)
            if isinstance(dets, int) or dets.shape[0] == 0:
                if self.Q.full():
                    time.sleep(2)
                self.Q.put((orig_img, frame_id, None, None, None, None, None))
                continue
            dets = dets.cpu()
            im_dim_list = torch.index_select(im_dim_list, 0, dets[:, 0].long())
            scaling_factor = torch.min(self.det_inp_dim / im_dim_list, 1)[0].view(-1, 1)
            # coordinate transfer
            dets[:, [1, 3]] -= (self.det_inp_dim - scaling_factor * im_dim_list[:, 0].view(-1, 1)) / 2
            dets[:, [2, 4]] -= (self.det_inp_dim - scaling_factor * im_dim_list[:, 1].view(-1, 1)) / 2
            dets[:, 1:5] /= scaling_factor
            for j in range(dets.shape[0]):
                dets[j, [1, 3]] = torch.clamp(dets[j, [1, 3]], 0.0, im_dim_list[j, 0])
                dets[j, [2, 4]] = torch.clamp(dets[j, [2, 4]], 0.0, im_dim_list[j, 1])
            boxes = dets[:, 1:5]
            scores = dets[:, 5:6]
            # Pose Estimation
            inp = im_to_torch(orig_img)
            inps = torch.zeros(boxes.size(0), 3, opt.inputResH, opt.inputResW)
            pt1 = torch.zeros(boxes.size(0), 2)
            pt2 = torch.zeros(boxes.size(0), 2)
            inps, pt1, pt2 = crop_from_dets(inp, boxes, inps, pt1, pt2)
            inps = Variable(inps.cuda())
            hm = self.pose_model(inps)
            if boxes is None:
                if self.Q.full():
                    time.sleep(2)
                self.Q.put((orig_img, frame_id, None, None, None, None, None))
                continue
            else:
                preds_hm, preds_img, preds_scores = getPrediction(
                    hm.cpu(), pt1, pt2, opt.inputResH, opt.inputResW,
                    opt.outputResH, opt.outputResW)
                bbox, b_score, kp, kp_score, roi = pose_nms(
                    orig_img, boxes, scores, preds_img, preds_scores)
                # result = {
                #     'imgname': im_name,
                #     'result': result,
                #     'orig_img': orig_img
                # }
                if self.Q.full():
                    time.sleep(2)
                # self.Q.put((orig_img[k], im_name[k], boxes_k, scores[dets[:, 0] == k], inps, pt1, pt2))
                # self.Q.put((result, orig_img, im_name))
                self.Q.put((orig_img, frame_id, bbox, b_score, kp, kp_score, roi))
# (fragment from a processing loop; `inp`, `boxes`, `ckpt_time`, `i`, and `writer`
# are defined earlier in that loop)
inps, pt1, pt2 = crop_from_dets(inp, boxes, inps, pt1, pt2)
inps = Variable(inps.cuda())
hm = pose_model(inps)
ckpt_time, pose_time = getTime(ckpt_time)
runtime_profile['pt'].append(pose_time)

if boxes is None:
    result = None
else:
    # Get keypoint location from heatmaps
    preds_hm, preds_img, preds_scores = getPrediction(
        hm.cpu(), pt1, pt2, args.inputResH, args.inputResW,
        args.outputResH, args.outputResW)
    # result: keypoints, kp_score, proposal_score
    result = pose_nms(boxes, scores, preds_img, preds_scores)
    # print('result:', result)
    # result: [{'keypoints': ...}, ...] -- each ROI is a dict holding keypoints and related data
    # print('-------------')
    # print('result:', result[0])  # the first ROI

im_name = str(i) + '.jpg'
# result = {
#     'result': result,
#     'orig_img': orig_img,
#     'imgname': im_name
# }
# writer.save(boxes, scores, hm.cpu(), pt1, pt2, orig_img, im_name=str(i)+'.jpg')
# writer.save(result, orig_img, str(i) + '.jpg')
writer.save(result, orig_img=orig_img, im_name=im_name)
ckpt_time, post_time = getTime(ckpt_time)
def person_tracking(self, boxes, scores, hm_data, pt1, pt2, img_id):
    person_list = []
    if opt.matching:  # TODO check the difference
        preds = getMultiPeakPrediction(
            hm_data, pt1.numpy(), pt2.numpy(), opt.inputResH,
            opt.inputResW, opt.outputResH, opt.outputResW)
        result = matching(boxes, scores.numpy(), preds)
    else:
        preds_hm, preds_img, preds_scores = getPrediction(
            hm_data, pt1, pt2, opt.inputResH, opt.inputResW,
            opt.outputResH, opt.outputResW)
        result = pose_nms(boxes, scores, preds_img, preds_scores)  # list type
        # each entry: 'keypoints', 'kp_score', 'proposal_score', 'bbox'
    if img_id > 0:  # First frame does not have previous frame
        person_list_prev_frame = self.person_list_list[img_id - 1].copy()
    else:
        person_list_prev_frame = []
    num_dets = len(result)
    for det_id in range(num_dets):  # IOU tracking for detections in current frame.
        # detections for current frame: obtain bbox position and track id
        result_box = result[det_id]
        kp_score = result_box['kp_score']
        proposal_score = result_box['proposal_score'].numpy()[0]
        if proposal_score < 1.3:  # TODO check person proposal threshold
            continue
        keypoints = result_box['keypoints']  # torch, (17, 2)
        keypoints_pf = np.zeros((15, 2))
        idx_list = [16, 14, 12, 11, 13, 15, 10, 8, 6, 5, 7, 9, 0, 0, 0]
        for i, idx in enumerate(idx_list):
            keypoints_pf[i] = keypoints[idx]
        keypoints_pf[12] = (keypoints[5] + keypoints[6]) / 2  # neck
        # COCO order:     {0-nose 1-Leye 2-Reye 3-Lear 4-Rear 5-Lsho 6-Rsho 7-Lelb 8-Relb 9-Lwri 10-Rwri 11-Lhip 12-Rhip 13-Lkne 14-Rkne 15-Lank 16-Rank}
        # PoseFlow order: {0-Rank 1-Rkne 2-Rhip 3-Lhip 4-Lkne 5-Lank 6-Rwri 7-Relb 8-Rsho 9-Lsho 10-Lelb 11-Lwri 12-neck 13-nose 14-TopHead}
        bbox_det = bbox_from_keypoints(keypoints)  # xxyy
        # enlarge bbox by 20% with same center position
        bbox_in_xywh = enlarge_bbox(bbox_det, enlarge_scale)
        bbox_det = x1y1x2y2_to_xywh(bbox_in_xywh)
        # update current frame bbox
        if img_id == 0:  # First frame, all ids are assigned automatically
            track_id = self.person_next_id
            self.person_next_id += 1
        else:
            track_id, match_index = get_track_id_SpatialConsistency(bbox_det, person_list_prev_frame)
            if track_id != -1:
                # if candidate from prev frame matched, prevent it from matching another
                del person_list_prev_frame[match_index]
        person_det_dict = {
            "img_id": img_id,
            "det_id": det_id,
            "track_id": track_id,
            "bbox": bbox_det,
            "keypoints": keypoints,
            'kp_poseflow': keypoints_pf,
            'kp_score': kp_score,
            'proposal_score': proposal_score
        }
        person_list.append(person_det_dict)
    num_dets = len(person_list)
    for det_id in range(num_dets):  # if IOU tracking failed, run pose matching tracking.
        person_dict = person_list[det_id]
        if person_dict["track_id"] == -1:  # this id means matching not found yet
            # track_id = bbox_det_dict["track_id"]
            track_id, match_index = get_track_id_SGCN(
                person_dict["bbox"], person_list_prev_frame, person_dict["kp_poseflow"])
            if track_id != -1:
                # if candidate from prev frame matched, prevent it from matching another
                del person_list_prev_frame[match_index]
                person_dict["track_id"] = track_id
            else:
                # if still can not find a match from previous frame, then assign a new id
                # if track_id == -1 and not bbox_invalid(bbox_det_dict["bbox"]):
                person_dict["track_id"] = self.person_next_id
                self.person_next_id += 1
    return person_list
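# `get_track_id_SpatialConsistency` comes from the PoseFlow/LightTrack-style
# tracking utilities and is not defined in this snippet. Functionally it matches
# a detection to the previous frame's detections by spatial overlap; a minimal
# IoU-based sketch follows, assuming xywh boxes and a hypothetical threshold.
def get_track_id_spatial_consistency_sketch(bbox_det, dets_prev_frame, iou_thresh=0.3):
    def iou_xywh(a, b):
        ax1, ay1, ax2, ay2 = a[0], a[1], a[0] + a[2], a[1] + a[3]
        bx1, by1, bx2, by2 = b[0], b[1], b[0] + b[2], b[1] + b[3]
        iw = max(0.0, min(ax2, bx2) - max(ax1, bx1))
        ih = max(0.0, min(ay2, by2) - max(ay1, by1))
        inter = iw * ih
        union = a[2] * a[3] + b[2] * b[3] - inter
        return inter / union if union > 0 else 0.0

    best_iou, best_idx = 0.0, -1
    for idx, det in enumerate(dets_prev_frame):
        overlap = iou_xywh(bbox_det, det['bbox'])
        if overlap > best_iou:
            best_iou, best_idx = overlap, idx
    if best_iou >= iou_thresh and best_idx != -1:
        return dets_prev_frame[best_idx]['track_id'], best_idx
    return -1, -1  # no match: the caller assigns a new id later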
def update(self):
    next_id = 0
    car_next_id = 0
    bbox_dets_list_list = []
    keypoints_list_list = []
    car_dets_list_list = []
    while True:
        # if the thread indicator variable is set, stop the thread
        if self.stopped:
            if self.save_video:
                self.stream.release()
            return
        # otherwise, ensure the queue is not empty
        if not self.Q.empty():
            start_time = getTime()
            (boxes, scores, hm_data, pt1, pt2, orig_img, img_id, CAR) = self.Q.get()
            orig_img = np.array(orig_img, dtype=np.uint8)
            if boxes is not None:
                boxes = boxes.astype(np.int32)
            img = orig_img
            # text_filled2(img, (5, 200), str(img_id), LIGHT_GREEN, 2, 2)
            bbox_dets_list = []  # keyframe: start from empty
            keypoints_list = []  # keyframe: start from empty
            # print(boxes)
            if boxes is None:  # No person detection
                pass
                # bbox_det_dict = {"img_id": img_id,
                #                  "det_id": 0,
                #                  "track_id": None,
                #                  "bbox": [0, 0, 2, 2]}
                # bbox_dets_list.append(bbox_det_dict)
                # keypoints_dict = {"img_id": img_id,
                #                   "det_id": 0,
                #                   "track_id": None,
                #                   "keypoints": []}
                # keypoints_list.append(keypoints_dict)
            else:
                if opt.matching:
                    preds = getMultiPeakPrediction(
                        hm_data, pt1.numpy(), pt2.numpy(), opt.inputResH,
                        opt.inputResW, opt.outputResH, opt.outputResW)
                    result = matching(boxes, scores.numpy(), preds)
                else:
                    preds_hm, preds_img, preds_scores = getPrediction(
                        hm_data, pt1, pt2, opt.inputResH, opt.inputResW,
                        opt.outputResH, opt.outputResW)
                    # print('number of result', preds_hm, preds_scores)
                    result = pose_nms(boxes, scores, preds_img, preds_scores)  # list type
                    # each entry: 'keypoints', 'kp_score', 'proposal_score', 'bbox'
                if img_id > 0:  # First frame does not have previous frame
                    bbox_list_prev_frame = bbox_dets_list_list[img_id - 1].copy()
                    keypoints_list_prev_frame = keypoints_list_list[img_id - 1].copy()
                else:
                    bbox_list_prev_frame = []
                    keypoints_list_prev_frame = []
                # boxes.size(0)
                num_dets = len(result)
                for bbox in boxes:
                    x, y, w, h = bbox.astype(np.uint32)
                    cv2.rectangle(orig_img, (x, y), (x + w, y + h), (253, 222, 111), 1)
                for det_id in range(num_dets):  # IOU tracking for detections in current frame.
                    # detections for current frame: obtain bbox position and track id
                    result_box = result[det_id]
                    kp_score = result_box['kp_score']
                    proposal_score = result_box['proposal_score'].numpy()[0]
                    if proposal_score < 1.3:
                        continue
                    keypoints = result_box['keypoints']  # torch, (17, 2)
                    keypoints_pf = np.zeros((15, 2))
                    idx_list = [16, 14, 12, 11, 13, 15, 10, 8, 6, 5, 7, 9, 0, 0, 0]
                    for i, idx in enumerate(idx_list):
                        keypoints_pf[i] = keypoints[idx]
                    keypoints_pf[12] = (keypoints[5] + keypoints[6]) / 2  # neck
                    # COCO order:     {0-nose 1-Leye 2-Reye 3-Lear 4-Rear 5-Lsho 6-Rsho 7-Lelb 8-Relb 9-Lwri 10-Rwri 11-Lhip 12-Rhip 13-Lkne 14-Rkne 15-Lank 16-Rank}
                    # PoseFlow order: {0-Rank 1-Rkne 2-Rhip 3-Lhip 4-Lkne 5-Lank 6-Rwri 7-Relb 8-Rsho 9-Lsho 10-Lelb 11-Lwri 12-neck 13-nose 14-TopHead}
                    bbox_det = bbox_from_keypoints(keypoints)  # xxyy
                    # bbox_in_xywh = enlarge_bbox(bbox_det, enlarge_scale)
                    # bbox_det = x1y1x2y2_to_xywh(bbox_in_xywh)
                    # Keyframe: use provided bbox
                    # (same commented bbox_invalid fallback as in the update() above)
                    # update current frame bbox
                    if img_id == 0:  # First frame, all ids are assigned automatically
                        track_id = next_id
                        next_id += 1
                    else:
                        track_id, match_index = get_track_id_SpatialConsistency(
                            bbox_det, bbox_list_prev_frame)
                        # print('track', track_id, match_index)
                        if track_id != -1:
                            # if candidate from prev frame matched, prevent it from matching another
                            del bbox_list_prev_frame[match_index]
                            del keypoints_list_prev_frame[match_index]
                    # update current frame bbox
                    bbox_det_dict = {"img_id": img_id,
                                     "det_id": det_id,
                                     "track_id": track_id,
                                     "bbox": bbox_det}
                    # update current frame keypoints
                    keypoints_dict = {"img_id": img_id,
                                      "det_id": det_id,
                                      "track_id": track_id,
                                      "keypoints": keypoints,
                                      'kp_poseflow': keypoints_pf,
                                      'kp_score': kp_score,
                                      'bbox': bbox_det,
                                      'proposal_score': proposal_score}
                    bbox_dets_list.append(bbox_det_dict)
                    keypoints_list.append(keypoints_dict)
                num_dets = len(bbox_dets_list)
                for det_id in range(num_dets):  # if IOU tracking failed, run pose matching tracking.
                    bbox_det_dict = bbox_dets_list[det_id]
                    keypoints_dict = keypoints_list[det_id]
                    # assert (det_id == bbox_det_dict["det_id"])
                    # assert (det_id == keypoints_dict["det_id"])
                    if bbox_det_dict["track_id"] == -1:  # this id means matching not found yet
                        # track_id = bbox_det_dict["track_id"]
                        track_id, match_index = get_track_id_SGCN(
                            bbox_det_dict["bbox"], bbox_list_prev_frame,
                            keypoints_dict["kp_poseflow"], keypoints_list_prev_frame)
                        if track_id != -1:
                            # if candidate from prev frame matched, prevent it from matching another
                            del bbox_list_prev_frame[match_index]
                            del keypoints_list_prev_frame[match_index]
                            bbox_det_dict["track_id"] = track_id
                            keypoints_dict["track_id"] = track_id
                        # if still can not find a match from previous frame, then assign a new id
                        # if track_id == -1 and not bbox_invalid(bbox_det_dict["bbox"]):
                        if track_id == -1:
                            bbox_det_dict["track_id"] = next_id
                            keypoints_dict["track_id"] = next_id
                            next_id += 1
                # update frame
                # print('keypoint list', len(keypoints_list))
                vis_frame(img, keypoints_list)

            """ Car """
            if CAR is not None:
                car_np = CAR
                new_car_bboxs = car_np[:, 0:4].astype(np.uint32)  # b / x y w h c / cls_conf, cls_idx
                new_car_score = car_np[:, 4]
                cls_conf = car_np[:, 4]
                # print("id: ", img_id, " ------------ ", new_car_bboxs, new_car_score)
                # cls_conf = car_np[:, 6]
                car_dest_list = []
                if img_id > 1:  # First frame does not have previous frame
                    car_bbox_list_prev_frame = car_dets_list_list[img_id - 1].copy()
                else:
                    car_bbox_list_prev_frame = []
                # print('car bbox list prev frame ', len(car_bbox_list_prev_frame))
                for c, score, conf in zip(new_car_bboxs, new_car_score, cls_conf):
                    # car_bbox_det = c
                    # car_bbox_det = x1y1x2y2_to_xywh(c)
                    bbox_det = c
                    # bbox_in_xywh = enlarge_bbox(car_bbox_det, enlarge_scale)
                    # bbox_det = x1y1x2y2_to_xywh(bbox_in_xywh)
                    if img_id == 0:  # First frame, all ids are assigned automatically
                        car_track_id = car_next_id
                        car_next_id += 1
                    else:
                        car_track_id, match_index = get_track_id_SpatialConsistency(
                            bbox_det, car_bbox_list_prev_frame)
                        # print(car_track_id, match_index)
                        if car_track_id != -1:
                            # if candidate from prev frame matched, prevent it from matching another
                            del car_bbox_list_prev_frame[match_index]
                    bbox_det_dict = {"img_id": img_id,
                                     "track_id": car_track_id,
                                     "bbox": bbox_det,
                                     "score": score,
                                     "conf": conf}
                    car_dest_list.append(bbox_det_dict)
                for car_bbox_det_dict in car_dest_list:  # detections for current frame
                    if car_bbox_det_dict["track_id"] == -1:  # this id means matching not found yet
                        car_bbox_det_dict["track_id"] = car_next_id
                        car_next_id += 1
                self.tracking(car_dest_list)
                car_dets_list_list.append(car_dest_list)
            else:
                car_dest_list = []
                bbox_det_dict = {"img_id": img_id,
                                 "det_id": 0,
                                 "track_id": None,
                                 "bbox": [0, 0, 2, 2],
                                 "score": 0,
                                 "conf": 0}
                car_dest_list.append(bbox_det_dict)
                car_dets_list_list.append(car_dest_list)

            bbox_dets_list_list.append(bbox_dets_list)
            keypoints_list_list.append(keypoints_list)

            if img_id != 0:
                self.car_person_detection(car_dest_list, bbox_dets_list, img)
                self.car_parking_detection(car_dest_list, img, img_id)

            ckpt_time, det_time = getTime(start_time)
            cv2.putText(img, str(1 / det_time), (5, 30),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 255, 0), 1)
            if opt.vis:
                cv2.imshow("AlphaPose Demo", img)
                cv2.waitKey(33)
            if opt.save_video:
                self.stream.write(img)
        else:
            time.sleep(0.1)
def update(self):
    # keep looping infinitely
    while True:
        # if the thread indicator variable is set, stop the thread
        if self.stopped:
            if self.save_video:
                self.stream.release()
            return
        # otherwise, ensure the queue is not empty
        if not self.Q.empty():
            (boxes, scores, hm_data, pt1, pt2, orig_img, im_name) = self.Q.get()
            orig_img = np.array(orig_img, dtype=np.uint8)
            if boxes is None:
                if opt.save_img or opt.save_video or opt.vis:
                    img = orig_img
                    if opt.vis:
                        cv2.imshow("AlphaPose Demo", img)
                        cv2.waitKey(30)
                    if opt.save_img:
                        cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
                    if opt.save_video:
                        self.stream.write(img)
            else:
                # location prediction (n, kp, 2) | score prediction (n, kp, 1)
                preds_hm, preds_img, preds_scores = getPrediction(
                    hm_data, pt1, pt2, opt.inputResH, opt.inputResW,
                    opt.outputResH, opt.outputResW)
                result = pose_nms(boxes, scores, preds_img, preds_scores)
                result = {
                    'imgname': im_name,
                    'result': result
                }  # append imgname here.
                # result here includes imgname, bbox, kps, kp_score, proposal_score
                # Critical: run the PnP algorithm here to get the 6D pose.
                # embed()
                KP_REMAIN = self.left_number
                if result['result']:
                    kp_score = np.array(result['result'][0]['kp_score'][:, 0])
                    kp_2d = np.array(result['result'][0]['keypoints'])
                    kp_3d = np.array(self.kp_3d)
                    # drop the lowest-scoring keypoints until only KP_REMAIN are left
                    while (len(kp_2d) > KP_REMAIN):
                        delidx = np.argmin(kp_score, axis=0)
                        kp_score = np.delete(kp_score, delidx)
                        kp_2d = np.delete(kp_2d, delidx, axis=0)
                        kp_3d = np.delete(kp_3d, delidx, axis=0)
                    # embed()
                    R, t = pnp(kp_3d, kp_2d, self.cam_K)
                    result.update({'cam_R': R, 'cam_t': t})
                else:
                    result.update({'cam_R': [], 'cam_t': []})
                self.final_result.append(result)
                # if opt.save_img or opt.save_video or opt.vis:
                #     img = vis_frame(orig_img, result)
                #     if opt.vis:
                #         cv2.imshow("AlphaPose Demo", img)
                #         cv2.waitKey(30)
                #     if opt.save_img:
                #         cv2.imwrite(os.path.join(opt.outputpath, 'vis', im_name), img)
                #     if opt.save_video:
                #         self.stream.write(img)
        else:
            time.sleep(0.1)
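# `pnp` is called above with (3D points, 2D points, camera matrix) and returns
# (R, t). A minimal sketch using OpenCV's solvePnP, assuming zero lens distortion;
# the original helper may use a different solver flag or a distortion model.
import cv2
import numpy as np

def pnp_sketch(kp_3d, kp_2d, cam_K):
    object_points = np.ascontiguousarray(kp_3d, dtype=np.float64).reshape(-1, 1, 3)
    image_points = np.ascontiguousarray(kp_2d, dtype=np.float64).reshape(-1, 1, 2)
    dist_coeffs = np.zeros((4, 1))  # assumption: no distortion
    ok, rvec, tvec = cv2.solvePnP(object_points, image_points,
                                  np.asarray(cam_K, dtype=np.float64), dist_coeffs,
                                  flags=cv2.SOLVEPNP_ITERATIVE)
    if not ok:
        return np.eye(3), np.zeros((3, 1))
    R, _ = cv2.Rodrigues(rvec)  # rotation vector -> 3x3 rotation matrix
    return R, tvec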
def update(self):
    # keep looping infinitely
    while True:
        # if the thread indicator variable is set, stop the thread
        if self.stopped:
            if self.save_video:
                self.stream.release()
            return
        # otherwise, ensure the queue is not empty
        if not self.Q.empty():
            (boxes, scores, hm_data, pt1, pt2, orig_img, im_name) = self.Q.get()
            orig_img = np.array(orig_img, dtype=np.uint8)
            if boxes is None:
                if opt.save_img or opt.save_video or opt.vis:
                    img = orig_img
                    if opt.vis:
                        cv2.imshow("AlphaPose Demo", img)
                        cv2.waitKey(30)
                    if opt.save_img:
                        cv2.imwrite(os.path.join(opt.outputpath, "vis", f"{im_name}.jpg"), img)
                    if opt.save_video:
                        self.stream.write(img)
            else:
                # location prediction (n, kp, 2) | score prediction (n, kp, 1)
                if opt.matching:
                    preds = getMultiPeakPrediction(
                        hm_data, pt1.numpy(), pt2.numpy(),
                        opt.inputResH, opt.inputResW,
                        opt.outputResH, opt.outputResW,
                    )
                    result = matching(boxes, scores.numpy(), preds)
                else:
                    preds_hm, preds_img, preds_scores = getPrediction(
                        hm_data, pt1, pt2, opt.inputResH, opt.inputResW,
                        opt.outputResH, opt.outputResW,
                    )
                    result = pose_nms(boxes, scores, preds_img, preds_scores)
                result = {"imgname": im_name, "result": result}
                self.final_result.append(result)
                if opt.save_img or opt.save_video or opt.vis or self.result_handler:
                    # draw poses on the frame, or execute the custom result_handler
                    if self.result_handler is None:
                        img = vis_frame(orig_img, result)
                    else:
                        img = self.result_handler(orig_img, result)
                    if opt.vis:
                        cv2.imshow("AlphaPose Demo", img)
                        cv2.waitKey(30)
                    if opt.save_img:
                        cv2.imwrite(os.path.join(opt.outputpath, "vis", f"{im_name}.jpg"), img)
                    if opt.save_video:
                        self.stream.write(img)
        else:
            time.sleep(0.1)
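# `result_handler` above is a user-supplied callback taking (orig_img, result)
# and returning the frame to display/save. A minimal hypothetical handler that
# overlays the number of detected people; the handler name and drawing choices
# are illustrative only.
import cv2

def count_people_handler(orig_img, result):
    img = orig_img.copy()
    n = len(result["result"])  # one entry per detected person
    cv2.putText(img, f"people: {n}", (10, 30),
                cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
    return img

# writer.result_handler = count_people_handler  # assumed wiring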
def get_pose(self, img_names):
    if len(img_names) > 1:
        start_lc = 4000
        start_rc = 4000
        now_time = time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime())
        print('========START-Ten========')
        final_result = []
        vis_images = []
        height_difference = []
        for img_index in range(len(img_names)):
            print('--------------------')
            img_name = img_names[img_index]
            try:
                img, orig_img, im_name, im_dim_list = [], [], [], []
                inp_dim = int(self.args.inp_dim)
                im_name_k = img_name
                img_k, orig_img_k, im_dim_list_k = prep_image(im_name_k, inp_dim)
                img.append(img_k)
                orig_img.append(orig_img_k)
                im_name.append(im_name_k)
                im_dim_list.append(im_dim_list_k)
            except Exception:
                print('index-{}: image has a problem'.format(img_index))
                final_result.append((None, None))
                continue
            with torch.no_grad():
                img = torch.cat(img)
                im_dim_list = torch.FloatTensor(im_dim_list).repeat(1, 2)
                img = img.cuda()

                # human detection
                prediction = self.det_model(img, CUDA=True)
                dets = dynamic_write_results(prediction, self.args.confidence,
                                             self.args.num_classes, nms=True,
                                             nms_conf=self.args.nms_thesh)
                if isinstance(dets, int) or dets.shape[0] == 0:
                    print('index-{}: No person detected'.format(img_index))
                    final_result.append((None, None))
                    height_difference.append(None)
                    continue
                dets = dets.cpu()
                im_dim_list = torch.index_select(im_dim_list, 0, dets[:, 0].long())
                scaling_factor = torch.min(self.det_inp_dim / im_dim_list, 1)[0].view(-1, 1)

                # map detections from the letterboxed input back to the original image
                dets[:, [1, 3]] -= (self.det_inp_dim - scaling_factor * im_dim_list[:, 0].view(-1, 1)) / 2
                dets[:, [2, 4]] -= (self.det_inp_dim - scaling_factor * im_dim_list[:, 1].view(-1, 1)) / 2
                dets[:, 1:5] /= scaling_factor
                for j in range(dets.shape[0]):
                    dets[j, [1, 3]] = torch.clamp(dets[j, [1, 3]], 0.0, im_dim_list[j, 0])
                    dets[j, [2, 4]] = torch.clamp(dets[j, [2, 4]], 0.0, im_dim_list[j, 1])
                boxes = dets[:, 1:5]
                scores = dets[:, 5:6]

                k = 0
                boxes_k = boxes[dets[:, 0] == k]
                inps = torch.zeros(boxes_k.size(0), 3, self.args.inputResH, self.args.inputResW)
                pt1 = torch.zeros(boxes_k.size(0), 2)
                pt2 = torch.zeros(boxes_k.size(0), 2)
                orig_img, im_name, boxes, scores, inps, pt1, pt2 = (
                    orig_img[k], im_name[k], boxes_k, scores[dets[:, 0] == k], inps, pt1, pt2)
                inp = im_to_torch(cv2.cvtColor(orig_img, cv2.COLOR_BGR2RGB))
                inps, pt1, pt2 = crop_from_dets(inp, boxes, inps, pt1, pt2)

                # pose estimation in mini-batches
                batchSize = self.args.posebatch
                datalen = inps.size(0)
                leftover = 1 if datalen % batchSize else 0
                num_batches = datalen // batchSize + leftover
                hm = []
                for j in range(num_batches):
                    inps_j = inps[j * batchSize:min((j + 1) * batchSize, datalen)].cuda()
                    hm_j = self.pose_model(inps_j)
                    hm.append(hm_j)
                hm = torch.cat(hm)
                hm_data = hm.cpu()

                orig_img = np.array(orig_img, dtype=np.uint8)
                im_name = im_name.split('/')[-1]
                preds_hm, preds_img, preds_scores = getPrediction(
                    hm_data, pt1, pt2, self.args.inputResH, self.args.inputResW,
                    self.args.outputResH, self.args.outputResW)
                result = pose_nms(boxes, scores, preds_img, preds_scores)
                result = {'imgname': im_name, 'result': result}
                img = vis_frame(orig_img, result)
                vis_images.append(img)

                output_dir = os.path.join(self.args.outputpath, 'vis')
                output_dir_raw = os.path.join(self.args.outputpath, 'raw')
                if not os.path.exists(output_dir):
                    os.makedirs(output_dir)
                if not os.path.exists(output_dir_raw):
                    os.makedirs(output_dir_raw)

                # pick the person closest to the horizontal image center
                width = img.shape[1]
                keypoints = [res['keypoints'][0] for res in result['result']]
                distance = [xy[0] - width / 2 for xy in keypoints]
                distance = torch.tensor([torch.abs(m) for m in distance])
                indice = torch.argsort(distance)[0]
                pose_result = result['result'][indice]['keypoints']

                # COCO keypoint order:
                # ['Nose', 'LEye', 'REye', 'LEar', 'REar', 'LShoulder', 'RShoulder',
                #  'LElbow', 'RElbow', 'LWrist', 'RWrist', 'LHip', 'RHip',
                #  'LKnee', 'RKnee', 'LAnkle', 'RAnkle']
                # left_arm = pose_result[[6, 8, 10]].numpy()
                # right_arm = pose_result[[5, 7, 9]].numpy()
                left_arm = pose_result[[10]].numpy().astype(int)
                right_arm = pose_result[[9]].numpy().astype(int)
                left_arm_c_y = np.mean(left_arm, axis=0)[1]
                right_arm_c_y = np.mean(right_arm, axis=0)[1]
                left_arm_c = tuple(left_arm[0])
                right_arm_c = tuple(right_arm[0])
                hd = np.abs(left_arm_c_y - right_arm_c_y)
                height_difference.append(hd)

                cv2.circle(img, left_arm_c, 10, (0, 255, 0), -1, 8)
                cv2.circle(img, right_arm_c, 10, (0, 255, 0), -1, 8)
                log_vis_name = now_time + '-' + im_name
                cv2.imwrite(os.path.join(output_dir_raw, log_vis_name), orig_img)
                cv2.imwrite(os.path.join(output_dir, log_vis_name), img)

                # the first processed frame defines the reference wrist heights
                if start_lc == 4000 and start_rc == 4000:
                    start_lc = left_arm_c_y
                    start_rc = right_arm_c_y
                    left_move = 0
                    right_move = 0
                else:
                    left_move = left_arm_c_y - start_lc
                    right_move = right_arm_c_y - start_rc
                print('index-{}--{}: left_c {:.1f}, right_c {:.1f}'.format(
                    img_index, im_name, left_arm_c_y, right_arm_c_y))
                print('index-{}--{}: start_lc {:.1f}, start_rc {:.1f}'.format(
                    img_index, im_name, start_lc, start_rc))
                print('index-{}--{}: left_move {:.1f}, right_move {:.1f}'.format(
                    img_index, im_name, left_move, right_move))
                print('index-{}--{}: height_difference {:.1f}'.format(
                    img_index, im_name, hd))
                final_result.append((left_move, right_move))
        return final_result, vis_images, now_time, height_difference

    elif len(img_names) == 1:
        now_time = time.strftime("%Y-%m-%d-%H:%M:%S", time.localtime())
        print('========START-One========')
        final_result = []
        vis_images = []
        height_difference = []
        for img_index in range(len(img_names)):
            img_name = img_names[img_index]
            try:
                img, orig_img, im_name, im_dim_list = [], [], [], []
                inp_dim = int(self.args.inp_dim)
                im_name_k = img_name
                img_k, orig_img_k, im_dim_list_k = prep_image(im_name_k, inp_dim)
                img.append(img_k)
                orig_img.append(orig_img_k)
                im_name.append(im_name_k)
                im_dim_list.append(im_dim_list_k)
            except Exception:
                print('index-{}: image has a problem'.format(img_index))
                final_result.append((None, None))
                continue  # skip images that failed to load
            with torch.no_grad():
                img = torch.cat(img)
                # convert the CHW network input to a channel-reversed HWC image
                # for visualization
                vis_img = img.numpy()[0]
                vis_img = np.transpose(vis_img, (1, 2, 0))
                vis_img = vis_img[:, :, ::-1]
                vis_images.append(vis_img)
                im_dim_list = torch.FloatTensor(im_dim_list).repeat(1, 2)
                img = img.cuda()
                prediction = self.det_model(img, CUDA=True)
                dets = dynamic_write_results(prediction, self.args.confidence,
                                             self.args.num_classes, nms=True,
                                             nms_conf=self.args.nms_thesh)
                if isinstance(dets, int) or dets.shape[0] == 0:
                    print('index-{}: No person detected'.format(img_index))
                    final_result.append((None, None))
                else:
                    print('index-{}: Person detected'.format(img_index))
                    final_result.append((4, 4))
        return final_result, vis_images, now_time, height_difference
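# Hypothetical usage of `get_pose` above: pass a list of frame paths and read
# back the per-frame wrist displacements relative to the first frame. The
# constructor name and file paths below are illustrative only.
# estimator = PoseEstimator(args)  # assumed constructor for this class
# frames = ['frames/000.jpg', 'frames/001.jpg', 'frames/002.jpg']
# moves, vis_images, stamp, height_diff = estimator.get_pose(frames)
# for i, (lm, rm) in enumerate(moves):
#     if lm is None:
#         print('frame {}: no person'.format(i))
#     else:
#         print('frame {}: left wrist moved {:.1f}px, right {:.1f}px'.format(i, lm, rm))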
def run():
    ret = []
    start_time = getTime()
    with torch.no_grad():
        (inps, orig_img, im_name, boxes, scores, pt1, pt2) = det_processor.read()
        if boxes is None or boxes.nelement() == 0:
            writer.save(None, None, None, None, None, orig_img, im_name.split('/')[-1])
        ckpt_time, det_time = getTime(start_time)
        runtime_profile['dt'].append(det_time)

        # Pose Estimation
        if inps is None:
            writer.save(boxes, scores, [], pt1, pt2, orig_img, im_name.split('/')[-1])
            return []
        datalen = inps.size(0)
        leftover = 1 if datalen % batchSize else 0
        num_batches = datalen // batchSize + leftover
        hm = []
        for j in range(num_batches):
            inps_j = inps[j * batchSize:min((j + 1) * batchSize, datalen)].cuda()
            hm_j = pose_model(inps_j)
            hm.append(hm_j)
        hm = torch.cat(hm)
        ckpt_time, pose_time = getTime(ckpt_time)
        runtime_profile['pt'].append(pose_time)
        hm = hm.cpu().data

        preds_hm, preds_img, preds_scores = getPrediction(
            hm, pt1, pt2, opt.inputResH, opt.inputResW, opt.outputResH, opt.outputResW)
        result = pose_nms(boxes, scores, preds_img, preds_scores)
        print("\n")

        # keypoints of interest; index 17 is the neck, synthesized below
        # as the midpoint of the two shoulders
        part_names = {0: 'nose', 1: 'left eye', 2: 'right eye',
                      3: 'left ear', 4: 'right ear',
                      5: 'left shoulder', 6: 'right shoulder',
                      17: 'neck'}
        for human in result:
            kp_preds = human['keypoints']
            kp_scores = human['kp_score']
            # append the neck keypoint and its score as shoulder midpoints
            kp_preds = torch.cat(
                (kp_preds, torch.unsqueeze((kp_preds[5, :] + kp_preds[6, :]) / 2, 0)))
            kp_scores = torch.cat(
                (kp_scores, torch.unsqueeze((kp_scores[5, :] + kp_scores[6, :]) / 2, 0)))
            for n in range(kp_scores.shape[0]):
                if kp_scores[n] <= 0.05:
                    continue
                if n not in part_names:
                    continue
                cor_x, cor_y = int(kp_preds[n, 0]), int(kp_preds[n, 1])
                print('{}: {}, {}'.format(part_names[n], cor_x, cor_y))
                ret.append([n, cor_x, cor_y])

        writer.save(boxes, scores, hm, pt1, pt2, orig_img, im_name.split('/')[-1])
        ckpt_time, post_time = getTime(ckpt_time)
        runtime_profile['pn'].append(post_time)
        if args.profile:  # TQDM
            im_names_desc.set_description(
                'det time: {dt:.3f} | pose time: {pt:.2f} | post processing: {pn:.4f}'.format(
                    dt=np.mean(runtime_profile['dt']),
                    pt=np.mean(runtime_profile['pt']),
                    pn=np.mean(runtime_profile['pn'])))
    return ret
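# A minimal, self-contained illustration of the neck synthesis used above:
# the neck (index 17) is the midpoint of the two shoulders (indices 5 and 6).
# The coordinate values are dummy data for demonstration.
import torch

kp_preds = torch.tensor([[100.0, 50.0]] * 17)  # dummy 17-keypoint pose
kp_preds[5] = torch.tensor([80.0, 120.0])      # left shoulder
kp_preds[6] = torch.tensor([120.0, 120.0])     # right shoulder
neck = (kp_preds[5] + kp_preds[6]) / 2
kp_preds = torch.cat((kp_preds, neck.unsqueeze(0)))
print(kp_preds[17])  # tensor([100., 120.])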