Example #1
def PoseEstimatorPredict(image_path, plot=False, resolution='432x368', scales=[None], model='mobilenet_thin'):
    '''
    input:
        image_path: path to the image (jpg)
        plot = False: whether to draw; if True, two items are returned: keypoint info + annotated image matrix
        resolution = '432x368': network input size
        scales = [None]
        model = 'mobilenet_thin': model choice

    output:
        if plot is False, returns one item: the keypoint info
        if plot is True, returns two items: keypoint info + annotated image matrix
    '''
    w, h = model_wh(resolution)
    e = TfPoseEstimator(get_graph_path(model), target_size=(w, h))
    image = common.read_imgfile(image_path, None, None)
    t = time.time()
    humans = e.inference(image, scales=scales)  # main inference call
    elapsed = time.time() - t

    logger.info('inference image: %s in %.4f seconds.' % (image_path, elapsed))
    centers = get_keypoint(image, humans)                               # collect the keypoint info

    if plot:
        # with plotting:
        image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)  # 画图函数
        fig = plt.figure()
        a = fig.add_subplot(2, 2, 1)
        a.set_title('Result')
        plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
        return centers,image
    else:
        # without plotting:
        return centers
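A minimal usage sketch for the helper above, assuming the same imports as the example; the image path is a placeholder, and plot=True returns both the keypoint info and the annotated image matrix.

# Hypothetical call to PoseEstimatorPredict ('images/p1.jpg' is a placeholder path)
centers, annotated = PoseEstimatorPredict('images/p1.jpg', plot=True)
print(centers)  # keypoint info
plt.imshow(cv2.cvtColor(annotated, cv2.COLOR_BGR2RGB))
plt.show()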
Example #2
def cb_pose(data):
    # get image with pose time
    t = data.header.stamp
    image = vf.get_latest(t, remove_older=True)
    if image is None:
        rospy.logwarn('No received images.')
        return

    h, w = image.shape[:2]
    if resize_ratio > 0:
        image = cv2.resize(image, (int(resize_ratio*w), int(resize_ratio*h)), interpolation=cv2.INTER_LINEAR)

    # ros topic to Person instance
    humans = []
    for p_idx, person in enumerate(data.persons):
        human = Human([])
        for body_part in person.body_part:
            part = BodyPart('', body_part.part_id, body_part.x, body_part.y, body_part.confidence)
            human.body_parts[body_part.part_id] = part

        humans.append(human)

    # draw
    image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
    pub_img.publish(cv_bridge.cv2_to_imgmsg(image, 'bgr8'))
Example #3
    def liveData(self):
        directory_in_str = sys.path[0] + r"/../images/LiveTest/"

        try:
            os.remove(outputfile)
            os.remove(cleanedOutputfile)
        except OSError:
            pass

        for file in os.listdir(directory_in_str):
            filename = os.fsdecode(file)
            if filename.endswith(".jpg") or filename.endswith(".png"):
                fullpath = directory_in_str + filename

                # estimate human poses from a single image !
                image = common.read_imgfile(fullpath, None, None)

                humans = self.e.inference(image, scales=self.scales)

                image = TfPoseEstimator.draw_humans(image,
                                                    humans,
                                                    imgcopy=False)
                # cv2.imshow('tf-pose-estimation result', image)
                # cv2.waitKey()

                myFile = open(outputfile, 'a')
                # myFile.write(str(filename) + ',')
                # print(filename)
                myFile.write('\n')
                # break
                myFile.close()
Example #4
def Estimate_3Ddata(image, e, scales):
    # t0 = time.time()
    # # estimate human poses from a single image !
    # t = time.time()
    humans = e.inference(image, scales=scales)
    #elapsed = time.time() - t
    image = TfPoseEstimator.draw_humans(image, humans)
    #logger.info('inference image:%.4f seconds.' % (elapsed))
    logger.info('3d lifting initialization.')

    poseLifting = Prob3dPose('lifting/models/prob_model_params.mat')

    standard_w = 320
    standard_h = 240

    pose_2d_mpiis = []
    visibilities = []
    for human in humans:
        pose_2d_mpii, visibility = common.MPIIPart.from_coco(human)
        pose_2d_mpiis.append([(int(x * standard_w + 0.5),
                               int(y * standard_h + 0.5))
                              for x, y in pose_2d_mpii])
        visibilities.append(visibility)

    pose_2d_mpiis = np.array(pose_2d_mpiis)
    visibilities = np.array(visibilities)
    if (pose_2d_mpiis.ndim != 3):
        return 0
    transformed_pose2d, weights = poseLifting.transform_joints(
        pose_2d_mpiis, visibilities)
    pose_3d = poseLifting.compute_3d(transformed_pose2d, weights)

    #alltime= time.time() - t0
    #logger.info('estimate all time:%.4f seconds.' % (alltime))
    return pose_3d, image
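A hedged driver for Estimate_3Ddata, mirroring the estimator setup used in the other examples; the model, resolution and image path are placeholders, and the 0 return value (unexpected keypoint dimensions) is checked before unpacking.

# Sketch only: build an estimator as in the surrounding examples and run the 3D lifting helper
w, h = model_wh('432x368')
e = TfPoseEstimator(get_graph_path('mobilenet_thin'), target_size=(w, h))
image = common.read_imgfile('images/p1.jpg', None, None)  # placeholder path
result = Estimate_3Ddata(image, e, scales=[None])
if result != 0:
    pose_3d, drawn = result
    print(len(pose_3d), 'pose(s) lifted to 3D')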
Example #5
def cnvt(img) :
    os.chdir(read_path)
    image = cv2.imread(img)
    humans = e.inference(image)
    image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
    os.chdir(save_path)
    cv2.imwrite(img, image)
    os.chdir(old_path)
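cnvt relies on module-level read_path, save_path, old_path and an estimator e; a minimal setup sketch, assuming os is imported as in the example and with every path and the model as placeholders.

# Assumed module-level setup for cnvt (all paths and the model are placeholders)
old_path = os.getcwd()
read_path = os.path.join(old_path, 'images')
save_path = os.path.join(old_path, 'output')
w, h = model_wh('432x368')
e = TfPoseEstimator(get_graph_path('mobilenet_thin'), target_size=(w, h))
cnvt('p1.jpg')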
Example #6
def main():
    parser = argparse.ArgumentParser(
        description='tf-pose-estimation run by folder')
    parser.add_argument('--folder', type=str, default='./images/')
    parser.add_argument('--resolution',
                        type=str,
                        default='432x368',
                        help='network input resolution. default=432x368')
    parser.add_argument('--model',
                        type=str,
                        default='mobilenet_thin',
                        help='cmu / mobilenet_thin')
    parser.add_argument('--scales',
                        type=str,
                        default='[None]',
                        help='for multiple scales, eg. [1.0, (1.1, 0.05)]')
    args = parser.parse_args()
    scales = ast.literal_eval(args.scales)

    w, h = model_wh(args.resolution)
    e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))
    types = ('*.png', '*.jpg')
    files_grabbed = []
    for files in types:
        files_grabbed.extend(glob.glob(os.path.join(args.folder, files)))
    all_humans = dict()
    if not os.path.exists('output'):
        os.mkdir('output')
    for i, file in enumerate(files_grabbed):
        # estimate human poses from a single image !
        image = common.read_imgfile(file, None, None)
        t = time.time()
        humans = e.inference(image, scales=scales)
        elapsed = time.time() - t

        logger.info('inference image #%d: %s in %.4f seconds.' %
                    (i, file, elapsed))

        image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
        # cv2.imshow('tf-pose-estimation result', image)
        filename = 'pose_{}.png'.format(i)
        output_filepath = os.path.join('output', filename)
        cv2.imwrite(output_filepath, image)
        logger.info('image saved: {}'.format(output_filepath))
        # cv2.waitKey(5000)

        all_humans[file.replace(args.folder, '')] = humans

    with open(os.path.join(args.folder, 'pose.dil'), 'wb') as f:
        dill.dump(all_humans, f, protocol=dill.HIGHEST_PROTOCOL)
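Since the poses are serialized with dill, a short sketch of reading pose.dil back for later analysis; the folder path here assumes the --folder default used above.

# Sketch: load the dumped poses back (folder path is an assumption)
import dill
with open('./images/pose.dil', 'rb') as f:
    all_humans = dill.load(f)
for name, humans in all_humans.items():
    print(name, len(humans), 'humans')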
Example #7
def joint():
    try:
        data = request.data
        with open('/tmp/temp.jpg', 'wb') as f:
            f.write(data)
        img = common.read_imgfile('/tmp/temp.jpg', 432, 368)
        scales = ast.literal_eval(args.scales)
        e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))
        humans = e.inference(img, scales=scales)
        image = TfPoseEstimator.draw_humans(img, humans, imgcopy=False)
        b_str = base64.b64encode(img2bytes(
            Image.fromarray(image))).decode('utf-8')
        return jsonify({"image": b_str})
    except Exception as e:
        return jsonify({"error": str(traceback.format_exc())})
Example #8
def cnvt(img, name):
    os.chdir(read_path)
    image = cv2.imread(img)

    os.chdir(old_path)
    model = 'mobilenet_thin'
    resolution = '432x368'
    w, h = model_wh(resolution)

    e = TfPoseEstimator(get_graph_path(model), target_size=(w, h))
    humans = e.inference(image)
    blank_image = np.zeros((h,w,3), np.uint8)
    image = TfPoseEstimator.draw_humans(blank_image, humans, imgcopy=False)

    os.chdir(save_path)
    cv2.imwrite(name, image)
    print("Saved - %s As - %s" % (img, name))

    os.chdir(old_path)
Example #9
    def detectCandidates(self, frame):

        cands = []
        humans = self.estimator.inference(frame)
        image_h, image_w = frame.shape[:2]

        feat_list = []
        for i in range(len(humans)):
            if i >= len(humans):
                break

            keys = humans[i].body_parts.keys()
            if len(np.setdiff1d(needed_elements, keys)):
                del humans[i]
                continue
            neck = humans[i].body_parts[1]
            lhip = humans[i].body_parts[8]
            rhip = humans[i].body_parts[11]
            center = (neck.x + lhip.x + rhip.x) / 3, (neck.y + lhip.y +
                                                      rhip.y) / 3

            feats = []
            for idx in needed_elements:
                part = humans[i].body_parts[idx]
                feats = feats + [part.x - center[0], part.y - center[1]]

            feat_list.append(np.asarray(feats))

            center = image_w * center[0], image_h * center[1]
            cv2.circle(frame, (int(center[0]), int(center[1])), 3, (255, 0, 0),
                       3)
            cands.append(np.asarray(center, dtype=np.float32))

        # print feat_list[0]

        if (self.show):
            frame = TfPoseEstimator.draw_humans(frame, humans, imgcopy=False)
        return cands, feat_list, frame
Example #10
def generate_skeletonize_video():
    """
    The method takes images , save a skeleton per image and creates a video output
    :return:
    """
    input_folder = "./videos/demo/"
    output_folder = "./videos"
    results_folder = "./images/demo/"
    input_video = "./videos/demo.mp4"
    images = video_utils.load_images_from_folder(input_folder)
    w = 432
    h = 368
    estimator = TfPoseEstimator(get_graph_path('mobilenet_thin'),
                                target_size=(w, h))
    count = 1
    for i in images:
        image_parts = estimator.inference(i, scales=None)
        image_skeleton = TfPoseEstimator.draw_humans(i,
                                                     image_parts,
                                                     imgcopy=True)
        cv2.imwrite(r".\images\demo\{}.png".format(count), image_skeleton)
        count = count + 1
    video_utils.create_video(input_video, results_folder, output_folder)
Example #11
    def detect(self, image):
        logger.debug('image preprocess+')
        if self.zoom < 1.0:
            canvas = np.zeros_like(image)
            img_scaled = cv2.resize(image,
                                    None,
                                    fx=self.zoom,
                                    fy=self.zoom,
                                    interpolation=cv2.INTER_LINEAR)
            dx = (canvas.shape[1] - img_scaled.shape[1]) // 2
            dy = (canvas.shape[0] - img_scaled.shape[0]) // 2
            canvas[dy:dy + img_scaled.shape[0],
                   dx:dx + img_scaled.shape[1]] = img_scaled
            image = canvas
        elif self.zoom > 1.0:
            img_scaled = cv2.resize(image,
                                    None,
                                    fx=self.zoom,
                                    fy=self.zoom,
                                    interpolation=cv2.INTER_LINEAR)
            dx = (img_scaled.shape[1] - image.shape[1]) // 2
            dy = (img_scaled.shape[0] - image.shape[0]) // 2
            image = img_scaled[dy:image.shape[0], dx:image.shape[1]]

        logger.debug('image process+')
        humans = self.e.inference(image)

        logger.debug('postprocess+')
        image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
        fps_time = 0

        logger.debug('show+')
        cv2.putText(image, "FPS: %f" % (1.0 / (time.time() - fps_time)),
                    (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        cv2.imshow('tf-pose-estimation result', image)
        fps_time = time.time()
        logger.debug('finished+')
Example #12
    parser.add_argument('--showBG',
                        type=bool,
                        default=True,
                        help='False to show skeleton only.')
    args = parser.parse_args()

    w, h = 432, 368
    e = TfPoseEstimator('graph/{}/graph_freeze.pb'.format(args.model),
                        target_size=(w, h))
    cap = cv2.VideoCapture(args.video)

    if cap.isOpened() is False:
        print("Error opening video stream or file")
    while cap.isOpened():
        ret_val, image = cap.read()
        tic = time.time()
        humans = e.inference(image, resize_to_default=True, upsample_size=4.0)
        if not args.showBG:
            image = np.zeros(image.shape)
        res = TfPoseEstimator.draw_humans(image, humans, imgcopy=True)
        cv2.putText(res, "FPS: %f" % (1.0 / (time.time() - tic)), (10, 10),
                    cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        cv2.imshow('rr', res)
        toc = time.time()
        logger.info('inference %.4f seconds.' % (toc - tic))

        if cv2.waitKey(1) == 27:
            break
    cv2.destroyAllWindows()
logger.debug('finished+')
Example #13
def doCNNTracking(args):    
    global is_tracking,logger

    fps_time = 0

    logger.debug('initialization %s : %s' % (args.model, get_graph_path(args.model)))
    w, h = model_wh(args.resolution)
    e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))
    logger.debug('cam read+')
    pipeline = rs.pipeline()
    pipeline.start()
    
    frames = pipeline.wait_for_frames()
    #get depth image
    depth = frames.get_depth_frame() 
    depth_image_data = depth.as_frame().get_data()
    depth_image = np.asanyarray(depth_image_data)
    
    
    
    logger.info('cam depth image=%dx%d' % (depth_image.shape[1], depth_image.shape[0])) 
    logger.info('camera ready') 
    
    
    #clip box that maps the depth image onto the rgb image
    clip_box = [100,-100,290,-300]
    

    while (True):
        if(is_tracking):
            fps_time = time.time()
            frames = pipeline.wait_for_frames()
            #get rgb image
            image_frame = frames.get_color_frame()
            image_data = image_frame.as_frame().get_data()
            image = np.asanyarray(image_data)
            
            #change bgr 2 rgb
            image = np.array(image[...,::-1])
            origen_image = image

            #get depth image
            depth = frames.get_depth_frame() 
            depth_image_data = depth.as_frame().get_data()
            depth_image = np.asanyarray(depth_image_data)
            depth_image = depth_image[(int)(clip_box[0]):(int)(clip_box[1]),(int)(clip_box[2]):(int)(clip_box[3])]
            depth_image = cv2.resize(depth_image, (640, 480), interpolation=cv2.INTER_CUBIC)
            depth_image=depth_image/30
            depth_image = depth_image.astype(np.uint8)
            
            #depth-based background-removal mask
            thresh=cv2.inRange(depth_image,20,200)

            #morphological clean-up of the background mask
            kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(1, 1))  
            eroded = cv2.erode(thresh,kernel)
            kernel2 = cv2.getStructuringElement(cv2.MORPH_RECT,(5, 5))  
            dilated = cv2.dilate(eroded,kernel2)
            
            #brightness mask
            bright_mask = np.zeros(image.shape);
            bright_mask.fill(200)
            bright_mask = bright_mask.astype(np.uint8);
            bright_mask = cv2.bitwise_and(bright_mask, bright_mask, mask=dilated)
            
            #brightness adjustment of the rgb image
#             image = cv2.bitwise_and(image, image, mask=dilated)
            
            image = image.astype(int)+200-bright_mask.astype(int);
            image = np.clip(image, 0, 255)
            image = image.astype(np.uint8);
            

            
            
            
            #tf-pose image scaling
            if args.zoom < 1.0:
                canvas = np.zeros_like(image)
                img_scaled = cv2.resize(image, None, fx=args.zoom, fy=args.zoom, interpolation=cv2.INTER_LINEAR)
                dx = (canvas.shape[1] - img_scaled.shape[1]) // 2
                dy = (canvas.shape[0] - img_scaled.shape[0]) // 2
                canvas[dy:dy + img_scaled.shape[0], dx:dx + img_scaled.shape[1]] = img_scaled
                image = canvas
            elif args.zoom > 1.0:
                img_scaled = cv2.resize(image, None, fx=args.zoom, fy=args.zoom, interpolation=cv2.INTER_LINEAR)
                dx = (img_scaled.shape[1] - image.shape[1]) // 2
                dy = (img_scaled.shape[0] - image.shape[0]) // 2
                image = img_scaled[dy:image.shape[0], dx:image.shape[1]]

            #tf-pose inference
            humans = e.inference(image)
            
#             #get joints
#             jdata = TfPoseEstimator.get_json_data(image.shape[0],image.shape[1],humans)
#             if(len(jdata)>2):
#                 try:
#                     #send position data to the server
#                     chating_room.sendTrackingData(jdata,'track')
#                 except:
#                     print("Cannot send data to server.")
           
            
            #depth image after background removal
            depth_masked = cv2.bitwise_and(depth_image, depth_image, mask=dilated)
            
            human_json_datas = []
            for human in humans:
                #compute depth data per joint
                depthDatas=[]
                image_h, image_w = image.shape[:2]
                # get point
                for i in range(common.CocoPart.Background.value):
                    if i not in human.body_parts.keys():
                        continue
                    body_part = human.body_parts[i]
                    y= int(body_part.y * image_h+ 0.5)
                    x = int(body_part.x * image_w + 0.5)
                    s=5;
                    mat = depth_masked[y-s if(y-s>=0) else 0:y+s if(y+s<=479) else 479,x-s if(x-s>=0) else 0:x+s if (x+s<=639) else 639]

                    count=0;
                    sum_depth=0;

                    for j in range (mat.shape[0]):
                        for k in range (mat.shape[1]):     
                            if mat[j,k]!=0:
                                sum_depth+=mat[j,k]
                                count+=1
                                
                    if(count>0):
                        depth=sum_depth/count
                    else:
                        depth=0
                        
                    try:
                        depthDatas.append(JointDepthData(i,x,y,depth).__dict__)
                    except:
                        print("err:"+str(x)+" "+str(y)+" "+str(body_part.x )+" "+str(body_part.y ))
                    

                human_json_datas.append(json.dumps(depthDatas))
            json_data = json.dumps(human_json_datas)
            if(len(json_data)>2):
                try:
                    #send depth data to the server
                    chating_room.sendTrackingData(json_data,'track_depth')
                except:
                    print("Cannot send depth data to server.")
                        
            depth_image =  cv2.applyColorMap(cv2.convertScaleAbs(depth_image/25), cv2.COLORMAP_JET)
            cv2.circle(image,(320,240), 5, (255,255,255), -1)
            cv2.circle(image,(304,480-98), 5, (0,0,255), -1)
            cv2.circle(image,(377,480-197), 5, (0,160,255), -1)
            cv2.circle(image,(106,480-49), 5, (0,255,255), -1)
            cv2.circle(image,(460,480-136), 5, (0,255,0), -1)
            cv2.circle(image,(481,480-134), 5, (255,0,0), -1)
            cv2.circle(image,(85,480-143), 5, (255,160,0), -1)
            image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)

            cv2.putText(image,"FPS: %f" % (1.0 / (time.time() - fps_time)),(10, 10),  cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0, 255, 0), 2)
            
            cv2.imshow('tf-pose-estimation result', image)
            
            if cv2.waitKey(25) & 0xFF == ord('q'):
                cv2.destroyAllWindows()
                break


    cv2.destroyAllWindows()
Example #14
                'keypoints':
                write_coco_json(human, img_meta['width'], img_meta['height']),
                'score':
                human.score
            }
            result.append(item)
            scores += item['score']

        avg_score = scores / len(humans) if len(humans) > 0 else 0
        if args.data_idx >= 0:
            logger.info('score: %s %s %s %s', k, len(humans), len(anns), avg_score)

            import matplotlib.pyplot as plt
            fig = plt.figure()
            a = fig.add_subplot(2, 3, 1)
            plt.imshow(e.draw_humans(image, humans, True))

            a = fig.add_subplot(2, 3, 2)
            # plt.imshow(cv2.resize(image, (e.heatMat.shape[1], e.heatMat.shape[0])), alpha=0.5)
            tmp = np.amax(e.heatMat[:, :, :-1], axis=2)
            plt.imshow(tmp, cmap=plt.cm.gray, alpha=0.5)
            plt.colorbar()

            tmp2 = e.pafMat.transpose((2, 0, 1))
            tmp2_odd = np.amax(np.absolute(tmp2[::2, :, :]), axis=0)
            tmp2_even = np.amax(np.absolute(tmp2[1::2, :, :]), axis=0)

            a = fig.add_subplot(2, 3, 4)
            a.set_title('Vectormap-x')
            # plt.imshow(CocoPose.get_bgimg(inp, target_size=(vectmap.shape[1], vectmap.shape[0])), alpha=0.5)
            plt.imshow(tmp2_odd, cmap=plt.cm.gray, alpha=0.5)
Example #15
def main():

    global fps_time

    if len(sys.argv) != 2:
        print("Please specify path to .svo file.")
        exit()

    filepath = sys.argv[1]
    ite = loadall("pickle.dat")
    print("Reading SVO file: {0}".format(filepath))
    t = time.time()
    init = zcam.PyInitParameters(svo_input_filename=filepath,
                                 svo_real_time_mode=False)
    init.depth_mode = sl.PyDEPTH_MODE.PyDEPTH_MODE_QUALITY
    cam = zcam.PyZEDCamera()
    status = cam.open(init)
    if status != tp.PyERROR_CODE.PySUCCESS:
        print(repr(status))
        exit()

    runtime = zcam.PyRuntimeParameters()
    mat = core.PyMat()
    depth = core.PyMat()
    print('Initialisation of svo took ', time.time() - t, ' seconds')
    key = ''
    graph_path = "./models/graph/mobilenet_thin/graph_opt.pb"  #"./models/graph/cmu/graph_opt.pb"
    target = (432, 368)  #(656,368) (432,368) (1312,736)
    e = TfPoseEstimator(graph_path, target)
    nbf = 0
    nbp = 0
    print("  Save the current image:     s")
    print("  Quit the video reading:     q\n")
    while key != 113:  # for 'q' key
        t = time.time()
        err = cam.grab(runtime)
        if err == tp.PyERROR_CODE.PySUCCESS:
            retrieve_start = time.time()
            cam.retrieve_image(mat)
            cam.retrieve_measure(depth, sl.PyMEASURE.PyMEASURE_XYZRGBA)
            image = mat.get_data()
            retrieve_end = time.time()
            print('Retrieving of svo data took ',
                  retrieve_end - retrieve_start, ' seconds')
            pt = np.array(depth.get_data()[:, :, 0:3], dtype=np.float64)
            print(pt.shape, image.shape)
            pt[np.logical_not(np.isfinite(pt))] = 0.0
            nbf += 1
            pose_start = time.time()
            humans = e.inference(np.array(image[:, :, 0:3]))
            image_h, image_w = image.shape[:2]
            pose_end = time.time()
            print("Inference took", pose_end - pose_start, 'seconds')

            for pid, human in enumerate(humans):
                for kid, bdp in enumerate(human.body_parts.values()):
                    #print(bdp)
                    #print(kid, bdp.x, bdp.y, bdp.score)
                    if (bdp.score > 5.0):
                        print((int(bdp.x * image_w + 0.5),
                               int(bdp.y * image_h + 0.5)))
                        print(pt[int(bdp.y * image_h + 0.5),
                                 int(bdp.x * image_w + 0.5)])
                        nbp += 1
                        cv2.circle(image, (int(bdp.x * image_w + 0.5),
                                           int(bdp.y * image_h + 0.5)),
                                   5, (255, 255, 255),
                                   thickness=5,
                                   lineType=8,
                                   shift=0)
                        #coord_uv[pid,kid,:]=np.array([int(bdp.x*image_w+0.5),int(bdp.y*image_h+0/5)])
                        #coord_vis[pid,kid]=bdp.score/10

            image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
            cv2.putText(image, "FPS: %f" % (1.0 / (time.time() - fps_time)),
                        (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0),
                        2)

            #print(time.time()-t)
            cv2.imshow('tf-pose-estimation result', image)
            fps_time = time.time()
            key = cv2.waitKey(1)
            saving_image(key, mat)
            #time.sleep(0.033)
        else:
            key = cv2.waitKey(1)
    cv2.destroyAllWindows()

    print(nbp / nbf, (nbp / nbf) / 18)
    #saving_depth(cam)
    #saving_point_cloud(cam)

    cam.close()
    print("\nFINISH")
Example #16
def pose_dd():
    fps_time = 0
    global humans
    global BREAK 
    global image 
    parser = argparse.ArgumentParser(description='tf-pose-estimation realtime webcam')
    parser.add_argument('--camera', type=int, default=0)
    parser.add_argument('--zoom', type=float, default=1.0)
    parser.add_argument('--model', type=str, default='mobilenet_thin_432x368', help='cmu_640x480 / cmu_640x360 / mobilenet_thin_432x368')
    parser.add_argument('--show-process', type=bool, default=False,
                        help='for debug purpose, if enabled, speed for inference is dropped.')
    args = parser.parse_args()

    ##logger.debug('initialization %s : %s' % (args.model, get_graph_path(args.model)))
    w, h = model_wh(args.model)
    e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))
    #logger.debug('cam read+')
    cam = cv2.VideoCapture(args.camera)
    ret_val, image = cam.read()
    ##logger.info('cam image=%dx%d' % (image.shape[1], image.shape[0]))

    while True:
        ret_val, image = cam.read()

        #logger.debug('image preprocess+')
        if args.zoom < 1.0:
            canvas = np.zeros_like(image)
            img_scaled = cv2.resize(image, None, fx=args.zoom, fy=args.zoom, interpolation=cv2.INTER_LINEAR)
            dx = (canvas.shape[1] - img_scaled.shape[1]) // 2
            dy = (canvas.shape[0] - img_scaled.shape[0]) // 2
            canvas[dy:dy + img_scaled.shape[0], dx:dx + img_scaled.shape[1]] = img_scaled
            image = canvas
        elif args.zoom > 1.0:
            img_scaled = cv2.resize(image, None, fx=args.zoom, fy=args.zoom, interpolation=cv2.INTER_LINEAR)
            dx = (img_scaled.shape[1] - image.shape[1]) // 2
            dy = (img_scaled.shape[0] - image.shape[0]) // 2
            image = img_scaled[dy:image.shape[0], dx:image.shape[1]]

        #logger.debug('image process+')
        humans = e.inference(image)

        #logger.debug('postprocess+')
        image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)

        #logger.debug('show+')
        cv2.putText(image,
                    "FPS: %f" % (1.0 / (time.time() - fps_time)),
                    (10, 10),  cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                    (0, 255, 0), 2)
        cv2.imshow('tf-pose-estimation result', image)
        fps_time = time.time()
        #out.write(image)
        if cv2.waitKey(1) == 27:
            #out.release()
            cv2.destroyAllWindows()
            BREAK = True
            clientsocket.send("off".encode('utf-8'))
            print("off sent")
            import sys
            sys.exit(0)
            break
Example #17
    def adHocData(self):
        directory_in_str = sys.path[0] + "\\..\\images\\OurTest\\"

        # Delete old csv files
        try:
            os.remove(outputfile)
            os.remove(cleanedOutputfile)
        except OSError:
            pass

        # Run through every image in the folder
        for file in os.listdir(directory_in_str):
            filename = os.fsdecode(file)
            if filename.endswith(".jpg") or filename.endswith(".png"):
                fullpath = directory_in_str + filename

                # Estimate human poses from a single image !
                image = common.read_imgfile(fullpath, None, None)
                # image = cv2.fastNlMeansDenoisingColored(image, None, 10, 10, 7, 21)
                t = time.time()
                humans = self.e.inference(image, scales=self.scales)
                elapsed = time.time() - t

                logger.info('inference image: %s in %.4f seconds.' %
                            (fullpath, elapsed))

                image = TfPoseEstimator.draw_humans(image,
                                                    humans,
                                                    imgcopy=False)
                # cv2.imshow('tf-pose-estimation result', image)
                # cv2.waitKey()

                myFile = open(outputfile, 'a')
                # myFile.write(str(filename) + ',')
                # print(filename)
                myFile.write('\n')
                myFile.close()

                # Attempt to plot our skeletons onto the image
                try:

                    fig = plt.figure()
                    a = fig.add_subplot(2, 2, 1)
                    a.set_title('Result')
                    plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))

                    bgimg = cv2.cvtColor(image.astype(np.uint8),
                                         cv2.COLOR_BGR2RGB)
                    bgimg = cv2.resize(
                        bgimg,
                        (self.e.heatMat.shape[1], self.e.heatMat.shape[0]),
                        interpolation=cv2.INTER_AREA)

                    # show network output
                    a = fig.add_subplot(2, 2, 2)
                    plt.imshow(bgimg, alpha=0.5)
                    tmp = np.amax(self.e.heatMat[:, :, :-1], axis=2)
                    plt.imshow(tmp, cmap=plt.cm.gray, alpha=0.5)
                    plt.colorbar()

                    tmp2 = self.e.pafMat.transpose((2, 0, 1))
                    tmp2_odd = np.amax(np.absolute(tmp2[::2, :, :]), axis=0)
                    tmp2_even = np.amax(np.absolute(tmp2[1::2, :, :]), axis=0)

                    a = fig.add_subplot(2, 2, 3)
                    a.set_title('Vectormap-x')
                    # plt.imshow(CocoPose.get_bgimg(inp, target_size=(vectmap.shape[1], vectmap.shape[0])), alpha=0.5)
                    plt.imshow(tmp2_odd, cmap=plt.cm.gray, alpha=0.5)
                    plt.colorbar()

                    a = fig.add_subplot(2, 2, 4)
                    a.set_title('Vectormap-y')
                    # plt.imshow(CocoPose.get_bgimg(inp, target_size=(vectmap.shape[1], vectmap.shape[0])), alpha=0.5)
                    plt.imshow(tmp2_even, cmap=plt.cm.gray, alpha=0.5)
                    plt.colorbar()
                    plt.show()

                    logger.info('3d lifting initialization.')
                    poseLifting = Prob3dPose(
                        sys.path[0] +
                        '\\lifting\\models\\prob_model_params.mat')

                    image_h, image_w = image.shape[:2]
                    standard_w = 640
                    standard_h = 480

                    pose_2d_mpiis = []
                    visibilities = []
                    for human in humans:
                        pose_2d_mpii, visibility = common.MPIIPart.from_coco(
                            human)
                        pose_2d_mpiis.append([(int(x * standard_w + 0.5),
                                               int(y * standard_h + 0.5))
                                              for x, y in pose_2d_mpii])
                        visibilities.append(visibility)

                    pose_2d_mpiis = np.array(pose_2d_mpiis)
                    visibilities = np.array(visibilities)
                    transformed_pose2d, weights = poseLifting.transform_joints(
                        pose_2d_mpiis, visibilities)
                    pose_3d = poseLifting.compute_3d(transformed_pose2d,
                                                     weights)

                    for i, single_3d in enumerate(pose_3d):
                        plot_pose(single_3d)
                    plt.show()

                except:
                    print("Error when plotting image ")

        dataScriptGenerator.dataCleanup(self)
Example #18
def process_image(image):
    humans = e.inference(image, scales=scales)
    image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
    return image
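process_image assumes a module-level estimator e and scales; a hedged setup sketch, with the model, resolution and image path as placeholders consistent with the defaults used elsewhere in these examples.

# Assumed globals for process_image (model/resolution/path are placeholders)
w, h = model_wh('432x368')
e = TfPoseEstimator(get_graph_path('mobilenet_thin'), target_size=(w, h))
scales = [None]
result = process_image(common.read_imgfile('images/p1.jpg', None, None))
cv2.imwrite('p1_pose.png', result)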
Example #19
    def mesh(self, image):
        image_h, image_w = image.shape[:2]
        width = 300
        height = 300
        pose_2d_mpiis = []
        visibilities = []
        zoom = 1.0
        if zoom < 1.0:
            canvas = np.zeros_like(image)
            img_scaled = cv2.resize(image,
                                    None,
                                    fx=args.zoom,
                                    fy=args.zoom,
                                    interpolation=cv2.INTER_LINEAR)
            dx = (canvas.shape[1] - img_scaled.shape[1]) // 2
            dy = (canvas.shape[0] - img_scaled.shape[0]) // 2
            canvas[dy:dy + img_scaled.shape[0],
                   dx:dx + img_scaled.shape[1]] = img_scaled
            image = canvas
        elif zoom > 1.0:
            img_scaled = cv2.resize(image,
                                    None,
                                    fx=args.zoom,
                                    fy=args.zoom,
                                    interpolation=cv2.INTER_LINEAR)
            dx = (img_scaled.shape[1] - image.shape[1]) // 2
            dy = (img_scaled.shape[0] - image.shape[0]) // 2
            image = img_scaled[dy:image.shape[0], dx:image.shape[1]]

        humans = self.e.inference(image, scales=[None])
        package = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)

        image = package[0]
        status_part_body_appear = package[1]
        name_part_body = [
            "Nose",
            "Neck",
            "RShoulder",
            "RElbow",
            "RWrist",
            "LShoulder",
            "LElbow",
            "LWrist",
            "RHip",
            "RKnee",
            "RAnkle",
            "LHip",
            "LKnee",
            "LAnkle",
            "REye",
            "LEye",
            "REar",
            "LEar",
        ]
        detected_part = []
        for human in humans:
            pose_2d_mpii, visibility = common.MPIIPart.from_coco(human)
            pose_2d_mpiis.append([(int(x * width + 0.5), int(y * height + 0.5))
                                  for x, y in pose_2d_mpii])
            visibilities.append(visibility)

        cv2.putText(
            image, "FPS: %f [press 'q'to quit]" %
            (1.0 / (time.time() - self.fps_time)), (10, 10),
            cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        image = cv2.resize(image, (width, height))
        cv2.imshow('tf-pose-estimation result', image)

        pose_2d_mpiis = np.array(pose_2d_mpiis)
        visibilities = np.array(visibilities)
        transformed_pose2d, weights = self.poseLifting.transform_joints(
            pose_2d_mpiis, visibilities)
        pose_3d = self.poseLifting.compute_3d(transformed_pose2d, weights)
        for i, single_3d in enumerate(pose_3d):
            #plot_pose(single_3d)
            plot_pose_adapt(single_3d, self.window3DBody)
        self.fps_time = time.time()

        #Matt plot lib
        #print(pose_3d)
        #----------------------------------------
        #pyQT graph
        pose_3dqt = np.array(pose_3d[0]).transpose()

        bodyPartName = [
            'C_Hip', 'R_Hip', 'R_Knee', 'R_Ankle', 'L_Hip', 'L_Knee',
            'L_Ankle', 'Center', 'C_Shoulder', 'Neck', 'Head', 'L_Shoulder',
            'L_Elbow', 'L_Wrist', 'R_Shoulder', 'R_Elbow', 'R_Wrist'
        ]
        #for part in range(len(pose_3dqt)):
        #    print(bodyPartName[part],pose_3dqt[part])

        #for id_part in range(len(status_part_body_appear)):
        #check this part body appear or not
        #   if status_part_body_appear[id_part] == 1:
        #      print("%-10s"%name_part_body[id_part],": appear")
        #     detected_part.append(id_part)
        #else:
        #   print("%-10s"%name_part_body[id_part],": disappear")
        #list_to_check_fall_deteced = [[1,8]  , #neck,RHIP
        #                                [1,9],  #neck RKnee
        #                                [1,10], #neck RAnkle
        #                                [1,11], #neck LHip
        #                                [1,12], #neck LKnee
        #                                [1,13]]  #neck LAnkle
        if int(self.fps_time) % 1 == 0:  #every 1 second record
            self.times = self.times + [self.times[-1] + 1]
            if len(self.stable) > 1000:
                self.stable = self.stable[200:]
                self.recordHead = self.recordHead[200:]
            if self.stable == [0]:
                self.stable = self.stable + [0]
                self.recordHead = [pose_3dqt[10][2]] + [pose_3dqt[10][2]]
            else:
                #highest 800  , 550-600 average
                self.stable = self.stable + [
                    abs(pose_3dqt[10][2] - self.recordHead[-1])
                ]
                self.recordHead = self.recordHead + [pose_3dqt[10][2]]

        status_found = 0
        for id_part in detected_part:
            #if id_part in [8,9,10,11,12,13] and 1 in detected_part:
            #    status_found = 1
            if id_part in [8, 11] and 1 in detected_part:
                status_found = 1
        if status_found:
            print("-------Ready for detece--------")
            if self.fall_detection(pose_3dqt):
                print("-----\nFOUND !!!\n-----")
        #----------
        keypoints = pose_3d[0].transpose()

        return keypoints / 80
Example #20
            img_scaled = cv2.resize(image,
                                    None,
                                    fx=args.zoom,
                                    fy=args.zoom,
                                    interpolation=cv2.INTER_LINEAR)
            dx = (img_scaled.shape[1] - image.shape[1]) // 2
            dy = (img_scaled.shape[0] - image.shape[0]) // 2
            image = img_scaled[dy:image.shape[0], dx:image.shape[1]]

        logger.debug('image process+')
        humans = e.inference(image)

        logger.debug('postprocess+')
        #for Testing image in heatmap
        #gray_img = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
        #heatmap_img = cv2.applyColorMap(gray_img, cv2.COLORMAP_JET)
        #cv2.imshow('Testing',heatmap_img)
        image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)

        logger.debug('show+')
        cv2.putText(image, "FPS: %f" % (1.0 / (time.time() - fps_time)),
                    (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2)
        cv2.imshow('tf-pose-estimation result', image)

        fps_time = time.time()
        if cv2.waitKey(1) == 27:
            break
        logger.debug('finished+')

    cv2.destroyAllWindows()
Example #21
        humans = e.inference(image)
        n = len(humans)
        print(humans)
        """body = {}
        body["key_points"] = []
        for human in humans:
           for parts in human.body_parts.items():
               print("id" ,parts[0])
               print("x",parts[1].x)
               print("y",parts[1].y)
               print("score = ",parts[1].score)
               body["key_points"].append({"ID":parts[0],"X":parts[1].x,"Y":parts[1].y})
        with open("json/{0}.json".format(str(i)),'w') as file:
        	json.dump(body,file)"""
        logger.debug('postprocess+')
        image2 = TfPoseEstimator.draw_humans(image2, humans, imgcopy=False,frame=frame,dir=args.output_json)
       # image =  
        logger.debug('show+')
        cv2.putText(image,
                    "FPS: %f" % (1.0 / (time.time() - fps_time)),
                    (10, 10),  cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                    (0, 255, 0), 2)
        cv2.putText(image, "persons = %f" % n,(10,30),cv2.FONT_HERSHEY_SIMPLEX, 0.5,
                    (0, 0, 255), 3)
        cv2.imshow('original',image)
        cv2.imshow('tf-pose-estimation result', image2)
        if n>0:
            frame=frame+1

        """logger.info('3d lifting initialization.')
        poseLifting = Prob3dPose('../src/lifting/models/prob_model_params.mat')
Example #22
def main():
    parser = argparse.ArgumentParser(description='tf-pose-estimation run')
    parser.add_argument('--image', type=str, default='./images/p3.jpg')
    parser.add_argument(
        '--resolution',
        type=str,
        default='432x368',
        help='network input resolution. default=432x368')
    parser.add_argument(
        '--model',
        type=str,
        default='mobilenet_thin',
        help='cmu / mobilenet_thin')
    parser.add_argument(
        '--scales',
        type=str,
        default='[None]',
        help='for multiple scales, eg. [1.0, (1.1, 0.05)]')
    parser.add_argument(
        '--stick_only',
        action='store_true',
        help='save output without other analysis')
    parser.add_argument('--plot_3d', action='store_true', help='save 3d poses')

    args = parser.parse_args()
    scales = ast.literal_eval(args.scales)

    w, h = model_wh(args.resolution)
    e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))

    # estimate human poses from a single image !
    image = common.read_imgfile(args.image, None, None)
    # image = cv2.fastNlMeansDenoisingColored(image, None, 10, 10, 7, 21)
    t = time.time()
    humans = e.inference(image, scales=scales)
    elapsed = time.time() - t

    logger.info('inference image: %s in %.4f seconds.' % (args.image, elapsed))

    image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
    # cv2.imshow('tf-pose-estimation result', image)
    # cv2.waitKey()

    import matplotlib.pyplot as plt

    fig = plt.figure()
    a = fig.add_subplot(2, 2, 1)
    a.set_title('Result')
    rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    if args.stick_only:
        cv2.imwrite('output/test.png', image)
        logger.info('image saved: {}'.format('output/test.png'))
        import sys
        sys.exit(0)
    plt.imshow(rgb_image)

    bgimg = cv2.cvtColor(image.astype(np.uint8), cv2.COLOR_BGR2RGB)
    bgimg = cv2.resize(
        bgimg, (e.heatMat.shape[1], e.heatMat.shape[0]),
        interpolation=cv2.INTER_AREA)

    # show network output
    a = fig.add_subplot(2, 2, 2)
    plt.imshow(bgimg, alpha=0.5)
    tmp = np.amax(e.heatMat[:, :, :-1], axis=2)
    plt.imshow(tmp, cmap=plt.cm.gray, alpha=0.5)
    plt.colorbar()

    tmp2 = e.pafMat.transpose((2, 0, 1))
    tmp2_odd = np.amax(np.absolute(tmp2[::2, :, :]), axis=0)
    tmp2_even = np.amax(np.absolute(tmp2[1::2, :, :]), axis=0)

    a = fig.add_subplot(2, 2, 3)
    a.set_title('Vectormap-x')
    # plt.imshow(CocoPose.get_bgimg(inp, target_size=(vectmap.shape[1], vectmap.shape[0])), alpha=0.5)
    plt.imshow(tmp2_odd, cmap=plt.cm.gray, alpha=0.5)
    plt.colorbar()

    a = fig.add_subplot(2, 2, 4)
    a.set_title('Vectormap-y')
    # plt.imshow(CocoPose.get_bgimg(inp, target_size=(vectmap.shape[1], vectmap.shape[0])), alpha=0.5)
    plt.imshow(tmp2_even, cmap=plt.cm.gray, alpha=0.5)
    plt.colorbar()
    if not os.path.exists('output'):
        os.mkdir('output')
    plt.savefig('output/test.png')
    logger.info('image saved: {}'.format('output/test.png'))

    if not args.plot_3d:
        import sys
        sys.exit(0)
    #For plotting in 3d
    logger.info('3d lifting initialization.')
    poseLifting = Prob3dPose('./src/lifting/models/prob_model_params.mat')

    image_h, image_w = image.shape[:2]
    standard_w = 640
    standard_h = 480

    pose_2d_mpiis = []
    visibilities = []
    for human in humans:
        pose_2d_mpii, visibility = common.MPIIPart.from_coco(human)
        pose_2d_mpiis.append([(int(x * standard_w + 0.5),
                               int(y * standard_h + 0.5))
                              for x, y in pose_2d_mpii])
        visibilities.append(visibility)

    pose_2d_mpiis = np.array(pose_2d_mpiis)
    visibilities = np.array(visibilities)
    transformed_pose2d, weights = poseLifting.transform_joints(
        pose_2d_mpiis, visibilities)
    pose_3d = poseLifting.compute_3d(transformed_pose2d, weights)

    for i, single_3d in enumerate(pose_3d):
        plot_pose(single_3d)
    plt.draw()
    plt.savefig('output/pose_3d_test.png')
    logger.info('3d plot saved: {}'.format('output/pose_3d_test.png'))
Example #23
            canvas[dy:dy + img_scaled.shape[0],
                   dx:dx + img_scaled.shape[1]] = img_scaled
            image_rgb = canvas

        elif args.zoom > 1.0:
            img_scaled = cv2.resize(image_rgb,
                                    None,
                                    fx=args.zoom,
                                    fy=args.zoom,
                                    interpolation=cv2.INTER_LINEAR)
            dx = (img_scaled.shape[1] - image_rgb.shape[1]) // 2
            dy = (img_scaled.shape[0] - image_rgb.shape[0]) // 2
            image1 = img_scaled[dy:image1.shape[0], dx:image_rgb.shape[1]]
        humans = e.inference(image_rgb)
        image_rgb = TfPoseEstimator.draw_humans(image_rgb,
                                                humans,
                                                imgcopy=False)

        if humans:
            # distance values call (humans list length = people).
            # Nose = 0
            # Neck = 1
            # RShoulder = 2
            # RElbow = 3
            # RWrist = 4
            # LShoulder = 5
            # LElbow = 6
            # LWrist = 7
            # RHip = 8
            # RKnee = 9
            # RAnkle = 10
Example #24
def doCNNTracking(args):    
    global is_tracking,logger

    fps_time = 0

    logger.debug('initialization %s : %s' % (args.model, get_graph_path(args.model)))
    w, h = model_wh(args.resolution)
    e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))
    logger.debug('cam read+')
    pipeline = rs.pipeline()
    pipeline.start()
    
    frames = pipeline.wait_for_frames()
    #get depth image
    depth = frames.get_depth_frame() 
    depth_image_data = depth.as_frame().get_data()
    depth_image = np.asanyarray(depth_image_data)
    
    
    
    logger.info('cam depth image=%dx%d' % (depth_image.shape[1], depth_image.shape[0])) 
    logger.info('camera ready') 
    
    
    #clip box that maps the depth image onto the rgb image
    clip_box = [100,-100,290,-300]
    
    human_list = []
    while (True):
        if(is_tracking):
            
            fps_time = time.time()
            frames = pipeline.wait_for_frames()
            #get rgb image
            image_frame = frames.get_color_frame()
            image_data = image_frame.as_frame().get_data()
            image = np.asanyarray(image_data)
            
            #change bgr 2 rgb
            image = np.array(image[...,::-1])
            origen_image = image

            #get depth image
            depth = frames.get_depth_frame() 
            depth_image_data = depth.as_frame().get_data()
            depth_image = np.asanyarray(depth_image_data)
            depth_image = depth_image[(int)(clip_box[0]):(int)(clip_box[1]),(int)(clip_box[2]):(int)(clip_box[3])]
            depth_image = cv2.resize(depth_image, (640, 480), interpolation=cv2.INTER_CUBIC)
            depth_image=depth_image/30
            depth_image = depth_image.astype(np.uint8)
            
            #depth-based background-removal mask
            thresh=cv2.inRange(depth_image,20,160)

            #morphological clean-up of the background mask
            kernel = cv2.getStructuringElement(cv2.MORPH_RECT,(1, 1))  
            eroded = cv2.erode(thresh,kernel)
            kernel2 = cv2.getStructuringElement(cv2.MORPH_RECT,(5, 5))  
            dilated = cv2.dilate(eroded,kernel2)
            
            #brightness mask
            bright_mask = np.zeros(image.shape);
            bright_mask.fill(200)
            bright_mask = bright_mask.astype(np.uint8);
            bright_mask = cv2.bitwise_and(bright_mask, bright_mask, mask=dilated)
            
            #brightness adjustment of the rgb image
#             image = cv2.bitwise_and(image, image, mask=dilated)
            
            image = image.astype(int)+200-bright_mask.astype(int);
            image = np.clip(image, 0, 255)
            image = image.astype(np.uint8);
            
            image=origen_image
            
            #edge detection on the image
            image_gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
            blurred = cv2.GaussianBlur(image_gray, (3, 3), 0)
            canny = cv2.Canny(blurred, 50, 50)
            canny_blurred = cv2.GaussianBlur(canny, (13, 13), 0)
            cv2.imshow('test', canny_blurred)
            
            #tf-pose image scaling
            if args.zoom < 1.0:
                canvas = np.zeros_like(image)
                img_scaled = cv2.resize(image, None, fx=args.zoom, fy=args.zoom, interpolation=cv2.INTER_LINEAR)
                dx = (canvas.shape[1] - img_scaled.shape[1]) // 2
                dy = (canvas.shape[0] - img_scaled.shape[0]) // 2
                canvas[dy:dy + img_scaled.shape[0], dx:dx + img_scaled.shape[1]] = img_scaled
                image = canvas
            elif args.zoom > 1.0:
                img_scaled = cv2.resize(image, None, fx=args.zoom, fy=args.zoom, interpolation=cv2.INTER_LINEAR)
                dx = (img_scaled.shape[1] - image.shape[1]) // 2
                dy = (img_scaled.shape[0] - image.shape[0]) // 2
                image = img_scaled[dy:image.shape[0], dx:image.shape[1]]

            #tf-pose inference
            humans = e.inference(image)
            
            # #get joints
            # jdata = TfPoseEstimator.get_json_data(image.shape[0],image.shape[1],humans)
            # if(len(jdata)>2):
            #     try:
            #         #send position data to the server
            #         chating_room.sendTrackingData(jdata,'track')
            #     except:
            #         print("Cannot send data to server.")
           
            
            #depth image after background removal
            depth_masked = cv2.bitwise_and(depth_image, depth_image, mask=dilated)
            new_human_list = []
            for human in humans:
                #compute depth data per joint
                depthDatas=[None] * 20
                image_h, image_w = image.shape[:2]
                
                               
                # get joint data with depth
                for i in range(common.CocoPart.Background.value):
                    if i not in human.body_parts.keys():
                        continue
                    body_part = human.body_parts[i]
                    y= int(body_part.y * image_h+ 0.5)
                    x = int(body_part.x * image_w + 0.5)
                    s=5;
                    mat = depth_masked[y-s if(y-s>=0) else 0:y+s if(y+s<=479) else 479,x-s if(x-s>=0) else 0:x+s if (x+s<=639) else 639]

                    count=0;
                    sum_depth=0;

                    for j in range (mat.shape[0]):
                        for k in range (mat.shape[1]):     
                            if mat[j,k]!=0:
                                sum_depth+=mat[j,k]
                                count+=1
                                
                    if(count>0):
                        depth=sum_depth/count
                    else:
                        depth=0
                    
                    mat = depth_masked[y-s if(y-s>=0) else 0:y+s if(y+s<=479) else 479,x-s if(x-s>=0) else 0:x+s if (x+s<=639) else 639]
                    
                    try:
                        depthDatas[i] =  (JointDepthData(i,x,y,depth))
                    except:
                        print("err:"+str(x)+" "+str(y)+" "+str(body_part.x )+" "+str(body_part.y ))
                
                # post-processing
                jn0=np.zeros((20,3))
                
                for j in depthDatas:
                    if(j!=None):
                        jn0[j.jn]=np.array([1,j.x,j.y])
                jn0 = jn0.astype(int)
                jn00 = jn0.copy()
                
                old_images=None
                if(len(human_list)>0):
                # find the previous HumanData most similar to this one
                    most_simular_value = 9999999
                    most_simular_human = ''
                    jn1=np.zeros((20,3))
                    
                    for human_data in human_list:                        
                        for j in human_data.joint_list:
                            if(j != None):
                                jn1[j.jn]=np.array([1,j.x,j.y])
                        jn1 = jn1.astype(int)

                        different_value=0
                        match_count=0
                        for i in range (18) :
                            if(jn0[i,0]*jn1[i,0]!=0):
                                different_value+= np.linalg.norm(jn0[i]-jn1[i])
                                match_count+=1
                        if(different_value/match_count<most_simular_value):
                            most_simular_human = human_data
                
                    old_images = most_simular_human.image_list
                    
                    if(old_images==None):
                        old_images = [np.zeros((80,80))]*18
                    w = 160
                    
                    
#                     for joint_i in range (18):
#                         smallest_diff = 9999999
#                         if(jn0[joint_i,0]==int(1) and jn1[joint_i,0]==int(1) and old_images[joint_i].shape[0]>0 and old_images[joint_i].shape[1]>0):
#                             new_center=np.zeros(2)
#                             for i in range (-20,20,10):
#                                 for j in range (-20,20,10):
#                                     center = np.array([jn0[joint_i,1]+i,jn0[joint_i,2]+j])
                                    
#                                     mat0 = canny_blurred[center[0]-w:center[0]+w,center[1]-w:center[1]+w]
#                                     if(mat0.shape[0]!=2*w or mat0.shape[1]!=2*w or old_images[joint_i].shape[0]!=2*w or old_images[joint_i].shape[1]!=2*w):
#                                         continue
#                                     try:
#                                         mat1 = mat0-old_images[joint_i]
#                                     except:
#                                         print(mat0.shape)
#                                     mat2 = np.exp2(mat1)
#                                     diff = np.sum(mat2)
#                                     if(diff<smallest_diff):
#                                         smallest_diff=diff
#                                         new_images[joint_i] = canny_blurred[center[0]-w:center[0]+w,center[1]-w:center[1]+w]
#                                         new_center=center
#                             if(smallest_diff<9999999):
#                                 jn0[joint_i,1] = int(new_center[0]+jn0[joint_i,1])/2
#                                 jn0[joint_i,2] = int(new_center[1]+jn1[joint_i,1])/2
                    temp = np.copy(image)
                    
                    for joint_i in range(18):
                        center = np.array([jn0[joint_i,1],jn0[joint_i,2]])
                        mat0 = image_gray[center[1]-w if(center[1]-w>=0) else 0:center[1]+w if(center[1]+w<480) else 479,center[0]-w if(center[0]-w>=0) else 0:center[0]+w if(center[0]+w<640) else 639]
                        mat1 = old_images[joint_i]
                        if(mat0.shape[0]>mat1.shape[0]and mat0.shape[1]>mat1.shape[1] and mat0.shape[0]>0 and mat0.shape[1]>0 and mat1.shape[0]>0 and mat1.shape[1]>0):
                            loc = match_img(mat0,mat1)
                            lt = (center[0]-w if(center[0]-w>=0) else 0,center[1]-w if(center[1]-w>=0) else 0)
                            
                            
                            if(len(loc[1])>0):                                 
                                c = (int(sum(loc[1])/len(loc[1])),int(sum(loc[0])/len(loc[0])))
                                cv2.rectangle(temp,(c[0]+lt[0],c[1]+lt[1]),(c[0]+lt[0]+40,c[1]+lt[1]+40), common.CocoColors[joint_i], 2)                             
                            # for pt in : 
                            #     c = pt-np.array((center[1]-w if(center[1]-w>=0) else 0,center[0]-w if(center[0]-w>=0) else 0))+np.array((center[1],center[0]))
                            #     c2 = (c[0],c[1])
                                # cv2.rectangle(temp,c2,(c[0]+40,c[1]+40),(255,255,255), 2) 
                                
                    
                    cv2.imshow('test3',  temp )
                          
 
                
                w = 40
                new_depthDatas=[None]*20
                new_images = [np.zeros((80,80))]*20
                for joint_i in range (18):
                    if(jn0[joint_i,0]==1):
                        new_depthDatas[joint_i]=(JointDepthData(joint_i,jn0[joint_i,1],jn0[joint_i,2],depthDatas[joint_i].dp))
                        m=np.copy(image_gray[jn0[joint_i,2]-w:jn0[joint_i,2]+w,jn0[joint_i,1]-w:jn0[joint_i,1]+w])

                        if(old_images is None and m.shape[0]==2*w and m.shape[1]==2*w):
                            cv2.circle(image,(jn0[joint_i,1],jn0[joint_i,2]), w, (0,0,255), -1)
                            new_images[joint_i]=m
                        elif(old_images is None and (m.shape[0]<2*w or m.shape[1]<2*w)):
                            pass
                        elif(m.shape[0]==2*w and m.shape[1]==2*w):
                            cv2.circle(image,(jn0[joint_i,1],jn0[joint_i,2]),w, (0,255,0), -1)
                            new_images[joint_i]=old_images[joint_i]*0.9+m*0.1
                        else:
                            cv2.circle(image,(jn0[joint_i,1],jn0[joint_i,2]), w, (255,0,0), -1)
                            new_images[joint_i]=old_images[joint_i]
                        
                        if(joint_i==1 and m.shape[0]==2*w and m.shape[1]==2*w):
                            cv2.imshow('test2', m)
                            cv2.circle(image,(jn00[joint_i,1],jn00[joint_i,2]), w, (255,255,0), -1)
                    
                        # new_images.append(depth_masked[jn0[joint_i,1]-5:jn0[joint_i,1]+5,jn0[joint_i,2]-5:jn0[joint_i,2]+5])
                        
                
                
                new_human = HumanData(new_depthDatas,new_images)
                
                new_human_list.append(new_human)
                
#                 depth_jdata=json.dumps(new_depthDatas)
#                 if(len(depth_jdata)>2):
#                     try:
#                         # send depth data to the server
#                         print(depth_jdata)
#                         chating_room.sendTrackingData(depth_jdata,'track_depth')

#                     except:
#                         print("Cannot send depth data to server.")
            
            human_list = new_human_list           
            depth_image =  cv2.applyColorMap(cv2.convertScaleAbs(depth_image/25), cv2.COLORMAP_JET)
#             cv2.circle(image,(320,240), 5, (255,0,0), -1)

            
            image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)

            cv2.putText(image,"FPS: %f" % (1.0 / (time.time() - fps_time)),(10, 10),  cv2.FONT_HERSHEY_SIMPLEX, 0.5,(0, 255, 0), 2)
            
            cv2.imshow('tf-pose-estimation result', image)
           
            
            if cv2.waitKey(25) & 0xFF == ord('q'):
                cv2.destroyAllWindows()
                break


    cv2.destroyAllWindows()
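Note: the example above calls a match_img helper that is not shown. A minimal sketch of what such a helper could look like, assuming it wraps OpenCV template matching and returns the (row, column) index arrays that the call site reads as loc[0] and loc[1]:

import cv2
import numpy as np

def match_img(search_region, template, threshold=0.8):
    # Hypothetical helper for the call site above: slide the stored joint
    # patch over the grayscale search window and keep every location whose
    # normalized correlation score passes the threshold.
    result = cv2.matchTemplate(search_region.astype(np.uint8),
                               template.astype(np.uint8),
                               cv2.TM_CCOEFF_NORMED)
    return np.where(result >= threshold)  # (row_indices, col_indices)

Normalized correlation (TM_CCOEFF_NORMED) is used in the sketch so that moderate brightness changes between frames disturb the score less than a raw pixel difference would.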
Пример #25
0
def main():

    yolo = YOLO()
    max_cosine_distance = 0.3
    nn_budget = None
    nms_max_overlap = 1.0
    model_filename = 'model_data/mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)
    metric = nn_matching.NearestNeighborDistanceMetric("cosine",
                                                       max_cosine_distance,
                                                       nn_budget)
    tracker = Tracker(metric)
    parser = argparse.ArgumentParser(
        description='Training codes for Openpose using Tensorflow')
    parser.add_argument('--checkpoint_path',
                        type=str,
                        default='checkpoints/train/2018-12-13-16-56-49/')
    parser.add_argument('--backbone_net_ckpt_path',
                        type=str,
                        default='checkpoints/vgg/vgg_19.ckpt')
    parser.add_argument('--image', type=str, default=None)
    # parser.add_argument('--run_model', type=str, default='img')
    parser.add_argument('--video', type=str, default=None)
    parser.add_argument('--train_vgg', type=bool, default=True)
    parser.add_argument('--use_bn', type=bool, default=False)
    parser.add_argument('--save_video', type=str, default='result/our.mp4')
    args = parser.parse_args()
    checkpoint_path = args.checkpoint_path
    logger.info('checkpoint_path: ' + checkpoint_path)

    with tf.name_scope('inputs'):
        raw_img = tf.placeholder(tf.float32, shape=[None, None, None, 3])
        img_size = tf.placeholder(dtype=tf.int32,
                                  shape=(2, ),
                                  name='original_image_size')

    img_normalized = raw_img / 255 - 0.5

    # define vgg19
    with slim.arg_scope(vgg.vgg_arg_scope()):
        vgg_outputs, end_points = vgg.vgg_19(img_normalized)

    # get net graph
    logger.info('initializing model...')
    net = PafNet(inputs_x=vgg_outputs, use_bn=args.use_bn)
    hm_pre, cpm_pre, added_layers_out = net.gen_net()
    hm_up = tf.image.resize_area(hm_pre[5], img_size)
    cpm_up = tf.image.resize_area(cpm_pre[5], img_size)
    # hm_up = hm_pre[5]
    # cpm_up = cpm_pre[5]
    smoother = Smoother({'data': hm_up}, 25, 3.0)
    gaussian_heatMat = smoother.get_output()

    max_pooled_in_tensor = tf.nn.pool(gaussian_heatMat,
                                      window_shape=(3, 3),
                                      pooling_type='MAX',
                                      padding='SAME')
    tensor_peaks = tf.where(tf.equal(gaussian_heatMat, max_pooled_in_tensor),
                            gaussian_heatMat, tf.zeros_like(gaussian_heatMat))

    logger.info('initialize saver...')
    # trainable_var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='openpose_layers')
    # trainable_var_list = []
    trainable_var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                           scope='openpose_layers')
    if args.train_vgg:
        trainable_var_list = trainable_var_list + tf.get_collection(
            tf.GraphKeys.TRAINABLE_VARIABLES, scope='vgg_19')

    restorer = tf.train.Saver(tf.get_collection(
        tf.GraphKeys.TRAINABLE_VARIABLES, scope='vgg_19'),
                              name='vgg_restorer')
    saver = tf.train.Saver(trainable_var_list)

    logger.info('initialize session...')
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        sess.run(tf.group(tf.global_variables_initializer()))
        logger.info('restoring vgg weights...')
        restorer.restore(sess, args.backbone_net_ckpt_path)
        logger.info('restoring from checkpoint...')
        #saver.restore(sess, tf.train.latest_checkpoint(checkpoint_dir=checkpoint_path))
        saver.restore(sess, args.checkpoint_path + 'model-59000.ckpt')
        logger.info('initialization done')
        writeVideo_flag = True
        if args.image is None:
            if args.video is not None:
                cap = cv2.VideoCapture(args.video)
                w = int(cap.get(3))
                h = int(cap.get(4))

            else:
                cap = cv2.VideoCapture("images/video.mp4")
                #cap = cv2.VideoCapture("rtsp://*****:*****@192.168.43.51:554//Streaming/Channels/1")
                #cap = cv2.VideoCapture("http://*****:*****@192.168.1.111:8081")
                #cap = cv2.VideoCapture("rtsp://*****:*****@192.168.1.106:554//Streaming/Channels/1")
            _, image = cap.read()
            #print(_,image)
            if image is None:
                logger.error("Can't read video")
                sys.exit(-1)
            fps = cap.get(cv2.CAP_PROP_FPS)
            ori_w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            ori_h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            #print(fps,ori_w,ori_h)
            if args.save_video is not None:
                fourcc = cv2.VideoWriter_fourcc(*'MP4V')
                video_saver = cv2.VideoWriter('result/our.mp4', fourcc, fps,
                                              (ori_w, ori_h))
                logger.info('record video to %s' % args.save_video)
            logger.info('fps@%f' % fps)
            size = [int(654 * (ori_h / ori_w)), 654]
            h = int(654 * (ori_h / ori_w))
            time_n = time.time()
            #print(time_n)

            max_boxs = 0
            person_track = {}
            yolo2 = YOLO2()

            while True:
                face = []
                cur1 = conn.cursor()  # get a cursor
                sql = "select * from worker"
                cur1.execute(sql)
                data = cur1.fetchall()
                for d in data:
                    # note: int fields must be converted with str()
                    name = str(d[1]) + '_' + d[2]

                    face.append(name)
                cur1.close()  # close the cursor

                _, image_fist = cap.read()
                # detect whether safety gear is worn

                img = Image.fromarray(
                    cv2.cvtColor(image_fist, cv2.COLOR_BGR2RGB))
                image, wear = yolo2.detect_image(img)
                image = np.array(image)
                image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

                # # get the warning line
                cv2.line(image, (837, 393), (930, 300), (0, 255, 255), 3)
                transboundaryline = t.line_detect_possible_demo(image)

                # OpenPose 2D pose detection
                img = np.array(cv2.resize(image, (654, h)))
                # cv2.imshow('raw', img)
                img_corner = np.array(
                    cv2.resize(image, (360, int(360 * (ori_h / ori_w)))))
                img = img[np.newaxis, :]
                peaks, heatmap, vectormap = sess.run(
                    [tensor_peaks, hm_up, cpm_up],
                    feed_dict={
                        raw_img: img,
                        img_size: size
                    })
                bodys = PoseEstimator.estimate_paf(peaks[0], heatmap[0],
                                                   vectormap[0])

                image, person = TfPoseEstimator.draw_humans(image,
                                                            bodys,
                                                            imgcopy=False)
                # take part 10 (right foot) and part 13 (left foot)

                foot = []
                if len(person) > 0:
                    for p in person:
                        foot_lr = []
                        if 10 in p and 13 in p:
                            foot_lr.append(p[10])
                            foot_lr.append(p[13])

                        if len(foot_lr) > 1:
                            foot.append(foot_lr)

                fps = round(1 / (time.time() - time_n), 2)
                image = cv2.putText(image,
                                    str(fps) + 'fps', (10, 15),
                                    cv2.FONT_HERSHEY_COMPLEX_SMALL, 1,
                                    (255, 255, 255))
                time_n = time.time()

                # DeepSORT object detection and tracking
                image2 = Image.fromarray(image_fist)
                boxs = yolo.detect_image(image2)
                features = encoder(image, boxs)
                detections = [
                    Detection(bbox, 1.0, feature)
                    for bbox, feature in zip(boxs, features)
                ]
                boxes = np.array([d.tlwh for d in detections])
                scores = np.array([d.confidence for d in detections])
                indices = preprocessing.non_max_suppression(
                    boxes, nms_max_overlap, scores)
                detections = [detections[i] for i in indices]
                if len(boxs) > max_boxs:
                    max_boxs = len(boxs)
                # print(max_boxs)

                # Call the tracker
                tracker.predict()
                tracker.update(detections)

                for track in tracker.tracks:

                    if max_boxs < track.track_id:
                        tracker.tracks.remove(track)
                        tracker._next_id = max_boxs + 1

                    if not track.is_confirmed() or track.time_since_update > 1:
                        continue

                    bbox = track.to_tlbr()
                    PointX = bbox[0] + ((bbox[2] - bbox[0]) / 2)
                    PointY = bbox[3]

                    if track.track_id not in person_track:
                        track2 = copy.deepcopy(track)
                        person_track[track.track_id] = track2

                    else:

                        track2 = copy.deepcopy(track)
                        bbox2 = person_track[track.track_id].to_tlbr()
                        PointX2 = bbox2[0] + ((bbox2[2] - bbox2[0]) / 2)
                        PointY2 = bbox2[3]
                        distance = math.sqrt(
                            pow(PointX - PointX2, 2) +
                            pow(PointY - PointY2, 2))
                        if distance < 120:
                            person_track[track.track_id] = track2

                        else:

                            # print('last',track.track_id)
                            dis = {}
                            for key in person_track:
                                bbox3 = person_track[key].to_tlbr()
                                PointX3 = bbox3[0] + (
                                    (bbox3[2] - bbox3[0]) / 2)
                                PointY3 = bbox3[3]

                                d = math.sqrt(
                                    pow(PointX3 - PointX, 2) +
                                    pow(PointY3 - PointY, 2))
                                dis[key] = d
                            dis = sorted(dis.items(),
                                         key=operator.itemgetter(1),
                                         reverse=False)

                            track2.track_id = dis[0][0]
                            person_track[dis[0][0]] = track2
                            tracker.tracks.remove(track)
                            tracker.tracks.append(person_track[track.track_id])

                    # record into the worker class

                    try:
                        box_title = face[track2.track_id - 1]
                    except Exception as e:
                        box_title = str(track2.track_id) + "_" + "unknown"
                    if box_title not in workers:
                        wid = box_title.split('_')[0]
                        localtime = time.asctime(time.localtime(time.time()))
                        workers[box_title] = wk.Worker()
                        workers[box_title].set(box_title, localtime,
                                               (int(PointX), int(PointY)))
                        cur2 = conn.cursor()  # get a cursor
                        sql2 = "UPDATE worker SET in_time='" + localtime + "' WHERE worker_id= '" + wid + "'"
                        cur2.execute(sql2)
                        cur2.close()  # close the cursor

                    else:
                        localtime = time.asctime(time.localtime(time.time()))
                        yoloPoint = (int(PointX), int(PointY))
                        foot_dic = {}
                        wear_dic = {}

                        for f in foot:
                            fp = []
                            footCenter = ((f[0][0] + f[1][0]) / 2,
                                          (f[0][1] + f[1][1]) / 2)
                            foot_dis = int(
                                math.sqrt(
                                    pow(footCenter[0] - yoloPoint[0], 2) +
                                    pow(footCenter[1] - yoloPoint[1], 2)))
                            #print(foot_dis)
                            fp.append(f)
                            fp.append(footCenter)
                            foot_dic[foot_dis] = fp

                        #print(box_title, 'sss', foot_dic)
                        foot_dic = sorted(foot_dic.items(),
                                          key=operator.itemgetter(0),
                                          reverse=False)
                        workers[box_title].current_point = foot_dic[0][1][1]
                        workers[box_title].track_point.append(
                            workers[box_title].current_point)

                        #print(box_title,'sss',foot_dic[0][1][1])
                        mytrack = str(workers[box_title].track_point)
                        wid = box_title.split('_')[0]
                        # Kalman filter prediction
                        if wid not in KalmanNmae:
                            myKalman(wid)
                        if wid not in lmp:
                            setLMP(wid)
                        cpx, cpy = predict(workers[box_title].current_point[0],
                                           workers[box_title].current_point[1],
                                           wid)

                        if cpx[0] == 0.0 or cpy[0] == 0.0:
                            cpx[0] = workers[box_title].current_point[0]
                            cpy[0] = workers[box_title].current_point[1]
                        workers[box_title].next_point = (int(cpx), int(cpy))

                        workers[box_title].current_footR = foot_dic[0][1][0][0]
                        workers[box_title].current_footL = foot_dic[0][1][0][1]
                        cur3 = conn.cursor()  # get a cursor
                        sql = "UPDATE worker SET current_point= '" + str(
                            workers[box_title].current_point
                        ) + "' , current_footR = '" + str(
                            workers[box_title].current_footR
                        ) + "',current_footL = '" + str(
                            workers[box_title].current_footL
                        ) + "',track_point = '" + mytrack + "',next_point = '" + str(
                            workers[box_title].next_point
                        ) + "' WHERE worker_id= '" + wid + "'"
                        cur3.execute(sql)
                        cur3.close()
                        # record safety-gear status
                        if len(wear) > 0:
                            for w in wear:
                                wear_dis = int(
                                    math.sqrt(
                                        pow(w[0] - yoloPoint[0], 2) +
                                        pow(w[1] - yoloPoint[1], 2)))
                                wear_dic[wear_dis] = w
                            wear_dic = sorted(wear_dic.items(),
                                              key=operator.itemgetter(0),
                                              reverse=False)

                            if wear_dic[0][0] < 120:
                                cur4 = conn.cursor()  # get a cursor

                                if wear[wear_dic[0][1]] == 1:
                                    if len(workers[box_title].wear['no helmet']
                                           ) == 0:
                                        workers[box_title].wear[
                                            'no helmet'].append(localtime)

                                        sql = "INSERT INTO wear SET worker_id = '" + wid + "', type = 'no_helmet',abnormal_time = '" + localtime + "'"
                                        cur4.execute(sql)
                                        cur4.close()  # close the cursor

                                    else:
                                        if localtime not in workers[
                                                box_title].wear['no helmet']:

                                            workers[box_title].wear[
                                                'no helmet'].append(localtime)
                                            sql = "INSERT INTO wear SET worker_id = '" + wid + "', type = 'no_helmet',abnormal_time = '" + localtime + "'"
                                            cur4.execute(sql)
                                            cur4.close()  # close the cursor

                                elif wear[wear_dic[0][1]] == 2:
                                    if len(workers[box_title].
                                           wear['no work cloths']) == 0:
                                        workers[box_title].wear[
                                            'no work cloths'].append(localtime)
                                        sql = "INSERT INTO wear SET worker_id = '" + wid + "', type = 'no work cloths',abnormal_time = '" + localtime + "'"
                                        cur4.execute(sql)
                                        cur4.close()  # close the cursor
                                    else:
                                        if localtime not in workers[
                                                box_title].wear[
                                                    'no work cloths']:
                                            workers[box_title].wear[
                                                'no work cloths'].append(
                                                    localtime)
                                            sql = "INSERT INTO wear SET worker_id = '" + wid + "', type = 'no work cloths',abnormal_time = '" + localtime + "'"
                                            cur4.execute(sql)
                                            cur4.close()  # close the cursor
                                elif wear[wear_dic[0][1]] == 3:
                                    if len(workers[box_title].
                                           wear['unsafe wear']) == 0:
                                        workers[box_title].wear[
                                            'unsafe wear'].append(localtime)
                                        sql = "INSERT INTO wear SET worker_id = '" + wid + "', type = 'unsafe wear',abnormal_time = '" + localtime + "'"
                                        cur4.execute(sql)
                                        cur4.close()  # close the cursor
                                    else:
                                        if localtime not in workers[
                                                box_title].wear['unsafe wear']:
                                            workers[box_title].wear[
                                                'unsafe wear'].append(
                                                    localtime)
                                            sql = "INSERT INTO wear SET worker_id = '" + wid + "', type = 'unsafe wear',abnormal_time = '" + localtime + "'"
                                            cur4.execute(sql)
                                            cur4.close()  # close the cursor

                        # record boundary-crossing events

                        if len(workers[box_title].track_point) > 4:

                            for i in range(len(transboundaryline)):
                                p1 = (transboundaryline[i][0],
                                      transboundaryline[i][1])
                                p2 = (transboundaryline[i][2],
                                      transboundaryline[i][3])
                                p3 = workers[box_title].track_point[-2]
                                p4 = workers[box_title].track_point[-1]
                                a = t.IsIntersec(p1, p2, p3, p4)
                                if a == '有交点':

                                    cur5 = conn.cursor()  # get a cursor
                                    cur6 = conn.cursor()  # get a cursor
                                    cur5.execute(
                                        "select time from transboundary where worker_id = '"
                                        + wid + "' ")
                                    qurrytime = cur5.fetchone()
                                    cur5.close()  # close the cursor
                                    if qurrytime is None:
                                        print('越线')
                                        sql = "INSERT INTO transboundary SET worker_id = '" + wid + "',time = '" + localtime + "'"
                                        cur6.execute(sql)
                                        cur6.close()  # close the cursor
                                    else:
                                        temp1 = 0
                                        for qt in qurrytime:

                                            if qt == localtime:
                                                temp1 = 1
                                        if temp1 == 0:
                                            print('越线')
                                            sql = "INSERT INTO transboundary SET worker_id = '" + wid + "',time = '" + localtime + "'"
                                            cur6.execute(sql)
                                            cur6.close()  # close the cursor
                        if len(workers[box_title].track_point) >= 20:
                            workers[box_title].previous_point = workers[
                                box_title].track_point[-5]
                    conn.commit()
                    try:
                        cv2.putText(image, face[track2.track_id - 1],
                                    (int(bbox[0]), int(bbox[1])), 0,
                                    5e-3 * 200, (0, 255, 0), 2)
                    except Exception as e:
                        cv2.putText(image, "unknown",
                                    (int(bbox[0]), int(bbox[1])), 0,
                                    5e-3 * 200, (0, 255, 0), 2)

                if args.video is not None:
                    image[27:img_corner.shape[0] +
                          27, :img_corner.shape[1]] = img_corner  # [3:-10, :]
                cv2.imshow(' ', image)
                if args.save_video is not None:
                    video_saver.write(image)
                cv2.waitKey(1)
            else:

                image = common.read_imgfile(args.image)
                size = [image.shape[0], image.shape[1]]
                if image is None:
                    logger.error('Image can not be read, path=%s' % args.image)
                    sys.exit(-1)
                h = int(654 * (size[0] / size[1]))
                img = np.array(cv2.resize(image, (654, h)))
                cv2.imshow('ini', img)
                img = img[np.newaxis, :]
                peaks, heatmap, vectormap = sess.run(
                    [tensor_peaks, hm_up, cpm_up],
                    feed_dict={
                        raw_img: img,
                        img_size: size
                    })
                cv2.imshow('in', vectormap[0, :, :, 0])
                bodys = PoseEstimator.estimate_paf(peaks[0], heatmap[0],
                                                   vectormap[0])
                image = TfPoseEstimator.draw_humans(image,
                                                    bodys,
                                                    imgcopy=False)
                cv2.imshow(' ', image)
                cv2.waitKey(0)
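Note: the boundary-crossing check above relies on t.IsIntersec, which returns '有交点' ("has an intersection") when the segment between the worker's last two track points crosses the warning line. A minimal stand-in sketch of such a segment-intersection test (hypothetical helper, collinear edge cases ignored):

def segments_intersect(p1, p2, p3, p4):
    # Hypothetical stand-in for t.IsIntersec: orientation test for whether
    # segment p1-p2 properly crosses segment p3-p4.
    def cross(o, a, b):
        return (a[0] - o[0]) * (b[1] - o[1]) - (a[1] - o[1]) * (b[0] - o[0])
    d1 = cross(p3, p4, p1)
    d2 = cross(p3, p4, p2)
    d3 = cross(p1, p2, p3)
    d4 = cross(p1, p2, p4)
    return ((d1 > 0) != (d2 > 0)) and ((d3 > 0) != (d4 > 0))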
Пример #26
0
    e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))

    # estimate human poses from a single image !
    image = common.read_imgfile(args.image, None, None)
    # image = cv2.fastNlMeansDenoisingColored(image, None, 10, 10, 7, 21)
    t = time.time()
    humans = e.inference(image, scales=[None])
    # humans = e.inference(image, scales=[None, (0.7, 0.5, 1.8)])
    # humans = e.inference(image, scales=[(1.2, 0.05)])
    # humans = e.inference(image, scales=[(0.2, 0.2, 1.4)])
    elapsed = time.time() - t

    logger.info('inference image: %s in %.4f seconds.' % (args.image, elapsed))

    image = cv2.imread(args.image, cv2.IMREAD_COLOR)
    image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
    cv2.imshow('tf-pose-estimation result', image)
    cv2.waitKey()

    import sys
    sys.exit(0)

    logger.info('3d lifting initialization.')
    poseLifting = Prob3dPose('./src/lifting/models/prob_model_params.mat')

    image_h, image_w = image.shape[:2]
    standard_w = 640
    standard_h = 480

    pose_2d_mpiis = []
    visibilities = []
Пример #27
0
 def mesh(self, image):
     image = common.read_imgfile(image, None, None)
     image = cv2.resize(image, (self.width, self.height))
     print('start-inference', time.time())
     humans = self.e.inference(image, scales=[None])
     print('end-inference', time.time())
     self.resetBitFalling()
     self.savesecondNeck(image)
     package = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
     self.globalTime = time.time()  #time of after drawing
     image = package[0]
     #status_part_body_appear = package[1]
     center_each_body_part = package[2]
     #camera not found NECK more than 10 second then reset list
     if self.globalTime - self.getLastRecordTime() >= 12:
         print('RESET STABLE,RECORDNECK,HIP,etc. [complete 12 second]')
         self.destroyAll()
     if self.globalTime - self.getLastRecordTime() >= 2:
         print('maybe NECK or HUMAN not found [complete 2 second]')
         self.human_in_frame = False
     print('end Initialize mesh')
     # print(status_part_body_appear)
     #when draw2D stick man
     # name_part_body = ["Nose",  # 0
     #                   "Neck",  # 1
     #                   "RShoulder",  # 2
     #                   "RElbow",  # 3
     #                   "RWrist",  # 4
     #                   "LShoulder",  # 5
     #                   "LElbow",  # 6
     #                   "LWrist",  # 7
     #                   "RHip",  # 8
     #                   "RKnee",  # 9
     #                   "RAnkle",  # 10
     #                   "LHip",  # 11
     #                   "LKnee",  # 12
     #                   "LAnkle",  # 13
     #                   "REye",  # 14
     #                   "LEye",  # 15
     #                   "REar",  # 16
     #                   "LEar",  # 17
     #                   ]
     # detected_part = []
     self.addRecordTime(self.globalTime)
     print('start record everything')
     if 1 in center_each_body_part:
         # print(self.globalTime - self.getLastRecordTime())
         self.addCountTimes()
         self.human_in_frame = True
         self.lastTimesFoundNeck = self.recordTimeList[-1]
         self.used_quotaVirtureNeck = 0
         self.addRecordNeck(center_each_body_part[1][1])
         if len(self.recordNeck) >= 2:
             self.addRecordVelocity(self.recordNeck, self.recordTimeList)
         if 11 in center_each_body_part:
             self.addRecordHIP(center_each_body_part[11][1])
             print('neck :| HIP: ',
                   self.recordHIP[-1] - self.recordNeck[-1])
         elif 8 in center_each_body_part:
             self.addRecordHIP(center_each_body_part[8][1])
             print('neck :| HIP: ',
                   self.recordHIP[-1] - self.recordNeck[-1])
     elif self.used_quotaVirtureNeck < self.quotaVirtureNeck and self.secondNeck >= self.getLastNeck(
     ):
         # print(self.globalTime - self.getLastRecordTime())
         self.addCountTimes()
         self.lastTimesFoundNeck = self.recordTimeList[-1]
         self.addRecordNeck(self.getSecondNeck())
         if len(self.recordNeck) >= 2:
             self.addRecordVelocity(self.recordNeck, self.recordTimeList)
         print('addSecond Neck', self.used_quotaVirtureNeck)
         self.used_quotaVirtureNeck += 1
     if len(self.recordNeck) > 300:
         self.reduceRecord()
     # print('find highest neck , hip')
     totalTime = 0
     loop = 1
     for i in range(1, len(self.recordTimeList)):
         totalTime += self.recordTimeList[-i] - self.recordTimeList[-(i +
                                                                      1)]
         loop += 1
         if totalTime >= 2:
             break
     print('totalTime:', totalTime, loop)
     minNumber = -1
     if len(self.recordNeck) < loop:
         loop = len(self.recordNeck)
     for i in range(1, loop + 1):
         if minNumber == -1 or self.recordNeck[-i] <= minNumber:
             self.highestNeck = self.recordNeck[-i]  # a higher neck position means a smaller y value
             # self.highestNeckTime = self.recordTimeList[-i]
             minNumber = self.recordNeck[-i]
     if len(self.recordHIP) > 1:
         #11 L_HIP
         if 11 in center_each_body_part:
             self.highestHIP = min(self.recordHIP[-loop:])
         #8 R_HIP
         elif 8 in center_each_body_part:
             self.highestHIP = min(self.recordHIP[-loop:])
     if len(self.recordNeck) > 1:
         self.processFall(image)
         print('end processing fall detection, end mesh()')
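Note: mesh() above keeps rolling lists of neck y-coordinates and timestamps and derives a velocity from them via addRecordVelocity. A minimal sketch of that step, assuming image y grows downward, so a large positive value means the neck is dropping quickly:

def neck_drop_velocity(neck_ys, times):
    # Hypothetical illustration of the velocity record: pixels per second
    # between the two most recent neck observations.
    if len(neck_ys) < 2 or len(times) < 2:
        return 0.0
    dt = times[-1] - times[-2]
    return (neck_ys[-1] - neck_ys[-2]) / dt if dt > 0 else 0.0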
Пример #28
0
        humans = PoseEstimator.estimate(heatMat, pafMat)
        logging.info('pose- elapsed_time={}'.format(time.time() - a))
        for human in humans:
            res = write_coco_json(human, args.input_width, args.input_height)
            print(res)

        logging.info('image={} heatMap={} pafMat={}'.format(
            image.shape, heatMat.shape, pafMat.shape))
        process_img = CocoPose.display_image(image,
                                             heatMat,
                                             pafMat,
                                             as_numpy=True)

        # display
        image = cv2.imread(args.imgpath)
        image_h, image_w = image.shape[:2]
        image = TfPoseEstimator.draw_humans(image, humans)

        scale = 480.0 / image_h
        newh, neww = 480, int(scale * image_w + 0.5)

        image = cv2.resize(image, (neww, newh), interpolation=cv2.INTER_AREA)

        convas = np.zeros([480, 640 + neww, 3], dtype=np.uint8)
        convas[:, :640] = process_img
        convas[:, 640:] = image

        #cv2.imshow('result', convas)
        #cv2.waitKey(0)
        cv2.imwrite(args.model + args.imgpath.split('/')[-1], image)
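Note: write_coco_json above is called but not shown. A minimal sketch of a comparable flattener, assuming human.body_parts maps part indices to BodyPart objects with normalized x/y and that the output follows the COCO [x, y, visibility] layout per joint:

def to_coco_keypoints(human, image_w, image_h, num_parts=18):
    # Hypothetical equivalent of write_coco_json: flatten detected parts into
    # [x1, y1, v1, x2, y2, v2, ...], zero-filling joints that were not found.
    keypoints = []
    for idx in range(num_parts):
        if idx in human.body_parts:
            part = human.body_parts[idx]
            keypoints += [round(part.x * image_w), round(part.y * image_h), 2]
        else:
            keypoints += [0, 0, 0]
    return keypoints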
Пример #29
0
    def post(self):
        global fallen
        if ((self.request.headers['Content-Type'] == 'imagebin')):
            print('Received image')
            image = self.request.body
            fh = open('static/image1.jpg', 'wb')
            fh.write(image)
            fh.close()
            #fh = open('static/image1.jpg','ab')
            #fh.write(bytes([0xD9]))
            #fh.close()
            print('0')
            #image = cv2.imread('static/image1.jpg')
            print('1')
            print('2')

            parser = argparse.ArgumentParser(
                description='tf-pose-estimation run')
            parser.add_argument(
                '--resolution',
                type=str,
                default='432x368',
                help='network input resolution. default=432x368')
            parser.add_argument('--model',
                                type=str,
                                default='mobilenet_thin',
                                help='cmu / mobilenet_thin')
            parser.add_argument(
                '--scales',
                type=str,
                default='[None]',
                help='for multiple scales, eg. [1.0, (1.1, 0.05)]')
            args = parser.parse_args()
            scales = ast.literal_eval(args.scales)

            w, h = model_wh(args.resolution)
            e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))

            # estimate human poses from a single image !
            image = common.read_imgfile('static/image1.jpg', None, None)
            # image = cv2.fastNlMeansDenoisingColored(image, None, 10, 10, 7, 21)
            t = time.time()
            humans = e.inference(image, scales=scales)
            elapsed = time.time() - t

            logger.info('inference image: image3.jpg in %.4f seconds.' %
                        (elapsed))

            image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
            fallen = 'OKAY'
            for i, h in enumerate(humans):
                TORSO_INDEX = 1
                LEFT_HIP_INDEX = 8
                RIGHT_HIP_INDEX = 11
                RIGHT_HAND_INDEX = 4
                RIGHT_FOOT_INDEX = 12
                LEFT_FOOT_INDEX = 9

# and RIGHT_HAND_INDEX in parts and RIGHT_FOOT_INDEX in parts and LEFT_FOOT_INDEX in parts:

                parts = h.body_parts
                if TORSO_INDEX in parts and LEFT_HIP_INDEX in parts and RIGHT_HIP_INDEX in parts:

                    torso = parts[TORSO_INDEX]
                    left_hip = parts[LEFT_HIP_INDEX]
                    right_hip = parts[RIGHT_HIP_INDEX]

                    tx, ty = torso.x, torso.y
                    lhx, lhy = left_hip.x, left_hip.y
                    rhx, rhy = right_hip.x, right_hip.y

                    if tx < lhx or tx > rhx:
                        fallen = 'FALL'

                    if abs(lhy - ty) < 0.1 or abs(rhy - ty) < 0.1:
                        fallen = 'FALL'
                    if RIGHT_HAND_INDEX in parts and RIGHT_FOOT_INDEX in parts and LEFT_FOOT_INDEX in parts:
                        right_foot = parts[RIGHT_FOOT_INDEX]
                        left_foot = parts[LEFT_FOOT_INDEX]
                        right_hand = parts[RIGHT_HAND_INDEX]
                        righax, righay = right_hand.x, right_hand.y
                        rfx, rfy = right_foot.x, right_foot.y
                        lfx, lfy = left_foot.x, left_foot.y
                        if abs(lfy - lhy) < 0.1 or abs(rhy - ty) < 0.1:
                            fallen = 'FALL'
                        if (lfy - lhy) > (lhy - ty):
                            fallen = 'FALL'
                        print(lfy - lhy, lhy - ty)
                        lowermag = math.sqrt((lfy - lhy) * (lfy - lhy) +
                                             (lhx - lfx) * (lhx - lfx))
                        uppermag = math.sqrt((lhy - ty) * (lhy - ty) + (tx - lhx) *
                                             (tx - lhx))
                        if lowermag > uppermag:
                            fallen = 'FALL'
            #cv2.putText(image,
            #               f"Fallen: False",
            #              (60, 60),  cv2.FONT_HERSHEY_SIMPLEX, 2,
            #             (0, 255, 0), 5)

            cv2.putText(image, f"Fallen: {fallen}", (60, 60),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 5)

            cv2.imwrite('static/image3.jpg', image)
            for client in clients:
                update_clients(client)
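Note: the fall heuristics above compare torso and lower-body geometry across several scattered checks. A minimal sketch condensing the core idea into one function (hypothetical helper, using the same normalized BodyPart coordinates as the handler):

import math

def looks_fallen(torso, hip, foot):
    # Hypothetical condensation of the checks above: flag a fall when the
    # torso is nearly level with the hip, or when the hip-to-foot segment is
    # longer than the torso-to-hip segment.
    upper = math.hypot(hip.x - torso.x, hip.y - torso.y)
    lower = math.hypot(foot.x - hip.x, foot.y - hip.y)
    return abs(hip.y - torso.y) < 0.1 or lower > upper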
Пример #30
0
    def post(self):
        global finished_post
        if ((self.request.headers['Content-Type'] == 'imagebin')
                and (finished_post == 1)):
            print(finished_post)
            finished_post = 0
            print('Received image')
            image = self.request.body
            fh = open('static/image1.jpg', 'wb')
            fh.write(image)
            fh.close()
            #fh = open('static/image1.jpg','ab')
            #fh.write(bytes([0xD9]))
            #fh.close()
            print('0')
            #image = cv2.imread('static/image1.jpg')
            print('1')
            print('2')

            parser = argparse.ArgumentParser(
                description='tf-pose-estimation run')
            parser.add_argument(
                '--resolution',
                type=str,
                default='432x368',
                help='network input resolution. default=432x368')
            parser.add_argument('--model',
                                type=str,
                                default='mobilenet_thin',
                                help='cmu / mobilenet_thin')
            parser.add_argument(
                '--scales',
                type=str,
                default='[None]',
                help='for multiple scales, eg. [1.0, (1.1, 0.05)]')
            args = parser.parse_args()
            scales = ast.literal_eval(args.scales)

            w, h = model_wh(args.resolution)
            e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))

            # estimate human poses from a single image !
            image = common.read_imgfile('static/image1.jpg', None, None)
            # image = cv2.fastNlMeansDenoisingColored(image, None, 10, 10, 7, 21)
            t = time.time()
            humans = e.inference(image, scales=scales)
            elapsed = time.time() - t

            logger.info('inference image: image3.jpg in %.4f seconds.' %
                        (elapsed))

            image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)

            #cv2.putText(image,
            #               f"Fallen: False",
            #              (60, 60),  cv2.FONT_HERSHEY_SIMPLEX, 2,
            #             (0, 255, 0), 5)

            cv2.imwrite('static/image3.jpg', image)

            for client in clients:
                update_clients(client)

            print(finished_post)
            finished_post = 1
            data = numpy.frombuffer(stringData,
                                    numpy.uint8)  # convert the received byte stream into a 1-D array
            image = cv2.imdecode(data, cv2.IMREAD_COLOR)  # decode the array into an image
            print(image)

            width = int(image.shape[1])
            height = int(image.shape[0])
            # print(width,height)

            ## 2D pose estimation
            humans = model.inference(image)
            if not args.showBG:
                image = np.zeros(image.shape)
            scales = ast.literal_eval(node_or_string='[None]')
            humans = model.inference(image, scales=scales)
            image = model.draw_humans(image, humans, imgcopy=False)
            # image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
            cv2.putText(image, "FPS: %f" % (1.0 / (time.time() - fps_time)),
                        (10, 10), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0),
                        2)
            # write the 2D pose frame to the output video
            video.write(image)

            ##2D-->3D
            poseLifting = Prob3dPose('./lifting/models/prob_model_params.mat')
            image_h, image_w = image.shape[:2]
            standard_w = 640
            standard_h = 480

            pose_2d_mpiis = []
            visibilities = []
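Note: the snippet ends right after preparing pose_2d_mpiis and visibilities. In tf-pose-estimation's demo code the 3D lifting step typically continues roughly as sketched below; the helper names (common.MPIIPart.from_coco, poseLifting.transform_joints, poseLifting.compute_3d) are assumptions based on that project and may differ in this codebase:

            # Sketch of the usual continuation: convert each detected human to
            # MPII keypoints in pixel coordinates, then lift them to 3D.
            for human in humans:
                pose_2d_mpii, visibility = common.MPIIPart.from_coco(human)
                pose_2d_mpiis.append([(int(x * standard_w + 0.5),
                                       int(y * standard_h + 0.5))
                                      for x, y in pose_2d_mpii])
                visibilities.append(visibility)

            pose_2d_mpiis = np.array(pose_2d_mpiis)
            visibilities = np.array(visibilities)
            transformed_pose2d, weights = poseLifting.transform_joints(pose_2d_mpiis, visibilities)
            pose_3d = poseLifting.compute_3d(transformed_pose2d, weights)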