def liveData(self):
        directory_in_str = sys.path[0] + r"/../images/LiveTest/"

        try:
            os.remove(outputfile)
            os.remove(cleanedOutputfile)
        except OSError:
            pass

        for file in os.listdir(directory_in_str):
            filename = os.fsdecode(file)
            if filename.endswith(".jpg") or filename.endswith(".png"):
                fullpath = directory_in_str + filename

                # estimate human poses from a single image !
                image = common.read_imgfile(fullpath, None, None)

                humans = self.e.inference(image, scales=self.scales)

                image = TfPoseEstimator.draw_humans(image,
                                                    humans,
                                                    imgcopy=False)
                # cv2.imshow('tf-pose-estimation result', image)
                # cv2.waitKey()

                myFile = open(outputfile, 'a')
                # myFile.write(str(filename) + ',')
                # print(filename)
                myFile.write('\n')
                # break
                myFile.close()
Example #2
def PoseEstimatorPredict(image_path, plot=False, resolution='432x368', scales=[None], model='mobilenet_thin'):
    '''
    input:
        image_path: path to the image (jpg)
        plot = False: whether to plot; if True, returns the keypoint info plus the annotated image matrix
        resolution = '432x368': network input resolution
        scales = [None]
        model = 'mobilenet_thin': model selection

    output:
        if plot is False, returns one item: the keypoint info
        if plot is True, returns two items: the keypoint info and the annotated image matrix
    '''
    w, h = model_wh(resolution)
    e = TfPoseEstimator(get_graph_path(model), target_size=(w, h))
    image = common.read_imgfile(image_path, None, None)
    t = time.time()
    humans = e.inference(image, scales=scales)  # main prediction call
    elapsed = time.time() - t

    logger.info('inference image: %s in %.4f seconds.' % (image_path, elapsed))
    centers = get_keypoint(image, humans)  # collect the keypoint info

    if plot:
        # when plotting:
        image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)  # 画图函数
        fig = plt.figure()
        a = fig.add_subplot(2, 2, 1)
        a.set_title('Result')
        plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
        return centers,image
    else:
        # when not plotting:
        return centers
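
# A minimal usage sketch (added for illustration, not part of the original example);
# it assumes the tf-pose-estimation imports used above and a sample image path that
# appears in the other examples.
if __name__ == '__main__':
    # keypoints only
    centers = PoseEstimatorPredict('./images/p1.jpg')
    print(centers)
    # keypoints plus the annotated image matrix
    centers, annotated = PoseEstimatorPredict('./images/p1.jpg', plot=True)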
Example #3
    def classify(self, path):
        imgPaths = os.listdir(path)
        cnt = 0
        for imgPath in imgPaths:
            tot = 0
            img = read_imgfile(os.path.join(path, imgPath), self.imgW,
                               self.imgH)
            humans = self.getHumans(img)
            imgcv = cv2.imread(os.path.join(path, imgPath))
            res, delta = draw_humans(imgcv, humans)
            tot += delta

            if len(humans) == 0:
                imgp90 = cv2.flip(cv2.transpose(img), 1)
                humans = self.getHumans(imgp90)
                imgcvp90 = cv2.flip(cv2.transpose(imgcv), 1)
                res, delta = draw_humans(imgcvp90, humans)
                delta = len(humans) - delta
                tot += delta

                imgn90 = cv2.flip(cv2.transpose(img), 0)
                humans = self.getHumans(imgn90)
                imgcvn90 = cv2.flip(cv2.transpose(imgcv), 0)
                res, delta = draw_humans(imgcvn90, humans)
                delta = len(humans) - delta
                tot += delta
            if tot > 0:
                cnt += 1

        print(cnt)
        if cnt >= self.th * len(os.listdir(path)):
            return 1
        return 0
Example #4
def main(input_img, model, e):
  '''
  Query the model given an image
  '''
  if input_img:
    image = stringToImage(input_img[input_img.find(",")+1:])
    image = toRGB(image)

    if model is None:
      model = 'mobilenet_thin'

    humans = e.inference(image)
    coords = []
    for human in humans:
      coords.append([[HUMAN_COCO_PART[k], b.x, b.y] for k, b in human.body_parts.items()])

    outdata = {
      'humans': coords
    }
    return outdata

  else:
    # Test with a sample image
    image = common.read_imgfile('./images/p1.jpg', None, None)
    e = TfPoseEstimator(get_graph_path('mobilenet_thin'), target_size=(432, 368))
    humans = e.inference(image)
    coords = []
    for human in humans:
      coords.append([[HUMAN_COCO_PART[k], b.x, b.y] for k, b in human.body_parts.items()])

    outdata = {
      'humans': coords
    }
    return outdata
Example #5
def index():
    try:
        data = request.data
        with open('/tmp/temp.jpg', 'wb') as f:
            f.write(data)
        img = common.read_imgfile('/tmp/temp.jpg', 432, 368)
        scales = ast.literal_eval(args.scales)
        e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))
        humans = e.inference(img, scales=scales)
        return jsonify({"humans": list(map(lambda x: x.to_dict(), humans))})
    except Exception as e:
        return jsonify({"error": str(traceback.format_exc())})
def process_multi(img_path, model):
    image = common.read_imgfile(img_path, None, None)
    filename = os.path.split(img_path)[1].split('.')[0]
    scales = ast.literal_eval(node_or_string='[None]')
    humans = model.inference(image, scales=scales)
    image = model.draw_humans(image, humans, imgcopy=False)
    # image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    path_save_2d = './results/' + filename + '_2d.jpg'
    if not os.path.exists('./results/'):
        os.makedirs('./results/')
    cv2.imwrite(path_save_2d, image)
    # cv2.imshow('result', image)
    #cv2.waitKey()

    #poseLifting = Prob3dPose('/data/ai/JF/pose_estimation/multi_pose_estimator/lifting/models/prob_model_params.mat')
    poseLifting = Prob3dPose('./lifting/models/prob_model_params.mat')
    image_h, image_w = image.shape[:2]
    standard_w = 640
    standard_h = 480

    pose_2d_mpiis = []
    visibilities = []
    for human in humans:
        pose_2d_mpii, visibility = common.MPIIPart.from_coco(human)
        pose_2d_mpiis.append([(int(x * image_w + 0.5), int(y * image_h + 0.5))
                              for x, y in pose_2d_mpii])
        visibilities.append(visibility)

    pose_2d_mpiis = np.array(pose_2d_mpiis)
    visibilities = np.array(visibilities)
    transformed_pose2d, weights = poseLifting.transform_joints(
        pose_2d_mpiis, visibilities)
    pose_3d = poseLifting.compute_3d(transformed_pose2d, weights)

    num = len(pose_3d)
    if num % 2 == 0:
        l = num // 2
    else:
        l = num // 2 + 1

    IMAGES_PATH = './results/'
    if not os.path.exists(IMAGES_PATH):
        os.makedirs(IMAGES_PATH)
    path_save_3d = './results/' + filename + '_3d.png'

    fig = plt.figure()
    for i, single_3d in enumerate(pose_3d):
        plot_pose(single_3d, i, l, fig, num)
    plt.savefig(path_save_3d)
    # plt.show()

    return path_save_2d, path_save_3d
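
# Minimal usage sketch (illustrative, not in the original): it assumes the imports used
# by process_multi above (common, TfPoseEstimator, Prob3dPose, plot_pose, ...) and the
# sample image path used in the other examples.
if __name__ == '__main__':
    w, h = model_wh('432x368')
    estimator = TfPoseEstimator(get_graph_path('mobilenet_thin'), target_size=(w, h))
    path_2d, path_3d = process_multi('./images/p1.jpg', estimator)
    print(path_2d, path_3d)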
Example #7
def main():
    parser = argparse.ArgumentParser(
        description='tf-pose-estimation run by folder')
    parser.add_argument('--folder', type=str, default='./images/')
    parser.add_argument('--resolution',
                        type=str,
                        default='432x368',
                        help='network input resolution. default=432x368')
    parser.add_argument('--model',
                        type=str,
                        default='mobilenet_thin',
                        help='cmu / mobilenet_thin')
    parser.add_argument('--scales',
                        type=str,
                        default='[None]',
                        help='for multiple scales, eg. [1.0, (1.1, 0.05)]')
    args = parser.parse_args()
    scales = ast.literal_eval(args.scales)

    w, h = model_wh(args.resolution)
    e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))
    types = ('*.png', '*.jpg')
    files_grabbed = []
    for files in types:
        files_grabbed.extend(glob.glob(os.path.join(args.folder, files)))
    all_humans = dict()
    if not os.path.exists('output'):
        os.mkdir('output')
    for i, file in enumerate(files_grabbed):
        # estimate human poses from a single image !
        image = common.read_imgfile(file, None, None)
        t = time.time()
        humans = e.inference(image, scales=scales)
        elapsed = time.time() - t

        logger.info('inference image #%d: %s in %.4f seconds.' %
                    (i, file, elapsed))

        image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
        # cv2.imshow('tf-pose-estimation result', image)
        filename = 'pose_{}.png'.format(i)
        output_filepath = os.path.join('output', filename)
        cv2.imwrite(output_filepath, image)
        logger.info('image saved: {}'.format(output_filepath))
        # cv2.waitKey(5000)

        all_humans[file.replace(args.folder, '')] = humans

    with open(os.path.join(args.folder, 'pose.dil'), 'wb') as f:
        dill.dump(all_humans, f, protocol=dill.HIGHEST_PROTOCOL)
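
# Hedged companion sketch (not in the original): one way to read back the results that
# the loop above serialized; it only assumes the 'pose.dil' file written by main().
def load_all_humans(folder='./images/'):
    import os
    import dill
    with open(os.path.join(folder, 'pose.dil'), 'rb') as f:
        return dill.load(f)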
Example #8
def joint():
    try:
        data = request.data
        with open('/tmp/temp.jpg', 'wb') as f:
            f.write(data)
        img = common.read_imgfile('/tmp/temp.jpg', 432, 368)
        scales = ast.literal_eval(args.scales)
        e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))
        humans = e.inference(img, scales=scales)
        image = TfPoseEstimator.draw_humans(img, humans, imgcopy=False)
        b_str = base64.b64encode(img2bytes(
            Image.fromarray(image))).decode('utf-8')
        return jsonify({"image": b_str})
    except Exception as e:
        return jsonify({"error": str(traceback.format_exc())})
Example #9
    def get_skeleton(self, im, shape=None):
        """
        This method returns a list of skeletons per given image
        :param im: given image
        :param shape: image shape
        :return: return a list of skeletons
        """
        # estimate human poses from a single image !
        image = common.read_imgfile(im, None, None)
        if shape is not None:
            shape.clear()
            shape.extend(image.shape[:-1])
        humans = self.model['e'].inference(
            image, scales=self.model['scales'])  # list of skeletons

        return humans
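
    # Hedged usage sketch (illustrative, not from the original): `detector` stands for a
    # hypothetical instance whose self.model dict holds {'e': TfPoseEstimator(...),
    # 'scales': [None]}, which is what this method reads above.
    #
    #     shape = []
    #     skeletons = detector.get_skeleton('./images/p1.jpg', shape)
    #     print(len(skeletons), 'skeleton(s); image shape (h, w):', shape)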
    def adHocData(self):
        directory_in_str = sys.path[0] + "\\..\\images\\OurTest\\"

        # Delete old csv files
        try:
            os.remove(outputfile)
            os.remove(cleanedOutputfile)
        except OSError:
            pass

        # Run through every image in the folder
        for file in os.listdir(directory_in_str):
            filename = os.fsdecode(file)
            if filename.endswith(".jpg") or filename.endswith(".png"):
                fullpath = directory_in_str + filename

                # Estimate human poses from a single image !
                image = common.read_imgfile(fullpath, None, None)
                # image = cv2.fastNlMeansDenoisingColored(image, None, 10, 10, 7, 21)
                t = time.time()
                humans = self.e.inference(image, scales=self.scales)
                elapsed = time.time() - t

                logger.info('inference image: %s in %.4f seconds.' %
                            (fullpath, elapsed))

                image = TfPoseEstimator.draw_humans(image,
                                                    humans,
                                                    imgcopy=False)
                # cv2.imshow('tf-pose-estimation result', image)
                # cv2.waitKey()

                myFile = open(outputfile, 'a')
                # myFile.write(str(filename) + ',')
                # print(filename)
                myFile.write('\n')
                myFile.close()

                # Attempt to plot our skeletons onto the image
                try:

                    fig = plt.figure()
                    a = fig.add_subplot(2, 2, 1)
                    a.set_title('Result')
                    plt.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))

                    bgimg = cv2.cvtColor(image.astype(np.uint8),
                                         cv2.COLOR_BGR2RGB)
                    bgimg = cv2.resize(
                        bgimg,
                        (self.e.heatMat.shape[1], self.e.heatMat.shape[0]),
                        interpolation=cv2.INTER_AREA)

                    # show network output
                    a = fig.add_subplot(2, 2, 2)
                    plt.imshow(bgimg, alpha=0.5)
                    tmp = np.amax(self.e.heatMat[:, :, :-1], axis=2)
                    plt.imshow(tmp, cmap=plt.cm.gray, alpha=0.5)
                    plt.colorbar()

                    tmp2 = self.e.pafMat.transpose((2, 0, 1))
                    tmp2_odd = np.amax(np.absolute(tmp2[::2, :, :]), axis=0)
                    tmp2_even = np.amax(np.absolute(tmp2[1::2, :, :]), axis=0)

                    a = fig.add_subplot(2, 2, 3)
                    a.set_title('Vectormap-x')
                    # plt.imshow(CocoPose.get_bgimg(inp, target_size=(vectmap.shape[1], vectmap.shape[0])), alpha=0.5)
                    plt.imshow(tmp2_odd, cmap=plt.cm.gray, alpha=0.5)
                    plt.colorbar()

                    a = fig.add_subplot(2, 2, 4)
                    a.set_title('Vectormap-y')
                    # plt.imshow(CocoPose.get_bgimg(inp, target_size=(vectmap.shape[1], vectmap.shape[0])), alpha=0.5)
                    plt.imshow(tmp2_even, cmap=plt.cm.gray, alpha=0.5)
                    plt.colorbar()
                    plt.show()

                    logger.info('3d lifting initialization.')
                    poseLifting = Prob3dPose(
                        sys.path[0] +
                        '\\lifting\\models\\prob_model_params.mat')

                    image_h, image_w = image.shape[:2]
                    standard_w = 640
                    standard_h = 480

                    pose_2d_mpiis = []
                    visibilities = []
                    for human in humans:
                        pose_2d_mpii, visibility = common.MPIIPart.from_coco(
                            human)
                        pose_2d_mpiis.append([(int(x * standard_w + 0.5),
                                               int(y * standard_h + 0.5))
                                              for x, y in pose_2d_mpii])
                        visibilities.append(visibility)

                    pose_2d_mpiis = np.array(pose_2d_mpiis)
                    visibilities = np.array(visibilities)
                    transformed_pose2d, weights = poseLifting.transform_joints(
                        pose_2d_mpiis, visibilities)
                    pose_3d = poseLifting.compute_3d(transformed_pose2d,
                                                     weights)

                    for i, single_3d in enumerate(pose_3d):
                        plot_pose(single_3d)
                    plt.show()

                except Exception:
                    print("Error when plotting image")

        dataScriptGenerator.dataCleanup(self)
 def mesh(self, image):
     image = common.read_imgfile(image, None, None)
     image = cv2.resize(image, (self.width, self.height))
     print('start-inference', time.time())
     humans = self.e.inference(image, scales=[None])
     print('end-inference', time.time())
     self.resetBitFalling()
     self.savesecondNeck(image)
     package = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
     self.globalTime = time.time()  # timestamp right after drawing
     image = package[0]
     #status_part_body_appear = package[1]
     center_each_body_part = package[2]
     # if the NECK has not been seen for too long, reset the recorded lists
     if self.globalTime - self.getLastRecordTime() >= 12:
         print('RESET stable, recordNeck, HIP, etc. [12 seconds elapsed]')
         self.destroyAll()
     if self.globalTime - self.getLastRecordTime() >= 2:
         print('maybe NECK or HUMAN not found [2 seconds elapsed]')
         self.human_in_frame = False
     print('end Initialize mesh')
     # print(status_part_body_appear)
     # when drawing the 2D stick figure
     # name_part_body = ["Nose",  # 0
     #                   "Neck",  # 1
     #                   "RShoulder",  # 2
     #                   "RElbow",  # 3
     #                   "RWrist",  # 4
     #                   "LShoulder",  # 5
     #                   "LElbow",  # 6
     #                   "LWrist",  # 7
     #                   "RHip",  # 8
     #                   "RKnee",  # 9
     #                   "RAnkle",  # 10
     #                   "LHip",  # 11
     #                   "LKnee",  # 12
     #                   "LAnkle",  # 13
     #                   "REye",  # 14
     #                   "LEye",  # 15
     #                   "REar",  # 16
     #                   "LEar",  # 17
     #                   ]
     # detected_part = []
     self.addRecordTime(self.globalTime)
     print('start record everything')
     if 1 in center_each_body_part:
         # print(self.globalTime - self.getLastRecordTime())
         self.addCountTimes()
         self.human_in_frame = True
         self.lastTimesFoundNeck = self.recordTimeList[-1]
         self.used_quotaVirtureNeck = 0
         self.addRecordNeck(center_each_body_part[1][1])
         if len(self.recordNeck) >= 2:
             self.addRecordVelocity(self.recordNeck, self.recordTimeList)
         if 11 in center_each_body_part:
             self.addRecordHIP(center_each_body_part[11][1])
             print('HIP - neck vertical distance:',
                   self.recordHIP[-1] - self.recordNeck[-1])
         elif 8 in center_each_body_part:
             self.addRecordHIP(center_each_body_part[8][1])
             print('HIP - neck vertical distance:',
                   self.recordHIP[-1] - self.recordNeck[-1])
     elif self.used_quotaVirtureNeck < self.quotaVirtureNeck and self.secondNeck >= self.getLastNeck(
     ):
         # print(self.globalTime - self.getLastRecordTime())
         self.addCountTimes()
         self.lastTimesFoundNeck = self.recordTimeList[-1]
         self.addRecordNeck(self.getSecondNeck())
         if len(self.recordNeck) >= 2:
             self.addRecordVelocity(self.recordNeck, self.recordTimeList)
         print('addSecond Neck', self.used_quotaVirtureNeck)
         self.used_quotaVirtureNeck += 1
     if len(self.recordNeck) > 300:
         self.reduceRecord()
     # print('find highest neck , hip')
     totalTime = 0
     loop = 1
     for i in range(1, len(self.recordTimeList)):
         totalTime += self.recordTimeList[-i] - self.recordTimeList[-(i +
                                                                      1)]
         loop += 1
         if totalTime >= 2:
             break
     print('totalTime:', totalTime, loop)
     minNumber = -1
     if len(self.recordNeck) < loop:
         loop = len(self.recordNeck)
     for i in range(1, loop + 1):
         if minNumber == -1 or self.recordNeck[-i] <= minNumber:
             self.highestNeck = self.recordNeck[
                 -i]  # a physically higher neck corresponds to a smaller y value
             # self.highestNeckTime = self.recordTimeList[-i]
             minNumber = self.recordNeck[-i]
     if len(self.recordHIP) > 1:
         #11 L_HIP
         if 11 in center_each_body_part:
             self.highestHIP = min(self.recordHIP[-loop:])
         #8 R_HIP
         elif 8 in center_each_body_part:
             self.highestHIP = min(self.recordHIP[-loop:])
     if len(self.recordNeck) > 1:
         self.processFall(image)
         print('end processing falling, end mesh()')
Example #12
        if not args.remote_data:
            df = get_dataflow_batch(args.datapath, True, args.batchsize)
        else:
            df = RemoteDataZMQ(args.remote_data, hwm=5)
        enqueuer = DataFlowToQueue(df,
                                   [input_node, heatmap_node, vectmap_node],
                                   queue_size=100)
        q_inp, q_heat, q_vect = enqueuer.dequeue()

    df_valid = get_dataflow_batch(args.datapath, False, args.batchsize)
    df_valid.reset_state()
    validation_cache = []
    for images_test, heatmaps, vectmaps in df_valid.get_data():
        validation_cache.append((images_test, heatmaps, vectmaps))

    val_image = read_imgfile('./images/p1.jpg', args.input_width,
                             args.input_height)
    val_image2 = read_imgfile('./images/p2.jpg', args.input_width,
                              args.input_height)
    val_image3 = read_imgfile('./images/p3.jpg', args.input_width,
                              args.input_height)

    # define model for multi-gpu
    q_inp_split = tf.split(q_inp, args.gpus)
    output_vectmap = []
    output_heatmap = []
    vectmap_losses = []
    heatmap_losses = []

    for gpu_id in range(args.gpus):
        with tf.device(tf.DeviceSpec(device_type="GPU", device_index=gpu_id)):
            with tf.variable_scope(tf.get_variable_scope(),
Example #13
        for root, dirs, files in os.walk(source_path):
            for filename in files:
                source_images.append({
                    'path': os.path.join(root, filename),
                    'dirname': os.path.basename(root)
                })
    print(source_images)

    data = []

    t = time.time()
    for source_image in source_images:
        print('Analyzing ' + source_image['path'])

        # estimate human poses from a single image
        image = common.read_imgfile(source_image['path'], None, None)
        try:
            humans = e.inference(image, scales=[None])
        except Exception:
            print("Couldn't load or analyze image. Skipping...")
            continue
        # print body parts:
        for human in humans:
            print('Found pose.')
            # normalize coordinates:
            parts_norm = normalize_parts(human.body_parts,
                                         all_points_needed=True)

            if parts_norm is None:
                print('Pose missing points, skipping.')
            else:
Example #14
    w, h = model_wh(args.resolution)
    e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))

    kernel = np.ones((2, 2))
    kernel2 = np.ones((4, 4))
    #initial values
    headbol = 0  #looking at head values
    tol1 = 500
    tol2 = 50
    tol = 20
    k1 = k2 = k3 = k4 = k5 = k6 = k7 = k8 = 1
    k = [k1, k2, k3, k4, k5, k6, k7, k8]

    # estimate human poses from a single image !
    image = common.read_imgfile(args.image, None, None)
    back = common.read_imgfile(args.back, None, None)
    back = back[:image.shape[0], :image.shape[1]]
    back = cv2.cvtColor(back, cv2.COLOR_BGR2GRAY) / 255.

    print(back.shape)
    print(image.shape)
    cv2.namedWindow('image')
    # Create a black image, a window
    # create trackbars for color change
    #cv2.createTrackbar('canny1','image',0,500,nothing)
    #cv2.createTrackbar('canny2','image',0,500,nothing)
    #cv2.createTrackbar('Contour Length','image',0,500,nothing)
    cv2.createTrackbar('Thresh', 'image', 600, 850, nothing)

    if headbol:
Example #15
def main():
    parser = argparse.ArgumentParser(description='tf-pose-estimation run')
    parser.add_argument('--image', type=str, default='./images/p3.jpg')
    parser.add_argument(
        '--resolution',
        type=str,
        default='432x368',
        help='network input resolution. default=432x368')
    parser.add_argument(
        '--model',
        type=str,
        default='mobilenet_thin',
        help='cmu / mobilenet_thin')
    parser.add_argument(
        '--scales',
        type=str,
        default='[None]',
        help='for multiple scales, eg. [1.0, (1.1, 0.05)]')
    parser.add_argument(
        '--stick_only',
        action='store_true',
        help='save output without other analysis')
    parser.add_argument('--plot_3d', action='store_true', help='save 3d poses')

    args = parser.parse_args()
    scales = ast.literal_eval(args.scales)

    w, h = model_wh(args.resolution)
    e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))

    # estimate human poses from a single image !
    image = common.read_imgfile(args.image, None, None)
    # image = cv2.fastNlMeansDenoisingColored(image, None, 10, 10, 7, 21)
    t = time.time()
    humans = e.inference(image, scales=scales)
    elapsed = time.time() - t

    logger.info('inference image: %s in %.4f seconds.' % (args.image, elapsed))

    image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
    # cv2.imshow('tf-pose-estimation result', image)
    # cv2.waitKey()

    import matplotlib.pyplot as plt

    fig = plt.figure()
    a = fig.add_subplot(2, 2, 1)
    a.set_title('Result')
    rgb_image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    if args.stick_only:
        cv2.imwrite('output/test.png', image)
        logger.info('image saved: {}'.format('output/test.png'))
        import sys
        sys.exit(0)
    plt.imshow(rgb_image)

    bgimg = cv2.cvtColor(image.astype(np.uint8), cv2.COLOR_BGR2RGB)
    bgimg = cv2.resize(
        bgimg, (e.heatMat.shape[1], e.heatMat.shape[0]),
        interpolation=cv2.INTER_AREA)

    # show network output
    a = fig.add_subplot(2, 2, 2)
    plt.imshow(bgimg, alpha=0.5)
    tmp = np.amax(e.heatMat[:, :, :-1], axis=2)
    plt.imshow(tmp, cmap=plt.cm.gray, alpha=0.5)
    plt.colorbar()

    tmp2 = e.pafMat.transpose((2, 0, 1))
    tmp2_odd = np.amax(np.absolute(tmp2[::2, :, :]), axis=0)
    tmp2_even = np.amax(np.absolute(tmp2[1::2, :, :]), axis=0)

    a = fig.add_subplot(2, 2, 3)
    a.set_title('Vectormap-x')
    # plt.imshow(CocoPose.get_bgimg(inp, target_size=(vectmap.shape[1], vectmap.shape[0])), alpha=0.5)
    plt.imshow(tmp2_odd, cmap=plt.cm.gray, alpha=0.5)
    plt.colorbar()

    a = fig.add_subplot(2, 2, 4)
    a.set_title('Vectormap-y')
    # plt.imshow(CocoPose.get_bgimg(inp, target_size=(vectmap.shape[1], vectmap.shape[0])), alpha=0.5)
    plt.imshow(tmp2_even, cmap=plt.cm.gray, alpha=0.5)
    plt.colorbar()
    if not os.path.exists('output'):
        os.mkdir('output')
    plt.savefig('output/test.png')
    logger.info('image saved: {}'.format('output/test.png'))

    if not args.plot_3d:
        import sys
        sys.exit(0)
    #For plotting in 3d
    logger.info('3d lifting initialization.')
    poseLifting = Prob3dPose('./src/lifting/models/prob_model_params.mat')

    image_h, image_w = image.shape[:2]
    standard_w = 640
    standard_h = 480

    pose_2d_mpiis = []
    visibilities = []
    for human in humans:
        pose_2d_mpii, visibility = common.MPIIPart.from_coco(human)
        pose_2d_mpiis.append([(int(x * standard_w + 0.5),
                               int(y * standard_h + 0.5))
                              for x, y in pose_2d_mpii])
        visibilities.append(visibility)

    pose_2d_mpiis = np.array(pose_2d_mpiis)
    visibilities = np.array(visibilities)
    transformed_pose2d, weights = poseLifting.transform_joints(
        pose_2d_mpiis, visibilities)
    pose_3d = poseLifting.compute_3d(transformed_pose2d, weights)

    for i, single_3d in enumerate(pose_3d):
        plot_pose(single_3d)
    plt.draw()
    plt.savefig('output/pose_3d_test.png')
    logger.info('3d plot saved: {}'.format('output/pose_3d_test.png'))
         if args.save_video is not None:
             video_saver.write(image)
         cv2.waitKey(1)
 else:
     keypoints_list = [
         CocoPart.Nose, CocoPart.LEye, CocoPart.REye, CocoPart.LEar,
         CocoPart.REar, CocoPart.LShoulder, CocoPart.RShoulder,
         CocoPart.LElbow, CocoPart.RElbow, CocoPart.LWrist,
         CocoPart.RWrist, CocoPart.LHip, CocoPart.RHip, CocoPart.LKnee,
         CocoPart.RKnee, CocoPart.LAnkle, CocoPart.RAnkle
     ]
     image_list = os.listdir(args.images)
     coco_images = []
     coco_annos = []
     for image_name in tqdm(image_list):
         image = common.read_imgfile(args.images + image_name)
          if image is None:
              logger.error('Image can not be read, path=%s' %
                           (args.images + image_name))
              continue
          size = [image.shape[0], image.shape[1]]
         h = int(654 * (size[0] / size[1]))
         img = np.array(cv2.resize(image, (654, h)))
         # cv2.imshow('ini', img)
         img = img[np.newaxis, :]
         peaks, heatmap, vectormap = sess.run(
             [tensor_peaks, hm_up, cpm_up],
             feed_dict={
                 raw_img: img,
                 img_size: size
Example #17
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='tf-pose-estimation run')
    # parser.add_argument('--image', type=str, default='/Users/ildoonet/Downloads/me.jpg')
    parser.add_argument('--image', type=str, default='./images/apink2.jpg')
    # parser.add_argument('--model', type=str, default='mobilenet_320x240', help='cmu / mobilenet_320x240')
    parser.add_argument('--model', type=str, default='mobilenet_thin_432x368', help='cmu_640x480 / cmu_640x360 / mobilenet_thin_432x368')
    parser.add_argument('--scales', type=str, default='[None]', help='for multiple scales, eg. [1.0, (1.1, 0.05)]')
    args = parser.parse_args()
    scales = ast.literal_eval(args.scales)

    w, h = model_wh(args.model)
    e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))

    # estimate human poses from a single image !
    image = common.read_imgfile(args.image, None, None)
    # image = cv2.fastNlMeansDenoisingColored(image, None, 10, 10, 7, 21)
    t = time.time()
    humans = e.inference(image, scales=[None])
    # humans = e.inference(image, scales=[None, (0.7, 0.5, 1.8)])
    # humans = e.inference(image, scales=[(1.2, 0.05)])
    # humans = e.inference(image, scales=[(0.2, 0.2, 1.4)])
    elapsed = time.time() - t

    logger.info('inference image: %s in %.4f seconds.' % (args.image, elapsed))

    image = cv2.imread(args.image, cv2.IMREAD_COLOR)
    image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
    cv2.imshow('tf-pose-estimation result', image)
    cv2.waitKey()
Example #18
        '--model',
        type=str,
        default='mobilenet',
        help='cmu / mobilenet / mobilenet_accurate / mobilenet_fast')
    args = parser.parse_args()

    input_node = tf.placeholder(tf.float32,
                                shape=(1, args.input_height, args.input_width,
                                       3),
                                name='image')

    with tf.Session(config=config) as sess:
        net, _, last_layer = get_network(args.model, input_node, sess)

        logging.debug('read image+')
        image = read_imgfile(args.imgpath, args.input_width, args.input_height)
        vec = sess.run(net.get_output(name='concat_stage7'),
                       feed_dict={'image:0': [image]})

        a = time.time()
        run_options = tf.RunOptions(trace_level=tf.RunOptions.FULL_TRACE)
        run_metadata = tf.RunMetadata()
        pafMat, heatMat = sess.run([
            net.get_output(
                name=last_layer.format(stage=args.stage_level, aux=1)),
            net.get_output(
                name=last_layer.format(stage=args.stage_level, aux=2))
        ],
                                   feed_dict={'image:0': [image]},
                                   options=run_options,
                                   run_metadata=run_metadata)
Example #19
        '--resolution',
        type=str,
        default='640x480',
        help='network input resolution. default=432x368')  #def 432 368
    parser.add_argument('--model',
                        type=str,
                        default='mobilenet_thin',
                        help='cmu / mobilenet_thin')
    args = parser.parse_args()

    w, h = model_wh(args.resolution)
    e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))
    cam1 = cv2.VideoCapture(args.camera)
    #cam2 = cv2.VideoCapture(2)
    ret_val, image = cam1.read()
    back = common.read_imgfile(args.back, None, None)
    back = resize(back, (h, w), 1)
    tol1, tol2, tol = read.Tol()
    humans = e.inference(image)
    image2 = draw_humans(image.copy(), humans, 1)
    kernel = np.ones((7, 7))

    # use these to tune Canny's tolerances;
    # we use Canny to define the contours that fillConvexPoly is then applied to
    '''
    tol1 - threshold for how easily points are linked together in Canny (low value = long edges, set between 50 and 300)
    tol2 - threshold for how easily points get drawn in the first place (low value = lots of noise, set between 100 and 300)
    '''

    while True:
        #back = cam2.read()[-1]
Example #20
        ## variables for file names etc.
        result = []
        write_json = "result.json"

        image_dir = args.image_path
        coco_json_file = './COCO/annotations/person_keypoints_val2017.json'
        cocoGt = COCO(coco_json_file)
        catIds = cocoGt.getCatIds(catNms=['person'])
        keys = cocoGt.getImgIds(catIds=catIds)
        tqdm_keys = tqdm(keys)
        for i, k in enumerate(tqdm_keys):
            img_meta = cocoGt.loadImgs(k)[0]    
            img_idx = img_meta['id']

            img_name = os.path.join(image_dir, img_meta['file_name'])
            input_image = common.read_imgfile(img_name, None, None)

            if input_image is None:
                logger.error('Image can not be read, path=%s' % img_name)
                sys.exit(-1)
            size = [input_image.shape[0], input_image.shape[1]]
            # h = int(654 * (size[0] / size[1]))
            img = np.array(cv2.resize(input_image, (654, 619)))
            # cv2.imshow('ini', img)
            img = img[np.newaxis, :]
            peaks, heatmap, vectormap = sess.run([tensor_peaks, hm_up, cpm_up], feed_dict={raw_img: img, img_size: size})
            # cv2.imshow('in', vectormap[0, :, :, 0])
            humans = PoseEstimator.estimate_paf(peaks[0], heatmap[0], vectormap[0])

            for human in humans:
                item = {
    def post(self):
        global fallen
        if self.request.headers['Content-Type'] == 'imagebin':
            print('Received image')
            image = self.request.body
            fh = open('static/image1.jpg', 'wb')
            fh.write(image)
            fh.close()
            #fh = open('static/image1.jpg','ab')
            #fh.write(bytes([0xD9]))
            #fh.close()
            print('0')
            #image = cv2.imread('static/image1.jpg')
            print('1')
            print('2')

            parser = argparse.ArgumentParser(
                description='tf-pose-estimation run')
            parser.add_argument(
                '--resolution',
                type=str,
                default='432x368',
                help='network input resolution. default=432x368')
            parser.add_argument('--model',
                                type=str,
                                default='mobilenet_thin',
                                help='cmu / mobilenet_thin')
            parser.add_argument(
                '--scales',
                type=str,
                default='[None]',
                help='for multiple scales, eg. [1.0, (1.1, 0.05)]')
            args = parser.parse_args()
            scales = ast.literal_eval(args.scales)

            w, h = model_wh(args.resolution)
            e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))

            # estimate human poses from a single image !
            image = common.read_imgfile('static/image1.jpg', None, None)
            # image = cv2.fastNlMeansDenoisingColored(image, None, 10, 10, 7, 21)
            t = time.time()
            humans = e.inference(image, scales=scales)
            elapsed = time.time() - t

            logger.info('inference image: static/image1.jpg in %.4f seconds.' %
                        (elapsed))

            image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
            fallen = 'OKAY'
            for i, h in enumerate(humans):
                TORSO_INDEX = 1
                LEFT_HIP_INDEX = 8
                RIGHT_HIP_INDEX = 11
                RIGHT_HAND_INDEX = 4
                RIGHT_FOOT_INDEX = 12
                LEFT_FOOT_INDEX = 9

                # and RIGHT_HAND_INDEX in parts and RIGHT_FOOT_INDEX in parts and LEFT_FOOT_INDEX in parts:
                parts = h.body_parts
                if TORSO_INDEX in parts and LEFT_HIP_INDEX in parts and RIGHT_HIP_INDEX in parts:

                    torso = parts[TORSO_INDEX]
                    left_hip = parts[LEFT_HIP_INDEX]
                    right_hip = parts[RIGHT_HIP_INDEX]

                    tx, ty = torso.x, torso.y
                    lhx, lhy = left_hip.x, left_hip.y
                    rhx, rhy = right_hip.x, right_hip.y

                    if tx < lhx or tx > rhx:
                        fallen = 'FALL'

                    if abs(lhy - ty) < 0.1 or abs(rhy - ty) < 0.1:
                        fallen = 'FALL'
                    if RIGHT_HAND_INDEX in parts and RIGHT_FOOT_INDEX in parts and LEFT_FOOT_INDEX in parts:
                        right_foot = parts[RIGHT_FOOT_INDEX]
                        left_foot = parts[LEFT_FOOT_INDEX]
                        right_hand = parts[RIGHT_HAND_INDEX]
                        righax, righay = right_hand.x, right_hand.y
                        rfx, rfy = right_foot.x, right_foot.y
                        lfx, lfy = left_foot.x, left_foot.y
                        if abs(lfy - lhy) < 0.1 or abs(rhy - ty) < 0.1:
                            fallen = 'FALL'
                        if (lfy - lhy) > (lhy - ty):
                            fallen = 'FALL'
                        print(lfy - lhy, lhy - ty)
                        lowermag = math.sqrt((lfy - lhy) * (lfy - lhy) +
                                             (lhx - lfx) * (lhx - lfx))
                        uppermag = math.sqrt((lhy - ty) * (lhy - ty) +
                                             (tx - lhx) * (tx - lhx))
                        if lowermag > uppermag:
                            fallen = 'FALL'
            #cv2.putText(image,
            #               f"Fallen: False",
            #              (60, 60),  cv2.FONT_HERSHEY_SIMPLEX, 2,
            #             (0, 255, 0), 5)

            cv2.putText(image, f"Fallen: {fallen}", (60, 60),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 5)

            cv2.imwrite('static/image3.jpg', image)
            for client in clients:
                update_clients(client)
                        help='cmu / mobilenet_thin')
    parser.add_argument('--scales',
                        type=str,
                        default='[None]',
                        help='for multiple scales, eg. [1.0, (1.1, 0.05)]')
    args = parser.parse_args()
    scales = ast.literal_eval(args.scales)

    w, h = model_wh(args.resolution)
    e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))

    files_grabbed = glob.glob(os.path.join(args.folder, '*.jpg'))
    all_humans = dict()
    for i, file in enumerate(files_grabbed):
        # estimate human poses from a single image !
        image = common.read_imgfile(file, None, None)
        t = time.time()
        humans = e.inference(image, scales=scales)
        elapsed = time.time() - t

        # translate the output into a flat vector of (x, y) pairs, one pair per body part
        num_bodyparts = len(CocoPart.__members__)
        vector = np.zeros(num_bodyparts * 2)
        for human in humans:
            for body_ix in range(0, num_bodyparts):
                if body_ix in human.body_parts:
                    vector[2 * body_ix] = human.body_parts[body_ix].x
                    vector[2 * body_ix + 1] = human.body_parts[body_ix].y

        # save vector to a json file
        file_name, file_ext = os.path.splitext(file)
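        # Hedged completion sketch (the original snippet is truncated here): one plausible
        # way to finish the "save vector to a json file" step noted above.
        import json
        with open(file_name + '.json', 'w') as jf:
            json.dump({'image': file, 'pose_vector': vector.tolist()}, jf)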
from networks import get_graph_path, model_wh
from normalize_parts import normalize_parts
import json
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
from compare_pose import compare_pose
import sys

# parameters
test_image_path = './images_compare/image0178.jpg'
model = 'mobilenet_thin'

# script
w, h = model_wh('432x368')
e = TfPoseEstimator(get_graph_path(model), target_size=(w, h))
image = common.read_imgfile(test_image_path, None, None)
humans = e.inference(image, scales=[None])

for human in humans:
    print('Found pose.')
    match_image_path = compare_pose(human.body_parts,
                                    verbose=True,
                                    use_second_match=True)

    if match_image_path is not None:
        # plot source and dest image:
        f, (ax1, ax2) = plt.subplots(1, 2)

        test_img = mpimg.imread(test_image_path)
        ax1.imshow(test_img)
        ax1.set_title('Test image')
Example #24
    logger.debug('initialization %s : %s' %
                 (args.model, get_graph_path(args.model)))
    w, h = model_wh(args.resize)
    if w == 0 or h == 0:
        e = TfPoseEstimator(get_graph_path(args.model), target_size=(432, 368))
    else:
        e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))

    result = []
    for i, k in enumerate(tqdm(keys)):
        img_meta = cocoGt.loadImgs(k)[0]
        img_idx = img_meta['id']

        img_name = os.path.join(image_dir, img_meta['file_name'])
        image = read_imgfile(img_name, None, None)
        if image is None:
            logger.error('image not found, path=%s' % img_name)
            sys.exit(-1)

        # inference the image with the specified network
        humans = e.inference(image,
                             resize_to_default=(w > 0 and h > 0),
                             upsample_size=args.resize_out_ratio)

        scores = 0
        ann_idx = cocoGt.getAnnIds(imgIds=[img_idx], catIds=[1])
        anns = cocoGt.loadAnns(ann_idx)
        for human in humans:
            item = {
                'image_id':
outputs = tf.get_default_graph().get_tensor_by_name('feat_concat:0')
# heatmaps_tensor = tf.get_default_graph().get_tensor_by_name('paf/class_out:0')
heatmaps_tensor = tf.get_default_graph().get_tensor_by_name('hm_out:0')
# pafs_tensor = tf.get_default_graph().get_tensor_by_name('paf/regression_out:0')
pafs_tensor = tf.get_default_graph().get_tensor_by_name('paf_out:0')

cap = cv2.VideoCapture(args.video)
with tf.Session() as sess:
    while cap.isOpened():
        ret, img = cap.read()
        if ret:
            t2 = time.time()
            print('read model time cost:   ', t2 - t1)

            image = read_imgfile(img,
                                 args.input_width,
                                 args.input_height,
                                 web_cam=True)
            # img_np = image.copy()

            t3 = time.time()
            print('read image time cost:    ', t3 - t2)

            backbone_feature = sess.run(outputs, feed_dict={inputs: image})
            heatMat, pafMat = sess.run([heatmaps_tensor, pafs_tensor],
                                       feed_dict={outputs: backbone_feature})

            t4 = time.time()
            print('feature out time cost:   ', t4 - t3)

            heatMat, pafMat = heatMat[0], pafMat[0]
Example #26

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='tf-pose-estimation run')  # add arguments to the program
    parser.add_argument('--image', type=str, default='../images/p1.jpg')  # image name, otherwise the default image is used
    parser.add_argument('--resolution', type=str, default='432x368', help='network input resolution. default=432x368')  # specify resolution
    parser.add_argument('--model', type=str, default='mobilenet_thin', help='cmu / mobilenet_thin')  # specify model
    parser.add_argument('--scales', type=str, default='[None]', help='for multiple scales, eg. [1.0, (1.1, 0.05)]')  # multi-scale inference settings
    args = parser.parse_args()  # args holds all parsed arguments
    scales = ast.literal_eval(args.scales)

    w, h = model_wh(args.resolution)  # return width and height after checking they are multiples of 16
    e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))  # load the model at the given input size

    # estimate human poses from a single image !
    image = common.read_imgfile(args.image, None, None)
    # image = cv2.fastNlMeansDenoisingColored(image, None, 10, 10, 7, 21)
    t = time.time()
    humans = e.inference(image, scales=scales)
    elapsed = time.time() - t

    logger.info('inference image: %s in %.4f seconds.' % (args.image, elapsed))

    image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)
    # cv2.imshow('tf-pose-estimation result', image)
    # cv2.waitKey()

    import matplotlib.pyplot as plt

    fig = plt.figure()
    a = fig.add_subplot(2, 2, 1)
Example #27
def main():

    yolo = YOLO()
    max_cosine_distance = 0.3
    nn_budget = None
    nms_max_overlap = 1.0
    model_filename = 'model_data/mars-small128.pb'
    encoder = gdet.create_box_encoder(model_filename, batch_size=1)
    metric = nn_matching.NearestNeighborDistanceMetric("cosine",
                                                       max_cosine_distance,
                                                       nn_budget)
    tracker = Tracker(metric)
    parser = argparse.ArgumentParser(
        description='Training codes for Openpose using Tensorflow')
    parser.add_argument('--checkpoint_path',
                        type=str,
                        default='checkpoints/train/2018-12-13-16-56-49/')
    parser.add_argument('--backbone_net_ckpt_path',
                        type=str,
                        default='checkpoints/vgg/vgg_19.ckpt')
    parser.add_argument('--image', type=str, default=None)
    # parser.add_argument('--run_model', type=str, default='img')
    parser.add_argument('--video', type=str, default=None)
    parser.add_argument('--train_vgg', type=bool, default=True)
    parser.add_argument('--use_bn', type=bool, default=False)
    parser.add_argument('--save_video', type=str, default='result/our.mp4')
    args = parser.parse_args()
    checkpoint_path = args.checkpoint_path
    logger.info('checkpoint_path: ' + checkpoint_path)

    with tf.name_scope('inputs'):
        raw_img = tf.placeholder(tf.float32, shape=[None, None, None, 3])
        img_size = tf.placeholder(dtype=tf.int32,
                                  shape=(2, ),
                                  name='original_image_size')

    img_normalized = raw_img / 255 - 0.5

    # define vgg19
    with slim.arg_scope(vgg.vgg_arg_scope()):
        vgg_outputs, end_points = vgg.vgg_19(img_normalized)

    # get net graph
    logger.info('initializing model...')
    net = PafNet(inputs_x=vgg_outputs, use_bn=args.use_bn)
    hm_pre, cpm_pre, added_layers_out = net.gen_net()
    hm_up = tf.image.resize_area(hm_pre[5], img_size)
    cpm_up = tf.image.resize_area(cpm_pre[5], img_size)
    # hm_up = hm_pre[5]
    # cpm_up = cpm_pre[5]
    smoother = Smoother({'data': hm_up}, 25, 3.0)
    gaussian_heatMat = smoother.get_output()

    max_pooled_in_tensor = tf.nn.pool(gaussian_heatMat,
                                      window_shape=(3, 3),
                                      pooling_type='MAX',
                                      padding='SAME')
    tensor_peaks = tf.where(tf.equal(gaussian_heatMat, max_pooled_in_tensor),
                            gaussian_heatMat, tf.zeros_like(gaussian_heatMat))

    logger.info('initialize saver...')
    # trainable_var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='openpose_layers')
    # trainable_var_list = []
    trainable_var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                           scope='openpose_layers')
    if args.train_vgg:
        trainable_var_list = trainable_var_list + tf.get_collection(
            tf.GraphKeys.TRAINABLE_VARIABLES, scope='vgg_19')

    restorer = tf.train.Saver(tf.get_collection(
        tf.GraphKeys.TRAINABLE_VARIABLES, scope='vgg_19'),
                              name='vgg_restorer')
    saver = tf.train.Saver(trainable_var_list)

    logger.info('initialize session...')
    config = tf.ConfigProto()
    config.gpu_options.allow_growth = True
    with tf.Session(config=config) as sess:
        sess.run(tf.group(tf.global_variables_initializer()))
        logger.info('restoring vgg weights...')
        restorer.restore(sess, args.backbone_net_ckpt_path)
        logger.info('restoring from checkpoint...')
        #saver.restore(sess, tf.train.latest_checkpoint(checkpoint_dir=checkpoint_path))
        saver.restore(sess, args.checkpoint_path + 'model-59000.ckpt')
        logger.info('initialization done')
        writeVideo_flag = True
        if args.image is None:
            if args.video is not None:
                cap = cv2.VideoCapture(args.video)
                w = int(cap.get(3))
                h = int(cap.get(4))

            else:
                cap = cv2.VideoCapture("images/video.mp4")
                #cap = cv2.VideoCapture("rtsp://*****:*****@192.168.43.51:554//Streaming/Channels/1")
                #cap = cv2.VideoCapture("http://*****:*****@192.168.1.111:8081")
                #cap = cv2.VideoCapture("rtsp://*****:*****@192.168.1.106:554//Streaming/Channels/1")
            _, image = cap.read()
            #print(_,image)
            if image is None:
                logger.error("Can't read video")
                sys.exit(-1)
            fps = cap.get(cv2.CAP_PROP_FPS)
            ori_w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
            ori_h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
            #print(fps,ori_w,ori_h)
            if args.save_video is not None:
                fourcc = cv2.VideoWriter_fourcc(*'MP4V')
                video_saver = cv2.VideoWriter('result/our.mp4', fourcc, fps,
                                              (ori_w, ori_h))
                logger.info('record video to %s' % args.save_video)
            logger.info('fps@%f' % fps)
            size = [int(654 * (ori_h / ori_w)), 654]
            h = int(654 * (ori_h / ori_w))
            time_n = time.time()
            #print(time_n)

            max_boxs = 0
            person_track = {}
            yolo2 = YOLO2()

            while True:
                face = []
                cur1 = conn.cursor()  # get a cursor
                sql = "select * from worker"
                cur1.execute(sql)
                data = cur1.fetchall()
                for d in data:
                    # note: int values must be converted with str()
                    name = str(d[1]) + '_' + d[2]

                    face.append(name)
                cur1.close()  # close the cursor

                _, image_fist = cap.read()
                # detect whether protective gear is worn

                img = Image.fromarray(
                    cv2.cvtColor(image_fist, cv2.COLOR_BGR2RGB))
                image, wear = yolo2.detect_image(img)
                image = np.array(image)
                image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

                # draw the warning (boundary) line
                cv2.line(image, (837, 393), (930, 300), (0, 255, 255), 3)
                transboundaryline = t.line_detect_possible_demo(image)

                # OpenPose 2D pose estimation
                img = np.array(cv2.resize(image, (654, h)))
                # cv2.imshow('raw', img)
                img_corner = np.array(
                    cv2.resize(image, (360, int(360 * (ori_h / ori_w)))))
                img = img[np.newaxis, :]
                peaks, heatmap, vectormap = sess.run(
                    [tensor_peaks, hm_up, cpm_up],
                    feed_dict={
                        raw_img: img,
                        img_size: size
                    })
                bodys = PoseEstimator.estimate_paf(peaks[0], heatmap[0],
                                                   vectormap[0])

                image, person = TfPoseEstimator.draw_humans(image,
                                                            bodys,
                                                            imgcopy=False)
                # take keypoint 10 (right foot) and 13 (left foot)

                foot = []
                if len(person) > 0:
                    for p in person:
                        foot_lr = []
                        if 10 in p and 13 in p:
                            foot_lr.append(p[10])
                            foot_lr.append(p[13])

                        if len(foot_lr) > 1:
                            foot.append(foot_lr)

                fps = round(1 / (time.time() - time_n), 2)
                image = cv2.putText(image,
                                    str(fps) + 'fps', (10, 15),
                                    cv2.FONT_HERSHEY_COMPLEX_SMALL, 1,
                                    (255, 255, 255))
                time_n = time.time()

                # Deep SORT object detection and tracking
                image2 = Image.fromarray(image_fist)
                boxs = yolo.detect_image(image2)
                features = encoder(image, boxs)
                detections = [
                    Detection(bbox, 1.0, feature)
                    for bbox, feature in zip(boxs, features)
                ]
                boxes = np.array([d.tlwh for d in detections])
                scores = np.array([d.confidence for d in detections])
                indices = preprocessing.non_max_suppression(
                    boxes, nms_max_overlap, scores)
                detections = [detections[i] for i in indices]
                if len(boxs) > max_boxs:
                    max_boxs = len(boxs)
                # print(max_boxs)

                # Call the tracker
                tracker.predict()
                tracker.update(detections)

                for track in tracker.tracks:

                    if max_boxs < track.track_id:
                        tracker.tracks.remove(track)
                        tracker._next_id = max_boxs + 1

                    if not track.is_confirmed() or track.time_since_update > 1:
                        continue

                    bbox = track.to_tlbr()
                    PointX = bbox[0] + ((bbox[2] - bbox[0]) / 2)
                    PointY = bbox[3]

                    if track.track_id not in person_track:
                        track2 = copy.deepcopy(track)
                        person_track[track.track_id] = track2

                    else:

                        track2 = copy.deepcopy(track)
                        bbox2 = person_track[track.track_id].to_tlbr()
                        PointX2 = bbox2[0] + ((bbox2[2] - bbox2[0]) / 2)
                        PointY2 = bbox2[3]
                        distance = math.sqrt(
                            pow(PointX - PointX2, 2) +
                            pow(PointY - PointY2, 2))
                        if distance < 120:
                            person_track[track.track_id] = track2

                        else:

                            # print('last',track.track_id)
                            dis = {}
                            for key in person_track:
                                bbox3 = person_track[key].to_tlbr()
                                PointX3 = bbox3[0] + (
                                    (bbox3[2] - bbox3[0]) / 2)
                                PointY3 = bbox3[3]

                                d = math.sqrt(
                                    pow(PointX3 - PointX, 2) +
                                    pow(PointY3 - PointY, 2))
                                dis[key] = d
                            dis = sorted(dis.items(),
                                         key=operator.itemgetter(1),
                                         reverse=False)

                            track2.track_id = dis[0][0]
                            person_track[dis[0][0]] = track2
                            tracker.tracks.remove(track)
                            tracker.tracks.append(person_track[track.track_id])

                    # record the class (worker identity) for this track

                    try:
                        box_title = face[track2.track_id - 1]
                    except Exception as e:
                        box_title = str(track2.track_id) + "_" + "unknow"
                    if box_title not in workers:
                        wid = box_title.split('_')[0]
                        localtime = time.asctime(time.localtime(time.time()))
                        workers[box_title] = wk.Worker()
                        workers[box_title].set(box_title, localtime,
                                               (int(PointX), int(PointY)))
                        cur2 = conn.cursor()  # get a cursor
                        sql2 = "UPDATE worker SET in_time='" + localtime + "' WHERE worker_id= '" + wid + "'"
                        cur2.execute(sql2)
                        cur2.close()  # close the cursor

                    else:
                        localtime = time.asctime(time.localtime(time.time()))
                        yoloPoint = (int(PointX), int(PointY))
                        foot_dic = {}
                        wear_dic = {}

                        for f in foot:
                            fp = []
                            footCenter = ((f[0][0] + f[1][0]) / 2,
                                          (f[0][1] + f[1][1]) / 2)
                            foot_dis = int(
                                math.sqrt(
                                    pow(footCenter[0] - yoloPoint[0], 2) +
                                    pow(footCenter[1] - yoloPoint[1], 2)))
                            #print(foot_dis)
                            fp.append(f)
                            fp.append(footCenter)
                            foot_dic[foot_dis] = fp

                        #print(box_title, 'sss', foot_dic)
                        foot_dic = sorted(foot_dic.items(),
                                          key=operator.itemgetter(0),
                                          reverse=False)
                        workers[box_title].current_point = foot_dic[0][1][1]
                        workers[box_title].track_point.append(
                            workers[box_title].current_point)

                        #print(box_title,'sss',foot_dic[0][1][1])
                        mytrack = str(workers[box_title].track_point)
                        wid = box_title.split('_')[0]
                        # Kalman-filter prediction of the next position
                        if wid not in KalmanNmae:
                            myKalman(wid)
                        if wid not in lmp:
                            setLMP(wid)
                        cpx, cpy = predict(workers[box_title].current_point[0],
                                           workers[box_title].current_point[1],
                                           wid)

                        if cpx[0] == 0.0 or cpy[0] == 0.0:
                            cpx[0] = workers[box_title].current_point[0]
                            cpy[0] = workers[box_title].current_point[1]
                        workers[box_title].next_point = (int(cpx), int(cpy))

                        workers[box_title].current_footR = foot_dic[0][1][0][0]
                        workers[box_title].current_footL = foot_dic[0][1][0][1]
                        cur3 = conn.cursor()  # open a cursor
                        sql = "UPDATE worker SET current_point= '" + str(
                            workers[box_title].current_point
                        ) + "' , current_footR = '" + str(
                            workers[box_title].current_footR
                        ) + "',current_footL = '" + str(
                            workers[box_title].current_footL
                        ) + "',track_point = '" + mytrack + "',next_point = '" + str(
                            workers[box_title].next_point
                        ) + "' WHERE worker_id= '" + wid + "'"
                        cur3.execute(sql)
                        cur3.close()
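                        # NOTE: the UPDATE above is built by string concatenation. A
                        # parameterized query (sketch, assuming a DB-API driver such as
                        # pymysql that uses %s placeholders) would avoid quoting and
                        # injection problems, e.g.:
                        #   cur3.execute(
                        #       "UPDATE worker SET current_point=%s, current_footR=%s, "
                        #       "current_footL=%s, track_point=%s, next_point=%s "
                        #       "WHERE worker_id=%s",
                        #       (str(workers[box_title].current_point),
                        #        str(workers[box_title].current_footR),
                        #        str(workers[box_title].current_footL),
                        #        mytrack, str(workers[box_title].next_point), wid))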
                        # Record missing or unsafe protective equipment
                        if len(wear) > 0:
                            for w in wear:
                                wear_dis = int(
                                    math.sqrt(
                                        pow(w[0] - yoloPoint[0], 2) +
                                        pow(w[1] - yoloPoint[1], 2)))
                                wear_dic[wear_dis] = w
                            wear_dic = sorted(wear_dic.items(),
                                              key=operator.itemgetter(0),
                                              reverse=False)

                            if wear_dic[0][0] < 120:
                                cur4 = conn.cursor()  # open a cursor

                                if wear[wear_dic[0][1]] == 1:
                                    if len(workers[box_title].wear['no helmet']
                                           ) == 0:
                                        workers[box_title].wear[
                                            'no helmet'].append(localtime)

                                        sql = "INSERT INTO wear SET worker_id = '" + wid + "', type = 'no_helmet',abnormal_time = '" + localtime + "'"
                                        cur4.execute(sql)
                                        cur4.close()  # close the cursor

                                    else:
                                        if localtime not in workers[
                                                box_title].wear['no helmet']:

                                            workers[box_title].wear[
                                                'no helmet'].append(localtime)
                                            sql = "INSERT INTO wear SET worker_id = '" + wid + "', type = 'no_helmet',abnormal_time = '" + localtime + "'"
                                            cur4.execute(sql)
                                            cur4.close()  # close the cursor

                                elif wear[wear_dic[0][1]] == 2:
                                    if len(workers[box_title].
                                           wear['no work cloths']) == 0:
                                        workers[box_title].wear[
                                            'no work cloths'].append(localtime)
                                        sql = "INSERT INTO wear SET worker_id = '" + wid + "', type = 'no work cloths',abnormal_time = '" + localtime + "'"
                                        cur4.execute(sql)
                                        cur4.close()  # close the cursor
                                    else:
                                        if localtime not in workers[
                                                box_title].wear[
                                                    'no work cloths']:
                                            workers[box_title].wear[
                                                'no work cloths'].append(
                                                    localtime)
                                            sql = "INSERT INTO wear SET worker_id = '" + wid + "', type = 'no work cloths',abnormal_time = '" + localtime + "'"
                                            cur4.execute(sql)
                                            cur4.close()  # close the cursor
                                elif wear[wear_dic[0][1]] == 3:
                                    if len(workers[box_title].
                                           wear['unsafe wear']) == 0:
                                        workers[box_title].wear[
                                            'unsafe wear'].append(localtime)
                                        sql = "INSERT INTO wear SET worker_id = '" + wid + "', type = 'unsafe wear',abnormal_time = '" + localtime + "'"
                                        cur4.execute(sql)
                                        cur4.close()  # close the cursor
                                    else:
                                        if localtime not in workers[
                                                box_title].wear['unsafe wear']:
                                            workers[box_title].wear[
                                                'unsafe wear'].append(
                                                    localtime)
                                            sql = "INSERT INTO wear SET worker_id = '" + wid + "', type = 'unsafe wear',abnormal_time = '" + localtime + "'"
                                            cur4.execute(sql)
                                            cur4.close()  # close the cursor

                        # Record boundary-crossing (transboundary) events

                        if len(workers[box_title].track_point) > 4:

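                            # Boundary check: does the segment between the last two
                            # tracked points cross any configured restricted line?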
                            for i in range(len(transboundaryline)):
                                p1 = (transboundaryline[i][0],
                                      transboundaryline[i][1])
                                p2 = (transboundaryline[i][2],
                                      transboundaryline[i][3])
                                p3 = workers[box_title].track_point[-2]
                                p4 = workers[box_title].track_point[-1]
                                a = t.IsIntersec(p1, p2, p3, p4)
                                if a == '有交点':  # IsIntersec returns '有交点' ("they intersect") when the segments cross

                                    cur5 = conn.cursor()  # open a cursor
                                    cur6 = conn.cursor()  # open a second cursor
                                    cur5.execute(
                                        "select time from transboundary where worker_id = '"
                                        + wid + "' ")
                                    query_time = cur5.fetchone()
                                    cur5.close()  # close the cursor
                                    if query_time is None:
                                        print('boundary crossing')
                                        sql = "INSERT INTO transboundary SET worker_id = '" + wid + "',time = '" + localtime + "'"
                                        cur6.execute(sql)
                                        cur6.close()  # close the cursor
                                    else:
                                        already_logged = False
                                        for qt in query_time:
                                            if qt == localtime:
                                                already_logged = True
                                        if not already_logged:
                                            print('boundary crossing')
                                            sql = "INSERT INTO transboundary SET worker_id = '" + wid + "',time = '" + localtime + "'"
                                            cur6.execute(sql)
                                            cur6.close()  # close the cursor
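                        # Once enough history has accumulated, keep the point from five
                        # frames back as this worker's previous position.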
                        if len(workers[box_title].track_point) >= 20:
                            workers[box_title].previous_point = workers[
                                box_title].track_point[-5]
                    conn.commit()
                    try:
                        cv2.putText(image, face[track2.track_id - 1],
                                    (int(bbox[0]), int(bbox[1])), 0,
                                    5e-3 * 200, (0, 255, 0), 2)
                    except Exception:
                        cv2.putText(image, "unknown",
                                    (int(bbox[0]), int(bbox[1])), 0,
                                    5e-3 * 200, (0, 255, 0), 2)

                if args.video is not None:
                    image[27:img_corner.shape[0] +
                          27, :img_corner.shape[1]] = img_corner  # [3:-10, :]
                cv2.imshow(' ', image)
                if args.save_video is not None:
                    video_saver.write(image)
                cv2.waitKey(1)
            else:

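                # Single-image mode: run the network once on args.image, build the
                # skeletons from the peak/heatmap/PAF outputs and display the result.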
                image = common.read_imgfile(args.image)
                if image is None:
                    logger.error('Image can not be read, path=%s' % args.image)
                    sys.exit(-1)
                size = [image.shape[0], image.shape[1]]
                h = int(654 * (size[0] / size[1]))
                img = np.array(cv2.resize(image, (654, h)))
                cv2.imshow('ini', img)
                img = img[np.newaxis, :]
                peaks, heatmap, vectormap = sess.run(
                    [tensor_peaks, hm_up, cpm_up],
                    feed_dict={
                        raw_img: img,
                        img_size: size
                    })
                cv2.imshow('in', vectormap[0, :, :, 0])
                bodys = PoseEstimator.estimate_paf(peaks[0], heatmap[0],
                                                   vectormap[0])
                image = TfPoseEstimator.draw_humans(image,
                                                    bodys,
                                                    imgcopy=False)
                cv2.imshow(' ', image)
                cv2.waitKey(0)
Exemple #28
0
                # List all images (frames) within camera
                cam_path = os.path.join(scene_path, 'hdImgs', cam_name)
                img_names = sorted(os.listdir(cam_path))

                # Containers to save
                heatMats = []
                pafMats = []
                baseFeatMats = []
                joints2d = []

                # Iterate over images
                for img_name in img_names:

                    # Read current image
                    img_path = os.path.join(cam_path, img_name)
                    image, IMG_WIDTH, IMG_HEIGHT = read_imgfile(
                        img_path, args.scale_factor)

                    # Run OpenPose-net on current image
                    heatMat, pafMat, baseFeatMat = \
                        sess.run([heatmaps_tensor, pafs_tensor,
                                  base_feat_tensor],
                                 feed_dict={inputs: image})
                    heatMat = heatMat[0]
                    pafMat = pafMat[0]
                    baseFeatMat = baseFeatMat[0]
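                    # heatMat holds the per-joint confidence maps, pafMat the part
                    # affinity fields used to link joints into skeletons.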

                    # Compute 2d pose based on OpenPose-net output
                    humans = estimate_pose(heatMat, pafMat)

                    # Convert the [0,1]-normalized joint coordinates into pixel
                    # coordinates matching the image size
Exemple #29
0
    'Mconv7_stage6_L2/BiasAdd:0')
pafs_tensor = tf.get_default_graph().get_tensor_by_name(
    'Mconv7_stage6_L1/BiasAdd:0')
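# In the CMU OpenPose graph the stage-6 L2 branch outputs the joint confidence
# maps and the L1 branch the part-affinity fields.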

with tf.Session() as sess:
    for imgpath in glob.glob(img_path_raw + "*.jpg"):
        basename = os.path.basename(imgpath)
        outpath = img_path_final + basename

        if os.path.exists(outpath):  # skip already-processed images so a crash does not force restarting from scratch
            print("Skipping image \t {}".format(basename))
            continue
        else:
            print("Processing image \t {}".format(basename))

        image = read_imgfile(imgpath, img_width, img_height)

        heatMat, pafMat = sess.run([heatmaps_tensor, pafs_tensor],
                                   feed_dict={inputs: image})

        humans = estimate_pose(heatMat[0], pafMat[0])

        # display
        image = cv2.imread(imgpath)
        image_h, image_w = image.shape[:2]
        image = draw_humans(image, humans)

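        # Resize the annotated frame to a fixed 480 px height, preserving aspect ratio.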
        scale = 480.0 / image_h
        newh, neww = 480, int(scale * image_w + 0.5)

        image = cv2.resize(image, (neww, newh), interpolation=cv2.INTER_AREA)
    def post(self):
        global finished_post
        if ((self.request.headers['Content-Type'] == 'imagebin')
                and (finished_post == 1)):
            print(finished_post)
            finished_post = 0
            print('Received image')
            image = self.request.body
            fh = open('static/image1.jpg', 'wb')
            fh.write(image)
            fh.close()
            #fh = open('static/image1.jpg','ab')
            #fh.write(bytes([0xD9]))
            #fh.close()
            print('0')
            #image = cv2.imread('static/image1.jpg')
            print('1')
            print('2')

            parser = argparse.ArgumentParser(
                description='tf-pose-estimation run')
            parser.add_argument(
                '--resolution',
                type=str,
                default='432x368',
                help='network input resolution. default=432x368')
            parser.add_argument('--model',
                                type=str,
                                default='mobilenet_thin',
                                help='cmu / mobilenet_thin')
            parser.add_argument(
                '--scales',
                type=str,
                default='[None]',
                help='for multiple scales, eg. [1.0, (1.1, 0.05)]')
            args = parser.parse_args()
            scales = ast.literal_eval(args.scales)

            w, h = model_wh(args.resolution)
            e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))
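            # NOTE: the estimator is rebuilt on every POST, which reloads the frozen
            # graph each time; a long-lived instance created at server start-up would
            # avoid that per-request cost.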

            # estimate human poses from a single image !
            image = common.read_imgfile('static/image1.jpg', None, None)
            # image = cv2.fastNlMeansDenoisingColored(image, None, 10, 10, 7, 21)
            t = time.time()
            humans = e.inference(image, scales=scales)
            elapsed = time.time() - t

            logger.info('inference image: static/image1.jpg in %.4f seconds.' %
                        (elapsed))

            image = TfPoseEstimator.draw_humans(image, humans, imgcopy=False)

            #cv2.putText(image,
            #               f"Fallen: False",
            #              (60, 60),  cv2.FONT_HERSHEY_SIMPLEX, 2,
            #             (0, 255, 0), 5)

            cv2.imwrite('static/image3.jpg', image)

            for client in clients:
                update_clients(client)

            print(finished_post)
            finished_post = 1
def pose_comparison():
    parser = argparse.ArgumentParser(description='tf-pose-estimation run')
    parser.add_argument('--image', type=str, default='./images/p1.jpg')
    parser.add_argument('--resolution',
                        type=str,
                        default='432x368',
                        help='network input resolution. default=432x368')
    parser.add_argument('--model',
                        type=str,
                        default='mobilenet_thin',
                        help='cmu / mobilenet_thin')
    parser.add_argument('--scales',
                        type=str,
                        default='[None]',
                        help='for multiple scales, eg. [1.0, (1.1, 0.05)]')
    args = parser.parse_args()
    scales = ast.literal_eval(args.scales)

    w, h = model_wh(args.resolution)
    e = TfPoseEstimator(get_graph_path(args.model), target_size=(w, h))

    ref_image = common.read_imgfile(REF_POSE_PATH, None, None)
    ref_image = cv2.resize(ref_image, (640, 480))
    # estimate human poses from a single image !
    image = common.read_imgfile(args.image, None, None)
    image = cv2.resize(image, (640, 480))
    # image = cv2.fastNlMeansDenoisingColored(image, None, 10, 10, 7, 21)
    #t = time.time()

    ref_humans = e.inference(ref_image, scales=scales)
    humans = e.inference(image, scales=scales)

    #elapsed = time.time() - t

    #logger.info('inference image: %s in %.4f seconds.' % (args.image, elapsed))

    _, ref_centers = TfPoseEstimator.draw_humans_mine(ref_image,
                                                      ref_humans,
                                                      imgcopy=False)
    _, centers = TfPoseEstimator.draw_humans_mine(image, humans, imgcopy=False)

    ref_centers = list(ref_centers.values())
    centers = list(centers.values())

    ref_centers = list(sum(ref_centers, ()))
    centers = list(sum(centers, ()))

    ref_centers = np.array(ref_centers, dtype=int)
    centers = np.array(centers, dtype=int)

    shapes = []
    shapes.append(ref_centers)
    shapes.append(centers)

    #create canvas on which the aligned pose shapes will be visualized
    canvas = np.full([640, 480], 255).astype('uint8')

    #convert to 3 channel RGB for fun colors!
    canvas = cv2.cvtColor(canvas, cv2.COLOR_GRAY2RGB)
    #im = draw_shapes(canvas,shapes)

    x, y = get_translation(shapes[0])
    new_shapes = []
    new_shapes.append(shapes[0])

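    # Align each pose to the reference with Procrustes analysis (removing
    # translation, scale and rotation), then shift it back by the reference
    # translation so both can be drawn in the same frame.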
    for i in range(1, len(shapes)):
        new_shape = procrustes_analysis(shapes[0], shapes[i])
        new_shape[::2] = new_shape[::2] + x
        new_shape[1::2] = new_shape[1::2] + y
        new_shape = new_shape.astype(int)
        new_shapes.append(new_shape)

    pts_list = []

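    # Reshape each flat [x0, y0, x1, y1, ...] vector into a list of (x, y) points
    # for OpenCV drawing.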
    for lst in new_shapes:
        temp = lst.reshape(-1, 1, 2)
        pts = list(map(tuple, temp))
        pts_list.append(pts)

    for i in range(18):
        cv2.circle(ref_image,
                   tuple(pts_list[0][i][0]),
                   3, (255, 0, 0),
                   thickness=3,
                   lineType=8,
                   shift=0)
        cv2.circle(ref_image,
                   tuple(pts_list[1][i][0]),
                   3, (255, 255, 0),
                   thickness=3,
                   lineType=8,
                   shift=0)

    cv2.imshow('tf-pose-estimation result', ref_image)
    cv2.waitKey(0)

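    # Procrustes distance between the reference shape and each aligned shape;
    # smaller values mean the two poses are more similar.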
    variations = []
    for i in range(len(new_shapes)):
        dist = procrustes_distance(shapes[0], new_shapes[i])
        variations.append(dist)

    print(variations)