Example #1
0
def GetPoseandCostsS(cfg, dlc_cfg, sess, inputs, outputs, cap, nframes,
                     c_engine):
    """ Non batch wise pose estimation for video cap."""
    # Zero-pad width for frame keys so they sort lexicographically.
    strwidth = int(np.ceil(np.log10(nframes)))
    if cfg["cropping"]:
        # Delegate cropping to the video reader itself.
        cap.set_bbox(cfg["x1"], cfg["x2"], cfg["y1"], cfg["y2"])

    PredicteData = {}
    progress = tqdm(total=nframes)
    frame_idx = 0
    report_every = max(10, int(nframes / 100))  # update bar ~100 times max
    while cap.video.isOpened():
        if frame_idx % report_every == 0:
            progress.update(report_every)
        image = cap.read_frame(crop=cfg["cropping"])
        if image is None:
            # Reader returned nothing; stop once the expected count is reached.
            if frame_idx >= nframes:
                break
        else:
            key = "frame" + str(frame_idx).zfill(strwidth)
            PredicteData[key] = predict.get_detectionswithcosts(
                img_as_ubyte(image),
                dlc_cfg,
                sess,
                inputs,
                outputs,
                outall=False,
                nms_radius=dlc_cfg["nmsradius"],
                det_min_score=dlc_cfg["minconfidence"],
                c_engine=c_engine,
            )
        frame_idx += 1

    progress.close()
    # Metadata describing how the detections were produced.
    PredicteData["metadata"] = {
        "nms radius": dlc_cfg["nmsradius"],
        "minimal confidence": dlc_cfg["minconfidence"],
        "PAFgraph": dlc_cfg["partaffinityfield_graph"],
        "PAFinds": dlc_cfg.get(
            "paf_best",
            np.arange(len(dlc_cfg["partaffinityfield_graph"]))),
        "all_joints": [[j] for j in range(len(dlc_cfg["all_joints"]))],
        "all_joints_names": [
            dlc_cfg["all_joints_names"][j]
            for j in range(len(dlc_cfg["all_joints"]))
        ],
        "nframes": nframes,
    }
    return PredicteData, nframes
Example #2
0
def get_ma_pose(image, config, session, inputs, outputs):
    """
    Run DeepLabCut multi-animal detection on a single frame and return the pose.

    The underlying call also produces the scoremap, local reference fields and
    part-affinity fields (outall=True), but only the pose — the most probable
    location per joint — is returned to the caller.

    :param image: frame to analyze
    :param config, session, inputs, outputs: DeepLabCut configuration and TensorFlow variables from load_deeplabcut()

    :return: pose (most probable points for each joint)
    """
    _, _, _, pose = predict_multianimal.get_detectionswithcosts(
        image,
        config,
        session,
        inputs,
        outputs,
        outall=True,
        nms_radius=config['nmsradius'],
        det_min_score=config['minconfidence'],
        c_engine=False,
    )
    return pose
Example #3
0
def GetPoseandCostsS(cfg, dlc_cfg, sess, inputs, outputs, cap, nframes,
                     c_engine):
    """ Non batch wise pose estimation for video cap."""
    # Zero-pad width for frame keys so they sort lexicographically.
    strwidth = int(np.ceil(np.log10(nframes)))
    if cfg["cropping"]:
        print(
            "Cropping based on the x1 = %s x2 = %s y1 = %s y2 = %s. You can adjust the cropping coordinates in the config.yaml file."
            % (cfg["x1"], cfg["x2"], cfg["y1"], cfg["y2"]))
        # Guard clause: reject inverted crop boxes up front.
        if not (cfg["x2"] - cfg["x1"] > 0 and cfg["y2"] - cfg["y1"] > 0):
            raise Exception("Please check the order of cropping parameter!")
        # Guard clause: the box must lie inside the video frame
        # (cap.get(3)/get(4) are the capture width/height).
        x_ok = cfg["x1"] >= 0 and cfg["x2"] < int(cap.get(3) + 1)
        y_ok = cfg["y1"] >= 0 and cfg["y2"] < int(cap.get(4) + 1)
        if not (x_ok and y_ok):
            raise Exception("Please check the boundary of cropping!")

    PredicteData = {}
    progress = tqdm(total=nframes)
    frame_idx = 0
    report_every = max(10, int(nframes / 100))  # update bar ~100 times max
    while cap.isOpened():
        if frame_idx % report_every == 0:
            progress.update(report_every)

        success, raw = cap.read()
        if not success:
            # Video ended early: record how many frames were actually read.
            nframes = frame_idx
            break
        # OpenCV delivers BGR; the network expects RGB.
        rgb = cv2.cvtColor(raw, cv2.COLOR_BGR2RGB)
        if cfg["cropping"]:
            rgb = rgb[cfg["y1"]:cfg["y2"], cfg["x1"]:cfg["x2"]]
        key = "frame" + str(frame_idx).zfill(strwidth)
        PredicteData[key] = predict.get_detectionswithcosts(
            img_as_ubyte(rgb),
            dlc_cfg,
            sess,
            inputs,
            outputs,
            outall=False,
            nms_radius=dlc_cfg.nmsradius,
            det_min_score=dlc_cfg.minconfidence,
            c_engine=c_engine,
        )
        frame_idx += 1

    progress.close()
    # Metadata describing how the detections were produced.
    PredicteData["metadata"] = {
        "nms radius": dlc_cfg.nmsradius,
        "minimal confidence": dlc_cfg.minconfidence,
        "PAFgraph": dlc_cfg.partaffinityfield_graph,
        "all_joints": [[j] for j in range(len(dlc_cfg.all_joints))],
        "all_joints_names": [
            dlc_cfg.all_joints_names[j]
            for j in range(len(dlc_cfg.all_joints))
        ],
        "nframes": nframes,
    }

    return PredicteData, nframes