Code example #1
import cv2

# HourglassNet, MPIIDataGen, post_process_heatmap and render_joints are
# provided by the project's own modules; their import paths depend on the
# repository layout.


def main_inference(model_json, model_weights, num_stack, num_class, imgfile, confth, tiny):
    # Build the network. The tiny variant uses fewer channels and a smaller
    # input/output resolution; the MPII models predict 16 keypoint classes.
    if tiny:
        xnet = HourglassNet(num_classes=16, num_stacks=num_stack, num_channels=128,
                            inres=(192, 192), outres=(48, 48))
    else:
        xnet = HourglassNet(num_classes=16, num_stacks=num_stack, num_channels=256,
                            inres=(256, 256), outres=(64, 64))

    xnet.load_model(model_json, model_weights)

    # Run the network on a single image file; `out` holds the predicted
    # heatmaps and `scale` the factor between the image and the network input.
    out, scale = xnet.inference_file(imgfile)

    kps = post_process_heatmap(out[0, :, :, :])

    # Zero out the confidence of keypoints that should not be drawn;
    # names must match the keys returned by MPIIDataGen.get_kp_keys().
    ignore_kps = ['plevis', 'thorax', 'head_top']
    kp_keys = MPIIDataGen.get_kp_keys()
    mkps = list()
    for i, _kp in enumerate(kps):
        if kp_keys[i] in ignore_kps:
            _conf = 0.0
        else:
            _conf = _kp[2]
        # Map heatmap coordinates back to image coordinates: the heatmap is
        # 1/4 of the network input resolution, hence the factor of 4.
        mkps.append((_kp[0] * scale[1] * 4, _kp[1] * scale[0] * 4, _conf))

    # Draw joints whose confidence exceeds confth and show the result.
    cvmat = render_joints(cv2.imread(imgfile), mkps, confth)

    cv2.imshow('frame', cvmat)
    cv2.waitKey()
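
main_inference is typically driven from a command line. A minimal argparse sketch of such a driver, assuming the option names and defaults below (they are illustrative, not taken from the repository):

import argparse

if __name__ == '__main__':
    # Hypothetical CLI wrapper; option names and defaults are assumptions.
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_json', required=True, help='network architecture file')
    parser.add_argument('--model_weights', required=True, help='trained weights file')
    parser.add_argument('--num_stack', type=int, default=2, help='number of hourglass stacks')
    parser.add_argument('--num_class', type=int, default=16, help='number of keypoint classes')
    parser.add_argument('--input_image', required=True, help='image to run inference on')
    parser.add_argument('--conf_threshold', type=float, default=0.2, help='confidence threshold for drawing joints')
    parser.add_argument('--tiny', action='store_true', help='use the tiny network configuration')
    args = parser.parse_args()

    main_inference(args.model_json, args.model_weights, args.num_stack,
                   args.num_class, args.input_image, args.conf_threshold, args.tiny)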
Code example #2
def main_video(model_json, model_weights, num_stack, num_class, videofile,
               confth):
    # Full-size configuration; num_channels=256 mirrors the non-tiny branch
    # in code example #1.
    xnet = HourglassNet(num_classes=num_class, num_stacks=num_stack, num_channels=256,
                        inres=(256, 256), outres=(64, 64))
    xnet.load_model(model_json, model_weights)

    cap = cv2.VideoCapture(videofile)
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break

        rgb = frame[:, :, ::-1]  # OpenCV reads BGR; the network expects RGB
        out, scale = xnet.inference_rgb(rgb, frame.shape)

        kps = post_process_heatmap(out[0, :, :, :])

        # Zero out the confidence of keypoints that should not be drawn;
        # names must match the keys returned by MPIIDataGen.get_kp_keys().
        ignore_kps = ['plevis', 'thorax', 'head_top']
        kp_keys = MPIIDataGen.get_kp_keys()
        mkps = list()
        for i, _kp in enumerate(kps):
            if kp_keys[i] in ignore_kps:
                _conf = 0.0
            else:
                _conf = _kp[2]
            # Heatmaps are 1/4 of the network input resolution, hence the
            # factor of 4 when mapping back to frame coordinates.
            mkps.append(
                (_kp[0] * scale[1] * 4, _kp[1] * scale[0] * 4, _conf))

        framejoints = render_joints(frame, mkps, confth)

        cv2.imshow('frame', framejoints)
        cv2.waitKey(10)

    cap.release()
    cv2.destroyAllWindows()
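
In these examples, post_process_heatmap is expected to return one (x, y, confidence) triple per keypoint channel of the predicted heatmaps. A generic per-channel argmax decoder illustrates the idea (a sketch of the usual technique, not the repository's implementation):

import numpy as np

def argmax_decode(heatmaps):
    # Sketch: decode an (H, W, C) heatmap tensor into one (x, y, conf)
    # triple per channel by taking the per-channel argmax.
    kps = []
    for c in range(heatmaps.shape[-1]):
        hm = heatmaps[:, :, c]
        y, x = np.unravel_index(np.argmax(hm), hm.shape)
        kps.append((float(x), float(y), float(hm[y, x])))
    return kps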
Code example #3
    def infer(self, imgfile, conf_threshold):
        # Run the stored network (self.hgnet) on a single image file.
        out, scale = self.hgnet.inference_file(imgfile)

        kps = post_process_heatmap(out[0, :, :, :])

        # Zero out the confidence of keypoints that should not be drawn;
        # names must match the keys returned by MPIIDataGen.get_kp_keys().
        ignore_kps = ['plevis', 'thorax', 'head_top']
        kp_keys = MPIIDataGen.get_kp_keys()
        mkps = list()
        for i, _kp in enumerate(kps):
            if kp_keys[i] in ignore_kps:
                _conf = 0.0
            else:
                _conf = _kp[2]
            # Heatmaps are 1/4 of the network input resolution, hence the
            # factor of 4 when mapping back to image coordinates.
            mkps.append((_kp[0] * scale[1] * 4, _kp[1] * scale[0] * 4, _conf))

        # Draw joints above conf_threshold and show the result.
        cvmat = render_joints(cv2.imread(imgfile), mkps, conf_threshold)

        cv2.imshow('frame', cvmat)
        cv2.waitKey()
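
Code example #3 is a method of a wrapper class that keeps the loaded network in self.hgnet. A minimal sketch of such a wrapper, with a hypothetical class name and constructor (only the self.hgnet attribute is implied by the method itself):

class HGInference(object):
    # Hypothetical wrapper class; only the self.hgnet attribute is implied
    # by the infer() method above.
    def __init__(self, model_json, model_weights, num_stack, num_class):
        self.hgnet = HourglassNet(num_classes=num_class, num_stacks=num_stack,
                                  num_channels=256, inres=(256, 256), outres=(64, 64))
        self.hgnet.load_model(model_json, model_weights)

# Usage (illustrative file names):
#     predictor = HGInference('net_arch.json', 'weights.h5', num_stack=2, num_class=16)
#     predictor.infer('person.jpg', conf_threshold=0.2)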