def prepare_image(self, image):
     was_fixed_point = not image.is_floating_point()
     image = torch.empty(image.shape, device='cpu', dtype=torch.float32).copy_(image)
     if was_fixed_point:
         image /= 255.0
     if image.shape[-2:] != (256, 256):
         image = resize(image, 256, 256)
     image = color_normalize(image, Mpii.RGB_MEAN, Mpii.RGB_STDDEV)
     return image
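A minimal usage sketch for prepare_image (the image path, the PIL-based loading, and the `predictor` instance are illustrative assumptions, not part of the example above):

import numpy as np
import torch
from PIL import Image

# Hypothetical caller: load an HxWxC uint8 image, convert it to the CxHxW
# tensor layout this method expects, and let prepare_image handle the float
# conversion, 256x256 resize and MPII color normalization.
pil_img = Image.open('person.jpg').convert('RGB')             # illustrative path
chw = torch.from_numpy(np.array(pil_img)).permute(2, 0, 1)    # uint8, CxHxW
inp = predictor.prepare_image(chw)   # assumes `predictor` is an instance defining prepare_image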
Example #2
    def predictfromDir(self, path):
        orgImg = Image.open(path)

        try:
            for orientation in ExifTags.TAGS.keys():
                if ExifTags.TAGS[orientation] == 'Orientation': break
            exif = dict(orgImg._getexif().items())
            if exif[orientation] == 3:
                orgImg = orgImg.rotate(180, expand=True)
            elif exif[orientation] == 6:
                orgImg = orgImg.rotate(270, expand=True)
            elif exif[orientation] == 8:
                orgImg = orgImg.rotate(90, expand=True)

        except Exception:
            # No EXIF metadata (or no Orientation tag); keep the image as-is.
            pass

        RGB_MEAN = torch.as_tensor([0.4404, 0.4440, 0.4327])
        RGB_STDDEV = torch.as_tensor([0.2458, 0.2410, 0.2468])

        im = np.asarray(orgImg)
        img = torch.tensor(im).transpose(0, 2)
        img = color_normalize(img, RGB_MEAN, RGB_STDDEV)
        if (img.size(0) == 4):
            img = img[:3]

        c, h, w = img.size()
        start = time.time()
        joints = self.predictor.estimate_joints(img, flip=True)
        end = time.time()
        xs, ys = list(joints[:, 0].numpy()), list(joints[:, 1].numpy())
        #print("infer time : ",end-start)

        left_antebrachial = np.array([ys[15] - ys[14], xs[15] - xs[14]])
        left_forearm = np.array([ys[13] - ys[14], xs[13] - xs[14]])
        left_back = np.array([ys[7] - ys[13], xs[7] - xs[13]])
        left_arm_angle = np.inner(left_antebrachial, left_forearm) / (
            np.linalg.norm(left_antebrachial) * np.linalg.norm(left_forearm))
        left_back_angle = np.inner(left_forearm, left_back) / (
            np.linalg.norm(left_forearm) * np.linalg.norm(left_back))

        right_antebrachial = np.array([ys[10] - ys[11], xs[10] - xs[11]])
        right_forearm = np.array([ys[12] - ys[11], xs[12] - xs[11]])
        right_back = np.array([ys[7] - ys[12], xs[7] - xs[12]])
        right_arm_angle = np.inner(right_antebrachial, right_forearm) / (
            np.linalg.norm(right_antebrachial) * np.linalg.norm(right_forearm))
        right_back_angle = np.inner(right_back, right_forearm) / (
            np.linalg.norm(right_back) * np.linalg.norm(right_forearm))

        #angle predict
        left_arm_angle = np.arccos(left_arm_angle) * 360 / (np.pi * 2)
        left_back_angle = 180 - np.arccos(left_back_angle) * 360 / (np.pi * 2)
        right_arm_angle = np.arccos(right_arm_angle) * 360 / (np.pi * 2)
        right_back_angle = 180 - np.arccos(right_back_angle) * 360 / (np.pi *
                                                                      2)

        return left_arm_angle, left_back_angle, right_arm_angle, right_back_angle
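The four return values are plain vector angles; a small worked example of the cosine formula used above, with toy vectors rather than real joint offsets:

import numpy as np

# cos(theta) = (a . b) / (||a|| * ||b||); degrees = arccos(cos) * 360 / (2 * pi)
a = np.array([0.0, 1.0])   # toy vectors, not actual joint offsets
b = np.array([1.0, 1.0])
cos_theta = np.inner(a, b) / (np.linalg.norm(a) * np.linalg.norm(b))
print(np.degrees(np.arccos(cos_theta)))  # 45.0, same as arccos(...) * 360 / (np.pi * 2)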
 def prepare_image(self, image):
     was_fixed_point = not image.is_floating_point()
     image = torch.empty_like(image, dtype=torch.float32).copy_(image)
     if was_fixed_point:
         image /= 255.0
     if image.shape[-2:] != self.input_shape:
         image = fit(image, self.input_shape, fit_mode='contain')
     image = color_normalize(image, self.data_info.rgb_mean,
                             self.data_info.rgb_stddev)
     return image
 def prepare_image(self, image):
     was_fixed_point = not image.is_floating_point()
     image = torch.empty(image.shape, device='cpu',
                         dtype=torch.float32).copy_(image)
     if was_fixed_point:
         image /= 255.0
     if image.shape[-2:] != (256, 256):
         image = resize(image, 256, 256)
     image = color_normalize(image, self.data_info.rgb_mean,
                             self.data_info.rgb_stddev)
     return image
            if ExifTags.TAGS[orientation] == 'Orientation': break
        exif = dict(orgImg._getexif().items())
        if exif[orientation] == 3:
            orgImg = orgImg.rotate(180, expand=True)
        elif exif[orientation] == 6:
            orgImg = orgImg.rotate(270, expand=True)
        elif exif[orientation] == 8:
            orgImg = orgImg.rotate(90, expand=True)
    except Exception:
        pass

    im = np.asarray(orgImg)
    idx = int(i[:-4])

    img = torch.tensor(im).transpose(0, 2)
    img = color_normalize(img, RGB_MEAN, RGB_STDDEV)
    joints = predictor.estimate_joints(img, flip=True)
    xs, ys = list(joints[:, 0].numpy()), list(joints[:, 1].numpy())

    angles = angle_predictor.predictFromjoints(joints)
    prob = model.predict_proba(np.asarray(angles).reshape(1, 4))[0][1]
    testResult.append(prob)
    #smoothing = convolve(testResult[:], g)
    smoothing = smoothListGaussian(testResult[:])
    ascending = True
    descending = False
    count = 0
    for p in range(1, len(smoothing) - 1):
        if ((smoothing[p] < threshold and smoothing[p + 1] >= threshold)
                and ascending):
            ascending = False
Example #6
def example_input(man_running_image):
    mean = torch.as_tensor([0.4404, 0.4440, 0.4327])
    std = torch.as_tensor([0.2458, 0.2410, 0.2468])
    image = fit(man_running_image, (256, 256), fit_mode='contain')
    image = color_normalize(image, mean, std)
    return image.unsqueeze(0)
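A hedged sketch of pushing this prepared batch through a pretrained hourglass network; the import path is assumed from the stacked_hourglass package used elsewhere in these examples, and `man_running_image` is assumed to be a CxHxW image tensor:

from stacked_hourglass import hg2, HumanPosePredictor  # import path assumed

batch = example_input(man_running_image)                  # 1x3x256x256, normalized
predictor = HumanPosePredictor(hg2(pretrained=True), device='cpu')
joints = predictor.estimate_joints(batch[0], flip=True)   # 16x2 MPII joint coordinates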
Example #7
    def __getitem__(self, index):
        sf = self.scale_factor
        rf = self.rot_factor
        if self.is_train:
            a = self.anno[self.train_list[index]]
        else:
            a = self.anno[self.valid_list[index]]

        img_path = os.path.join(self.img_folder, a['img_paths'])
        pts = torch.Tensor(a['joint_self'])
        # pts[:, 0:2] -= 1  # Convert pts to zero based

        # c = torch.Tensor(a['objpos']) - 1
        c = torch.Tensor(a['objpos'])
        s = a['scale_provided']

        # Adjust center/scale slightly to avoid cropping limbs
        if c[0] != -1:
            c[1] = c[1] + 15 * s
            s = s * 1.25

        # For single-person pose estimation with a centered/scaled figure
        nparts = pts.size(0)
        img = load_image(img_path)  # CxHxW

        r = 0
        if self.is_train:
            s = s*torch.randn(1).mul_(sf).add_(1).clamp(1-sf, 1+sf)[0]
            r = torch.randn(1).mul_(rf).clamp(-2*rf, 2*rf)[0] if random.random() <= 0.6 else 0

            # Flip
            if random.random() <= 0.5:
                img = torch.from_numpy(fliplr(img.numpy())).float()
                pts = shufflelr(pts, width=img.size(2), dataset='mpii')
                c[0] = img.size(2) - c[0]

            # Color
            img[0, :, :].mul_(random.uniform(0.8, 1.2)).clamp_(0, 1)
            img[1, :, :].mul_(random.uniform(0.8, 1.2)).clamp_(0, 1)
            img[2, :, :].mul_(random.uniform(0.8, 1.2)).clamp_(0, 1)

        # Prepare image and groundtruth map
        inp = crop(img, c, s, [self.inp_res, self.inp_res], rot=r)
        inp = color_normalize(inp, self.mean, self.std)

        # Generate ground truth
        tpts = pts.clone()
        target = torch.zeros(nparts, self.out_res, self.out_res)
        target_weight = tpts[:, 2].clone().view(nparts, 1)

        for i in range(nparts):
            # if tpts[i, 2] > 0: # This is evil!!
            if tpts[i, 1] > 0:
                tpts[i, 0:2] = to_torch(transform(tpts[i, 0:2]+1, c, s, [self.out_res, self.out_res], rot=r))
                target[i], vis = draw_labelmap(target[i], tpts[i]-1, self.sigma, type=self.label_type)
                target_weight[i, 0] *= vis

        # Meta info
        meta = {'index' : index, 'center' : c, 'scale' : s,
        'pts' : pts, 'tpts' : tpts, 'target_weight': target_weight}

        return inp, target, meta
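A minimal sketch of consuming this __getitem__ through a standard DataLoader; the enclosing dataset class and its constructor arguments are assumptions for illustration:

from torch.utils.data import DataLoader

train_set = Mpii(is_train=True)   # hypothetical constructor call for the dataset class above
loader = DataLoader(train_set, batch_size=6, shuffle=True, num_workers=2)

for inp, target, meta in loader:
    # inp: Bx3xinp_resxinp_res normalized crops
    # target: Bxnpartsxout_resxout_res Gaussian heatmaps, weighted by meta['target_weight']
    break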
Example #8
    def __getitem__(self, index):
        """Get an image referenced by index."""
        sf = self.scale_factor  # Generally from 0 to 0.25
        rf = self.rot_factor
        if self.is_train:
            a = self.train_list.iloc[index]
        else:
            a = self.valid_list.iloc[index]

        img_path = a['img_paths']
        # cv2 based image transformations
        img = cv2.imread(img_path, cv2.IMREAD_COLOR)  # HxWxC
        rows, cols, colors = img.shape
        # Joint label positions
        pts = torch.Tensor(a['joint_self'])
        # pts[:, 0:2] -= 1  # Convert pts to zero based
        c = tuple(a['objpos'])
        s = a['scale_provided']
        # In MPII, scale_provided is the size of the bounding box relative to 200 px
        # Depending on the flag "crop", we can decide to either:
        #   True: Crop to crop_size around obj_pos
        #   False: Keep original res
        # Then we downsize to inp_res
        if s == -1:  # Yogi data scale_provided is initialized to -1
            if self.crop:
                # If crop, then crop crop_size x crop_size around obj_pos
                s = self.crop_size / 200
                # Move the center away from the joint by a random distance < max_dist pixels
                max_dist = 64
                c = (int(
                    torch.randn(1).clamp(-1, 1).mul(max_dist).add(c[0]).clamp(
                        0, cols - 1)),
                     int(
                         torch.randn(1).clamp(-1,
                                              1).mul(max_dist).add(c[1]).clamp(
                                                  0, rows - 1)))
            else:
                # If no crop, then use the entire image
                s = rows / 200
                # Use the center of the image to rotate
                c = (int(cols / 2), int(rows / 2))

        # # Adjust scale slightly to avoid cropping limbs
        # if c[0] != -1:
        #     c[1] = c[1] + 15 * s
        #     s = s * 1.25

        # For pose estimation with a centered/scaled figure
        nparts = pts.size(0)
        r = 0
        if self.is_train:
            # Given sf, choose scale from [1-sf, 1+sf]
            # For sf = 0.25, scale is chosen from [0.75, 1.25]
            s = torch.randn(1).mul_(sf).add_(1).clamp(1 - sf, 1 + sf)[0]
            # Given rf, choose rotation from [-rf, rf]
            # For rf = 30, rotation is chosen from [-30, 30]
            r = torch.randn(1).mul_(rf).clamp(
                -rf, rf)[0] if random.random() <= 0.6 else 0
        if self.mode == 'original':
            img = load_image(img_path)  # CxHxW
            c = torch.Tensor(c)
            if self.is_train:
                # Flip
                if self.fliplr and random.random() <= 0.5:
                    img = torch.from_numpy(fliplr(img.numpy())).float()
                    pts = shufflelr(pts, width=img.size(2),
                                    dataset='yogi')  # TODO
                    c[0] = img.size(2) - c[0]

                # Color
                # img[0, :, :].mul_(random.uniform(0.8, 1.2)).clamp_(0, 1)
                # img[1, :, :].mul_(random.uniform(0.8, 1.2)).clamp_(0, 1)
                # img[2, :, :].mul_(random.uniform(0.8, 1.2)).clamp_(0, 1)
            # Prepare image and groundtruth map
            inp = crop(img, c, s, [self.inp_res, self.inp_res], rot=r)
            inp = color_normalize(inp, self.mean, self.std)
            t = None
        else:
            if self.is_train:
                # Flip
                if self.fliplr and random.random() <= 0.5:
                    img = cv2.flip(img, 1)
                    pts = torch.Tensor([[cols - x[0] - 1, x[1]] for x in pts])
                # TODO: Shuffle left and right labels

            # Rotate, scale and crop image using inp_res
            # And get transformation matrix
            img, t_inp = cv2_crop(img,
                                  c,
                                  s, (self.inp_res, self.inp_res),
                                  rot=r,
                                  crop=self.crop,
                                  crop_size=self.crop_size)
            # Get transformation matrix for resizing from inp_res to out_res
            # No other changes, i.e. new_center is center, no cropping, etc.
            # Please note scaling to out_res has to be done before
            _, t_resize = cv2_resize(img, (self.out_res, self.out_res))
            t = combine_transformations(t_resize, t_inp)
            # TODO Update color normalize
            inp = img_normalize(img, self.mean, self.std)
            # if self.is_train:
            #     # Color
            #     inp[0, :, :].mul_(random.uniform(0.8, 1.2)).clamp_(0, 1)
            #     inp[1, :, :].mul_(random.uniform(0.8, 1.2)).clamp_(0, 1)
            #     inp[2, :, :].mul_(random.uniform(0.8, 1.2)).clamp_(0, 1)

        # Generate ground truth
        tpts = pts.clone()
        target = torch.zeros(nparts, self.out_res, self.out_res)
        target_weight = tpts[:, 2].clone().view(nparts, 1)

        for i in range(nparts):
            if tpts[i, 2] > 0:  # This is evil!! # if tpts[i, 1] > 0:
                # Hack: Change later -
                # The + 1 and -1 wrt tpts is there in the original code
                # Using int(self.mode == 'original') to do the + 1, -1
                tpts[i, 0:2] = to_torch(
                    transform(tpts[i, 0:2] + int(self.mode == 'original'),
                              c,
                              s, [self.out_res, self.out_res],
                              rot=r,
                              t=t))
                target[i], vis = draw_labelmap(target[i],
                                               tpts[i] -
                                               int(self.mode == 'original'),
                                               self.sigma,
                                               type=self.label_type)
                target_weight[i, 0] *= vis

        # Meta info
        meta = {
            'index': index,
            'center': c,
            'scale': s,
            'pts': pts,
            'tpts': tpts,
            'target_weight': target_weight,
            'inp_res': self.inp_res,
            'out_res': self.out_res,
            'rot': r,
            'img_paths': img_path
        }

        return inp, target, meta
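A short hedged sanity-check sketch: recover the (x, y) peak of one generated heatmap and compare it with the transformed point tpts[i, 0:2]:

import torch

def heatmap_peak(hm):
    """Return (x, y) of the maximum in a single out_res x out_res heatmap."""
    flat = int(torch.argmax(hm))            # flattened row-major index
    y, x = divmod(flat, hm.shape[1])
    return x, y

# px, py = heatmap_peak(target[i])  # should lie near tpts[i, 0:2] for a visible joint i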
Example #9
    def __init__(self, fn):

        filename = fn  # store the file name

        # get video and slice into frame
        viddir = filename
        vidcap = cv2.VideoCapture(viddir)
        vidlen = len(viddir)
        except_filename = 0

        # Strip the trailing "[video name].mp4" from the loaded path
        for i in range(vidlen - 1, -1, -1):
            if viddir[i] == '/':
                except_filename = i
                break
        viddir = viddir[0:except_filename + 1]

        # Directory where the extracted frame images will be saved
        paradir = viddir + '/tmp+img'
        self.createFolder(paradir)

        # Split the video into frames using the getFrame helper
        sec = 0
        frameRate = 0.1  # capture one frame every 0.1 seconds
        count = 100
        success = self.getFrame(sec, vidcap, viddir, count)
        while success:
            count = count + 1
            sec = sec + frameRate
            sec = round(sec, 2)
            success = self.getFrame(sec, vidcap, viddir, count)

        self.predictor = HumanPosePredictor(hg8(pretrained=True),
                                            device='cuda')
        print("==model loaded==")
        # ...load image of a person into a PyTorch tensor...

        name = ""
        predictor = HumanPosePredictor(hg8(pretrained=True), device='cuda')
        model = xgb.XGBClassifier(max_depth=3,
                                  learning_rate=0.1,
                                  n_estimators=100,
                                  silent=True,
                                  objective='binary:logistic',
                                  booster='gbtree',
                                  n_jobs=1,
                                  nthread=None,
                                  gamma=0,
                                  min_child_weight=1,
                                  max_delta_step=0,
                                  subsample=1,
                                  colsample_bytree=1,
                                  colsample_bylevel=1,
                                  reg_alpha=0,
                                  reg_lambda=1,
                                  scale_pos_weight=1,
                                  base_score=0.5,
                                  random_state=0,
                                  seed=None,
                                  missing=None)
        model.load_model(viddir + 'xgboost.bst')  # load model
        g = Gaussian1DKernel(stddev=4)

        print("==model loaded==")

        RGB_MEAN = torch.as_tensor([0.4404, 0.4440, 0.4327])
        RGB_STDDEV = torch.as_tensor([0.2458, 0.2410, 0.2468])

        images = os.listdir("./" + name)
        print(images)
        images = sorted(images, key=lambda x: int(x.split(".")[0]))
        print("frames : ", len(images))

        result = {"name": name, "frames": dict()}

        threshold = 0.4
        img_array = list()
        testResult = list()
        for i in images:
            orgImg = Image.open("./" + name + i)
            #orgImg = orgImg.rotate(270, expand=True)
            print("!precessing " + str(i))

            try:
                for orientation in ExifTags.TAGS.keys():
                    if ExifTags.TAGS[orientation] == 'Orientation': break
                exif = dict(orgImg._getexif().items())
                if exif[orientation] == 3:
                    orgImg = orgImg.rotate(180, expand=True)
                elif exif[orientation] == 6:
                    orgImg = orgImg.rotate(270, expand=True)
                elif exif[orientation] == 8:
                    orgImg = orgImg.rotate(90, expand=True)
            except Exception:
                # No EXIF metadata; keep the image as-is.
                pass

            im = np.asarray(orgImg)
            idx = int(i[:-4])

            img = torch.tensor(im).transpose(0, 2)
            img = color_normalize(img, RGB_MEAN, RGB_STDDEV)
            joints = predictor.estimate_joints(img, flip=True)
            xs, ys = list(joints[:, 0].numpy()), list(joints[:, 1].numpy())

            angles = self.predictFromjoints(joints)

            prob = model.predict_proba(np.asarray(angles).reshape(1, 4))[0][1]
            testResult.append(prob)
            smoothing = convolve(testResult[:], g)
            #smoothing = selfG.smoothListGaussian(testResult[:])

            ascending = True
            descending = False
            count = 0
            for p in range(1, len(smoothing) - 1):
                if ((smoothing[p] < threshold
                     and smoothing[p + 1] >= threshold) and ascending):
                    ascending = False
                    descending = True
                elif ((smoothing[p] > threshold
                       and smoothing[p + 1] <= threshold) and descending):
                    ascending = True
                    descending = False
                    count += 1

            orgImg = np.array(orgImg)
            height, width, layers = orgImg.shape
            size = (width, height)
            img_array.append(cv2.cvtColor(orgImg, cv2.COLOR_BGR2RGB))
            # orgImg = cv2.line(orgImg, (ys[0], xs[0]), (ys[1], xs[1]), (10, 255, 0), 2)
            # orgImg = cv2.line(orgImg, (ys[1], xs[1]), (ys[2], xs[2]), (50, 160, 50), 2)
            # orgImg = cv2.line(orgImg, (ys[5], xs[5]), (ys[4], xs[4]), (50, 0, 255), 2)
            # orgImg = cv2.line(orgImg, (ys[4], xs[4]), (ys[3], xs[3]), (255, 0, 0), 2)
            # orgImg = cv2.line(orgImg, (ys[3], xs[3]), (ys[6], xs[6]), (255, 0, 0), 2)
            # orgImg = cv2.line(orgImg, (ys[6], xs[6]), (ys[2], xs[2]), (30, 30, 130), 2)
            orgImg = cv2.line(orgImg, (ys[6], xs[6]), (ys[7], xs[7]),
                              (153, 255, 255), 3)  # back
            # orgImg = cv2.line(orgImg, (ys[7], xs[7]), (ys[8], xs[8]), (255, 0, 0), 2)
            # orgImg = cv2.line(orgImg, (ys[8], xs[8]), (ys[9], xs[9]), (255, 0, 0), 2)
            orgImg = cv2.line(orgImg, (ys[10], xs[10]), (ys[11], xs[11]),
                              (153, 255, 255), 3)  # left forearm
            orgImg = cv2.line(orgImg, (ys[11], xs[11]), (ys[12], xs[12]),
                              (153, 255, 255), 3)
            orgImg = cv2.line(orgImg, (ys[12], xs[12]), (ys[7], xs[7]),
                              (153, 255, 255), 3)  # left lat
            orgImg = cv2.line(orgImg, (ys[14], xs[14]), (ys[13], xs[13]),
                              (153, 255, 255), 3)  # right biceps
            orgImg = cv2.line(orgImg, (ys[7], xs[7]), (ys[13], xs[13]),
                              (153, 255, 255), 3)  # right lat
            orgImg = cv2.line(orgImg, (ys[14], xs[14]), (ys[15], xs[15]),
                              (153, 255, 255), 3)  # right forearm
            if (len(smoothing) > 0):
                orgImg = cv2.putText(orgImg, str(smoothing[-1]), (30, 30),
                                     cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0),
                                     2, cv2.LINE_AA)
                orgImg = cv2.putText(orgImg, "count : " + str(count), (30, 75),
                                     cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0),
                                     2, cv2.LINE_AA)

            #cv2.imshow('image', cv2.cvtColor(orgImg, cv2.COLOR_BGR2RGB))

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        out = cv2.VideoWriter('result.avi', cv2.VideoWriter_fourcc(*'mp4v'),
                              30, size)
        for i in range(len(img_array)):
            out.write(img_array[i])
        out.release()

        print("done!")

        cv2.destroyAllWindows()
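The rep counter above is a simple hysteresis on the smoothed probability curve: a rise through the threshold arms the counter, and the following fall back below it counts one repetition. A toy illustration of the same crossing logic:

smoothing = [0.1, 0.2, 0.6, 0.7, 0.3, 0.1, 0.5, 0.8, 0.2]  # toy probabilities
threshold = 0.4
ascending, descending, count = True, False, 0
for p in range(1, len(smoothing) - 1):
    if smoothing[p] < threshold <= smoothing[p + 1] and ascending:
        ascending, descending = False, True   # curve rose through the threshold
    elif smoothing[p] > threshold >= smoothing[p + 1] and descending:
        ascending, descending = True, False   # curve fell back: one repetition
        count += 1
print(count)  # 2 for the toy sequence above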
    def __init__(self, fn):

        filename = fn
        # get video and slice into frame
        viddir = filename
        vidcap = cv2.VideoCapture(viddir)
        vidlen = len(viddir)
        except_filename = 0
        for i in range(vidlen - 1, -1, -1):
            if viddir[i] == '/':
                except_filename = i
                break
        viddir = viddir[0:except_filename + 1]

        img_name = " "

        paradir = viddir + '/tmp+img'
        self.createFolder(paradir)

        sec = 0
        frameRate = 0.1  # capture one frame every 0.1 seconds
        count = 100
        success = self.getFrame(sec, vidcap, viddir, count)
        while success:
            count = count + 1
            sec = sec + frameRate
            sec = round(sec, 2)
            success = self.getFrame(sec, vidcap, viddir, count)

        # ...load image of a person into a PyTorch tensor...

        model = hg8(pretrained=True)
        predictor = HumanPosePredictor(model, device='cpu')

        print("==model loaded==")

        RGB_MEAN = torch.as_tensor([0.4404, 0.4440, 0.4327])
        RGB_STDDEV = torch.as_tensor([0.2458, 0.2410, 0.2468])
        # im = np.asarray(Image.open("./images/test14]].jpeg"))
        orgImg = Image.open("./image100.jpg")
        try:
            for orientation in ExifTags.TAGS.keys():
                if ExifTags.TAGS[orientation] == 'Orientation': break
            exif = dict(orgImg._getexif().items())
            if exif[orientation] == 3:
                orgImg = orgImg.rotate(180, expand=True)
            elif exif[orientation] == 6:
                orgImg = orgImg.rotate(270, expand=True)
            elif exif[orientation] == 8:
                orgImg = orgImg.rotate(90, expand=True)

        except Exception:
            print("no metadata")
        im = np.asarray(orgImg)
        img = torch.tensor(im).transpose(0, 2)
        img = color_normalize(img, RGB_MEAN, RGB_STDDEV)
        if (img.size(0) == 4):
            img = img[:3]

        c, h, w = img.size()
        print("image size : ", h, w)
        start = time.time()
        joints = predictor.estimate_joints(img, flip=True)
        end = time.time()
        xs, ys = list(joints[:, 0].numpy()), list(joints[:, 1].numpy())
        print(joints)
        print("infer time : ", end - start)

        fig, ax = plt.subplots()
        plt.imshow(im)
        c = np.array([(0, 0, 1, 0.5), (0, 0, 1, 0.5), (0, 0, 1, 0.5),
                      (0, 0, 1, 0.5), (0, 0, 1, 0.5), (0, 0, 1, 0.5),
                      (0, 0, 1, 0.5), (0, 0, 1, 0.5), (0, 0, 1, 0.5),
                      (0, 0, 1, 0.5), (0, 0, 1, 0.5), (0, 0, 1, 0.5),
                      (0, 0, 1, 0.5), (0, 0, 1, 0.5), (0, 0, 1, 0.5)])
        lines = [[(ys[0], xs[0]), (ys[1], xs[1])],
                 [(ys[1], xs[1]), (ys[2], xs[2])],
                 [(ys[5], xs[5]), (ys[4], xs[4])],
                 [(ys[4], xs[4]), (ys[3], xs[3])],
                 [(ys[3], xs[3]), (ys[6], xs[6])],
                 [(ys[6], xs[6]), (ys[2], xs[2])],
                 [(ys[6], xs[6]), (ys[7], xs[7])],
                 [(ys[7], xs[7]), (ys[8], xs[8])],
                 [(ys[8], xs[8]), (ys[9], xs[9])],
                 [(ys[10], xs[10]), (ys[11], xs[11])],
                 [(ys[11], xs[11]), (ys[12], xs[12])],
                 [(ys[12], xs[12]), (ys[7], xs[7])],
                 [(ys[7], xs[7]), (ys[13], xs[13])],
                 [(ys[14], xs[14]), (ys[13], xs[13])],
                 [(ys[14], xs[14]), (ys[15], xs[15])]]

        left_antebrachial = np.array([ys[15] - ys[14], xs[15] - xs[14]])
        left_forearm = np.array([ys[13] - ys[14], xs[13] - xs[14]])
        left_back = np.array([ys[7] - ys[13], xs[7] - xs[13]])
        left_arm_angle = np.inner(left_antebrachial, left_forearm) / (
            np.linalg.norm(left_antebrachial) * np.linalg.norm(left_forearm))
        left_back_angle = np.inner(left_forearm, left_back) / (
            np.linalg.norm(left_forearm) * np.linalg.norm(left_back))

        right_antebrachial = np.array([ys[10] - ys[11], xs[10] - xs[11]])
        right_forearm = np.array([ys[12] - ys[11], xs[12] - xs[11]])
        right_back = np.array([ys[7] - ys[12], xs[7] - xs[12]])
        right_arm_angle = np.inner(right_antebrachial, right_forearm) / (
            np.linalg.norm(right_antebrachial) * np.linalg.norm(right_forearm))
        right_back_angle = np.inner(right_back, right_forearm) / (
            np.linalg.norm(right_back) * np.linalg.norm(right_forearm))

        head_neck = np.array([ys[9] - ys[8], xs[9] - xs[8]])
        chest_neck = np.array([ys[7] - ys[8], xs[7] - xs[8]])

        neck_chest = np.array([ys[8] - ys[7], xs[8] - xs[7]])
        hip_chest = np.array([ys[6] - ys[7], xs[6] - xs[7]])

        chest_hip = np.array([ys[7] - ys[6], xs[7] - xs[6]])
        knee_plevis = np.array([ys[4] - ys[3], xs[4] - xs[3]])

        neck_angle = np.inner(head_neck, chest_neck) / (
            np.linalg.norm(head_neck) * np.linalg.norm(chest_neck))
        chest_angle = np.inner(neck_chest, hip_chest) / (
            np.linalg.norm(neck_chest) * np.linalg.norm(hip_chest))
        hip_angle = np.inner(chest_hip, knee_plevis) / (
            np.linalg.norm(chest_hip) * np.linalg.norm(knee_plevis))

        print("목 각    : ", np.arccos(neck_angle) * 360 / (np.pi * 2))
        print("가슴 각   : ", np.arccos(chest_angle) * 360 / (np.pi * 2))
        print("엉덩이 각    : ", np.arccos(hip_angle) * 360 / (np.pi * 2))

        neck = np.arccos(neck_angle) * 360 / (np.pi * 2)
        chest = np.arccos(chest_angle) * 360 / (np.pi * 2)
        hip = np.arccos(hip_angle) * 360 / (np.pi * 2)

        if (170 > neck or neck >= 180):  # neck angle outside the tolerance range
            c[8] = (1, 0, 0, 0.5)  # line color = red
            c[7] = (1, 0, 0, 0.5)  # line color = red
        if (165 > chest or chest >= 180):  # chest angle outside the tolerance range
            c[7] = (1, 0, 0, 0.5)  # line color = red
            c[6] = (1, 0, 0, 0.5)  # line color = red
        if (165 > hip or hip >= 180):  # hip angle outside the tolerance range
            c[6] = (1, 0, 0, 0.5)  # line color = red
            c[5] = (1, 0, 0, 0.5)  # line color = red
            c[4] = (1, 0, 0, 0.5)  # line color = red
            c[3] = (1, 0, 0, 0.5)  # line color = red
            c[1] = (1, 0, 0, 0.5)  # line color = red

        lc = mc.LineCollection(lines, colors=c, linewidths=2)
        ax.add_collection(lc)
        plt.scatter(ys, xs)

        # print("왼쪽 팔 각    : ",np.arccos(left_arm_angle)*360/(np.pi*2))
        # print("왼쪽 어깨 각   : ",180-np.arccos(left_back_angle)*360/(np.pi*2))
        # print("오른쪽 팔 각   : ",np.arccos(right_arm_angle)*360/(np.pi*2))
        # print("오른쪽 어깨 각 : ",180-np.arccos(right_back_angle)*360/(np.pi*2))

        # Leg angle calculation for the plank pose

        plt.show()  # display the keypoint detection result as an image
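On recent Pillow versions the repeated EXIF orientation try/except blocks in these examples can be collapsed into a single call; a hedged alternative sketch:

from PIL import Image, ImageOps

# ImageOps.exif_transpose applies the stored EXIF Orientation tag (rotation/flip)
# and is a no-op when the tag is absent; `path` is illustrative.
orgImg = ImageOps.exif_transpose(Image.open(path))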