Example #1
def test_estimate_joints_h36m(device, h36m_image, h36m_pose):
    device = torch.device(device)
    model = hg2(pretrained=True)
    predictor = HumanPosePredictor(model, device=device)
    joints = predictor.estimate_joints(h36m_image)
    assert joints.shape == (16, 2)
    assert_allclose(joints, h36m_pose, rtol=0, atol=15)
Example #2
def test_do_forward(device, example_input):
    model = hg2(pretrained=True)
    predictor = HumanPosePredictor(model, device=device)
    output = predictor.do_forward(example_input.to(device))
    assert len(output) == 2  # Expect one set of heatmaps per stack.
    heatmaps = output[-1]
    assert heatmaps.shape == (1, 16, 64, 64)
Example #3
def test_estimate_joints_with_flip(device, man_running_image,
                                   man_running_pose):
    model = hg2(pretrained=True)
    predictor = HumanPosePredictor(model, device=device)
    joints = predictor.estimate_joints(man_running_image, flip=True)
    assert joints.shape == (16, 2)
    assert_allclose(joints, man_running_pose, rtol=0, atol=20)
Example #4
def test_prepare_image(device, man_running_image):
    model = hg2(pretrained=True)
    predictor = HumanPosePredictor(model, device=device)
    orig_image = man_running_image.clone()
    image = predictor.prepare_image(orig_image)
    assert_allclose(orig_image,
                    man_running_image)  # Input image should be unchanged.
    assert image.shape == (3, 256, 256)
    assert image.device.type == 'cpu'
Example #5
def test_prepare_image_aspect_ratio(device, dummy_data_info):
    orig_image = torch.ones((3, 256, 512), dtype=torch.float32, device=device)
    model = hg2(pretrained=True)
    predictor = HumanPosePredictor(model,
                                   device=device,
                                   data_info=dummy_data_info)
    image = predictor.prepare_image(orig_image)
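    # A (3, 256, 512) input is scaled to (3, 128, 256) to preserve aspect ratio,
    # then letterboxed vertically: rows 64..192 hold image content, the rest is padding.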
    expected = torch.zeros((3, 256, 256), dtype=torch.float32, device=device)
    expected[:, 64:192] = 1.0
    assert_allclose(image, expected)
Example #6
def test_estimate_joints_tensor_batch(device, h36m_image, h36m_pose):
    model = hg2(pretrained=True)
    predictor = HumanPosePredictor(model, device=device)
    batch_size = 4
    joints = predictor.estimate_joints(h36m_image.repeat(batch_size, 1, 1, 1))
    assert joints.shape == (batch_size, 16, 2)
    assert_allclose(joints,
                    h36m_pose.repeat(batch_size, 1, 1),
                    rtol=0,
                    atol=15)
Example #7
def test_asymmetric_input(device, man_running_image):
    model = hg2(pretrained=True)
    predictor = HumanPosePredictor(model, device=device, input_shape=(512, 64))
    orig_image = man_running_image.clone()
    image = predictor.prepare_image(orig_image)
    assert image.shape == (3, 512, 64)
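    # Heatmaps are produced at 1/4 of the input resolution in each dimension.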
    heatmaps = predictor.estimate_heatmaps(image)
    assert heatmaps.shape == (16, 128, 16)
    joints = predictor.estimate_joints(image)
    assert all(joints[:, 0] < 64)
    assert all(joints[:, 1] < 512)
Example #8
def test_prepare_image_mostly_ready(device):
    # Prepare an image that already has the correct dtype and size.
    image_float32 = torch.empty((3, 256, 256),
                                device=device,
                                dtype=torch.float32).uniform_()
    model = hg2(pretrained=True)
    predictor = HumanPosePredictor(model, device=device)
    orig_image = image_float32.clone()
    image = predictor.prepare_image(orig_image)
    assert_allclose(image_float32,
                    orig_image)  # Input image should be unchanged.
    assert image.shape == (3, 256, 256)
    assert image.device == device
Example #9
def test_estimate_joints_fit_contain(device, man_running_image,
                                     man_running_pose):
    # Crop the example so that it is no longer square.
    narrow_width = 256
    image = fit(man_running_image, (512, narrow_width), fit_mode='cover')
    gt_joints = man_running_pose.clone()
    gt_joints[..., 0] -= (512 - narrow_width) / 2
    # Run inference, enforcing square input.
    model = hg2(pretrained=True)
    predictor = HumanPosePredictor(model,
                                   device=device,
                                   input_shape=(256, 256))
    joints = predictor.estimate_joints(image)
    # Check that the results are as expected.
    assert joints.shape == (16, 2)
    assert_allclose(joints, gt_joints, rtol=0, atol=20)
Example #10
def main(args):
    # Select the hardware device to use for inference.
    if torch.cuda.is_available():
        device = torch.device('cuda', torch.cuda.current_device())
        torch.backends.cudnn.benchmark = True
    else:
        device = torch.device('cpu')

    # Disable gradient calculations.
    torch.set_grad_enabled(False)

    pretrained = not args.model_file

    if pretrained:
        print(
            'No model weights file specified, using pretrained weights instead.'
        )

    # Create the model, downloading pretrained weights if necessary.
    if args.arch == 'hg1':
        model = hg1(pretrained=pretrained)
    elif args.arch == 'hg2':
        model = hg2(pretrained=pretrained)
    elif args.arch == 'hg8':
        model = hg8(pretrained=pretrained)
    else:
        raise Exception('unrecognised model architecture: ' + args.arch)
    model = model.to(device)

    if not pretrained:
        assert os.path.isfile(args.model_file)
        print('Loading model weights from file: {}'.format(args.model_file))
        checkpoint = torch.load(args.model_file)
        state_dict = checkpoint['state_dict']
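        # Checkpoints saved from a DataParallel-wrapped model prefix parameter keys with 'module.'.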
        if sorted(state_dict.keys())[0].startswith('module.'):
            model = DataParallel(model)
        model.load_state_dict(state_dict)

    # Initialise the MPII validation set dataloader.
    # val_dataset = Mpii(args.image_path, is_train=False)
    # val_loader = DataLoader(val_dataset, batch_size=args.batch_size, shuffle=False,
    #                         num_workers=args.workers, pin_memory=True)

    # Generate predictions for the validation set.
    # _, _, predictions = do_validation_epoch(val_loader, model, device, Mpii.DATA_INFO, args.flip)

    model = hg1(pretrained=True)
    predictor = HumanPosePredictor(model, device='cpu')
    # my_image = image_loader("../inference-img/1.jpg")
    # joints = image_inference(predictor, image_path=None, my_image=my_image)
    # imshow(my_image, joints=joints)
    if args.camera:
        inference_video(predictor, 0)
    else:
        inference_video(predictor, "../inference-video/R6llTwEh07w.mp4")
Example #11
from PIL import Image
import torch
import numpy as np
import xgboost as xgb
import matplotlib.pyplot as plt
from matplotlib import collections as mc
import time, cv2, json, os
from astropy.convolution import Gaussian1DKernel, convolve
import PIL.ExifTags as ExifTags
from predict_arm_angle import *

# ...load image of a person into a PyTorch tensor...

name = "IMG_4529"

predictor = HumanPosePredictor(hg8(pretrained=True), device='cuda')
angle_predictor = arm_angle_predict()
model = xgb.XGBClassifier(max_depth=3,
                          learning_rate=0.1,
                          n_estimators=100,
                          silent=True,
                          objective='binary:logistic',
                          booster='gbtree',
                          n_jobs=1,
                          nthread=None,
                          gamma=0,
                          min_child_weight=1,
                          max_delta_step=0,
                          subsample=1,
                          colsample_bytree=1,
                          colsample_bylevel=1,
                          reg_alpha=0,
                          reg_lambda=1,
                          scale_pos_weight=1,
                          base_score=0.5,
                          random_state=0,
                          seed=None,
                          missing=None)
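The `# ...load image of a person into a PyTorch tensor...` placeholder is left as written above. For reference, a minimal sketch of one way to fill it in, mirroring the loading pattern that Example #14 uses (the image path derived from `name` is an assumption):

# Hypothetical loader sketch, following Example #14's conventions.
orgImg = Image.open(name + ".jpg")  # assumed path; adjust to your data layout
im = np.asarray(orgImg)             # HWC uint8 array
# Note: transpose(0, 2) gives a (C, W, H) tensor; downstream code indexes joints accordingly.
my_image = torch.tensor(im).transpose(0, 2)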
Example #12
from stacked_hourglass import HumanPosePredictor, hg2
from stacked_hourglass.utils.transforms import shufflelr, crop, color_normalize, fliplr, transform
from PIL import Image
import torch
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import collections as mc
import time, cv2, json, os

# ...load image of a person into a PyTorch tensor...

name = "pullup"

model = hg2(pretrained=True)
predictor = HumanPosePredictor(model, device='cpu')

print("==model loaded==")

RGB_MEAN = torch.as_tensor([0.4404, 0.4440, 0.4327])
RGB_STDDEV = torch.as_tensor([0.2458, 0.2410, 0.2468])

images = os.listdir("./video/" + name)
print("frames : ", len(images))

result = {"name": name, "frames": dict()}

for i in images:
    orgImg = Image.open("./video/" + name + "/" + i)
    im = np.asarray(orgImg)
    idx = int(i[:-4])
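The loop body is truncated here. Based on the fuller per-frame pipeline in Example #14, a plausible continuation looks like the following sketch (the `result` bookkeeping line is an assumption, not the original code):

    # Sketch of a plausible continuation, mirroring Example #14's per-frame steps.
    img = torch.tensor(im).transpose(0, 2)
    img = color_normalize(img, RGB_MEAN, RGB_STDDEV)
    joints = predictor.estimate_joints(img, flip=True)
    result["frames"][idx] = joints.tolist()  # assumed: store per-frame joints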
Example #13
def test_estimate_heatmaps(device, man_running_image):
    model = hg2(pretrained=True)
    predictor = HumanPosePredictor(model, device=device)
    heatmaps = predictor.estimate_heatmaps(man_running_image)
    assert heatmaps.shape == (16, 64, 64)
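A common way to recover joint coordinates from such heatmaps is a per-joint argmax over the 64x64 grid. A minimal sketch of that idea (not necessarily the library's own decoding, which may refine peak locations):

# Decode (16, 64, 64) heatmaps to (16, 2) coordinates via per-joint argmax.
flat = heatmaps.view(16, -1)             # flatten each joint's heatmap
peak = flat.argmax(dim=-1)               # index of the hottest pixel per joint
ys, xs = peak // 64, peak % 64           # convert flat index to row/column
coords = torch.stack([xs, ys], dim=-1)   # (16, 2) x/y coordinates in heatmap space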
Example #14
    def __init__(self, fn):

        filename = fn  # Store the file name.

        # Load the video and slice it into frames.
        viddir = filename
        vidcap = cv2.VideoCapture(viddir)
        vidlen = len(viddir)
        except_filename = 0

        # Strip the trailing "[video name].mp4" from the loaded path.
        for i in range(vidlen - 1, -1, -1):
            if viddir[i] == '/':
                except_filename = i
                break
        viddir = viddir[0:except_filename + 1]

        # Directory where the extracted frame images will be stored.
        paradir = viddir + '/tmp+img'
        self.createFolder(paradir)

        # Split the video into frames via the getFrame helper.
        sec = 0
        frameRate = 0.1  # Capture a frame every 0.1 seconds.
        count = 100
        success = self.getFrame(sec, vidcap, viddir, count)
        while success:
            count = count + 1
            sec = sec + frameRate
            sec = round(sec, 2)
            success = self.getFrame(sec, vidcap, viddir, count)

        self.predictor = HumanPosePredictor(hg8(pretrained=True),
                                            device='cuda')
        print("==model loaded==")
        # ...load image of a person into a PyTorch tensor...

        name = ""
        predictor = HumanPosePredictor(hg8(pretrained=True), device='cuda')
        model = xgb.XGBClassifier(max_depth=3,
                                  learning_rate=0.1,
                                  n_estimators=100,
                                  silent=True,
                                  objective='binary:logistic',
                                  booster='gbtree',
                                  n_jobs=1,
                                  nthread=None,
                                  gamma=0,
                                  min_child_weight=1,
                                  max_delta_step=0,
                                  subsample=1,
                                  colsample_bytree=1,
                                  colsample_bylevel=1,
                                  reg_alpha=0,
                                  reg_lambda=1,
                                  scale_pos_weight=1,
                                  base_score=0.5,
                                  random_state=0,
                                  seed=None,
                                  missing=None)
        model.load_model(viddir + 'xgboost.bst')  # load model
        g = Gaussian1DKernel(stddev=4)  # Gaussian kernel for smoothing the per-frame probabilities.

        print("==model loaded==")

        RGB_MEAN = torch.as_tensor([0.4404, 0.4440, 0.4327])
        RGB_STDDEV = torch.as_tensor([0.2458, 0.2410, 0.2468])

        images = os.listdir("./" + name)
        print(images)
        images = sorted(images, key=lambda x: int(x.split(".")[0]))
        print("frames : ", len(images))

        result = {"name": name, "frames": dict()}

        threshold = 0.4
        img_array = list()
        testResult = list()
        for i in images:
            orgImg = Image.open("./" + name + i)
            #orgImg = orgImg.rotate(270, expand=True)
            print("!precessing " + str(i))

            try:
                for orientation in ExifTags.TAGS.keys():
                    if ExifTags.TAGS[orientation] == 'Orientation':
                        break
                exif = dict(orgImg._getexif().items())
                if exif[orientation] == 3:
                    orgImg = orgImg.rotate(180, expand=True)
                elif exif[orientation] == 6:
                    orgImg = orgImg.rotate(270, expand=True)
                elif exif[orientation] == 8:
                    orgImg = orgImg.rotate(90, expand=True)
            except:
                pass  # Image has no usable EXIF orientation data.

            im = np.asarray(orgImg)
            idx = int(i[:-4])

            img = torch.tensor(im).transpose(0, 2)
            img = color_normalize(img, RGB_MEAN, RGB_STDDEV)
            joints = predictor.estimate_joints(img, flip=True)
            xs, ys = list(joints[:, 0].numpy()), list(joints[:, 1].numpy())

            angles = self.predictFromjoints(joints)

            prob = model.predict_proba(np.asarray(angles).reshape(1, 4))[0][1]
            testResult.append(prob)
            smoothing = convolve(testResult[:], g)
            #smoothing = selfG.smoothListGaussian(testResult[:])

            ascending = True
            descending = False
            count = 0
            for p in range(1, len(smoothing) - 1):
                if ((smoothing[p] < threshold
                     and smoothing[p + 1] >= threshold) and ascending):
                    ascending = False
                    descending = True
                elif ((smoothing[p] > threshold
                       and smoothing[p + 1] <= threshold) and descending):
                    ascending = True
                    descending = False
                    count += 1

            orgImg = np.array(orgImg)
            height, width, layers = orgImg.shape
            size = (width, height)
            img_array.append(cv2.cvtColor(orgImg, cv2.COLOR_BGR2RGB))
            # orgImg = cv2.line(orgImg, (ys[0], xs[0]), (ys[1], xs[1]), (10, 255, 0), 2)
            # orgImg = cv2.line(orgImg, (ys[1], xs[1]), (ys[2], xs[2]), (50, 160, 50), 2)
            # orgImg = cv2.line(orgImg, (ys[5], xs[5]), (ys[4], xs[4]), (50, 0, 255), 2)
            # orgImg = cv2.line(orgImg, (ys[4], xs[4]), (ys[3], xs[3]), (255, 0, 0), 2)
            # orgImg = cv2.line(orgImg, (ys[3], xs[3]), (ys[6], xs[6]), (255, 0, 0), 2)
            # orgImg = cv2.line(orgImg, (ys[6], xs[6]), (ys[2], xs[2]), (30, 30, 130), 2)
            orgImg = cv2.line(orgImg, (ys[6], xs[6]), (ys[7], xs[7]),
                              (153, 255, 255), 3)  # back
            # orgImg = cv2.line(orgImg, (ys[7], xs[7]), (ys[8], xs[8]), (255, 0, 0), 2)
            # orgImg = cv2.line(orgImg, (ys[8], xs[8]), (ys[9], xs[9]), (255, 0, 0), 2)
            orgImg = cv2.line(orgImg, (ys[10], xs[10]), (ys[11], xs[11]),
                              (153, 255, 255), 3)  # left forearm
            orgImg = cv2.line(orgImg, (ys[11], xs[11]), (ys[12], xs[12]),
                              (153, 255, 255), 3)
            orgImg = cv2.line(orgImg, (ys[12], xs[12]), (ys[7], xs[7]),
                              (153, 255, 255), 3)  # left lat
            orgImg = cv2.line(orgImg, (ys[14], xs[14]), (ys[13], xs[13]),
                              (153, 255, 255), 3)  # right biceps
            orgImg = cv2.line(orgImg, (ys[7], xs[7]), (ys[13], xs[13]),
                              (153, 255, 255), 3)  # right lat
            orgImg = cv2.line(orgImg, (ys[14], xs[14]), (ys[15], xs[15]),
                              (153, 255, 255), 3)  # right forearm
            if (len(smoothing) > 0):
                orgImg = cv2.putText(orgImg, str(smoothing[-1]), (30, 30),
                                     cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0),
                                     2, cv2.LINE_AA)
                orgImg = cv2.putText(orgImg, "count : " + str(count), (30, 75),
                                     cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0),
                                     2, cv2.LINE_AA)

            #cv2.imshow('image', cv2.cvtColor(orgImg, cv2.COLOR_BGR2RGB))

            if cv2.waitKey(1) & 0xFF == ord('q'):
                break

        out = cv2.VideoWriter('result.avi', cv2.VideoWriter_fourcc(*'mp4v'),
                              30, size)
        for i in range(len(img_array)):
            out.write(img_array[i])
        out.release()

        print("done!")

        cv2.destroyAllWindows()
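The rep counter above scans the smoothed probability signal and counts each fall back through the threshold as one completed repetition. The same idea in isolation, with synthetic probabilities for illustration only:

# Standalone sketch of the threshold-crossing rep counter (synthetic data).
import numpy as np

threshold = 0.4
smoothing = np.array([0.1, 0.3, 0.6, 0.8, 0.5, 0.2, 0.3, 0.7, 0.9, 0.3, 0.1])

ascending, descending, count = True, False, 0
for p in range(len(smoothing) - 1):
    if smoothing[p] < threshold <= smoothing[p + 1] and ascending:
        ascending, descending = False, True   # rose above threshold: rep in progress
    elif smoothing[p] > threshold >= smoothing[p + 1] and descending:
        ascending, descending = True, False   # fell below threshold: rep completed
        count += 1
print(count)  # 2 excursions above the threshold -> 2 reps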
class arm_angle_predict:
    def __init__(self):
        self.predictor = HumanPosePredictor(hg8(pretrained=True), device='cpu')
        print("==model loaded==")

    def predictfromDir(self, path):
        orgImg = Image.open(path)

        try:
            for orientation in ExifTags.TAGS.keys():
                if ExifTags.TAGS[orientation] == 'Orientation':
                    break
            exif = dict(orgImg._getexif().items())
            if exif[orientation] == 3:
                orgImg = orgImg.rotate(180, expand=True)
            elif exif[orientation] == 6:
                orgImg = orgImg.rotate(270, expand=True)
            elif exif[orientation] == 8:
                orgImg = orgImg.rotate(90, expand=True)
        except:
            pass  # Image has no usable EXIF orientation data.

        im = np.asarray(orgImg)
        img = torch.tensor(im).transpose(0, 2)
        img = color_normalize(img, RGB_MEAN, RGB_STDDEV)
        if img.size(0) == 4:  # Drop the alpha channel if present.
            img = img[:3]

        c, h, w = img.size()
        start = time.time()
        joints = self.predictor.estimate_joints(img, flip=True)
        end = time.time()
        xs, ys = list(joints[:, 0].numpy()), list(joints[:, 1].numpy())
        #print("infer time : ", end - start)


        left_antebrachial   = np.array([ys[15]-ys[14],xs[15]-xs[14]])
        left_forearm        = np.array([ys[13]-ys[14],xs[13]-xs[14]])
        left_back           = np.array([ys[7]-ys[13], xs[7]-xs[13]])
        left_arm_angle      = np.inner(left_antebrachial, left_forearm)/(np.linalg.norm(left_antebrachial)*np.linalg.norm(left_forearm))
        left_back_angle     = np.inner(left_forearm, left_back)/(np.linalg.norm(left_forearm)*np.linalg.norm(left_back))

        right_antebrachial  = np.array([ys[10]-ys[11],xs[10]-xs[11]])
        right_forearm       = np.array([ys[12]-ys[11],xs[12]-xs[11]])
        right_back          = np.array([ys[7]-ys[12], xs[7]-xs[12]])
        right_arm_angle     = np.inner(right_antebrachial, right_forearm)/(np.linalg.norm(right_antebrachial)*np.linalg.norm(right_forearm))
        right_back_angle    = np.inner(right_back, right_forearm)/(np.linalg.norm(right_back)*np.linalg.norm(right_forearm))

        # Convert the cosine similarities to angles in degrees.
        left_arm_angle   = np.arccos(left_arm_angle)*360/(np.pi*2)
        left_back_angle  = 180-np.arccos(left_back_angle)*360/(np.pi*2)
        right_arm_angle  = np.arccos(right_arm_angle)*360/(np.pi*2)
        right_back_angle = 180-np.arccos(right_back_angle)*360/(np.pi*2)

        return left_arm_angle, left_back_angle, right_arm_angle, right_back_angle

    def predictFromjoints(self, joints):
        xs, ys = list(joints[:, 0].numpy()), list(joints[:, 1].numpy())

        left_antebrachial   = np.array([ys[15]-ys[14],xs[15]-xs[14]])
        left_forearm        = np.array([ys[13]-ys[14],xs[13]-xs[14]])
        left_back           = np.array([ys[7]-ys[13], xs[7]-xs[13]])
        left_arm_angle      = np.inner(left_antebrachial, left_forearm)/(np.linalg.norm(left_antebrachial)*np.linalg.norm(left_forearm))
        left_back_angle     = np.inner(left_forearm, left_back)/(np.linalg.norm(left_forearm)*np.linalg.norm(left_back))

        right_antebrachial  = np.array([ys[10]-ys[11],xs[10]-xs[11]])
        right_forearm       = np.array([ys[12]-ys[11],xs[12]-xs[11]])
        right_back          = np.array([ys[7]-ys[12], xs[7]-xs[12]])
        right_arm_angle     = np.inner(right_antebrachial, right_forearm)/(np.linalg.norm(right_antebrachial)*np.linalg.norm(right_forearm))
        right_back_angle    = np.inner(right_back, right_forearm)/(np.linalg.norm(right_back)*np.linalg.norm(right_forearm))

        # Convert the cosine similarities to angles in degrees.
        left_arm_angle   = np.arccos(left_arm_angle)*360/(np.pi*2)
        left_back_angle  = 180-np.arccos(left_back_angle)*360/(np.pi*2)
        right_arm_angle  = np.arccos(right_arm_angle)*360/(np.pi*2)
        right_back_angle = 180-np.arccos(right_back_angle)*360/(np.pi*2)

        return left_arm_angle, left_back_angle, right_arm_angle, right_back_angle
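Both methods compute each angle as the angle between two limb vectors u and v: cos(theta) = u.v / (|u||v|), with arccos converted from radians to degrees (the * 360 / (2 * pi) factor); the back angles are then reported as 180 degrees minus that. A compact worked example:

# Angle between two 2-D limb vectors, in degrees.
import numpy as np

def vector_angle_deg(u, v):
    cos_theta = np.inner(u, v) / (np.linalg.norm(u) * np.linalg.norm(v))
    return np.degrees(np.arccos(cos_theta))  # equivalent to arccos(x) * 360 / (2 * pi)

# A right angle: one vector along each axis.
print(vector_angle_deg(np.array([0.0, 1.0]), np.array([1.0, 0.0])))  # 90.0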