def test_prepare_image(device, man_running_image):
    """Preparing an image must leave the input untouched and yield a 256x256 CPU tensor."""
    predictor = HumanPosePredictor(hg2(pretrained=True), device=device)
    input_copy = man_running_image.clone()
    prepared = predictor.prepare_image(input_copy)
    # The predictor must not modify the tensor it was given.
    assert_allclose(input_copy, man_running_image)
    assert prepared.shape == (3, 256, 256)
    assert prepared.device.type == 'cpu'
def test_prepare_image_aspect_ratio(device, dummy_data_info):
    """A 2:1 landscape image should be letterboxed into a 256x256 square."""
    wide_image = torch.ones((3, 256, 512), dtype=torch.float32, device=device)
    predictor = HumanPosePredictor(hg2(pretrained=True), device=device,
                                   data_info=dummy_data_info)
    prepared = predictor.prepare_image(wide_image)
    # After an aspect-preserving resize, the all-ones content occupies a
    # 128-row band centred vertically, with 64 rows of zero padding on each side.
    expected = torch.zeros((3, 256, 256), dtype=torch.float32, device=device)
    expected[:, 64:192] = 1.0
    assert_allclose(prepared, expected)
def test_asymmetric_input(device, man_running_image):
    """The predictor should work end to end with a non-square input_shape."""
    predictor = HumanPosePredictor(hg2(pretrained=True), device=device,
                                   input_shape=(512, 64))
    prepared = predictor.prepare_image(man_running_image.clone())
    assert prepared.shape == (3, 512, 64)
    # Heatmaps come out at one quarter of the input resolution, one per joint.
    heatmaps = predictor.estimate_heatmaps(prepared)
    assert heatmaps.shape == (16, 128, 16)
    # Every estimated joint coordinate must fall inside the input bounds.
    joints = predictor.estimate_joints(prepared)
    assert all(joints[:, 0] < 64)
    assert all(joints[:, 1] < 512)
def test_prepare_image_mostly_ready(device):
    """An image that already has the correct dtype and size should pass through unchanged."""
    ready_image = torch.empty((3, 256, 256), device=device, dtype=torch.float32).uniform_()
    predictor = HumanPosePredictor(hg2(pretrained=True), device=device)
    input_copy = ready_image.clone()
    prepared = predictor.prepare_image(input_copy)
    # The input tensor must not be modified in place.
    assert_allclose(ready_image, input_copy)
    assert prepared.shape == (3, 256, 256)
    assert prepared.device == device