Example #1
    def draw_dist_ax(self, ax, x, y):
        output = self.output
        cx, cy = int(coord2pix(x, 4)), int(coord2pix(y, 4))
        color_score, max_score_id = output.calc_color_score(x, y)
        xrange = np.arange(0, 64)
        ax.clear()

        lines_color = {
            'feat. matching': OURS_MATCHING_COLOR,
            'rgb matching': '#0066cc',
            'non-fusion feat. matching': BASELINE_MATCHING_COLOR,
        }
        lines_data = {
            'feat. matching': output.depth[:, cy, cx],
            'rgb matching': color_score,
            'non-fusion feat. matching': self.b_output.depth[:, cy, cx],
        }

        for label, line in lines_data.items():
            ax.plot(xrange[1:-1],
                    line[1:-1],
                    color=lines_color[label],
                    label=label)
        ax.set_yscale('log')
        ax.set_ylabel('similarity (log)')
        ax.tick_params(bottom=False, top=True)
        ax.tick_params(labelbottom=False, labeltop=True)
        ax.legend()

        return max_score_id
Example #2
    def draw_other_ax(self, ax, x, y, max_score_id, joint_id=None):
        output = self.output
        b_output = self.b_output
        cx, cy = int(coord2pix(x, 4)), int(coord2pix(y, 4))
        xx, yy = output.corr_pos_pred[cy][cx]
        bxx, byy = b_output.corr_pos_pred[cy][cx]

        ax.clear()
        ax.imshow(output.img2)
        circ = Circle(max_score_id, 3, color=RGB_MATCHING_COLOR)
        ax.add_patch(circ)
        
        # draw epipolar lines
        line_start = de_normalize(output.sample_locs[1][int(cy)][int(cx)], output.H, output.W)
        line_end = de_normalize(output.sample_locs[63][int(cy)][int(cx)], output.H, output.W)
        ax.plot([line_start[0], line_end[0]], [line_start[1], line_end[1]], alpha=0.5, color='b', zorder=1)
        
        # draw the groundtruth point for the selected joint (joint_id is required here)
        gx, gy = output.points_2d[output.other_camera][joint_id][0], output.points_2d[output.other_camera][joint_id][1]
        circ = Circle((gx, gy), 3, color=GROUNDTRUTH_COLOR, zorder=2)
        ax.add_patch(circ)

        # draw baseline predicted point
        circ = Circle((pix2coord(bxx, 4), pix2coord(byy, 4)), 3, color=BASELINE_MATCHING_COLOR, zorder=2)
        ax.add_patch(circ)

        # draw predicted point
        circ = Circle((pix2coord(xx, 4), pix2coord(yy, 4)), 3, color=OURS_MATCHING_COLOR, zorder=3)
        ax.add_patch(circ)

        def dist(x1, y1, x2, y2):
            return math.sqrt((x1 - x2)**2 + (y1 - y2)**2)

        # keep the sample only when our prediction clearly beats the baseline
        flag = True
        # reject if the predicted-to-gt error is not well below the baseline-to-gt error
        if dist(pix2coord(xx, 4), pix2coord(yy, 4), gx, gy) * 1.5 > dist(pix2coord(bxx, 4), pix2coord(byy, 4), gx, gy):
            flag = False
        # reject if the baseline is already accurate (error under 5 pixels)
        if dist(pix2coord(bxx, 4), pix2coord(byy, 4), gx, gy) < 5:
            flag = False

        if flag:
            print('img1 path: ', output.img1_path)
            print('img2 path: ', output.img2_path)
            print('pred - gt: ', dist(pix2coord(xx, 4), pix2coord(yy, 4), gx, gy))
            print('baseline - gt: ', dist(pix2coord(bxx, 4), pix2coord(byy, 4), gx, gy))

        txt = self.sample_ax.text(0, 0, '', va="bottom", ha="left")
        txt.set_text('g: groundtruth; y: baseline; r: our prediction')
        return flag
Example #3
    def draw_sample_ax(self, ax, x, y):
        output = self.output
        b_output = self.b_output
        cx, cy = int(coord2pix(x, 4)), int(coord2pix(y, 4))
        ax.clear()

        # update the line positions
        ax.imshow(self.ref_img)
        self.lx.set_ydata(y)
        self.ly.set_xdata(x)

        circ = Circle((x, y), 3, color=GROUNDTRUTH_COLOR)
        ax.add_patch(circ)

        self.txt.set_text('x=%1.1f, y=%1.1f; g: groundtruth; y: baseline; r: prediction' % (x, y))
Example #4
    def generate_heatmap(self, joints, joints_vis):
        '''
        :param joints:  [num_joints, 3]
        :param joints_vis: [num_joints, 3]
        :return: target, target_weight(1: visible, 0: invisible)
        '''
        target_weight = np.ones((self.num_joints, 1), dtype=np.float32)
        target_weight[:, 0] = joints_vis[:, 0]

        target = np.zeros(
            (self.num_joints, self.heatmap_size[1], self.heatmap_size[0]),
            dtype=np.float32)

        tmp_size = self.sigma * 3

        for joint_id in range(self.num_joints):
            feat_stride = np.asarray(self.image_size) / np.asarray(self.heatmap_size)
            mu_x = int(coord2pix(joints[joint_id][0], feat_stride[0]) + 0.5)
            mu_y = int(coord2pix(joints[joint_id][1], feat_stride[1]) + 0.5)
            ul = [int(mu_x - tmp_size), int(mu_y - tmp_size)]
            br = [int(mu_x + tmp_size + 1), int(mu_y + tmp_size + 1)]
            if ul[0] >= self.heatmap_size[0] or ul[1] >= self.heatmap_size[1] \
                    or br[0] < 0 or br[1] < 0:
                target_weight[joint_id] = 0
                continue

            size = 2 * tmp_size + 1
            x = np.arange(0, size, 1, np.float32)
            y = x[:, np.newaxis]
            x0 = y0 = size // 2
            g = np.exp(-((x - x0)**2 + (y - y0)**2) / (2 * self.sigma**2))

            g_x = max(0, -ul[0]), min(br[0], self.heatmap_size[0]) - ul[0]
            g_y = max(0, -ul[1]), min(br[1], self.heatmap_size[1]) - ul[1]
            img_x = max(0, ul[0]), min(br[0], self.heatmap_size[0])
            img_y = max(0, ul[1]), min(br[1], self.heatmap_size[1])

            v = target_weight[joint_id]
            if v > 0.5:
                target[joint_id][img_y[0]:img_y[1], img_x[0]:img_x[1]] = \
                    g[g_y[0]:g_y[1], g_x[0]:g_x[1]]

        return target, target_weight
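For reference, this is a minimal, self-contained sketch of the Gaussian-target construction above for a single joint; the sigma, heatmap size, and joint location are made-up values, and the patch clipping mirrors the ul/br logic in generate_heatmap:

import numpy as np

sigma = 2
heatmap_w, heatmap_h = 64, 64
mu_x, mu_y = 20, 30                      # joint center in heatmap pixels

tmp_size = sigma * 3
ul = [mu_x - tmp_size, mu_y - tmp_size]  # upper-left corner of the Gaussian patch
br = [mu_x + tmp_size + 1, mu_y + tmp_size + 1]

size = 2 * tmp_size + 1
x = np.arange(0, size, 1, np.float32)
y = x[:, np.newaxis]
x0 = y0 = size // 2
g = np.exp(-((x - x0) ** 2 + (y - y0) ** 2) / (2 * sigma ** 2))  # peak value 1 at the center

# paste only the part of the patch that falls inside the heatmap
target = np.zeros((heatmap_h, heatmap_w), dtype=np.float32)
g_x = max(0, -ul[0]), min(br[0], heatmap_w) - ul[0]
g_y = max(0, -ul[1]), min(br[1], heatmap_h) - ul[1]
img_x = max(0, ul[0]), min(br[0], heatmap_w)
img_y = max(0, ul[1]), min(br[1], heatmap_h)
target[img_y[0]:img_y[1], img_x[0]:img_x[1]] = g[g_y[0]:g_y[1], g_x[0]:g_x[1]]

assert target[mu_y, mu_x] == 1.0         # the peak lands exactly on the joint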
Example #5
    def calc_color_score(self, x, y):
        cx, cy = int(coord2pix(x, 4)), int(coord2pix(y, 4))
        ref_point = self.img1_ab[int(y), int(x), :]
        color_score = []
        max_score_id = None
        max_score = -1

        for i in range(0, 64):
            pos = self.sample_locs[i][int(cy)][int(cx)]
            depos = de_normalize(pos, self.H, self.W)
            source_point = self.img2_ab[int(depos[1]), int(depos[0]), :]
            color_score.append(np.dot(ref_point, source_point))
            if color_score[-1] > max_score:
                max_score = color_score[-1]
                max_score_id = (int(depos[0]), int(depos[1]))

        color_score = np.array(color_score) / sum(color_score)  # normalize into a distribution
        return color_score, max_score_id
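The loop above reduces to one dot product per sample followed by a normalization; here is a minimal standalone sketch with synthetic colors in place of self.img1_ab, self.img2_ab, and sample_locs:

import numpy as np

ref_point = np.array([0.9, 0.1, 0.4])          # color vector at (x, y) in the reference view
samples = np.random.rand(64, 3)                # colors at the 64 epipolar samples in the other view

color_score = samples @ ref_point              # one dot product per sample location
color_score = color_score / color_score.sum()  # normalize into a distribution over samples
max_score_id = int(np.argmax(color_score))     # index of the best color match

print(max_score_id, color_score[max_score_id])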
Example #6
def get_point_cloud(img1, img2, KRT1, KRT2, RT1, RT2, corr_pos, score):
    """
        KRT: 
        corr_pos: feat_h x feat_w x 2
        score:    sample_size x feat_h x feat_w
    """

    y = np.arange(0, img1.shape[0])  # 128
    x = np.arange(0, img1.shape[1])  # 84

    grid_x, grid_y = np.meshgrid(x, y)

    grid_y = pix2coord(grid_y, cfg.BACKBONE.DOWNSAMPLE)
    grid_y = grid_y * cfg.DATASETS.IMAGE_RESIZE * cfg.DATASETS.PREDICT_RESIZE
    grid_x = pix2coord(grid_x, cfg.BACKBONE.DOWNSAMPLE)
    grid_x = grid_x * cfg.DATASETS.IMAGE_RESIZE * cfg.DATASETS.PREDICT_RESIZE
    # 2668 * 4076

    grid_corr = pix2coord(corr_pos, cfg.BACKBONE.DOWNSAMPLE)
    grid_corr = grid_corr * cfg.DATASETS.IMAGE_RESIZE * cfg.DATASETS.PREDICT_RESIZE

    grid = np.stack((grid_x, grid_y))
    grid = grid.reshape(2, -1)
    grid_corr = grid_corr.reshape(-1, 2).transpose()

    # scipy.misc.imresize was removed in SciPy 1.3; cv2.resize (note the (width, height) order) is a drop-in here
    sample_size, fh, fw = score.shape
    resized_img2 = cv2.resize(img2, (fw, fh))

    max_score = np.max(score.reshape(sample_size, -1), axis=0).reshape(fh, fw)
    select_pos1 = max_score > 0.02
    print('->', np.sum(select_pos1))
    select_pos2 = np.sum(resized_img2, axis=2) > 20
    print('->', np.sum(select_pos2))

    select_pos3 = np.sum(corr_pos, axis=2) > -50
    print('->', np.sum(select_pos3))

    select_pos = np.logical_and(select_pos3, select_pos2).reshape(-1)
    # select_pos = select_pos3
    print('-->', np.sum(select_pos))

    select_pos = select_pos.reshape(-1)
    select_img_point = resized_img2.reshape(fh * fw, 3)[select_pos, :]
    print(select_pos.shape)
    print('total pos', sum(select_pos))

    p3D = cv2.triangulatePoints(KRT2, KRT1, grid_corr[:, select_pos],
                                grid[:, select_pos])
    # p3D = cv2.triangulatePoints(KRT2, KRT1, grid_corr, grid)

    # depth = np.ones((fh, fw)) * np.min((KRT1@p3D)[2, :])
    depth = np.ones((fh, fw)) * np.max((KRT1 @ p3D)[2, :])

    cnt = 0
    for i in range(fh):
        for j in range(fw):
            if not select_pos[i * fw + j]:
                continue
            p_homo = (KRT1 @ p3D[:, cnt])
            p = p_homo / p_homo[2]
            depth[int(coord2pix(p[1], 32)),
                  int(coord2pix(p[0], 32))] = p_homo[2]
            cnt += 1

    p3D /= p3D[3]
    p3D = p3D[:3].squeeze()

    depth = (depth - depth.min()) / (depth.max() - depth.min()) + 1
    depth = np.log(depth)
    depth = (depth - depth.min()) / (depth.max() - depth.min())

    #######vis
    fig = plt.figure(1)
    ax1_1 = fig.add_subplot(331)
    ax1_1.imshow(img1)
    ax1_2 = fig.add_subplot(332)
    ax1_2.imshow(img2)

    w = corr_pos[:, :, 0]
    w = (w - w.min()) / (w.max() - w.min())
    ax1_1 = fig.add_subplot(334)
    ax1_1.imshow(w)

    w = corr_pos[:, :, 1]
    w = (w - w.min()) / (w.max() - w.min())
    ax1_1 = fig.add_subplot(335)
    ax1_1.imshow(w)

    # w1 = corr_pos[:, :, 0]
    # w1 = (w1 - w1.min()) / (w1.max() - w1.min())
    # w2 = corr_pos[:, :, 1]
    # w2 = (w2 - w2.min()) / (w2.max() - w2.min())
    # W = np.stack([w1, w2, np.ones(w2.shape)], axis=0)
    # ax2_1 = fig.add_subplot(336)
    # ax2_1.imshow(W.transpose(1,2,0))

    ax1_1 = fig.add_subplot(336)
    ax1_1.imshow(depth)

    w = select_pos1.reshape(fh, fw)
    # w = (w - w.min()) / (w.max() - w.min())
    ax2_1 = fig.add_subplot(337)
    ax2_1.imshow(w)

    w = select_pos2.reshape(fh, fw)
    # w = (w - w.min()) / (w.max() - w.min())
    ax2_1 = fig.add_subplot(338)
    ax2_1.imshow(w)

    w = select_pos.reshape(fh, fw)
    # w = (w - w.min()) / (w.max() - w.min())
    ax2_1 = fig.add_subplot(339)
    ax2_1.imshow(w)

    ####### end vis

    # w = select_img_point[:, :10000].reshape(-1, 100, 100).transpose(1,2,0)
    # w = (w - w.min()) / (w.max() - w.min())
    # ax2_1 = fig.add_subplot(326)
    # ax2_1.imshow(w)

    plt.show()
    return p3D, select_img_point
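The core of this function is the cv2.triangulatePoints call; a minimal sketch with two synthetic 3 x 4 projection matrices (all values made up) shows the expected shapes and the homogeneous divide:

import numpy as np
import cv2

K = np.array([[500., 0., 320.],
              [0., 500., 240.],
              [0., 0., 1.]])
P1 = K @ np.hstack([np.eye(3), np.zeros((3, 1))])               # camera at the origin
P2 = K @ np.hstack([np.eye(3), np.array([[-1.], [0.], [0.]])])  # one unit to the right

X = np.array([[0.2], [-0.1], [5.0], [1.0]])                     # homogeneous 3D point
x1 = (P1 @ X)[:2] / (P1 @ X)[2]                                 # 2 x 1 projection in view 1
x2 = (P2 @ X)[:2] / (P2 @ X)[2]                                 # 2 x 1 projection in view 2

p4d = cv2.triangulatePoints(P1, P2, x1, x2)                     # 4 x N homogeneous output
p3d = (p4d[:3] / p4d[3]).ravel()
print(p3d)                                                       # ~ [ 0.2 -0.1  5. ]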
Example #7
    def grid2sample_locs(self, grid, P1, P2, H, W):
        """ Get samples locs on the other view, given grid locs on ref view
        Args:
            grid: 3 x HW, real world xy (4096)
        Return:
            sample_locs: sample_size x N x H x W x 2, float xy (-1, 1)
        """
        N = P1.shape[0]

        # F = findFundamentalMat(P1, P2)
        # N x 4 x 3
        P1t = P1.transpose(1, 2)
        # P1inv = torch.matmul(P1t, torch.inverse(torch.matmul(P1, P1t)))
        P1inv = torch.stack([i.pinverse() for i in P1])
        # N x 4 x HW
        X = torch.matmul(P1inv, grid.to(P1inv))
        # N x 3 x HW
        x2 = torch.matmul(P2, X)
        #numerical stability
        x2 /= x2[:, [2], :]
        # N x 4
        center, _ = camera_center(P1, engine='torch')
        # N x 3 x 1
        e2 = torch.matmul(P2, center).view(N, 3, 1)
        #numerical stability
        e2 /= e2[:, [2], :]
        # N x 3 x HW
        l2 = torch.cross(e2.expand_as(x2), x2, dim=1)
        # N x HW x 3
        l2 = l2.transpose(1, 2)

        xmin = self.xmin.to(l2)
        xmax = self.xmax.to(l2)
        ymin = self.ymin.to(l2)
        ymax = self.ymax.to(l2)
        # N x HW
        # ( 3 ]
        #⎴     ⏜
        #1     2
        #⏝     ⎵
        # [ 0 )
        # by1 = -(xmin * l2[..., 0] + l2[..., 2]) / l2[..., 1]
        # by2 = -(xmax * l2[..., 0] + l2[..., 2]) / l2[..., 1]
        # bx0 = -(ymin * l2[..., 1] + l2[..., 2]) / l2[..., 0]
        # bx3 = -(ymax * l2[..., 1] + l2[..., 2]) / l2[..., 0]
        #numerical stability
        EPS = torch.tensor(self.epsilon).to(l2)
        by1 = -(xmin * l2[..., 0] + l2[..., 2]) / (
            torch.sign(l2[..., 1]) * torch.max(torch.abs(l2[..., 1]), EPS))
        by2 = -(xmax * l2[..., 0] + l2[..., 2]) / (
            torch.sign(l2[..., 1]) * torch.max(torch.abs(l2[..., 1]), EPS))
        bx0 = -(ymin * l2[..., 1] + l2[..., 2]) / (
            torch.sign(l2[..., 0]) * torch.max(torch.abs(l2[..., 0]), EPS))
        bx3 = -(ymax * l2[..., 1] + l2[..., 2]) / (
            torch.sign(l2[..., 0]) * torch.max(torch.abs(l2[..., 0]), EPS))
        # N x HW x 4
        intersections = torch.stack((
            bx0,
            by1,
            by2,
            bx3,
        ), -1)
        # N x HW x 4 x 2
        intersections = intersections.view(N, H * W, 4, 1).repeat(1, 1, 1, 2)
        intersections[..., 0, 1] = ymin
        intersections[..., 1, 0] = xmin
        intersections[..., 2, 0] = xmax
        intersections[..., 3, 1] = ymax
        # N x HW x 4
        mask = torch.stack((
            (bx0 >= xmin + self.epsilon) & (bx0 < xmax - self.epsilon),
            (by1 > ymin + self.epsilon) & (by1 <= ymax - self.epsilon),
            (by2 >= ymin + self.epsilon) & (by2 < ymax - self.epsilon),
            (bx3 > xmin + self.epsilon) & (bx3 <= xmax - self.epsilon),
        ), -1)
        # N x HW
        Nintersections = mask.sum(-1)
        # rule out all lines that have no intersections
        mask[Nintersections < 2] = 0
        tmp_mask = mask.clone()
        tmp_mask[Nintersections < 2] = self.tmp_tensor.to(tmp_mask)
        # assert (Nintersections <= 2).all().item(), intersections[Nintersections > 2]
        # N x HW x 2 x 2
        valid_intersections = intersections[tmp_mask].view(N, H * W, 2, 2)
        valid_intersections[Nintersections < 2] = self.outrange_tensor.to(
            valid_intersections)
        # N x HW x 2
        start = valid_intersections[..., 0, :]
        vec = valid_intersections[..., 1, :] - start
        vec = vec.view(1, N, H * W, 2)
        # sample_size x N x HW x 2
        sample_locs = start.view(1, N, H * W,
                                 2) + vec * self.sample_steps.to(vec)
        # normalize
        sample_locs = sample_locs / cfg.DATASETS.IMAGE_RESIZE / cfg.DATASETS.PREDICT_RESIZE
        sample_locs = coord2pix(sample_locs, cfg.BACKBONE.DOWNSAMPLE)
        # sample_size*N x H x W x 2
        sample_locs = normalize(sample_locs, H, W).view(-1, H, W, 2)
        sample_locs = sample_locs.view(self.sample_size, N, H, W, 2)
        if self.debug:
            return sample_locs, intersections, mask, valid_intersections, start, vec
        return sample_locs
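The geometry in this method: back-project the reference pixel through the pseudo-inverse of P1, project a point of that ray into view 2, project the camera-1 center to get the epipole, and take their cross product as the epipolar line. A single-pixel sketch with made-up cameras (torch.linalg.pinv standing in for the per-camera pinverse loop):

import torch

P1 = torch.eye(3, 4)                     # reference camera: K = I, [R|t] = [I|0]
P2 = torch.tensor([[1., 0., 0., -1.],
                   [0., 1., 0., 0.],
                   [0., 0., 1., 2.]])    # second camera, shifted in x and z

pix = torch.tensor([10., 20., 1.])       # homogeneous pixel in the reference view
X = torch.linalg.pinv(P1) @ pix          # one 3D point on the back-projected ray
x2 = P2 @ X
x2 = x2 / x2[2]                          # its image in view 2

center = torch.tensor([0., 0., 0., 1.])  # camera-1 center (P1 @ center == 0)
e2 = P2 @ center
e2 = e2 / e2[2]                          # epipole in view 2

l2 = torch.cross(e2, x2, dim=0)          # epipolar line: l2 @ p == 0 for points on it
print(l2 @ x2, l2 @ e2)                  # both ~ 0

The method above then intersects this line with the image rectangle (the bx0/by1/by2/bx3 bounds) and spaces sample_size points uniformly between the two valid intersections.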
Example #8
    def __getitem__(self, idx):
        if cfg.DATALOADER.BENCHMARK: self.timer0.tic()
        db_rec = copy.deepcopy(self.db[idx])
        if cfg.DATASETS.TASK not in [
                'lifting', 'lifting_direct', 'lifting_rot'
        ]:
            if cfg.VIS.H36M:
                #seq = (db_rec['subject'], db_rec['action'], db_rec['subaction'])
                #if not seq in self.checked:
                #    print(seq)
                #    print(self.isdamaged(db_rec))
                #    self.checked.append(seq)
                #else:
                #    return np.ones(2)

                print(db_rec['image'])

            if self.data_format == 'undistoredzip':
                image_dir = 'undistoredimages.zip@'
            elif self.data_format == 'zip':
                image_dir = 'images.zip@'
            else:
                image_dir = ''
            image_file = osp.join(self.root, db_rec['source'], image_dir,
                                  'images', db_rec['image'])
            if 'zip' in self.data_format:
                from utils import zipreader
                data_numpy = zipreader.imread(
                    image_file,
                    cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
            else:
                data_numpy = cv2.imread(
                    image_file,
                    cv2.IMREAD_COLOR | cv2.IMREAD_IGNORE_ORIENTATION)
            # crop image from 1002 x 1000 to 1000 x 1000
            data_numpy = data_numpy[:1000]
            assert data_numpy.shape == (1000, 1000, 3), data_numpy.shape

        joints = db_rec['joints_2d'].copy()
        joints_3d = db_rec['joints_3d'].copy()
        joints_3d_camera = db_rec['joints_3d_camera'].copy()

        joints_3d_camera_normed = joints_3d_camera - joints_3d_camera[0]
        keypoint_scale = np.linalg.norm(joints_3d_camera_normed[8] -
                                        joints_3d_camera_normed[0])
        joints_3d_camera_normed /= keypoint_scale

        if cfg.DATALOADER.BENCHMARK:
            assert joints.shape[0] == cfg.KEYPOINT.NUM_PTS, joints.shape[0]
            #assert db_rec['joints_3d'].shape[0] == cfg.KEYPOINT.NUM_PTS,db_rec['joints_3d'].shape[0]
        center = np.array(db_rec['center']).copy()
        joints_vis = db_rec['joints_vis'].copy()
        scale = np.array(db_rec['scale']).copy()
        #undistort
        camera = db_rec['camera']
        R = camera['R'].copy()
        rotation = 0
        K = np.array([
            [float(camera['fx']), 0,
             float(camera['cx'])],
            [0, float(camera['fy']),
             float(camera['cy'])],
            [0, 0, 1.],
        ])
        T = camera['T'].copy()
        world3d = (R.T @ joints_3d_camera.T + T).T
        Rt = np.zeros((3, 4))
        Rt[:, :3] = R
        Rt[:, 3] = -R @ T.squeeze()
        # Rt[:, :3] = R.T
        # Rt[:, 3] = T.squeeze()

        if cfg.DATASETS.TASK not in [
                'lifting', 'lifting_direct', 'lifting_rot'
        ]:
            if cfg.VIS.H36M:
                if not np.isclose(world3d, joints_3d).all():
                    print('world3d difference')
                    print(world3d)
                    print('joints_3d')
                    print(joints_3d)
                from IPython import embed
                import matplotlib.pyplot as plt
                import matplotlib.patches as patches
                fig = plt.figure(1)
                ax1 = fig.add_subplot(231)
                ax2 = fig.add_subplot(232)
                ax3 = fig.add_subplot(233)
                ax4 = fig.add_subplot(234)
                ax5 = fig.add_subplot(235)
                ax6 = fig.add_subplot(236)
                ax1.imshow(data_numpy[..., ::-1])
                ax1.set_title('raw')

        #0.058 s
        distCoeffs = np.array([
            float(i) for i in [
                camera['k'][0], camera['k'][1], camera['p'][0], camera['p'][1],
                camera['k'][2]
            ]
        ])

        if cfg.DATASETS.TASK not in [
                'lifting', 'lifting_direct', 'lifting_rot'
        ]:
            if self.data_format != 'undistoredzip':
                data_numpy = cv2.undistort(data_numpy, K, distCoeffs)

        #0.30 s
        if cfg.DATALOADER.BENCHMARK: print('timer0', self.timer0.toc())
        if cfg.DATALOADER.BENCHMARK: self.timer.tic()

        if cfg.VIS.H36M:
            ax1.scatter(joints[:, 0], joints[:, 1], color='green')
            imagePoints, _ = cv2.projectPoints(joints_3d[:,
                                                         None, :], (0, 0, 0),
                                               (0, 0, 0), K, distCoeffs)
            imagePoints = imagePoints.squeeze()
            ax1.scatter(imagePoints[:, 0], imagePoints[:, 1], color='yellow')
            from vision.multiview import project_point_radial
            camera = db_rec['camera']
            f = (K[0, 0] + K[1, 1]) / 2.
            c = K[:2, 2].reshape((2, 1))
            iccv19Points = project_point_radial(joints_3d_camera, f, c,
                                                camera['k'], camera['p'])
            ax1.scatter(iccv19Points[:, 0], iccv19Points[:, 1], color='blue')
            # trans1 = get_affine_transform(center, scale, rotation, self.image_size)
            # box1 = affine_transform(np.array([[0, 0], [999, 999]]), trans1)
            # print(box1)
            # rect1 = patches.Rectangle(box1[0],box1[1][0] - box1[0][0],box1[1][1] - box1[0][1],linewidth=1,edgecolor='r',facecolor='none')
            # ax1.add_patch(rect1)
            # print(joints, joints.shape, center.shape)
        joints = cv2.undistortPoints(joints[:, None, :], K, distCoeffs,
                                     P=K).squeeze()
        center = cv2.undistortPoints(np.array(center)[None, None, :],
                                     K,
                                     distCoeffs,
                                     P=K).squeeze()
        #data_numpy  = self.compute_distorted_meshgrid(data_numpy ,
        #        float(camera['fx']),
        #        float(camera['fy']),
        #        float(camera['cx']),
        #        float(camera['cy']),
        #        np.array([float(i) for i in camera['k']]),
        #        np.array([float(i) for i in camera['p']]))
        if self.is_train:
            sf = self.scale_factor
            rf = self.rotation_factor
            scale = scale * np.clip(np.random.randn() * sf + 1, 1 - sf, 1 + sf)
            rotation = np.clip(np.random.randn() * rf, -rf * 2, rf * 2) \
                if random.random() <= 0.6 else 0

        if cfg.DATASETS.TASK not in [
                'lifting', 'lifting_direct', 'lifting_rot'
        ]:
            if cfg.VIS.H36M:
                # print(joints.shape, center.shape)
                # print(trans)
                ax2.imshow(data_numpy[..., ::-1])
                projected2d = K.dot(joints_3d_camera.T)
                projected2d[:2] = projected2d[:2] / projected2d[-1]
                ax1.scatter(projected2d[0], projected2d[1], color='red')
                ax2.scatter(joints[:, 0], joints[:, 1], color='green')
                ax2.scatter(projected2d[0], projected2d[1], color='red')
                # box1 = affine_transform(np.array([[0, 0], [999, 999]]), trans)
                # rect1 = patches.Rectangle(box1[0],box1[1][0] - box1[0][0],box1[1][1] - box1[0][1],linewidth=1,edgecolor='r',facecolor='none')
                # ax2.add_patch(rect1)
                ax2.set_title('undistort')

        #input = data_numpy
        trans = get_affine_transform(center, scale, rotation, self.image_size)
        cropK = np.concatenate((trans, np.array([[0., 0., 1.]])), 0).dot(K)
        KRT = cropK.dot(Rt)

        if cfg.DATASETS.TASK not in [
                'lifting', 'lifting_direct', 'lifting_rot'
        ]:
            input = cv2.warpAffine(
                data_numpy,
                trans, (int(self.image_size[0]), int(self.image_size[1])),
                flags=cv2.INTER_LINEAR)

        # 0.31 s

        for i in range(self.num_joints):
            if joints_vis[i, 0] > 0.0:
                joints[i, 0:2] = affine_transform(joints[i, 0:2], trans)
                if (np.min(joints[i, :2]) < 0
                        or joints[i, 0] >= self.image_size[0]
                        or joints[i, 1] >= self.image_size[1]):
                    joints_vis[i, :] = 0

        if cfg.DATASETS.TASK not in [
                'lifting', 'lifting_direct', 'lifting_rot'
        ]:
            if cfg.VIS.H36M:
                ax3.imshow(input[..., ::-1])
                # ax3.scatter(joints[:, 0], joints[:, 1])
                # projected2d = KRT.dot(np.concatenate((db_rec['joints_3d'], np.ones( (len(db_rec['joints_3d']), 1))), 1).T)
                ax3.scatter(joints[:, 0], joints[:, 1])
                ax3.set_title('cropped')
                ax4.imshow(input[..., ::-1])
                # ax4.scatter(joints[:, 0], joints[:, 1])
                # projected2d = KRT.dot(np.concatenate((db_rec['joints_3d'], np.ones( (len(db_rec['joints_3d']), 1))), 1).T)
                projected2d = cropK.dot(joints_3d_camera.T)
                projected2d[:2] = projected2d[:2] / projected2d[-1]
                #ax4.scatter(joints[:, 0], joints[:, 1], color='green')
                #ax4.scatter(projected2d[0], projected2d[1], color='red')
                ax4.scatter(joints[-2:, 0], joints[-2:, 1], color='green')
                ax4.scatter(projected2d[0, -2:],
                            projected2d[1, -2:],
                            color='red')
                ax4.set_title('cropped, project 3d to 2d')

            if self.transform:
                input = self.transform(input)

        target = self.heatmapcreator.get(joints)
        target = target.reshape((-1, target.shape[1], target.shape[2]))
        target_weight = joints_vis[:, 0, None]
        ## inaccurate heatmap
        #target, target_weight = self.generate_target(joints, joints_vis)
        # target = torch.from_numpy(target).float()
        # target_weight = torch.from_numpy(target_weight)

        if cfg.VIS.H36M:
            #ax5.imshow(target.max(0)[0])
            #ax5.scatter(coord2pix(joints[:, 0], 4), coord2pix(joints[:, 1], 4), color='green')
            from modeling.backbones.basic_batch import find_tensor_peak_batch
            # pred_joints, _ = find_tensor_peak_batch(target, self.sigma, cfg.BACKBONE.DOWNSAMPLE)
            # ax5.scatter(coord2pix(pred_joints[:, 0], 4), coord2pix(pred_joints[:, 1], 4), color='blue')
            # ax6.scatter(coord2pix(pred_joints[:, 0], 4), coord2pix(pred_joints[:, 1], 4), color='blue')

            heatmap_by_creator = self.heatmapcreator.get(joints)
            heatmap_by_creator = heatmap_by_creator.reshape(
                (-1, heatmap_by_creator.shape[1], heatmap_by_creator.shape[2]))
            ax6.imshow(heatmap_by_creator.max(0))
            ax6.scatter(coord2pix(joints[:, 0], 4),
                        coord2pix(joints[:, 1], 4),
                        color='green')
            # pred_joints, _ = find_tensor_peak_batch(torch.from_numpy(heatmap_by_creator).float(), self.sigma, cfg.BACKBONE.DOWNSAMPLE)
            # print('creator found', pred_joints)
            # ax5.scatter(coord2pix(pred_joints[:, 0], 4), coord2pix(pred_joints[:, 1], 4), color='red')
            # ax6.scatter(coord2pix(pred_joints[:, 0], 4), coord2pix(pred_joints[:, 1], 4), color='red')
            plt.show()
        ret = {
            'heatmap':
            target,
            'visibility':
            target_weight,
            'KRT':
            KRT,
            'points-2d':
            joints,
            'points-3d':
            world3d.astype(np.double)
            if 'lifting' not in cfg.DATASETS.TASK else world3d,
            'camera-points-3d':
            joints_3d_camera,
            'normed-points-3d':
            joints_3d_camera_normed,
            'scale':
            keypoint_scale,
            'action':
            torch.tensor([db_rec['action']]),
            'img-path':
            db_rec['image'],
        }
        if cfg.DATASETS.TASK not in [
                'lifting', 'lifting_direct', 'lifting_rot'
        ]:
            ret['img'] = input
        ret['K'] = cropK
        ret['RT'] = Rt
        if cfg.VIS.MULTIVIEWH36M:
            ret['T'] = T
            ret['R'] = R
            ret['original_image'] = data_numpy
        if cfg.KEYPOINT.TRIANGULATION == 'rpsm' and not self.is_train:
            ret['origK'] = K
            ret['crop_center'] = center
            ret['crop_scale'] = scale

        if cfg.DATALOADER.BENCHMARK: print('timer1', self.timer.toc())
        return ret
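One detail worth highlighting: cropK stacks the 2 x 3 crop/resize affine onto K, so projecting with cropK is the same as projecting with the original K and then applying the warp, which keeps KRT consistent with the warped image. A minimal numerical check of that identity with made-up values:

import numpy as np

K = np.array([[500., 0., 320.],
              [0., 500., 240.],
              [0., 0., 1.]])
trans = np.array([[0.5, 0., -60.],       # e.g. scale by 0.5, shift by (-60, -40)
                  [0., 0.5, -40.]])

cropK = np.concatenate((trans, np.array([[0., 0., 1.]])), 0).dot(K)

Xc = np.array([0.3, -0.2, 4.0])          # a point in camera coordinates
uv = K @ Xc
uv = uv[:2] / uv[2]                      # project with the original K
uv_warped = trans @ np.append(uv, 1.0)   # ... then apply the crop affine

uv2 = cropK @ Xc
uv2 = uv2[:2] / uv2[2]                   # project directly with cropK
assert np.allclose(uv_warped, uv2)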