Example 1
    def __getitem__(self, idx):
        name = self.db[idx]
        label = self.anno[name]

        image_path = os.path.join(self.cfg.ROOT, name + '.png')
        img = load_image(image_path, mode='RGB')  # already / 255

        coor = label['uv_coor']
        # coor[1:,:] = coor[1:,:].reshape(5,4,-1)[:,::-1,:].reshape(20, -1)
        coor = np.array(coor)
        coor = to_torch(coor)
        # apply transforms to the image and compute the corresponding coordinates
        # if self.cfg.TRANSFORMS:
        #     img, coor = self.transforms(self.cfg.TRANSFORMS, img , coor)

        meta = edict({'name': name})
        heatmap = torch.zeros(22, img.size(1), img.size(2))
        for i in range(21):
            heatmap[i] = draw_heatmap(heatmap[i], coor[i],
                                      self.cfg.HEATMAP.SIGMA)

        return {
            'input': {
                'img': img
            },
            'heatmap': heatmap,
            'coor': to_torch(coor[:, :2]),
            'weight': 1,
            'meta': meta
        }
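
This loader (and the ones below) relies on a draw_heatmap helper that is not shown in these excerpts. As a rough sketch of what such a helper typically does, here is a minimal Gaussian renderer; the (x, y) coordinate order and the exact signature are assumptions, not taken from the source:

import torch

def draw_heatmap_sketch(heatmap, coor, sigma):
    """Return an (H, W) channel holding an unnormalized Gaussian centred on coor."""
    h, w = heatmap.shape
    ys = torch.arange(h, dtype=torch.float32).view(-1, 1)   # row indices as a column
    xs = torch.arange(w, dtype=torch.float32).view(1, -1)   # column indices as a row
    x0, y0 = float(coor[0]), float(coor[1])                 # assumed (x, y) order
    # usage mirrors the loop above: heatmap[i] = draw_heatmap_sketch(heatmap[i], coor[i], sigma)
    return torch.exp(-((xs - x0) ** 2 + (ys - y0) ** 2) / (2 * sigma ** 2))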
Example 2
    def __getitem__(self, idx):
        name = self.db[idx]
        label = self.anno[name]

        image_path = os.path.join(self.cfg.ROOT, 'color', name + '.png')
        img = load_image(image_path)  # C * H * W
        coor = to_torch(label['xyz'])
        project_coor = label['project']

        # print(project_coor[:, 0].min(), project_coor[:, 1].max())
        assert project_coor[:, :2].min() >= 0 and project_coor[:, :2].max() < 320

        matrix = label['K']
        # norm the pose
        # index_bone_length = torch.norm(coor[12,:] - coor[11,:])
        # coor[0, :] = (coor[0] + coor[12]) / 2.
        # coor = coor - coor[:1,:].repeat(21,1)

        # apply transforms to the image and compute the corresponding coordinates and camera intrinsic matrix
        if self.cfg.TRANSFORMS:
            img, coor, project_coor, matrix = self.transforms(
                self.cfg.TRANSFORMS, img, coor, project_coor, matrix)

        # print(project_coor[:, 0].max())
        assert project_coor[:, :2].min() >= 0 and project_coor[:, :2].max() < 256

        matrix = np.linalg.inv(matrix)  # take the inverse of the intrinsic matrix
        meta = edict({'name': name})
        isleft = name[-1] == 'L'
        # corresponding position of each joint in the flattened depth map
        project_coor = torch.tensor(project_coor).long()
        index = torch.tensor([
            i * img.size(1) * img.size(2) + project_coor[i, 0] * img.size(1) +
            project_coor[i, 1] for i in range(21)
        ])

        assert index.max() < img.size(1) * img.size(2) * 21, 'Wrong Position'
        heatmap = torch.zeros(self.cfg.NUM_JOINTS, img.size(1), img.size(2))

        for i in range(21):
            heatmap[i] = draw_heatmap(heatmap[i], project_coor[i],
                                      self.cfg.HEATMAP.SIGMA)

        return {
            'input': {
                'img': img,
                'hand_side': torch.tensor([isleft, 1 - isleft]).float(),
            },
            'index': index,
            'matrix': to_torch(matrix),
            # 'index_bone_length': index_bone_length,
            'heatmap': heatmap,
            'coor': to_torch(coor),
            'project': to_torch(project_coor),
            'weight': 1,
            'meta': meta
        }
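
The flattened index built above addresses a (21, H, W) volume viewed as one long vector; whether it walks each map in (row, column) or (column, row) order depends on the convention of project_coor, which the excerpt does not show, and the arithmetic only stays inside each joint's channel because the crop is square (256 x 256 after the transforms). A hedged sketch of one plausible downstream use with torch.take; the depth_volume tensor here is hypothetical:

import torch

H = W = 256                                     # crop size implied by the asserts above
depth_volume = torch.rand(21, H, W)             # hypothetical per-joint depth predictions
project_coor = torch.randint(0, 256, (21, 2))   # stands in for the projected joint positions

# vectorized equivalent of the index computed in __getitem__
index = torch.arange(21) * H * W + project_coor[:, 0] * H + project_coor[:, 1]

# torch.take treats depth_volume as a flat 1-D tensor, so this reads exactly
# one value per joint, each from that joint's own channel
joint_depths = torch.take(depth_volume, index)  # shape (21,)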
Example 3
    def __getitem__(self, idx):
        name = self.names[idx // 5220]
        path = self.db[name][idx % 5220]
        coor = self.anno[name][idx % 5220]

        img = load_image(path)

        # compute ground-truth coordinates
        coor = torch.tensor(coor)
        coor[:, 0] = coor[:, 0] * img.size(1)
        coor[:, 1] = (1 - coor[:, 1]) * img.size(2)
        coor = coor[:, :2]

        # apply transforms to the image and compute the corresponding coordinates
        if self.cfg.TRANSFORMS:
            img, coor = self.transforms(self.cfg.TRANSFORMS, img, coor)

        # OpenPose requires 22 channels; the last one is left empty
        heatmap = np.zeros((self.cfg.NUM_JOINTS, img.shape[1], img.shape[2]))
        for i in range(self.cfg.NUM_JOINTS - 1):
            heatmap[i, :, :] = draw_heatmap(heatmap[i],
                                            coor[i],
                                            self.cfg.HEATMAP.SIGMA,
                                            type=self.cfg.HEATMAP.TYPE)

        # print (name, coor)
        # fig = plt.figure(1)
        # ax = fig.add_subplot(111)
        # plot_hand(im_to_numpy(img), coor, ax)
        # plt.show()

        meta = edict({'name': path})

        assert coor.min() > 0, path

        return {
            'input': {
                'img': img
            },
            'coor': to_torch(coor),
            'heatmap': to_torch(heatmap),
            'weight': 1,
            'meta': meta
        }
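
Here the annotation stores coordinates normalized to [0, 1], with the second component measured from the opposite image edge, so the loader scales by the crop size and flips it via (1 - v); because the crops are square, img.size(1) and img.size(2) are interchangeable. An illustrative sketch of that mapping and its inverse for a square crop of side S (function names are hypothetical):

import torch

def denormalize(coor_norm, side):
    """Map normalized [0, 1] annotations to pixel coordinates on a square crop."""
    out = coor_norm.clone()
    out[:, 0] = coor_norm[:, 0] * side        # scale the first component
    out[:, 1] = (1 - coor_norm[:, 1]) * side  # flip, then scale, the second component
    return out

def normalize(coor_pix, side):
    """Inverse mapping, e.g. for writing predictions back in annotation space."""
    out = coor_pix.clone()
    out[:, 0] = coor_pix[:, 0] / side
    out[:, 1] = 1 - coor_pix[:, 1] / side
    return out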
Example 4
    def __getitem__(self, idx):
        w = self.db[idx]

        image_path = os.path.join(self.cfg.ROOT, w[1], w[1] + w[2], 'image',
                                  w[0] + '.png')
        label_path = os.path.join(self.cfg.ROOT, w[1], w[1] + w[2], 'label',
                                  w[0] + '.json')

        img = load_image(image_path, mode='GBR')  # C * H * W

        label = json.load(open(label_path))

        # compute ground-truth coordinates
        coor = np.array(label['perspective'])
        coor[:, 0] = coor[:, 0] * img.size(1)
        coor[:, 1] = (1 - coor[:, 1]) * img.size(2)
        coor = coor[:, :2]

        # apply transforms to the image and compute the corresponding coordinates
        if self.cfg.TRANSFORMS:
            img, coor = self.transforms(self.cfg.TRANSFORMS, img, coor)

        # OpenPose requires 22 channels; the last one is left empty
        heatmap = np.zeros((self.cfg.NUM_JOINTS, img.shape[1], img.shape[2]))
        for i in range(self.cfg.NUM_JOINTS - 1):
            heatmap[i, :, :] = draw_heatmap(heatmap[i],
                                            coor[i],
                                            self.cfg.HEATMAP.SIGMA,
                                            type=self.cfg.HEATMAP.TYPE)

        meta = edict({'name': w[1] + ' ' + w[2] + ' ' + w[0]})

        assert coor.min() > 0, label_path

        return {
            'input': {
                'img': img
            },
            'coor': to_torch(coor),
            'heatmap': to_torch(heatmap),
            'weight': 1,
            'meta': meta
        }
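
Both OpenPose-style loaders allocate NUM_JOINTS heatmap channels but only draw NUM_JOINTS - 1 of them, so the extra channel stays at zero. In OpenPose the last channel is the background map; if a training target ever needs it, one common way to fill it (not something these loaders do) is one minus the per-pixel maximum over the joint channels:

import numpy as np

heatmap = np.zeros((22, 256, 256), dtype=np.float32)   # 21 joint channels + background
# ... channels 0..20 drawn with draw_heatmap as above ...
heatmap[-1] = 1.0 - heatmap[:-1].max(axis=0)           # background = 1 - max over joints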
Example 5
    def reprocess(self, input, reprocess_cfg):
        img = input['img']
        coor2d = input['coor2d']
        matrix = input['matrix']
        meta = input['meta']
        # apply transforms to the image and compute the corresponding coordinates and camera intrinsic matrix
        if reprocess_cfg.TRANSFORMS:
            img, _, coor2d, matrix = transforms(reprocess_cfg.TRANSFORMS, img,
                                                None, coor2d, matrix)

        matrix = np.linalg.inv(matrix)  # take the inverse of the intrinsic matrix

        coor3d = coor2d.clone()
        coor3d[:, :2] *= coor3d[:, 2:]
        coor3d = torch.matmul(coor3d, to_torch(matrix).transpose(0, 1))

        root_depth = coor2d[0, 2].clone()
        index_bone_length = torch.norm(coor3d[9, :] - coor3d[10, :])
        relative_depth = (coor2d[:, 2] - root_depth) / index_bone_length

        heatmap = torch.zeros(reprocess_cfg.NUM_JOINTS, img.size(1),
                              img.size(2))

        for i in range(21):
            heatmap[i] = draw_heatmap(heatmap[i], coor2d[i],
                                      reprocess_cfg.HEATMAP.SIGMA)

        return {
            'input': {
                'img': img
            },
            'heatmap': heatmap,
            'matrix': to_torch(matrix),
            'color_hm': heatmap,
            'coor3d': to_torch(coor3d),
            'coor2d': to_torch(coor2d),
            'root_depth': root_depth,
            'index_bone_length': index_bone_length,
            'relative_depth': relative_depth,
            'weight': 1,
            'meta': meta,
        }
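
The 2D-to-3D step in reprocess multiplies each pixel coordinate by its depth and applies the inverse intrinsics, i.e. X = K^-1 (u*z, v*z, z)^T per joint. A quick round-trip sketch with a hypothetical intrinsic matrix K, checking that re-projecting the recovered 3D points gives back the original (u, v):

import numpy as np
import torch

K = np.array([[320.0, 0.0, 128.0],
              [0.0, 320.0, 128.0],
              [0.0, 0.0, 1.0]])                  # hypothetical intrinsics

# hypothetical (u, v, z) joints: pixel coordinates plus depth
uv = torch.rand(21, 2, dtype=torch.float64) * 256
z = torch.rand(21, 1, dtype=torch.float64) + 0.5
coor2d = torch.cat([uv, z], dim=1)

# back-project exactly as reprocess does
coor3d = coor2d.clone()
coor3d[:, :2] *= coor3d[:, 2:]
coor3d = torch.matmul(coor3d, torch.from_numpy(np.linalg.inv(K)).transpose(0, 1))

# re-project with K and divide by depth; this recovers (u, v)
reproj = torch.matmul(coor3d, torch.from_numpy(K).transpose(0, 1))
assert torch.allclose(reproj[:, :2] / reproj[:, 2:], coor2d[:, :2])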
Example 6
    def __getitem__(self, idx):

        name = self.name[idx // 1500]
        coor = self.db[idx // 1500][idx % 1500, :, :]
        coor = to_torch(coor)

        name = name.split("_")
        image_path = os.path.join(self.cfg.ROOT, name[0],
                                  name[1] + '_' + str(idx % 1500) + '.png')
        img = load_image(image_path, mode='RGB')

        # apply transforms to the image and compute the corresponding coordinates
        if self.cfg.TRANSFORMS:
            img, coor = self.transforms(self.cfg.TRANSFORMS, img, coor)

        # print (name, idx % 1500, coor)
        # fig = plt.figure(1)
        # ax = fig.add_subplot(111)
        # plot_hand(im_to_numpy(img), coor, ax)
        # plt.show()

        meta = edict({'name': name})
        heatmap = torch.zeros(self.cfg.NUM_JOINTS, img.size(1), img.size(2))
        for i in range(self.cfg.NUM_JOINTS - 1):
            heatmap[i] = draw_heatmap(heatmap[i], coor[i],
                                      self.cfg.HEATMAP.SIGMA)

        return {
            'input': {
                'img': img
            },
            'heatmap': heatmap,
            'coor': coor,
            'weight': 1,
            'meta': meta
        }
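
Every __getitem__ here returns a nested dictionary, which PyTorch's default collate function batches recursively, so a training loop can index a batch exactly like a single sample. A minimal, self-contained usage sketch with a dummy dataset standing in for the classes above (the real class names are not shown in the source):

import torch
from torch.utils.data import Dataset, DataLoader

class DummyHandDataset(Dataset):
    """Stand-in returning the same structure as the loaders above."""
    def __len__(self):
        return 8

    def __getitem__(self, idx):
        return {
            'input': {'img': torch.zeros(3, 256, 256)},
            'heatmap': torch.zeros(22, 256, 256),
            'coor': torch.zeros(21, 2),
            'weight': 1,
            'meta': {'name': str(idx)},
        }

loader = DataLoader(DummyHandDataset(), batch_size=4)
for batch in loader:
    print(batch['input']['img'].shape)   # torch.Size([4, 3, 256, 256])
    print(batch['heatmap'].shape)        # torch.Size([4, 22, 256, 256])
    print(batch['meta']['name'])         # list of 4 sample names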
Example 7
	def reprocess(self, input, data_cfg):
		img = input['img']
		depthmap = input['depthmap']

		coor2d = input['coor2d']
		assert coor2d[:, :2].min() >= 0 and coor2d[:, :2].max() < 320
		matrix = input['matrix']
		meta = input['meta']
		# apply transforms to the image and compute the corresponding coordinates and camera intrinsic matrix
		if data_cfg.TRANSFORMS:
			img, depthmap, coor2d, matrix = self.transforms(data_cfg.TRANSFORMS, img, depthmap, coor2d, matrix)

		# if depthmap_max - depthmap_min < 1e-6:
		#     print(name, ": ", depthmap_max - depthmap_min)
		# depthmap = (depthmap.max() - depthmap) / (depthmap_max - depthmap_min)
		# print(depthmap)

		matrix = np.linalg.inv(matrix)  # take the inverse of the intrinsic matrix

		coor3d = coor2d.clone()
		coor3d[:,:2] *= coor3d[:, 2:]
		coor3d = torch.matmul(coor3d, to_torch(matrix).transpose(0, 1))

		root_depth = coor2d[0, 2].clone()
		index_bone_length = torch.norm(coor3d[9,:] - coor3d[10,:])
		relative_depth = (coor2d[:,2] - root_depth) / index_bone_length

		depthmap *= float(2**16 - 1)
		depthmap = (depthmap - root_depth) / index_bone_length
		depthmap_max = depthmap.max()
		depthmap_min = depthmap.min()
		depthmap = (depthmap - depthmap_min) / (depthmap_max - depthmap_min)
		
		heatmap = torch.zeros(data_cfg.NUM_JOINTS, img.size(1), img.size(2))
		depth   = torch.zeros(data_cfg.NUM_JOINTS, img.size(1), img.size(2))

		for i in range(21):
			heatmap[i] = draw_heatmap(heatmap[i], coor2d[i], data_cfg.HEATMAP.SIGMA)
			depth[i]   = heatmap[i] * (coor2d[i, 2] - coor2d[0, 2]) / index_bone_length


		return {'input': {'img':img,
						  'depthmap': depthmap,
						  },
				'heatmap': heatmap,
				'matrix': to_torch(matrix),
				'color_hm': heatmap,
				'depth_hm' :  depth,
				'depthmap': depthmap,
				'depthmap_max': depthmap_max,
				'depthmap_range': depthmap_max - depthmap_min,
				'coor3d': to_torch(coor3d),
				'coor2d': to_torch(coor2d),
				'root_depth': root_depth,
				'index_bone_length': index_bone_length,
				'relative_depth': relative_depth,
				'weight': 1,
				'meta': meta,
				}
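
reprocess normalizes each joint depth to relative_depth = (z - root_depth) / index_bone_length and min-max normalizes the depth map, returning root_depth, index_bone_length, depthmap_max and depthmap_range so the scaling can be undone downstream. A brief sketch of that inversion (the tensors here are placeholders for network outputs or stored targets):

import torch

relative_depth = torch.randn(21)         # placeholder prediction / target
root_depth = torch.tensor(0.72)          # placeholder values for the stored keys
index_bone_length = torch.tensor(0.04)

# undo the per-joint normalization done in reprocess
z = relative_depth * index_bone_length + root_depth

# the normalized depthmap can be undone the same way with the stored keys:
#   d = depthmap * depthmap_range + (depthmap_max - depthmap_range)
#   depth = d * index_bone_length + root_depth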