# Code example #1 (score: 0)
	def __getitem__(self, idx):
		"""Return a sample dict with stacked 'xdata', 'ydata', 'label' tensors.

		For each class index iterated below, reads one image, preprocesses it,
		and builds a pose target from the parsed file name.
		"""
		xdata, ydata, label = [], [], []
		# NOTE(review): iterates a single class only — confirm range(1) is intended
		# (a sibling implementation iterates range(self.num_classes)).
		for i in range(1):
			image_name = self.image_names[i]
			label.append(i*torch.ones(1).long())
			# read image
			img_pil = Image.open(os.path.join(self.db_path, self.classes[i], image_name))
			xdata.append(self.preprocess(img_pil))
			# parse image name to get corresponding target
			_, _, az, el, ct, _ = parse_name(image_name)
			if self.db_type == 'real':
				R = rotation_matrix(az, el, ct)
			elif self.db_type == 'render':
				# rendered images use a negated camera tilt
				R = rotation_matrix(az, el, -ct)
			else:
				raise NameError('Unknown db_type passed')
			if self.ydata_type == 'axis_angle':
				tmpy = get_y(R)
			elif self.ydata_type == 'quaternion':
				tmpy = get_quaternion(R)
			else:
				raise NameError('Unknown ydata_type passed')
			ydata.append(torch.from_numpy(tmpy).float())
		xdata = torch.stack(xdata)
		ydata = torch.stack(ydata)
		label = torch.stack(label)
		sample = {'xdata': xdata, 'ydata': ydata, 'label': label}
		return sample
# Code example #2 (score: 0)
def get_residuals(ydata, key_rotations):
    """Axis-angle residual of every pose w.r.t. every key rotation.

    Returns an array of shape (n_samples, n_keys, 3) where entry [i, j]
    is get_y(key_rotations[j].T @ get_R(ydata[i])).
    """
    n_samples = ydata.shape[0]
    residuals = np.zeros((n_samples, len(key_rotations), 3))
    for sample_idx in range(n_samples):
        for key_idx, key_R in enumerate(key_rotations):
            residuals[sample_idx, key_idx, :] = get_y(
                np.dot(key_R.T, get_R(ydata[sample_idx])))
    return residuals
# Code example #3 (score: 0)
	def __getitem__(self, idx):
		"""Fetch a sample from the parent dataset and add rotation, bin and
		residual targets derived from the kmeans dictionary."""
		# base sample from the renderedImages parent class
		sample = super().__getitem__(idx)
		ydata = sample['ydata'].numpy()
		# full rotation matrix for each pose
		ydata_rot = np.stack([get_R(y) for y in ydata])
		sample['ydata_rot'] = torch.from_numpy(ydata_rot).float()
		# nearest kmeans cluster index per pose
		ydata_bin = self.kmeans.predict(ydata)
		sample['ydata_bin'] = torch.from_numpy(ydata_bin).long()
		# residual rotation relative to the assigned cluster's rotation
		ydata_res = np.stack([
			get_y(np.dot(self.rotations_dict[bin_idx].T, rot))
			for bin_idx, rot in zip(ydata_bin, ydata_rot)
		])
		sample['ydata_res'] = torch.from_numpy(ydata_res).float()
		return sample
# Code example #4 (score: 0)
 def __getitem__(self, idx):
     """Return one sample dict: preprocessed image ('xdata'), pose target
     ('ydata') and class label ('label') for index idx."""
     image_name = self.image_names[idx]
     label = self.labels[idx]
     # read image
     img_pil = Image.open(
         os.path.join(self.db_path, self.classes[label],
                      image_name + '.png'))
     xdata = self.preprocess(img_pil)
     # parse image name to get corresponding pose target
     _, _, az, el, ct, _ = parse_name(image_name)
     R = rotation_matrix(az, el, ct)
     if self.ydata_type == 'axis_angle':
         tmpy = get_y(R)
     elif self.ydata_type == 'quaternion':
         tmpy = get_quaternion(R)
     else:
         raise NameError('Unknown ydata_type passed')
     ydata = torch.from_numpy(tmpy).float()
     label = label * torch.ones(1).long()
     sample = {'xdata': xdata, 'ydata': ydata, 'label': label}
     return sample
# Code example #5 (score: 0)
	def __getitem__(self, idx):
		"""Return one image per class together with pose, kmeans-bin and
		residual targets, all stacked along the class dimension."""
		xdata, ydata, label = [], [], []
		for cls_idx in range(self.num_classes):
			# wrap idx so each class contributes one image per call
			image_name = self.image_names[cls_idx][idx % self.num_images[cls_idx]]
			label.append(cls_idx*torch.ones(1).long())
			# load and preprocess the image
			img_path = os.path.join(self.db_path, self.classes[cls_idx], image_name + '.png')
			xdata.append(preprocess(Image.open(img_path)))
			# pose target parsed from the file name
			_, _, az, el, ct, _ = parse_name(image_name)
			ydata.append(torch.from_numpy(get_y(rotation_matrix(az, el, ct))).float())
		xdata = torch.stack(xdata)
		ydata = torch.stack(ydata)
		# kmeans bin and residual w.r.t. the assigned cluster centre
		ydata_np = ydata.numpy()
		ydata_bin = self.kmeans.predict(ydata_np)
		ydata_res = ydata_np - self.kmeans.cluster_centers_[ydata_bin, :]
		sample = {
			'xdata': xdata,
			'ydata': ydata,
			'label': torch.stack(label),
			'ydata_bin': torch.from_numpy(ydata_bin).long(),
			'ydata_res': torch.from_numpy(ydata_res).float(),
		}
		return sample