Example #1
	def get_current_tote_content_cloud(self, cur_camera_R, cur_camera_t, scene_cloud=None, threshold=0.02, fit=False):
		'''
		Subtract the tote model from the current point cloud of the tote (with objects in it).
		If fit is True, first align the model to the scene with ICP; this takes some time.
		Return the point cloud of the remaining scene in the global frame.
		'''
		if scene_cloud is None:
			_, scene_cloud, _, _ = self.read_once(unit='meter', Nx3_cloud=True, clean=True)
		model_cloud = self.load_model_tote_cloud(downsample=True)
		assert model_cloud is not None, 'Error loading tote model'
		
		model_xform_R, model_xform_t = self.load_R_t(self.get_tote_viewing_camera_xform_path(), nparray=True)
		model_cloud = model_cloud.dot(model_xform_R.T) + model_xform_t # apply saved viewing-camera transform (row-vector convention)

		scene_cloud = scene_cloud[::downsample_rate, :] # downsample_rate is a module-level constant
		cur_camera_R = np.array(cur_camera_R).reshape(3,3).T # un-flatten the column-major rotation
		cur_camera_t = np.array(cur_camera_t)
		scene_cloud = scene_cloud.dot(cur_camera_R.T) + cur_camera_t # transform scene cloud into the global frame
		
		print "Making scene cloud with %i points"%scene_cloud.shape[0]
		scene_tree = KDTree(scene_cloud)
		print "Done"
		
		if fit:
			R, t = icp.match(model_cloud, scene_tree)
			model_cloud = model_cloud.dot(R.T) + t

		keep_idxs = self.subtract_model(model_cloud, scene_tree=scene_tree, threshold=threshold)
		content_cloud = scene_cloud[keep_idxs, :]

		# print 'Dirty content cloud has %i points'%content_cloud.shape[0]

		# Iteratively remove isolated points: keep only points that have at least
		# min_neighbors points (the point itself counts) within 0.03 m, tightening
		# the requirement from 3 to 5 neighbors across passes.
		for min_neighbors in (3, 4, 5):
			print "Making content cloud with %i points"%content_cloud.shape[0]
			content_tree = KDTree(content_cloud)
			print "Done. Querying neighbors within 0.03 m"
			idxs = content_tree.query_ball_point(content_cloud, 0.03)
			unisolated_flag = map(lambda x: len(x)>=min_neighbors, idxs)
			content_cloud = content_cloud[np.where(unisolated_flag)[0], :]
			print "Clean cloud has %i points"%content_cloud.shape[0]
		return content_cloud
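
The isolation filter above can be exercised on its own. Below is a minimal, self-contained sketch, assuming KDTree here is scipy.spatial.KDTree (whose query_ball_point matches the usage above) and using a synthetic stand-in cloud.

# Self-contained sketch of the isolation filter above, on synthetic data.
# Assumption: KDTree is scipy.spatial.KDTree; the random cloud stands in for a content cloud.
import numpy as np
from scipy.spatial import KDTree

cloud = np.random.rand(2000, 3) * 0.2            # placeholder content cloud, coordinates in meters
for min_neighbors in (3, 4, 5):
	tree = KDTree(cloud)
	neighbor_lists = tree.query_ball_point(cloud, 0.03)  # 3 cm radius; includes the point itself
	keep = [len(n) >= min_neighbors for n in neighbor_lists]
	cloud = cloud[np.where(keep)[0], :]
print "Filtered cloud has %i points"%cloud.shape[0]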
Example #2
	def get_current_bin_content_cloud(self, bin_letter, cur_camera_R, cur_camera_t, colorful=True, fit=False, threshold=0.02):
		'''
		Return a point cloud (colored or not) of the current bin content in the global frame, with the shelf removed.
		If fit is True, a new round of ICP is run on the model shelf.
		cur_camera_R and cur_camera_t give the current camera transform in the world frame.
		'''
		color, scene_cloud, depth_uv, _ = self.read_once(unit='meter', Nx3_cloud=False)
		if colorful:
			scene_cloud = colorize_point_cloud(scene_cloud, color, depth_uv).reshape(-1, 4)
		else:
			scene_cloud = scene_cloud.reshape(-1, 3)
		keep_idxs = np.where(scene_cloud[:,2].flatten()!=0)[0]
		scene_cloud = scene_cloud[keep_idxs, :]
		scene_cloud = scene_cloud[::downsample_rate, :]
		scene_cloud_xyz = scene_cloud[:, 0:3]
		cur_camera_R = np.array(cur_camera_R).reshape(3,3).T
		cur_camera_t = np.array(cur_camera_t)
		scene_cloud_xyz = scene_cloud_xyz.dot(cur_camera_R.T)+cur_camera_t # transform xyz coordinates into the global frame
		scene_cloud[:, 0:3] = scene_cloud_xyz # write the transformed coordinates back into scene_cloud
		scene_tree = KDTree(scene_cloud_xyz)
		model_cloud = self.load_model_bin_cloud(bin_letter, downsample=True)
		model_xform_R, model_xform_t = self.load_R_t(self.get_bin_viewing_camera_xform_path(bin_letter), nparray=True)
		model_cloud = model_cloud.dot(model_xform_R.T) + model_xform_t # apply saved transformation
		assert (self.shelf_perturb_R is not None) and (self.shelf_perturb_t is not None), 'Shelf perturbation has not been calibrated'
		model_cloud = model_cloud.dot(self.shelf_perturb_R.T) + self.shelf_perturb_t # apply perturbation transformation found by ICP
		if fit:
			R, t = icp.match(model_cloud, scene_tree)
			model_cloud = model_cloud.dot(R.T) + t
		keep_idxs = self.subtract_model(model_cloud, scene_tree=scene_tree, threshold=threshold)
		content_cloud = scene_cloud[keep_idxs, :]

		# (optional) isolated-point cleanup passes, as in get_current_tote_content_cloud, are disabled here
		return content_cloud
Example #3
def test_icp():
	pts_model = np.load('canonical_model_cloud/binA.npy')
	pts_scene = np.load('canonical_model_cloud/binA_perturbed.npy')
	pts_model = pts_model[np.where(pts_model[:,2]!=0)]
	pts_scene = pts_scene[np.where(pts_scene[:,2]!=0)]
	pts_model = pts_model[::downsample_rate, :]/1000
	pts_scene = pts_scene[::downsample_rate, :]/1000
	# (optional) debug plot of the scene cloud alone
	print 'pts_model shape after downsampling:', pts_model.shape
	print 'pts_scene shape after downsampling:', pts_scene.shape
	print 'Making KD Tree'
	scene_tree = KDTree(pts_scene)
	print 'Done making KD Tree'
	R, t = icp.match(pts_model, scene_tree, iterations=20)
	print R, t

	pts_model_xformed = pts_model.dot(R.T) + t

	pts_model_xformed = np.hstack((pts_model_xformed, np.ones((pts_model_xformed.shape[0],1))*rgb_to_pcl_float(255,0,0)))
	pts_scene = np.hstack((pts_scene, np.ones((pts_scene.shape[0],1))*rgb_to_pcl_float(255,255,0)))

	ax = render_3d_scatter(np.vstack((pts_model_xformed, pts_scene)).tolist())
	ax.set_xlim([-0.5,0.5])
	ax.set_ylim([-0.5,0.5])
	ax.set_zlim([0,1.2])
	ax.view_init(elev=0, azim=90)
	plt.title('Shelf Overlay')

	# (optional) separate debug plots of the original model, the transformed model, and the scene
	plt.show()
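
The icp module used above is not shown in these examples. Under the row-vector convention used throughout (cloud.dot(R.T) + t), icp.match presumably iterates nearest-neighbor correspondence against the scene KDTree followed by a Kabsch/SVD rigid fit. The sketch below is that standard point-to-point ICP loop, offered as an assumption rather than the original code.

# Hypothetical sketch of what icp.match may do internally: point-to-point ICP with
# nearest-neighbor correspondences from a scene KDTree and a Kabsch/SVD rigid fit,
# using the same row-vector convention as above (cloud.dot(R.T) + t).
import numpy as np
from scipy.spatial import KDTree

def icp_match_sketch(model_pts, scene_tree, iterations=20):
	R_total, t_total = np.eye(3), np.zeros(3)
	moved = model_pts.copy()
	for _ in range(iterations):
		# correspondence step: nearest scene point for every model point
		_, idxs = scene_tree.query(moved)
		matched = scene_tree.data[idxs]
		# alignment step: Kabsch/SVD rigid fit of moved -> matched
		mu_m, mu_s = moved.mean(axis=0), matched.mean(axis=0)
		H = (moved - mu_m).T.dot(matched - mu_s)
		U, _, Vt = np.linalg.svd(H)
		R = Vt.T.dot(U.T)
		if np.linalg.det(R) < 0:     # guard against reflections
			Vt[-1, :] *= -1
			R = Vt.T.dot(U.T)
		t = mu_s - R.dot(mu_m)
		moved = moved.dot(R.T) + t
		# accumulate the incremental transform into the total transform
		R_total, t_total = R.dot(R_total), R.dot(t_total) + t
	return R_total, t_total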
Example #4
	def get_current_content_cloud(self, cur_camera_R, cur_camera_t, filename, colorful=True, threshold=0.02, fit=False, tolist=False, index=0):
		'''
		Subtract a presaved model cloud (loaded via filename) from the current scene cloud to isolate new content (8/8/16).
		Uses code from get_current_bin_content_cloud and get_current_tote_content_cloud.
		'''
		color, scene_cloud, depth_uv, _ = self.read_once(limb=None, unit='meter', Nx3_cloud=False, index=index)
		if colorful:
			scene_cloud = colorize_point_cloud(scene_cloud, color, depth_uv).reshape(-1, 4)
		else:
			scene_cloud = scene_cloud.reshape(-1, 3)
		keep_idxs = np.where(scene_cloud[:,2].flatten()!=0)[0]
		scene_cloud = scene_cloud[keep_idxs, :]
		scene_cloud = scene_cloud[::downsample_rate, :]
		scene_cloud_xyz = scene_cloud[:, 0:3]
		cur_camera_R = np.array(cur_camera_R).reshape(3,3).T
		cur_camera_t = np.array(cur_camera_t)
		scene_cloud_xyz = scene_cloud_xyz.dot(cur_camera_R.T)+cur_camera_t # transform xyz coordinates into the global frame
		scene_cloud[:, 0:3] = scene_cloud_xyz # write the transformed coordinates back into scene_cloud
		
		if len(scene_cloud_xyz) <= 1:
			# scene is effectively empty; nothing to subtract
			return scene_cloud_xyz
		scene_tree = KDTree(scene_cloud_xyz)
		model_cloud = self.load_model_cloud( self.get_standard_cloud_path(filename), downsample=True)
		model_xform_R, model_xform_t = self.load_R_t(self.get_standard_viewing_camera_xform_path(filename), nparray=True)
		model_cloud = model_cloud.dot(model_xform_R.T) + model_xform_t # apply saved transformation
		assert (self.shelf_perturb_R is not None) and (self.shelf_perturb_t is not None), 'Shelf perturbation has not been calibrated'
		model_cloud = model_cloud.dot(self.shelf_perturb_R.T) + self.shelf_perturb_t # apply perturbation transformation found by ICP
		if fit:
			R, t = icp.match(model_cloud, scene_tree)
			model_cloud = model_cloud.dot(R.T) + t
		keep_idxs = self.subtract_model(model_cloud, scene_tree=scene_tree, threshold=threshold)
		bin_content_cloud = scene_cloud[keep_idxs, :]

		if tolist:
			bin_content_cloud = bin_content_cloud.tolist()

		return bin_content_cloud
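
A hedged usage sketch of the method above; the perception instance, camera pose, and model filename are placeholders rather than values from the original code.

# Hypothetical usage sketch: `perception` stands in for an instance of the class that
# defines get_current_content_cloud; the pose values and 'empty_tote' filename are placeholders.
cam_R = [1, 0, 0,  0, 1, 0,  0, 0, 1]   # flattened camera rotation in the world frame
cam_t = [0.5, 0.0, 0.8]                 # camera position in meters
content = perception.get_current_content_cloud(cam_R, cam_t, 'empty_tote',
	colorful=True, threshold=0.02, fit=False, tolist=False)
print "Content cloud has %i points"%content.shape[0]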
Example #5
	def icp_get_bin_transform(self, bin_letter, cur_camera_R, cur_camera_t):
		'''
		(private) Get the current bin transform relative to the canonical position for the specified bin.
		bin_letter is one of ['A', 'B', ..., 'L'] and is used to load the pre-computed shelf cloud.

		Return (R, t), where R is the rotation matrix flattened in column-major order,
		and t is a list of 3 numbers giving the translation.
		'''
		cur_camera_R = np.array(cur_camera_R).reshape(3,3).T
		cur_camera_t = np.array(cur_camera_t).flatten()
		_, scene_cloud, _, _ = self.read_once(unit='meter', Nx3_cloud=True, clean=True)
		model_cloud = self.load_model_bin_cloud(bin_letter, downsample=True)
		assert model_cloud is not None, 'Cannot find model cloud'
		scene_cloud = scene_cloud[::downsample_rate,:]
		model_xform_R, model_xform_t = self.load_R_t(self.get_bin_viewing_camera_xform_path(bin_letter), nparray=True)
		model_cloud_xformed = model_cloud.dot(model_xform_R.T) + model_xform_t
		scene_cloud_xformed = scene_cloud.dot(cur_camera_R.T) + cur_camera_t
		np.save(self.prefix+'tmp/model_cloud_xformed.npy', model_cloud_xformed)
		np.save(self.prefix+'tmp/scene_cloud_xformed.npy', scene_cloud_xformed)
		scene_tree = KDTree(scene_cloud_xformed)
		relative_R, relative_t = icp.match(model_cloud_xformed, scene_tree)
		relative_R = list(relative_R.flatten(order='F'))
		relative_t = list(relative_t.flat)
		return relative_R, relative_t
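
Because R comes back flattened in column-major order, a caller must un-flatten it before applying it; a minimal sketch with placeholder values follows.

# Hypothetical sketch of consuming the (R, t) pair returned above. The values are
# placeholders; reshape(..., order='F') undoes the flatten(order='F') used in the method.
import numpy as np

relative_R = [1, 0, 0,  0, 1, 0,  0, 0, 1]     # placeholder column-major flattened rotation
relative_t = [0.0, 0.0, 0.0]                   # placeholder translation
R = np.array(relative_R).reshape(3, 3, order='F')
t = np.array(relative_t)
cloud = np.random.rand(100, 3)                 # placeholder cloud in the global frame
cloud_aligned = cloud.dot(R.T) + t             # same row-vector convention as above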
Example #6
	def get_current_bin_content_cloud(self, bin_letter, cur_camera_R, cur_camera_t, limb, colorful=True, fit=False, threshold=0.01,
		crop=False, bin_bound=None, perturb_xform=None):
		'''
		Return a point cloud (colored or not) of the current bin content in the global frame, with the shelf removed.
		If fit is True, a new round of ICP is run on the model shelf.
		cur_camera_R and cur_camera_t give the current camera transform in the world frame.
		'''
		color, scene_cloud, depth_uv, _ = self.read_once(limb, unit='meter', Nx3_cloud=False)
		if colorful:
			scene_cloud = colorize_point_cloud(scene_cloud, color, depth_uv).reshape(-1, 4)
		else:
			scene_cloud = scene_cloud.reshape(-1, 3)
		keep_idxs = np.where(scene_cloud[:,2].flatten()!=0)[0]
		scene_cloud = scene_cloud[keep_idxs, :]
		scene_cloud = scene_cloud[::downsample_rate, :]
		scene_cloud_xyz = scene_cloud[:, 0:3]
		cur_camera_R = np.array(cur_camera_R).reshape(3,3).T
		cur_camera_t = np.array(cur_camera_t)
		scene_cloud_xyz = scene_cloud_xyz.dot(cur_camera_R.T)+cur_camera_t # transform xyz coordinates into the global frame
		scene_cloud[:, 0:3] = scene_cloud_xyz # write the transformed coordinates back into scene_cloud
		scene_tree = KDTree(scene_cloud_xyz)
		model_cloud = self.load_model_bin_cloud(bin_letter, limb, downsample=True)
		model_xform_R, model_xform_t = self.load_R_t(self.get_bin_viewing_camera_xform_path(bin_letter, limb), nparray=True)
		model_cloud = model_cloud.dot(model_xform_R.T) + model_xform_t # apply saved transformation
		assert (self.shelf_perturb_R is not None) and (self.shelf_perturb_t is not None), 'Shelf perturbation has not been calibrated'
		model_cloud = model_cloud.dot(self.shelf_perturb_R.T) + self.shelf_perturb_t # apply perturbation transformation found by ICP
		if fit:
			R, t = icp.match(model_cloud, scene_tree)
			model_cloud = model_cloud.dot(R.T) + t
		keep_idxs = self.subtract_model(model_cloud, scene_tree=scene_tree, threshold=threshold)
		bin_content_cloud = scene_cloud[keep_idxs, :]
		if crop:
			if perturb_xform is None:
				perturb_xform = (list(self.shelf_perturb_R.T.flat), list(self.shelf_perturb_t.flat))
			bin_content_cloud = self.crop_cloud(bin_content_cloud, bin_bound, perturb_xform)
		return bin_content_cloud
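
A hedged usage sketch of this cropped variant; the perception instance, camera pose, limb name, and bin_bound value below are placeholders, and the axis-aligned structure of bin_bound is only a guess since crop_cloud is not shown in these examples.

# Hypothetical usage sketch: names and values are placeholders; bin_bound is assumed to be
# an axis-aligned ((xmin, ymin, zmin), (xmax, ymax, zmax)) box, which crop_cloud may not expect.
cam_R = [1, 0, 0,  0, 1, 0,  0, 0, 1]
cam_t = [0.5, 0.0, 0.8]
bin_bound = ((-0.2, -0.3, 0.9), (0.2, 0.0, 1.2))
bin_cloud = perception.get_current_bin_content_cloud('A', cam_R, cam_t, 'left',
	colorful=True, fit=False, threshold=0.01, crop=True, bin_bound=bin_bound)
print "Bin A content cloud has %i points"%bin_cloud.shape[0]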