Example #1
def do_grasp_plan(event=None,world=world,sensor=sensor,planner=planner,camera_xform=xform,camera_intrinsics=intrinsics):
    #refresh the simulated camera and grab an RGB-D image pair
    sensor.kinematicReset()
    sensor.kinematicSimulate(world,0.01)
    rgb,depth = sensing.camera_to_images(sensor)
    #generate candidate grasps and draw each one, shaded from red (score 0) to green (score 1)
    grasps,scores = planner.generate(camera_xform,camera_intrinsics,rgb,depth)
    for i,(g,s) in enumerate(zip(grasps,scores)):
        color = (1-s,s,0,1)
        g.add_to_vis("grasp_"+str(i),color=color)
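This callback pulls its state (world, sensor, planner, xform, intrinsics) in through default arguments. A sketch of how the sensor half of that state might be created, following the pattern of Example #6 below (the grasp planner and camera intrinsics are project-specific and omitted; 'rgbd_camera' is a hypothetical sensor name that depends on the robot definition):

from klampt import WorldModel, Simulator
from klampt.model import sensing

world = WorldModel()
world.readFile("my_world.xml")  # hypothetical world file with a camera-equipped robot
sim = Simulator(world)
sensor = sim.controller(0).sensor("rgbd_camera")  # sensor name depends on the robot file
xform = sensing.get_sensor_xform(sensor, world.robot(0))  # camera pose in world coordinates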
Example #2
        def loop_through_sensors(world=self.world,
                                 sensor=self.sensor,
                                 max_pics=max_pics):
            for counter in tqdm(range(max_pics)):
                if counter > 0:
                    self.chessEngine.randomizePieces(32)

                # Arrange pieces and model world
                self.chessEngine.arrangePieces()

                self._randomlyRotateCamera(20, 40, 0.6)

                sensor.kinematicReset()
                sensor.kinematicSimulate(world, 0.01)

                rgb, depth = sensing.camera_to_images(self.sensor)

                # Project RGB and depth images to rectify them
                local_corner_coords = np.float32([
                    sensing.camera_project(self.sensor, self.robot, pt)[:2]
                    for pt in self._boardWorldCorners
                ])

                H = cv2.getPerspectiveTransform(local_corner_coords,
                                                self._rectifiedPictureCorners)

                color_rectified = cv2.warpPerspective(
                    rgb, H, (DataUtils.IMAGE_SIZE, DataUtils.IMAGE_SIZE))
                depth_rectified = cv2.warpPerspective(
                    depth, H, (DataUtils.IMAGE_SIZE, DataUtils.IMAGE_SIZE))

                # scale to the 0-255 range before casting; casting the raw 0-1
                # values to uint8 would truncate almost every pixel to 0
                depth_rectified = np.uint8(
                    255 * (depth_rectified - depth_rectified.min()) /
                    (depth_rectified.max() - depth_rectified.min()))

                pieces_arrangement = self.chessEngine.getPieceArrangement()

                images, classes = DataUtils.split_image_by_classes(
                    color_rectified, depth_rectified, data_distribution,
                    pieces_arrangement)

                rectified_fname = self._rectifiedFNFormat % (counter)
                Image.fromarray(color_rectified).save(rectified_fname)

                for idx in range(sum(data_distribution)):
                    color_fname = self._colorFNFormat % (counter, idx)
                    depth_fname = self._depthFNFormat % (counter, idx)

                    Image.fromarray(images[idx, :, :, :3]).save(color_fname)
                    Image.fromarray(images[idx, :, :, 3]).save(depth_fname)

                    metadata_f.write(
                        f'{color_fname},{depth_fname},{classes[idx]}\n')

            vis.show(False)
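The rectification step above is plain OpenCV: cv2.getPerspectiveTransform fits a 3x3 homography mapping four source corners to four destination corners, and cv2.warpPerspective resamples the image through it. A self-contained sketch with made-up corner coordinates:

import cv2
import numpy as np

image = np.zeros((600, 640, 3), np.uint8)  # stand-in for the rendered RGB image
# four detected corners in pixel coordinates (hypothetical values), and their targets
src = np.float32([[100, 80], [520, 90], [540, 470], [90, 460]])
dst = np.float32([[0, 0], [511, 0], [511, 511], [0, 511]])
H = cv2.getPerspectiveTransform(src, dst)
rectified = cv2.warpPerspective(image, H, (512, 512))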
Example #3
        def loop_through_sensors(world=world,sensor=sensor,
            world_camera_viewpoints=world_camera_viewpoints,
            index=index,counters=counters):
            viewno = counters[0]
            variation = counters[1]
            total_count = counters[2]
            print("Generating data for sensor view",viewno,"variation",variation)
            sensor_xform = world_camera_viewpoints[viewno]
            sensing.set_sensor_xform(sensor,sensor_xform)
            rgb_filename = "image_dataset/color_%04d_var%04d.png"%(total_count,variation)
            depth_filename = "image_dataset/depth_%04d_var%04d.png"%(total_count,variation)
            grasp_filename = "image_dataset/grasp_%04d.png"%(total_count,)
            if PROBLEM=='1b':
                sensor.kinematicReset()
                sensor.kinematicSimulate(world,0.01)
                print("Done with kinematic simulate")

                rgb,depth = sensing.camera_to_images(sensor)
                print("Saving to",rgb_filename,depth_filename)
                Image.fromarray(rgb).save(rgb_filename)
                depth_scale = 8000
                depth_quantized = (depth * depth_scale).astype(np.uint32)
                Image.fromarray(depth_quantized).save(depth_filename)
                
                with open("image_dataset/metadata.csv",'a') as f:
                    line = "{},{},{},{},{},{},{}\n".format(
                        index, viewno,
                        loader.write(sensor_xform, 'RigidTransform'),
                        os.path.basename(rgb_filename),
                        os.path.basename(depth_filename),
                        os.path.basename(grasp_filename),
                        variation)
                    f.write(line)
            
            if PROBLEM=='1c' and variation==0:
                #calculate grasp map and save it
                grasp_map = make_grasp_map(world)
                grasp_map_quantized = (grasp_map*255).astype(np.uint8)
                channels = ['score','opening','axis_heading','axis_elevation']
                for i,c in enumerate(channels):
                    base,ext=os.path.splitext(grasp_filename)
                    fn = base+'_'+c+ext
                    Image.fromarray(grasp_map_quantized[:,:,i]).save(fn)
        
            #loop through variations and views
            counters[1] += 1
            if counters[1] >= max_variations:  
                counters[1] = 0
                counters[0] += 1
                if counters[0] >= len(world_camera_viewpoints):
                    vis.show(False)
                counters[2] += 1
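Callbacks like this are typically handed to the visualizer's event loop, which invokes them once per frame until vis.show(False) hides the window (as in the view/variation rollover above). A minimal driver, assuming klampt's vis.loop:

from klampt import vis

vis.add("world", world)
vis.loop(setup=vis.show, callback=loop_through_sensors)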
Example #4
def processDepthSensor(sensor):
    """
	#manual processing
	data = sensor.getMeasurements()
	w = int(sensor.getSetting("xres"))
	h = int(sensor.getSetting("yres"))
	#first, RGB then depth
	mind,maxd = float('inf'),float('-inf')
	for i in range(h):
		for j in range(w):
			pixelofs = (j+i*w)
			rgb = int(data[pixelofs])
			depth = data[pixelofs+w*h]
			mind = min(depth,mind)
			maxd = max(depth,maxd)
	print "Depth range",mind,maxd
	"""
    rgb, depth = sensing.camera_to_images(sensor)
    return rgb, depth
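With the default arguments, camera_to_images returns the RGB image as an H x W x 3 uint8 NumPy array and the depth image as an H x W float array, with depth in the sensor's native units (typically meters). A quick way to eyeball both, assuming sensor has already been simulated as in the other examples:

import matplotlib.pyplot as plt

rgb, depth = processDepthSensor(sensor)
fig, (ax1, ax2) = plt.subplots(1, 2)
ax1.imshow(rgb)
ax2.imshow(depth)
plt.show()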
Example #5
        def loop_through_sensors(
                world=world,
                sensor=sensor,
                world_camera_viewpoints=world_camera_viewpoints,
                index=index,
                counters=counters):
            viewno = counters[0]
            variation = counters[1]
            total_count = counters[2]
            print("Generating data for sensor view", viewno, "variation",
                  variation)
            sensor_xform = world_camera_viewpoints[viewno]

            #TODO: problem 1.B: perturb camera and change colors
            # Generate random axis, random angle, rotate about angle for orientation perturbation
            # Generate random axis, random dist, translate distance over axis for position perturbation
            rand_axis = np.random.rand(3)
            rand_axis = vectorops.unit(rand_axis)
            multiplier = np.random.choice([True, False], 1)
            rand_angle = (np.random.rand(1)[0] * 10 +
                          10) * (-1 if multiplier else 1)
            # print(rand_angle)
            R = so3.from_axis_angle((rand_axis, np.radians(rand_angle)))
            rand_axis = vectorops.unit(np.random.random(3))
            rand_dist = np.random.random(1)[0] * .10 + .10
            # print(rand_dist)
            t = vectorops.mul(rand_axis, rand_dist)
            sensor_xform = se3.mul((R, t), sensor_xform)
            sensing.set_sensor_xform(sensor, sensor_xform)

            rgb_filename = "image_dataset/color_%04d_var%04d.png" % (
                total_count, variation)
            depth_filename = "image_dataset/depth_%04d_var%04d.png" % (
                total_count, variation)
            grasp_filename = "image_dataset/grasp_%04d_var%04d.png" % (
                total_count, variation)

            #Generate sensor images
            sensor.kinematicReset()
            sensor.kinematicSimulate(world, 0.01)
            print("Done with kinematic simulate")
            rgb, depth = sensing.camera_to_images(sensor)
            print("Saving to", rgb_filename, depth_filename)
            Image.fromarray(rgb).save(rgb_filename)
            depth_scale = 8000
            depth_quantized = (depth * depth_scale).astype(np.uint32)
            Image.fromarray(depth_quantized).save(depth_filename)

            with open("image_dataset/metadata.csv", 'a') as f:
                line = "{},{},{},{},{},{},{}\n".format(
                    index, viewno, loader.write(sensor_xform,
                                                'RigidTransform'),
                    os.path.basename(rgb_filename),
                    os.path.basename(depth_filename),
                    os.path.basename(grasp_filename), variation)
                f.write(line)

            #calculate grasp map and save it
            grasp_map = make_grasp_map(world, total_count)
            grasp_map_quantized = (grasp_map * 255).astype(np.uint8)
            channels = ['score', 'opening', 'axis_heading', 'axis_elevation']
            for i, c in enumerate(channels):
                base, ext = os.path.splitext(grasp_filename)
                fn = base + '_' + c + ext
                Image.fromarray(grasp_map_quantized[:, :, i]).save(fn)

            #loop through variations and views
            counters[1] += 1
            if counters[1] >= max_variations:
                counters[1] = 0
                counters[0] += 1
                if counters[0] >= len(world_camera_viewpoints):
                    vis.show(False)
                counters[2] += 1
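The depth quantization above is invertible as long as the reader uses the same scale; a sketch of the reverse mapping for consumers of the dataset:

import numpy as np
from PIL import Image

depth_scale = 8000  # must match the scale used when the dataset was written
depth_quantized = np.asarray(Image.open(depth_filename))
depth = depth_quantized.astype(np.float32) / depth_scale  # back to sensor units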
Example #6
import klampt, time
from klampt import vis
from klampt import *
from klampt.model import sensing
import matplotlib.pyplot as plt

world = WorldModel()
world.readFile('../../data/simulation_test_worlds/sensortest.xml')  # a world with RobotiQ, and a camera
sim = Simulator(world)
camera = sim.controller(0).sensor('rgbd_camera')
T = ([1, 0, 0, 0, 0, -1, 0, 1, 0], [0, -2.0, 0.5])
sensing.set_sensor_xform(camera, T, link=-1)

sim.simulate(0.01)
sim.updateWorld()
rgb, depth = sensing.camera_to_images(camera)
print(rgb.shape)
print(depth.shape)
plt.imshow(depth)  # <---- THIS LINE PREVENTS VIS.SHOW() FROM SHOWING
plt.show()

vis.add('world', world)
vis.show()
while vis.shown():
    time.sleep(0.1)
vis.kill()

plt.show()
quit()
Example #7
def do_snapshot():
    global rgb, depth
    camera.kinematicReset()
    camera.kinematicSimulate(world, 0.01)
    rgb, depth = sensing.camera_to_images(camera)
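A hook like this is often bound to a key in the visualizer so a fresh RGB-D pair can be grabbed on demand; a sketch assuming klampt's vis.addAction:

from klampt import vis

vis.add("world", world)
vis.addAction(do_snapshot, "Take snapshot", "s")  # press 's' in the vis window
vis.show()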