def setup_config(self, scene_path):
    """Creates model config that mirrors the simulation.

    Args:
        scene_path (str): path to .g file which contains the scene

    Returns:
        c (libry.Config): model config object
        v (libry.ConfigurationViewer): model config viewer
        objects (list): list of frames that match the objects in the simulation
    """
    # internal robot configuration
    c = ry.Config()
    c.addFile(scene_path)

    # configuration viewer
    v = ry.ConfigurationViewer()
    v.setConfiguration(c)

    # frames for objects in scene
    objects = []
    for i in range(len(self.sim_objects)):
        obj = c.addFrame('object_{}'.format(i))
        obj.setPosition(self.obj_info[i]['pos'])
        obj.setQuaternion(self.obj_info[i]['quat'])
        obj.setShape(ry.ST.sphere, [.03])
        obj.setColor(self.obj_info[i]['color_code'])
        v.setConfiguration(c)
        objects.append(obj)

    return c, v, objects
def _get_CSV(R):
    S = R.simulation(ry.SimulatorEngine.physx, True)
    S.addSensor("camera")

    C = ry.Config()
    C.addFile(join(path_to_rai, "scenarios/pandasTable.g"))

    V = ry.ConfigurationViewer()
    V.setConfiguration(C)

    return C, S, V
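# Example (sketch): how the C, S, V triple returned by _get_CSV might be used in a
# simple velocity-control loop, reusing the damped least-squares IK step that appears
# in the other snippets in this collection. Assumes a real-world config `R` already
# exists, numpy is imported as np, and the model scene has an "R_gripper" frame.
C, S, V = _get_CSV(R)
tau = .01
for t in range(300):
    q = S.get_q()           # read joint state from the simulation
    C.setJointState(q)      # mirror it in the model configuration
    V.setConfiguration(C)   # update the viewer
    [y, J] = C.evalFeature(ry.FS.position, ["R_gripper"])
    # damped pseudo-inverse step towards a small downward velocity
    vel = J.T @ np.linalg.inv(J @ J.T + 1e-2 * np.eye(y.shape[0])) @ [0., 0., -0.1]
    S.step(vel, tau, ry.ControlMode.velocity)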
def __init__(self, no_obj):
    self.no_obj = no_obj
    self._shape_dic = {
        'sphere': [ry.ST.sphere, 1],
        'cylinder': [ry.ST.cylinder, 2],
        'box': [ry.ST.box, 3]
    }
    self.steps_taken = 0
    self._size_parameters = [0.025, 0.1, 0.002]
    # self._size_parameters = [0.025, 0.06, 0.001]
    self._sizes = np.arange(*self._size_parameters)
    self.positions = np.arange(-0.3, 0.35, 0.05)
    self.locations = {
        'trash': [0.0, -1.3, 0.8],
        'good': [0.55, 0.05, 0.65],
        'maybe': [-0.55, 0.05, 0.65]
    }
    self._error_threshold = 0.01
    self._error_change_threshold = 0.01
    self.object_indices = np.arange(1, self.no_obj + 1)
    self.objects_spawned = 0

    self.RealWorld = ry.Config()
    self.RealWorld.addFile("../../scenarios/challenge.g")
    self.Model = ry.Config()
    self.Model.addFile('../../scenarios/pandasTable_2.g')
    self.Model_Viewer = ry.ConfigurationViewer()
    self.Model_Viewer.setConfiguration(self.Model)
    self.camera_Frame = self.Model.frame('camera')

    self._reorder_objects()
    for _ in range(self.no_obj):
        # self.spawn_random_object()
        self.spawn_nice_object()

    self.Simulation = self.RealWorld.simulation(ry.SimulatorEngine.physx, True)
    self.Simulation.addSensor('camera')
    self._set_focal_length(0.895)
    self.tau = 0.01

    [self.background_rgb, self.background_depth] = self.Simulation.getImageAndDepth()
    self.background_gray = cv2.cvtColor(self.background_rgb, cv2.COLOR_BGR2GRAY)

    self.open_gripper()
    self.start_JV = self.Simulation.get_q()
    print('Init successful!')
def __init__(self, g_file):
    self.RealWorld = ry.Config()
    self.RealWorld.addFile(g_file)
    self.V = ry.ConfigurationViewer()
    self.V.setConfiguration(self.RealWorld)
    self.S = self.RealWorld.simulation(ry.SimulatorEngine.physx, True)

    self.C = ry.Config()
    self.C.addFile(g_file)
    self.C.setFrameState(self.RealWorld.getFrameState())
    self.V.setConfiguration(self.C)

    self.intial_q = self.S.get_q()
    self.S.addSensor("kitchen_camera")
    self.S.addSensor("table_camera")
def __init__(self,
             action_duration=0.5,
             floor_level=0.6,
             finger_relative_level=0.14,
             contact_distance=0.116,
             sticky_radius=0.08,
             tau=.01,
             file=None,
             display=False):
    self.action_duration = action_duration
    self.floor_level = floor_level
    self.finger_relative_level = finger_relative_level
    self.contact_distance = contact_distance
    self.sticky_radius = sticky_radius
    self.tau = tau

    self.n_steps = int(self.action_duration / self.tau)
    self.proportion_per_step = 1 / self.n_steps
    self.target_tolerance = 0.1
    self.contact_vec = None

    self.config = ry.Config()
    if file is not None:
        self.config.addFile(file)
    else:
        self.config.addFile(
            os.getenv("HOME") + '/git/ryenv/ryenv/z.pick_and_place.g')

    self.config.makeObjectsFree(['finger'])
    self.config.setJointState([0.3, 0.3, 0.15, 1., 0., 0., 0.])

    self.finger_radius = self.config.frame('finger').info()['size'][0]

    self.simulation = self.config.simulation(ry.SimulatorEngine.physx, display)

    self.reset_disk()
    self.reset([0.3, 0.3])

    self.maximum_xy_for_finger = 1.7
    # self.minimum_rel_z_for_finger = 0.05 + 0.03
    self.maximum_rel_z_for_finger = 1

    self.config.frame('floor').setColor(np.array((200, 200, 200)) / 255)
    rgb = [93, 87, 94]
    self.config.frame('finger').setColor(np.array([*rgb, 255]) / 255)
    self.config.frame('disk').setColor(np.array([*rgb, 255]) / 255)
def main():
    # === Setup ===
    output_folders = create_folder_structure()

    # optionally add a description to the dataset
    if len(sys.argv) >= 2:
        with open(os.path.join(output_folders.root_folder, "ds_desc.txt"), "w") as f:
            f.write(sys.argv[1])

    dataframes = {}

    # === Data Generation ===
    C = ry.Config()
    C.addFile("common/single_world.g")
    # viewer = C.view()
    C.addFrame("cam_tmp", parent="camera")

    i = 0
    while i < datapoint_count:
        # Randomize Scene Contents
        set_stick_color(C, random_rgb())
        set_table_color(C, random_rgb())
        random_stick_pos_in_gripper_local(C, gripper_frame_name="endeffR")
        set_random_camera_pose(C)

        # move arm and stick to a random location
        EE_goal_frame_name = set_random_gripper_goal(C)
        set_komo_inv_kin(C, EE_goal_frame_name, EE_frame="endeffR")

        df = DataFrame(C, output_folders)
        df.extract_data(C)
        if df.shows_two_keypoints():
            # Data is randomly generated and sometimes doesn't show two keypoints
            dataframes[i] = df.write_images_and_return_dataframe_dict(i)
            if i % 50 == 0 and i != 0:
                print("Done with sample", i)
            i += 1

    filename = os.path.join(output_folders.root_folder, dataset_desc_filename)
    dump_data_as_yaml(filename, dataframes)
def __init__(self,
             action_duration=0.5,
             action_length=0.1,
             break_pos_thres=0.03,
             floor_level=0.65,
             finger_relative_level=0.14,
             tau=.01,
             safety_distance=0.1,
             spherically_symmetric_neighbours=False,
             file=None,
             display=False):
    self.action_duration = action_duration
    self.action_length = action_length
    self.break_pos_thres = break_pos_thres
    self.floor_level = floor_level
    self.finger_relative_level = finger_relative_level
    self.tau = tau
    self.safety_distance = safety_distance
    self.spherically_symmetric_neighbours = spherically_symmetric_neighbours

    self.n_steps = int(self.action_duration / self.tau)
    self.proportion_per_step = 1 / self.n_steps

    self.config = ry.Config()
    if file is not None:
        self.config.addFile(file)
    else:
        self.config.addFile(
            os.getenv("HOME") + '/git/ryenv/ryenv/z.push_default.g')

    self.config.makeObjectsFree(['finger'])
    self.config.setJointState([0.3, 0.3, 0.15, 1., 0., 0., 0.])

    self.finger_radius = self.config.frame('finger').info()['size'][0]

    self.simulation = self.config.simulation(ry.SimulatorEngine.physx, display)

    self.reset_disk()
    self.disk_dimensions = [0.2, 0.25]
    self.reset([0.3, 0.3])
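# Example (sketch): constructing the pushing environment whose __init__ is shown
# above. The class name PushEnv is a placeholder (the actual class name is not part
# of this snippet); reset() and reset_disk() are methods the __init__ itself calls.
env = PushEnv(action_duration=0.5, action_length=0.1, display=True)
env.reset([0.0, 0.3])  # re-place the finger at a new xy start position (signature as used above)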
def setup_challenge_env(add_red_ball=False, number_objects=30, show_background=False):
    # -- Add empty REAL WORLD configuration and camera
    # R = ry.Config()
    # R.addFile(join(pathRepo, "scenarios/pandasTable.g"))
    # S = R.simulation(ry.SimulatorEngine.physx, True)
    # S.addSensor("camera")
    # back_frame = perc.extract_background(S, duration=2, vis=show_background)
    back_frame = None

    R = ry.Config()
    R.addFile(join(path_to_rai, "scenarios/challenge.g"))

    if add_red_ball:
        # only add 1 red ball
        number_objects = 1
        # you can also change the shape & size
        R.getFrame("obj0").setColor([1., 0, 0])
        R.getFrame("obj0").setShape(ry.ST.sphere, [0.03])
        # RealWorld.getFrame("obj0").setShape(ry.ST.ssBox, [.05, .05, .2, .01])
        R.getFrame("obj0").setPosition([0.0, -.02, 0.68])
        R.getFrame("obj0").setContact(1)

        # R.getFrame("obj1").setColor([0, 0, 1.])
        # R.getFrame("obj1").setShape(ry.ST.sphere, [.03])
        # # RealWorld.getFrame("obj0").setShape(ry.ST.ssBox, [.05, .05, .2, .01])
        # R.getFrame("obj1").setPosition([0.0, .3, 2.])
        # R.getFrame("obj1").setContact(1)

    for o in range(number_objects, 30):
        name = "obj%i" % o
        R.delFrame(name)

    # C, S, V = _get_CSV(R)

    return R, back_frame  # S, C, V,
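# Example (sketch): using setup_challenge_env to build a real world with a single
# red ball, then attaching a PhysX simulation and camera sensor, as the commented-out
# _get_CSV call above would have done.
R, back_frame = setup_challenge_env(add_red_ball=True)
S = R.simulation(ry.SimulatorEngine.physx, True)
S.addSensor("camera")
[rgb, depth] = S.getImageAndDepth()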
def setup():
    # -- MODEL WORLD configuration, this is the data structure on which you represent
    # what you know about the world and compute things (controls, contacts, etc)
    C = ry.Config()
    C.addFile(join(pathRepo, "scenarios/pandasTable.g"))

    # add box
    box = C.addFrame("goal")
    side = 0.08
    box.setShape(ry.ST.ssBox, size=[side, side, side, .02])
    box.setColor([.5, 1, 1])
    box.setPosition([0, .05, 0.68])
    box.setShape(ry.ST.sphere, [.03])  # note: this overrides the ssBox shape set above
    # box.setPosition([0.0, .05, 0.68])
    box.setContact(1)

    # set contact flag for grippers
    C.frame("R_gripper").setContact(1)
    C.frame("L_gripper").setContact(1)
    C.frame("goal").setContact(1)

    return C
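# Example (sketch): servoing the right gripper towards the "goal" frame created by
# setup(), using the same damped least-squares step as the IK loops elsewhere in this
# collection. Assumes a simulation S built from a matching real-world config and that
# numpy is imported as np.
C = setup()
goal_pos = C.frame("goal").getPosition()
tau = .01
for t in range(300):
    C.setJointState(S.get_q())   # keep the model in sync with the simulation
    [y, J] = C.evalFeature(ry.FS.position, ["R_gripper"])
    vel = J.T @ np.linalg.inv(J @ J.T + 1e-2 * np.eye(y.shape[0])) @ (goal_pos - y)
    S.step(vel, tau, ry.ControlMode.velocity)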
def setup_simulation(self, scene_path, n_objects, spawn_special=False):
    """Loads and populates the scene.

    Args:
        scene_path (str): path to .g file which contains the scene
        n_objects (int): number of objects to spawn randomly in a predefined area of the scene
        spawn_special (bool): if True, spawn an additional sphere which has to be
            thrown into the basket
    """
    # real world configuration
    self.RealWorld = ry.Config()
    self.RealWorld.addFile(scene_path)

    # add objects to scene
    for i in range(n_objects):
        obj = self.RealWorld.addFrame('object_{}'.format(i))
        obj.setPosition(get_random_position())
        obj.setQuaternion(get_random_quaternion())
        shape, params = get_random_shape()
        obj.setShape(shape, params)
        obj.setColor(get_random_color())
        obj.setMass(0.2)
        obj.setContact(1)
        self.sim_objects.append(obj)

    # spawn special sphere
    if spawn_special:
        obj = self.RealWorld.addFrame('object_{}'.format(n_objects))
        obj.setPosition(get_random_position())
        obj.setQuaternion(get_random_quaternion())
        obj.setShape(ry.ST.sphere, [.05])
        obj.setColor([0, 0, 0])
        obj.setMass(0.2)
        obj.setContact(1)
        self.sim_objects.append(obj)

    # turn real world configuration into simulation
    self.S = self.RealWorld.simulation(ry.SimulatorEngine.physx, True)
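# Example (sketch): typical call order for this environment class. setup_simulation
# populates the real world and starts the PhysX simulation; setup_config (shown
# further up) then mirrors the objects in a model config. `env`, the scene paths and
# the obj_info bookkeeping that setup_config reads are assumptions about the
# surrounding class, not part of this snippet.
env.sim_objects = []
env.setup_simulation("scenarios/challenge.g", n_objects=5, spawn_special=True)
# ... fill env.obj_info, e.g. from perception or from the simulation state ...
c, v, objects = env.setup_config("scenarios/pandasTable.g")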
def __init__(self,
             action_duration=0.5,
             action_length=0.1,
             floor_level=0.075,
             wall_height=0.1,
             wall_thickness=0.01,
             finger_relative_level=0.075,
             tau=.01,
             file=None,
             display=False):
    self.action_duration = action_duration
    self.action_length = action_length
    self.floor_level = floor_level
    self.wall_height = wall_height
    self.wall_thickness = wall_thickness
    self.finger_relative_level = finger_relative_level
    self.tau = tau

    self.n_steps = int(self.action_duration / self.tau)
    self.proportion_per_step = 1 / self.n_steps

    self.config = ry.Config()
    if file is not None:
        self.config.addFile(file)
    else:
        self.config.addFile(
            os.getenv("HOME") + '/git/ryenv/ryenv/z.push_maze.g')

    self.config.makeObjectsFree(['finger'])

    self.simulation = self.config.simulation(ry.SimulatorEngine.physx, display)

    self.wall_num = 0
    self.reset([0, -0.1])
def main():
    ###
    # Setup
    max_datapoints = 100

    C = ry.Config()
    C.addFile("world.g")
    Viewer = C.view()

    camera = C.cameraView()
    camera.addSensorFromFrame('camera')
    camera.selectSensor('camera')

    ###
    # Movement
    if "goal" not in C.getFrameNames():
        goal = C.addFrame("goal")
        goal.setShape(ry.ST.sphere, [.05])
        goal.setColor([.5, 1, 1])
        goal.setPosition([0.7, 0.3, 1])

    # for i in range(max_datapoints):
    i = 0
    while i < max_datapoints:
        set_stick_color(C, random_rgb())
        set_table_color(C, random_rgb())
        randomize_stick_pos(C)

        set_random_goal_pose(C)
        update_goal_frame(C)

        for frame_name in C.getFrameNames():
            if frame_name.startswith("dg_"):
                C.delFrame(frame_name)

        # steps, duration = 30, 1
        # step_duration = duration / steps
        steps_per_phase = 40

        # path planning
        # komo = build_new_path(C, steps_per_phase)

        # IK:
        IK = build_goal_config(C)
        # C.setFrameState(X0)
        print("--- Done Planning")

        # for i in range(steps_per_phase):
        #     step = komo.getConfiguration(i)
        #     C.setFrameState(step)
        #     time.sleep(0.1)
        C.setFrameState(IK.getConfiguration(0))
        print("--- Done Moving")

        # input("pre update cam config")
        camera.updateConfig(C)
        # input("post update cam config")
        print("--- Done Updating Camera")

        if save_dataframe(C, camera, i):
            # Data is randomly generated and sometimes invalid
            i += 1

        print("--- Done Showing Images")
        # input()

    while True:
        time.sleep(1)
def main():
    sl = Stats()

    use_lstm = False
    if use_lstm:
        # lstm net
        # keypoint_network_path = "/home/monti/Desktop/pdc/lstm_2021-02-28/checkpoint_0386.pth"
        # keypoint_network_path = "/home/monti/Desktop/pdc/lstm_2021-02-29/checkpoint_0346.pth"
        # keypoint_network_path = "/home/monti/Desktop/pdc/lstm_2021-02-26/checkpoint_0546.pth"
        # keypoint_network_path = "/home/monti/Desktop/pdc/lstm_2021-03-02/checkpoint_0246.pth"
        # keypoint_network_path = "/home/monti/Desktop/pdc/lstm_2021-03-02_cbrc/checkpoint_0296.pth"
        keypoint_network_path = "/home/monti/Desktop/pdc/lstm_2021-03-02_cbrc/checkpoint_0316.pth"
        keypoint_inference = MankeyKeypointDetectionLSTM(keypoint_network_path)
    else:
        # standard net
        # keypoint_network_path = "/home/monti/Desktop/pdc/resnet18_2021-01-30/checkpoint_0118.pth"
        keypoint_network_path = "/home/monti/Desktop/pdc/resnet18_2021-02-01/checkpoint_0236.pth"
        keypoint_inference = MankeyKeypointDetection(keypoint_network_path)

    C = ry.Config()
    C.addFile("common/single_world.g")
    C.addFrame("occ_frame")

    use_side_cam = True
    if use_side_cam:
        # this camera is located at the side of the robot
        video_cam_frame = C.frame("video_camera")
        video_cam_45 = [1.3, 1.3, 1.35, 0.245372, 0.193942, 0.548013, 0.775797]
        video_cam_more_side = [1.3, 1.1, 1.35, 0.401579, 0.298422, 0.537793, 0.67857]
        video_cam_X = video_cam_more_side
        video_cam_frame.setPosition(video_cam_X[:3])
        video_cam_frame.setQuaternion(video_cam_X[3:])

        video_cam = C.cameraView()
        video_cam.addSensorFromFrame("video_camera")
        video_cam.selectSensor("video_camera")
        video_cam.updateConfig(C)
    else:
        video_cam = C.cameraView()
        video_cam.addSensorFromFrame("camera")
        video_cam.selectSensor("camera")
        video_cam.updateConfig(C)

    viewer = C.view()

    jn = "R_panda_joint6"
    C.setJointState([C.getJointState([jn])[0] + 0.3], [jn])

    # Set stick so that it's in the robot's gripper.
    C.frame("stick").setRelativePosition([0, 0, 0])
    C.frame("stick").setRelativeQuaternion(euler_to_quaternion(0, 0, radians(90)))
    # attach_stick_to_EE(C)
    # random_stick_pos_in_gripper(C, "endeffR")

    set_stick_color(C, random_rgb())
    randomize_stick_length(C)
    # setup_stick(C)
    set_stick_color(C, (0.3, 0.9, 0.1))
    randomize_stick_length(C)

    cf = C.frame("camera")
    camera_front_facing = True
    if camera_front_facing:
        # cf.setPosition([0.5, 1.5, 1.7])  # too far. Depth estimation not reliable
        cf.setPosition([0.5, 1.1, 1.7])
        cf.setQuaternion(euler_to_quaternion(radians(-110), radians(180), -radians(0)))
    else:
        # top down view
        cf.setPosition([0.5, 0.5, 1.4])
        cf.setRotationRad(*[0, 0, 0.7071068, 0.7071068])

    camera = C.cameraView()
    camera.addSensorFromFrame('camera')
    camera.selectSensor('camera')
    camera.updateConfig(C)

    plan_path(C, camera, keypoint_inference, sl, video_cam, use_lstm)
def main():
    # === Setup ===
    # Length of one sequence, i.e. length of a group of images that have a temporal relation
    time_series_length = 5

    output_folders = create_folder_structure()

    # optionally add a description to the dataset
    if len(sys.argv) >= 2:
        with open(os.path.join(output_folders.root_folder, "ds_desc.txt"), "w") as f:
            f.write(sys.argv[1])

    dataframes = {}

    # === Data Generation ===
    C = ry.Config()
    C.addFile("common/single_world.g")
    # viewer = C.view()
    C.addFrame("cam_tmp", parent="camera")
    C.addFrame("occlusion_geometry")

    sample_id = 0
    while sample_id < datapoint_count:
        # Data generation loop for one movement, consisting of multiple images

        # Randomize Scene Contents
        set_table_color(C, random_rgb())
        set_stick_color(C, random_rgb())
        random_stick_pos_in_gripper_local(C, gripper_frame_name="endeffR")
        set_random_camera_pose(C)

        # Set gripper goal
        EE_goal_frame_name = set_random_gripper_goal(C, vis=False, z_interval=(0.05, 0.5))

        # Plan gripper path
        steps, duration = 15, 5.
        komo = plan_komo_path(C, steps=steps, obj_goal=EE_goal_frame_name, duration=duration)

        # === Store data (image and label) for all configs of this sequence ===
        invalid_counter = 0  # used to skip a single robot configuration that doesn't show keypoints
        sequence = []  # holds an image sequence
        for sequence_step_id in range(steps):
            # Step through the different configurations on the movement path.
            # Capture images to generate training data with a temporal context.
            world_config = komo.getConfiguration(sequence_step_id)
            C.setFrameState(world_config)

            ds = DataFrame(C, output_folders)
            ds.extract_data(C)

            # === State machine that captures the sequence ===
            if len(sequence) == 0:
                # first, need an image with two visible keypoints to start a sequence
                if ds.vis_kp_count == 2:
                    sequence.append(ds)
                    invalid_counter = 0
            elif len(sequence) < time_series_length:
                # capture more images for the sequence
                if ds.vis_kp_count == 0:
                    # one image without a keypoint can be skipped
                    if invalid_counter == 1:
                        sequence = []
                        invalid_counter = 0
                    else:
                        # skip this image
                        invalid_counter += 1
                else:
                    # if the image only shows one keypoint, it is ok
                    if ds.vis_kp_count == 2 and random() > 0.5:
                        # for two visible keypoints: maybe place occlusion
                        # geometry and recapture image
                        place_occlusion_geometry(C)
                        ds.extract_data(C)
                    sequence.append(ds)
            elif len(sequence) == time_series_length:
                # done with this sequence. Save it.
                for d_point in sequence:
                    dataframes[sample_id] = d_point.write_images_and_return_dataframe_dict(sample_id)
                    sample_id += 1
                invalid_counter = 0
                sequence = []
                print(f"Added a sequence of {time_series_length} to the dataset.")
            else:
                print("len(sequence) > time_series_length")
                raise RuntimeError("this shouldn't happen")

            hide_occlusion_geometry(C)

            if sample_id >= datapoint_count:
                break

    filename = os.path.join(output_folders.root_folder, dataset_desc_filename)
    dump_data_as_yaml(filename, dataframes)
def basics():
    # Real world configs
    # -- REAL WORLD configuration, which is attached to the physics engine
    # accessing this directly would be cheating!
    RealWorld = ry.Config()
    RealWorld.addFile(join(pathRepo, "scenarios/challenge.g"))

    S = RealWorld.simulation(ry.SimulatorEngine.physx, True)
    S.addSensor("camera")

    # -- MODEL WORLD configuration, this is the data structure on which you represent
    # what you know about the world and compute things (controls, contacts, etc)
    C = ry.Config()
    # D = C.view()  # rather use the ConfigurationViewer below
    C.addFile(join(pathRepo, "scenarios/pandasTable.g"))

    # -- using the viewer, you can view configurations or paths
    V = ry.ConfigurationViewer()
    V.setConfiguration(C)

    # -- the following is the simulation loop
    tau = .01
    # for t in range(300):
    #     time.sleep(0.01)
    #
    #     # grab sensor readings from the simulation
    #     q = S.get_q()
    #     if t % 10 == 0:
    #         [rgb, depth] = S.getImageAndDepth()  # we don't need images with 100Hz, rendering is slow
    #
    #     # some good old fashioned IK
    #     C.setJointState(q)  # set your robot model to match the real q
    #     V.setConfiguration(C)  # to update your model display
    #     [y, J] = C.evalFeature(ry.FS.position, ["R_gripper"])
    #     vel = J.T @ np.linalg.inv(J @ J.T + 1e-2 * np.eye(y.shape[0])) @ [0., -0.1, -1e-1]
    #
    #     # send velocity controls to the simulation
    #     S.step(vel, tau, ry.ControlMode.velocity)

    # add a new frame to the MODEL configuration
    # (Perception will later have to do exactly this: add perceived objects to the model)
    obj = C.addFrame("object")

    # set frame parameters, associate a shape to the frame
    obj.setPosition([.8, 0, 1.5])
    obj.setQuaternion([0, 0, .5, 0])
    obj.setShape(ry.ST.capsule, [.2, .02])
    obj.setColor([1, 0, 1])
    V.setConfiguration(C)
    V.recopyMeshes(C)  # this is rarely necessary, only when meshes/shapes in C change

    radius = 0.4
    x_origin = 0.4
    y_origin = 0.0
    # C.selectJointsByTag(["armL"])
    print('joint names: ', C.getJointNames())

    # -- the following is the simulation loop
    tau = .01
    for t in range(1000):
        time.sleep(0.01)

        x = x_origin + np.cos(t / 200) * radius
        y = y_origin + np.sin(t / 200) * radius

        obj.setQuaternion([1, 0, .5, 0])
        obj.setPosition([x, y, 1.5])

        # grab sensor readings from the simulation
        q = S.get_q()
        if t % 10 == 0:
            [rgb, depth] = S.getImageAndDepth()  # we don't need images with 100Hz, rendering is slow

        # some good old fashioned IK
        C.setJointState(q)  # set your robot model to match the real q
        V.setConfiguration(C)  # to update your model display
        [y, J] = C.evalFeature(ry.FS.scalarProductXX, ["R_gripperCenter", "object"])
        vel = J.T @ np.linalg.inv(J @ J.T + 1e-2 * np.eye(y.shape[0])) @ (-y)
        # print(J.shape)
        # print(vel.shape)

        # send velocity controls to the simulation
        S.step(vel, tau, ry.ControlMode.velocity)
def setup_color_challenge_env():
    num_blocks = 4
    # random.seed(10)
    R = ry.Config()
    R.addFile(join(path_to_rai, "scenarios/challenge.g"))

    # positions = [
    #     [0.02, 0.23, 0.7],
    #     [-0.35, -0.1, 0.7],
    #     [0.2, 0.45, 0.7],
    #     # [0.5, .15, 0.65+side/2],
    #     [0.0, -0.1, 0.7]
    # ]
    #
    # sizes = [
    #     [0.12, 0.08, 0.10, 0],
    #     [0.17, 0.09, 0.12, 0],
    #     [0.14, 0.10, 0.12, 0],
    #     # [0.5, .15, 0.65+side/2],
    #     [0.15, 0.20, 0.11, 0],
    # ]
    #
    # color = [[0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0],
    #          [1, 0.5, 0], [0.5, 0, 1], [0, 1, 0.5], [0, 0.5, 1], [0.5, 1, 0]]
    #
    # for i in range(num_blocks):
    #     name = "obj%i" % i
    #     box = R.frame(name)
    #     box.setPosition(positions[i])
    #     box.setColor([0, 0, 1])
    #     box.setShape(ry.ST.ssBox, sizes[i])
    #     box.setQuaternion([1, 0, 0, 0])
    #     box.setContact(1)

    # Change the color of the objects, depending on how many objects are in the .g file
    obj_count = 0
    for n in R.getFrameNames():
        if n.startswith("obj"):
            obj_count += 1

    for o in range(0, obj_count):
        color = [[0, 1, 1], [1, 0, 1], [1, 1, 0], [0, 1, 0], [1, 0.5, 0],
                 [0.5, 0, 1], [0, 1, 0.5], [0, 0.5, 1], [0.5, 1, 0]]
        name = "obj%i" % o
        R.frame(name).setContact(1)
        R.frame(name).setColor(color[o])

    S = R.simulation(ry.SimulatorEngine.physx, True)
    S.addSensor("camera")

    C = ry.Config()
    C.addFile(join(path_to_rai, 'scenarios/pandasTable.g'))

    V = ry.ConfigurationViewer()
    V.setConfiguration(C)

    R_gripper = C.frame("R_gripper")
    R_gripper.setContact(1)
    L_gripper = C.frame("L_gripper")
    L_gripper.setContact(1)

    return R, S, C, V
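# Example (sketch): wiring up the color-challenge environment and grabbing a first
# camera image, in the same spirit as the perception loop further down in this
# collection.
R, S, C, V = setup_color_challenge_env()
[rgb, depth] = S.getImageAndDepth()
V.setConfiguration(C)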
import cv2 as cv
import numpy as np
import libry as ry
import time

print(cv.__version__)

scene_file_sim = '../models/connect_4_scenario_mirror_sim.g'
scene_file_conf = '../models/connect_4_scenario_mirror_conf.g'

# In[2]
# -------------------------------------------------------------
# create simulation world
# -------------------------------------------------------------
# Let's edit the real world before we create the simulation
RealWorld = ry.Config()
RealWorld.addFile(scene_file_sim)
# RealWorld.addFile('../models/connect_4_balls_demo_1.g')
# RealWorld.addFile('../models/connect_4_balls_demo_2.g')

V = ry.ConfigurationViewer()
V.setConfiguration(RealWorld)

# In[3]
# -------------------------------------------------------------
# manage objects in the simulation world
# -------------------------------------------------------------
# set contacts for models in the scene
# TODO check if we need to do this for RealWorld
for i in range(1, 12):
import sys
sys.path.append("../third_party/rai/rai/ry")
import libry as ry
import numpy as np
import time
import matplotlib as mpl
mpl.use('tkagg')
import matplotlib.pyplot as plt
from math import atan2, degrees

from common.data_extraction import calc_bounding_box
from common.utility import euler_to_quaternion
from common.robot_movement import plan_komo_path, execute_komo_path
# not available anymore
from contact_ros_service import request_prediction

C = ry.Config()
C.addFile("single_world_stick_global.g")
Viewer = C.view()


def setup_stick():
    table = C.frame('table')
    pos_table = table.getPosition()
    size_table = table.getSize()[:2] - 0.2
    size_table = [1., 1.]

    stick = C.frame('stick')
    stick_p1 = C.frame('stick_p1')
    stick_p2 = C.frame('stick_p2')

    xy_stick = (np.random.rand(2) - 0.5) * size_table * np.array(
        [0.7, 0.35]) + pos_table[:2]
def main():
    # Initialization
    np.random.seed(25)

    RealWorld = ry.Config()
    RealWorld.addFile("../../scenarios/challenge.g")

    # Change color of objects
    for o in range(1, 30):
        color = list(np.random.choice(np.arange(0, 1, 0.05), size=2)) + [1]
        name = "obj%i" % o
        RealWorld.frame(name).setColor(color)

    S = RealWorld.simulation(ry.SimulatorEngine.physx, True)
    S.addSensor("camera")

    C = ry.Config()
    C.addFile('../../scenarios/pandasTable.g')
    V = ry.ConfigurationViewer()
    V.setConfiguration(C)
    cameraFrame = C.frame("camera")

    # q0 = C.getJointState()
    R_gripper = C.frame("R_gripper")
    R_gripper.setContact(1)
    L_gripper = C.frame("L_gripper")
    L_gripper.setContact(1)

    # Initialize centroid tracker
    ct = CentroidTracker()

    # the focal length
    f = 0.895
    f = f * 360.
    # the relative pose of the camera
    # pcl.setRelativePose('d(-90 0 0 1) t(-.08 .205 .115) d(26 1 0 0) d(-1 0 1 0) d(6 0 0 1) ')
    fxfypxpy = [f, f, 320., 180.]

    # points = []
    tau = .01
    t = 0
    while True:
        time.sleep(0.01)
        t += 1

        # grab sensor readings from the simulation
        # q = S.get_q()
        if t % 10 == 0:
            [rgb, depth] = S.getImageAndDepth()  # we don't need images with 100Hz, rendering is slow
            points = S.depthData2pointCloud(depth, fxfypxpy)
            cameraFrame.setPointCloud(points, rgb)
            V.recopyMeshes(C)
            V.setConfiguration(C)

            good_contours, edges, hull = detectObjects(rgb=rgb, depth=depth)

            # find hough lines
            # lines = cv.HoughLines(edges, 0.6, np.pi/120, 50)

            objects = ct.update(hull)

            good = np.zeros(rgb.shape, np.uint8)
            cv.drawContours(good, good_contours, -1, (0, 255, 0), 1)

            if len(rgb) > 0:
                cv.imshow('OPENCV - rgb', rgb)
            if len(edges) > 0:
                cv.imshow('OPENCV - gray_bgd', edges)
            if len(good) > 0:
                cv.imshow('OPENCV - depth', good)

            if cv.waitKey(1) & 0xFF == ord('q'):
                break

        S.step([], tau, ry.ControlMode.none)