# common imports used by the snippets below (libry is the rai python binding,
# imported under the `ry` alias as in the rai course setup)
from os.path import join
import time

import numpy as np
import cv2
import cv2 as cv  # some snippets use the `cv` alias, others `cv2`
import libry as ry


def ex2a(C):
    # get q-config from ex1b
    q_start = C.getJointState()

    V = ry.ConfigurationViewer()
    V.setConfiguration(C)

    # ex1b (defined elsewhere) returns a configuration posed at the target
    q_final = ex1b(C).getJointState()

    q_start = np.asarray(q_start)
    q_final = np.asarray(q_final)
    delta = q_final - q_start

    smoothBezier = create_bezier("EaseInOutSine")

    time.sleep(2)

    # interpolate from q_start to q_final along the easing curve
    steps = np.linspace(0, 1, 100)
    for t in steps:
        factor = smoothBezier.solve(t)
        q = q_start + delta * factor
        C.setJointState(q)
        V.setConfiguration(C)
        time.sleep(0.01)

    time.sleep(10)
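
# create_bezier is not defined in this file. A minimal sketch of what it
# might provide for the "EaseInOutSine" curve used above (the class name and
# the solve(t) interface are assumptions, not the project's actual helper):
class EaseInOutSine:
    def solve(self, t):
        # classic ease-in-out sine: 0 at t=0, 1 at t=1, slow at both ends
        return -(np.cos(np.pi * t) - 1.0) / 2.0


def create_bezier(name):
    if name == "EaseInOutSine":
        return EaseInOutSine()
    raise ValueError("unknown easing curve: %s" % name)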
def setup_config(self, scene_path):
    """Creates a model config that mirrors the simulation.

    Args:
        scene_path (str): path to the .g file which contains the scene

    Returns:
        c (libry.Config): model config object
        v (libry.ConfigurationViewer): model config viewer
        objects (list): list of frames that match the objects in the simulation
    """
    # internal robot configuration
    c = ry.Config()
    c.addFile(scene_path)

    # configuration viewer
    v = ry.ConfigurationViewer()
    v.setConfiguration(c)

    # frames for objects in the scene
    objects = []
    for i in range(len(self.sim_objects)):
        obj = c.addFrame('object_{}'.format(i))
        obj.setPosition(self.obj_info[i]['pos'])
        obj.setQuaternion(self.obj_info[i]['quat'])
        obj.setShape(ry.ST.sphere, [.03])
        obj.setColor(self.obj_info[i]['color_code'])
        v.setConfiguration(c)
        objects.append(obj)

    return c, v, objects
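
# setup_config assumes self.sim_objects and self.obj_info are already
# populated. From the fields accessed above, one obj_info entry would look
# roughly like this (values are illustrative, not from the project):
#
#     self.obj_info = [
#         {'pos': [0.1, 0.2, 0.7],       # world position
#          'quat': [1., 0., 0., 0.],     # orientation quaternion
#          'color_code': [1., 0., 0.]},  # RGB in [0, 1]
#     ]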
def _get_CSV(R):
    # attach the real-world config R to the physics engine, with a camera sensor
    S = R.simulation(ry.SimulatorEngine.physx, True)
    S.addSensor("camera")

    # separate model config for planning/IK
    C = ry.Config()
    C.addFile(join(path_to_rai, "scenarios/pandasTable.g"))

    V = ry.ConfigurationViewer()
    V.setConfiguration(C)

    return C, S, V
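
# Usage sketch (path_to_rai is a module-level path to the rai checkout,
# defined elsewhere; R is the real-world config):
#
#     R = ry.Config()
#     R.addFile(join(path_to_rai, "scenarios/challenge.g"))
#     C, S, V = _get_CSV(R)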
def __init__(self, no_obj):
    self.no_obj = no_obj
    # shape name -> [rai shape type, number of size parameters]
    self._shape_dic = {
        'sphere':   [ry.ST.sphere, 1],
        'cylinder': [ry.ST.cylinder, 2],
        'box':      [ry.ST.box, 3]
    }
    self.steps_taken = 0

    self._size_parameters = [0.025, 0.1, 0.002]
    # self._size_parameters = [0.025, 0.06, 0.001]
    self._sizes = np.arange(*self._size_parameters)
    self.positions = np.arange(-0.3, 0.35, 0.05)
    # drop-off locations by object category
    self.locations = {
        'trash': [0.0, -1.3, 0.8],
        'good':  [0.55, 0.05, 0.65],
        'maybe': [-0.55, 0.05, 0.65]
    }
    self._error_threshold = 0.01
    self._error_change_threshold = 0.01
    self.object_indices = np.arange(1, self.no_obj + 1)
    self.objects_spawned = 0

    self.RealWorld = ry.Config()
    self.RealWorld.addFile("../../scenarios/challenge.g")

    self.Model = ry.Config()
    self.Model.addFile('../../scenarios/pandasTable_2.g')
    self.Model_Viewer = ry.ConfigurationViewer()
    self.Model_Viewer.setConfiguration(self.Model)
    self.camera_Frame = self.Model.frame('camera')

    self._reorder_objects()
    for _ in range(self.no_obj):
        # self.spawn_random_object()
        self.spawn_nice_object()

    self.Simulation = self.RealWorld.simulation(ry.SimulatorEngine.physx, True)
    self.Simulation.addSensor('camera')
    self._set_focal_length(0.895)
    self.tau = 0.01

    # reference images of the initial scene, used later for background subtraction
    [self.background_rgb, self.background_depth] = self.Simulation.getImageAndDepth()
    self.background_gray = cv2.cvtColor(self.background_rgb, cv2.COLOR_BGR2GRAY)

    self.open_gripper()
    self.start_JV = self.Simulation.get_q()

    print('Init successful!')
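
# _set_focal_length is defined elsewhere in the class. Given how the focal
# length is turned into camera intrinsics for the 640x360 images in main()
# further below, it plausibly computes something like the following (the
# helper name and the [fx, fy, px, py] layout follow that usage; the actual
# method is not shown in this excerpt):
def intrinsics_from_focal_length(f):
    f = f * 360.
    return [f, f, 320., 180.]  # [fx, fy, px, py] in pixels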
def __init__(self, g_file):
    self.RealWorld = ry.Config()
    self.RealWorld.addFile(g_file)

    self.V = ry.ConfigurationViewer()
    self.V.setConfiguration(self.RealWorld)

    self.S = self.RealWorld.simulation(ry.SimulatorEngine.physx, True)

    # model config, synced to the real world's initial frame state
    self.C = ry.Config()
    self.C.addFile(g_file)
    self.C.setFrameState(self.RealWorld.getFrameState())
    self.V.setConfiguration(self.C)

    self.initial_q = self.S.get_q()

    self.S.addSensor("kitchen_camera")
    self.S.addSensor("table_camera")
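
# Hypothetical usage of the class above (its name is not shown in this
# excerpt; 'Environment' and the .g file path are placeholders):
#
#     env = Environment('../../scenarios/kitchen.g')
#     rgb, depth = env.S.getImageAndDepth()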
print(cv.__version__)

scene_file_sim = '../models/connect_4_scenario_mirror_sim.g'
scene_file_conf = '../models/connect_4_scenario_mirror_conf.g'

# In[2]
# -------------------------------------------------------------
# create simulation world
# -------------------------------------------------------------

# Let's edit the real world before we create the simulation
RealWorld = ry.Config()
RealWorld.addFile(scene_file_sim)
# RealWorld.addFile('../models/connect_4_balls_demo_1.g')
# RealWorld.addFile('../models/connect_4_balls_demo_2.g')

V = ry.ConfigurationViewer()
V.setConfiguration(RealWorld)

# In[3]
# -------------------------------------------------------------
# manage objects in the simulation world
# -------------------------------------------------------------

# set contacts for models in the scene
# TODO check if we need to do this for RealWorld
for i in range(1, 12):
    RealWorld.getFrame("connect4_coll{}".format(i)).setContact(1)

sim_spheres = []
for i in range(1, 67):
def basics():
    # Real world configs
    # -- REAL WORLD configuration, which is attached to the physics engine
    #    accessing this directly would be cheating!
    RealWorld = ry.Config()
    RealWorld.addFile(join(pathRepo, "scenarios/challenge.g"))

    S = RealWorld.simulation(ry.SimulatorEngine.physx, True)
    S.addSensor("camera")

    # -- MODEL WORLD configuration, this is the data structure on which you represent
    #    what you know about the world and compute things (controls, contacts, etc.)
    C = ry.Config()
    # D = C.view()  # rather use the ConfigurationViewer below
    C.addFile(join(pathRepo, "scenarios/pandasTable.g"))

    # -- using the viewer, you can view configurations or paths
    V = ry.ConfigurationViewer()
    V.setConfiguration(C)

    # -- the following is the simulation loop
    tau = .01

    # for t in range(300):
    #     time.sleep(0.01)
    #
    #     # grab sensor readings from the simulation
    #     q = S.get_q()
    #     if t % 10 == 0:
    #         [rgb, depth] = S.getImageAndDepth()  # we don't need images at 100Hz, rendering is slow
    #
    #     # some good old fashioned IK
    #     C.setJointState(q)  # set your robot model to match the real q
    #     V.setConfiguration(C)  # to update your model display
    #     [y, J] = C.evalFeature(ry.FS.position, ["R_gripper"])
    #     vel = J.T @ np.linalg.inv(J @ J.T + 1e-2 * np.eye(y.shape[0])) @ [0., -0.1, -1e-1]
    #
    #     # send velocity controls to the simulation
    #     S.step(vel, tau, ry.ControlMode.velocity)

    # add a new frame to the MODEL configuration
    # (Perception will later have to do exactly this: add perceived objects to the model)
    obj = C.addFrame("object")

    # set frame parameters, associate a shape to the frame
    obj.setPosition([.8, 0, 1.5])
    obj.setQuaternion([0, 0, .5, 0])
    obj.setShape(ry.ST.capsule, [.2, .02])
    obj.setColor([1, 0, 1])
    V.setConfiguration(C)
    V.recopyMeshes(C)  # this is rarely necessary, only when meshes in C change

    radius = 0.4
    x_origin = 0.4
    y_origin = 0.0

    # C.selectJointsByTag(["armL"])
    print('joint names: ', C.getJointNames())

    # -- the following is the simulation loop
    tau = .01
    for t in range(1000):
        time.sleep(0.01)

        # move the object frame on a circle around (x_origin, y_origin)
        x = x_origin + np.cos(t / 200) * radius
        y = y_origin + np.sin(t / 200) * radius
        obj.setQuaternion([1, 0, .5, 0])
        obj.setPosition([x, y, 1.5])

        # grab sensor readings from the simulation
        q = S.get_q()
        if t % 10 == 0:
            [rgb, depth] = S.getImageAndDepth()  # we don't need images at 100Hz, rendering is slow

        # some good old fashioned IK
        C.setJointState(q)  # set your robot model to match the real q
        V.setConfiguration(C)  # to update your model display
        [err, J] = C.evalFeature(ry.FS.scalarProductXX, ["R_gripperCenter", "object"])
        vel = J.T @ np.linalg.inv(J @ J.T + 1e-2 * np.eye(err.shape[0])) @ (-err)
        # print(J.shape)
        # print(vel.shape)

        # send velocity controls to the simulation
        S.step(vel, tau, ry.ControlMode.velocity)
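
# The control law in the loop above is damped-least-squares (pseudoinverse)
# IK: for a feature error err with Jacobian J, the joint velocity
#     vel = J^T (J J^T + lambda * I)^{-1} (-err)
# drives the feature toward its target, and the damping lambda (1e-2 above)
# keeps the matrix inverse well-conditioned near singularities. The same step
# in isolation (a sketch, not part of the original code):
def damped_ik_step(err, J, damping=1e-2):
    err = np.asarray(err).reshape(-1)
    return J.T @ np.linalg.inv(J @ J.T + damping * np.eye(err.shape[0])) @ (-err)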
def setup_color_challenge_env():
    num_blocks = 4
    # random.seed(10)
    R = ry.Config()
    R.addFile(join(path_to_rai, "scenarios/challenge.g"))

    # positions = [
    #     [0.02, 0.23, 0.7],
    #     [-0.35, -0.1, 0.7],
    #     [0.2, 0.45, 0.7],
    #     # [0.5, .15, 0.65+side/2],
    #     [0.0, -0.1, 0.7]
    # ]
    #
    # sizes = [
    #     [0.12, 0.08, 0.10, 0],
    #     [0.17, 0.09, 0.12, 0],
    #     [0.14, 0.10, 0.12, 0],
    #     # [0.5, .15, 0.65+side/2],
    #     [0.15, 0.20, 0.11, 0],
    # ]
    #
    # color = [[0, 0, 1], [0, 1, 0], [0, 1, 1], [1, 0, 0], [1, 0, 1], [1, 1, 0],
    #          [1, 0.5, 0], [0.5, 0, 1], [0, 1, 0.5], [0, 0.5, 1], [0.5, 1, 0]]
    #
    # for i in range(num_blocks):
    #     name = "obj%i" % i
    #     box = R.frame(name)
    #     box.setPosition(positions[i])
    #     box.setColor([0, 0, 1])
    #     box.setShape(ry.ST.ssBox, sizes[i])
    #     box.setQuaternion([1, 0, 0, 0])
    #     box.setContact(1)

    # Change the color of the objects, depending on how many objects the .g file contains
    obj_count = 0
    for n in R.getFrameNames():
        if n.startswith("obj"):
            obj_count += 1

    color = [[0, 1, 1], [1, 0, 1], [1, 1, 0], [0, 1, 0], [1, 0.5, 0],
             [0.5, 0, 1], [0, 1, 0.5], [0, 0.5, 1], [0.5, 1, 0]]
    for o in range(obj_count):
        name = "obj%i" % o
        R.frame(name).setContact(1)
        R.frame(name).setColor(color[o % len(color)])  # wrap around if there are more objects than colors

    S = R.simulation(ry.SimulatorEngine.physx, True)
    S.addSensor("camera")

    C = ry.Config()
    C.addFile(join(path_to_rai, 'scenarios/pandasTable.g'))

    V = ry.ConfigurationViewer()
    V.setConfiguration(C)

    R_gripper = C.frame("R_gripper")
    R_gripper.setContact(1)
    L_gripper = C.frame("L_gripper")
    L_gripper.setContact(1)

    return R, S, C, V
def vis(C, duration):
    # -- using the viewer, you can view configurations or paths
    V = ry.ConfigurationViewer()
    V.setConfiguration(C)
    time.sleep(duration)
def ex3a(C):
    # `step` is how far we want to look ahead
    def get_komo(C, step):
        komo = C.komo_path(1, 50, step, True)

        # stay smooth in joint space (order-2 penalizes accelerations)
        komo.addObjective(time=[], type=ry.OT.sos, feature=ry.FS.qItself,
                          scale=[1] * 16, order=2)

        # relative distance of the grippers from each other
        komo.addObjective(time=[1.], type=ry.OT.sos, feature=ry.FS.positionRel,
                          frames=['R_gripper', 'L_gripper'],
                          target=[0.0, 0.0, -0.2], scale=[1e2] * 3)

        # Z-axes should point in opposite directions (grippers face each other)
        komo.addObjective(time=[0.9, 1.], type=ry.OT.sos, feature=ry.FS.scalarProductZZ,
                          frames=['R_gripper', 'L_gripper'], target=[-1])

        # gripper hands should be orthogonal, so no collision
        komo.addObjective(time=[0.9, 1.], type=ry.OT.eq, feature=ry.FS.scalarProductXX,
                          frames=['R_gripper', 'L_gripper'], target=[0])

        # define the relation between box and gripper
        komo.addObjective(time=[1.], type=ry.OT.sos, feature=ry.FS.positionRel,
                          frames=["goal", "R_gripper"],
                          target=[0.0, 0.0, -0.1], scale=[1e2] * 3)
        komo.addObjective(time=[0.9, 1.], type=ry.OT.eq, feature=ry.FS.scalarProductXZ,
                          frames=['R_gripper', 'goal'], target=[1], scale=[1e2])
        komo.addObjective(time=[0.9, 1.], type=ry.OT.eq, feature=ry.FS.scalarProductXZ,
                          frames=['goal', 'R_gripper'], target=[1], scale=[1e2])

        # no contact
        komo.addObjective([], ry.FS.accumulatedCollisions, [], ry.OT.ineq, [1e2])

        komo.optimize(True)
        return komo

    # visualize
    V = ry.ConfigurationViewer()
    V.setConfiguration(C)

    step_size = 5
    duration = 30
    for t in range(duration // step_size):
        # re-plan over the remaining horizon, then play back the first steps
        step_future = duration - step_size * t
        komo = get_komo(C, step_future)
        for i in range(step_size):
            C.setFrameState(komo.getConfiguration(i))
            V.setConfiguration(C)
            time.sleep(0.1)

    time.sleep(10)
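
# The outer loop above is a simple receding-horizon scheme: each iteration
# solves a fresh KOMO problem over the remaining `step_future` steps and only
# plays back the first `step_size` configurations before re-planning. The
# playback in isolation (a sketch; assumes `komo` holds an optimized path of
# at least n configurations):
def play_path(C, V, komo, n, dt=0.1):
    for i in range(n):
        C.setFrameState(komo.getConfiguration(i))
        V.setConfiguration(C)
        time.sleep(dt)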
def main():
    # Initialization
    np.random.seed(25)

    RealWorld = ry.Config()
    RealWorld.addFile("../../scenarios/challenge.g")

    # Change the color of the objects
    for o in range(1, 30):
        color = list(np.random.choice(np.arange(0, 1, 0.05), size=2)) + [1]
        name = "obj%i" % o
        RealWorld.frame(name).setColor(color)

    S = RealWorld.simulation(ry.SimulatorEngine.physx, True)
    S.addSensor("camera")

    C = ry.Config()
    C.addFile('../../scenarios/pandasTable.g')
    V = ry.ConfigurationViewer()
    V.setConfiguration(C)
    cameraFrame = C.frame("camera")
    # q0 = C.getJointState()

    R_gripper = C.frame("R_gripper")
    R_gripper.setContact(1)
    L_gripper = C.frame("L_gripper")
    L_gripper.setContact(1)

    # Initialize centroid tracker (imported from elsewhere in the project,
    # as is detectObjects below)
    ct = CentroidTracker()

    # the focal length
    f = 0.895
    f = f * 360.
    # the relative pose of the camera
    # pcl.setRelativePose('d(-90 0 0 1) t(-.08 .205 .115) d(26 1 0 0) d(-1 0 1 0) d(6 0 0 1) ')
    fxfypxpy = [f, f, 320., 180.]

    # points = []
    tau = .01
    t = 0
    while True:
        time.sleep(0.01)
        t += 1

        # grab sensor readings from the simulation
        # q = S.get_q()
        if t % 10 == 0:
            [rgb, depth] = S.getImageAndDepth()  # we don't need images at 100Hz, rendering is slow
            points = S.depthData2pointCloud(depth, fxfypxpy)
            cameraFrame.setPointCloud(points, rgb)
            V.recopyMeshes(C)
            V.setConfiguration(C)

            good_contours, edges, hull = detectObjects(rgb=rgb, depth=depth)

            # find hough lines
            # lines = cv.HoughLines(edges, 0.6, np.pi/120, 50)

            objects = ct.update(hull)

            good = np.zeros(rgb.shape, np.uint8)
            cv.drawContours(good, good_contours, -1, (0, 255, 0), 1)

            if len(rgb) > 0:
                cv.imshow('OPENCV - rgb', rgb)
            if len(edges) > 0:
                cv.imshow('OPENCV - gray_bgd', edges)
            if len(good) > 0:
                cv.imshow('OPENCV - depth', good)

        if cv.waitKey(1) & 0xFF == ord('q'):
            break

        S.step([], tau, ry.ControlMode.none)
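
# CentroidTracker is not defined in this excerpt. From its use above, it is
# assumed to expose update(contours) -> {object_id: centroid}. A deliberately
# naive sketch of that interface (illustrative only; a real tracker would
# match new centroids to existing IDs by distance instead of always
# registering them as new objects):
class SimpleCentroidTracker:
    def __init__(self):
        self.next_id = 0
        self.objects = {}

    def update(self, contours):
        for c in contours:
            m = cv.moments(c)
            if m['m00'] == 0:
                continue  # degenerate contour, no area
            centroid = (m['m10'] / m['m00'], m['m01'] / m['m00'])
            self.objects[self.next_id] = centroid
            self.next_id += 1
        return self.objects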