def main(interactive=True, try_use_sim=True, f=None):

    # config
    this_dir = os.path.dirname(os.path.realpath(__file__))
    f = choose_random_framework(excluded=['numpy']) if f is None else f
    set_framework(f)
    sim = Simulator(interactive, try_use_sim)
    lr = 0.5
    num_anchors = 3
    num_sample_points = 100

    # spline start
    anchor_points = ivy.cast(
        ivy.expand_dims(ivy.linspace(0, 1, 2 + num_anchors), -1), 'float32')
    query_points = ivy.cast(
        ivy.expand_dims(ivy.linspace(0, 1, num_sample_points), -1), 'float32')

    # learnable parameters
    robot_start_config = ivy.array(ivy.cast(sim.robot_start_config, 'float32'))
    robot_target_config = ivy.array(
        ivy.cast(sim.robot_target_config, 'float32'))
    learnable_anchor_vals = ivy.variable(
        ivy.cast(
            ivy.transpose(
                ivy.linspace(robot_start_config, robot_target_config,
                             2 + num_anchors)[..., 1:-1], (1, 0)), 'float32'))

    # optimizer
    optimizer = ivy.SGD(lr=lr)

    # optimize
    it = 0
    colliding = True
    clearance = 0
    joint_query_vals = None
    while colliding:
        total_cost, grads, joint_query_vals, link_positions, sdf_vals = ivy.execute_with_gradients(
            lambda xs: compute_cost_and_sdfs(
                xs['w'], anchor_points, robot_start_config,
                robot_target_config, query_points, sim),
            Container({'w': learnable_anchor_vals}))
        colliding = ivy.reduce_min(sdf_vals[2:]) < clearance
        sim.update_path_visualization(
            link_positions, sdf_vals,
            os.path.join(this_dir, 'msp_no_sim', 'path_{}.png'.format(it)))
        learnable_anchor_vals = optimizer.step(
            Container({'w': learnable_anchor_vals}), grads)['w']
        it += 1
    sim.execute_motion(joint_query_vals)
    sim.close()
    unset_framework()
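# A minimal, hypothetical entry point for running the optimization demo
# above directly as a script; it is not part of the original excerpt, and
# simply forwards the defaults of main.
if __name__ == '__main__':
    main(interactive=True, try_use_sim=True)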
def test_linear_layer(bs_ic_oc_target, with_v, dtype_str, tensor_fn, dev_str, call):
    # smoke test
    batch_shape, input_channels, output_channels, target = bs_ic_oc_target
    x = ivy.cast(
        ivy.linspace(ivy.zeros(batch_shape), ivy.ones(batch_shape),
                     input_channels), 'float32')
    if with_v:
        np.random.seed(0)
        wlim = (6 / (output_channels + input_channels)) ** 0.5
        w = ivy.variable(
            ivy.array(
                np.random.uniform(-wlim, wlim,
                                  (output_channels, input_channels)),
                'float32'))
        b = ivy.variable(ivy.zeros([output_channels]))
        v = Container({'w': w, 'b': b})
    else:
        v = None
    linear_layer = ivy.Linear(input_channels, output_channels, v=v)
    ret = linear_layer(x)
    # type test
    assert ivy.is_array(ret)
    # cardinality test
    assert ret.shape == tuple(batch_shape + [output_channels])
    # value test
    if not with_v:
        return
    assert np.allclose(call(linear_layer, x), np.array(target))
    # compilation test
    if call is helpers.torch_call:
        # pytest scripting does not support **kwargs
        return
    helpers.assert_compilable(linear_layer)
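# A side note on the initialisation used in the test above: the bound
# wlim = (6 / (fan_in + fan_out)) ** 0.5 is the Glorot (Xavier) uniform
# limit, a standard choice for linear layers, which ivy.Linear presumably
# mirrors for its default weight initialisation. A minimal sketch (the
# helper name is illustrative, not part of the original suite):
def _glorot_uniform_sketch(fan_in, fan_out):
    import numpy as np  # local import to keep the sketch self-contained
    # sample a (fan_out, fan_in) weight matrix within the Glorot bound
    wlim = (6 / (fan_in + fan_out)) ** 0.5
    return np.random.uniform(-wlim, wlim, (fan_out, fan_in))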
def test_lstm(b_t_ic_hc_otf_sctv, dtype_str, tensor_fn, dev_str, call):
    # smoke test
    b, t, input_channels, hidden_channels, output_true_flat, state_c_true_val = b_t_ic_hc_otf_sctv
    x = ivy.cast(
        ivy.linspace(ivy.zeros([b, t]), ivy.ones([b, t]), input_channels),
        'float32')
    init_h = ivy.ones([b, hidden_channels])
    init_c = ivy.ones([b, hidden_channels])
    kernel = ivy.variable(ivy.ones([input_channels, 4 * hidden_channels])) * 0.5
    recurrent_kernel = ivy.variable(
        ivy.ones([hidden_channels, 4 * hidden_channels])) * 0.5
    output, state_c = ivy.lstm_update(x, init_h, init_c, kernel,
                                      recurrent_kernel)
    # type test
    assert ivy.is_array(output)
    assert ivy.is_array(state_c)
    # cardinality test
    assert output.shape == (b, t, hidden_channels)
    assert state_c.shape == (b, hidden_channels)
    # value test
    output_true = np.tile(
        np.asarray(output_true_flat).reshape((b, t, 1)),
        (1, 1, hidden_channels))
    state_c_true = np.ones([b, hidden_channels]) * state_c_true_val
    output, state_c = call(ivy.lstm_update, x, init_h, init_c, kernel,
                           recurrent_kernel)
    assert np.allclose(output, output_true, atol=1e-6)
    assert np.allclose(state_c, state_c_true, atol=1e-6)
    # compilation test
    if call in [helpers.torch_call]:
        # this is not a backend implemented function
        pytest.skip()
    helpers.assert_compilable(ivy.lstm_update)
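# A hedged shape sketch for ivy.lstm_update, inferred from the test above
# (the helper name is illustrative, not from the original suite). The
# 4 * hidden_channels kernel width presumably packs the four LSTM gates
# (input, forget, cell, output) side by side.
def _lstm_update_shape_sketch(b=2, t=3, input_channels=4, hidden_channels=5):
    x = ivy.ones([b, t, input_channels])
    init_h = ivy.zeros([b, hidden_channels])
    init_c = ivy.zeros([b, hidden_channels])
    kernel = ivy.ones([input_channels, 4 * hidden_channels])
    recurrent_kernel = ivy.ones([hidden_channels, 4 * hidden_channels])
    output, state_c = ivy.lstm_update(x, init_h, init_c, kernel,
                                      recurrent_kernel)
    # full hidden-state sequence, and the final cell state
    assert output.shape == (b, t, hidden_channels)
    assert state_c.shape == (b, hidden_channels)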
def test_lstm_layer(b_t_ic_hc_otf_sctv, with_v, with_initial_state, dtype_str, tensor_fn, dev_str, call):
    # smoke test
    b, t, input_channels, hidden_channels, output_true_flat, state_c_true_val = b_t_ic_hc_otf_sctv
    x = ivy.cast(
        ivy.linspace(ivy.zeros([b, t]), ivy.ones([b, t]), input_channels),
        'float32')
    if with_initial_state:
        init_h = ivy.ones([b, hidden_channels])
        init_c = ivy.ones([b, hidden_channels])
        initial_state = ([init_h], [init_c])
    else:
        initial_state = None
    if with_v:
        kernel = ivy.variable(
            ivy.ones([input_channels, 4 * hidden_channels]) * 0.5)
        recurrent_kernel = ivy.variable(
            ivy.ones([hidden_channels, 4 * hidden_channels]) * 0.5)
        v = Container({'input': {'layer_0': {'w': kernel}},
                       'recurrent': {'layer_0': {'w': recurrent_kernel}}})
    else:
        v = None
    lstm_layer = ivy.LSTM(input_channels, hidden_channels, v=v)
    output, (state_h, state_c) = lstm_layer(x, initial_state=initial_state)
    # type test
    assert ivy.is_array(output)
    assert ivy.is_array(state_h[0])
    assert ivy.is_array(state_c[0])
    # cardinality test
    assert output.shape == (b, t, hidden_channels)
    assert state_h[0].shape == (b, hidden_channels)
    assert state_c[0].shape == (b, hidden_channels)
    # value test
    if not with_v or not with_initial_state:
        return
    output_true = np.tile(
        np.asarray(output_true_flat).reshape((b, t, 1)),
        (1, 1, hidden_channels))
    state_c_true = np.ones([b, hidden_channels]) * state_c_true_val
    output, (state_h, state_c) = call(lstm_layer, x,
                                      initial_state=initial_state)
    assert np.allclose(output, output_true, atol=1e-6)
    assert np.allclose(state_c, state_c_true, atol=1e-6)
    # compilation test
    if call in [helpers.torch_call]:
        # this is not a backend implemented function
        pytest.skip()
    helpers.assert_compilable(ivy.lstm_update)
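# A brief sketch of using ivy.LSTM without passing v, assuming (as the
# with_v=False branch above suggests) that the layer then builds its own
# variable Container with the same input/layer_0/w and recurrent/layer_0/w
# structure constructed manually in the test. Illustrative only.
def _default_lstm_sketch(b=2, t=3, input_channels=4, hidden_channels=5):
    lstm_layer = ivy.LSTM(input_channels, hidden_channels)
    x = ivy.ones([b, t, input_channels])
    # initial_state=None lets the layer supply its own zero states
    output, (state_h, state_c) = lstm_layer(x, initial_state=None)
    assert output.shape == (b, t, hidden_channels)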
def test_sgd_optimizer(bs_ic_oc_target, with_v, dtype_str, tensor_fn, dev_str, call):
    # smoke test
    if call is helpers.np_call:
        # NumPy does not support gradients
        pytest.skip()
    batch_shape, input_channels, output_channels, target = bs_ic_oc_target
    x = ivy.cast(
        ivy.linspace(ivy.zeros(batch_shape), ivy.ones(batch_shape),
                     input_channels), 'float32')
    if with_v:
        np.random.seed(0)
        wlim = (6 / (output_channels + input_channels)) ** 0.5
        w = ivy.variable(
            ivy.array(
                np.random.uniform(-wlim, wlim,
                                  (output_channels, input_channels)),
                'float32'))
        b = ivy.variable(ivy.zeros([output_channels]))
        v = Container({'w': w, 'b': b})
    else:
        v = None
    linear_layer = ivy.Linear(input_channels, output_channels, v=v)

    def loss_fn(v_):
        out = linear_layer(x, v=v_)
        return ivy.reduce_mean(out)[0]

    # optimizer
    optimizer = ivy.SGD()

    # train
    loss_tm1 = 1e12
    loss = None
    grads = None
    for i in range(10):
        loss, grads = ivy.execute_with_gradients(loss_fn, linear_layer.v)
        linear_layer.v = optimizer.step(linear_layer.v, grads)
        assert loss < loss_tm1
        loss_tm1 = loss

    # type test
    assert ivy.is_array(loss)
    assert isinstance(grads, ivy.Container)

    # cardinality test
    if call is helpers.mx_call:
        # mxnet slicing cannot reduce dimension to zero
        assert loss.shape == (1,)
    else:
        assert loss.shape == ()

    # value test
    assert ivy.reduce_max(ivy.abs(grads.b)) > 0
    assert ivy.reduce_max(ivy.abs(grads.w)) > 0

    # compilation test
    if call is helpers.torch_call:
        # pytest scripting does not support **kwargs
        return
    helpers.assert_compilable(loss_fn)
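# The update rule ivy.SGD applies at each optimizer.step call, written out
# as a sketch for clarity (plain stochastic gradient descent, equivalent to
# the ivy.gradient_descent_update used elsewhere in these tests):
#
#     w <- w - lr * dL/dw
#
def _sgd_step_sketch(w, dl_dw, lr):
    # one vanilla gradient-descent step on a single weight array
    return w - lr * dl_dw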
def test_module_w_none_attribute(bs_ic_oc, dev_str, call):
    # smoke test
    if call is helpers.np_call:
        # NumPy does not support gradients
        pytest.skip()
    batch_shape, input_channels, output_channels = bs_ic_oc
    x = ivy.cast(
        ivy.linspace(ivy.zeros(batch_shape), ivy.ones(batch_shape),
                     input_channels), 'float32')
    module = ModuleWithNoneAttribute()
def test_module_training(bs_ic_oc, dev_str, call):
    # smoke test
    if call is helpers.np_call:
        # NumPy does not support gradients
        pytest.skip()
    batch_shape, input_channels, output_channels = bs_ic_oc
    x = ivy.cast(
        ivy.linspace(ivy.zeros(batch_shape), ivy.ones(batch_shape),
                     input_channels), 'float32')
    module = TrainableModule(input_channels, output_channels)

    def loss_fn(v_):
        out = module(x, v=v_)
        return ivy.reduce_mean(out)[0]

    # train
    loss_tm1 = 1e12
    loss = None
    grads = None
    for i in range(10):
        loss, grads = ivy.execute_with_gradients(loss_fn, module.v)
        module.v = ivy.gradient_descent_update(module.v, grads, 1e-3)
        assert loss < loss_tm1
        loss_tm1 = loss

    # type test
    assert ivy.is_array(loss)
    assert isinstance(grads, ivy.Container)

    # cardinality test
    if call is helpers.mx_call:
        # mxnet slicing cannot reduce dimension to zero
        assert loss.shape == (1,)
    else:
        assert loss.shape == ()

    # value test
    assert ivy.reduce_max(ivy.abs(grads.linear0.b)) > 0
    assert ivy.reduce_max(ivy.abs(grads.linear0.w)) > 0
    assert ivy.reduce_max(ivy.abs(grads.linear1.b)) > 0
    assert ivy.reduce_max(ivy.abs(grads.linear1.w)) > 0
    assert ivy.reduce_max(ivy.abs(grads.linear2.b)) > 0
    assert ivy.reduce_max(ivy.abs(grads.linear2.w)) > 0

    # compilation test
    if call is helpers.torch_call:
        # pytest scripting does not support **kwargs
        return
    helpers.assert_compilable(loss_fn)
def stratified_sample(starts, ends, num_samples, batch_shape=None):
    """
    Perform stratified sampling between start and end arrays. This operation
    divides each range into equal-width bins, and uniformly samples one value
    within each of these bins.

    :param starts: Start values *[batch_shape]*
    :type starts: array
    :param ends: End values *[batch_shape]*
    :type ends: array
    :param num_samples: The number of samples to generate between starts and ends
    :type num_samples: int
    :param batch_shape: Shape of batch. Inferred from inputs if None.
    :type batch_shape: sequence of ints, optional
    :return: The stratified samples, with each randomly placed in uniformly spaced bins *[batch_shape,num_samples]*
    """

    # shapes
    if batch_shape is None:
        batch_shape = starts.shape

    # shapes as lists
    batch_shape = list(batch_shape)

    # BS
    bin_sizes = (ends - starts) / num_samples

    # BS x NS
    linspace_vals = ivy.linspace(starts, ends - bin_sizes, num_samples)

    # BS x NS
    random_uniform = ivy.random_uniform(shape=batch_shape + [num_samples],
                                        dev_str=ivy.dev_str(starts))

    # BS x NS
    random_offsets = random_uniform * ivy.expand_dims(bin_sizes, -1)

    # BS x NS
    return linspace_vals + random_offsets
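# A hypothetical usage sketch for stratified_sample (assuming an ivy
# framework has already been set): each returned sample falls inside its
# own equal-width bin, so samples are jittered but remain ordered.
def _stratified_sample_example():
    starts = ivy.array([0., 0.])  # BS = [2]
    ends = ivy.array([1., 2.])    # BS = [2]
    samples = stratified_sample(starts, ends, 4)
    # BS x NS = 2 x 4; samples[i, j] lies in the j-th of 4 bins, each of
    # width (ends[i] - starts[i]) / 4
    assert samples.shape == (2, 4)
    return samples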
def __init__(self, interactive, try_use_sim):
    super().__init__(interactive, try_use_sim)

    # initialize scene
    if self.with_pyrep:
        self._spherical_vision_sensor.remove()
        for i in range(6):
            self._vision_sensors[i].remove()
            self._vision_sensor_bodies[i].remove()
            [ray.remove() for ray in self._vision_sensor_rays[i]]
        self._box.set_position(np.array([0.55, 0, 0.9]))
        self._robot.set_position(np.array([0.85003, -0.024983, 0.77837]))
        self._robot._ik_target.set_position(np.array([0, 0, -1]))
        self._robot.get_tip().set_parent(self._robot._ik_target)
        self._robot.get_tip().set_position(np.array([0, 0, -1]))
        robot_start_config = ivy.array(
            [100., 100., 240., 180., 180., 120.]) * np.pi / 180
        [j.set_joint_position(p, False)
         for j, p in zip(self._robot.joints,
                         ivy.to_numpy(robot_start_config).tolist())]
        robot_target_config = ivy.array(
            [260., 100., 220., 0., 180., 45.]) * np.pi / 180
        self._robot_target.set_position(
            np.array([0.85003, -0.024983, 0.77837]))
        self._robot_target._ik_target.set_position(np.array([0, 0, -1]))
        self._robot_target.get_tip().set_parent(
            self._robot_target._ik_target)
        self._robot_target.get_tip().set_position(np.array([0, 0, -1]))
        [j.set_joint_position(p, False)
         for j, p in zip(self._robot_target.joints,
                         ivy.to_numpy(robot_target_config).tolist())]
        self._default_camera.set_position(
            np.array([0.094016, -1.2767, 1.7308]))
        self._default_camera.set_orientation(
            np.array([i * np.pi / 180 for i in [-121.32, 27.760, -164.18]]))
        input(
            '\nScene initialized.\n\n'
            'The simulator visualizer can be translated and rotated by clicking either the left mouse button or the wheel, '
            'and then dragging the mouse.\n'
            'Scrolling the mouse wheel zooms the view in and out.\n\n'
            'You can click on any object either in the scene or the left hand panel, '
            'then select the box icon with four arrows in the top panel of the simulator, '
            'and then drag the object around dynamically.\n'
            'Starting to drag and then holding ctrl allows you to also drag the object up and down.\n'
            'Clicking the top icon with a box and two rotating arrows similarly allows rotation of the object.\n\n'
            'The joint angles of either the robot or target robot configuration can also be changed.\n'
            'To do this, open the Mico or MicoTarget drop-downs on the left, click on one of the joints "Mico_jointx", '
            'and then click on the magnifying glass over a box on the left-most panel.\n'
            'In the window that opens, change the value in the field Position [deg], and close the window again.\n\n'
            'Once you have arranged the scene as desired, press enter in the terminal to continue with the demo...\n')

        # primitive scene
        self.setup_primitive_scene()

        # robot configs
        robot_start_config = ivy.array(self._robot.get_joint_positions(),
                                       'float32')
        robot_target_config = ivy.array(
            self._robot_target.get_joint_positions(), 'float32')

        # ivy robot
        self._ivy_manipulator = MicoManipulator(
            ivy_mech.make_transformation_homogeneous(
                ivy.reshape(ivy.array(self._robot_base.get_matrix()),
                            (3, 4))))

        # spline path
        interpolated_joint_path = ivy.transpose(
            ivy.linspace(robot_start_config, robot_target_config, 100),
            (1, 0))
        multi_spline_points = ivy.transpose(
            self._ivy_manipulator.sample_links(interpolated_joint_path),
            (1, 0, 2))
        multi_spline_sdf_vals = ivy.reshape(
            self.sdf(ivy.reshape(multi_spline_points, (-1, 3))),
            (-1, 100, 1))
        self.update_path_visualization(multi_spline_points,
                                       multi_spline_sdf_vals, None)

        # public objects
        self.ivy_manipulator = self._ivy_manipulator
        self.robot_start_config = robot_start_config
        self.robot_target_config = robot_target_config

        # wait for user input
        self._user_prompt(
            '\nInitialized scene with a robot and a target robot configuration to reach.'
            '\nPress enter in the terminal to use method ivy_robot.interpolate_spline_points '
            'to plan a spline path which reaches the target configuration whilst avoiding the objects in the scene...\n')

    else:
        # primitive scene
        self.setup_primitive_scene_no_sim(box_pos=np.array([0.55, 0, 0.9]))

        # ivy robot
        base_inv_ext_mat = ivy.array([[1, 0, 0, 0.84999895],
                                      [0, 1, 0, -0.02500308],
                                      [0, 0, 1, 0.70000124]])
        self.ivy_manipulator = MicoManipulator(
            ivy_mech.make_transformation_homogeneous(base_inv_ext_mat))
        self.robot_start_config = ivy.array(
            [100., 100., 240., 180., 180., 120.]) * np.pi / 180
        self.robot_target_config = ivy.array(
            [260., 100., 220., 0., 180., 45.]) * np.pi / 180

        # message
        print(
            '\nInitialized dummy scene with a robot and a target robot configuration to reach.'
            '\nClose the visualization window to use method ivy_robot.interpolate_spline_points '
            'to plan a spline path which reaches the target configuration whilst avoiding the objects in the scene...\n')

        # plot scene before rotation
        if interactive:
            plt.imshow(
                mpimg.imread(
                    os.path.join(
                        os.path.dirname(os.path.realpath(__file__)),
                        'msp_no_sim', 'path_0.png')))
            plt.show()

    # message
    print('\nOptimizing spline path...\n')
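# A small sketch of what ivy_mech.make_transformation_homogeneous is
# assumed to do with the 3 x 4 base matrices above: append the constant
# row [0., 0., 0., 1.] to form a full 4 x 4 homogeneous transform. The
# helper name is illustrative only.
def _make_homogeneous_sketch(mat_3x4):
    bottom_row = ivy.array([[0., 0., 0., 1.]])
    # 3 x 4 -> 4 x 4
    return ivy.concatenate((mat_3x4, bottom_row), -2)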
def test_lstm_layer_training(b_t_ic_hc_otf_sctv, with_v, dtype_str, tensor_fn, dev_str, call):
    # smoke test
    if call is helpers.np_call:
        # NumPy does not support gradients
        pytest.skip()
    b, t, input_channels, hidden_channels, output_true_flat, state_c_true_val = b_t_ic_hc_otf_sctv
    x = ivy.cast(
        ivy.linspace(ivy.zeros([b, t]), ivy.ones([b, t]), input_channels),
        'float32')
    if with_v:
        kernel = ivy.variable(
            ivy.ones([input_channels, 4 * hidden_channels]) * 0.5)
        recurrent_kernel = ivy.variable(
            ivy.ones([hidden_channels, 4 * hidden_channels]) * 0.5)
        v = Container({'input': {'layer_0': {'w': kernel}},
                       'recurrent': {'layer_0': {'w': recurrent_kernel}}})
    else:
        v = None
    lstm_layer = ivy.LSTM(input_channels, hidden_channels, v=v)

    def loss_fn(v_):
        out, (state_h, state_c) = lstm_layer(x, v=v_)
        return ivy.reduce_mean(out)[0]

    # train
    loss_tm1 = 1e12
    loss = None
    grads = None
    for i in range(10):
        loss, grads = ivy.execute_with_gradients(loss_fn, lstm_layer.v)
        lstm_layer.v = ivy.gradient_descent_update(lstm_layer.v, grads, 1e-3)
        assert loss < loss_tm1
        loss_tm1 = loss

    # type test
    assert ivy.is_array(loss)
    assert isinstance(grads, ivy.Container)

    # cardinality test
    if call is helpers.mx_call:
        # mxnet slicing cannot reduce dimension to zero
        assert loss.shape == (1,)
    else:
        assert loss.shape == ()

    # value test
    for key, val in grads.to_iterator():
        assert ivy.reduce_max(ivy.abs(val)) > 0

    # compilation test
    if call is helpers.torch_call:
        # pytest scripting does not support **kwargs
        return
    helpers.assert_compilable(loss_fn)
def __init__(self, interactive, try_use_sim):
    super().__init__(interactive, try_use_sim)

    # ivy robot
    rel_body_points = ivy.array([[0., 0., 0.],
                                 [-0.15, 0., -0.15],
                                 [-0.15, 0., 0.15],
                                 [0.15, 0., -0.15],
                                 [0.15, 0., 0.15]])
    self.ivy_drone = RigidMobile(rel_body_points)

    # initialize scene
    if self.with_pyrep:
        self._spherical_vision_sensor.remove()
        for i in range(6):
            self._vision_sensors[i].remove()
            self._vision_sensor_bodies[i].remove()
            [ray.remove() for ray in self._vision_sensor_rays[i]]
        drone_start_pos = np.array([-1.15, -1.028, 0.6])
        target_pos = np.array([1.025, 1.125, 0.6])
        self._drone.set_position(drone_start_pos)
        self._drone.set_orientation(
            np.array([-90, 50, -180]) * np.pi / 180)
        self._target.set_position(target_pos)
        self._target.set_orientation(
            np.array([-90, 50, -180]) * np.pi / 180)
        self._default_camera.set_position(
            np.array([-3.2835, -0.88753, 1.3773]))
        self._default_camera.set_orientation(
            np.array([-151.07, 70.079, -120.45]) * np.pi / 180)
        input(
            '\nScene initialized.\n\n'
            'The simulator visualizer can be translated and rotated by clicking either the left mouse button or the wheel, '
            'and then dragging the mouse.\n'
            'Scrolling the mouse wheel zooms the view in and out.\n\n'
            'You can click on any object either in the scene or the left hand panel, '
            'then select the box icon with four arrows in the top panel of the simulator, '
            'and then drag the object around dynamically.\n'
            'Starting to drag and then holding ctrl allows you to also drag the object up and down.\n'
            'Clicking the top icon with a box and two rotating arrows similarly allows rotation of the object.\n\n'
            'Once you have arranged the scene as desired, press enter in the terminal to continue with the demo...\n')

        # primitive scene
        self.setup_primitive_scene()

        # public objects
        drone_starting_inv_ext_mat = ivy.array(
            np.reshape(self._drone.get_matrix(), (3, 4)), 'float32')
        drone_start_rot_vec_pose = ivy_mech.mat_pose_to_rot_vec_pose(
            drone_starting_inv_ext_mat)
        self.drone_start_pose = drone_start_rot_vec_pose
        target_inv_ext_mat = ivy.array(
            np.reshape(self._target.get_matrix(), (3, 4)), 'float32')
        target_rot_vec_pose = ivy_mech.mat_pose_to_rot_vec_pose(
            target_inv_ext_mat)
        self.drone_target_pose = target_rot_vec_pose

        # spline path
        drone_start_to_target_poses = ivy.transpose(
            ivy.linspace(self.drone_start_pose, self.drone_target_pose, 100),
            (1, 0))
        drone_start_to_target_inv_ext_mats = ivy_mech.rot_vec_pose_to_mat_pose(
            drone_start_to_target_poses)
        drone_start_to_target_positions = ivy.transpose(
            self.ivy_drone.sample_body(drone_start_to_target_inv_ext_mats),
            (1, 0, 2))
        initial_sdf_vals = ivy.reshape(
            self.sdf(
                ivy.reshape(
                    ivy.cast(drone_start_to_target_positions, 'float32'),
                    (-1, 3))), (-1, 100, 1))
        self.update_path_visualization(drone_start_to_target_positions,
                                       initial_sdf_vals, None)

        # wait for user input
        self._user_prompt(
            '\nInitialized scene with a drone and a target position to reach.'
            '\nPress enter in the terminal to use method ivy_robot.interpolate_spline_points '
            'to plan a spline path which reaches the target whilst avoiding the objects in the scene...\n')

    else:
        # primitive scene
        self.setup_primitive_scene_no_sim()

        # public objects
        self.drone_start_pose = ivy.array(
            [-1.1500, -1.0280, 0.6000, 0.7937, 1.7021, 1.7021])
        self.drone_target_pose = ivy.array(
            [1.0250, 1.1250, 0.6000, 0.7937, 1.7021, 1.7021])

        # message
        print(
            '\nInitialized dummy scene with a drone and a target position to reach.'
            '\nClose the visualization window to use method ivy_robot.interpolate_spline_points '
            'to plan a spline path which reaches the target whilst avoiding the objects in the scene...\n')

        # plot scene before rotation
        if interactive:
            plt.imshow(
                mpimg.imread(
                    os.path.join(
                        os.path.dirname(os.path.realpath(__file__)),
                        'dsp_no_sim', 'path_0.png')))
            plt.show()

    # message
    print('\nOptimizing spline path...\n')
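# A hedged shape note on the pose conversions used above:
# ivy_mech.mat_pose_to_rot_vec_pose maps a [3, 4] matrix pose to a [6]
# rotation-vector pose [x, y, z, rx, ry, rz], and
# ivy_mech.rot_vec_pose_to_mat_pose maps such poses back to [3, 4]
# matrices (also applied batched above, [100, 6] -> [100, 3, 4]).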
def main(interactive=True, f=None):
    global INTERACTIVE
    INTERACTIVE = interactive

    # Framework Setup #
    # ----------------#

    # choose random framework
    set_framework(choose_random_framework() if f is None else f)

    # Spline Interpolation #
    # ---------------------#

    # config
    num_free_anchors = 3
    num_samples = 100
    constant_rot_vec = ivy.array([[0., 0., 0.]])
    constant_z = ivy.array([[0.]])

    # xy positions

    # 1 x 2
    start_xy = ivy.array([[0., 0.]])
    target_xy = ivy.array([[1., 1.]])

    # 1 x 2
    anchor1_xy = ivy.array([[0.6, 0.2]])
    anchor2_xy = ivy.array([[0.5, 0.5]])
    anchor3_xy = ivy.array([[0.4, 0.8]])

    # as 6DOF poses

    # 1 x 6
    start_pose = ivy.concatenate((start_xy, constant_z, constant_rot_vec), -1)
    anchor1_pose = ivy.concatenate((anchor1_xy, constant_z, constant_rot_vec), -1)
    anchor2_pose = ivy.concatenate((anchor2_xy, constant_z, constant_rot_vec), -1)
    anchor3_pose = ivy.concatenate((anchor3_xy, constant_z, constant_rot_vec), -1)
    target_pose = ivy.concatenate((target_xy, constant_z, constant_rot_vec), -1)

    num_anchors = num_free_anchors + 2

    # num_anchors x 6
    anchor_poses = ivy.concatenate(
        (start_pose, anchor1_pose, anchor2_pose, anchor3_pose, target_pose), 0)

    # uniform sampling for spline

    # num_anchors x 1
    anchor_points = ivy.expand_dims(ivy.linspace(0., 1., num_anchors), -1)

    # num_samples x 1
    query_points = ivy.expand_dims(ivy.linspace(0., 1., num_samples), -1)

    # interpolated spline poses

    # num_samples x 6
    interpolated_poses = ivy_robot.sample_spline_path(anchor_points,
                                                      anchor_poses,
                                                      query_points)

    # xy motion

    # num_anchors x 2
    anchor_xy_positions = anchor_poses[..., 0:2]

    # num_samples x 2
    interpolated_xy_positions = interpolated_poses[..., 0:2]

    # show xy path
    show_2d_spline_path(anchor_xy_positions, interpolated_xy_positions,
                        [-0.095, 0.055], [0.638, 0.171], [0.544, 0.486],
                        [0.445, 0.766], [0.9, 0.9], 'x', 'y',
                        'Interpolated Drone Pose Spline in XY Plane',
                        'start point', 'target point')

    # Rigid Mobile #
    # -------------#

    # drone relative body points
    rel_body_points = ivy.array([[0., 0., 0.],
                                 [-0.1, -0.1, 0.],
                                 [-0.1, 0.1, 0.],
                                 [0.1, -0.1, 0.],
                                 [0.1, 0.1, 0.]])

    # create drone as ivy rigid mobile robot
    drone = RigidMobile(rel_body_points)

    # rotation vectors

    # 1 x 3
    start_rot_vec = ivy.array([[0., 0., 0.]])
    target_rot_vec = ivy.array([[0., 0., np.pi]])

    # 1 x 3
    anchor1_rot_vec = ivy.array([[0., 0., np.pi / 4]])
    anchor2_rot_vec = ivy.array([[0., 0., 2 * np.pi / 4]])
    anchor3_rot_vec = ivy.array([[0., 0., 3 * np.pi / 4]])

    # as 6DOF poses

    # 1 x 6
    start_pose = ivy.concatenate((start_xy, constant_z, start_rot_vec), -1)
    anchor1_pose = ivy.concatenate((anchor1_xy, constant_z, anchor1_rot_vec), -1)
    anchor2_pose = ivy.concatenate((anchor2_xy, constant_z, anchor2_rot_vec), -1)
    anchor3_pose = ivy.concatenate((anchor3_xy, constant_z, anchor3_rot_vec), -1)
    target_pose = ivy.concatenate((target_xy, constant_z, target_rot_vec), -1)

    # num_anchors x 6
    anchor_poses = ivy.concatenate(
        (start_pose, anchor1_pose, anchor2_pose, anchor3_pose, target_pose), 0)

    # interpolated spline poses

    # num_samples x 6
    interpolated_poses = ivy_robot.sample_spline_path(anchor_points,
                                                      anchor_poses,
                                                      query_points)

    # as matrices

    # num_anchors x 3 x 4
    anchor_matrices = ivy_mech.rot_vec_pose_to_mat_pose(anchor_poses)

    # num_samples x 3 x 4
    interpolated_matrices = ivy_mech.rot_vec_pose_to_mat_pose(
        interpolated_poses)

    # sample drone body

    # num_anchors x num_body_points x 3
    anchor_body_points = drone.sample_body(anchor_matrices)

    # num_samples x num_body_points x 3
    interpolated_body_points = drone.sample_body(interpolated_matrices)

    # show
    show_full_spline_path(anchor_body_points, interpolated_body_points,
                          [-0.168, 0.19], [0.88, 0.73], 'x', 'y',
                          'Sampled Drone Body Positions in XY Plane',
                          'start points', 'target points', False)

    # Manipulator #
    # ------------#

    class SimpleManipulator(Manipulator):

        # noinspection PyShadowingNames
        def __init__(self, base_inv_ext_mat=None):
            a_s = ivy.array([0.5, 0.5])
            d_s = ivy.array([0., 0.])
            alpha_s = ivy.array([0., 0.])
            dh_joint_scales = ivy.ones((2,))
            dh_joint_offsets = ivy.array([-np.pi / 2, 0.])
            super().__init__(a_s, d_s, alpha_s, dh_joint_scales,
                             dh_joint_offsets, base_inv_ext_mat)

    # create manipulator as ivy manipulator
    manipulator = SimpleManipulator()

    # joint angles

    # 1 x 2
    start_joint_angles = ivy.array([[0., 0.]])
    target_joint_angles = ivy.array([[-np.pi / 4, -np.pi / 4]])

    # 1 x 2
    anchor1_joint_angles = -ivy.array([[0.2, 0.6]]) * np.pi / 4
    anchor2_joint_angles = -ivy.array([[0.5, 0.5]]) * np.pi / 4
    anchor3_joint_angles = -ivy.array([[0.8, 0.4]]) * np.pi / 4

    # num_anchors x 2
    anchor_joint_angles = ivy.concatenate(
        (start_joint_angles, anchor1_joint_angles, anchor2_joint_angles,
         anchor3_joint_angles, target_joint_angles), 0)

    # interpolated joint angles

    # num_samples x 2
    interpolated_joint_angles = ivy_robot.sample_spline_path(
        anchor_points, anchor_joint_angles, query_points)

    # sample links

    # num_anchors x num_link_points x 3
    anchor_link_points = manipulator.sample_links(anchor_joint_angles,
                                                  samples_per_metre=5)

    # num_samples x num_link_points x 3
    interpolated_link_points = manipulator.sample_links(
        interpolated_joint_angles, samples_per_metre=5)

    # show
    show_2d_spline_path(anchor_joint_angles, interpolated_joint_angles,
                        [-0.222, -0.015], [-0.147, -0.52], [-0.38, -0.366],
                        [-0.64, -0.263], [-0.752, -0.79],
                        r'$\theta_1$', r'$\theta_2$',
                        'Interpolated Manipulator Joint Angles Spline',
                        'start angles', 'target angles')
    show_full_spline_path(anchor_link_points, interpolated_link_points,
                          [0.026, 0.8], [0.542, 0.26], 'x', 'y',
                          'Sampled Manipulator Links in XY Plane',
                          'start config', 'target config', True)
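# A compact sketch of the ivy_robot.sample_spline_path pattern used
# throughout the demo above, assuming only the shapes documented there:
# values defined at num_anchors spline parameters in [0, 1] are
# interpolated at num_samples query parameters, for any trailing
# dimensionality (6DOF poses and 2D joint angles both work).
def _spline_sketch(num_anchors=5, num_samples=100, dims=6):
    anchor_points = ivy.expand_dims(ivy.linspace(0., 1., num_anchors), -1)
    query_points = ivy.expand_dims(ivy.linspace(0., 1., num_samples), -1)
    anchor_vals = ivy.ones((num_anchors, dims))
    interpolated = ivy_robot.sample_spline_path(anchor_points, anchor_vals,
                                                query_points)
    assert interpolated.shape == (num_samples, dims)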
def render(self, mode='human'):
    """
    Renders the environment. The set of supported modes varies per
    environment. (And some environments do not support rendering at all.)
    By convention, if mode is:

    - human: render to the current display or terminal and return nothing.
      Usually for human consumption.
    - rgb_array: Return a numpy.ndarray with shape (x, y, 3), representing
      RGB values for an x-by-y pixel image, suitable for turning into a video.
    - ansi: Return a string (str) or StringIO.StringIO containing a
      terminal-style text representation. The text can include newlines
      and ANSI escape sequences (e.g. for colors).

    :param mode: Render mode, one of [human|rgb_array], default human
    :type mode: str, optional
    :return: Rendered image.
    """
    screen_width = 500
    screen_height = 500
    x_min = -1.2
    x_max = 0.6
    world_width = x_max - x_min
    scale = screen_width / world_width
    car_width = 40
    car_height = 20

    if self.viewer is None:
        # noinspection PyBroadException
        try:
            from gym.envs.classic_control import rendering
        except:
            if not self._logged_headless_message:
                print('Unable to connect to display. '
                      'Running the Ivy environment in headless mode...')
                self._logged_headless_message = True
            return
        self.viewer = rendering.Viewer(screen_width, screen_height)

        # Track.
        xs = ivy.linspace(x_min, x_max, 100)
        ys = self._height(xs)
        xys = list((ivy.to_numpy(xt).item(), ivy.to_numpy(yt).item())
                   for xt, yt in zip((xs - x_min) * scale, ys * scale))
        self.track = rendering.make_polyline(xys)
        self.track.set_linewidth(2)
        self.viewer.add_geom(self.track)

        # Car.
        clearance = 10
        l, r, t, b = -car_width / 2, car_width / 2, car_height, 0
        self.car_geom = rendering.FilledPolygon(
            [(l, b), (l, t), (r, t), (r, b)])
        self.car_geom.add_attr(
            rendering.Transform(translation=(0, clearance)))
        self.car_tr = rendering.Transform()
        self.car_geom.add_attr(self.car_tr)
        self.viewer.add_geom(self.car_geom)

        # Wheels.
        front_wheel = rendering.make_circle(car_height / 2.5)
        front_wheel.set_color(0.5, 0.5, 0.5)
        front_wheel.add_attr(
            rendering.Transform(translation=(car_width / 4, clearance)))
        front_wheel.add_attr(self.car_tr)
        self.viewer.add_geom(front_wheel)
        back_wheel = rendering.make_circle(car_height / 2.5)
        back_wheel.add_attr(
            rendering.Transform(translation=(-car_width / 4, clearance)))
        back_wheel.add_attr(self.car_tr)
        back_wheel.set_color(0.5, 0.5, 0.5)
        self.viewer.add_geom(back_wheel)

        # Flag.
        flag_x = (ivy.to_numpy(self.goal_x)[0] - x_min) * scale
        flagy_y1 = ivy.to_numpy(self._height(self.goal_x))[0] * scale
        flagy_y2 = flagy_y1 + 50
        flagpole = rendering.Line((flag_x, flagy_y1), (flag_x, flagy_y2))
        self.viewer.add_geom(flagpole)
        flag = rendering.FilledPolygon(
            [(flag_x, flagy_y2), (flag_x, flagy_y2 - 10),
             (flag_x + 25, flagy_y2 - 5)])
        flag.set_color(0.4, 0.6, 1.)
        self.viewer.add_geom(flag)

    self.car_tr.set_translation(
        (ivy.to_numpy(self.x)[0] - x_min) * scale,
        ivy.to_numpy(self._height(self.x))[0] * scale)
    self.car_tr.set_rotation(ivy.to_numpy(ivy.cos(3 * self.x))[0])
    rew = ivy.to_numpy(self.get_reward()).item()
    self.car_geom.set_color(1 - rew, rew, 0.)
    return self.viewer.render(return_rgb_array=mode == 'rgb_array')
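# A hedged usage sketch: assuming this render method belongs to a standard
# Gym-style environment instance (hypothetically named env), a typical
# interaction loop looks like:
#
#     env.reset()
#     for _ in range(100):
#         env.step(env.action_space.sample())
#         rgb = env.render(mode='rgb_array')  # (x, y, 3) array, or None
#                                             # when running headless
#     env.close()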
def sample_links(self, joint_angles, link_num=None, samples_per_metre=25, batch_shape=None):
    """
    Sample links of the robot at uniformly distributed cartesian positions.

    :param joint_angles: Joint angles of the robot *[batch_shape,num_joints]*
    :type joint_angles: array
    :param link_num: Link number up to which to compute matrices. Default is the last link.
    :type link_num: int, optional
    :param samples_per_metre: Number of samples per metre of robot link
    :type samples_per_metre: int
    :param batch_shape: Shape of batch. Inferred from inputs if None.
    :type batch_shape: sequence of ints, optional
    :return: The sampled link cartesian positions *[batch_shape,total_sampling_chain_length,3]*
    """
    if link_num is None:
        link_num = self._num_joints
    if batch_shape is None:
        batch_shape = joint_angles.shape[:-1]
    batch_shape = list(batch_shape)
    num_batch_dims = len(batch_shape)
    batch_dims_for_trans = list(range(num_batch_dims))

    # BS x NJ x 4 x 4
    link_matrices = self.compute_link_matrices(joint_angles, link_num,
                                               batch_shape)

    # BS x LN+1 x 3
    link_positions = link_matrices[..., 0:3, -1]

    # BS x LN x 3
    segment_starts = link_positions[..., :-1, :]
    segment_ends = link_positions[..., 1:, :]

    # LN
    segment_sizes = _ivy.cast(
        _ivy.ceil(self._link_lengths[0:link_num] * samples_per_metre),
        'int32')

    # list of segments
    segments_list = list()
    for link_idx in range(link_num):
        segment_size = segment_sizes[link_idx]

        # BS x 1 x 3
        segment_start = segment_starts[..., link_idx:link_idx + 1, :]
        segment_end = segment_ends[..., link_idx:link_idx + 1, :]

        # BS x segment_size x 3
        segment = _ivy.linspace(segment_start, segment_end, segment_size,
                                axis=-2)[..., 0, :, :]
        if link_idx == link_num - 1 or segment_size == 1:
            segments_list.append(segment)
        else:
            # drop the final point, which duplicates the next segment start
            segments_list.append(segment[..., :-1, :])

    # BS x total_robot_chain_length x 3
    all_segments = _ivy.concatenate(segments_list, -2)

    # BS x total_robot_chain_length x 4
    all_segments_homo = _ivy_mech.make_coordinates_homogeneous(all_segments)

    # 4 x BSxtotal_robot_chain_length
    all_segments_homo_trans = _ivy.reshape(
        _ivy.transpose(
            all_segments_homo,
            [num_batch_dims + 1] + batch_dims_for_trans + [num_batch_dims]),
        (4, -1))

    # 3 x BSxtotal_robot_chain_length
    transformed_trans = _ivy.matmul(self._base_inv_ext_mat[..., 0:3, :],
                                    all_segments_homo_trans)

    # BS x total_robot_chain_length x 3
    return _ivy.transpose(
        _ivy.reshape(transformed_trans, [3] + batch_shape + [-1]),
        [i + 1 for i in batch_dims_for_trans] + [num_batch_dims + 1] + [0])
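# A hypothetical usage sketch for sample_links, assuming a two-link planar
# manipulator such as the SimpleManipulator defined in the demo earlier in
# this section:
#
#     joint_angles = ivy.array([[0., -np.pi / 4]])  # batch_shape=[1], NJ=2
#     pts = manipulator.sample_links(joint_angles, samples_per_metre=10)
#     # pts.shape == (1, total_sampling_chain_length, 3)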