def get_random_quaternion(max_rot_ang=_math.pi, batch_shape=None):
    """
    Generate a random quaternion :math:`\mathbf{q} = [q_i, q_j, q_k, q_r]`,
    whose rotation angle magnitude does not exceed ``max_rot_ang``.\n
    `[reference] <https://en.wikipedia.org/wiki/Quaternion>`_

    :param max_rot_ang: Absolute value of maximum rotation angle for quaternion.
                        Default value of :math:`π`.
    :type max_rot_ang: float, optional
    :param batch_shape: Shape of batch. Shape of [1] is assumed if None.
    :type batch_shape: sequence of ints, optional
    :return: Random quaternion *[batch_shape,4]*
    """
    batch_shape = [] if batch_shape is None else batch_shape

    # BS x 3: random rotation axis, normalized to unit length
    # NOTE(review): components are drawn from [0, 1], so the axis only covers
    # one octant of the unit sphere — confirm whether this is intended.
    axis = _ivy.random_uniform(0, 1, list(batch_shape) + [3])
    axis = axis / (_ivy.norm(axis) + MIN_DENOMINATOR)

    # BS x 1: random signed rotation angle within the allowed magnitude
    angle = _ivy.random_uniform(-max_rot_ang, max_rot_ang, list(batch_shape) + [1])

    # BS x 4
    return axis_angle_to_quaternion(_ivy.concatenate([axis, angle], -1))
def _create_variables(self, dev_str):
    """ Create internal variables for the LSTM layer """
    # ToDo: support other initialization mechanisms, via class constructor options
    # ToDo: tidy the construction of these variables, with helper functions

    # Glorot-style limit for the input-to-hidden weights
    in_lim = (6 / (self._output_channels + self._input_channels)) ** 0.5
    input_weights = {
        'layer_' + str(layer): {
            'w': ivy.variable(ivy.random_uniform(
                -in_lim, in_lim,
                (self._input_channels if layer == 0 else self._output_channels,
                 4 * self._output_channels),
                dev_str=dev_str))}
        for layer in range(self._num_layers)}

    # Glorot-style limit for the hidden-to-hidden weights
    rec_lim = (6 / (self._output_channels + self._output_channels)) ** 0.5
    recurrent_weights = {
        'layer_' + str(layer): {
            'w': ivy.variable(ivy.random_uniform(
                -rec_lim, rec_lim,
                (self._output_channels, 4 * self._output_channels),
                dev_str=dev_str))}
        for layer in range(self._num_layers)}

    return {'input': input_weights, 'recurrent': recurrent_weights}
def test_gradcheck(self, dev_str, dtype_str, call):
    """ Verify analytic gradients of top_hat against numerical ones (torch only). """
    if call is not helpers.torch_call:
        # ivy gradcheck method not yet implemented for other backends
        pytest.skip()
    # gradcheck requires float64 leaf variables
    image = ivy.variable(ivy.cast(
        ivy.random_uniform(shape=(2, 3, 4, 4), dev_str=dev_str), 'float64'))
    struct_elem = ivy.variable(ivy.cast(
        ivy.random_uniform(shape=(3, 3), dev_str=dev_str), 'float64'))
    assert gradcheck(top_hat, (image, struct_elem), raise_exception=True)
def _create_variables(self, dev_str):
    """ Create the per-head read/write weight vectors and the memory matrix. """
    # Glorot-style limit for the read weight vectors
    read_lim = (6 / (2 * self._memory_vector_dim)) ** 0.5
    read_weights = {
        'w_' + str(idx): ivy.variable(ivy.random_uniform(
            -read_lim, read_lim, [self._memory_vector_dim, ], dev_str=dev_str))
        for idx in range(self._read_head_num)}

    # Glorot-style limit for the write weight vectors, one per read AND write head
    write_lim = (6 / (2 * self._memory_size)) ** 0.5
    num_write_entries = self._read_head_num + self._write_head_num
    write_weights = {
        'w_' + str(idx): ivy.variable(ivy.random_uniform(
            -write_lim, write_lim, [self._memory_size, ], dev_str=dev_str))
        for idx in range(num_write_entries)}

    # memory matrix, filled with the configured initial value
    memory = ivy.variable(
        ivy.ones([self._memory_size, self._memory_vector_dim],
                 dev_str=dev_str) * self._init_value)

    return {'read_weights': read_weights,
            'write_weights': write_weights,
            'memory': memory}
def main(env_str=None, visualize=True, f=None):
    """ Run a short random-action rollout in the named ivy_gym environment. """
    # Framework Setup #
    # ----------------#

    # pick a backend framework at random unless one was supplied
    ivy.set_framework(choose_random_framework() if f is None else f)

    # instantiate the environment by name and reset it
    env = getattr(ivy_gym, env_str)()
    env.reset()

    # step with uniformly random actions
    action_size = env.action_space.shape[0]
    for _ in range(250):
        action = ivy.random_uniform(-1, 1, (action_size, ))
        env.step(action)
        if visualize:
            env.render()

    env.close()
    ivy.unset_framework()

    # message
    print('End of Run Through Demo!')
def get_random_euler(batch_shape=None):
    """
    Generate random :math:`zyx` Euler angles :math:`\mathbf{θ}_{xyz} = [ϕ_z, ϕ_y, ϕ_x]`.

    :param batch_shape: Shape of batch. Shape of [1] is assumed if None.
    :type batch_shape: sequence of ints, optional
    :return: Random euler *[batch_shape,3]*
    """
    shape = [3] if batch_shape is None else list(batch_shape) + [3]
    # BS x 3: each angle drawn uniformly from [0, 2π)
    return _ivy.random_uniform(0.0, _math.pi * 2, shape)
def _create_variables(self, dev_str):
    """ Create internal variables for the Linear layer """
    # ToDo: support other initialization mechanisms, via class constructor options
    # ToDo: tidy the construction of these variables, with helper functions

    # Glorot-uniform weight matrix and zero bias
    fan_sum = self._output_channels + self._input_channels
    wlim = (6 / fan_sum) ** 0.5
    weight = ivy.variable(ivy.random_uniform(
        -wlim, wlim, (self._output_channels, self._input_channels),
        dev_str=dev_str))
    bias = ivy.variable(ivy.zeros([self._output_channels], dev_str=dev_str))
    return {'w': weight, 'b': bias}
def test_jit(self, dev_str, dtype_str, call):
    """ Check the compiled top_hat matches eager execution. """
    if call in [helpers.jnp_call, helpers.torch_call]:
        # compiled jax tensors do not have device_buffer attribute, preventing device info retrieval,
        # pytorch scripting does not support .type() casting, nor Union or Numbers for type hinting
        pytest.skip()
    compiled_op = ivy.compile_fn(top_hat)
    image = ivy.cast(ivy.random_uniform(shape=(1, 2, 7, 7), dev_str=dev_str), dtype_str)
    struct_elem = ivy.ones((3, 3), dev_str=dev_str, dtype_str=dtype_str)
    # compiled and eager results must agree
    assert np.allclose(call(compiled_op, image, struct_elem),
                       call(top_hat, image, struct_elem))
def test_random_uniform(low, high, shape, dtype_str, tensor_fn, dev_str, call):
    """ Smoke, cardinality, value and compilation tests for ivy.random_uniform. """
    # smoke test
    if tensor_fn == helpers.var_fn and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    kwargs = dict([(k, tensor_fn(v)) for k, v in zip(['low', 'high'], [low, high])
                   if v is not None])
    if shape is not None:
        kwargs['shape'] = shape
    ret = ivy.random_uniform(**kwargs, dev_str=dev_str)
    # type test
    assert ivy.is_array(ret)
    # cardinality test
    if shape is None:
        assert ret.shape == ()
    else:
        assert ret.shape == shape
    # value test
    ret_np = call(ivy.random_uniform, **kwargs, dev_str=dev_str)
    # use `is not None` (not truthiness) so explicit bounds of 0 are respected,
    # consistent with the kwargs construction above; defaults are low=0., high=1.
    upper = high if high is not None else 1.
    lower = low if low is not None else 0.
    assert np.min((ret_np < upper).astype(np.int32)) == 1
    # lower bound is inclusive for uniform sampling over [low, high), so use >=
    assert np.min((ret_np >= lower).astype(np.int32)) == 1
    # compilation test
    helpers.assert_compilable(ivy.random_uniform)
def stratified_sample(starts, ends, num_samples, batch_shape=None):
    """
    Perform stratified sampling, between start and end arrays. This operation divides the range
    into equidistant bins, and uniformly samples a value within the range of each bin.

    :param starts: Start values *[batch_shape]*
    :type starts: array
    :param ends: End values *[batch_shape]*
    :type ends: array
    :param num_samples: The number of samples to generate between starts and ends
    :type num_samples: int
    :param batch_shape: Shape of batch, Inferred from inputs if None.
    :type batch_shape: sequence of ints, optional
    :return: The stratified samples, with each randomly placed in uniformly spaced bins *[batch_shape,num_samples]*
    """
    # infer batch shape from the inputs when not given
    if batch_shape is None:
        batch_shape = starts.shape
    batch_shape = list(batch_shape)

    # BS: width of each equidistant bin
    bin_sizes = (ends - starts) / num_samples

    # BS x NS: left edge of every bin
    bin_starts = ivy.linspace(starts, ends - bin_sizes, num_samples)

    # BS x NS: uniform random offset within each bin
    offsets = ivy.random_uniform(
        shape=batch_shape + [num_samples],
        dev_str=ivy.dev_str(starts)) * ivy.expand_dims(bin_sizes, -1)

    # BS x NS
    return bin_starts + offsets
def _get_dummy_obs(batch_size, num_frames, num_cams, image_dims, num_feature_channels, dev_str='cpu',
                   ones=False, empty=False):
    """
    Construct a dummy ESM observation Container for testing.

    :param batch_size: Batch size for all generated arrays.
    :param num_frames: Number of frames per batch element.
    :param num_cams: Number of dummy camera measurements to include.
    :param image_dims: Image dimensions as [height, width].
    :param num_feature_channels: Number of extra feature channels in the image measurements.
    :param dev_str: Device to create the arrays on. Default is 'cpu'.
    :param ones: If True, fill measurements deterministically (ones/zeros) instead of randomly.
    :param empty: If True, mark all measurements invalid (huge variance, zeroed validity masks).
    :return: Container holding image measurements, control signals and the agent relative matrix.
    """
    # image of homogeneous pixel co-ordinates, shared across all dummy cameras
    uniform_pixel_coords =\
        ivy_vision.create_uniform_pixel_coords_image(image_dims, [batch_size, num_frames],
                                                     dev_str=dev_str)
    img_meas = dict()
    for i in range(num_cams):
        # validity mask: all pixels valid unless `empty` overrides below
        validity_mask = ivy.ones([batch_size, num_frames] + image_dims + [1], dev_str=dev_str)
        if ones:
            # deterministic measurements: pixel coords + ones, tiny variance, zero pose
            img_mean = ivy.concatenate((uniform_pixel_coords[..., 0:2], ivy.ones(
                [batch_size, num_frames] + image_dims + [1 + num_feature_channels],
                dev_str=dev_str)), -1)
            img_var = ivy.ones(
                [batch_size, num_frames] + image_dims + [3 + num_feature_channels],
                dev_str=dev_str)*1e-3
            pose_mean = ivy.zeros([batch_size, num_frames, 6], dev_str=dev_str)
            pose_cov = ivy.ones([batch_size, num_frames, 6, 6], dev_str=dev_str)*1e-3
        else:
            # random measurements, bounded away from zero via the 1e-3 lower limit
            img_mean = ivy.concatenate((uniform_pixel_coords[..., 0:2], ivy.random_uniform(
                1e-3, 1, [batch_size, num_frames] + image_dims + [1 + num_feature_channels],
                dev_str=dev_str)), -1)
            img_var = ivy.random_uniform(
                1e-3, 1, [batch_size, num_frames] + image_dims + [3 + num_feature_channels],
                dev_str=dev_str)
            pose_mean = ivy.random_uniform(1e-3, 1, [batch_size, num_frames, 6], dev_str=dev_str)
            pose_cov = ivy.random_uniform(1e-3, 1, [batch_size, num_frames, 6, 6], dev_str=dev_str)
        if empty:
            # invalidate the measurement: effectively infinite variance, nothing valid
            img_var = ivy.ones_like(img_var) * 1e12
            validity_mask = ivy.zeros_like(validity_mask)
        # identity extrinsics (top 3 rows of a 4x4 identity) for each dummy camera
        img_meas['dummy_cam_{}'.format(i)] =\
            {'img_mean': img_mean,
             'img_var': img_var,
             'validity_mask': validity_mask,
             'pose_mean': pose_mean,
             'pose_cov': pose_cov,
             'cam_rel_mat': ivy.identity(4, batch_shape=[batch_size, num_frames],
                                         dev_str=dev_str)[..., 0:3, :]}

    # agent control signals, deterministic or random to match the camera measurements
    if ones:
        control_mean = ivy.zeros([batch_size, num_frames, 6], dev_str=dev_str)
        control_cov = ivy.ones([batch_size, num_frames, 6, 6], dev_str=dev_str)*1e-3
    else:
        control_mean = ivy.random_uniform(1e-3, 1, [batch_size, num_frames, 6], dev_str=dev_str)
        control_cov = ivy.random_uniform(1e-3, 1, [batch_size, num_frames, 6, 6], dev_str=dev_str)
    return Container({'img_meas': img_meas,
                      'control_mean': control_mean,
                      'control_cov': control_cov,
                      'agent_rel_mat': ivy.identity(4, batch_shape=[batch_size, num_frames],
                                                    dev_str=dev_str)[..., 0:3, :]})
def reset(self):
    """ Sample a fresh random environment state and return the corresponding observation. """
    def _rand(shape):
        # uniform draw from [-1, 1] with the given shape
        return ivy.random_uniform(-1, 1, shape)
    self.urchin_xys = _rand((self.num_urchins, 2))
    self.xy = _rand((2, ))
    self.xy_vel = ivy.zeros((2, ))
    self.goal_xy = _rand((2, ))
    return self.get_observation()
def _as_random(value, _=''): if hasattr(value, 'shape'): return _ivy.random_uniform(0., 1., value.shape) return value
def main(f=None):
    """
    Run through the core ivy_mech conversions: orientation, pose and position
    representations, plus cartesian frame-of-reference transformations.
    Shape comments (e.g. `# 3 x 4`) give the array shape of the following result.
    """
    # Framework Setup #
    # ----------------#

    # choose random framework
    f = choose_random_framework() if f is None else f
    set_framework(f)

    # Orientation #
    # ------------#

    # rotation representations

    # 3
    rot_vec = ivy.array([0., 1., 0.])

    # 3 x 3
    rot_mat = ivy_mech.rot_vec_to_rot_mat(rot_vec)

    # 3
    euler_angles = ivy_mech.rot_mat_to_euler(rot_mat, 'zyx')

    # 4
    quat = ivy_mech.euler_to_quaternion(euler_angles)

    # 4
    axis_and_angle = ivy_mech.quaternion_to_axis_angle(quat)

    # 3: rotation vector recovered as axis scaled by angle, closing the round trip
    rot_vec_again = axis_and_angle[..., :-1] * axis_and_angle[..., -1:]

    # Pose #
    # -----#

    # pose representations

    # 3
    position = ivy.ones_like(rot_vec)

    # 6
    rot_vec_pose = ivy.concatenate((position, rot_vec), 0)

    # 3 x 4
    mat_pose = ivy_mech.rot_vec_pose_to_mat_pose(rot_vec_pose)

    # 6
    euler_pose = ivy_mech.mat_pose_to_euler_pose(mat_pose)

    # 7
    quat_pose = ivy_mech.euler_pose_to_quaternion_pose(euler_pose)

    # 6: round trip back to rotation-vector pose
    rot_vec_pose_again = ivy_mech.quaternion_pose_to_rot_vec_pose(quat_pose)

    # Position #
    # ---------#

    # conversions of positional representation

    # 3
    cartesian_coord = ivy.random_uniform(0., 1., (3, ))

    # 3
    polar_coord = ivy_mech.cartesian_to_polar_coords(cartesian_coord)

    # 3
    cartesian_coord_again = ivy_mech.polar_to_cartesian_coords(polar_coord)

    # cartesian co-ordinate frame-of-reference transformations

    # 3 x 4
    trans_mat = ivy.random_uniform(0., 1., (3, 4))

    # 4
    cartesian_coord_homo = ivy_mech.make_coordinates_homogeneous(
        cartesian_coord)

    # 3: transform the point into the new frame
    trans_cartesian_coord = ivy.matmul(
        trans_mat, ivy.expand_dims(cartesian_coord_homo, -1))[:, 0]

    # 4
    trans_cartesian_coord_homo = ivy_mech.make_coordinates_homogeneous(
        trans_cartesian_coord)

    # 4 x 4: pad the 3x4 matrix so it can be inverted
    trans_mat_homo = ivy_mech.make_transformation_homogeneous(trans_mat)

    # 3 x 4
    inv_trans_mat = ivy.inv(trans_mat_homo)[0:3]

    # 3: apply the inverse transformation to recover the original point
    cartesian_coord_again = ivy.matmul(
        inv_trans_mat, ivy.expand_dims(trans_cartesian_coord_homo, -1))[:, 0]

    # message
    print('End of Run Through Demo!')
def main(batch_size=32, num_train_steps=31250, compile_flag=True, num_bits=8, seq_len=28,
         ctrl_output_size=100, memory_size=128, memory_vector_dim=28, overfit_flag=False,
         interactive=True, f=None):
    """
    Train an NTM on the copy task: the network sees a random binary sequence followed
    by an end-of-sequence marker, and must reproduce the sequence at its output.

    :param batch_size: Training batch size (forced to 1 when overfitting).
    :param num_train_steps: Number of optimisation steps (forced to 150 when overfitting).
    :param compile_flag: Whether to compile the loss function with ivy.compile_fn.
    :param num_bits: Number of bits per sequence element.
    :param seq_len: Length of the sequence to copy.
    :param ctrl_output_size: Output size of the NTM controller.
    :param memory_size: Number of memory slots.
    :param memory_vector_dim: Dimensionality of each memory slot.
    :param overfit_flag: If True, repeatedly train on a single fixed sequence.
    :param interactive: If True, show prediction/target visualisations with OpenCV.
    :param f: Backend framework; chosen randomly if None.
    """
    f = choose_random_framework() if f is None else f
    set_framework(f)

    # train config; overfit mode uses a higher lr, single example and fewer steps
    lr = 1e-3 if not overfit_flag else 1e-2
    batch_size = batch_size if not overfit_flag else 1
    num_train_steps = num_train_steps if not overfit_flag else 150
    max_grad_norm = 50

    # logging config
    vis_freq = 250 if not overfit_flag else 1

    # optimizer
    optimizer = ivy.Adam(lr=lr)

    # ntm
    ntm = NTM(input_dim=num_bits + 1, output_dim=num_bits,
              ctrl_output_size=ctrl_output_size, ctrl_layers=1,
              memory_size=memory_size, memory_vector_dim=memory_vector_dim,
              read_head_num=1, write_head_num=1)

    # compile loss fn, tracing with example inputs of the correct shapes
    total_seq_example = ivy.random_uniform(shape=(batch_size, 2 * seq_len + 1, num_bits + 1))
    target_seq_example = total_seq_example[:, 0:seq_len, :-1]
    if compile_flag:
        loss_fn_maybe_compiled = ivy.compile_fn(
            lambda v, ttl_sq, trgt_sq, sq_ln: loss_fn(ntm, v, ttl_sq, trgt_sq, sq_ln),
            dynamic=False,
            example_inputs=[ntm.v, total_seq_example, target_seq_example, seq_len])
    else:
        loss_fn_maybe_compiled = lambda v, ttl_sq, trgt_sq, sq_ln: loss_fn(
            ntm, v, ttl_sq, trgt_sq, sq_ln)

    # init: random binary sequence (thresholded uniform noise); mw/vw are optimizer moments
    input_seq_m1 = ivy.cast(
        ivy.random_uniform(0., 1., (batch_size, seq_len, num_bits)) > 0.5, 'float32')
    mw = None
    vw = None

    for i in range(num_train_steps):

        # sequence to copy; resampled every step unless overfitting a single example
        if not overfit_flag:
            input_seq_m1 = ivy.cast(
                ivy.random_uniform(0., 1., (batch_size, seq_len, num_bits)) > 0.5, 'float32')
        target_seq = input_seq_m1
        # append the (zeroed) marker channel, then the EOS marker and blank output slots
        input_seq = ivy.concatenate(
            (input_seq_m1, ivy.zeros((batch_size, seq_len, 1))), -1)
        eos = ivy.ones((batch_size, 1, num_bits + 1))
        output_seq = ivy.zeros_like(input_seq)
        total_seq = ivy.concatenate((input_seq, eos, output_seq), -2)

        # train step
        loss, pred_vals = train_step(loss_fn_maybe_compiled, optimizer, ntm, total_seq,
                                     target_seq, seq_len, mw, vw,
                                     ivy.array(i + 1, 'float32'), max_grad_norm)

        # log
        print('step: {}, loss: {}'.format(i, ivy.to_numpy(loss).item()))

        # visualize: stack upscaled prediction above target, with text overlays
        if i % vis_freq == 0:
            target_to_vis = (ivy.to_numpy(target_seq[0] * 255)).astype(np.uint8)
            target_to_vis = np.transpose(
                cv2.resize(target_to_vis, (560, 160),
                           interpolation=cv2.INTER_NEAREST), (1, 0))
            pred_to_vis = (ivy.to_numpy(pred_vals[0] * 255)).astype(np.uint8)
            pred_to_vis = np.transpose(
                cv2.resize(pred_to_vis, (560, 160),
                           interpolation=cv2.INTER_NEAREST), (1, 0))

            img_to_vis = np.concatenate((pred_to_vis, target_to_vis), 0)
            img_to_vis = cv2.resize(img_to_vis, (1120, 640),
                                    interpolation=cv2.INTER_NEAREST)

            # white-on-black label boxes for step counter, prediction and target
            img_to_vis[0:60, -200:] = 0
            img_to_vis[5:55, -195:-5] = 255
            cv2.putText(img_to_vis, 'step {}'.format(i), (935, 42),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.2, tuple([0] * 3), 2)

            img_to_vis[0:60, 0:200] = 0
            img_to_vis[5:55, 5:195] = 255
            cv2.putText(img_to_vis, 'prediction', (7, 42),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.2, tuple([0] * 3), 2)

            img_to_vis[320:380, 0:130] = 0
            img_to_vis[325:375, 5:125] = 255
            cv2.putText(img_to_vis, 'target', (7, 362),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.2, tuple([0] * 3), 2)

            if interactive:
                cv2.imshow('prediction_and_target', img_to_vis)
                if overfit_flag:
                    cv2.waitKey(1)
                else:
                    cv2.waitKey(100)

    cv2.destroyAllWindows()
def reset(self):
    """ Randomise joint angles, angular velocities and the goal location, then return the observation. """
    num_joints = self.num_joints
    # joint angles in [-π, π], angular velocities in [-1, 1]
    self.angles = ivy.random_uniform(-np.pi, np.pi, [num_joints])
    self.angle_vels = ivy.random_uniform(-1, 1, [num_joints])
    # goal position bounded by the number of joints in each direction
    self.goal_xy = ivy.random_uniform(-num_joints, num_joints, [2])
    return self.get_observation()
def main():
    """
    Demo of the Ivy memory modules embedded in models from different frameworks:
    an LSTM inside a PyTorch model, an NTM inside a TensorFlow model, and an ESM
    inside a pure-Ivy model. Each sub-demo builds the model, checks its output
    shapes, and runs a short dummy training loop.
    """

    # LSTM #
    # -----#

    # using the Ivy LSTM memory module, dual stacked, in a PyTorch model

    class TorchModelWithLSTM(torch.nn.Module):

        def __init__(self, channels_in, channels_out):
            torch.nn.Module.__init__(self)
            self._linear = torch.nn.Linear(channels_in, 64)
            self._lstm = ivy_mem.LSTM(64, channels_out, 2, return_state=False)
            self._assign_variables()

        def _assign_variables(self):
            # register each ivy LSTM variable as a torch Parameter, then point the
            # ivy container back at those registered parameters
            self._lstm.v.map(lambda x, kc: self.register_parameter(
                name=kc, param=torch.nn.Parameter(x)))
            self._lstm.v = self._lstm.v.map(lambda x, kc: self._parameters[kc])

        def forward(self, x):
            x = self._linear(x)
            return self._lstm(x)

    # create model
    in_channels = 32
    out_channels = 8
    ivy.set_framework('torch')
    model = TorchModelWithLSTM(in_channels, out_channels)

    # define inputs
    batch_shape = [1, 2]
    timesteps = 3
    input_shape = batch_shape + [timesteps, in_channels]
    input_seq = torch.rand(batch_shape + [timesteps, in_channels])

    # call model and test output
    output_seq = model(input_seq)
    assert input_seq.shape[:-1] == output_seq.shape[:-1]
    assert input_seq.shape[-1] == in_channels
    assert output_seq.shape[-1] == out_channels

    # define loss function
    target = torch.zeros_like(output_seq)

    def loss_fn():
        pred = model(input_seq)
        return torch.sum((pred - target)**2)

    # define optimizer
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-2)

    # train model
    print('\ntraining dummy PyTorch LSTM model...\n')
    for i in range(10):
        loss = loss_fn()
        loss.backward()
        optimizer.step()
        print('step {}, loss = {}'.format(i, loss))
    print('\ndummy PyTorch LSTM model trained!\n')
    ivy.unset_framework()

    # NTM #
    # ----#

    # using the Ivy NTM memory module in a TensorFlow model

    class TfModelWithNTM(tf.keras.Model):

        def __init__(self, channels_in, channels_out):
            tf.keras.Model.__init__(self)
            self._linear = tf.keras.layers.Dense(64)
            memory_size = 4
            memory_vector_dim = 1
            self._ntm = ivy_mem.NTM(
                input_dim=64, output_dim=channels_out,
                ctrl_output_size=channels_out, ctrl_layers=1,
                memory_size=memory_size,
                memory_vector_dim=memory_vector_dim,
                read_head_num=1, write_head_num=1)
            self._assign_variables()

        def _assign_variables(self):
            # mirror the ivy NTM variables as keras weights, copy the values across,
            # then point the ivy container at the keras-tracked weights
            self._ntm.v.map(
                lambda x, kc: self.add_weight(name=kc, shape=x.shape))
            self.set_weights(
                [ivy.to_numpy(v) for k, v in self._ntm.v.to_iterator()])
            self.trainable_weights_dict = dict()
            for weight in self.trainable_weights:
                self.trainable_weights_dict[weight.name] = weight
            # NOTE(review): the ':0' suffix appears to match keras' weight naming — confirm
            self._ntm.v = self._ntm.v.map(
                lambda x, kc: self.trainable_weights_dict[kc + ':0'])

        def call(self, x, **kwargs):
            x = self._linear(x)
            return self._ntm(x)

    # create model
    in_channels = 32
    out_channels = 8
    ivy.set_framework('tensorflow')
    model = TfModelWithNTM(in_channels, out_channels)

    # define inputs
    batch_shape = [1, 2]
    timesteps = 3
    input_shape = batch_shape + [timesteps, in_channels]
    input_seq = tf.random.uniform(batch_shape + [timesteps, in_channels])

    # call model and test output
    output_seq = model(input_seq)
    assert input_seq.shape[:-1] == output_seq.shape[:-1]
    assert input_seq.shape[-1] == in_channels
    assert output_seq.shape[-1] == out_channels

    # define loss function
    target = tf.zeros_like(output_seq)

    def loss_fn():
        pred = model(input_seq)
        return tf.reduce_sum((pred - target)**2)

    # define optimizer
    optimizer = tf.keras.optimizers.Adam(1e-2)

    # train model
    print('\ntraining dummy TensorFlow NTM model...\n')
    for i in range(10):
        with tf.GradientTape() as tape:
            loss = loss_fn()
        grads = tape.gradient(loss, model.trainable_weights)
        optimizer.apply_gradients(zip(grads, model.trainable_weights))
        print('step {}, loss = {}'.format(i, loss))
    print('\ndummy TensorFlow NTM model trained!\n')
    ivy.unset_framework()

    # ESM #
    # ----#

    # using the Ivy ESM memory module in a pure-Ivy model, with a JAX backend
    # ToDo: add pre-ESM conv layers to this demo

    class IvyModelWithESM(ivy.Module):

        def __init__(self, channels_in, channels_out):
            self._channels_in = channels_in
            self._esm = ivy_mem.ESM(omni_image_dims=(16, 32))
            self._linear = ivy_mem.Linear(channels_in, channels_out)
            ivy.Module.__init__(self, 'cpu')

        def _forward(self, obs):
            mem = self._esm(obs)
            x = ivy.reshape(mem.mean, (-1, self._channels_in))
            return self._linear(x)

    # create model
    in_channels = 32
    out_channels = 8
    ivy.set_framework('torch')
    model = IvyModelWithESM(in_channels, out_channels)

    # input config
    batch_size = 1
    image_dims = [5, 5]
    num_timesteps = 2
    num_feature_channels = 3

    # create image of pixel co-ordinates
    uniform_pixel_coords =\
        ivy_vision.create_uniform_pixel_coords_image(image_dims, [batch_size, num_timesteps])

    # define camera measurement: random depths back-projected to camera co-ordinates
    depths = ivy.random_uniform(shape=[batch_size, num_timesteps] + image_dims + [1])
    ds_pixel_coords = ivy_vision.depth_to_ds_pixel_coords(depths)
    inv_calib_mats = ivy.random_uniform(
        shape=[batch_size, num_timesteps, 3, 3])
    cam_coords = ivy_vision.ds_pixel_to_cam_coords(ds_pixel_coords,
                                                   inv_calib_mats)[..., 0:3]
    features = ivy.random_uniform(shape=[batch_size, num_timesteps] + image_dims +
                                  [num_feature_channels])
    img_mean = ivy.concatenate((cam_coords, features), -1)
    # identity extrinsics: top 3 rows of a 4x4 identity matrix
    cam_rel_mat = ivy.identity(4, batch_shape=[batch_size,
                                               num_timesteps])[..., 0:3, :]

    # place these into an ESM camera measurement container
    esm_cam_meas = ESMCamMeasurement(img_mean=img_mean, cam_rel_mat=cam_rel_mat)

    # define agent pose transformation
    agent_rel_mat = ivy.identity(4, batch_shape=[batch_size,
                                                 num_timesteps])[..., 0:3, :]

    # collect together into an ESM observation container
    esm_obs = ESMObservation(img_meas={'camera_0': esm_cam_meas},
                             agent_rel_mat=agent_rel_mat)

    # call model and test output
    output = model(esm_obs)
    assert output.shape[-1] == out_channels

    # define loss function
    target = ivy.zeros_like(output)

    def loss_fn(v):
        pred = model(esm_obs, v=v)
        return ivy.reduce_mean((pred - target)**2)

    # optimizer
    optimizer = ivy.SGD(lr=1e-4)

    # train model
    print('\ntraining dummy Ivy ESM model...\n')
    for i in range(10):
        loss, grads = ivy.execute_with_gradients(loss_fn, model.v)
        model.v = optimizer.step(model.v, grads)
        print('step {}, loss = {}'.format(i,
                                          ivy.to_numpy(loss).item()))
    print('\ndummy Ivy ESM model trained!\n')
    ivy.unset_framework()

    # message
    print('End of Run Through Demo!')
def reset(self):
    """ Re-initialise with a random position in [-0.9, -0.2] and zero velocity, then return the observation. """
    self.x = ivy.random_uniform(-0.9, -0.2, [1])
    self.x_vel = ivy.zeros([1])
    return self.get_observation()
def test_smoke(self, dev_str, dtype_str, call):
    """ Smoke test: _se_to_mask runs on a random 3x3 structuring element. """
    struct_elem = ivy.cast(
        ivy.random_uniform(shape=(3, 3), dev_str=dev_str), dtype_str)
    assert call(_se_to_mask, struct_elem) is not None
def reset(self):
    """ Sample a random angle and angular velocity, then return the observation. """
    # angle in [-π, π], angular velocity in [-1, 1]
    self.angle = ivy.random_uniform(-np.pi, np.pi, [1])
    self.angle_vel = ivy.random_uniform(-1., 1., [1])
    return self.get_observation()