Example #1
def test_adam_update(ws_n_grads_n_lr_n_wsnew, dtype_str, tensor_fn, dev_str,
                     call):
    # smoke test
    ws_raw, dcdws_raw, lr, ws_raw_new = ws_n_grads_n_lr_n_wsnew
    ws = ws_raw.map(lambda x, _: ivy.variable(ivy.array(x)))
    dcdws = dcdws_raw.map(lambda x, _: ivy.array(x))
    ws_true_new = ws_raw_new.map(lambda x, _: ivy.variable(ivy.array(x)))
    mw = dcdws
    vw = dcdws.map(lambda x, _: x**2)
    ws_new, mw_new, vw_new = ivy.adam_update(ws, dcdws, lr, mw, vw,
                                             ivy.array(1))
    # type test
    assert isinstance(ws_new, dict)
    assert isinstance(mw_new, dict)
    assert isinstance(vw_new, dict)
    # cardinality test
    for (w_new, w_true_new) in zip(ws_new.values(), ws_true_new.values()):
        assert w_new.shape == w_true_new.shape
    for (m_new, m_orig) in zip(mw_new.values(), mw.values()):
        assert m_new.shape == m_orig.shape
    for (v_new, v_orig) in zip(vw_new.values(), vw.values()):
        assert v_new.shape == v_orig.shape
    # value test
    for (w_new, w_true_new) in zip(ws_new.values(), ws_true_new.values()):
        assert np.allclose(ivy.to_numpy(w_new), ivy.to_numpy(w_true_new))
    # compilation test
    if call in [helpers.torch_call]:
        # pytorch scripting does not support internal function definitions
        return
    helpers.assert_compilable(ivy.adam_update)
Example #2
def test_variable(object_in, dtype_str, dev_str, call):
    if call is helpers.tf_graph_call:
        # cannot create variables as part of compiled tf graph
        pytest.skip()
    if call in [helpers.mx_call] and dtype_str == 'int16':
        # mxnet does not support int16
        pytest.skip()
    if len(object_in) == 0 and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    # smoke test
    ret = ivy.variable(ivy.array(object_in, dtype_str, dev_str))
    # type test
    if call is not helpers.np_call:
        assert ivy.is_variable(ret)
    # cardinality test
    assert ret.shape == np.array(object_in).shape
    # value test
    assert np.allclose(
        call(ivy.variable, ivy.array(object_in, dtype_str, dev_str)),
        np.array(object_in).astype(dtype_str))
    # compilation test
    if call in [helpers.torch_call]:
        # pytorch scripting does not support string devices
        return
    helpers.assert_compilable(ivy.variable)
Example #3
def test_container_to_disk_shuffle_and_from_disk():
    for lib, call in helpers.calls:
        if call in [helpers.tf_graph_call, helpers.mx_graph_call]:
            # container disk saving requires eager execution
            continue
        save_filepath = 'container_on_disk.hdf5'
        dict_in = {'a': ivy.array([1, 2, 3], f=lib),
                   'b': {'c': ivy.array([1, 2, 3], f=lib), 'd': ivy.array([1, 2, 3], f=lib)}}
        container = Container(dict_in)

        # saving
        container.to_disk(save_filepath, max_batch_size=3)
        assert os.path.exists(save_filepath)

        # shuffling
        Container.shuffle_h5_file(save_filepath)

        # loading
        container_shuffled = Container.from_disk(save_filepath, lib, slice(3))

        # testing
        data = np.array([1, 2, 3])
        random.seed(0)
        random.shuffle(data)

        assert (ivy.to_numpy(container_shuffled['a'], lib) == data).all()
        assert (ivy.to_numpy(container_shuffled.a, lib) == data).all()
        assert (ivy.to_numpy(container_shuffled['b']['c'], lib) == data).all()
        assert (ivy.to_numpy(container_shuffled.b.c, lib) == data).all()
        assert (ivy.to_numpy(container_shuffled['b']['d'], lib) == data).all()
        assert (ivy.to_numpy(container_shuffled.b.d, lib) == data).all()

        os.remove(save_filepath)
Example #4
    def setup_primitive_scene(self):

        # shape matrices
        shape_matrices = ivy.concatenate([ivy.reshape(ivy.array(obj.get_matrix(), 'float32'), (1, 3, 4))
                                              for obj in self._objects], 0)

        # shape dims
        x_dims = ivy.concatenate([ivy.reshape(ivy.array(
            obj.get_bounding_box()[1] - obj.get_bounding_box()[0], 'float32'), (1, 1)) for obj in self._objects], 0)
        y_dims = ivy.concatenate([ivy.reshape(ivy.array(
            obj.get_bounding_box()[3] - obj.get_bounding_box()[2], 'float32'), (1, 1)) for obj in self._objects], 0)
        z_dims = ivy.concatenate([ivy.reshape(ivy.array(
            obj.get_bounding_box()[5] - obj.get_bounding_box()[4], 'float32'), (1, 1)) for obj in self._objects], 0)
        shape_dims = ivy.concatenate((x_dims, y_dims, z_dims), -1)

        # primitive scene visualization
        if self._with_primitive_scene_vis:
            scene_vis = [Shape.create(PrimitiveShape.CUBOID, ivy.to_numpy(shape_dim).tolist())
                         for shape_dim in shape_dims]
            [obj.set_matrix(ivy.to_numpy(shape_mat).reshape(-1).tolist())
             for shape_mat, obj in zip(shape_matrices, scene_vis)]
            [obj.set_transparency(0.5) for obj in scene_vis]

        # sdf
        primitive_scene = PrimitiveScene(cuboid_ext_mats=ivy.inv(ivy_mech.make_transformation_homogeneous(
            shape_matrices))[..., 0:3, :], cuboid_dims=shape_dims)
        self.sdf = primitive_scene.sdf
Example #5
def test_is_variable(object_in, dtype_str, dev_str, call):
    if call is helpers.tf_graph_call:
        # cannot create variables as part of compiled tf graph
        pytest.skip()
    if call in [helpers.mx_call] and dtype_str == 'int16':
        # mxnet does not support int16
        pytest.skip()
    if len(object_in) == 0 and call is helpers.mx_call:
        # mxnet does not support 0-dimensional variables
        pytest.skip()
    # smoke test
    non_var = ivy.array(object_in, dtype_str, dev_str)
    var = ivy.variable(ivy.array(object_in, dtype_str, dev_str))
    non_var_res = ivy.is_variable(non_var)
    var_res = ivy.is_variable(var)
    # type test
    assert ivy.is_array(non_var)
    if call is not helpers.np_call:
        assert ivy.is_variable(var)
    if call in [helpers.np_call, helpers.jnp_call]:
        # numpy and jax do not support flagging variables
        pytest.skip()
    # value test
    assert non_var_res is False
    assert var_res is True
    # compilation test
    helpers.assert_compilable(ivy.is_variable)
Example #6
def test_container_shuffle():
    for lib, call in helpers.calls:
        if call is helpers.tf_graph_call:
            # tf.random.set_seed is not compiled, so the shuffle is not aligned between container items.
            continue
        if call is helpers.mx_graph_call:
            fn = func
        else:
            fn = lambda x: x
        dict_in = {
            'a': ivy.array([1, 2, 3], f=lib),
            'b': {
                'c': ivy.array([1, 2, 3], f=lib),
                'd': ivy.array([1, 2, 3], f=lib)
            }
        }
        container = Container(dict_in)
        container_shuffled = container.shuffle(0)
        data = ivy.array([1, 2, 3], f=lib)
        ivy.core.random.seed(f=lib)
        shuffled_data = ivy.core.random.shuffle(data)

        assert np.array(fn(container_shuffled['a']) == fn(shuffled_data)).all()
        assert np.array(fn(container_shuffled.a) == fn(shuffled_data)).all()
        assert np.array(
            fn(container_shuffled['b']['c']) == fn(shuffled_data)).all()
        assert np.array(fn(container_shuffled.b.c) == fn(shuffled_data)).all()
        assert np.array(
            fn(container_shuffled['b']['d']) == fn(shuffled_data)).all()
        assert np.array(fn(container_shuffled.b.d) == fn(shuffled_data)).all()
Example #7
 def cap(self):
     rgb = ivy.array(np.load(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                          'esm_no_sim/rgb_{}.npy'.format(str(self._time).zfill(2)))))
     depth = ivy.array(np.load(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                            'esm_no_sim/depth_{}.npy'.format(str(self._time).zfill(2)))))
     self._time += 1
     return depth, rgb
Example #8
 def pose_spherical(theta, phi, radius):
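     # build the camera-to-world matrix: translate by radius, rotate by phi and theta, then re-map axes with two constant matrices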
     c2w_ = trans_t(radius)
     c2w_ = rot_phi(phi / 180. * np.pi) @ c2w_
     c2w_ = rot_theta(theta / 180. * np.pi) @ c2w_
     c2w_ = ivy.array([[-1, 0, 0, 0], [0, 0, 1, 0], [0, 1, 0, 0], [0, 0, 0, 1]], 'float32') @ c2w_
     c2w_ = c2w_ @ ivy.array([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]], 'float32')
     return c2w_
Example #9
    def __init__(self, num_iters, compile_flag, interactive, dev_str, f):

        # ivy
        f = choose_random_framework() if f is None else f
        ivy.set_framework(f)
        ivy.seed(0)

        # device
        if dev_str is None:
            dev_str = 'gpu:0' if ivy.gpu_is_available() else 'cpu'
        self._dev_str = dev_str

        # Load input images and poses
        this_dir = os.path.dirname(os.path.realpath(__file__))
        data = np.load(os.path.join(this_dir, 'nerf_data/tiny_nerf_data.npz'))
        images = ivy.array(data['images'], 'float32', dev_str)
        inv_ext_mats = ivy.array(data['poses'], 'float32', dev_str)

        # intrinsics
        focal_lengths = ivy.array(np.tile(np.reshape(data['focal'], (1, 1)), [100, 2]), 'float32', dev_str)
        self._img_dims = images.shape[1:3]
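        # principal point offsets at the image centre, i.e. (dim - 1) / 2 in pixel co-ordinates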
        pp_offsets = ivy.tile(ivy.array([[dim/2 - 0.5 for dim in self._img_dims]], dev_str=dev_str), [100, 1])

        # train data
        self._images = images[:100, ..., :3]
        self._intrinsics = ivy_vision.focal_lengths_and_pp_offsets_to_intrinsics_object(
            focal_lengths, pp_offsets, self._img_dims)
        self._cam_geoms = ivy_vision.inv_ext_mat_and_intrinsics_to_cam_geometry_object(
            inv_ext_mats[:100, 0:3], self._intrinsics)

        # test data
        self._test_img = images[101]
        self._test_cam_geom = ivy_vision.inv_ext_mat_and_intrinsics_to_cam_geometry_object(
            inv_ext_mats[101, 0:3], self._intrinsics.slice(0))

        # train config
        self._embed_length = 6
        self._lr = 5e-4
        self._num_samples = 64
        self._num_iters = num_iters

        # log config
        self._interactive = interactive
        self._log_freq = 1
        self._vis_freq = 25 if self._interactive else -1
        self._vis_log_dir = 'nerf_renderings'
        if os.path.exists(self._vis_log_dir):
            shutil.rmtree(self._vis_log_dir)
        os.makedirs(self._vis_log_dir)

        # model
        self._model = Model(4, 256, self._embed_length, dev_str)

        # compile
        if compile_flag:
            rays_o, rays_d = self._get_rays(self._cam_geoms.slice(0))
            target = self._images[0]
            self._loss_fn = ivy.compile_fn(self._loss_fn, False,
                                           example_inputs=[self._model, rays_o, rays_d, target, self._model.v])
Example #10
 def __init__(self, base_inv_ext_mat=None):
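     # Denavit-Hartenberg parameters (link lengths a_s, link offsets d_s, link twists alpha_s) for a two-joint arm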
     a_s = ivy.array([0.5, 0.5])
     d_s = ivy.array([0., 0.])
     alpha_s = ivy.array([0., 0.])
     dh_joint_scales = ivy.ones((2, ))
     dh_joint_offsets = ivy.array([-np.pi / 2, 0.])
     super().__init__(a_s, d_s, alpha_s, dh_joint_scales,
                      dh_joint_offsets, base_inv_ext_mat)
Example #11
def test_container_to_random():
    for lib, call in helpers.calls:
        dict_in = {'a': ivy.array([1.], f=lib),
                   'b': {'c': ivy.array([2.], f=lib), 'd': ivy.array([3.], f=lib)}}
        container = Container(dict_in)
        random_container = container.to_random(lib)
        for (key, value), orig_value in zip(random_container.to_iterator(),
                                            [ivy.array([2], f=lib), ivy.array([3], f=lib), ivy.array([4], f=lib)]):
            assert call(ivy.shape, value, f=lib) == call(ivy.shape, orig_value, f=lib)
Example #12
def test_container_map():
    for lib, call in helpers.calls:
        dict_in = {'a': ivy.array([1], f=lib),
                   'b': {'c': ivy.array([2], f=lib), 'd': ivy.array([3], f=lib)}}
        container = Container(dict_in)
        container_iterator = container.map(lambda x, _: x + 1).to_iterator()
        for (key, value), expected_value in zip(container_iterator,
                                                [ivy.array([2], f=lib), ivy.array([3], f=lib), ivy.array([4], f=lib)]):
            assert call(lambda x: x, value) == call(lambda x: x, expected_value)
Example #13
 def __init__(self, pr_obj):
     super().__init__(pr_obj)
     self._img_dims = pr_obj.get_resolution()
     if isinstance(pr_obj, VisionSensor):
         pp_offsets = ivy.array([item/2 - 0.5 for item in self._img_dims], 'float32')
         persp_angles = ivy.array([pr_obj.get_perspective_angle() * math.pi/180]*2, 'float32')
         intrinsics = ivy_vision.persp_angles_and_pp_offsets_to_intrinsics_object(
             persp_angles, pp_offsets, self._img_dims)
         self.calib_mat = intrinsics.calib_mats
         self.inv_calib_mat = intrinsics.inv_calib_mats
Example #14
 def test_value(self, dev_str, dtype_str, call):
     input_ = ivy.array([[0.5, 1., 0.3], [0.7, 0.3, 0.8], [0.4, 0.9, 0.2]],
                        dev_str=dev_str,
                        dtype_str=dtype_str)[None, None, :, :]
     kernel = ivy.array([[0., 1., 0.], [1., 1., 1.], [0., 1., 0.]],
                        dev_str=dev_str,
                        dtype_str=dtype_str)
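     # greyscale erosion with a flat, cross-shaped structuring element: each output pixel is the minimum of the input over the kernel support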
     expected = np.array([[0.5, 0.3, 0.3], [0.3, 0.3, 0.2],
                          [0.4, 0.2, 0.2]])[None, None, :, :]
     assert np.allclose(call(erosion, input_, kernel), expected)
Example #15
def test_container_expand_dims():
    for lib, call in helpers.calls:
        if call is helpers.mx_graph_call:
            fn = func
        else:
            fn = lambda x: x
        dict_in = {
            'a': ivy.array([1], f=lib),
            'b': {
                'c': ivy.array([2], f=lib),
                'd': ivy.array([3], f=lib)
            }
        }
        container = Container(dict_in)
        container_expanded_dims = container.expand_dims(0)
        assert (fn(container_expanded_dims['a']) == fn(ivy.array([[1]],
                                                                 f=lib)))[0, 0]
        assert (fn(container_expanded_dims.a) == fn(ivy.array([[1]],
                                                              f=lib)))[0, 0]
        assert (fn(container_expanded_dims['b']['c']) == fn(
            ivy.array([[2]], f=lib)))[0, 0]
        assert (fn(container_expanded_dims.b.c) == fn(ivy.array([[2]],
                                                                f=lib)))[0, 0]
        assert (fn(container_expanded_dims['b']['d']) == fn(
            ivy.array([[3]], f=lib)))[0, 0]
        assert (fn(container_expanded_dims.b.d) == fn(ivy.array([[3]],
                                                                f=lib)))[0, 0]
Example #16
    def __init__(self, interactive, try_use_sim):
        super().__init__(interactive, try_use_sim)

        # initialize scene
        if self.with_pyrep:
            for i in range(6):
                self._vision_sensors[i].remove()
                self._vision_sensor_bodies[i].remove()
                [item.remove() for item in self._vision_sensor_rays[i]]
            self._default_camera.set_position(np.array([-2.3518, 4.3953, 2.8949]))
            self._default_camera.set_orientation(np.array([i*np.pi/180 for i in [112.90, 27.329, -10.978]]))
            inv_ext_mat = ivy.reshape(ivy.array(self._default_vision_sensor.get_matrix(), 'float32'), (3, 4))
            self.default_camera_ext_mat_homo = ivy.inv(ivy_mech.make_transformation_homogeneous(inv_ext_mat))

            # public objects
            self.omcam = SimCam(self._spherical_vision_sensor)

            # wait for user input
            self._user_prompt('\nInitialized scene with an omni-directional camera in the centre.\n\n'
                              'You can click on the omni directional camera, which appears as a small floating black sphere, '
                              'then select the box icon with four arrows in the top panel of the simulator, '
                              'and then drag the camera around dynamically.\n'
                              'Starting to drag and then holding ctrl allows you to also drag the camera up and down. \n\n'
                              'This demo enables you to capture 10 different omni-directional images from the camera, '
                              'and render the associated 10 point clouds in an open3D visualizer.\n\n'
                              'Both visualizers can be translated and rotated by clicking either the left mouse button or the wheel, '
                              'and then dragging the mouse.\n'
                              'Scrolling the mouse wheel zooms the view in and out.\n\n'
                              'Both visualizers can be rotated and zoomed by clicking either the left mouse button or the wheel, '
                              'and then dragging with the mouse.\n\n'
                              'Press enter in the terminal to use method ivy_mech.polar_coords_to_cartesian_coords and '
                              'show the first cartesian point cloud reconstruction of the scene, '
                              'converted from the polar co-ordinates captured from the omni-directional camera.\n\n')

        else:
            # public objects
            self.omcam = DummyOmCam()
            self.default_camera_ext_mat_homo = ivy.array(
                [[-0.872, -0.489,  0., 0.099],
                 [-0.169,  0.301, -0.938, 0.994],
                 [0.459, -0.818, -0.346, 5.677],
                 [0., 0., 0., 1.]])

            # message
            print('\nInitialized dummy scene with an omni-directional camera in the centre.'
                  '\nClose the visualization window to use method ivy_mech.polar_coords_to_cartesian_coords and show '
                  'a cartesian point cloud reconstruction of the scene, '
                  'converted from the omni-directional camera polar co-ordinates\n')

            # plot scene before rotation
            if interactive:
                plt.imshow(mpimg.imread(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                                                     'ptcc_no_sim', 'before_capture.png')))
                plt.show()
Example #17
 def _init(self, num_processes):
     self._x = [ivy.array([0, 1]), ivy.array([2, 3, 4, 5, 6, 7, 8, 9])]
     dataset_container = ivy.Container({'x': self._x})
     dataset = Dataset(dataset_container,
                       'base',
                       dataset_container.shape[0],
                       num_processes=num_processes)
     dataset = dataset.unbatch('unbatched', num_processes=num_processes)
     self._dataset = dataset.batch('batched',
                                   3,
                                   num_processes=num_processes)
Example #18
    def __init__(self, base_inv_ext_mat=None):
        """
        Initialize Kinova Mico robot manipulator instance.
        Denavit–Hartenberg parameters inferred from KINOVA_MICO_Robotic_arm_user_guide.pdf
        Joint scales and offsets inferred from JACO²-6DOF-Advanced-Specification-Guide.pdf
        Both of these PDFs are included in this module for reference.

        :param base_inv_ext_mat: Inverse extrinsic matrix of the robot base *[3,4]*
        :type base_inv_ext_mat: array, optional
        """

        # length params
        # KINOVA_MICO_Robotic_arm_user_guide.pdf
        # page 50

        d1 = 0.2755
        d2 = 0.29
        d3 = 0.1233
        d4 = 0.0741
        d5 = 0.0741
        d6 = 0.16
        e2 = 0.007

        # alternate params
        # KINOVA_MICO_Robotic_arm_user_guide.pdf
        # page 53

        aa = 30 * _math.pi / 180
        sa = _math.sin(aa)
        s2a = _math.sin(2 * aa)
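        # note: sa / s2a = sin(aa) / sin(2 * aa) = 1 / (2 * cos(aa))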
        d4b = d3 + sa / s2a * d4
        d5b = sa / s2a * d4 + sa / s2a * d5
        d6b = sa / s2a * d5 + d6

        # dh params
        # KINOVA_MICO_Robotic_arm_user_guide.pdf
        # page 55

        a_s = _ivy.array([0, d2, 0, 0, 0, 0])
        d_s = _ivy.array([d1, 0, -e2, -d4b, -d5b, -d6b])
        alpha_s = _ivy.array([_math.pi / 2, _math.pi, _math.pi / 2, 2 * aa, 2 * aa, _math.pi])

        # dh joint angles convention based on:
        # JACO²-6DOF-Advanced-Specification-Guide.pdf
        # (Unable to find Mico version, but Jaco convention is the same)
        # page 10

        dh_joint_scales = _ivy.array([-1., 1., 1., 1., 1., 1.])
        dh_joint_offsets = _ivy.array([0., _math.pi / 2, -_math.pi / 2, 0., _math.pi, -_math.pi / 2])

        # call constructor
        super().__init__(a_s, d_s, alpha_s, dh_joint_scales, dh_joint_offsets, base_inv_ext_mat)
Example #19
def test_container_to_iterator():
    for lib, call in helpers.calls:
        if call is helpers.mx_graph_call:
            fn = func
        else:
            fn = lambda x: x
        dict_in = {'a': ivy.array([1], f=lib),
                   'b': {'c': ivy.array([2], f=lib), 'd': ivy.array([3], f=lib)}}
        container = Container(dict_in)
        container_iterator = container.to_iterator()
        for (key, value), expected_value in zip(container_iterator,
                                                [ivy.array([1], f=lib), ivy.array([2], f=lib), ivy.array([3], f=lib)]):
            assert fn(value) == fn(expected_value)
Example #20
def test_container_at_key_chain(dev_str, call):
    dict_in = {
        'a': ivy.array([1]),
        'b': {
            'c': ivy.array([2]),
            'd': ivy.array([3])
        }
    }
    container = Container(dict_in)
    sub_container = container.at_key_chain('b')
    assert (sub_container['c'] == ivy.array([2]))[0]
    sub_container = container.at_key_chain('b/c')
    assert (sub_container == ivy.array([2]))[0]
Example #21
def test_container_at_key_chain():
    for lib, call in helpers.calls:
        if call is helpers.mx_graph_call:
            fn = func
        else:
            fn = lambda x: x
        dict_in = {'a': ivy.array([1], f=lib),
                   'b': {'c': ivy.array([2], f=lib), 'd': ivy.array([3], f=lib)}}
        container = Container(dict_in)
        sub_container = container.at_key_chain('b')
        assert (fn(sub_container['c']) == fn(ivy.array([2], f=lib)))[0]
        sub_container = container.at_key_chain('b/c')
        assert (fn(sub_container) == fn(ivy.array([2], f=lib)))[0]
Example #22
def main(interactive=True, try_use_sim=True, f=None):

    # config
    this_dir = os.path.dirname(os.path.realpath(__file__))
    f = choose_random_framework(excluded=['numpy']) if f is None else f
    set_framework(f)
    sim = Simulator(interactive, try_use_sim)
    lr = 0.5
    num_anchors = 3
    num_sample_points = 100

    # spline start
    anchor_points = ivy.cast(
        ivy.expand_dims(ivy.linspace(0, 1, 2 + num_anchors), -1), 'float32')
    query_points = ivy.cast(
        ivy.expand_dims(ivy.linspace(0, 1, num_sample_points), -1), 'float32')

    # learnable parameters
    robot_start_config = ivy.array(ivy.cast(sim.robot_start_config, 'float32'))
    robot_target_config = ivy.array(
        ivy.cast(sim.robot_target_config, 'float32'))
    learnable_anchor_vals = ivy.variable(
        ivy.cast(
            ivy.transpose(
                ivy.linspace(robot_start_config, robot_target_config,
                             2 + num_anchors)[..., 1:-1], (1, 0)), 'float32'))

    # optimizer
    optimizer = ivy.SGD(lr=lr)

    # optimize
    it = 0
    colliding = True
    clearance = 0
    joint_query_vals = None
    while colliding:
        total_cost, grads, joint_query_vals, link_positions, sdf_vals = ivy.execute_with_gradients(
            lambda xs: compute_cost_and_sdfs(
                xs['w'], anchor_points, robot_start_config,
                robot_target_config, query_points, sim),
            Container({'w': learnable_anchor_vals}))
        colliding = ivy.reduce_min(sdf_vals[2:]) < clearance
        sim.update_path_visualization(
            link_positions, sdf_vals,
            os.path.join(this_dir, 'msp_no_sim', 'path_{}.png'.format(it)))
        learnable_anchor_vals = optimizer.step(
            Container({'w': learnable_anchor_vals}), grads)['w']
        it += 1
    sim.execute_motion(joint_query_vals)
    sim.close()
    unset_framework()
Example #23
 def _init(self, array_shape, num_processes):
     x = [
         ivy.array([[0], [1], [2]]),
         ivy.array([[3], [4], [5]]),
         ivy.array([[6], [7], [8]])
     ]
     self._x = [ivy.reshape(item, array_shape) for item in x]
     dataset_container = ivy.Container({'x': x})
     dataset = Dataset(dataset_container,
                       'base',
                       dataset_container.shape[0],
                       num_processes=num_processes)
     self._dataset = dataset.unbatch('unbatched',
                                     num_processes=num_processes)
Example #24
def test_container_with_entries_as_lists():
    for lib, call in helpers.calls:
        if call in [helpers.tf_graph_call, helpers.mx_graph_call]:
            # to_list() requires eager execution
            continue
        dict_in = {'a': ivy.array([1], f=lib),
                   'b': {'c': ivy.array([2.], f=lib), 'd': 'some string'}}
        container = Container(dict_in)
        container_w_list_entries = container.with_entries_as_lists(lib)
        for (key, value), expected_value in zip(container_w_list_entries.to_iterator(),
                                                [[1],
                                                 [2.],
                                                 'some string']):
            assert value == expected_value
Example #25
def test_incremental_rotation(dev_str, call):
    if call in [helpers.np_call, helpers.jnp_call, helpers.mx_call]:
        # convolutions not yet implemented in numpy or jax
        # mxnet is unable to stack or expand zero-dimensional tensors
        pytest.skip()
    batch_size = 1
    num_timesteps = 1
    num_cams = 1
    num_feature_channels = 3
    image_dims = [3, 3]
    esm = ESM(omni_image_dims=[10, 20], smooth_mean=False)
    empty_memory = esm.empty_memory(batch_size, num_timesteps)
    empty_obs = _get_dummy_obs(batch_size, num_timesteps, num_cams, image_dims, num_feature_channels, empty=True)
    rel_rot_vec_pose = ivy.array([[[0., 0., 0., 0., 0.1, 0.]]])
    empty_obs['control_mean'] = rel_rot_vec_pose
    empty_obs['agent_rel_mat'] = ivy_mech.rot_vec_pose_to_mat_pose(rel_rot_vec_pose)

    first_obs = _get_dummy_obs(batch_size, num_timesteps, num_cams, image_dims, num_feature_channels, ones=True)
    memory_1 = esm(first_obs, empty_memory, batch_size=batch_size, num_timesteps=num_timesteps, num_cams=num_cams,
                   image_dims=image_dims)
    memory_2 = esm(empty_obs, memory_1, batch_size=batch_size, num_timesteps=num_timesteps, num_cams=num_cams,
                   image_dims=image_dims)
    memory_3 = esm(empty_obs, memory_2, batch_size=batch_size, num_timesteps=num_timesteps, num_cams=num_cams,
                   image_dims=image_dims)

    assert not np.allclose(memory_1.mean, memory_3.mean)
Example #26
File: test_layers.py Project: ivy-dl/ivy
def test_linear_layer(bs_ic_oc_target, with_v, dtype_str, tensor_fn, dev_str,
                      call):
    # smoke test
    batch_shape, input_channels, output_channels, target = bs_ic_oc_target
    x = ivy.cast(
        ivy.linspace(ivy.zeros(batch_shape), ivy.ones(batch_shape),
                     input_channels), 'float32')
    if with_v:
        np.random.seed(0)
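        # Glorot / Xavier uniform initialisation bound: sqrt(6 / (fan_in + fan_out))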
        wlim = (6 / (output_channels + input_channels))**0.5
        w = ivy.variable(
            ivy.array(
                np.random.uniform(-wlim, wlim,
                                  (output_channels, input_channels)),
                'float32'))
        b = ivy.variable(ivy.zeros([output_channels]))
        v = Container({'w': w, 'b': b})
    else:
        v = None
    linear_layer = ivy.Linear(input_channels, output_channels, v=v)
    ret = linear_layer(x)
    # type test
    assert ivy.is_array(ret)
    # cardinality test
    assert ret.shape == tuple(batch_shape + [output_channels])
    # value test
    if not with_v:
        return
    assert np.allclose(call(linear_layer, x), np.array(target))
    # compilation test
    if call is helpers.torch_call:
        # pytorch scripting does not support **kwargs
        return
    helpers.assert_compilable(linear_layer)
Example #27
    def __init__(self, lr=1e-4, beta1=0.9, beta2=0.999, epsilon=1e-07, compile_step=False, dev_str=None):
        """
        Construct an ADAM optimizer.

        :param lr: Learning rate, default is 1e-4.
        :type lr: float, optional
        :param beta1: Forgetting factor for the gradient (first moment estimate), default is 0.9
        :type beta1: float, optional
        :param beta2: Forgetting factor for the squared gradient (second moment estimate), default is 0.999
        :type beta2: float, optional
        :param epsilon: Divisor during the Adam update, preventing division by zero, default is 1e-07
        :type epsilon: float, optional
        :param compile_step: Whether to compile the optimizer step, default is False.
        :type compile_step: bool, optional
        :param dev_str: Device on which to create the optimizer's variables: 'cuda:0', 'cuda:1', 'cpu' etc.
        :type dev_str: str, optional
        """
        Optimizer.__init__(self, lr, compile_step)
        self._beta1 = beta1
        self._beta2 = beta2
        self._epsilon = epsilon
        self._mw = None
        self._vw = None
        self._first_pass = True
        self._step = ivy.array([0], dev_str=dev_str)
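A hedged usage sketch follows, relating the stored state above to ivy.adam_update, whose call signature appears in Example #1. It assumes a gradient-supporting backend has already been set, and that Container is importable from ivy.core.container; both the backend setup and the import path are assumptions, not confirmed API.

import ivy
from ivy.core.container import Container  # import path assumed

# assumes a framework has already been set, e.g. via ivy.set_framework(...)
ws = Container({'w': ivy.variable(ivy.array([1., 2., 3.]))})
dcdws = Container({'w': ivy.array([0.1, 0.2, 0.3])})
mw = dcdws                           # first-moment estimate on the first pass
vw = dcdws.map(lambda x, _: x ** 2)  # second-moment estimate on the first pass
ws, mw, vw = ivy.adam_update(ws, dcdws, 1e-4, mw, vw, ivy.array(1))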
Example #28
def make_transformation_homogeneous(matrices, batch_shape=None, dev_str=None):
    """
    Append the last row [0., 0., 0., 1.] to a set of 3x4 non-homogeneous matrices to make them homogeneous.

    :param matrices: set of 3x4 non-homogeneous matrices *[batch_shape,3,4]*
    :type matrices: array
    :param batch_shape: Shape of batch. Inferred from inputs if None.
    :type batch_shape: sequence of ints, optional
    :param dev_str: device on which to create the array 'cuda:0', 'cuda:1', 'cpu' etc. Same as matrices if None.
    :type dev_str: str, optional
    :return: 4x4 Homogeneous matrices *[batch_shape,4,4]*
    """

    if batch_shape is None:
        batch_shape = matrices.shape[:-2]

    if dev_str is None:
        dev_str = _ivy.dev_str(matrices)

    # shapes as list
    batch_shape = list(batch_shape)
    num_batch_dims = len(batch_shape)

    # BS x 1 x 4
    last_row = _ivy.tile(
        _ivy.reshape(_ivy.array([0., 0., 0., 1.], dev_str=dev_str),
                     [1] * num_batch_dims + [1, 4]), batch_shape + [1, 1])

    # BS x 4 x 4
    return _ivy.concatenate((matrices, last_row), -2)
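A minimal usage sketch, assuming the numpy backend can be activated with the ivy.numpy.use context manager seen in Example #29; the pose values are arbitrary and only the output shape and the appended last row are checked.

import numpy as np
import ivy
import ivy_mech

# a single 3x4 pose matrix (no batch dimensions)
mat_3x4 = np.array([[1., 0., 0., 0.5],
                    [0., 1., 0., 0.2],
                    [0., 0., 1., 0.1]], dtype='float32')

with ivy.numpy.use:
    mat_4x4 = ivy_mech.make_transformation_homogeneous(mat_3x4)

# the appended last row is the constant [0., 0., 0., 1.]
assert mat_4x4.shape == (4, 4)
assert np.allclose(mat_4x4[-1], np.array([0., 0., 0., 1.]))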
Example #29
def main(interactive=True, try_use_sim=True, f=None):
    f = choose_random_framework() if f is None else f
    set_framework(f)
    sim = Simulator(interactive, try_use_sim)
    vis = Visualizer(ivy.to_numpy(sim.default_camera_ext_mat_homo))
    pix_per_deg = 2
    om_pix = sim.get_pix_coords()
    plr_degs = om_pix / pix_per_deg
    plr_rads = plr_degs * math.pi / 180
    iterations = 10 if sim.with_pyrep else 1
    for _ in range(iterations):
        depth, rgb = sim.omcam.cap()
        plr = ivy.concatenate([plr_rads, depth], -1)
        xyz_wrt_cam = ivy_mech.polar_to_cartesian_coords(plr)
        xyz_wrt_cam = ivy.reshape(xyz_wrt_cam, (-1, 3))
        xyz_wrt_cam_homo = ivy_mech.make_coordinates_homogeneous(xyz_wrt_cam)
        inv_ext_mat_trans = ivy.transpose(sim.omcam.get_inv_ext_mat(), (1, 0))
        xyz_wrt_world = ivy.matmul(xyz_wrt_cam_homo, inv_ext_mat_trans)[..., 0:3]
        with ivy.numpy.use:
            omni_cam_inv_ext_mat = ivy_mech.make_transformation_homogeneous(
                ivy.to_numpy(sim.omcam.get_inv_ext_mat()))
        vis.show_point_cloud(xyz_wrt_world, rgb, interactive,
                             sphere_inv_ext_mats=[omni_cam_inv_ext_mat], sphere_radii=[0.025])
        if not interactive:
            sim.omcam.set_pos(sim.omcam.get_pos()
                               + ivy.array([-0.01, 0.01, 0.]))
    sim.close()
    unset_framework()
Example #30
    def _group_tensor_into_windowed_tensor(self, x, valid_first_frame):
        if self._window_size == 1:
            valid_first_frame_pruned = ivy.cast(valid_first_frame[:, 0],
                                                'bool')
        else:
            valid_first_frame_pruned = ivy.cast(
                valid_first_frame[:1 - self._window_size, 0], 'bool')
        if ivy.reduce_sum(ivy.cast(valid_first_frame_pruned, 'int32'))[0] == 0:
            valid_first_frame_pruned =\
                ivy.cast(ivy.one_hot(0, self._sequence_lengths[0] - self._window_size + 1), 'bool')
        window_idxs_single = ivy.indices_where(valid_first_frame_pruned)

        gather_idxs_list = list()
        for w_idx in window_idxs_single:
            gather_idxs_list.append(
                ivy.expand_dims(
                    ivy.arange(w_idx[0] + self._window_size, w_idx[0], 1), 0))
        gather_idxs = ivy.concatenate(gather_idxs_list, 0)
        gather_idxs = ivy.reshape(gather_idxs, (-1, 1))
        num_valid_windows_for_seq = ivy.shape(window_idxs_single)[0:1]
        return ivy.reshape(
            ivy.gather_nd(x, gather_idxs),
            ivy.concatenate(
                (num_valid_windows_for_seq, ivy.array(
                    [self._window_size]), ivy.shape(x)[1:]), 0))