Example #1
File: run_through.py Project: ivy-dl/gym
def main(env_str=None, visualize=True, f=None):

    # Framework Setup #
    # ----------------#

    # choose random framework
    f = choose_random_framework() if f is None else f
    ivy.set_framework(f)

    # get environment
    env = getattr(ivy_gym, env_str)()

    # run environment steps
    env.reset()
    ac_dim = env.action_space.shape[0]
    for _ in range(250):
        ac = ivy.random_uniform(-1, 1, (ac_dim, ))
        env.step(ac)
        if visualize:
            env.render()
    env.close()
    ivy.unset_framework()

    # message
    print('End of Run Through Demo!')
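A minimal sketch of invoking this demo directly, assuming the imports used above (ivy, ivy_gym and choose_random_framework) are in scope; 'CartPole' is one of the ivy_gym environment names listed in Example #12 and stands in for any valid choice:

if __name__ == '__main__':
    # hypothetical headless invocation of the demo above
    main(env_str='CartPole', visualize=False)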
Example #2
def main(interactive=True, try_use_sim=True, f=None):
    f = choose_random_framework() if f is None else f
    set_framework(f)
    sim = Simulator(interactive, try_use_sim)
    vis = Visualizer(ivy.to_numpy(sim.default_camera_ext_mat_homo))
    pix_per_deg = 2
    om_pix = sim.get_pix_coords()
    plr_degs = om_pix / pix_per_deg
    plr_rads = plr_degs * math.pi / 180
    iterations = 10 if sim.with_pyrep else 1
    for _ in range(iterations):
        depth, rgb = sim.omcam.cap()
        plr = ivy.concatenate([plr_rads, depth], -1)
        xyz_wrt_cam = ivy_mech.polar_to_cartesian_coords(plr)
        xyz_wrt_cam = ivy.reshape(xyz_wrt_cam, (-1, 3))
        xyz_wrt_cam_homo = ivy_mech.make_coordinates_homogeneous(xyz_wrt_cam)
        inv_ext_mat_trans = ivy.transpose(sim.omcam.get_inv_ext_mat(), (1, 0))
        xyz_wrt_world = ivy.matmul(xyz_wrt_cam_homo, inv_ext_mat_trans)[..., 0:3]
        with ivy.numpy.use:
            omni_cam_inv_ext_mat = ivy_mech.make_transformation_homogeneous(
                ivy.to_numpy(sim.omcam.get_inv_ext_mat()))
        vis.show_point_cloud(xyz_wrt_world, rgb, interactive,
                             sphere_inv_ext_mats=[omni_cam_inv_ext_mat], sphere_radii=[0.025])
        if not interactive:
            sim.omcam.set_pos(sim.omcam.get_pos()
                               + ivy.array([-0.01, 0.01, 0.]))
    sim.close()
    unset_framework()
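For reference, a standalone NumPy sketch of the polar-to-Cartesian conversion that ivy_mech.polar_to_cartesian_coords performs above, assuming an (azimuth, elevation, radius) channel order to match the concatenation of pixel angles with depth; the library's exact axis convention is an assumption here:

import numpy as np

def polar_to_cartesian(plr):
    # plr[..., 0] = azimuth, plr[..., 1] = elevation, plr[..., 2] = radius
    phi, theta, r = plr[..., 0], plr[..., 1], plr[..., 2]
    x = r * np.cos(theta) * np.sin(phi)
    y = r * np.sin(theta)
    z = r * np.cos(theta) * np.cos(phi)
    return np.stack([x, y, z], -1)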
Example #3
    def __init__(self, num_iters, compile_flag, interactive, dev_str, f):

        # ivy
        f = choose_random_framework() if f is None else f
        ivy.set_framework(f)
        ivy.seed(0)

        # device
        if dev_str is None:
            dev_str = 'gpu:0' if ivy.gpu_is_available() else 'cpu'
        self._dev_str = dev_str

        # Load input images and poses
        this_dir = os.path.dirname(os.path.realpath(__file__))
        data = np.load(os.path.join(this_dir, 'nerf_data/tiny_nerf_data.npz'))
        images = ivy.array(data['images'], 'float32', dev_str)
        inv_ext_mats = ivy.array(data['poses'], 'float32', dev_str)

        # intrinsics
        focal_lengths = ivy.array(np.tile(np.reshape(data['focal'], (1, 1)), [100, 2]), 'float32', dev_str)
        self._img_dims = images.shape[1:3]
        pp_offsets = ivy.tile(ivy.array([[dim/2 - 0.5 for dim in self._img_dims]], dev_str=dev_str), [100, 1])

        # train data
        self._images = images[:100, ..., :3]
        self._intrinsics = ivy_vision.focal_lengths_and_pp_offsets_to_intrinsics_object(
            focal_lengths, pp_offsets, self._img_dims)
        self._cam_geoms = ivy_vision.inv_ext_mat_and_intrinsics_to_cam_geometry_object(
            inv_ext_mats[:100, 0:3], self._intrinsics)

        # test data
        self._test_img = images[101]
        self._test_cam_geom = ivy_vision.inv_ext_mat_and_intrinsics_to_cam_geometry_object(
            inv_ext_mats[101, 0:3], self._intrinsics.slice(0))

        # train config
        self._embed_length = 6
        self._lr = 5e-4
        self._num_samples = 64
        self._num_iters = num_iters

        # log config
        self._interactive = interactive
        self._log_freq = 1
        self._vis_freq = 25 if self._interactive else -1
        self._vis_log_dir = 'nerf_renderings'
        if os.path.exists(self._vis_log_dir):
            shutil.rmtree(self._vis_log_dir)
        os.makedirs(self._vis_log_dir)

        # model
        self._model = Model(4, 256, self._embed_length, dev_str)

        # compile
        if compile_flag:
            rays_o, rays_d = self._get_rays(self._cam_geoms.slice(0))
            target = self._images[0]
            self._loss_fn = ivy.compile_fn(self._loss_fn, False,
                                           example_inputs=[self._model, rays_o, rays_d, target, self._model.v])
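The _embed_length of 6 suggests the usual NeRF-style sinusoidal positional encoding; since the Model class is not shown here, the following NumPy sketch of that encoding is illustrative only:

import numpy as np

def sinusoid_embed(x, embed_length=6):
    # map each coordinate to sin(2^k * x) and cos(2^k * x) for k = 0..embed_length - 1
    freqs = 2.0 ** np.arange(embed_length)                       # (L,)
    angles = x[..., None] * freqs                                # (..., D, L)
    emb = np.concatenate([np.sin(angles), np.cos(angles)], -1)   # (..., D, 2L)
    return emb.reshape(*x.shape[:-1], -1)                        # (..., D * 2L)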
Example #4
def main(interactive=True, try_use_sim=True, f=None):

    # config
    this_dir = os.path.dirname(os.path.realpath(__file__))
    f = choose_random_framework(excluded=['numpy']) if f is None else f
    set_framework(f)
    sim = Simulator(interactive, try_use_sim)
    lr = 0.5
    num_anchors = 3
    num_sample_points = 100

    # spline start
    anchor_points = ivy.cast(
        ivy.expand_dims(ivy.linspace(0, 1, 2 + num_anchors), -1), 'float32')
    query_points = ivy.cast(
        ivy.expand_dims(ivy.linspace(0, 1, num_sample_points), -1), 'float32')

    # learnable parameters
    robot_start_config = ivy.array(ivy.cast(sim.robot_start_config, 'float32'))
    robot_target_config = ivy.array(
        ivy.cast(sim.robot_target_config, 'float32'))
    learnable_anchor_vals = ivy.variable(
        ivy.cast(
            ivy.transpose(
                ivy.linspace(robot_start_config, robot_target_config,
                             2 + num_anchors)[..., 1:-1], (1, 0)), 'float32'))

    # optimizer
    optimizer = ivy.SGD(lr=lr)

    # optimize
    it = 0
    colliding = True
    clearance = 0
    joint_query_vals = None
    while colliding:
        total_cost, grads, joint_query_vals, link_positions, sdf_vals = ivy.execute_with_gradients(
            lambda xs: compute_cost_and_sdfs(xs[
                'w'], anchor_points, robot_start_config, robot_target_config,
                                             query_points, sim),
            Container({'w': learnable_anchor_vals}))
        colliding = ivy.reduce_min(sdf_vals[2:]) < clearance
        sim.update_path_visualization(
            link_positions, sdf_vals,
            os.path.join(this_dir, 'msp_no_sim', 'path_{}.png'.format(it)))
        learnable_anchor_vals = optimizer.step(
            Container({'w': learnable_anchor_vals}), grads)['w']
        it += 1
    sim.execute_motion(joint_query_vals)
    sim.close()
    unset_framework()
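The optimizer.step call above amounts to vanilla gradient descent on each entry of the variable container; a one-function sketch of the update, assuming grads holds dCost/dw:

def sgd_step(w, grad_w, lr=0.5):
    # lr matches the demo's learning rate; w and grad_w share a shape
    return w - lr * grad_w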
Example #5
def main(env_str, steps=100, iters=10000, lr=0.001, seed=0, log_freq=100, vis_freq=1000, visualize=True, f=None):

    # config
    f = choose_random_framework(excluded=['numpy']) if f is None else f
    ivy.set_framework(f)
    ivy.seed(seed)
    env = getattr(ivy_gym, env_str)()
    starting_obs = env.reset()

    # policy
    in_size = starting_obs.shape[0]
    ac_dim = env.action_space.shape[0]
    policy = Policy(in_size, ac_dim)

    # compile loss function
    compiled_loss_fn = ivy.compile_fn(lambda initial_state, pol_vs:
                                      loss_fn(env, initial_state, policy, pol_vs, steps),
                                      False, example_inputs=[env.get_state(), policy.v])

    # optimizer
    optimizer = ivy.Adam(lr=lr)

    # train
    scores = []
    for iteration in range(iters):

        if iteration % vis_freq == 0 and visualize:
            obs = env.reset()
            env.render()
            for _ in range(steps):
                ac = policy(obs)
                obs, _, _, _ = env.step(ac)
                env.render()

        env.reset()
        if iteration == 0:
            print('\nCompiling loss function for {} environment steps... This may take a while...\n'.format(steps))
        score = train_step(compiled_loss_fn, optimizer, env.get_state(), policy, f)
        if iteration == 0:
            print('\nLoss function compiled!\n')
        print('iteration {} score {}'.format(iteration, ivy.to_numpy(score).item()))
        scores.append(ivy.to_numpy(score)[0])

        if len(scores) == log_freq:
            print('\nIterations: {} Mean Score: {}\n'.format(iteration + 1, np.mean(scores)))
            scores.clear()
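train_step is not shown in this example; a hypothetical sketch of what it plausibly does, reusing the execute_with_gradients pattern from Example #4 (the helper's exact signature and its score convention are assumptions):

def train_step(compiled_loss_fn, optimizer, initial_state, policy, f):
    # differentiate the compiled loss w.r.t. the policy variables and
    # apply one Adam update; f is threaded through but unused in this sketch
    loss, grads = ivy.execute_with_gradients(
        lambda pol_vs: compiled_loss_fn(initial_state, pol_vs), policy.v)
    policy.v = optimizer.step(policy.v, grads)
    return -loss  # score assumed here to be the negated loss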
Example #6
def main(interactive=True, try_use_sim=True, f=None):
    f = choose_random_framework() if f is None else f
    set_framework(f)
    sim = Simulator(interactive, try_use_sim)
    cam_pos = sim.cam.get_pos()
    iterations = 250 if sim.with_pyrep else 1
    msg = 'tracking plant pot for 250 simulator steps...' if sim.with_pyrep else ''
    print(msg)
    for i in range(iterations):
        target_pos = sim.target.get_pos()
        tfrm = ivy_mech.target_facing_rotation_matrix(cam_pos, target_pos)
        sim.cam.set_rot_mat(tfrm)
        if not interactive:
            sim.target.set_pos(sim.target.get_pos()
                               + ivy.array([-0.01, 0.01, 0.]))
        time.sleep(0.05)
    sim.close()
    unset_framework()
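A standalone sketch of the kind of look-at rotation that ivy_mech.target_facing_rotation_matrix computes, under the common convention that the camera z-axis points at the target with world z as the up vector; the library's actual axis conventions are an assumption:

import numpy as np

def look_at_rotation(cam_pos, target_pos, up=np.array([0., 0., 1.])):
    # degenerate when the viewing direction is parallel to the up vector
    z = target_pos - cam_pos
    z = z / np.linalg.norm(z)
    x = np.cross(up, z)
    x = x / np.linalg.norm(x)
    y = np.cross(z, x)
    return np.stack([x, y, z], -1)  # columns are the camera axes in world frame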
Example #7
def main(interactive=True, try_use_sim=True, f=None):
    f = choose_random_framework() if f is None else f
    set_framework(f)
    sim = Simulator(interactive, try_use_sim)
    vis = Visualizer(ivy.to_numpy(sim.default_camera_ext_mat_homo))
    xyzs = list()
    rgbs = list()
    iterations = 10 if sim.with_pyrep else 1
    for _ in range(iterations):
        for cam in sim.cams:
            depth, rgb = cam.cap()
            xyz = sim.depth_to_xyz(depth, cam.get_inv_ext_mat(),
                                   cam.inv_calib_mat, [128] * 2)
            xyzs.append(xyz)
            rgbs.append(rgb)
        xyz = ivy.reshape(ivy.concatenate(xyzs, 1), (-1, 3))
        rgb = ivy.reshape(ivy.concatenate(rgbs, 1), (-1, 3))
        voxels = [
            ivy.to_numpy(item)
            for item in ivy_vision.coords_to_voxel_grid(xyz, [100] * 3,
                                                        features=rgb)
        ]
        cuboid_inv_ext_mats = [
            ivy.to_numpy(
                ivy_mech.make_transformation_homogeneous(
                    cam.get_inv_ext_mat())) for cam in sim.cams
        ]
        with ivy.numpy.use:
            vis.show_voxel_grid(voxels,
                                interactive,
                                cuboid_inv_ext_mats=cuboid_inv_ext_mats,
                                cuboid_dims=[
                                    np.array([0.045, 0.045, 0.112])
                                    for _ in sim.cams
                                ])
        xyzs.clear()
        rgbs.clear()
    sim.close()
    unset_framework()
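A NumPy sketch of the binning step behind ivy_vision.coords_to_voxel_grid: quantize each point into a fixed-resolution grid spanning the point cloud's bounding box before features are scattered in (the library's internals are assumed, not quoted):

import numpy as np

def coords_to_voxel_indices(xyz, res):
    # xyz: n x 3 points; res: grid resolution per axis, e.g. [100] * 3
    mins, maxs = xyz.min(0), xyz.max(0)
    scales = (np.asarray(res) - 1) / np.maximum(maxs - mins, 1e-12)
    return np.floor((xyz - mins) * scales).astype(int)  # n x 3 voxel indices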
Example #8
File: run_through.py Project: ivy-dl/mech
def main(f=None):

    # Framework Setup #
    # ----------------#

    # choose random framework

    f = choose_random_framework() if f is None else f
    set_framework(f)

    # Orientation #
    # ------------#

    # rotation representations

    # 3
    rot_vec = ivy.array([0., 1., 0.])

    # 3 x 3
    rot_mat = ivy_mech.rot_vec_to_rot_mat(rot_vec)

    # 3
    euler_angles = ivy_mech.rot_mat_to_euler(rot_mat, 'zyx')

    # 4
    quat = ivy_mech.euler_to_quaternion(euler_angles)

    # 4
    axis_and_angle = ivy_mech.quaternion_to_axis_angle(quat)

    # 3
    rot_vec_again = axis_and_angle[..., :-1] * axis_and_angle[..., -1:]

    # Pose #
    # -----#

    # pose representations

    # 3
    position = ivy.ones_like(rot_vec)

    # 6
    rot_vec_pose = ivy.concatenate((position, rot_vec), 0)

    # 3 x 4
    mat_pose = ivy_mech.rot_vec_pose_to_mat_pose(rot_vec_pose)

    # 6
    euler_pose = ivy_mech.mat_pose_to_euler_pose(mat_pose)

    # 7
    quat_pose = ivy_mech.euler_pose_to_quaternion_pose(euler_pose)

    # 6
    rot_vec_pose_again = ivy_mech.quaternion_pose_to_rot_vec_pose(quat_pose)

    # Position #
    # ---------#

    # conversions of positional representation

    # 3
    cartesian_coord = ivy.random_uniform(0., 1., (3, ))

    # 3
    polar_coord = ivy_mech.cartesian_to_polar_coords(cartesian_coord)

    # 3
    cartesian_coord_again = ivy_mech.polar_to_cartesian_coords(polar_coord)

    # cartesian co-ordinate frame-of-reference transformations

    # 3 x 4
    trans_mat = ivy.random_uniform(0., 1., (3, 4))

    # 4
    cartesian_coord_homo = ivy_mech.make_coordinates_homogeneous(
        cartesian_coord)

    # 3
    trans_cartesian_coord = ivy.matmul(
        trans_mat, ivy.expand_dims(cartesian_coord_homo, -1))[:, 0]

    # 4
    trans_cartesian_coord_homo = ivy_mech.make_coordinates_homogeneous(
        trans_cartesian_coord)

    # 4 x 4
    trans_mat_homo = ivy_mech.make_transformation_homogeneous(trans_mat)

    # 3 x 4
    inv_trans_mat = ivy.inv(trans_mat_homo)[0:3]

    # 3
    cartesian_coord_again = ivy.matmul(
        inv_trans_mat, ivy.expand_dims(trans_cartesian_coord_homo, -1))[:, 0]

    # message
    print('End of Run Through Demo!')
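The rot_vec_to_rot_mat conversion at the top of this example is Rodrigues' formula; a standalone NumPy sketch for reference:

import numpy as np

def rot_vec_to_rot_mat(rot_vec):
    # R = I + sin(a) * K + (1 - cos(a)) * K^2, with a = |rot_vec|
    # and K the skew-symmetric matrix of the unit rotation axis
    angle = np.linalg.norm(rot_vec)
    if angle < 1e-12:
        return np.eye(3)
    kx, ky, kz = rot_vec / angle
    K = np.array([[0., -kz, ky], [kz, 0., -kx], [-ky, kx, 0.]])
    return np.eye(3) + np.sin(angle) * K + (1. - np.cos(angle)) * (K @ K)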
Example #9
def main(batch_size=32,
         num_train_steps=31250,
         compile_flag=True,
         num_bits=8,
         seq_len=28,
         ctrl_output_size=100,
         memory_size=128,
         memory_vector_dim=28,
         overfit_flag=False,
         interactive=True,
         f=None):
    f = choose_random_framework() if f is None else f
    set_framework(f)

    # train config
    lr = 1e-3 if not overfit_flag else 1e-2
    batch_size = batch_size if not overfit_flag else 1
    num_train_steps = num_train_steps if not overfit_flag else 150
    max_grad_norm = 50

    # logging config
    vis_freq = 250 if not overfit_flag else 1

    # optimizer
    optimizer = ivy.Adam(lr=lr)

    # ntm
    ntm = NTM(input_dim=num_bits + 1,
              output_dim=num_bits,
              ctrl_output_size=ctrl_output_size,
              ctrl_layers=1,
              memory_size=memory_size,
              memory_vector_dim=memory_vector_dim,
              read_head_num=1,
              write_head_num=1)

    # compile loss fn
    total_seq_example = ivy.random_uniform(shape=(batch_size, 2 * seq_len + 1,
                                                  num_bits + 1))
    target_seq_example = total_seq_example[:, 0:seq_len, :-1]
    if compile_flag:
        loss_fn_maybe_compiled = ivy.compile_fn(
            lambda v, ttl_sq, trgt_sq, sq_ln: loss_fn(ntm, v, ttl_sq, trgt_sq,
                                                      sq_ln),
            dynamic=False,
            example_inputs=[
                ntm.v, total_seq_example, target_seq_example, seq_len
            ])
    else:
        loss_fn_maybe_compiled = lambda v, ttl_sq, trgt_sq, sq_ln: loss_fn(
            ntm, v, ttl_sq, trgt_sq, sq_ln)

    # init
    input_seq_m1 = ivy.cast(
        ivy.random_uniform(0., 1., (batch_size, seq_len, num_bits)) > 0.5,
        'float32')
    mw = None
    vw = None

    for i in range(num_train_steps):

        # sequence to copy
        if not overfit_flag:
            input_seq_m1 = ivy.cast(
                ivy.random_uniform(0., 1.,
                                   (batch_size, seq_len, num_bits)) > 0.5,
                'float32')
        target_seq = input_seq_m1
        input_seq = ivy.concatenate(
            (input_seq_m1, ivy.zeros((batch_size, seq_len, 1))), -1)
        eos = ivy.ones((batch_size, 1, num_bits + 1))
        output_seq = ivy.zeros_like(input_seq)
        total_seq = ivy.concatenate((input_seq, eos, output_seq), -2)

        # train step
        loss, pred_vals = train_step(loss_fn_maybe_compiled, optimizer, ntm,
                                     total_seq, target_seq, seq_len, mw, vw,
                                     ivy.array(i + 1,
                                               'float32'), max_grad_norm)

        # log
        print('step: {}, loss: {}'.format(i, ivy.to_numpy(loss).item()))

        # visualize
        if i % vis_freq == 0:
            target_to_vis = (ivy.to_numpy(target_seq[0] * 255)).astype(
                np.uint8)
            target_to_vis = np.transpose(
                cv2.resize(target_to_vis, (560, 160),
                           interpolation=cv2.INTER_NEAREST), (1, 0))

            pred_to_vis = (ivy.to_numpy(pred_vals[0] * 255)).astype(np.uint8)
            pred_to_vis = np.transpose(
                cv2.resize(pred_to_vis, (560, 160),
                           interpolation=cv2.INTER_NEAREST), (1, 0))

            img_to_vis = np.concatenate((pred_to_vis, target_to_vis), 0)
            img_to_vis = cv2.resize(img_to_vis, (1120, 640),
                                    interpolation=cv2.INTER_NEAREST)

            img_to_vis[0:60, -200:] = 0
            img_to_vis[5:55, -195:-5] = 255
            cv2.putText(img_to_vis, 'step {}'.format(i), (935, 42),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.2, tuple([0] * 3), 2)

            img_to_vis[0:60, 0:200] = 0
            img_to_vis[5:55, 5:195] = 255
            cv2.putText(img_to_vis, 'prediction', (7, 42),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.2, tuple([0] * 3), 2)

            img_to_vis[320:380, 0:130] = 0
            img_to_vis[325:375, 5:125] = 255
            cv2.putText(img_to_vis, 'target', (7, 362),
                        cv2.FONT_HERSHEY_SIMPLEX, 1.2, tuple([0] * 3), 2)

            if interactive:
                cv2.imshow('prediction_and_target', img_to_vis)
                if overfit_flag:
                    cv2.waitKey(1)
                else:
                    cv2.waitKey(100)
                    cv2.destroyAllWindows()
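For clarity, the copy-task sequence assembled each step above is: seq_len input vectors with a zero flag bit appended, one all-ones end-of-sequence marker, then seq_len blank vectors during which the NTM must reproduce the input. That gives the (batch_size, 2 * seq_len + 1, num_bits + 1) shape used when compiling the loss, with target_seq being the raw input_seq_m1.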
Example #10
File: run_through.py Project: ivy-dl/robot
def main(interactive=True, f=None):

    global INTERACTIVE
    INTERACTIVE = interactive

    # Framework Setup #
    # ----------------#

    # choose random framework

    set_framework(choose_random_framework() if f is None else f)

    # Spline Interpolation #
    # ---------------------#

    # config
    num_free_anchors = 3
    num_samples = 100
    constant_rot_vec = ivy.array([[0., 0., 0.]])
    constant_z = ivy.array([[0.]])

    # xy positions

    # 1 x 2
    start_xy = ivy.array([[0., 0.]])
    target_xy = ivy.array([[1., 1.]])

    # 1 x 2
    anchor1_xy = ivy.array([[0.6, 0.2]])
    anchor2_xy = ivy.array([[0.5, 0.5]])
    anchor3_xy = ivy.array([[0.4, 0.8]])

    # as 6DOF poses

    # 1 x 6
    start_pose = ivy.concatenate((start_xy, constant_z, constant_rot_vec), -1)
    anchor1_pose = ivy.concatenate((anchor1_xy, constant_z, constant_rot_vec),
                                   -1)
    anchor2_pose = ivy.concatenate((anchor2_xy, constant_z, constant_rot_vec),
                                   -1)
    anchor3_pose = ivy.concatenate((anchor3_xy, constant_z, constant_rot_vec),
                                   -1)
    target_pose = ivy.concatenate((target_xy, constant_z, constant_rot_vec),
                                  -1)

    num_anchors = num_free_anchors + 2

    # num_anchors x 6
    anchor_poses = ivy.concatenate(
        (start_pose, anchor1_pose, anchor2_pose, anchor3_pose, target_pose), 0)

    # uniform sampling for spline

    # num_anchors x 1
    anchor_points = ivy.expand_dims(ivy.linspace(0., 1., num_anchors), -1)

    # num_samples x 1
    query_points = ivy.expand_dims(ivy.linspace(0., 1., num_samples), -1)

    # interpolated spline poses

    # num_samples x 6
    interpolated_poses = ivy_robot.sample_spline_path(anchor_points,
                                                      anchor_poses,
                                                      query_points)

    # xy motion

    # num_anchors x 2
    anchor_xy_positions = anchor_poses[..., 0:2]

    # num_samples x 2
    interpolated_xy_positions = interpolated_poses[..., 0:2]

    # show xy path
    show_2d_spline_path(anchor_xy_positions, interpolated_xy_positions,
                        [-0.095, 0.055], [0.638, 0.171], [0.544, 0.486],
                        [0.445, 0.766], [0.9, 0.9], 'x', 'y',
                        'Interpolated Drone Pose Spline in XY Plane',
                        'start point', 'target point')

    # Rigid Mobile #
    # -------------#

    # drone relative body points
    rel_body_points = ivy.array([[0., 0., 0.], [-0.1, -0.1, 0.],
                                 [-0.1, 0.1, 0.], [0.1, -0.1, 0.],
                                 [0.1, 0.1, 0.]])

    # create drone as ivy rigid mobile robot
    drone = RigidMobile(rel_body_points)

    # rotation vectors

    # 1 x 3
    start_rot_vec = ivy.array([[0., 0., 0.]])
    target_rot_vec = ivy.array([[0., 0., np.pi]])

    # 1 x 3
    anchor1_rot_vec = ivy.array([[0., 0., np.pi / 4]])
    anchor2_rot_vec = ivy.array([[0., 0., 2 * np.pi / 4]])
    anchor3_rot_vec = ivy.array([[0., 0., 3 * np.pi / 4]])

    # as 6DOF poses

    # 1 x 6
    start_pose = ivy.concatenate((start_xy, constant_z, start_rot_vec), -1)
    anchor1_pose = ivy.concatenate((anchor1_xy, constant_z, anchor1_rot_vec),
                                   -1)
    anchor2_pose = ivy.concatenate((anchor2_xy, constant_z, anchor2_rot_vec),
                                   -1)
    anchor3_pose = ivy.concatenate((anchor3_xy, constant_z, anchor3_rot_vec),
                                   -1)
    target_pose = ivy.concatenate((target_xy, constant_z, target_rot_vec), -1)

    # num_anchors x 6
    anchor_poses = ivy.concatenate(
        (start_pose, anchor1_pose, anchor2_pose, anchor3_pose, target_pose), 0)

    # interpolated spline poses

    # num_samples x 6
    interpolated_poses = ivy_robot.sample_spline_path(anchor_points,
                                                      anchor_poses,
                                                      query_points)

    # as matrices

    # num_anchors x 3 x 4
    anchor_matrices = ivy_mech.rot_vec_pose_to_mat_pose(anchor_poses)

    # num_samples x 3 x 4
    interpolated_matrices = ivy_mech.rot_vec_pose_to_mat_pose(
        interpolated_poses)

    # sample drone body

    # num_anchors x num_body_points x 3
    anchor_body_points = drone.sample_body(anchor_matrices)

    # num_samples x num_body_points x 3
    interpolated_body_points = drone.sample_body(interpolated_matrices)

    # show
    show_full_spline_path(anchor_body_points, interpolated_body_points,
                          [-0.168, 0.19], [0.88, 0.73], 'x', 'y',
                          'Sampled Drone Body Positions in XY Plane',
                          'start points', 'target points', False)

    # Manipulator #
    # ------------#

    class SimpleManipulator(Manipulator):

        # noinspection PyShadowingNames
        def __init__(self, base_inv_ext_mat=None):
            a_s = ivy.array([0.5, 0.5])
            d_s = ivy.array([0., 0.])
            alpha_s = ivy.array([0., 0.])
            dh_joint_scales = ivy.ones((2, ))
            dh_joint_offsets = ivy.array([-np.pi / 2, 0.])
            super().__init__(a_s, d_s, alpha_s, dh_joint_scales,
                             dh_joint_offsets, base_inv_ext_mat)

    # create manipulator as ivy manipulator
    manipulator = SimpleManipulator()

    # joint angles

    # 1 x 2
    start_joint_angles = ivy.array([[0., 0.]])
    target_joint_angles = ivy.array([[-np.pi / 4, -np.pi / 4]])

    # 1 x 2
    anchor1_joint_angles = -ivy.array([[0.2, 0.6]]) * np.pi / 4
    anchor2_joint_angles = -ivy.array([[0.5, 0.5]]) * np.pi / 4
    anchor3_joint_angles = -ivy.array([[0.8, 0.4]]) * np.pi / 4

    # num_anchors x 2
    anchor_joint_angles = ivy.concatenate(
        (start_joint_angles, anchor1_joint_angles, anchor2_joint_angles,
         anchor3_joint_angles, target_joint_angles), 0)

    # interpolated joint angles

    # num_samples x 2
    interpolated_joint_angles = ivy_robot.sample_spline_path(
        anchor_points, anchor_joint_angles, query_points)

    # sample links

    # num_anchors x num_link_points x 3
    anchor_link_points = manipulator.sample_links(anchor_joint_angles,
                                                  samples_per_metre=5)

    # num_samples x num_link_points x 3
    interpolated_link_points = manipulator.sample_links(
        interpolated_joint_angles, samples_per_metre=5)

    # show
    show_2d_spline_path(anchor_joint_angles, interpolated_joint_angles,
                        [-0.222, -0.015], [-0.147, -0.52], [-0.38, -0.366],
                        [-0.64, -0.263], [-0.752, -0.79], r'$\theta_1$',
                        r'$\theta_2$',
                        'Interpolated Manipulator Joint Angles Spline',
                        'start angles', 'target angles')
    show_full_spline_path(anchor_link_points, interpolated_link_points,
                          [0.026, 0.8], [0.542, 0.26], 'x', 'y',
                          'Sampled Manipulator Links in XY Plane',
                          'start config', 'target config', True)
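As a sanity check on the manipulator above, a NumPy sketch of forward kinematics for a 2-link planar arm with the same link lengths (a_s = [0.5, 0.5], all d and alpha zero); note the demo additionally applies DH joint offsets of [-pi/2, 0.], which this sketch ignores:

import numpy as np

def two_link_fk(joint_angles, a1=0.5, a2=0.5):
    # returns the elbow and end-effector positions in the arm's xy plane
    t1, t2 = joint_angles
    elbow = np.array([a1 * np.cos(t1), a1 * np.sin(t1)])
    wrist = elbow + np.array([a2 * np.cos(t1 + t2), a2 * np.sin(t1 + t2)])
    return elbow, wrist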
Example #11
def main(interactive=True, f=None):

    global INTERACTIVE
    INTERACTIVE = interactive

    # Framework Setup #
    # ----------------#

    # choose random framework
    f = choose_random_framework() if f is None else f
    set_framework(f)

    # Camera Geometry #
    # ----------------#

    # intrinsics

    # common intrinsic params
    img_dims = [512, 512]
    pp_offsets = ivy.array([dim / 2 - 0.5 for dim in img_dims], 'float32')
    cam_persp_angles = ivy.array([60 * np.pi / 180] * 2, 'float32')

    # ivy cam intrinsics container
    intrinsics = ivy_vision.persp_angles_and_pp_offsets_to_intrinsics_object(
        cam_persp_angles, pp_offsets, img_dims)

    # extrinsics

    # 3 x 4
    cam1_inv_ext_mat = ivy.array(np.load(data_dir + '/cam1_inv_ext_mat.npy'),
                                 'float32')
    cam2_inv_ext_mat = ivy.array(np.load(data_dir + '/cam2_inv_ext_mat.npy'),
                                 'float32')

    # full geometry

    # ivy cam geometry container
    cam1_geom = ivy_vision.inv_ext_mat_and_intrinsics_to_cam_geometry_object(
        cam1_inv_ext_mat, intrinsics)
    cam2_geom = ivy_vision.inv_ext_mat_and_intrinsics_to_cam_geometry_object(
        cam2_inv_ext_mat, intrinsics)
    cam_geoms = [cam1_geom, cam2_geom]

    # Camera Geometry Check #
    # ----------------------#

    # assert camera geometry shapes

    for cam_geom in cam_geoms:

        assert cam_geom.intrinsics.focal_lengths.shape == (2, )
        assert cam_geom.intrinsics.persp_angles.shape == (2, )
        assert cam_geom.intrinsics.pp_offsets.shape == (2, )
        assert cam_geom.intrinsics.calib_mats.shape == (3, 3)
        assert cam_geom.intrinsics.inv_calib_mats.shape == (3, 3)

        assert cam_geom.extrinsics.cam_centers.shape == (3, 1)
        assert cam_geom.extrinsics.Rs.shape == (3, 3)
        assert cam_geom.extrinsics.inv_Rs.shape == (3, 3)
        assert cam_geom.extrinsics.ext_mats_homo.shape == (4, 4)
        assert cam_geom.extrinsics.inv_ext_mats_homo.shape == (4, 4)

        assert cam_geom.full_mats_homo.shape == (4, 4)
        assert cam_geom.inv_full_mats_homo.shape == (4, 4)

    # Image Data #
    # -----------#

    # load images

    # h x w x 3
    color1 = ivy.array(
        cv2.imread(data_dir + '/rgb1.png').astype(np.float32) / 255)
    color2 = ivy.array(
        cv2.imread(data_dir + '/rgb2.png').astype(np.float32) / 255)

    # h x w x 1
    depth1 = ivy.array(
        np.reshape(
            np.frombuffer(
                cv2.imread(data_dir + '/depth1.png', -1).tobytes(),
                np.float32), img_dims + [1]))
    depth2 = ivy.array(
        np.reshape(
            np.frombuffer(
                cv2.imread(data_dir + '/depth2.png', -1).tobytes(),
                np.float32), img_dims + [1]))

    # depth scaled pixel coords

    # h x w x 3
    u_pix_coords = ivy_vision.create_uniform_pixel_coords_image(img_dims)
    ds_pixel_coords1 = u_pix_coords * depth1
    ds_pixel_coords2 = u_pix_coords * depth2

    # depth limits
    depth_min = ivy.reduce_min(ivy.concatenate((depth1, depth2), 0))
    depth_max = ivy.reduce_max(ivy.concatenate((depth1, depth2), 0))
    depth_limits = [depth_min, depth_max]

    # show images
    show_rgb_and_depth_images(color1, color2, depth1, depth2, depth_limits)

    # Flow and Depth Triangulation #
    # -----------------------------#

    # required mat formats
    cam1to2_full_mat_homo = ivy.matmul(cam2_geom.full_mats_homo,
                                       cam1_geom.inv_full_mats_homo)
    cam1to2_full_mat = cam1to2_full_mat_homo[..., 0:3, :]
    full_mats_homo = ivy.concatenate(
        (ivy.expand_dims(cam1_geom.full_mats_homo,
                         0), ivy.expand_dims(cam2_geom.full_mats_homo, 0)), 0)
    full_mats = full_mats_homo[..., 0:3, :]

    # flow
    flow1to2 = ivy_vision.flow_from_depth_and_cam_mats(ds_pixel_coords1,
                                                       cam1to2_full_mat)

    # depth again
    depth1_from_flow = ivy_vision.depth_from_flow_and_cam_mats(
        flow1to2, full_mats)

    # show images
    show_flow_and_depth_images(depth1, flow1to2, depth1_from_flow,
                               depth_limits)

    # Inverse Warping #
    # ----------------#

    # inverse warp rendering
    warp = u_pix_coords[..., 0:2] + flow1to2
    color2_warp_to_f1 = ivy.reshape(ivy.bilinear_resample(color2, warp),
                                    color1.shape)

    # projected depth scaled pixel coords 2
    ds_pixel_coords1_wrt_f2 = ivy_vision.ds_pixel_to_ds_pixel_coords(
        ds_pixel_coords1, cam1to2_full_mat)

    # projected depth 2
    depth1_wrt_f2 = ds_pixel_coords1_wrt_f2[..., -1:]

    # inverse warp depth
    depth2_warp_to_f1 = ivy.reshape(ivy.bilinear_resample(depth2, warp),
                                    depth1.shape)

    # depth validity
    depth_validity = ivy.abs(depth1_wrt_f2 - depth2_warp_to_f1) < 0.01

    # inverse warp rendering with mask
    color2_warp_to_f1_masked = ivy.where(depth_validity, color2_warp_to_f1,
                                         ivy.zeros_like(color2_warp_to_f1))

    # show images
    show_inverse_warped_images(depth1_wrt_f2, depth2_warp_to_f1,
                               depth_validity, color1, color2_warp_to_f1,
                               color2_warp_to_f1_masked, depth_limits)

    # Forward Warping #
    # ----------------#

    # forward warp rendering
    ds_pixel_coords1_proj = ivy_vision.ds_pixel_to_ds_pixel_coords(
        ds_pixel_coords2,
        ivy.inv(cam1to2_full_mat_homo)[..., 0:3, :])
    depth1_proj = ds_pixel_coords1_proj[..., -1:]
    ds_pixel_coords1_proj = ds_pixel_coords1_proj[..., 0:2] / depth1_proj
    features_to_render = ivy.concatenate((depth1_proj, color2), -1)

    # without depth buffer
    f1_forward_warp_no_db, _, _ = ivy_vision.quantize_to_image(
        ivy.reshape(ds_pixel_coords1_proj, (-1, 2)),
        img_dims,
        ivy.reshape(features_to_render, (-1, 4)),
        ivy.zeros_like(features_to_render),
        with_db=False)

    # with depth buffer
    f1_forward_warp_w_db, _, _ = ivy_vision.quantize_to_image(
        ivy.reshape(ds_pixel_coords1_proj, (-1, 2)),
        img_dims,
        ivy.reshape(features_to_render, (-1, 4)),
        ivy.zeros_like(features_to_render),
        with_db=False if ivy.get_framework() == 'mxnd' else True)

    # show images
    show_forward_warped_images(depth1, color1, f1_forward_warp_no_db,
                               f1_forward_warp_w_db, depth_limits)

    # message
    print('End of Run Through Demo!')
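A NumPy sketch of the projective flow that ivy_vision.flow_from_depth_and_cam_mats computes conceptually: lift the depth-scaled pixel coords to homogeneous form, transform them into the second camera, de-scale, and subtract the original pixel coords (the library's internals are assumed, not quoted):

import numpy as np

def flow_from_depth(ds_pix1, cam1to2_full_mat, u_pix):
    # ds_pix1: h x w x 3 depth-scaled pixel coords in frame 1
    # cam1to2_full_mat: 3 x 4 projection from frame 1 to frame 2
    # u_pix: h x w x 3 uniform homogeneous pixel coords
    homo = np.concatenate([ds_pix1, np.ones_like(ds_pix1[..., :1])], -1)
    proj = homo @ cam1to2_full_mat.T           # h x w x 3, depth-scaled in frame 2
    pix2 = proj[..., 0:2] / proj[..., -1:]
    return pix2 - u_pix[..., 0:2]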
Example #12


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--no_visuals', action='store_true',
                        help='whether to run the demo without rendering images.')
    parser.add_argument('--env', default='CartPole',
                        choices=['CartPole', 'Pendulum', 'MountainCar', 'Reacher', 'Swimmer'])
    parser.add_argument('--framework', type=str, default=None,
                        choices=['jax', 'tensorflow', 'torch', 'mxnd', 'numpy'])
    parser.add_argument('--steps', type=int, default=100)
    parser.add_argument('--iters', type=int, default=10000)
    parser.add_argument('--lr', type=float, default=0.001)
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--log_freq', type=int, default=100)
    parser.add_argument('--vis_freq', type=int, default=1000)
    parsed_args = parser.parse_args()
    if parsed_args.framework is None:
        framework = choose_random_framework(excluded=['numpy'])
    else:
        framework = get_framework_from_str(parsed_args.framework)
    if parsed_args.framework == 'numpy':
        raise Exception('Invalid framework selection. Numpy does not support auto-differentiation.\n'
                        'This demo involves gradient-based optimization, and so auto-diff is required.\n'
                        'Please choose a different backend framework.')
    print('\nTraining for {} iterations.\n'.format(parsed_args.iters))
    main(parsed_args.env, parsed_args.steps, parsed_args.iters, parsed_args.lr, parsed_args.seed,
         parsed_args.log_freq, parsed_args.vis_freq, not parsed_args.no_visuals, framework)
Example #13
def main(interactive=True, try_use_sim=True, f=None):

    # setup framework
    f = choose_random_framework() if f is None else f
    set_framework(f)

    # simulator and drone
    sim = Simulator(interactive, try_use_sim)
    drone = sim.drone

    # ESM
    esm = ivy_mem.ESM()
    esm_mem = esm.empty_memory(1, 1)

    # demo loop
    for t in range(1000 if interactive and sim.with_pyrep else 100):

        # log iteration
        print('timestep {}'.format(t))

        # acquire image measurements
        depth, rgb = drone.cam.cap()

        # convert to ESM format
        ds_pix_coords = ivy_vision.depth_to_ds_pixel_coords(depth)
        cam_coords = ivy_vision.ds_pixel_to_cam_coords(ds_pix_coords, drone.cam.inv_calib_mat)[..., 0:3]
        img_mean = ivy.concatenate((cam_coords, rgb), -1)

        # acquire pose measurements
        cam_rel_mat = drone.cam.mat_rel_to_drone
        agent_rel_mat = ivy.array(drone.measure_incremental_mat())

        # single esm camera measurement
        esm_cam_meas = ESMCamMeasurement(
            img_mean=img_mean,
            cam_rel_mat=cam_rel_mat
        )

        # total esm observation
        obs = ESMObservation(
            img_meas={'cam0': esm_cam_meas},
            agent_rel_mat=agent_rel_mat)

        esm_mem = esm(obs, esm_mem)

        # update esm visualization
        if not interactive:
            continue
        rgb_img = _add_image_border(
            cv2.resize(ivy.to_numpy(rgb).copy(), (180, 180)))
        rgb_img = _add_title(rgb_img, 25, 75, 2, 'raw rgb', 70)
        depth_img = _add_image_border(cv2.resize(np.clip(
            np.tile(ivy.to_numpy(depth), (1, 1, 3))/3, 0, 1).copy(), (180, 180)))
        depth_img = _add_title(depth_img, 25, 90, 2, 'raw depth', 85)
        raw_img_concatted = np.concatenate((rgb_img, depth_img), 0)
        esm_feat = _add_image_border(np.clip(ivy.to_numpy(esm_mem.mean[0, 0, ..., 3:]), 0, 1).copy())
        esm_feat = _add_title(esm_feat, 25, 80, 2, 'esm rgb', 75)
        esm_depth = _add_image_border(np.clip(np.tile(ivy.to_numpy(esm_mem.mean[0, 0, ..., 2:3])/3,
                                                      (1, 1, 3)), 0, 1).copy())
        esm_depth = _add_title(esm_depth, 25, 95, 2, 'esm depth', 90)
        esm_img_concatted = np.concatenate((esm_feat, esm_depth), 0)
        img_to_show = np.concatenate((raw_img_concatted, esm_img_concatted), 1)
        plt.imshow(img_to_show)
        plt.show(block=False)
        plt.pause(0.001)

    # end of demo
    sim.close()
    unset_framework()
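agent_rel_mat above is an incremental 3 x 4 pose; a small sketch of how such increments chain onto a running world pose, which is the flavour of bookkeeping the ESM memory update performs internally (the module's actual update is an assumption here):

import numpy as np

def compose_incremental_pose(prev_world_mat, agent_rel_mat):
    # append the homogeneous row, compose, and drop back to 3 x 4
    prev_homo = np.concatenate([prev_world_mat, [[0., 0., 0., 1.]]], 0)
    rel_homo = np.concatenate([agent_rel_mat, [[0., 0., 0., 1.]]], 0)
    return (prev_homo @ rel_homo)[0:3]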
Example #14
def main(interactive=True, try_use_sim=True, f=None):
    f = choose_random_framework() if f is None else f
    set_framework(f)
    with_mxnd = f is ivy.mxnd
    if with_mxnd:
        print('\nMXNet does not support "sum" or "min" reductions for scatter_nd;\n'
              'instead it only supports non-deterministic replacement for duplicates.\n'
              'Depth buffer rendering (requires min) and fusion buffer (requires sum) are therefore unsupported.\n'
              'The rendering in this demo with the MXNet backend exhibits non-deterministic jagged edges as a result.')
    sim = Simulator(interactive, try_use_sim)
    import matplotlib.pyplot as plt
    xyzs = list()
    rgbs = list()
    iterations = 10 if sim.with_pyrep else 1
    for _ in range(iterations):
        for cam in sim.cams:
            depth, rgb = cam.cap()
            xyz = sim.depth_to_xyz(depth, cam.get_inv_ext_mat(), cam.inv_calib_mat, [1024]*2)
            xyzs.append(xyz)
            rgbs.append(rgb)
        xyz = ivy.reshape(ivy.concatenate(xyzs, 1), (-1, 3))
        rgb = ivy.reshape(ivy.concatenate(rgbs, 1), (-1, 3))
        cam_coords = ivy_vision.world_to_cam_coords(ivy_mech.make_coordinates_homogeneous(ivy.expand_dims(xyz, 1)),
                                                    sim.target_cam.get_ext_mat())
        ds_pix_coords = ivy_vision.cam_to_ds_pixel_coords(cam_coords, sim.target_cam.calib_mat)
        depth = ds_pix_coords[..., -1]
        pix_coords = ds_pix_coords[..., 0, 0:2] / depth
        final_image_dims = [512]*2
        feat = ivy.concatenate((depth, rgb), -1)
        rendered_img_no_db, _, _ = ivy_vision.quantize_to_image(
            pix_coords, final_image_dims, feat, ivy.zeros(final_image_dims + [4]), with_db=False)
        with_db = not with_mxnd
        rendered_img_with_db, _, _ = ivy_vision.quantize_to_image(
            pix_coords, final_image_dims, feat, ivy.zeros(final_image_dims + [4]), with_db=with_db)

        a_img = cv2.resize(ivy.to_numpy(rgbs[0]), (256, 256))
        a_img[0:50, 0:50] = np.zeros_like(a_img[0:50, 0:50])
        a_img[5:45, 5:45] = np.ones_like(a_img[5:45, 5:45])
        cv2.putText(a_img, 'a', (13, 33), cv2.FONT_HERSHEY_SIMPLEX, 1.2, tuple([0] * 3), 2)

        b_img = cv2.resize(ivy.to_numpy(rgbs[1]), (256, 256))
        b_img[0:50, 0:50] = np.zeros_like(b_img[0:50, 0:50])
        b_img[5:45, 5:45] = np.ones_like(b_img[5:45, 5:45])
        cv2.putText(b_img, 'b', (13, 33), cv2.FONT_HERSHEY_SIMPLEX, 1.2, tuple([0] * 3), 2)

        c_img = cv2.resize(ivy.to_numpy(rgbs[2]), (256, 256))
        c_img[0:50, 0:50] = np.zeros_like(c_img[0:50, 0:50])
        c_img[5:45, 5:45] = np.ones_like(c_img[5:45, 5:45])
        cv2.putText(c_img, 'c', (13, 33), cv2.FONT_HERSHEY_SIMPLEX, 1.2, tuple([0] * 3), 2)

        target_img = cv2.resize(ivy.to_numpy(sim.target_cam.cap()[1]), (256, 256))
        target_img[0:50, 0:140] = np.zeros_like(target_img[0:50, 0:140])
        target_img[5:45, 5:135] = np.ones_like(target_img[5:45, 5:135])
        cv2.putText(target_img, 'target', (13, 33), cv2.FONT_HERSHEY_SIMPLEX, 1.2, tuple([0] * 3), 2)

        msg = 'non-deterministic' if with_mxnd else 'no depth buffer'
        width = 360 if with_mxnd else 320
        no_db_img = np.copy(ivy.to_numpy(rendered_img_no_db[..., 3:]))
        no_db_img[0:50, 0:width+5] = np.zeros_like(no_db_img[0:50, 0:width+5])
        no_db_img[5:45, 5:width] = np.ones_like(no_db_img[5:45, 5:width])
        cv2.putText(no_db_img, msg, (13, 33), cv2.FONT_HERSHEY_SIMPLEX, 1.2, tuple([0] * 3), 2)

        with_db_img = np.copy(ivy.to_numpy(rendered_img_with_db[..., 3:]))
        with_db_img[0:50, 0:350] = np.zeros_like(with_db_img[0:50, 0:350])
        with_db_img[5:45, 5:345] = np.ones_like(with_db_img[5:45, 5:345])
        cv2.putText(with_db_img, 'with depth buffer', (13, 33), cv2.FONT_HERSHEY_SIMPLEX, 1.2, tuple([0] * 3), 2)

        raw_imgs = np.concatenate((np.concatenate((a_img, b_img), 1),
                                   np.concatenate((c_img, target_img), 1)), 0)
        to_concat = (raw_imgs, no_db_img) if with_mxnd else (raw_imgs, no_db_img, with_db_img)
        final_img = np.concatenate(to_concat, 1)

        if interactive:
            print('\nClose the image window when you are ready.\n')
            plt.imshow(final_img)
            plt.show()
        xyzs.clear()
        rgbs.clear()
    sim.close()
    unset_framework()
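The with_db flag above selects depth-buffered splatting; a plain-Python sketch of that min-depth reduction (the real ivy_vision.quantize_to_image uses a vectorized scatter, which is exactly why the MXNet caveat printed at the top applies):

import numpy as np

def splat_with_depth_buffer(pix_coords, feats, img_dims):
    # feats[:, 0] is depth, matching the demo's (depth, rgb) feature layout;
    # per pixel, keep the feature vector with the smallest depth
    h, w = img_dims
    img = np.zeros((h, w, feats.shape[-1]))
    best_depth = np.full((h, w), np.inf)
    for (x, y), f in zip(pix_coords.astype(int), feats):
        if 0 <= x < w and 0 <= y < h and f[0] < best_depth[y, x]:
            best_depth[y, x] = f[0]
            img[y, x] = f
    return img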