Example #1
def castalia_raycasting_plot(img_path):
    """Generate an image of the raycaster in work
    """
    if not os.path.exists(img_path):
        os.makedirs(img_path)

    output_filename = os.path.join(img_path, 'castalia_raycasting_plot.jpg')

    input_filename = './data/shape_model/CASTALIA/castalia.obj'
    polydata = wavefront.read_obj_to_polydata(input_filename)

    # initialize two raycasters, one using an OBB tree and one using a BSP tree
    caster_obb = raycaster.RayCaster(polydata, flag='obb')
    caster_bsp = raycaster.RayCaster(polydata, flag='bsp')

    sensor = raycaster.Lidar(view_axis=np.array([1, 0, 0]), num_step=3)

    # need to translate the sensor and give it a pointing direction
    pos = np.array([2, 0, 0])
    dist = 2  # distance for each raycast
    R = attitude.rot3(np.pi)

    # find the intersections
    # targets = pos + sensor.rotate_fov(R) * dist
    targets = sensor.define_targets(pos, R, dist)
    intersections_obb = caster_obb.castarray(pos, targets)
    intersections_bsp = caster_bsp.castarray(pos, targets)

    # plot
    fig = graphics.mayavi_figure()

    graphics.mayavi_addPoly(fig, polydata)
    graphics.mayavi_addPoint(fig, pos, radius=0.05)

    # draw lines from pos to all targets
    for pt in targets:
        graphics.mayavi_addLine(fig, pos, pt, color=(1, 0, 0))

    for ints in intersections_bsp:
        graphics.mayavi_addPoint(fig, ints, radius=0.05, color=(1, 1, 0))

    graphics.mayavi_axes(fig=fig, extent=[-1, 1, -1, 1, -1, 1])
    graphics.mayavi_view(fig=fig)

    # savefig
    graphics.mayavi_savefig(fig=fig, filename=output_filename, magnification=4)
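The function above relies on the project-level imports shown in Example #2; a minimal driver sketch under that assumption (the output directory name here is hypothetical):

import os
import numpy as np

from kinematics import attitude
from point_cloud import wavefront, raycaster
from visualization import graphics

# generate and save the raycasting figure; the path is only an example
castalia_raycasting_plot('./images/raycasting')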
Example #2
"""
from kinematics import attitude
from point_cloud import wavefront, raycaster
import numpy as np
from visualization import graphics
import pdb

from lib import mesh_data, cgal

filename = './data/shape_model/CASTALIA/castalia.obj'
polydata = wavefront.read_obj_to_polydata(filename)

caster_obb = raycaster.RayCaster(polydata, flag='obb')
caster_bsp = raycaster.RayCaster(polydata, flag='bsp')

sensor = raycaster.Lidar(view_axis=np.array([1, 0, 0]), num_step=3)

# need to translate the sensor and give it a pointing direction
pos = np.array([2, 0, 0])
dist = 2  # distance for each raycast
R = attitude.rot3(np.pi)

# find the intersections
# targets = pos + sensor.rotate_fov(R) * dist
targets = sensor.define_targets(pos, R, dist)
intersections_obb = caster_obb.castarray(pos, targets)
intersections_bsp = caster_bsp.castarray(pos, targets)

# plot
fig = graphics.mayavi_figure(bg=(0, 0, 0))
graphics.mayavi_addPoly(fig, polydata)
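The excerpt stops after adding the mesh to the figure; a continuation that simply mirrors the remaining plotting calls from Example #1 (a sketch, not part of the original script) would be:

graphics.mayavi_addPoint(fig, pos, radius=0.05)

# rays from the sensor position to every target, then the BSP intersections
for pt in targets:
    graphics.mayavi_addLine(fig, pos, pt, color=(1, 0, 0))

for ints in intersections_bsp:
    graphics.mayavi_addPoint(fig, ints, radius=0.05, color=(1, 1, 0))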
Example #3
def simulate():

    logger = logging.getLogger(__name__)

    ast, dum, des_att_func, des_tran_func, AbsTol, RelTol = initialize()

    num_steps = int(1e3)
    time = np.linspace(0, num_steps, num_steps)
    t0, tf = time[0], time[-1]
    dt = time[1] - time[0]

    initial_pos = np.array([1.5, 0, 0])
    initial_vel = np.array([0, 0, 0])
    initial_R = attitude.rot3(np.pi / 2).reshape(-1)
    initial_w = np.array([0, 0, 0])
    initial_state = np.hstack((initial_pos, initial_vel, initial_R, initial_w))

    # TODO Initialize a coarse asteroid mesh model and combine with point cloud data

    # initialize the raycaster and lidar
    polydata = wavefront.meshtopolydata(ast.V, ast.F)
    caster = raycaster.RayCaster(polydata)
    sensor = raycaster.Lidar(dist=5)
    # try both a controlled and uncontrolled simulation
    # t, istate, astate, bstate = eoms.inertial_eoms_driver(initial_state, time, ast, dum)

    # TODO Dynamics should be based on the coarse model
    # TODO Asteroid class will need a method to update mesh
    system = integrate.ode(eoms.eoms_controlled_inertial)
    system.set_integrator('lsoda', atol=AbsTol, rtol=RelTol, nsteps=1e4)
    system.set_initial_value(initial_state, t0)
    system.set_f_params(ast, dum, des_att_func, des_tran_func)

    point_cloud = defaultdict(list)

    state = np.zeros((num_steps + 1, 18))
    t = np.zeros(num_steps + 1)
    int_array = []
    state[0, :] = initial_state

    ii = 1
    while system.successful() and system.t < tf:

        # integrate the system and save state to an array
        t[ii] = (system.t + dt)
        state[ii, :] = system.integrate(system.t + dt)

        logger.info('Step : {} Time: {}'.format(ii, t[ii]))
        # now do the raycasting (np.floor(t) % 1 is always zero, so this runs every step)
        if not (np.floor(t[ii]) % 1):
            logger.info('Raycasting at t = {}'.format(t[ii]))

            targets = sensor.define_targets(state[ii, 0:3],
                                            state[ii, 6:15].reshape((3, 3)),
                                            np.linalg.norm(state[ii, 0:3]))

            # new asteroid rotation with vertices
            nv = ast.rotate_vertices(t[ii])
            Ra = ast.rot_ast2int(t[ii])
            # update the mesh inside the caster
            caster = raycaster.RayCaster.updatemesh(nv, ast.F)

            # these intersections are points in the inertial frame
            intersections = caster.castarray(state[ii, 0:3], targets)

            point_cloud['time'].append(t[ii])
            point_cloud['ast_state'].append(Ra.reshape(-1))
            point_cloud['sc_state'].append(state[ii, :])
            point_cloud['targets'].append(targets)
            point_cloud['inertial_ints'].append(intersections)

            logger.info('Found {} intersections'.format(len(intersections)))

            ast_ints = []
            for pt in intersections:
                if pt.size > 0:
                    pt_ast = Ra.T.dot(pt)
                else:
                    logger.info('No intersection for this point')
                    pt_ast = np.array([np.nan, np.nan, np.nan])

                ast_ints.append(pt_ast)

            point_cloud['ast_ints'].append(np.asarray(ast_ints))

        # TODO Eventually call the surface reconstruction function and update asteroid model

        # advance to the next integration step
        ii += 1

    # plot the simulation
    # plotting.animate_inertial_trajectory(t, istate, ast, dum)
    # plotting.plot_controlled_inertial(t, istate, ast, dum, fwidth=1)

    return time, state, point_cloud
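A minimal sketch of consuming simulate()'s output, assuming the module-level numpy import and at least one recorded raycast; the NaN rows inserted above mark rays that missed the asteroid:

time, state, point_cloud = simulate()

# stack the asteroid-frame intersections and drop the NaN rows (missed rays)
ast_pts = np.concatenate(point_cloud['ast_ints'], axis=0)
ast_pts = ast_pts[~np.isnan(ast_pts).any(axis=1)]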
Example #4
def test_rotation():
    R = attitude.rot3(np.pi / 2)
    sensor_x = raycaster.Lidar()
    rot_arr = raycaster.Lidar().rotate_fov(R)
    np.testing.assert_array_almost_equal(rot_arr,
                                         R.dot(sensor_x.lidar_arr.T).T)
Example #5
def test_view_axis():
    sensor_y = raycaster.Lidar(view_axis=np.array([0, 1, 0]))
    sensor_x = raycaster.Lidar()
    R = attitude.rot3(np.pi / 2)
    np.testing.assert_array_almost_equal(sensor_y.lidar_arr,
                                         R.dot(sensor_x.lidar_arr.T).T)
Example #6
def test_up_axis():
    sensor_y = raycaster.Lidar(up_axis=np.array([0, 1, 0]))
    sensor_z = raycaster.Lidar(up_axis=np.array([0, 0, 1]))
    R = attitude.rot1(np.pi / 2)
    np.testing.assert_array_almost_equal(sensor_z.lidar_arr,
                                         R.dot(sensor_y.lidar_arr.T).T)
Example #7
def test_number_of_steps():
    sensor = raycaster.Lidar(num_step=10)
    np.testing.assert_allclose(sensor.lidar_arr.shape[0], 10**2)
Example #8
def test_object_creation():
    sensor = raycaster.Lidar()
    np.testing.assert_allclose(sensor.up_axis, np.array([0, 0, 1]))
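Taken together, the assertions in Example #4 and Example #5 imply that rotating the default sensor's field of view by rot3(np.pi / 2) reproduces a sensor constructed with view_axis=[0, 1, 0]. A hypothetical companion test making that explicit, using only calls that appear above:

def test_rotate_fov_matches_view_axis():
    # rotate_fov(R) on the default (+x) sensor should match a sensor built
    # to look along +y, by the two assertions above
    R = attitude.rot3(np.pi / 2)
    sensor_x = raycaster.Lidar()
    sensor_y = raycaster.Lidar(view_axis=np.array([0, 1, 0]))
    np.testing.assert_array_almost_equal(sensor_x.rotate_fov(R),
                                         sensor_y.lidar_arr)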