Example #1
def stabilize(task):
    tau = 0.1
    inertia = 1.0

    orientation = mass_node.get_quat()
    delta_orientation = target_node.get_quat() * invert(orientation)
    delta_node.set_quat(invert(delta_orientation))

    delta_angle = delta_orientation.get_angle_rad()
    if abs(delta_angle) < (pi / 360 * 0.1):
        delta_angle = 0
        axis_of_torque = VBase3(0, 0, 0)
    else:
        axis_of_torque = delta_orientation.get_axis()
        axis_of_torque.normalize()
        axis_of_torque = s.render.get_relative_vector(
            mass_node,
            axis_of_torque,
        )
    if delta_angle > pi:
        delta_angle -= 2 * pi
    axis_node.set_scale(axis_of_torque * 0.2)

    # If the mass was standing still, this would be the velocity that has to be
    # reached to achieve the targeted orientation in tau seconds.
    target_angular_velocity = axis_of_torque * delta_angle / tau
    # But we also have to cancel out the current velocity for that.
    angular_velocity = mass.get_angular_velocity()  # radians / sec
    countering_velocity = -angular_velocity

    # An impulse of 1 causes an angular velocity of 2.5 rad/s on a unit mass, so
    # we have to adjust accordingly.
    target_impulse = target_angular_velocity / 2.5 * inertia
    countering_impulse = countering_velocity / 2.5 * inertia

    # Now just sum those up, and we have the impulse that needs to be applied to
    # steer towards target.
    impulse = target_impulse + countering_impulse

    # Clamp the impulse to what the "motor" can produce.
    max_impulse = 0.4
    if impulse.length() > max_impulse:
        clamped_impulse = impulse / impulse.length() * max_impulse
    else:
        clamped_impulse = impulse

    print("{:4.0f} deg, "
          "{:2.5f} of {:2.5f} impulse, "
          "{:3.2f}% power of {:4.2f}% "
          "({:5.2f}% steering, {:5.2f}% countering) requested".format(
              delta_angle / (2 * pi) * 360,
              clamped_impulse.length(),
              impulse.length(),
              clamped_impulse.length() / max_impulse * 100,
              impulse.length() / max_impulse * 100,
              target_angular_velocity.length() / pi * 100,
              countering_velocity.length() / pi * 100,
          ))
    mass.apply_torque_impulse(clamped_impulse)
    return task.cont
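
This function is written as a Panda3D task and relies on module-level objects (s, mass_node, target_node, delta_node, axis_node, mass) that the snippet does not define. Below is a minimal sketch of how it might be wired up, assuming a ShowBase instance and a Bullet rigid body; all names and the physics setup are assumptions, not part of the original example:

from math import pi

from direct.showbase.ShowBase import ShowBase
from panda3d.core import VBase3, invert
from panda3d.bullet import BulletRigidBodyNode

s = ShowBase()
mass = BulletRigidBodyNode("mass")                # body whose orientation we stabilize
mass_node = s.render.attach_new_node(mass)
target_node = s.render.attach_new_node("target")  # desired orientation
delta_node = s.render.attach_new_node("delta")    # visualizes the remaining rotation
axis_node = s.render.attach_new_node("axis")      # visualizes the torque axis
# (Creating and stepping a BulletWorld is omitted here.)
s.task_mgr.add(stabilize, "stabilize")
s.run()
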
Example #3
def test_mat3_invert_same_type(type):
    mat = type((1, 0, 0,
                0, 1, 0,
                1, 2, 3))

    inv = core.invert(mat)
    assert mat.__class__ == inv.__class__
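
The type argument here is presumably supplied by pytest parametrization over the single- and double-precision 3x3 matrix classes; a sketch of what such a setup might look like (the exact parameter list is an assumption):

import pytest
from panda3d import core

@pytest.mark.parametrize("type", (core.Mat3, core.Mat3D))
def test_mat3_invert_same_type(type):
    ...  # body as in the example above
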
Example #4
    def update(self):
        """ Updates the commonly used resources, mostly the shader inputs """
        update = self._input_ubo.update_input

        # Get the current transform matrix of the camera
        view_mat = Globals.render.get_transform(self._showbase.cam).get_mat()

        # Compute the view matrix, but with a z-up coordinate system
        update("view_mat_z_up", view_mat * Mat4.convert_mat(CS_zup_right, CS_yup_right))

        # Compute the view matrix without the camera rotation
        view_mat_billboard = Mat4(view_mat)
        view_mat_billboard.set_row(0, Vec3(1, 0, 0))
        view_mat_billboard.set_row(1, Vec3(0, 1, 0))
        view_mat_billboard.set_row(2, Vec3(0, 0, 1))
        update("view_mat_billboard", view_mat_billboard)

        update("camera_pos", self._showbase.camera.get_pos(Globals.render))

        # Compute last view projection mat
        curr_vp = self._input_ubo.get_input("view_proj_mat_no_jitter")
        update("last_view_proj_mat_no_jitter", curr_vp)
        curr_vp = Mat4(curr_vp)
        curr_vp.invert_in_place()
        update("last_inv_view_proj_mat_no_jitter", curr_vp)

        proj_mat = Mat4(self._showbase.camLens.get_projection_mat())

        # Set the projection matrix as an input, but convert it to the correct
        # coordinate system beforehand.
        proj_mat_zup = Mat4.convert_mat(CS_yup_right, CS_zup_right) * proj_mat
        update("proj_mat", proj_mat_zup)

        # Set the inverse projection matrix
        update("inv_proj_mat", invert(proj_mat_zup))

        # Remove jitter and set the new view projection mat
        proj_mat.set_cell(1, 0, 0.0)
        proj_mat.set_cell(1, 1, 0.0)
        update("view_proj_mat_no_jitter", view_mat  * proj_mat)

        # Store the frame delta
        update("frame_delta", Globals.clock.get_dt())
        update("smooth_frame_delta", 1.0 / max(1e-5, Globals.clock.get_average_frame_rate()))
        update("frame_time", Globals.clock.get_frame_time())

        # Store the current film offset; we use this to compute the pixel-perfect
        # velocity, which is otherwise not possible. This is usually 0, except
        # when SMAA and reprojection are enabled.
        update("current_film_offset", self._showbase.camLens.get_film_offset())

        max_clip_length = 1
        if self._pipeline.plugin_mgr.is_plugin_enabled("smaa"):
            max_clip_length = self._pipeline.plugin_mgr.instances["smaa"].history_length

        update("temporal_index", Globals.clock.get_frame_count() % max_clip_length)
        update("frame_index", Globals.clock.get_frame_count())
Example #5
def test_egg2pg_transform_pos3d(egg_coordsys, coordsys):
    vpool = egg.EggVertexPool("vpool")
    vtx = vpool.make_new_vertex(core.Point3D.rfu(-8, 0.5, 4.5, egg_coordsys))

    point = egg.EggPoint()
    point.add_vertex(vtx)

    group = egg.EggGroup("group")
    group.add_translate3d(core.Point3D.rfu(1, 2, 3, egg_coordsys))
    assert not group.transform_is_identity()
    group.add_child(point)

    mat = group.get_transform3d()
    assert group.get_vertex_frame() == core.Mat4D.ident_mat()
    assert group.get_node_frame() == mat
    assert group.get_vertex_frame_inv() == core.Mat4D.ident_mat()
    assert group.get_node_frame_inv() == core.invert(mat)
    assert group.get_vertex_to_node() == core.invert(mat)
    assert group.get_node_to_vertex() == mat

    assert group.get_vertex_frame_ptr() is None
    assert group.get_vertex_frame_inv_ptr() is None

    data = egg.EggData()
    data.set_coordinate_system(egg_coordsys)
    data.add_child(vpool)
    data.add_child(group)

    root = egg.load_egg_data(data, coordsys)
    assert root
    node, = root.children

    # Ensure the node has the expected position.
    assert node.transform.pos == core.Point3.rfu(1, 2, 3, coordsys)

    # Get the location of the vertex.  This is a quick, hacky way to get it.
    point = core.NodePath(node).get_tight_bounds()[0]
    assert point == core.Point3.rfu(-8, 0.5, 4.5, coordsys)
Example #6
def test_egg2pg_transform_mat_unchanged(coordsys):
    # Ensures that the matrix remains unchanged if coordinate system is same.
    mat = (5, 2, -3, 4, 5, 6, 7, 8, 9, 1, -3, 2, 5, 2, 5, 2)
    group = egg.EggGroup("group")
    group.add_matrix4(mat)
    assert not group.transform_is_identity()

    assert group.get_vertex_frame() == core.Mat4D.ident_mat()
    assert group.get_node_frame() == mat
    assert group.get_vertex_frame_inv() == core.Mat4D.ident_mat()
    assert group.get_node_frame_inv() == core.invert(mat)
    assert group.get_vertex_to_node() == core.invert(mat)
    assert group.get_node_to_vertex() == mat

    data = egg.EggData()
    data.set_coordinate_system(coordsys)
    data.add_child(group)

    root = egg.load_egg_data(data, coordsys)
    assert root
    node, = root.children

    assert node.transform.mat == mat
Example #7
    def do_update(self):
        """ Updates the commonly used resources, mostly the shader inputs """
        update = self._input_ubo.update_input

        # Get the current transform matrix of the camera
        view_mat = Globals.render.get_transform(self._showbase.cam).get_mat()

        # Compute the view matrix, but with a z-up coordinate system
        update("view_mat_z_up", view_mat * Mat4.convert_mat(CS_zup_right, CS_yup_right))

        # Compute the view matrix without the camera rotation
        view_mat_billboard = Mat4(view_mat)
        view_mat_billboard.set_row(0, Vec3(1, 0, 0))
        view_mat_billboard.set_row(1, Vec3(0, 1, 0))
        view_mat_billboard.set_row(2, Vec3(0, 0, 1))
        update("view_mat_billboard", view_mat_billboard)

        update("camera_pos", self._showbase.camera.get_pos(Globals.render))
        update("last_view_proj_mat_no_jitter", self._input_ubo.get_input("view_proj_mat_no_jitter"))
        proj_mat = Mat4(self._showbase.camLens.get_projection_mat())

        # Set the projection matrix as an input, but convert it to the correct
        # coordinate system beforehand.
        proj_mat_zup = Mat4.convert_mat(CS_yup_right, CS_zup_right) * proj_mat
        update("proj_mat", proj_mat_zup)

        # Set the inverse projection matrix
        update("inv_proj_mat", invert(proj_mat_zup))

        # Remove jitter and set the new view projection mat
        proj_mat.set_cell(1, 0, 0.0)
        proj_mat.set_cell(1, 1, 0.0)
        update("view_proj_mat_no_jitter", view_mat * proj_mat)

        # Store the frame delta
        update("frame_delta", Globals.clock.get_dt())
        update("frame_time", Globals.clock.get_frame_time())
Example #8
    def update(self):
        """ Updates the commonly used resources, mostly the shader inputs """
        update = self._input_ubo.update_input

        # Get the current transform matrix of the camera
        view_mat = Globals.render.get_transform(self._showbase.cam).get_mat()

        # Compute the view matrix, but with a z-up coordinate system
        update("view_mat_z_up", view_mat * Mat4.convert_mat(CS_zup_right, CS_yup_right))

        # Compute the view matrix without the camera rotation
        view_mat_billboard = Mat4(view_mat)
        view_mat_billboard.set_row(0, Vec3(1, 0, 0))
        view_mat_billboard.set_row(1, Vec3(0, 1, 0))
        view_mat_billboard.set_row(2, Vec3(0, 0, 1))
        update("view_mat_billboard", view_mat_billboard)

        update("camera_pos", self._showbase.camera.get_pos(Globals.render))

        # Compute last view projection mat
        curr_vp = self._input_ubo.get_input("view_proj_mat_no_jitter")
        update("last_view_proj_mat_no_jitter", curr_vp)
        curr_vp = Mat4(curr_vp)
        curr_vp.invert_in_place()
        curr_inv_vp = curr_vp
        update("last_inv_view_proj_mat_no_jitter", curr_inv_vp)

        proj_mat = Mat4(self._showbase.camLens.get_projection_mat())

        # Set the projection matrix as an input, but convert it to the correct
        # coordinate system beforehand.
        proj_mat_zup = Mat4.convert_mat(CS_yup_right, CS_zup_right) * proj_mat
        update("proj_mat", proj_mat_zup)

        # Set the inverse projection matrix
        update("inv_proj_mat", invert(proj_mat_zup))

        # Remove jitter and set the new view projection mat
        proj_mat.set_cell(1, 0, 0.0)
        proj_mat.set_cell(1, 1, 0.0)
        update("view_proj_mat_no_jitter", view_mat  * proj_mat)

        # Store the frame delta
        update("frame_delta", Globals.clock.get_dt())
        update("smooth_frame_delta", 1.0 / max(1e-5, Globals.clock.get_average_frame_rate()))
        update("frame_time", Globals.clock.get_frame_time())

        # Store the current film offset; we use this to compute the pixel-perfect
        # velocity, which is otherwise not possible. This is usually 0, except
        # when SMAA and reprojection are enabled.
        update("current_film_offset", self._showbase.camLens.get_film_offset())
        update("frame_index", Globals.clock.get_frame_count())

        # Compute frustum corners in the order BL, BR, TL, TR
        ws_frustum_directions = Mat4()
        vs_frustum_directions = Mat4()
        inv_proj_mat = Globals.base.camLens.get_projection_mat_inv()
        view_mat_inv = Mat4(view_mat)
        view_mat_inv.invert_in_place()

        for i, point in enumerate(((-1, -1), (1, -1), (-1, 1), (1, 1))):
            result = inv_proj_mat.xform(Vec4(point[0], point[1], 1.0, 1.0))
            vs_dir = result.xyz.normalized()
            vs_frustum_directions.set_row(i, Vec4(vs_dir, 1))
            ws_dir = view_mat_inv.xform(Vec4(result.xyz, 0))
            ws_frustum_directions.set_row(i, ws_dir)

        update("vs_frustum_directions", vs_frustum_directions)
        update("ws_frustum_directions", ws_frustum_directions)
Example #9
    def update(self):
        """ Updates the commonly used resources, mostly the shader inputs """
        update = self._input_ubo.update_input

        # Get the current transform matrix of the camera
        view_mat = Globals.render.get_transform(self._showbase.cam).get_mat()

        # Compute the view matrix, but with a z-up coordinate system
        zup_conversion = Mat4.convert_mat(CS_zup_right, CS_yup_right)
        update("view_mat_z_up", view_mat * zup_conversion)

        # Compute the view matrix without the camera rotation
        view_mat_billboard = Mat4(view_mat)
        view_mat_billboard.set_row(0, Vec3(1, 0, 0))
        view_mat_billboard.set_row(1, Vec3(0, 1, 0))
        view_mat_billboard.set_row(2, Vec3(0, 0, 1))
        update("view_mat_billboard", view_mat_billboard)

        update("camera_pos", self._showbase.camera.get_pos(Globals.render))

        # Compute last view projection mat
        curr_vp = self._input_ubo.get_input("view_proj_mat_no_jitter")
        update("last_view_proj_mat_no_jitter", curr_vp)
        curr_vp = Mat4(curr_vp)
        curr_vp.invert_in_place()
        curr_inv_vp = curr_vp
        update("last_inv_view_proj_mat_no_jitter", curr_inv_vp)

        proj_mat = Mat4(self._showbase.camLens.get_projection_mat())

        # Set the projection matrix as an input, but convert it to the correct
        # coordinate system beforehand.
        proj_mat_zup = Mat4.convert_mat(CS_yup_right, CS_zup_right) * proj_mat
        update("proj_mat", proj_mat_zup)

        # Set the inverse projection matrix
        update("inv_proj_mat", invert(proj_mat_zup))

        # Remove jitter and set the new view projection mat
        proj_mat.set_cell(1, 0, 0.0)
        proj_mat.set_cell(1, 1, 0.0)
        update("view_proj_mat_no_jitter", view_mat * proj_mat)

        # Store the frame delta
        update("frame_delta", Globals.clock.get_dt())
        update("smooth_frame_delta",
               1.0 / max(1e-5, Globals.clock.get_average_frame_rate()))
        update("frame_time", Globals.clock.get_frame_time())

        # Store the current film offset; we use this to compute the pixel-perfect
        # velocity, which is otherwise not possible. This is usually 0, except
        # when SMAA and reprojection are enabled.
        update("current_film_offset", self._showbase.camLens.get_film_offset())
        update("frame_index", Globals.clock.get_frame_count())

        # Compute frustum corners in the order BL, BR, TL, TR
        ws_frustum_directions = Mat4()
        vs_frustum_directions = Mat4()
        inv_proj_mat = Globals.base.camLens.get_projection_mat_inv()
        view_mat_inv = Mat4(view_mat)
        view_mat_inv.invert_in_place()

        for i, point in enumerate(((-1, -1), (1, -1), (-1, 1), (1, 1))):
            result = inv_proj_mat.xform(Vec4(point[0], point[1], 1.0, 1.0))
            vs_dir = (zup_conversion.xform(result)).xyz.normalized()
            vs_frustum_directions.set_row(i, Vec4(vs_dir, 1))
            ws_dir = view_mat_inv.xform(Vec4(result.xyz, 0))
            ws_frustum_directions.set_row(i, ws_dir)

        update("vs_frustum_directions", vs_frustum_directions)
        update("ws_frustum_directions", ws_frustum_directions)

        update("screen_size", Globals.resolution)
        update("native_screen_size", Globals.native_resolution)
        update("lc_tile_count", self._pipeline.light_mgr.num_tiles)
Example #10
def test_mat4_invert_same_type(type):
    mat = type((1, 0, 0, 0, 0, 1, 0, 0, 0, 0, 1, 0, 1, 2, 3, 1))

    inv = core.invert(mat)
    assert mat.__class__ == inv.__class__
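
As in Example #3, the type parameter would presumably come from a parametrize decorator covering both precisions, e.g. @pytest.mark.parametrize("type", (core.Mat4, core.Mat4D)).
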
Example #11
    def ecu_gyro_stabilization(self):
        # Active stabilization and angular damping
        # FIXME: Get from self.inputs
        tau = 0.2  # Seconds until target orientation is reached

        if self.sensors[LOCAL_UP]:
            target_mode = self.inputs[ACTIVE_STABILIZATION_ON_GROUND]
        else:
            target_mode = self.inputs[ACTIVE_STABILIZATION_IN_AIR]

        if target_mode == TO_HORIZON:
            # Stabilize to the current heading, but in a horizontal
            # orientation
            self.target_node.set_hpr(
                self.app.render,
                self.vehicle.get_h(),
                0,
                0,
            )
        elif target_mode == TO_GROUND:
            # Stabilize towards the local up
            self.target_node.set_hpr(self.centroid, (0, 0, 0))

        if target_mode != PASSIVE:
            xyz_driver_modification = self.inputs[TARGET_ORIENTATION]
            hpr_driver_modification = VBase3(
                xyz_driver_modification.z,
                xyz_driver_modification.x,
                xyz_driver_modification.y,
            )
            self.target_node.set_hpr(
                self.target_node,
                hpr_driver_modification,
            )

            # Now comes the math.
            orientation = self.vehicle.get_quat(self.app.render)
            target_orientation = self.target_node.get_quat(self.app.render)
            delta_orientation = target_orientation * invert(orientation)
            self.delta_node.set_quat(invert(delta_orientation))

            delta_angle = delta_orientation.get_angle_rad()
            if abs(delta_angle) < (pi / 360 * 0.1) or isnan(delta_angle):
                delta_angle = 0
                axis_of_torque = VBase3(0, 0, 0)
            else:
                axis_of_torque = delta_orientation.get_axis()
                axis_of_torque.normalize()
                axis_of_torque = self.app.render.get_relative_vector(
                    self.vehicle,
                    axis_of_torque,
                )
            if delta_angle > pi:
                delta_angle -= 2 * pi

            # If the mass was standing still, this would be the velocity that
            # has to be reached to achieve the targeted orientation in tau
            # seconds.
            target_angular_velocity = axis_of_torque * delta_angle / tau
        else:  # `elif target_mode == PASSIVE:`, since we might want an OFF mode
            # Passive stabilization, so this is the pure commanded impulse
            target_angular_velocity = self.app.render.get_relative_vector(
                self.vehicle,
                self.inputs[TARGET_ORIENTATION] * tau / pi,
            )

        # But we also have to cancel out the current velocity for that.
        angular_velocity = self.physics_node.get_angular_velocity()
        countering_velocity = -angular_velocity

        # An impulse of 1 causes an angular velocity of 2.5 rad/s on a unit mass,
        # so we have to adjust accordingly.
        target_impulse = target_angular_velocity / 2.5 * self.vehicle_data.mass
        countering_impulse = countering_velocity / 2.5 * self.vehicle_data.mass

        # Now just sum those up, and we have the impulse that needs to be
        # applied to steer towards target.
        impulse = target_impulse + countering_impulse
        return impulse
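
Unlike Example #1, this method returns the impulse instead of applying it. A minimal sketch of a caller that clamps and applies the result, mirroring the clamping logic of Example #1; the method name, the limit, and physics_node being a Bullet rigid body are assumptions:

    def apply_gyro_stabilization(self):
        impulse = self.ecu_gyro_stabilization()
        max_impulse = 0.4  # hypothetical motor limit, as in Example #1
        if impulse.length() > max_impulse:
            impulse = impulse / impulse.length() * max_impulse
        self.physics_node.apply_torque_impulse(impulse)
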