Example #1
def rotate(contr):
    """ Read the movements of the mouse and apply them
        as a rotation to the camera. """
    # get the object this script is attached to
    camera = contr.owner

    scene = blenderapi.scene()
    if not scene:
        # not ready, main reload(blenderapi)
        return

    # Do not move the camera if the current view is using another camera
    if camera != scene.active_camera:
        return

    # Get sensor named Mouse
    mouse = contr.sensors['Mouse']
    # Get Blender keyboard sensor
    keyboard = contr.sensors['All_Keys']

    # Show the cursor
    mouse_visible = True

    keylist = keyboard.events
    for key in keylist:
        if key[1] == blenderapi.input_active():
            # The left CTRL key allows rotating the camera
            if key[0] == blenderapi.LEFTCTRLKEY:
                # Hide the cursor while we control the camera
                mouse_visible = False
                if mouse.positive:
                    # get width and height of game window
                    width = blenderapi.render().getWindowWidth()
                    height = blenderapi.render().getWindowHeight()

                    # get mouse movement from function
                    move = mouse_move(camera, mouse, width, height)

                    # set mouse sensitivity
                    sensitivity = camera['Sensitivity']

                    # Amount, direction and sensitivity
                    leftRight = move[0] * sensitivity
                    upDown = move[1] * sensitivity

                    # set the values
                    camera.applyRotation([0.0, 0.0, leftRight], 0)
                    camera.applyRotation([upDown, 0.0, 0.0], 1)

                    # Center mouse in game window
                    # Using the '//' operator (floor division) to produce an integer result
                    blenderapi.render().setMousePosition(
                        width // 2, height // 2)

    # Set the cursor visibility
    blenderapi.mousepointer(visible=mouse_visible)
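Examples #1 and #2 (and several examples below) rely on a mouse_move helper that is not shown here. A minimal sketch of what such a helper might look like, assuming SCA_MouseSensor.position gives the cursor in pixels and that the caller re-centres the cursor every frame; the scaling and sign conventions are assumptions, not the project's implementation:

def mouse_move(obj, mouse, width, height):
    """ Hedged sketch: per-frame mouse displacement from the window centre,
    normalised by the window size. """
    x, y = mouse.position          # assumed pixel coordinates of the cursor
    # Offset from the centre; the caller warps the cursor back each frame,
    # so this is effectively the movement since the last frame.
    dx = (x - width // 2) / width
    dy = (y - height // 2) / height
    # Return (left/right, up/down); the sign of dy is an assumption.
    return dx, -dy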
Example #2
def rotate(contr):
    """ Read the movements of the mouse and apply them
        as a rotation to the camera. """
    # get the object this script is attached to
    camera = contr.owner

    scene = blenderapi.scene()
    if not scene:
        # not ready, main reload(blenderapi)
        return

    # Do not move the camera if the current view is using another camera
    if camera != scene.active_camera:
        return

    # Get sensor named Mouse
    mouse = contr.sensors['Mouse']
    # Get Blender keyboard sensor
    keyboard = contr.sensors['All_Keys']

    # Show the cursor
    mouse_visible = True

    keylist = keyboard.events
    for key in keylist:
        if key[1] == blenderapi.input_active():
            # The left CTRL key allows rotating the camera
            if key[0] == blenderapi.LEFTCTRLKEY:
                # Hide the cursor while we control the camera
                mouse_visible = False
                if mouse.positive:
                    # get width and height of game window
                    width = blenderapi.render().getWindowWidth()
                    height = blenderapi.render().getWindowHeight()

                    # get mouse movement from function
                    move = mouse_move(camera, mouse, width, height)

                    # set mouse sensitivity
                    sensitivity = camera['Sensitivity']

                    # Amount, direction and sensitivity
                    leftRight = move[0] * sensitivity
                    upDown = move[1] * sensitivity

                    # set the values
                    camera.applyRotation([0.0, 0.0, leftRight], 0)
                    camera.applyRotation([upDown, 0.0, 0.0], 1)

                    # Center mouse in game window
                    # Using the '//' operator (floor division) to produce an integer result
                    blenderapi.render().setMousePosition(width // 2, height // 2)

    # Set the cursor visibility
    blenderapi.mousepointer(visible=mouse_visible)
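The loops above index each keyboard event as key[0] (the key code) and key[1] (its state). A small sketch of that structure, assuming keyboard.events yields (keycode, status) pairs and that blenderapi.input_active() wraps the corresponding bge.logic state constant:

def active_keys(keyboard_sensor):
    """ Hypothetical helper: key codes whose state is 'active' this frame. """
    active = blenderapi.input_active()   # assumed to wrap bge.logic.KX_INPUT_ACTIVE
    return [keycode for keycode, status in keyboard_sensor.events
            if status == active]

# The CTRL test above could then read (sketch, not the project's API):
# if blenderapi.LEFTCTRLKEY in active_keys(contr.sensors['All_Keys']): ...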
Example #3
def head_control(contr):
    """ Move the target of the head and camera

    Use the movement of the mouse to determine the rotation
    for the human head and camera. """
    # get the object this script is attached to
    human = contr.owner
    scene = blenderapi.scene()
    target = scene.objects['Head_Empty']
    # get the camera on the human head
    camera = scene.objects['Human_Camera']

    # set mouse sensitivity
    sensitivity = human['Sensitivity']

    # Do not move the camera if the current view is using another camera
    if camera != blenderapi.scene().active_camera:
        return

    # Get sensor named Mouse
    mouse = contr.sensors['Mouse']

    if mouse.positive:
        # get width and height of game window
        width = blenderapi.render().getWindowWidth()
        height = blenderapi.render().getWindowHeight()

        # get mouse movement from function
        move = mouse_move(human, mouse, width, height)

        # Amount, direction and sensitivity
        left_right = move[0] * sensitivity
        up_down = move[1] * sensitivity

        target.applyMovement([0.0, left_right, 0.0], True)
        target.applyMovement([0.0, 0.0, up_down], True)

        # Reset mouse position to the centre of the screen
        # Using the '//' operator (floor division) to produce an integer result
        blenderapi.render().setMousePosition(width // 2, height // 2)
Example #4
def head_control(contr):
    """ Move the target of the head and camera

    Use the movement of the mouse to determine the rotation
    for the human head and camera. """
    # get the object this script is attached to
    human = contr.owner
    scene = blenderapi.scene()
    target = scene.objects['Head_Empty']
    # get the camera on the human head
    camera = scene.objects['Human_Camera']

    # set mouse sensitivity
    sensitivity = human['Sensitivity']

    # Do not move the camera if the current view is using another camera
    if camera != blenderapi.scene().active_camera:
        return

    # Get sensor named Mouse
    mouse = contr.sensors['Mouse']

    if mouse.positive:
        # get width and height of game window
        width = blenderapi.render().getWindowWidth()
        height = blenderapi.render().getWindowHeight()

        # get mouse movement from function
        move = mouse_move(human, mouse, width, height)

        # Amount, direction and sensitivity
        left_right = move[0] * sensitivity
        up_down = move[1] * sensitivity

        target.applyMovement([0.0, left_right, 0.0], True)
        target.applyMovement([0.0, 0.0, up_down], True)

        # Reset mouse position to the centre of the screen
        # Using the '//' operator (floor division) to produce an integer result
        blenderapi.render().setMousePosition(width // 2, height // 2)
Example #5
def write():
    """write on screen"""
    width = blenderapi.render().getWindowWidth()
    height = blenderapi.render().getWindowHeight()

    # OpenGL setup
    bgl.glMatrixMode(bgl.GL_PROJECTION)
    bgl.glLoadIdentity()
    bgl.gluOrtho2D(0, width, 0, height)
    bgl.glMatrixMode(bgl.GL_MODELVIEW)
    bgl.glLoadIdentity()

    # BLF drawing routine
    blf.size(font_id, height // 40, 72)
    data = parse_file.read_file()
    data = data.splitlines()
    linePosition = height * 0.8
    for line in data:
        line_len = len(line)
        blf.position(font_id, (width * 0.05), linePosition, 0)
        blf.enable(font_id, blf.SHADOW)
        blf.shadow(font_id, 0, 1.0, 0.2, 0.0, 1.0)
        blf.draw(font_id, line)
        linePosition -= height * 0.05
Example #6
def write():
    """write on screen"""
    width = blenderapi.render().getWindowWidth()
    height = blenderapi.render().getWindowHeight()

    # OpenGL setup
    bgl.glMatrixMode(bgl.GL_PROJECTION)
    bgl.glLoadIdentity()
    bgl.gluOrtho2D(0, width, 0, height)
    bgl.glMatrixMode(bgl.GL_MODELVIEW)
    bgl.glLoadIdentity()

    # BLF drawing routine
    blf.size(font_id, height // 40, 72)
    data = parse_file.read_file()
    data = data.splitlines()
    linePosition = height * 0.8
    for line in data:
        line_len = len(line)
        blf.position(font_id, (width * 0.05), linePosition, 0)
        blf.enable(font_id, blf.SHADOW)
        blf.shadow(font_id, 0, 1.0, 0.2, 0.0, 1.0)
        blf.draw(font_id, line)
        linePosition -= height * 0.05
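write() only issues bgl/blf draw calls, so it has to run after each frame is rendered. A minimal sketch of one way to hook it up, assuming the standard BGE KX_Scene.post_draw callback list; whether the surrounding module registers it this way is an assumption:

scene = blenderapi.scene()
if scene and write not in scene.post_draw:
    # post_draw callbacks run after the viewport render, on top of the frame
    scene.post_draw.append(write)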
Example #7
    def __init__(self, obj, parent=None):
        """ Constructor method.

        Receives the reference to the Blender object.
        The second parameter should be the name of the object's parent.
        """
        logger.info("%s initialization" % obj.name)
        # Call the constructor of the parent class
        morse.core.sensor.Sensor.__init__(self, obj, parent)

        # Set the background color of the scene
        self.bg_color = [143, 143, 143, 255]

        self._camera_image = None

        """
        Check if the bge.render.offScreenCreate method exists. If it
        exists, Morse will use it (to use FBO). Otherwise, Morse will se
        the old and classic viewport rendering. It requieres creation of
        additional scenes for each camera resolution, and
        synchronisation of these scenes before rendering (which makes
        the process potentially slower and may introduce some glitches
        if some objects are not properly synchronised).
        """
        self._offscreen_create = getattr(blenderapi.render(), 'offScreenCreate', None)

        if not self._offscreen_create:
            self.scene_name = 'S.%dx%d' % (self.image_width, self.image_height)

            persistantstorage = morse.core.blenderapi.persistantstorage()
            parent_name = self.robot_parent.name()
            is_parent_external = False

            for robot in persistantstorage.externalRobotDict.keys():
                if robot.name == parent_name:
                    is_parent_external = True
                    break

            if not is_parent_external:
                logger.info("Adding scene %s" % self.scene_name)
                blenderapi.add_scene(self.scene_name, overlay=0)
        logger.info('Component initialized, runs at %.2f Hz', self.frequency)
Example #8
    def __init__(self, obj, parent=None):
        """ Constructor method.

        Receives the reference to the Blender object.
        The second parameter should be the name of the object's parent.
        """
        logger.info("%s initialization" % obj.name)
        # Call the constructor of the parent class
        morse.core.sensor.Sensor.__init__(self, obj, parent)

        # Set the background color of the scene
        self.bg_color = [143, 143, 143, 255]

        self._camera_image = None
        """
        Check if the bge.render.offScreenCreate method exists. If it
        exists, Morse will use it (to use FBO). Otherwise, Morse will se
        the old and classic viewport rendering. It requieres creation of
        additional scenes for each camera resolution, and
        synchronisation of these scenes before rendering (which makes
        the process potentially slower and may introduce some glitches
        if some objects are not properly synchronised).
        """
        self._offscreen_create = getattr(blenderapi.render(),
                                         'offScreenCreate', None)

        if not self._offscreen_create:
            self.scene_name = 'S.%dx%d' % (self.image_width, self.image_height)

            persistantstorage = morse.core.blenderapi.persistantstorage()
            parent_name = self.robot_parent.name()
            is_parent_external = False

            for robot in persistantstorage.externalRobotDict.keys():
                if robot.name == parent_name:
                    is_parent_external = True
                    break

            if not is_parent_external:
                logger.info("Adding scene %s" % self.scene_name)
                blenderapi.add_scene(self.scene_name, overlay=0)
        logger.info('Component initialized, runs at %.2f Hz', self.frequency)
Example #9
import logging; logger = logging.getLogger("morse." + __name__)

from morse.core import blenderapi
import bgl
import blf

from morse.helpers import passive_objects

font_id = 0

co = blenderapi.controller()
keyboard = co.sensors['All_Keys']

scene = blenderapi.scene()

windowWidth = blenderapi.render().getWindowWidth()
windowHeight = blenderapi.render().getWindowHeight()


def write():
    """
    Write the name of all active objects on Screen
    """
    # OpenGL setup
    bgl.glMatrixMode(bgl.GL_PROJECTION)
    bgl.glLoadIdentity()
    bgl.gluOrtho2D(0, windowWidth, 0, windowHeight)
    bgl.glMatrixMode(bgl.GL_MODELVIEW)
    bgl.glLoadIdentity()

    cam = scene.active_camera
Example #10
import logging; logger = logging.getLogger("morse." + __name__)

from morse.core import blenderapi
import bgl
import blf

from morse.helpers import passive_objects

font_id = 0

co = blenderapi.controller()
keyboard = co.sensors['All_Keys']

scene = blenderapi.scene()

windowWidth = blenderapi.render().getWindowWidth()
windowHeight = blenderapi.render().getWindowHeight()

def write():
    """
    Write the name of all active objects on Screen
    """
    # OpenGL setup
    bgl.glMatrixMode(bgl.GL_PROJECTION)
    bgl.glLoadIdentity()
    bgl.gluOrtho2D(0, windowWidth, 0, windowHeight)
    bgl.glMatrixMode(bgl.GL_MODELVIEW)
    bgl.glLoadIdentity()
    
    cam = scene.active_camera
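Examples #9 and #10 stop right after fetching the active camera. A possible continuation, sketched under the assumption that passive_objects exposes the currently active objects and that KX_Camera.getScreenPosition returns normalised screen coordinates with the origin at the top-left; the helper name and layout are assumptions, not the original code:

def draw_object_labels(cam):
    """ Hedged sketch: label each active object at its on-screen position. """
    for obj in passive_objects.active_objects():   # helper name is an assumption
        if not cam.pointInsideFrustum(obj.worldPosition):
            continue
        screen_x, screen_y = cam.getScreenPosition(obj)
        blf.size(font_id, windowHeight // 40, 72)
        # flip y: getScreenPosition has its origin at the top-left,
        # while the gluOrtho2D projection above puts it at the bottom-left
        blf.position(font_id, screen_x * windowWidth,
                     (1.0 - screen_y) * windowHeight, 0)
        blf.draw(font_id, obj.name)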
Example #11
def hand_control(contr):
    """ Move the hand following the mouse

    Use the movement of the mouse to determine the rotation
    for the IK arm (right arm)
    
    stays for better placing of objects - >(QKEY + EKEY) to rotate body<
    """
    # get the object this script is attached to
    human = contr.owner

    # if the human is external, do nothing
    if human.get('External_Robot_Tag') or human['disable_keyboard_control']:
        return

    # get the suffix of the human to reference the right objects
    suffix = human.name[-4:] if human.name[-4] == "." else ""

    scene = blenderapi.scene()
    target = scene.objects['IK_Target_Empty.R' + suffix]
    right_hand = scene.objects['Hand_Grab.R' + suffix]
    mmb = human.sensors['MMB']

    # If the manipulation mode is inactive, do nothing
    if not human['Manipulate']:
        return

    # set mouse sensitivity
    sensitivity = human['Sensitivity']

    # Get sensors for mouse wheel
    wheel_up = contr.sensors['Wheel_Up']
    wheel_down = contr.sensors['Wheel_Down']
    keyboard = contr.sensors['All_Keys']

    keylist = keyboard.events
    for key in keylist:
        if (key[1] == blenderapi.input_none()
                and key[0] == blenderapi.LEFTCTRLKEY):
            if wheel_up.positive:
                front = 50.0 * sensitivity
                target.applyMovement([front, 0.0, 0.0], True)

            if wheel_down.positive:
                back = -50.0 * sensitivity
                target.applyMovement([back, 0.0, 0.0], True)

    # If nothing is grabbed or the Middle Mouse Button is not pressed,
    # do nothing further
    if (right_hand['selected'] == 'None' or right_hand['selected'] == ''
            or (not mmb.positive)):
        # use head_control for this
        return

    # Get sensor named Mouse
    mouse = contr.sensors['Mouse']

    if mouse.positive:
        # get width and height of game window
        width = blenderapi.render().getWindowWidth()
        height = blenderapi.render().getWindowHeight()

        # get mouse movement from function
        move = mouse_move(human, mouse, width, height)

        # Amount, direction and sensitivity
        left_right = move[0] * sensitivity
        up_down = move[1] * sensitivity

        if not human['FOCUSED']:
            target.applyMovement([0.0, left_right, 0.0], True)
            target.applyMovement([0.0, 0.0, up_down], True)

        # Reset mouse position to the centre of the screen
        # Using the '//' operator (floor division) to produce an integer result
        blenderapi.render().setMousePosition(width // 2, height // 2)
Example #12
def head_control(contr):
    """ Move the target of the head and camera

    Use the movement of the mouse to determine the rotation
    for the human head and camera. """
    # get the object this script is attached to
    human = contr.owner

    # if the human is external, do nothing
    if human.get('External_Robot_Tag') or human['disable_keyboard_control']:
        return

    # get the suffix of the human to reference the right objects
    suffix = human.name[-4:] if human.name[-4] == "." else ""

    scene = blenderapi.scene()
    target = scene.objects['Target_Empty' + suffix]
    POS_EMPTY = scene.objects['POS_EMPTY' + suffix]
    Head_Empty = scene.objects['Head_Empty' + suffix]
    right_hand = scene.objects['Hand_Grab.R' + suffix]
    camera = scene.objects['Human_Camera' + suffix]
    mmb = contr.sensors['MMB']

    # Do not move the camera if the current view is using another camera
    if camera != blenderapi.scene().active_camera:
        return

    # If the manipulation mode is active, an object is grabbed
    # and the Middle Mouse Button is pressed, do nothing
    if (human['Manipulate'] and right_hand['selected'] != 'None'
            and right_hand['selected'] != '' and mmb.positive):
        return

    if mmb.positive:
        target = scene.objects['IK_Target_Empty.R' + suffix]

    # Get sensor named Mouse
    mouse = contr.sensors['Mouse']

    if mouse.positive:
        # get width and height of game window
        width = blenderapi.render().getWindowWidth()
        height = blenderapi.render().getWindowHeight()

        # get mouse movement from function
        move = mouse_move(human, mouse, width, height)

        # set mouse sensitivity
        sensitivity = human['Sensitivity']

        # Amount, direction and sensitivity
        left_right = move[0] * sensitivity
        up_down = move[1] * sensitivity

        if not human['FOCUSED']:
            POS_EMPTY.applyRotation([0.0, 0.0, left_right], True)
            if not ((Head_Empty.localOrientation.to_euler()[1] >= 0.7
                     and up_down < 0) or
                    (Head_Empty.localOrientation.to_euler()[1] <= -0.4
                     and up_down > 0)) and not human['Manipulate']:
                # cap the rotation to prevent the camera from turning upside down
                if not mmb.positive:
                    Head_Empty.applyRotation([0.0, -up_down, 0.0], True)
                target.applyMovement([0.0, 0.0, up_down], True)
            elif human['Manipulate']:
                Head_Empty.applyRotation([0.0, -up_down, 0.0], True)
                target.applyMovement([0.0, 0.0, up_down], True)

        # Reset mouse position to the centre of the screen
        # Using the '//' operator (floor division) to produce an integer result
        blenderapi.render().setMousePosition(width // 2, height // 2)
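The nested condition above caps the head pitch between roughly -0.4 and 0.7 radians. The same check, factored into a hypothetical helper to make the limits explicit (the limits and signs are read off the code above; the helper itself is not part of the project):

def pitch_allows(head_empty, up_down, min_pitch=-0.4, max_pitch=0.7):
    """ Hypothetical helper: True if applying up_down keeps the head pitch
    inside [min_pitch, max_pitch] radians. """
    pitch = head_empty.localOrientation.to_euler()[1]
    return not ((pitch >= max_pitch and up_down < 0) or
                (pitch <= min_pitch and up_down > 0))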
Example #13
    def _setup_video_texture(self):
        """ Prepare this camera to use the bge.texture module.
        Extract the references to the Blender camera and material where
        the images will be rendered.
        """
        for child in self.bge_object.children:
            # The camera object that will produce the image in Blender
            if 'CameraRobot' in child.name:
                camera = child
            # The object that contains the material where the image is rendered
            if 'CameraMesh' in child.name:
                screen = child
                # Considering it consists of a single mesh
                mesh = child.meshes[0]
                # Get the material name
                for material in mesh.materials:
                    material_index = material.getMaterialIndex()
                    mesh_material_name = mesh.getMaterialName(material_index)
                    if 'MAScreenMat' in mesh_material_name:
                        material_name = mesh_material_name

        try:
            logger.debug("\tCAMERA: %s" % camera.name)
            logger.debug("\tSCREEN: %s" % screen.name)
            logger.debug("\tMATERIAL: %s" % material_name)
        except UnboundLocalError:
            logger.error("The video camera could not be properly initialized."
                         "The children object could not be found."
                         "Best solution is to re-link the camera.")
            return False

        if not self._offscreen_create:
            self._compute_syncable_objects()
            img_renderer = blenderapi.texture().ImageRender(
                self._scene, camera)
        else:
            fbo = self._offscreen_create(
                self.image_width, self.image_height,
                blenderapi.render().RAS_OFS_RENDER_TEXTURE)
            img_renderer = blenderapi.texture().ImageRender(
                blenderapi.scene(), camera, fbo)

        mat_id = blenderapi.texture().materialID(screen, material_name)
        self._camera_image = blenderapi.texture().Texture(screen, mat_id)
        self._camera_image.source = img_renderer

        # Set the focal length of the camera using the Game Logic Property. One
        # can use either focal (lens) or fov parameter: setting one computes the
        # other accordingly. Fov supersedes focal.
        if self.image_fov is not None:
            camera.fov = self.image_fov
            self.image_focal = camera.lens
        else:
            camera.lens = self.image_focal
        logger.info("\tFocal length of the camera is: %s" % camera.lens)
        logger.info("\tFOV of the camera is: %s" % camera.fov)

        # Set the clipping distances of the camera using the Game Logic Property
        camera.near = self.near_clipping
        logger.info("\tNear clipping distance of the camera is: %s" %
                    camera.near)
        camera.far = self.far_clipping
        logger.info("\tFar clipping distance of the camera is: %s" %
                    camera.far)

        # Set the background to be used for the render
        self._camera_image.source.background = self.bg_color
        # Define the image size. It must be a power of two. Default is 512 * 512
        self._camera_image.source.capsize = [
            self.image_width, self.image_height
        ]
        logger.info("Camera '%s': Exporting an image of capsize: %s pixels" %
                    (self.name(), self._camera_image.source.capsize))

        # Workaround capsize limit to window size
        self.image_width, self.image_height = self._camera_image.source.capsize

        # Reverse the image (boolean game-property)
        self._camera_image.source.flip = self.vertical_flip

        try:
            # Use the Z-Buffer as an image texture for the camera
            if self.retrieve_zbuffer:
                self._camera_image.source.zbuff = True
            # Use the Z-Buffer as input with an array of depths
            if self.retrieve_depth:
                self._camera_image.source.depth = True
        except AttributeError as detail:
            logger.warn("%s\nPlease use Blender > 2.65 for Z-Buffer support" %
                        detail)
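The comment in Example #13 notes that setting fov recomputes lens and vice versa. For reference, the usual relation between the two, assuming Blender's default 32 mm sensor width (the sensor value and the helper names are assumptions; only camera.lens and camera.fov come from the code above):

import math

def fov_from_lens(lens_mm, sensor_mm=32.0):
    """ Horizontal field of view in degrees for a given focal length. """
    return math.degrees(2.0 * math.atan(sensor_mm / (2.0 * lens_mm)))

def lens_from_fov(fov_deg, sensor_mm=32.0):
    """ Focal length in mm for a given horizontal field of view. """
    return sensor_mm / (2.0 * math.tan(math.radians(fov_deg) / 2.0))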
Example #14
    def _setup_video_texture(self):
        """ Prepare this camera to use the bge.texture module.
        Extract the references to the Blender camera and material where
        the images will be rendered.
        """
        for child in self.bge_object.children:
            # The camera object that will produce the image in Blender
            if 'CameraRobot' in child.name:
                camera = child
            # The object that contains the material where the image is rendered
            if 'CameraMesh' in child.name:
                screen = child
                # Considering it consists of a single mesh
                mesh = child.meshes[0]
                # Get the material name
                for material in mesh.materials:
                    material_index = material.getMaterialIndex()
                    mesh_material_name = mesh.getMaterialName(material_index)
                    if 'MAScreenMat' in mesh_material_name:
                        material_name = mesh_material_name

        try:
            logger.debug("\tCAMERA: %s" % camera.name)
            logger.debug("\tSCREEN: %s" % screen.name)
            logger.debug("\tMATERIAL: %s" % material_name)
        except UnboundLocalError:
            logger.error("The video camera could not be properly initialized."
                         "The children object could not be found."
                         "Best solution is to re-link the camera.")
            return False

        if not self._offscreen_create:
            self._compute_syncable_objects()
            img_renderer = blenderapi.texture().ImageRender(self._scene, camera)
        else:
            fbo = self._offscreen_create(self.image_width, self.image_height,
                                         blenderapi.render().RAS_OFS_RENDER_TEXTURE)
            img_renderer = blenderapi.texture().ImageRender(blenderapi.scene(), camera, fbo)

        mat_id = blenderapi.texture().materialID(screen, material_name)
        self._camera_image = blenderapi.texture().Texture(screen, mat_id)
        self._camera_image.source = img_renderer

        # Set the focal length of the camera using the Game Logic Property
        camera.lens = self.image_focal
        logger.info("\tFocal length of the camera is: %s" % camera.lens)

        # Set the clipping distances of the camera using the Game Logic Property
        camera.near = self.near_clipping
        logger.info("\tNear clipping distance of the camera is: %s" %
                       camera.near)
        camera.far = self.far_clipping
        logger.info("\tFar clipping distance of the camera is: %s" %
                       camera.far)

        # Set the background to be used for the render
        self._camera_image.source.background = self.bg_color
        # Define the image size. It must be a power of two. Default is 512 * 512
        self._camera_image.source.capsize = [self.image_width, self.image_height]
        logger.info("Camera '%s': Exporting an image of capsize: %s pixels" %
                (self.name(), self._camera_image.source.capsize))

        # Workaround capsize limit to window size
        self.image_width, self.image_height = self._camera_image.source.capsize

        # Reverse the image (boolean game-property)
        self._camera_image.source.flip = self.vertical_flip

        try:
            # Use the Z-Buffer as an image texture for the camera
            if self.retrieve_zbuffer:
                self._camera_image.source.zbuff = True
            # Use the Z-Buffer as input with an array of depths
            if self.retrieve_depth:
                self._camera_image.source.depth = True
        except AttributeError as detail:
            logger.warn("%s\nPlease use Blender > 2.65 for Z-Buffer support" %
                        detail)
Example #15
    def draw_line(self):
        # draw a segment between each pair of consecutive positions
        # (range up to len - 1 so the last recorded segment is included)
        for i in range(len(self.positions) - 1):
            start = self.positions[i]
            end = self.positions[i + 1]

            render().drawLine(start, end, self.color)
Example #16
def hand_control(contr):
    """ Move the hand following the mouse

    Use the movement of the mouse to determine the rotation
    for the IK arm (right arm)
    
    stays for better placing of objects - >(QKEY + EKEY) to rotate body<
    """
    # get the object this script is attached to
    human = contr.owner
    
    # if the human is external, do nothing
    if human.get('External_Robot_Tag') or human['disable_keyboard_control']:
        return

    # get the suffix of the human to reference the right objects
    suffix = human.name[-4:] if human.name[-4] == "." else ""
    
    scene = blenderapi.scene()
    target = scene.objects['IK_Target_Empty.R' + suffix]
    right_hand = scene.objects['Hand_Grab.R' + suffix]
    mmb = human.sensors['MMB']

    # If the manipulation mode is inactive, do nothing
    if not human['Manipulate']:
        return

    # set mouse sensitivity
    sensitivity = human['Sensitivity']

    # Get sensors for mouse wheel
    wheel_up = contr.sensors['Wheel_Up']
    wheel_down = contr.sensors['Wheel_Down']
    keyboard = contr.sensors['All_Keys']

    keylist = keyboard.events
    for key in keylist:
        if key[1] == blenderapi.input_none() and key[0] == blenderapi.LEFTCTRLKEY:
            if wheel_up.positive:
                front = 50.0 * sensitivity
                target.applyMovement([front, 0.0, 0.0], True)

            if wheel_down.positive:
                back = -50.0 * sensitivity
                target.applyMovement([back, 0.0, 0.0], True)

    # If nothing is grabbed or the Middle Mouse Button is not pressed,
    # do nothing further
    if (right_hand['selected'] == 'None' or right_hand['selected'] == '' or
            (not mmb.positive)):
        # use head_control for this
        return

    # Get sensor named Mouse
    mouse = contr.sensors['Mouse']

    if mouse.positive:
        # get width and height of game window
        width = blenderapi.render().getWindowWidth()
        height = blenderapi.render().getWindowHeight()

        # get mouse movement from function
        move = mouse_move(human, mouse, width, height)

        # Amount, direction and sensitivity
        left_right = move[0] * sensitivity
        up_down = move[1] * sensitivity

        if not human['FOCUSED']:
            target.applyMovement([0.0, left_right, 0.0], True)
            target.applyMovement([0.0, 0.0, up_down], True)

        # Reset mouse position to the centre of the screen
        # Using the '//' operator (floor division) to produce an integer result
        blenderapi.render().setMousePosition(width // 2, height // 2)
Example #17
def head_control(contr):
    """ Move the target of the head and camera

    Use the movement of the mouse to determine the rotation
    for the human head and camera. """
    # get the object this script is attached to
    human = contr.owner
    
    # if the human is external, do nothing
    if human.get('External_Robot_Tag') or human['disable_keyboard_control']:
        return

    # get the suffix of the human to reference the right objects
    suffix = human.name[-4:] if human.name[-4] == "." else ""

    scene = blenderapi.scene()
    target = scene.objects['Target_Empty' + suffix]
    POS_EMPTY = scene.objects['POS_EMPTY' + suffix]
    Head_Empty = scene.objects['Head_Empty' + suffix]
    right_hand = scene.objects['Hand_Grab.R' + suffix]
    camera = scene.objects['Human_Camera' + suffix]
    mmb = contr.sensors['MMB']

    # Do not move the camera if the current view is using another camera
    if camera != blenderapi.scene().active_camera:
        return

    # If the manipulation mode is active, an object is grabbed
    # and the Middle Mouse Button is pressed, do nothing
    if (human['Manipulate'] and right_hand['selected'] != 'None' and
        right_hand['selected'] != '' and mmb.positive):
        return

    if mmb.positive:
        target = scene.objects['IK_Target_Empty.R' + suffix]

    # Get sensor named Mouse
    mouse = contr.sensors['Mouse']

    if mouse.positive:
        # get width and height of game window
        width = blenderapi.render().getWindowWidth()
        height = blenderapi.render().getWindowHeight()

        # get mouse movement from function
        move = mouse_move(human, mouse, width, height)

        # set mouse sensitivity
        sensitivity = human['Sensitivity']

        # Amount, direction and sensitivity
        left_right = move[0] * sensitivity
        up_down = move[1] * sensitivity

        if not human['FOCUSED']:
            POS_EMPTY.applyRotation([0.0, 0.0, left_right], True)
            if not ((Head_Empty.localOrientation.to_euler()[1] >= 0.7
                     and up_down < 0) or
                    (Head_Empty.localOrientation.to_euler()[1] <= -0.4
                     and up_down > 0)) and not human['Manipulate']:
                # cap the rotation to prevent the camera from turning upside down
                if not mmb.positive:
                    Head_Empty.applyRotation([0.0, -up_down, 0.0], True)
                target.applyMovement([0.0, 0.0, up_down], True)
            elif human['Manipulate']:
                Head_Empty.applyRotation([0.0, -up_down, 0.0], True)
                target.applyMovement([0.0, 0.0, up_down], True)

        # Reset mouse position to the centre of the screen
        # Using the '//' operator (floor division) to produce an integer result
        blenderapi.render().setMousePosition(width // 2, height // 2)