def move(cont):
    """ Move the hand to a nice position for carrying objects.

    This script is executed as long as the property 'moveArm' is True.
    """
    ow = cont.owner
    # get the suffix of the human to reference the right objects
    suffix = ow.name[-4:] if ow.name[-4] == "." else ""

    dest = blenderapi.scene().objects['IK_Pose_Empty.R' + suffix]
    hips = blenderapi.scene().objects['Hips_Empty' + suffix]
    left_hand = blenderapi.scene().objects['IK_Target_Empty.L' + suffix]
    human = blenderapi.scene().objects[ow.parent.name + suffix]

    # get the Vector to the right position
    if human['Manipulate']:
        vect = ow.getVectTo(dest)
    else:
        walk_hand_position = human.worldPosition + \
            human.worldOrientation * Vector((0.3, -0.3, 0.9))
        vect = ow.getVectTo(walk_hand_position)

    # vect[0] is the distance
    # vect[1] and vect[2] are the vector in global and local coordinates
    # use global coordinates to move the right hand a bit towards the destination
    ow.applyMovement(vect[1] / 50)
    # move the hips down
    hips.applyMovement([0.0, 0.0, (0.92 - hips.localPosition[2]) / 10])
    # also move the left hand to prevent an unnatural pose
    left_hand.applyMovement([0.0, 0.0, (0.9 - left_hand.localPosition[2]) / 10])

    # if the owner is near enough to the right position, set this position
    if vect[0] < 0.02:
        ow.worldPosition = dest.worldPosition if human['Manipulate'] \
            else walk_hand_position
        ow['moveArm'] = False
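# The /50 and /10 factors above implement simple proportional easing: every
# logic tick the object covers a fixed fraction of the remaining offset, so it
# converges geometrically towards the target, and the vect[0] < 0.02 test just
# snaps it once it is close enough. A minimal one-dimensional sketch of that
# behaviour (plain Python, names illustrative, not part of the BGE API):
def ease_towards(position, target, fraction=1/10, snap_eps=0.02):
    """Move 'position' a fixed fraction of the remaining distance
    towards 'target'; snap exactly onto it when close enough."""
    remaining = target - position
    if abs(remaining) < snap_eps:
        return target
    return position + remaining * fraction

# Example: roughly 0.9, 0.81, 0.729, ... of the gap remains after each step
pos = 0.0
for _ in range(5):
    pos = ease_towards(pos, 1.0)
    print(round(pos, 4))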
def collision():
    """ Detect camera collision and place the camera accordingly """
    co = blenderapi.controller()
    ow = co.owner
    # get the suffix of the human to reference the right objects
    suffix = ow.name[-4:] if ow.name[-4] == "." else ""

    ray = co.sensors["collision"]
    right = co.sensors["RIGHT"]
    left = co.sensors["LEFT"]
    human = blenderapi.scene().objects[ow.parent.parent["human_name"]]

    # if the human is external, do nothing
    if human.get("External_Robot_Tag") or human["disable_keyboard_control"]:
        return

    Orig_Pos = blenderapi.scene().objects["POS_3P_Cam_Orig" + suffix]
    # the distance the camera keeps to objects
    distance = 0.05

    # if near an object, place the camera slightly in front of it
    if ray.positive and not human["Manipulate"] and not (right.positive or left.positive):
        hitPos = ray.hitPosition
        ow.worldPosition = Vector(hitPos) - Vector(ray.rayDirection).normalized() * distance
        ow["prop_collision"] = True
    elif ray.positive and not human["Manipulate"] and right.positive:
        hitPos = (Vector(ray.hitPosition) + Vector(right.hitPosition)) / 2
        ow.worldPosition = hitPos - (Vector(ray.rayDirection) +
                                     Vector(right.rayDirection)).normalized() * distance
        ow["prop_collision"] = True
    elif ray.positive and not human["Manipulate"] and left.positive:
        hitPos = (Vector(ray.hitPosition) + Vector(left.hitPosition)) / 2
        ow.worldPosition = hitPos - (Vector(ray.rayDirection) +
                                     Vector(left.rayDirection)).normalized() * distance
        ow["prop_collision"] = True
    elif left.positive and not human["Manipulate"] and not (right.positive or ray.positive):
        hitPos = left.hitPosition
        ow.worldPosition = Vector(hitPos) - Vector(left.rayDirection).normalized() * distance
        ow["prop_collision"] = True
    elif right.positive and not human["Manipulate"] and not (left.positive or ray.positive):
        hitPos = right.hitPosition
        ow.worldPosition = Vector(hitPos) - Vector(right.rayDirection).normalized() * distance
        ow["prop_collision"] = True
    else:
        ow["prop_collision"] = False
        ow.worldPosition = Orig_Pos.worldPosition
def set_body_position(co):
    """ During grabbing the head moves. The 'Head_Empty' needs to follow. """
    ow = co.owner
    # get the suffix of the human to reference the right objects
    suffix = ow.name[-4:] if ow.name[-4] == "." else ""

    head = blenderapi.scene().objects['Head']
    human = blenderapi.scene().objects[ow.parent["human_name"]]

    if human['Manipulate']:
        ow.worldPosition = head.worldPosition
def set_body_position(co):
    """ During grabbing the head moves. The 'Head_Empty' needs to follow. """
    ow = co.owner
    # get the suffix of the human to reference the right objects
    suffix = ow.name[-4:] if ow.name[-4] == "." else ""

    head = blenderapi.scene().objects['Head' + suffix]
    human = blenderapi.scene().objects['Human' + suffix]

    if human['Manipulate']:
        ow.worldPosition = head.worldPosition
def limit(cont):
    """ Limit the hand's location to a sphere (radius 0.7) around the shoulder """
    ow = cont.owner
    # get the suffix of the human to reference the right objects
    suffix = ow.name[-4:] if ow.name[-4] == "." else ""

    limitY = cont.actuators['LimitLocY']
    sobList = blenderapi.scene().objects
    shoulder = sobList['Shoulder_Empty.R' + suffix]
    human = ow.parent

    try:
        if human['Manipulate']:
            limitY.min = -math.sqrt(0.7**2 -
                (shoulder.worldPosition[2] - ow.worldPosition[2])**2)
            limitY.max = -limitY.min
            cont.activate(limitY)
        else:
            cont.deactivate(limitY)
    except ValueError:
        pass
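# The Y-limit above follows from constraining the hand to a sphere of radius
# 0.7 around the shoulder: given the vertical offset dz between shoulder and
# hand, the admissible lateral range is +/- sqrt(0.7^2 - dz^2). A minimal
# standalone sketch of that geometry (plain Python, independent of the BGE
# actuator API; function and parameter names are illustrative only):
import math

def hand_y_limits(radius, shoulder_z, hand_z):
    """Return (min_y, max_y) for a hand constrained to a sphere of the
    given radius around the shoulder (illustrative helper)."""
    dz = shoulder_z - hand_z
    if abs(dz) > radius:
        # vertically outside the reachable sphere: no admissible Y range
        raise ValueError("hand is outside the reachable sphere")
    half_range = math.sqrt(radius**2 - dz**2)
    return -half_range, half_range

# Example: shoulder at z=1.4, hand at z=1.1, radius 0.7 -> about +/- 0.632
print(hand_y_limits(0.7, 1.4, 1.1))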
def init():
    """ Set the camera on load """
    co = blenderapi.controller()
    ow = co.owner
    # get the suffix of the human to reference the right objects
    suffix = ow.name[-4:] if ow.name[-4] == "." else ""

    camAct = co.actuators['Set_Camera']
    sobList = blenderapi.scene().objects
    human = ow

    # if the human is external, do not use its camera initially
    if human.get('External_Robot_Tag') or human['disable_keyboard_control']:
        return

    humCam = sobList['Human_Camera' + suffix]

    try:
        # check if there is a camera displaying the world in the scene
        worldCam = sobList['CameraFP']
    except KeyError:
        worldCam = None

    if ow['WorldCamera'] and worldCam:
        camAct.camera = worldCam
    else:
        camAct.camera = humCam
        blenderapi.mousepointer(visible=False)

    # set the camera following the human or displaying the world
    co.activate(camAct)
def __init__(self, obj, parent=None):
    logger.info('%s initialization' % obj.name)
    # Call the constructor of the parent class
    morse.core.actuator.Actuator.__init__(self, obj, parent)

    logger.setLevel(logging.INFO)

    self._destination = self.robot_parent.bge_object.worldPosition
    self._wp_object = None

    # set desired position to current position
    self.local_data['x'] = self._destination[0]
    self.local_data['y'] = self._destination[1]
    self.local_data['z'] = self._destination[2]
    self.local_data['yaw'] = self.robot_parent.position_3d.yaw
    logger.info("initial wp: (%.3f %.3f %.3f)", self._destination[0],
                self._destination[1], self._destination[2])

    self._pos_initalized = False

    # Make new reference to the robot velocities (mathutils.Vector)
    self.robot_w = self.robot_parent.bge_object.localAngularVelocity

    # get the robot inertia (list [ix, iy, iz])
    robot_inertia = self.robot_parent.bge_object.localInertia
    self.inertia = Vector(tuple(robot_inertia))
    logger.info("robot inertia: (%.3f %.3f %.3f)" % tuple(self.inertia))

    self.nominal_thrust = self.robot_parent.bge_object.mass * 9.81
    logger.info("nominal thrust: %.3f", self.nominal_thrust)

    self._attitude_compensation_limit = cos(self._max_bank_angle) ** 2

    # current attitude setpoints in radians
    self.roll_setpoint = 0.0
    self.pitch_setpoint = 0.0
    self.yaw_setpoint = 0.0
    self.thrust = 0.0

    # previous attitude error
    self.prev_err = Vector((0.0, 0.0, 0.0))

    # Initially (i.e. before receiving a waypoint),
    # the robot is in the 'Arrived' state
    self.robot_parent.move_status = "Arrived"

    # Identify an object as the target of the motion
    try:
        wp_name = self._target
        if wp_name != '':
            scene = blenderapi.scene()
            self._wp_object = scene.objects[wp_name]
            logger.info("Using object '%s' to indicate motion target", wp_name)
    except KeyError as detail:
        self._wp_object = None
        logger.info("Not using a target object")

    logger.info("Component initialized, runs at %.2f Hz", self.frequency)
def synchronize(self):
    self.morse_ambassador.tag = False
    scene = blenderapi.scene()
    t = self.morse_ambassador.current_time + self.morse_ambassador.lookahead
    for obj in self.morse_ambassador.objects:
        obj_name = self.rtia.getObjectInstanceName(obj)
        obj_pos = scene.objects[obj_name].worldPosition.to_tuple()
        obj_ori = scene.objects[obj_name].worldOrientation.to_euler()
        hla_att = {
            self.morse_ambassador.out_position:
                MorseVector.pack([obj_pos[0], obj_pos[1], obj_pos[2]]),
            self.morse_ambassador.out_orientation:
                MorseVector.pack([obj_ori.x, obj_ori.y, obj_ori.z])}
        try:
            self.rtia.updateAttributeValues(obj, hla_att, "update", t)
        except rti.InvalidFederationTime:
            logger.debug("Invalid time for UAV: %s; Federation time is %s",
                         t, self.rtia.queryFederateTime())
    if self.time_sync:
        self.rtia.timeAdvanceRequest(t)
        while not self.morse_ambassador.tag:
            self.rtia.tick(0, 1)
        logger.debug("Node simulation time: %s",
                     self.morse_ambassador.current_time)
    else:
        self.rtia.tick()
def lay_down_visualize(cont):
    """ Show a green rectangle if you can accurately place the selected object """
    ow = cont.owner
    # get the suffix of the human to reference the right objects
    suffix = ow.name[-4:] if ow.name[-4] == "." else ""

    scene = blenderapi.scene()
    objects = scene.objects
    human = objects[ow['human_name'] + suffix]
    hand = objects['Hand_Grab.R' + suffix]
    ray = cont.sensors['LayDownRay']
    focused_object = ray.hitObject

    try:
        actor_focused = data.objects[focused_object.name].game.use_actor
    except AttributeError:
        actor_focused = False

    if human['Manipulate'] and ray.positive and \
       focused_object != hand['selected'] and hand['selected'] and \
       actor_focused:
        if color_placing not in scene.post_draw:
            scene.post_draw.append(color_placing)
    else:
        if color_placing in scene.post_draw:
            scene.post_draw.remove(color_placing)
def write_interaction_status():
    """ Write the interaction status on screen

    The status is stored in a property.
    """
    cam = blenderapi.scene().active_camera
    # get the suffix of the human to reference the right objects
    suffix = cam.name[-4:] if cam.name[-4] == "." else ""

    hand = blenderapi.scene().objects['Hand_Grab.R' + suffix]

    # OpenGL setup
    bgl.glMatrixMode(bgl.GL_PROJECTION)
    bgl.glLoadIdentity()
    bgl.gluOrtho2D(0, windowWidth, 0, windowHeight)
    bgl.glMatrixMode(bgl.GL_MODELVIEW)
    bgl.glLoadIdentity()

    blf.size(font_id, int(windowHeight * 0.04), 72)
    # draw a black shadow around the text
    blf.enable(font_id, blf.SHADOW)
    blf.shadow(font_id, 5, 0.0, 0.0, 0.0, 1.0)
    blf.position(font_id, windowWidth * 0.4, windowHeight * 0.4, 0)
    blf.draw(font_id, hand['Status'])
def lay_down(cont):
    """ Lay the object down to the given coordinates """
    pos = cont.owner
    # get the suffix of the human to reference the right objects
    suffix = pos.name[-4:] if pos.name[-4] == "." else ""

    objects = blenderapi.scene().objects
    hand = objects['Hand_Grab.R' + suffix]
    obj = hand['selected']
    if obj is None or not pos['LayDown']:
        return

    vect = pos.getVectTo(Vector(pos['LayDown']))[1]
    obj_collision = obj.sensors['Collision']

    if not (obj_collision.positive and pos['LayDownObj'] in obj_collision.hitObjectList):
        pos.worldPosition += vect / 75
    else:
        obj.removeParent()
        hand['selected'] = None
        pos['moveArm'] = True
        pos['LayDown'] = False
        pos['LayDownObj'] = None
def synchronize(self):
    if self.connected:
        # Get the coordinates of the local robots
        robot_dict = blenderapi.persistantstorage().robotDict
        for obj, local_robot_data in robot_dict.items():
            #self.out_data[obj.name] = [obj.worldPosition.to_tuple()]
            euler_rotation = obj.worldOrientation.to_euler()
            self.out_data[obj.name] = [
                obj.worldPosition.to_tuple(),
                [euler_rotation.x, euler_rotation.y, euler_rotation.z]
            ]

        # Send the encoded dictionary through a socket
        # and receive a reply with any changes in the other nodes
        in_data = self._exchange_data(self.out_data)

        if in_data is not None:
            scene = blenderapi.scene()
            # Update the positions of the external robots
            for obj_name, robot_data in in_data.items():
                try:
                    obj = scene.objects[obj_name]
                    if obj not in robot_dict:
                        logger.debug("Data received: %s", robot_data)
                        obj.worldPosition = robot_data[0]
                        obj.worldOrientation = mathutils.Euler(
                            robot_data[1]).to_matrix()
                except KeyError as detail:
                    logger.info("Robot %s not found in this simulation scenario, "
                                "but present in another node. Ignoring it!" % detail)
def get_wheels(self):
    """ Get pointers to and physicsIds of all wheel objects

    Compute the wheel radius, too.
    """
    scene = blenderapi.scene()
    self._wheel_radius = None

    # inherited from the parent robot
    caster_wheel_name = self.bge_object.get('CasterWheelName', None)

    for index in self._wheel_index:
        name = "Wheel%sName" % index
        # Get the actual name of the object from the properties
        # of the parent robot
        try:
            wheel = scene.objects[self.bge_object[name]]
            self._wheels[index] = wheel
            logger.info("\tWheel %s: %s" % (index, wheel.name))
            wheel.removeParent()
            # get the wheel radius if not already computed
            if wheel.name != caster_wheel_name and not self._wheel_radius:
                self._wheel_radius = self.get_wheel_radius(self.bge_object[name])
        except:
            pass

    logger.debug("get_wheels %s" % self._wheels)
def lay_down(cont):
    """ Lay the object down to the given coordinates """
    pos = cont.owner
    # get the suffix of the human to reference the right objects
    suffix = pos.name[-4:] if pos.name[-4] == "." else ""

    objects = blenderapi.scene().objects
    hand = objects['Hand_Grab.R' + suffix]
    obj = hand['selected']
    if obj is None or not pos['LayDown']:
        return

    vect = pos.getVectTo(Vector(pos['LayDown']))[1]
    obj_collision = obj.sensors['Collision']

    if not (obj_collision.positive and pos['LayDownObj'] in obj_collision.hitObjectList):
        pos.worldPosition += vect / 75
    else:
        obj.removeParent()
        hand['selected'] = None
        pos['moveArm'] = True
        pos['LayDown'] = False
        pos['LayDownObj'] = None
def default_action(self):
    """ Compute the local temperature

    Temperature is measured relative to the closest fire source.
    """
    min_distance = 100000.0
    fires = False

    scene = blenderapi.scene()
    # Look for the objects marked as fire sources
    for obj in scene.objects:
        try:
            obj['Fire']
            fire_radius = obj['Fire_Radius']
            # If the direction of the fire is also important,
            # we can use getVectTo instead of getDistanceTo
            distance = self.bge_object.getDistanceTo(obj)
            if distance < min_distance:
                min_distance = distance
                fires = True
        except KeyError as detail:
            logger.debug("Exception: " + str(detail))

    temperature = self._global_temp
    # Trial-and-error formula to get a temperature dependent on
    # the distance to the nearest fire source
    if fires:
        temperature += self._fire_temp * math.e ** (-0.2 * min_distance)

    # Store the data acquired by this sensor that could be sent
    # via a middleware
    self.local_data['temperature'] = float(temperature)
def write_interaction_status():
    """ Write the interaction status on screen

    The status is stored in a property.
    """
    cam = blenderapi.scene().active_camera
    # get the suffix of the human to reference the right objects
    suffix = cam.name[-4:] if cam.name[-4] == "." else ""

    hand = blenderapi.scene().objects['Hand_Grab.R' + suffix]

    # OpenGL setup
    bgl.glMatrixMode(bgl.GL_PROJECTION)
    bgl.glLoadIdentity()
    bgl.gluOrtho2D(0, windowWidth, 0, windowHeight)
    bgl.glMatrixMode(bgl.GL_MODELVIEW)
    bgl.glLoadIdentity()

    blf.size(font_id, int(windowHeight * 0.04), 72)
    # draw a black shadow around the text
    blf.enable(font_id, blf.SHADOW)
    blf.shadow(font_id, 5, 0.0, 0.0, 0.0, 1.0)
    blf.position(font_id, windowWidth * 0.4, windowHeight * 0.4, 0)
    blf.draw(font_id, hand['Status'])
def reflectAttributeValues(self, object, attributes, tag, order, transport,
                           time=None, retraction=None):
    scene = blenderapi.scene()
    obj_name = self._rtia.getObjectInstanceName(object)
    logger.debug("RAV %s", obj_name)
    try:
        obj = scene.objects[obj_name]
        if self.in_position in attributes:
            pos, offset = MorseVector.unpack(attributes[self.in_position])
            # Update the position of the robot
            obj.worldPosition = pos
        if self.in_orientation in attributes:
            ori, offset = MorseVector.unpack(attributes[self.in_orientation])
            # Update the orientation of the robot
            obj.worldOrientation = mathutils.Euler(ori).to_matrix()
    except KeyError as detail:
        logger.debug("Robot %s not found in this simulation scenario, "
                     "but present in another node. Ignoring it!", obj_name)
def init():
    """ Set the camera on load """
    co = blenderapi.controller()
    ow = co.owner
    # get the suffix of the human to reference the right objects
    suffix = ow.name[-4:] if ow.name[-4] == "." else ""

    camAct = co.actuators["Set_Camera"]
    sobList = blenderapi.scene().objects
    human = ow

    # if the human is external, do not use its camera initially
    if human.get("External_Robot_Tag") or human["disable_keyboard_control"]:
        return

    humCam = sobList["Human_Camera" + suffix]

    try:
        # check if there is a camera displaying the world in the scene
        worldCam = sobList["CameraFP"]
    except KeyError:
        worldCam = None

    if ow["WorldCamera"] and worldCam:
        camAct.camera = worldCam
    else:
        camAct.camera = humCam
        blenderapi.mousepointer(visible=False)

    # set the camera following the human or displaying the world
    co.activate(camAct)
def grasp_(self, seq):
    """ Grasp object. """
    human = self.blender_obj
    if human["Manipulate"]:
        scene = blenderapi.scene()
        hand_empty = scene.objects["Hand_Grab.R"]
        selected_object = hand_empty["Near_Object"]
        if seq == "t":
            # Check that no other object is being carried
            if human["DraggedObject"] is None or human["DraggedObject"] == "":
                # If the object is draggable
                if selected_object is not None and selected_object != "":
                    # Clear the previously selected object, if any
                    human["DraggedObject"] = selected_object
                    # Remove the physics simulation
                    selected_object.suspendDynamics()
                    # Parent the selected object to the hand target
                    selected_object.setParent(hand_empty)
        if seq == "f":
            if human["DraggedObject"] is not None and human["DraggedObject"] != "":
                previous_object = human["DraggedObject"]
                # Restore the physics simulation
                previous_object.restoreDynamics()
                previous_object.setLinearVelocity([0, 0, 0])
                previous_object.setAngularVelocity([0, 0, 0])
                # Remove the parent
                previous_object.removeParent()
                # Clear the object from dragged status
                human["DraggedObject"] = None
def reset_view(contr):
    """ Make the human model look forward """
    human = contr.owner
    scene = blenderapi.scene()
    target = scene.objects['Head_Empty']
    # Reset the Empty object to its original position
    target.localPosition = [0.5, 0.0, 1.6]
def synchronize(self):
    if not self.node_stream:
        logger.debug("not self.node_stream")
        return
    if not self.node_stream.connected:
        logger.debug("not self.node_stream.connected")
        return

    # Get the coordinates of the local robots
    for obj in blenderapi.persistantstorage().robotDict.keys():
        #self.out_data[obj.name] = [obj.worldPosition.to_tuple()]
        euler_rotation = obj.worldOrientation.to_euler()
        self.out_data[obj.name] = [obj.worldPosition.to_tuple(),
            [euler_rotation.x, euler_rotation.y, euler_rotation.z]]

    self.out_data['__time'] = [self.simulation_time.time,
                               1.0 / blenderapi.getfrequency(),
                               self.simulation_time.real_time]

    # Send the encoded dictionary through a socket
    # and receive a reply with any changes in the other nodes
    in_data = self._exchange_data(self.out_data)
    logger.debug("Received: %s" % in_data)

    if not in_data:
        return

    try:
        self.update_scene(in_data, blenderapi.scene())
    except Exception as e:
        logger.warning("error while processing incoming data: " + str(e))
def toggle_manipulation(self):
    """ Switch manipulation mode on and off.

    Intended to be triggered by a socket request; added for
    wiimote remote control.
    """
    human = self.bge_object
    scene = blenderapi.scene()
    hand_target = scene.objects['IK_Target_Empty.R']
    head_target = scene.objects['Target_Empty']

    if human['Manipulate']:
        human['Manipulate'] = False
        # Place the hand beside the body
        hand_target.localPosition = [0.0, -0.3, 0.8]
        head_target.setParent(human)
        head_target.localPosition = [1.3, 0.0, 1.7]
    else:
        human['Manipulate'] = True
        head_target.setParent(hand_target)
        # Place the hand in a nice position
        hand_target.localPosition = [0.6, 0.0, 1.4]
        # Place the head in the same place
        head_target.localPosition = [0.0, 0.0, 0.0]
def synchronize(self):
    if not self.node_stream:
        logger.debug("not self.node_stream")
        return
    if not self.node_stream.connected:
        logger.debug("not self.node_stream.connected")
        return

    # Get the coordinates of the local robots
    for obj in blenderapi.persistantstorage().robotDict.keys():
        #self.out_data[obj.name] = [obj.worldPosition.to_tuple()]
        euler_rotation = obj.worldOrientation.to_euler()
        self.out_data[obj.name] = [obj.worldPosition.to_tuple(),
            [euler_rotation.x, euler_rotation.y, euler_rotation.z]]

    self.out_data['__time'] = [self.simulation_time.time,
                               1.0 / blenderapi.getfrequency(),
                               self.simulation_time.real_time]

    # Send the encoded dictionary through a socket
    # and receive a reply with any changes in the other nodes
    in_data = self._exchange_data(self.out_data)
    logger.debug("Received: %s" % in_data)

    if not in_data:
        return

    try:
        self.update_scene(in_data, blenderapi.scene())
    except Exception as e:
        logger.warning("error while processing incoming data: " + str(e))
def get_scene_objects(self):
    """ Return a hierarchical dictionary structure of all objects in the
    scene, along with their positions and orientations, formatted as a
    Python string representation.

    The structure:
    {object_name: [dict_of_children, position_tuple, quaternion_tuple],
     object_name: [dict_of_children, position_tuple, quaternion_tuple],
     ...}
    """
    scene = blenderapi.scene()

    # Special MORSE items to remove from the list
    remove_items = [
        "Scene_Script_Holder",
        "CameraFP",
        "__default__cam__",
        "MORSE.Properties",
        "__morse_dt_analyser",
    ]
    top_levelers = [o for o in scene.objects
                    if o.parent is None and o.name not in remove_items]

    objects = {}
    for obj in top_levelers:
        objects.update(get_structured_children_of(obj))

    return objects
def default_action(self):
    """ Compute the local temperature

    Temperature is measured relative to the closest fire sources.
    """
    temp = float(self._zero)

    scene = blenderapi.scene()
    # Look for the objects marked as fire sources
    for obj in scene.objects:
        try:
            f = obj[self._tag]
            if type(f) == int or type(f) == float:
                fire_intensity = float(f)
            else:
                fire_intensity = self._fire
            distance, gvect, lvect = self.bge_object.getVectTo(obj)
            if distance < self._range:
                t = fire_intensity * math.exp(-self._alpha * distance)
                temp += t
        except KeyError as detail:
            logger.debug("Exception: " + str(detail))

    self.local_data['temperature'] = float(temp)
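# The temperature model above is an exponential falloff summed over nearby
# sources: each source within range adds intensity * exp(-alpha * distance)
# on top of the base temperature. A minimal sketch of that computation outside
# the BGE (function and parameter names here are illustrative, not part of the
# sensor API):
import math

def local_temperature(base_temp, sources, alpha, max_range):
    """sources: iterable of (intensity, distance) pairs (illustrative)."""
    temp = base_temp
    for intensity, distance in sources:
        if distance < max_range:
            temp += intensity * math.exp(-alpha * distance)
    return temp

# Two fire sources at 2 m and 10 m, alpha = 0.2, sensing range = 20 m
print(local_temperature(15.0, [(200.0, 2.0), (200.0, 10.0)], 0.2, 20.0))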
def synchronize(self):
    self.morse_ambassador.tag = False
    scene = blenderapi.scene()
    t = self.morse_ambassador.current_time + self.morse_ambassador.lookahead
    for obj in self.morse_ambassador.objects:
        obj_name = self.rtia.getObjectInstanceName(obj)
        obj_pos = scene.objects[obj_name].worldPosition.to_tuple()
        obj_ori = scene.objects[obj_name].worldOrientation.to_euler()
        hla_att = {
            self.morse_ambassador.out_position:
                MorseVector.pack([obj_pos[0], obj_pos[1], obj_pos[2]]),
            self.morse_ambassador.out_orientation:
                MorseVector.pack([obj_ori.x, obj_ori.y, obj_ori.z])}
        try:
            self.rtia.updateAttributeValues(obj, hla_att, "update", t)
        except rti.InvalidFederationTime:
            logger.debug("Invalid time for UAV: %s; Federation time is %s",
                         t, self.rtia.queryFederateTime())
    if self.time_sync:
        self.rtia.timeAdvanceRequest(t)
        while not self.morse_ambassador.tag:
            self.rtia.tick(0, 1)
        logger.debug("Node simulation time: %s",
                     self.morse_ambassador.current_time)
    else:
        self.rtia.tick()
def toggle_manipulate(contr):
    """ Switch mouse control between look and manipulate """
    human = contr.owner

    # if the human is external, do nothing
    if human.get('External_Robot_Tag') or human['disable_keyboard_control']:
        return

    # get the suffix of the human to reference the right objects
    suffix = human.name[-4:] if human.name[-4] == "." else ""

    scene = blenderapi.scene()
    hand_target = scene.objects['IK_Target_Empty.R' + suffix]
    head_target = scene.objects['Target_Empty' + suffix]
    right_hand = scene.objects['Hand_Grab.R' + suffix]

    if human['Manipulate']:
        #blenderapi.render().showMouse(False)
        human['Manipulate'] = False
        # Place the hand beside the body
        if right_hand['selected'] == 'None' or right_hand['selected'] == '' \
                or right_hand['selected'] is None:
            hand_target.localPosition = [0.3, -0.3, 0.9]
        head_target.setParent(human)
        head_target.localPosition = [1.3, 0.0, 1.7]
    else:
        #blenderapi.render().showMouse(True)
        human['Manipulate'] = True
        head_target.setParent(hand_target)
        # Place the hand in a nice position
        hand_target.localPosition = [0.6, 0.0, 1.4]
        head_target.worldPosition = hand_target.worldPosition
def default_action(self):
    """ Extract the human posture """
    #logger.debug("\tI am '%s': (%.4f, %.4f, %.4f)" % (self.name, self.position_3d.x, self.position_3d.y, self.position_3d.z))
    #self.local_data['x'] = float(self.position_3d.x)
    #self.local_data['y'] = float(self.position_3d.y)
    #self.local_data['z'] = float(self.position_3d.z)
    #self.local_data['yaw'] = float(self.position_3d.yaw)# - (math.pi/2) # 1.57 #pi/2
    #self.local_data['pitch'] = float(self.position_3d.pitch)
    #self.local_data['roll'] = float(self.position_3d.roll)

    # Give the position of the Torso_Empty object as the position of the human
    scene = blenderapi.scene()
    torso = scene.objects['Torso_Empty']
    self.local_data['x'] = torso.worldPosition[0]
    self.local_data['y'] = torso.worldPosition[1]
    self.local_data['z'] = torso.worldPosition[2]
    logger.debug("\tTorso_Empty position: (%.4f, %.4f, %.4f)" %
                 (torso.worldPosition[0], torso.worldPosition[1],
                  torso.worldPosition[2]))

    # Pass also the rotation of the Torso_Empty
    self.local_data['yaw'] = torso.worldOrientation.to_euler().z
    self.local_data['pitch'] = torso.worldOrientation.to_euler().y
    self.local_data['roll'] = torso.worldOrientation.to_euler().x
    logger.debug("\tTorso_Empty orientation: (%.4f, %.4f, %.4f)" %
                 (self.local_data['roll'], self.local_data['pitch'],
                  self.local_data['yaw']))

    self._read_pose(self.bge_object)

    logger.debug("LOCAL_DATA: %s" % self.local_data)
def read_status(contr):
    """ Check if the human is moving and set the flags

    This will trigger the walking animation even when the human
    is controlled via a motion actuator.
    """
    human = contr.owner
    scene = blenderapi.scene()

    # get the suffix of the human to reference the right objects
    suffix = human.name[-4:] if human.name[-4] == "." else ""

    armature = scene.objects['HumanArmature' + suffix]
    tolerance = 0.001

    # TODO: Do not change the movement properties until the controllers
    # are properly implemented to use velocity commands
    if False:
        speed = human.getLinearVelocity()
        logger.debug("Man going at speed [%.4f, %.4f, %.4f]" %
                     (speed[0], speed[1], speed[2]))
        if speed[0] > tolerance:
            armature['movingForward'] = True
        elif speed[0] < -tolerance:
            armature['movingBackward'] = True
        else:
            armature['movingForward'] = False
            armature['movingBackward'] = False
def toggle_manipulation(self):
    """ Switch manipulation mode on and off.

    Intended to be triggered by a socket request; added for
    wiimote remote control.
    """
    human = self.bge_object
    scene = blenderapi.scene()
    hand_target = scene.objects['IK_Target_Empty.R']
    head_target = scene.objects['IK_Target_Empty.Head']
    torso = scene.objects['Torso_Reference_Empty']

    if human['Manipulate']:
        human['Manipulate'] = False
        # Place the hand beside the body
        hand_target.localPosition = [0.3, -0.3, 0.9]
        # Make the head follow the body
        head_target.setParent(torso)
        # Put the head_target in front of and above the waist
        head_target.localPosition = [0.5, 0.0, 0.5]
        logger.debug("Moving head_target to CENTER: %s" % head_target.localPosition)
        #hand_target.localPosition = [0.0, -0.3, 0.8]
        #head_target.setParent(human)
        #head_target.localPosition = [1.3, 0.0, 1.7]
    else:
        human['Manipulate'] = True
        # Place the hand in a nice position
        hand_target.localPosition = [0.6, 0.0, 1.4]
        # Make the head follow the hand
        head_target.setParent(hand_target)
        # Reset the head_target position to the same as its parent
        head_target.localPosition = [0.0, 0.0, 0.0]
        logger.debug("Moving head_target to HAND: %s" % head_target.localPosition)
def default_action(self):
    """ Extract the human posture """
    # Give the position of the Torso_Empty object as the position of
    # the human
    scene = blenderapi.scene()
    torso = scene.objects['Torso_Empty']
    self.local_data['x'] = torso.worldPosition[0]
    self.local_data['y'] = torso.worldPosition[1]
    self.local_data['z'] = torso.worldPosition[2]
    logger.debug("\tTorso_Empty position: (%.4f, %.4f, %.4f)" %
                 (torso.worldPosition[0], torso.worldPosition[1],
                  torso.worldPosition[2]))

    # Pass also the rotation of the Torso_Empty
    self.local_data['yaw'] = torso.worldOrientation.to_euler().z
    self.local_data['pitch'] = torso.worldOrientation.to_euler().y
    self.local_data['roll'] = torso.worldOrientation.to_euler().x
    logger.debug("\tTorso_Empty orientation: (%.4f, %.4f, %.4f)" %
                 (self.local_data['roll'], self.local_data['pitch'],
                  self.local_data['yaw']))

    self._read_pose(self.bge_object)

    logger.debug("LOCAL_DATA: %s" % self.local_data)
def toggle_manipulate(contr):
    """ Switch mouse control between look and manipulate """
    human = contr.owner

    # if the human is external, do nothing
    if human.get('External_Robot_Tag') or human['disable_keyboard_control']:
        return

    # get the suffix of the human to reference the right objects
    suffix = human.name[-4:] if human.name[-4] == "." else ""

    scene = blenderapi.scene()
    hand_target = scene.objects['IK_Target_Empty.R' + suffix]
    head_target = scene.objects['Target_Empty' + suffix]
    right_hand = scene.objects['Hand_Grab.R' + suffix]

    if human['Manipulate']:
        #blenderapi.render().showMouse(False)
        human['Manipulate'] = False
        # Place the hand beside the body
        if right_hand['selected'] == 'None' or right_hand['selected'] == '' \
                or right_hand['selected'] is None:
            hand_target.localPosition = [0.3, -0.3, 0.9]
        head_target.setParent(human)
        head_target.localPosition = [1.3, 0.0, 1.7]
    else:
        #blenderapi.render().showMouse(True)
        human['Manipulate'] = True
        head_target.setParent(hand_target)
        # Place the hand in a nice position
        hand_target.localPosition = [0.6, 0.0, 1.4]
        head_target.worldPosition = hand_target.worldPosition
def move(contr):
    """ Read the keys for specific combinations that will make
    the camera move in 3D space. """
    # get the object this script is attached to
    camera = contr.owner

    scene = blenderapi.scene()
    # Do not move the camera if the current view is using another camera
    if camera != scene.active_camera:
        return

    if 'Human' in scene.objects:
        human = scene.objects['Human']
        if not human['move_cameraFP']:
            return

    # set the movement speed
    speed = camera['Speed']

    # Get the Blender keyboard sensor
    keyboard = contr.sensors['All_Keys']

    # Default movement speed
    move_speed = [0.0, 0.0, 0.0]

    keylist = keyboard.events
    for key in keylist:
        if key[1] == blenderapi.input_active():
            # Also handle the corresponding key for an AZERTY keyboard
            if key[0] == blenderapi.WKEY or key[0] == blenderapi.ZKEY:
                move_speed[2] = -speed
            elif key[0] == blenderapi.SKEY:
                move_speed[2] = speed
            # Also handle the corresponding key for an AZERTY keyboard
            elif key[0] == blenderapi.AKEY or key[0] == blenderapi.QKEY:
                move_speed[0] = -speed
            elif key[0] == blenderapi.DKEY:
                move_speed[0] = speed
            elif key[0] == blenderapi.RKEY:
                move_speed[1] = speed
            elif key[0] == blenderapi.FKEY:
                move_speed[1] = -speed
            else:
                move_speed[0] = 0
                move_speed[1] = 0
                move_speed[2] = 0
            # The second parameter of 'applyMovement' determines
            # a movement with respect to the object's local
            # coordinate system
            camera.applyMovement(move_speed, True)
        elif key[1] == blenderapi.input_just_activated():
            # Other actions activated with the keyboard
            # Reset camera to center
            if key[0] == blenderapi.F8KEY and keyboard.positive:
                reset_position(contr)
def __init__(self, obj, parent=None):
    logger.info('%s initialization' % obj.name)
    # Call the constructor of the parent class
    morse.core.actuator.Actuator.__init__(self, obj, parent)

    logger.setLevel(logging.INFO)

    self._destination = self.robot_parent.bge_object.worldPosition
    self._wp_object = None

    # set desired position to current position
    self.local_data['x'] = self._destination[0]
    self.local_data['y'] = self._destination[1]
    self.local_data['z'] = self._destination[2]
    self.local_data['yaw'] = self.robot_parent.position_3d.yaw
    logger.info("initial wp: (%.3f %.3f %.3f)", self._destination[0],
                self._destination[1], self._destination[2])

    self._pos_initalized = False

    # Make new reference to the robot velocities (mathutils.Vector)
    self.robot_w = self.robot_parent.bge_object.localAngularVelocity

    # get the robot inertia (list [ix, iy, iz])
    robot_inertia = self.robot_parent.bge_object.localInertia
    self.inertia = Vector(tuple(robot_inertia))
    logger.info("robot inertia: (%.3f %.3f %.3f)" % tuple(self.inertia))

    self.nominal_thrust = self.robot_parent.bge_object.mass * 9.81
    logger.info("nominal thrust: %.3f", self.nominal_thrust)

    self._attitude_compensation_limit = cos(self._max_bank_angle) ** 2

    # current attitude setpoints in radians
    self.roll_setpoint = 0.0
    self.pitch_setpoint = 0.0
    self.yaw_setpoint = 0.0
    self.thrust = 0.0

    # previous attitude error
    self.prev_err = Vector((0.0, 0.0, 0.0))

    # Initially (i.e. before receiving a waypoint),
    # the robot is in the 'Arrived' state
    self.robot_parent.move_status = "Arrived"

    # Identify an object as the target of the motion
    try:
        wp_name = self._target
        if wp_name != '':
            scene = blenderapi.scene()
            self._wp_object = scene.objects[wp_name]
            logger.info("Using object '%s' to indicate motion target", wp_name)
    except KeyError as detail:
        self._wp_object = None
        logger.info("Not using a target object")

    logger.info("Component initialized, runs at %.2f Hz", self.frequency)
def __init__(self, obj, parent=None):
    logger.info("%s initialization" % obj.name)
    # Call the constructor of the parent class
    morse.core.actuator.Actuator.__init__(self, obj, parent)

    # Get every bge object in the scene
    objs = blenderapi.scene().objects

    # Get the water surface object
    self._water = objs['water']
    # Set the water surface property
    self._water['castable'] = True

    # Initialise all buoyancy spheres in the scene
    self.spheres = [Spheres(child) for child in objs
                    if 'float' in child.name.lower()]

    # Sphere volumes by parent
    sphere_vols = {}
    # Sum the sphere volumes
    for s in self.spheres:
        parent = s.obj.parent
        if parent.name not in sphere_vols:
            sphere_vols[parent.name] = s.vol
        else:
            sphere_vols[parent.name] += s.vol

    # Distribute the buoyancy among the spheres according to volume
    for s in self.spheres:
        # Parent object
        parent = s.obj.parent
        # Parent mass
        mass = parent.mass
        # Parent trim
        trim = parent.get('trim', 0)
        # The total buoyancy force will equal the weight
        # in air of the flotation elements plus the trim
        mass += trim
        # Buoyancy force when totally submerged
        # The vector points up in the global frame
        buoyancy = -mass * blenderapi.gravity()
        # Buoyancy for this sphere
        s.buoy = buoyancy * s.vol / sphere_vols[parent.name]

    logger.info('Found %d buoyancy elements in scene' % len(self.spheres))
    logger.info('Component initialized')
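# The per-sphere buoyancy above is simply the parent's weight (plus trim)
# split proportionally to each sphere's share of the parent's total flotation
# volume. A standalone sketch of that distribution, assuming a constant
# gravity magnitude instead of the blenderapi.gravity() vector (function and
# parameter names are illustrative only):
G = 9.81  # assumed gravity magnitude (m/s^2)

def distribute_buoyancy(mass, trim, sphere_volumes):
    """Split the fully submerged buoyancy force (weight in air of the
    flotation elements plus trim) across spheres, proportional to volume."""
    total_force = (mass + trim) * G          # upward force magnitude
    total_vol = sum(sphere_volumes)
    return [total_force * v / total_vol for v in sphere_volumes]

# Example: a 120 kg hull with 5 kg of trim and three flotation spheres
print(distribute_buoyancy(120.0, 5.0, [0.02, 0.03, 0.05]))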
def set_human_animation(contr):
    """ Toggle the animation actions (walking, standing still...) of
    the armature.
    """
    # Get the armature this script is attached to
    armature = contr.owner
    # get the suffix of the human to reference the right objects
    suffix = armature.name[-4:] if armature.name[-4] == "." else ""

    scene = blenderapi.scene()
    active_camera = scene.active_camera
    human = scene.objects[armature.parent.name]

    # if the human is external, do nothing
    if human.get('External_Robot_Tag') or human['disable_keyboard_control']:
        return

    keyboard = contr.sensors['All_Keys']
    keylist = keyboard.events

    pressed = []    # all keys that are currently pressed
    for key in keylist:
        # key[0] == blenderapi.keycode, key[1] = status
        if key[1] == blenderapi.input_just_activated():
            pressed.append(key[0])
            # Keys for moving forward or turning
            """
            if key[0] == blenderapi.WKEY or key[0] == blenderapi.ZKEY:
                armature['movingForward'] = True
            elif key[0] == blenderapi.SKEY:
                armature['movingBackward'] = True
            """
            # TEST: Read the rotation of the bones in the armature
            if key[0] == blenderapi.BKEY:
                read_pose(contr)
            #elif key[0] == blenderapi.VKEY:
            #    reset_pose(contr)
            #elif key[1] == blenderapi.input_just_released():
            """
            if key[0] == blenderapi.WKEY or key[0] == blenderapi.ZKEY:
                armature['movingForward'] = False
            elif key[0] == blenderapi.SKEY:
                armature['movingBackward'] = False
            """
        elif key[1] == blenderapi.input_active():
            pressed.append(key[0])

    if human['move_cameraFP'] and active_camera.name != 'Human_Camera':
        return

    if (FORWARDS in pressed or LEFT in pressed or
            BACKWARDS in pressed or RIGHT in pressed):
        armature['movingForward'] = True
    else:
        armature['movingForward'] = False
def move_hand(self, diff, tilt):
    """ Move the human hand (wheel).

    Intended to be triggered by a socket request; added for
    wiimote remote control.
    """
    human = self.bge_object
    if human['Manipulate']:
        scene = blenderapi.scene()
        target = scene.objects['IK_Target_Empty.R']
        target.applyMovement([diff, 0.0, 0.0], True)
def restore_dynamics():
    """ Resume physics for all objects in the scene. """
    scene = blenderapi.scene()
    for obj in scene.objects:
        obj.restoreDynamics()
    return "Physics is resumed"
def __init__(self, obj, parent=None):
    """ Constructor method.

    Receives the reference to the Blender object.
    """
    logger.info('%s initialization' % obj.name)
    # Call the constructor of the parent class
    GraspingRobot.__init__(self, obj, parent)

    # We define here the name of the pr2 grasping hand:
    self.hand_name = 'Hand.Grasp.PR2'

    self.armatures = []
    # Search for the armatures and the torso in all objects parented to the pr2 empty
    for obj in self.bge_object.childrenRecursive:
        # Check if obj is an armature
        if type(obj).__name__ == 'BL_ArmatureObject':
            self.armatures.append(obj.name)
            logger.info(obj.name)
            logger.info(' ==\n')
        if obj.name == 'torso_lift_joint':
            self.torso = obj
            logger.info(' ==\n')

    # constants that hold the original height of the torso from the ground
    # These values come from the pr2 urdf file
    self.TORSO_BASE_HEIGHT = (0.739675 + 0.051)
    self.TORSO_LOWER = 0.0   # lower limit of the torso z-translation
    self.TORSO_UPPER = 0.31  # upper limit of the torso z-translation

    self.MIN_DIST = 0.85
    self.cancel = False
    self.is_ho = False
    self.is_gr = False  # is grasping?

    # raise both hands up
    scene = blenderapi.scene()
    hand_l = scene.objects['l_elbow_flex_joint']
    hand_l.worldOrientation = [math.radians(0), math.radians(-90),
                               math.radians(180)]

    hand_r = scene.objects['r_elbow_flex_joint']
    hand_r.worldOrientation = [math.radians(90), math.radians(-90),
                               math.radians(-90)]

    head = scene.objects['head_tilt_joint']
    head.worldOrientation = [0, 0, math.radians(90)]

    logger.info('Component initialized')
def get_obj_by_name(name):
    """ Return the object in the scene associated with :param name:

    If it does not exist, throw a MorseRPCInvokationError.
    """
    scene = blenderapi.scene()
    if name not in scene.objects:
        raise MorseRPCInvokationError(
            "Object '%s' does not appear in the scene." % name)
    return scene.objects[name]
def suspend_dynamics():
    """ Suspend physics for all objects in the scene. """
    scene = blenderapi.scene()
    for obj in scene.objects:
        obj.suspendDynamics()
    return "Physics is suspended"
def restore_dynamics(self):
    """ Resume physics for all objects in the scene. """
    scene = blenderapi.scene()
    for obj in scene.objects:
        obj.restoreDynamics()
    return "Physics is resumed"
def suspend_dynamics(self):
    """ Suspend physics for all objects in the scene. """
    scene = blenderapi.scene()
    for obj in scene.objects:
        obj.suspendDynamics()
    return "Physics is suspended"
def rotate(contr):
    """ Read the movements of the mouse and apply them
    as a rotation to the camera. """
    # get the object this script is attached to
    camera = contr.owner

    scene = blenderapi.scene()
    if not scene:
        # not ready, main reload(blenderapi)
        return

    # Do not move the camera if the current view is using another camera
    if camera != scene.active_camera:
        return

    # Get the sensor named Mouse
    mouse = contr.sensors['Mouse']
    # Get the Blender keyboard sensor
    keyboard = contr.sensors['All_Keys']

    # Show the cursor
    mouse_visible = True

    keylist = keyboard.events
    for key in keylist:
        if key[1] == blenderapi.input_active():
            # The left CTRL key allows rotating the camera
            if key[0] == blenderapi.LEFTCTRLKEY:
                # Hide the cursor while we control the camera
                mouse_visible = False
                if mouse.positive:
                    # get the width and height of the game window
                    width = blenderapi.render().getWindowWidth()
                    height = blenderapi.render().getWindowHeight()

                    # get the mouse movement from the function
                    move = mouse_move(camera, mouse, width, height)

                    # set the mouse sensitivity
                    sensitivity = camera['Sensitivity']

                    # Amount, direction and sensitivity
                    leftRight = move[0] * sensitivity
                    upDown = move[1] * sensitivity

                    # set the values
                    camera.applyRotation([0.0, 0.0, leftRight], 0)
                    camera.applyRotation([upDown, 0.0, 0.0], 1)

                    # Center the mouse in the game window
                    # Using the '//' operator (floor division) to produce an integer result
                    blenderapi.render().setMousePosition(width // 2, height // 2)

    # Set the cursor visibility
    blenderapi.mousepointer(visible=mouse_visible)
def attempt_grasp_back(self):
    scene = blenderapi.scene()
    hand_r = scene.objects['IK_Target_Empty.R']
    hand_l = scene.objects['IK_Target_Empty.L']
    # move both hands to the back
    hand_r.localPosition = [0, -0.2, 0.82]
    hand_l.localPosition = [0, 0.2, 0.82]
    self.is_ag = False
    self.is_gr = False
def get_obj_by_name(name):
    """ Return the object in the scene associated with :param name:

    If it does not exist, throw a MorseRPCInvokationError.
    """
    scene = blenderapi.scene()
    if name not in scene.objects:
        raise MorseRPCInvokationError(
            "Object '%s' does not appear in the scene." % name)
    return scene.objects[name]
def rotate(contr):
    """ Read the movements of the mouse and apply them
    as a rotation to the camera. """
    # get the object this script is attached to
    camera = contr.owner

    scene = blenderapi.scene()
    if not scene:
        # not ready, main reload(blenderapi)
        return

    # Do not move the camera if the current view is using another camera
    if camera != scene.active_camera:
        return

    # Get the sensor named Mouse
    mouse = contr.sensors['Mouse']
    # Get the Blender keyboard sensor
    keyboard = contr.sensors['All_Keys']

    # Show the cursor
    mouse_visible = True

    keylist = keyboard.events
    for key in keylist:
        if key[1] == blenderapi.input_active():
            # The left CTRL key allows rotating the camera
            if key[0] == blenderapi.LEFTCTRLKEY:
                # Hide the cursor while we control the camera
                mouse_visible = False
                if mouse.positive:
                    # get the width and height of the game window
                    width = blenderapi.render().getWindowWidth()
                    height = blenderapi.render().getWindowHeight()

                    # get the mouse movement from the function
                    move = mouse_move(camera, mouse, width, height)

                    # set the mouse sensitivity
                    sensitivity = camera['Sensitivity']

                    # Amount, direction and sensitivity
                    leftRight = move[0] * sensitivity
                    upDown = move[1] * sensitivity

                    # set the values
                    camera.applyRotation([0.0, 0.0, leftRight], 0)
                    camera.applyRotation([upDown, 0.0, 0.0], 1)

                    # Center the mouse in the game window
                    # Using the '//' operator (floor division) to produce an integer result
                    blenderapi.render().setMousePosition(width // 2, height // 2)

    # Set the cursor visibility
    blenderapi.mousepointer(visible=mouse_visible)