async def broadcast_event(event: events.Event) -> None:
    """Broadcast *event* to every connected UI client.

    The event is serialized to JSON exactly once and the sends to all
    known interfaces run concurrently.
    """
    logger.debug(event)

    if glob.USERS.interfaces:
        message = event.to_json()
        sends = (ws_server.send_json_to_client(intf, message) for intf in glob.USERS.interfaces)
        await asyncio.gather(*sends)
async def get_project_problems(scene: CachedScene, project: CachedProject) -> Optional[list[str]]:
    """Handle caching of project problems.

    Recomputes the cached problem list for *project* when the scene, the
    project itself or any of the ObjectTypes used by the scene changed
    since the last computation. Also prunes cache entries belonging to
    projects that no longer exist in storage.

    :param scene: scene the project is based on (must carry a `modified` timestamp).
    :param project: project to check (must carry a `modified` timestamp).
    :return: list of problems, or None when there are none.
    """
    # timestamps are required to decide cache validity
    assert scene.modified
    assert project.modified

    ots = scene.object_types

    # refresh glob.OBJECT_TYPES before computing/validating problems
    await get_object_types()
    ot_modified = get_ot_modified(ots)

    # recompute when there is no entry yet or any dependency is newer/different
    if (project.id not in _project_problems
            or _project_problems[project.id].scene_modified < scene.modified
            or _project_problems[project.id].project_modified < project.modified
            or _project_problems[project.id].ot_modified != ot_modified):

        logger.debug(f"Updating project_problems for {project.name}.")

        _project_problems[project.id] = ProjectProblems(
            scene.modified,
            project_problems(glob.OBJECT_TYPES, scene, project),
            ot_modified,
            project.modified,
        )

    # prune removed projects
    for csi in set(_project_problems.keys()) - await storage.get_project_ids():
        logger.debug(f"Pruning cached problems for removed project {csi}.")
        _project_problems.pop(csi, None)

    sp = _project_problems[project.id].problems
    return sp if sp else None
async def object_aiming_done_cb(req: srpc.o.ObjectAimingDone.Request, ui: WsClient) -> None:
    """Calls scene service to get a new pose for the object.

    In case of success, robot and object are kept locked, unlocking is
    responsibility of ui. On failure, UI may do another attempt or call
    ObjectAimingCancel.

    :param req: RPC request; only ``dry_run`` is consumed here.
    :param ui: client that runs the aiming session.
    :return:
    """
    scene = glob.LOCK.scene_or_exception()
    fo, user_name = await object_aiming_check(ui)

    obj_type = glob.OBJECT_TYPES[scene.object(fo.obj_id).type].meta

    # aiming only makes sense for mesh-based models with focus points
    assert obj_type.object_model
    assert obj_type.object_model.mesh

    focus_points = obj_type.object_model.mesh.focus_points

    assert focus_points

    # every focus point must have been aimed before finishing
    if len(fo.poses) < len(focus_points):
        raise Arcor2Exception(
            f"Only {len(fo.poses)} points were done out of {len(focus_points)}."
        )

    obj = scene.object(fo.obj_id)
    assert obj.pose

    obj_inst = get_instance(fo.obj_id, CollisionObject)

    if req.dry_run:
        return

    # pair mesh focus points with the recorded robot positions, index by index
    fp: list[Position] = []
    rp: list[Position] = []

    for idx, pose in fo.poses.items():
        fp.append(focus_points[idx].position)
        rp.append(pose.position)

    mfa = MeshFocusAction(fp, rp)

    logger.debug(f"Attempt to aim object {obj_inst.name}, data: {mfa}")

    try:
        new_pose = await scene_srv.focus(mfa)  # TODO how long does it take?
    except scene_srv.SceneServiceException as e:
        logger.error(f"Aiming failed with: {e}, mfa: {mfa}.")
        raise Arcor2Exception(f"Aiming failed. {str(e)}") from e

    logger.info(f"Done aiming for {obj_inst.name}.")

    # the aiming session is finished; drop its bookkeeping entry
    _objects_being_aimed.pop(user_name, None)

    # pose update is fire-and-forget; the RPC returns immediately
    asyncio.create_task(update_scene_object_pose(scene, obj, new_pose, obj_inst))

    return None
async def get_scene_problems(scene: CachedScene) -> Optional[list[str]]:
    """Handle caching of scene problems.

    Recomputes the cached problem list for *scene* when the scene or any
    of the ObjectTypes it uses changed since the last computation, and
    prunes cache entries belonging to scenes removed from storage.

    :param scene: scene to check (must carry a `modified` timestamp).
    :return: list of problems, or None when there are none.
    """
    # a timestamp is required to decide cache validity
    assert scene.modified

    ots = scene.object_types

    # refresh glob.OBJECT_TYPES before computing/validating problems
    await get_object_types()
    ot_modified = get_ot_modified(ots)

    # recompute when there is no entry yet or any dependency changed
    if (scene.id not in _scene_problems
            or _scene_problems[scene.id].scene_modified < scene.modified
            or _scene_problems[scene.id].ot_modified != ot_modified):

        logger.debug(f"Updating scene_problems for {scene.name}.")

        _scene_problems[scene.id] = SceneProblems(
            scene.modified,
            scene_problems(glob.OBJECT_TYPES, scene),
            # fix: store the very value compared above instead of calling
            # get_ot_modified(ots) a second time (consistent with
            # get_project_problems and avoids a redundant recomputation)
            ot_modified,
        )

    # prune removed scenes
    for csi in set(_scene_problems.keys()) - await storage.get_scene_ids():
        logger.debug(f"Pruning cached problems for removed scene {csi}.")
        _scene_problems.pop(csi, None)

    sp = _scene_problems[scene.id].problems
    return sp if sp else None
async def unschedule_auto_remove(obj_type: str) -> None:
    """Take *obj_type* off the auto-removal schedule, if it is on it.

    Logs only when an entry was actually removed; a missing entry is
    silently ignored.
    """
    if obj_type in _objects_to_auto_remove:
        _objects_to_auto_remove.remove(obj_type)
        logger.debug(f"OT {obj_type} unscheduled to be auto-removed.")
async def remove_scheduled() -> None:
    """Start deletion of all scheduled ObjectTypes and reset the schedule.

    Deletion itself is fire-and-forget; entries whose type is no longer
    known are skipped.
    """
    logger.debug(f"Going to auto-remove following types: {_objects_to_auto_remove}")

    for ot_id in _objects_to_auto_remove:
        if ot_id not in glob.OBJECT_TYPES:
            continue
        asyncio.create_task(delete_if_not_used(glob.OBJECT_TYPES[ot_id].meta))

    _objects_to_auto_remove.clear()
async def remove_object_type(obj_type_id: str) -> None:
    """Delete the Python module file backing the given ObjectType.

    :param obj_type_id: ObjectType id (PascalCase); the file name is its
        depascalized form.
    :raises Arcor2Exception: when the module file does not exist.
    """
    module_file = f"{humps.depascalize(obj_type_id)}.py"
    path = os.path.join(settings.OBJECT_TYPE_PATH, settings.OBJECT_TYPE_MODULE, module_file)

    logger.debug(f"Deleting {path}.")

    try:
        await hlp.run_in_executor(os.remove, path, propagate=[FileNotFoundError])
    except FileNotFoundError as e:
        raise Arcor2Exception(f"File for {obj_type_id} was not found.") from e
async def add_object_to_scene(scene: UpdateableCachedScene, obj: SceneObject, dry_run: bool = False) -> None:
    """Validate *obj* as a new scene object and insert it into *scene*.

    :param scene: scene to be updated.
    :param obj: object to add.
    :param dry_run: when True, only the validation is performed.
    :return:
    """
    check_object(glob.OBJECT_TYPES, scene, obj, new_one=True)

    if not dry_run:
        scene.upsert_object(obj)
        logger.debug(f"Object {obj.id} ({obj.type}) added to scene.")
async def invalidate_joints_using_object_as_parent(obj: common.SceneObject) -> None:
    """Invalidate stored robot joints of action points parented to *obj*.

    Walks every project that uses *obj* as a parent, invalidates joints of
    the affected action points and persists each updated project.
    """
    assert glob.LOCK.scene

    # Invalidates robot joints if action point's parent has changed its pose.
    async for project in projects_using_object_as_parent(glob.LOCK.scene.id, obj.id):

        for ap in project.action_points:
            if ap.parent == obj.id:
                logger.debug(f"Invalidating joints for {project.name}/{ap.name}.")
                project.invalidate_joints(ap.id)

        await storage.update_project(project)
async def register_user_cb(req: srpc.u.RegisterUser.Request, ui: WsClient) -> None:
    """Log a user in and replay the current lock state to the new UI."""
    await glob.USERS.login(req.args.user_name, ui)
    logger.debug(
        f"User {req.args.user_name} just logged in. Known user names are: {glob.USERS.user_names}"
    )

    await glob.LOCK.cancel_auto_release(req.args.user_name)

    # those are locks that are known for all users
    for user, user_objects in glob.LOCK.all_ui_locks.items():
        if user == req.args.user_name:
            continue
        glob.LOCK.notifications_q.put_nowait(LockEventData(user_objects, user, True, ui))

    # these locks are known only to the current user
    user_write_locks = (await glob.LOCK.get_owner_locks(req.args.user_name))[1]
    if user_write_locks:
        glob.LOCK.notifications_q.put_nowait(
            LockEventData(user_write_locks, req.args.user_name, True, ui)
        )
async def execute_action(action_method: Callable, params: list[Any]) -> None:
    """Run one action in an executor and broadcast execution/result events.

    Announces the start via ActionExecution, runs *action_method* with
    *params* in an executor, records the result (or error) into an
    ActionResult event and broadcasts it — unless the action got
    cancelled in the meantime.

    :param action_method: bound action method to invoke.
    :param params: positional arguments for the method.
    """
    assert glob.RUNNING_ACTION

    await notif.broadcast_event(ActionExecution(ActionExecution.Data(glob.RUNNING_ACTION)))

    evt = ActionResult(ActionResult.Data(glob.RUNNING_ACTION))

    try:
        action_result = await hlp.run_in_executor(action_method, *params)
    except Arcor2Exception as e:
        logger.error(f"Failed to run method {action_method.__name__} with params {params}. {str(e)}")
        logger.debug(str(e), exc_info=True)
        evt.data.error = str(e)
    else:
        if action_result is not None:
            # keep the result so it can be referenced by later actions
            glob.PREV_RESULTS[glob.RUNNING_ACTION] = action_result
            try:
                evt.data.results = results_to_json(action_result)
            except Arcor2Exception:
                logger.error(
                    f"Method {action_method.__name__} returned unsupported type of result: {action_result}."
                )

    if glob.RUNNING_ACTION is None:
        # action was cancelled, do not send any event
        return  # type: ignore # action could be cancelled during its execution

    glob.RUNNING_ACTION = None
    glob.RUNNING_ACTION_PARAMS = None

    await notif.broadcast_event(evt)
async def delete_if_not_used(meta: ObjectTypeMeta) -> None:
    """Auto-remove a VirtualCollisionObject type when no scene uses it.

    Non-VCO types and VCOs still referenced by any scene are left alone.
    Storage/file deletion is best-effort: failures are logged but the
    in-memory type is dropped and the REMOVE event is broadcast anyway.

    :param meta: metadata of the ObjectType to consider for removal.
    """
    if meta.base != VirtualCollisionObject.__name__:
        logger.debug(f"{meta.type} is not a VCO!")
        return

    assert meta.object_model
    assert meta.type == meta.object_model.model().id, \
        f"meta.type={meta.type}, model.id={meta.object_model.model().id}"

    # any usage in any scene vetoes the removal
    async for scn in scenes():
        if any(scn.objects_of_type(meta.type)):
            logger.debug(f"Not auto-removing VCO {meta.type} as it is used in scene {scn.name}.")
            return

    logger.debug(f"Auto-removing VCO {meta.type} as it is not used in any scene.")

    try:
        await asyncio.gather(
            storage.delete_object_type(meta.type),
            storage.delete_model(meta.type),
            remove_object_type(meta.type),
        )
    except Arcor2Exception as e:
        # deliberate best-effort: log and continue with in-memory cleanup
        logger.warn(str(e))

    del glob.OBJECT_TYPES[meta.type]

    logger.debug(f"Auto-removing {meta.type} done... {meta}")

    evtr = sevts.o.ChangedObjectTypes([meta])
    evtr.change_type = Event.Type.REMOVE
    await notif.broadcast_event(evtr)
def _feature(type_def: type[Robot], method_name: str, base_class: type[Robot]) -> bool:
    """Decide whether the robot "feature" *method_name* is usable on *type_def*.

    A feature counts as available when the method is (re)implemented in some
    concrete ancestor (not the abstract base), the implementing class is not
    disabled, and the override keeps the base class signature.

    :param type_def: concrete robot type being inspected.
    :param method_name: name of the feature method.
    :param base_class: class where the feature is declared abstract.
    :return: True when the feature is properly implemented.
    :raises Arcor2Exception: when the defining ObjectType can't be resolved
        or is disabled.
    """
    assert glob.OBJECT_TYPES

    method = getattr(type_def, method_name)
    # the qualname's first component names the class where the method body lives
    where_it_is_defined = glob.OBJECT_TYPES.get(method.__qualname__.split(".")[0], None)

    if where_it_is_defined is None:
        raise Arcor2Exception(f"Can't get origin for {type_def.__name__}/{method_name}.")

    if where_it_is_defined.type_def is None or where_it_is_defined.ast is None:
        raise Arcor2Exception(
            f"Origin {where_it_is_defined.meta.type} for {type_def.__name__}/{method_name} is disabled."
        )

    logger.debug(
        f"Processing {type_def.__name__}/{method_name} "
        f"(defined in {where_it_is_defined.type_def.__name__}), with base class {base_class.__name__}."
    )

    if where_it_is_defined.type_def is base_class or where_it_is_defined.meta.disabled:
        # all of the "feature" methods are abstract in the base class and have to be implemented for the concrete robot
        return False

    if not function_implemented(where_it_is_defined.ast, method_name):
        logger.debug(f"{type_def.__name__}/{method_name} not implemented.")
        return False

    sign = inspect.signature(getattr(where_it_is_defined.type_def, method_name))

    if not (res := inspect.signature(getattr(base_class, method_name)) == sign):
        logger.debug(f"{type_def.__name__}/{method_name} has invalid signature.")

    # fix: the comparison result was computed but never returned, so the
    # function implicitly returned None despite being annotated -> bool
    return res
async def clear_auto_remove_schedule() -> None:
    """Discard every pending auto-remove entry, logging the old content."""
    logger.debug(
        f"Auto-remove schedule will be cleared. It contained: {_objects_to_auto_remove}"
    )
    _objects_to_auto_remove.clear()
async def schedule_auto_remove(obj_type: str) -> None:
    """Mark *obj_type* for later automatic removal."""
    logger.debug(f"OT {obj_type} scheduled to be autoremoved.")
    _objects_to_auto_remove.add(obj_type)
async def create_object_instance(obj: SceneObject, overrides: Optional[list[Parameter]] = None ) -> None: obj_type = glob.OBJECT_TYPES[obj.type] # settings -> dataclass assert obj_type.type_def logger.debug( f"Creating instance of {obj_type.type_def.__name__} with name {obj.name}. " f"Parameters: {obj.parameters}, overrides: {overrides}.") settings = settings_from_params(obj_type.type_def, obj.parameters, overrides) assert obj_type.type_def is not None try: try: # the order must be from specific to the generic types if issubclass(obj_type.type_def, Robot): assert obj.pose is not None glob.SCENE_OBJECT_INSTANCES[ obj.id] = await hlp.run_in_executor( obj_type.type_def, obj.id, obj.name, obj.pose, settings) elif issubclass(obj_type.type_def, CollisionObject): assert obj.pose is not None coll_model: Optional[Models] = None if obj_type.meta.object_model: coll_model = obj_type.meta.object_model.model() glob.SCENE_OBJECT_INSTANCES[ obj.id] = await hlp.run_in_executor( obj_type.type_def, obj.id, obj.name, obj.pose, coll_model, settings) elif issubclass(obj_type.type_def, GenericWithPose): assert obj.pose is not None glob.SCENE_OBJECT_INSTANCES[ obj.id] = await hlp.run_in_executor( obj_type.type_def, obj.id, obj.name, obj.pose, settings) elif issubclass(obj_type.type_def, Generic): assert obj.pose is None glob.SCENE_OBJECT_INSTANCES[ obj.id] = await hlp.run_in_executor( obj_type.type_def, obj.id, obj.name, settings) else: raise Arcor2Exception("Object type with unknown base.") except (TypeError, ValueError) as e: # catch some most often exceptions raise Arcor2Exception("Unhandled error.") from e except Arcor2Exception as e: # make the exception a bit more user-friendly by including the object's name raise Arcor2Exception( f"Failed to initialize {obj.name}. {str(e)}") from e return None
def object_actions(type_def: type[Generic], tree: AST) -> dict[str, ObjectAction]:
    """Collect metadata of all actions exposed by an ObjectType.

    Iterates decorated action methods of *type_def*, validates their
    signatures (the special trailing 'an' parameter, type annotations,
    return type) and builds parameter metadata via parameter plugins.
    Actions that fail validation are kept but marked disabled with the
    problem recorded.

    :param type_def: ObjectType class to inspect.
    :param tree: parsed AST of the ObjectType's source (for plugin metadata).
    :return: mapping of action name to its ObjectAction description.
    """
    ret: dict[str, ObjectAction] = {}

    # ...inspect.ismethod does not work on un-initialized classes
    for method_name, method_def in iterate_over_actions(type_def):

        meta: ActionMetadata = method_def.__action__  # type: ignore

        if meta.hidden:
            logger.debug(f"Action {method_name} of {type_def.__name__} is hidden.")
            continue

        data = ObjectAction(name=method_name, meta=meta)

        if method_name in type_def.CANCEL_MAPPING:
            meta.cancellable = True

        try:
            docstring = parse_docstring(method_def.__doc__)
            data.description = docstring.short_description

            signature = inspect.signature(method_def)  # sig.parameters is OrderedDict

            try:
                method_tree = find_function(method_name, tree)
            except SourceException:
                # function is probably defined in predecessor, will be added later
                continue

            hints = get_type_hints(method_def)  # standard (unordered) dict

            # every action must accept the 'an' (action name) parameter...
            if "an" not in signature.parameters.keys():
                raise IgnoreActionException("Action is missing 'an' parameter.")

            # ...annotated as Optional[str]...
            try:
                if hints["an"] != Optional[str]:
                    raise IgnoreActionException("Parameter 'an' has invalid type annotation.")
            except KeyError:
                raise IgnoreActionException("Parameter 'an' is missing type annotation.")

            parameter_names_without_self = list(signature.parameters.keys())[1:]

            # ...and it must be the very last parameter
            if not parameter_names_without_self or parameter_names_without_self[-1] != "an":
                raise IgnoreActionException("The 'an' parameter have to be the last one.")

            # handle return
            try:
                return_ttype = hints["return"]
            except KeyError:
                raise IgnoreActionException("Action is missing return type annotation.")

            # ...just ignore NoneType for returns
            if return_ttype != type(None):  # noqa: E721

                if typing_inspect.is_tuple_type(return_ttype):
                    for arg in typing_inspect.get_args(return_ttype):
                        resolved_param = plugin_from_type(arg)
                        if resolved_param is None:
                            raise IgnoreActionException("None in return tuple is not supported.")
                        data.returns.append(resolved_param.type_name())
                else:
                    # TODO resolving needed for e.g. enums - add possible values to action metadata somewhere?
                    data.returns = [plugin_from_type(return_ttype).type_name()]

            for name in parameter_names_without_self[:-1]:  # omit also an

                try:
                    ttype = hints[name]
                except KeyError:
                    raise IgnoreActionException(f"Parameter {name} is missing type annotation.")

                param_type = plugin_from_type(ttype)
                assert param_type is not None

                args = ParameterMeta(name=name, type=param_type.type_name())
                try:
                    # plugin fills in parameter-specific metadata from the AST
                    param_type.meta(args, method_def, method_tree)
                except ParameterPluginException as e:
                    raise IgnoreActionException(e) from e

                if name in type_def.DYNAMIC_PARAMS:
                    args.dynamic_value = True
                    dvp = type_def.DYNAMIC_PARAMS[name][1]
                    if dvp:
                        args.dynamic_value_parents = dvp

                def_val = signature.parameters[name].default
                if def_val is not inspect.Parameter.empty:
                    args.default_value = param_type.value_to_json(def_val)

                args.description = docstring.param(name)
                data.parameters.append(args)

        except Arcor2Exception as e:
            # keep the action visible, but disabled with the reason recorded
            data.disabled = True
            data.problem = str(e)
            logger.warn(f"Disabling action {method_name} of {type_def.__name__}. {str(e)}")

        ret[data.name] = data

    return ret
async def get_object_data(object_types: ObjectTypeDict, obj_id: str) -> None:
    """Load/update one ObjectType (and its bases/mixins) into *object_types*.

    Skips types already processed in this pass or unchanged since the last
    update. On any failure (unresolvable base, bad source, missing model)
    the type is stored as disabled with the problem recorded, so the server
    keeps running.

    :param object_types: accumulator for the current update pass.
    :param obj_id: id of the ObjectType to process.
    """
    logger.debug(f"Processing {obj_id}.")

    if obj_id in object_types:
        logger.debug(f"{obj_id} already processed, skipping...")
        return

    obj_iddesc = await storage.get_object_type_iddesc(obj_id)

    if obj_id in glob.OBJECT_TYPES:

        assert obj_iddesc.modified
        assert glob.OBJECT_TYPES[obj_id].meta.modified, f"Object {obj_id} does not have 'modified' in its meta."

        # unchanged since last seen -> nothing to do
        if obj_iddesc.modified == glob.OBJECT_TYPES[obj_id].meta.modified:
            logger.debug(f"No need to update {obj_id}.")
            return

    obj = await storage.get_object_type(obj_id)

    try:
        # bases[0] is the direct base, the rest are mixins
        bases = otu.base_from_source(obj.source, obj_id)

        if not bases:
            logger.debug(f"{obj_id} is definitely not an ObjectType (subclass of {object.__name__}), maybe mixin?")
            return

        # process the base first (recursively) unless it is known/built-in
        if bases[0] not in object_types.keys() | built_in_types_names():
            logger.debug(f"Getting base class {bases[0]} for {obj_id}.")
            await get_object_data(object_types, bases[0])

        # mixins just need their modules saved/imported, not full processing
        for mixin in bases[1:]:
            mixin_obj = await storage.get_object_type(mixin)

            await hlp.run_in_executor(
                hlp.save_and_import_type_def,
                mixin_obj.source,
                mixin_obj.id,
                object,
                settings.OBJECT_TYPE_PATH,
                settings.OBJECT_TYPE_MODULE,
            )

    except Arcor2Exception as e:
        logger.error(f"Disabling ObjectType {obj.id}: can't get a base. {str(e)}")
        object_types[obj_id] = ObjectTypeData(
            ObjectTypeMeta(obj_id, "ObjectType disabled.", disabled=True, problem="Can't get base.", modified=obj.modified)
        )
        return

    logger.debug(f"Updating {obj_id}.")

    try:
        type_def = await hlp.run_in_executor(
            hlp.save_and_import_type_def,
            obj.source,
            obj.id,
            Generic,
            settings.OBJECT_TYPE_PATH,
            settings.OBJECT_TYPE_MODULE,
        )
    except Arcor2Exception as e:
        logger.debug(f"{obj.id} is probably not an ObjectType. {str(e)}")
        return

    assert issubclass(type_def, Generic)

    try:
        meta = meta_from_def(type_def)
    except Arcor2Exception as e:
        logger.error(f"Disabling ObjectType {obj.id}.")
        logger.debug(e, exc_info=True)
        object_types[obj_id] = ObjectTypeData(
            ObjectTypeMeta(obj_id, "ObjectType disabled.", disabled=True, problem=str(e), modified=obj.modified)
        )
        return

    meta.modified = obj.modified

    if obj.model:
        try:
            model = await storage.get_model(obj.model.id, obj.model.type)
        except Arcor2Exception as e:
            logger.error(f"{obj.model.id}: failed to get collision model of type {obj.model.type}. {str(e)}")
            meta.disabled = True
            meta.problem = "Can't get collision model."
            object_types[obj_id] = ObjectTypeData(meta)
            return

        # a Mesh model additionally needs its data file to exist
        if isinstance(model, Mesh) and model.data_id not in await storage.files_ids():
            logger.error(f"Disabling {meta.type} as its mesh file {model.data_id} does not exist.")
            meta.disabled = True
            meta.problem = "Mesh file does not exist."
            object_types[obj_id] = ObjectTypeData(meta)
            return

        # ObjectModel stores the model under a field named after its type
        kwargs = {model.type().value.lower(): model}
        meta.object_model = ObjectModel(model.type(), **kwargs)  # type: ignore

    ast = parse(obj.source)
    otd = ObjectTypeData(meta, type_def, object_actions(type_def, ast), ast)

    object_types[obj_id] = otd
async def get_object_types() -> UpdatedObjectTypes:
    """Serves to initialize or update knowledge about available ObjectTypes.

    On first call, built-in types are loaded and the ObjectType directory
    is prepared. On every call, all stored types are (re)processed, new /
    updated / removed ids are computed, and corresponding change events
    are broadcast (except during the initial load).

    :return: sets of new, updated and removed ObjectType ids.
    """
    initialization = False

    # initialize with built-in types, this has to be done just once
    if not glob.OBJECT_TYPES:
        logger.debug("Initialization of ObjectTypes.")
        initialization = True
        await hlp.run_in_executor(prepare_object_types_dir, settings.OBJECT_TYPE_PATH, settings.OBJECT_TYPE_MODULE)
        glob.OBJECT_TYPES.update(built_in_types_data())

    updated_object_types: ObjectTypeDict = {}

    object_type_ids: Union[set[str], list[str]] = await storage.get_object_type_ids()

    if __debug__:
        # this should uncover potential problems with order in which ObjectTypes are processed
        import random

        object_type_ids = list(object_type_ids)
        random.shuffle(object_type_ids)

    for obj_id in object_type_ids:
        await get_object_data(updated_object_types, obj_id)

    # known before but no longer in storage (built-ins never count as removed)
    removed_object_ids = {
        obj for obj in glob.OBJECT_TYPES.keys() if obj not in object_type_ids
    } - built_in_types_names()
    updated_object_ids = {k for k in updated_object_types.keys() if k in glob.OBJECT_TYPES}
    new_object_ids = {k for k in updated_object_types.keys() if k not in glob.OBJECT_TYPES}

    logger.debug(f"Removed ids: {removed_object_ids}")
    logger.debug(f"Updated ids: {updated_object_ids}")
    logger.debug(f"New ids: {new_object_ids}")

    if not initialization and removed_object_ids:

        # TODO remove it from sys.modules
        remove_evt = ChangedObjectTypes([v.meta for k, v in glob.OBJECT_TYPES.items() if k in removed_object_ids])
        remove_evt.change_type = Event.Type.REMOVE
        asyncio.ensure_future(notif.broadcast_event(remove_evt))

        for removed in removed_object_ids:
            assert removed not in built_in_types_names(), "Attempt to remove built-in type."
            del glob.OBJECT_TYPES[removed]
            await hlp.run_in_executor(remove_object_type, removed)

    glob.OBJECT_TYPES.update(updated_object_types)

    logger.debug(f"All known ids: {glob.OBJECT_TYPES.keys()}")

    for obj_type in updated_object_types.values():

        # if description is missing, try to get it from ancestor(s)
        if not obj_type.meta.description:
            try:
                obj_type.meta.description = obj_description_from_base(glob.OBJECT_TYPES, obj_type.meta)
            except otu.DataError as e:
                logger.error(f"Failed to get info from base for {obj_type}, error: '{e}'.")

        if not obj_type.meta.disabled and not obj_type.meta.built_in:
            add_ancestor_actions(obj_type.meta.type, glob.OBJECT_TYPES)

    if not initialization:

        if updated_object_ids:
            update_evt = ChangedObjectTypes([v.meta for k, v in glob.OBJECT_TYPES.items() if k in updated_object_ids])
            update_evt.change_type = Event.Type.UPDATE
            asyncio.ensure_future(notif.broadcast_event(update_evt))

        if new_object_ids:
            add_evt = ChangedObjectTypes([v.meta for k, v in glob.OBJECT_TYPES.items() if k in new_object_ids])
            add_evt.change_type = Event.Type.ADD
            asyncio.ensure_future(notif.broadcast_event(add_evt))

    for obj_type in updated_object_types.values():
        if obj_type.type_def and issubclass(obj_type.type_def, Robot) and not obj_type.type_def.abstract():
            await get_robot_meta(obj_type)

    # if object does not change but its base has changed, it has to be reloaded
    for obj_id, obj in glob.OBJECT_TYPES.items():

        if obj_id in updated_object_ids:
            continue

        if obj.type_def and obj.meta.base in updated_object_ids:
            logger.debug(f"Re-importing {obj.meta.type} because its base {obj.meta.base} type has changed.")

            obj.type_def = await hlp.run_in_executor(
                hlp.import_type_def,
                obj.meta.type,
                Generic,
                settings.OBJECT_TYPE_PATH,
                settings.OBJECT_TYPE_MODULE,
            )

    return UpdatedObjectTypes(new_object_ids, updated_object_ids, removed_object_ids)
obj_type.robot_meta.features.forward_kinematics = _feature( obj_type.type_def, Robot.forward_kinematics.__name__, base_class) obj_type.robot_meta.features.hand_teaching = _feature( obj_type.type_def, Robot.set_hand_teaching_mode.__name__, base_class) if urdf_name := obj_type.type_def.urdf_package_name: if urdf_name not in await ps.files_ids(): logger.error( f"URDF package {urdf_name} for {obj_type.meta.type} does not exist." ) else: obj_type.robot_meta.urdf_package_filename = urdf_name # TODO check if URDF is valid? logger.debug(obj_type.robot_meta) async def stop(robot_inst: Robot) -> None: if not robot_inst.move_in_progress: raise Arcor2Exception("Robot is not moving.") await hlp.run_in_executor(robot_inst.stop) async def ik( robot_inst: Robot, end_effector_id: str, arm_id: Optional[str], pose: common.Pose,