def GetId(self):
    """Return the value of a single, unique key-value pair of this object.

    This is used for some scripts which have a single parameter
    (for example pid or file name).

    Returns:
        The value of the first ontology key if present, otherwise the first
        value found in the id dictionary, or "" for a top-level url.
        If no parameter is found at all, switches to edition mode.
    """
    logging.debug("GetId m_entity_type=%s m_entity_id=%s", self.m_entity_type, str(self.m_entity_id))
    try:
        # If this is a top-level url, no object type, therefore no id.
        if self.m_entity_type == "":
            return ""

        split_kv = self.m_entity_id_dict
        logging.debug("GetId split_kv=%s", str(split_kv))

        # If this class is defined in our ontology, then we know the first property.
        ent_onto = lib_util.OntologyClassKeys(self.m_entity_type)
        if ent_onto:
            key_first = ent_onto[0]
            # Only if this mandatory key is in the dict.
            try:
                return split_kv[key_first]
            except KeyError:
                # This is a desperate case...
                pass
        # Returns the first value but this is not reliable at all:
        # dictionary iteration order decides which value is picked.
        for key in split_kv:
            return split_kv[key]
    except KeyError:
        pass

    # If no parameter is found, although one was requested.
    self.enter_edition_mode()
    # TODO: This needs a cleaner exit. The assert stops execution here
    # (unless running under "python -O"), so the return is normally dead code.
    assert False
    return ""
def BuildEntity(self, entity_type, *entity_id_arr):
    """Build the "key=value,key=value" id string of an object.

    The keys come from the ontology of entity_type and are paired positionally
    with entity_id_arr.

    :param entity_type: Class name whose ontology provides the ordered key names.
    :param entity_id_arr: Attribute values, in the same order as the ontology keys.
    :return: A comma-separated "key=value" string.
    """
    keys = lib_util.OntologyClassKeys(entity_type)

    len_keys = len(keys)
    len_ent_ids = len(entity_id_arr)
    if len_keys < len_ent_ids:
        # More values than ontology keys: append fake temporary keys.
        sys.stderr.write(
            "BuildEntity entity_type=%s Not enough keys:%s and %s\n"
            % (entity_type, str(keys), str(entity_id_arr)))
        keys += ["Key_%d" % idx for idx in range(len_keys, len_ent_ids)]
    elif len_keys > len_ent_ids:
        # Not enough values. This is not a problem because of queries returning several objects.
        sys.stderr.write(
            "BuildEntity entity_type=%s Not enough values:%s and %s\n"
            % (entity_type, str(keys), str(entity_id_arr)))

    # Pair keys and values directly. The previous dict(zip(...)).items() round-trip
    # only preserved the key order by accident (guaranteed in Python >= 3.7 only)
    # and was needless work; zip keeps the ontology order by construction.
    entity_id = ",".join("%s=%s" % key_value for key_value in zip(keys, entity_id_arr))
    return entity_id
def MonikerToEventFile(jsonMonik):
    """Compute the path of the events file matching a JSON moniker.

    The events directory for the class is created on demand under the
    temporary directory.
    """
    # The subject could be parsed with the usual functions made for moniker.
    entity_type = jsonMonik["entity_type"]

    events_dir = lib_common.tmpDir + "/Events/" + entity_type
    if not os.path.isdir(events_dir):
        os.makedirs(events_dir)

    # Keep only the properties listed in the class ontology.
    onto_attr_names = lib_util.OntologyClassKeys(entity_type)
    ids_dict = {one_attr: jsonMonik[one_attr] for one_attr in onto_attr_names}

    event_file_name = EntityTypeIdsToEventFile(entity_type, ids_dict)
    return events_dir + "/" + event_file_name
def GetId(self):
    """Return the value of the first key-value pair of this object's moniker.

    Used by scripts taking a single parameter (for example pid or file name).
    Falls back to edition mode when no parameter can be found.
    """
    sys.stderr.write("GetId m_entity_type=%s m_entity_id=%s\n" % (self.m_entity_type, str(self.m_entity_id)))
    try:
        # A top-level url has no object type, therefore no id.
        if self.m_entity_type == "":
            return ""

        moniker_dict = lib_util.SplitMoniker(self.m_entity_id)
        sys.stderr.write("GetId splitKV=%s\n" % (str(moniker_dict)))

        # When the class belongs to our ontology, its first property is known.
        class_onto = lib_util.OntologyClassKeys(self.m_entity_type)
        if class_onto:
            first_key = class_onto[0]
            # Only if this mandatory key is in the dict.
            if first_key in moniker_dict:
                return moniker_dict[first_key]
            # This is a desperate case...

        # Otherwise pick whatever value comes first: not reliable at all.
        for any_value in moniker_dict.values():
            return any_value
    except KeyError:
        pass

    # No parameter was found although one was requested.
    self.EditionMode()
    return ""
def ImportSurvolModuleFromWmiClass(connWmi, className):
    """Return the first class in the WMI inheritance chain having a Survol ontology.

    Starts from className itself, then walks up its base classes.

    :return: A (class_name, ontology_keys) pair, or (None, None) if no class
             in the chain has an ontology.
    """
    ascending_classes = (className,) + lib_wmi.WmiBaseClasses(connWmi, className)
    for one_class in ascending_classes:
        # Maybe there is a module without ontology.
        # In this case, try a base class: this is the purpose of this function.
        onto_keys = lib_util.OntologyClassKeys(one_class)
        if onto_keys:
            return one_class, onto_keys
    return None, None
def add_associated_instances(grph, root_node, entity_type, entity_id, associator_attribute):
    """Add to the graph the instances associated to a given WMI object.

    :param grph: The rdflib graph receiving the triples.
    :param root_node: Node of the source object; linked to each associated instance.
    :param entity_type: Class name of the source object.
    :param entity_id: Id string of the source object.
    :param associator_attribute: "AssociatorName.RoleName" string (split on the
        first dot below); raises if the role part is missing.
    """
    assert root_node.find("__associator_attribute__") < 0
    logging.debug("This is implemented for WMI only, yet.")
    logging.debug("entity_type=%s entity_id=%s associator_attribute=%s",
                  entity_type, entity_id, associator_attribute)
    #result_class, result_role = lib_ontology_tools.get_associated_attribute(
    #    "wmi", lib_wmi.extract_specific_ontology_wmi, associator_attribute)
    result_class, result_role = lib_ontology_tools.get_associated_class_role(
        "wmi", lib_wmi.extract_specific_ontology_wmi, associator_attribute)

    associator_name, _, input_role = associator_attribute.partition(".")
    if not input_role:
        raise Exception("associator_attribute %s is invalid" % associator_attribute)

    # This path will eventually be reformatted for WMI needs.
    wmi_path = entity_type + "." + entity_id
    iter_objects = lib_wmi.WmiSparqlExecutor().enumerate_associated_instances(
        wmi_path, associator_name, result_class, result_role)

    # WMI returns the attributes of each associated instance, only for the keys.
    # However, it is needed to iterate on the key-value pairs to transform them into strings.
    # So, an extra check is done, to be sure that the simplified ontology of survol
    # (list of keys per class) matches the WMI class definition. Survol ontology of classes
    # is much simpler and is just the list of keys, but must be the same as the WMI
    # class description.
    result_class_keys = lib_util.OntologyClassKeys(result_class)
    for associated_dict_key_values in iter_objects:
        #logging.debug("associated_dict_key_values=%s", associated_dict_key_values)
        # This key-values dictionary contains all the attributes of each associated
        # instance, at least for the keys.
        converted_key_value_dict = {}
        for property_key_node, property_value_node in associated_dict_key_values.items():
            property_key_name = lib_properties.PropToQName(property_key_node)
            property_value = str(property_value_node)
            if property_key_name in result_class_keys:
                converted_key_value_dict[property_key_name] = property_value
                logging.debug(" key=%s value=%s", property_key_name, property_value)
            else:
                # Extra attribute not part of the Survol ontology: dropped.
                logging.debug("Class %s, key %s is not in the ontology", result_class, property_key_name)
        script_node = lib_uris.LocalBox().node_from_dict(result_class, converted_key_value_dict)
        property_node = lib_properties.MakeProp(result_role)
        logging.debug("script_node=%s", script_node)
        grph.add((root_node, property_node, script_node))
def __call__(self, *args_call, **kwargs_call):
    """Build the entity id string from keyword arguments.

    Keys are emitted in the ontology order, not in the **kwargs order:
    the information is identical but the resulting string is deterministic,
    which helps for testing (notably, Python 2 does not preserve the order
    of **kwargs). A missing mandatory key raises KeyError.
    """
    ordered_keys = lib_util.OntologyClassKeys(self.m_class_name)
    encoded_pairs = [
        "%s=%s" % (one_key, lib_util.Base64EncodeConditional(kwargs_call[one_key]))
        for one_key in ordered_keys]
    return self.m_class_name + "." + ",".join(encoded_pairs)
def _build_entity_id(self, entity_type, *entity_id_arr):
    """Build the "key=value,..." entity id string for entity_type.

    This works only if the attribute values are in the same order as the ontology.

    :param entity_type: Class name whose ontology provides the ordered key names.
    :param entity_id_arr: Attribute values, in the same order as the ontology keys.
    :return: A comma-separated "key=value" string, values base64-encoded when needed.
    :raises ValueError: If the number of values does not match the ontology keys.
    """
    keys = lib_util.OntologyClassKeys(entity_type)
    # An assert would silently disappear under "python -O"; fail explicitly instead.
    if len(keys) != len(entity_id_arr):
        raise ValueError(
            "_build_entity_id: %d keys but %d values for class %s"
            % (len(keys), len(entity_id_arr), entity_type))

    # TODO: See lib_util.EntityUri which does something similar.
    entity_id = ",".join(
        "%s=%s" % (key_it, lib_util.Base64EncodeConditional(str(val_it)))
        for key_it, val_it in zip(keys, entity_id_arr))
    return entity_id
def data_retrieve(entity_type, entity_ids_arr):
    """Read the triples stored in the events file of the given object."""
    sys.stderr.write("data_retrieve entity_type=%s\n" % entity_type)

    # Ontology properties come in the right order, so values can be zipped with them.
    onto_keys = lib_util.OntologyClassKeys(entity_type)
    ids_dict = dict(zip(onto_keys, entity_ids_arr))

    events_file_name = EntityTypeIdsToEventFile(entity_type, ids_dict)
    sys.stderr.write("data_retrieve eventFilNam=%s\n" % events_file_name)

    triples_list = get_data_from_file(events_file_name)
    sys.stderr.write("data_retrieve NumTriples=%d\n" % len(triples_list))
    return triples_list
def json_moniker_to_entity_class_and_dict(json_moniker):
    """Split a (class_name, attributes_dict) moniker pair.

    :param json_moniker: Two-element sequence: class name (str or bytes)
        and a dict of attribute values.
    :return: The class name and a dict restricted to the ontology attributes.
    """
    assert len(json_moniker) == 2
    entity_type, entity_attributes_dict = json_moniker
    assert isinstance(entity_type, (six.binary_type, six.text_type))
    assert isinstance(entity_attributes_dict, dict)

    # TODO: Only the properties we need. In fact, they should come in the right order.
    # TODO: Make this faster by assuming this is a list of key-value pairs.
    entity_ids_dict = {}
    for ontology_attribute_name in lib_util.OntologyClassKeys(entity_type):
        entity_ids_dict[ontology_attribute_name] = entity_attributes_dict[ontology_attribute_name]
    return entity_type, entity_ids_dict
def KnownScriptToTitle(filScript, uriMode, entity_host=None, entity_suffix=None):
    """Return a human-readable title for a known script name.

    :param filScript: Script file name, e.g. "entity_mime.py".
    :param uriMode: URL mode; for MIME urls it embeds the Mime type.
    :param entity_host: Optional host name or CIMOM address, appended to the label.
    :param entity_suffix: Optional entity text used to build the label.
    """
    # Extra information depending on the script.
    # Special display if MIME URL.
    if filScript == "entity_mime.py":
        if not entity_suffix:
            entity_suffix = "None"
        # The Mime type is embedded into the mode, after a "mime:" prefix.
        entity_label = entity_suffix + " (" + lib_mime.ModeToMimeType(uriMode) + ")"
        return entity_label

    # The label is a Survol module name which is a class (with an EntityOntology() function),
    # or a namespace. So we give the right title.
    if filScript == "class_type_all.py":
        moduOntology = lib_util.OntologyClassKeys(entity_suffix)
        if moduOntology:
            entity_label = entity_suffix + " (Class)"
        else:
            entity_label = entity_suffix + " (Domain)"
        return entity_label

    # Fall back on the file name itself when the script has no known title.
    try:
        entity_label = scripts_to_titles[filScript]
    except KeyError:
        entity_label = filScript + "..."

    if entity_suffix:
        if entity_label:
            entity_label = entity_suffix + " (" + entity_label + ")"
        else:
            entity_label = entity_suffix

    # Maybe hostname is a CIMOM address (for WBEM) or a machine name.
    if entity_host:
        if not lib_util.IsLocalAddress(entity_host):
            # If this is a CIMOM, make it shorter: "http://vps516494.ovh.net:5988" or "https://vps516494.ovh.net:5989"
            host_only = lib_util.EntHostToIp(entity_host)
            entity_label += " at " + host_only

    # TODO: Add the host name in the title.
    return entity_label
def UrlJsonToTxt(valJson):
    """Build the URI of the object described by a JSON moniker."""
    entity_type = valJson["entity_type"]

    # Keep only the attributes listed in the class ontology.
    onto_attr_names = lib_util.OntologyClassKeys(entity_type)
    ids_dict = {one_attr: valJson[one_attr] for one_attr in onto_attr_names}

    return lib_common.gUriGen.UriMakeFromDict(entity_type, ids_dict)
def EntityToLabel(entity_type, entity_ids_concat, entity_host, force_entity_ip_addr):
    """Build a display label for an object from its class and concatenated ids.

    :param entity_type: Class name; empty ids return it unchanged (objtypes.py case).
    :param entity_ids_concat: "key=value,..." id string, split into a dict below.
    :param entity_host: Host of the object, passed through to the label helpers.
    :param force_entity_ip_addr: If set, an alias label embedding this address is built.
    """
    # Specific case of objtypes.py.
    if not entity_ids_concat:
        return entity_type

    # TODO: Robust logic as long as the value does not contain an '=' sign.
    splitKV = lib_util.SplitMoniker(entity_ids_concat)

    # Now build the array of values in the ontology order.
    ontoKeys = lib_util.OntologyClassKeys(entity_type)

    # Default value if key is missing.
    entity_ids_arr = [splitKV.get(keyOnto, keyOnto + "?") for keyOnto in ontoKeys]

    if force_entity_ip_addr:
        entity_label = EntityArrToAlias(entity_type, entity_ids_arr, entity_host, force_entity_ip_addr)
    else:
        entity_label = EntityArrToLabel(entity_type, entity_ids_arr, entity_host)

    # There might be extra properties which are not in our ontology.
    # This happens if duplicates from WBEM or WMI. MAKE THIS FASTER ?
    # Both must be sets, otherwise unsupported operation.
    # TODO: This set could be created once and for all. But the original order must be kept.
    setOntoKeys = set(ontoKeys)

    # This appends the keys which are not part of the normal ontology, therefore bring extra information.
    # This is rather slow and should normally not happen.
    for (extPrpKey, extPrpVal) in splitKV.items():
        if not extPrpKey in setOntoKeys:
            entity_label += " %s=%s" % (extPrpKey, extPrpVal)
    return entity_label
def entity_to_label(entity_type, entity_ids_concat, force_entity_ip_addr):
    """Return the label of an URL which is a script plus CGI arguments defining an object.

    For an association, we might have:
    entity_id=Dependent=root/cimv2:LMI_StorageExtent.CreationClassName="LMI_StorageExtent",SystemCreationClassName="PG_ComputerSystem"
    Antecedent=root/cimv2:LMI_DiskDrive.CreationClassName="LMI_DiskDrive",DeviceID="/dev/sda"
    This is not easy to manage but avoids ambiguities.
    """
    # objtypes.py passes no ids at all: the label is just the class name.
    if not entity_ids_concat:
        return entity_type

    # TODO: Robust logic as long as the value does not contain an '=' sign.
    moniker_dict = lib_util.SplitMoniker(entity_ids_concat)
    ontology_keys = lib_util.OntologyClassKeys(entity_type)

    # Values arranged in the ontology order; a missing key shows as "<key>?".
    ordered_values = []
    for one_key in ontology_keys:
        ordered_values.append(moniker_dict.get(one_key, one_key + "?"))

    if force_entity_ip_addr:
        the_label = _entity_array_to_alias(entity_type, ordered_values, force_entity_ip_addr)
    else:
        the_label = _entity_array_to_label(entity_type, ordered_values)

    # There might be extra properties which are not in our ontology,
    # for example duplicates from WBEM or WMI. Appending them brings
    # extra information; this is rather slow and should normally not happen.
    # TODO: This set could be created once and for all. But the original order must be kept.
    known_keys = set(ontology_keys)
    for extra_key, extra_value in moniker_dict.items():
        if extra_key not in known_keys:
            the_label += " %s=%s" % (extra_key, extra_value)
    return the_label
def recursive_walk_aux(a_parent_node, grand_parent_node, curr_dir, relative_dir, depth_call=1):
    """This lists the scripts and generate RDF nodes. Returns True if something was added.

    NOTE(review): this relies on names from an enclosing scope (entity_host,
    flag_show_all, callback_grph_add, gen_obj, entity_type, encoded_entity_id,
    entity_ids_arr, _test_usability, script_error_property,
    directory_usability_error_node) — presumably a nested function; confirm
    against the enclosing definition.
    """
    # In case there is nothing.
    dirs = None
    # os.walk is only used to get the immediate content of curr_dir.
    for path, dirs, files in os.walk(curr_dir):
        break

    # Maybe this class is not defined in our ontology.
    if dirs == None:
        logging.warning("dir_to_menu_aux(2) No content in %s", curr_dir)
        return False

    # Will still be None if nothing is added.
    rdf_node = None
    sub_path = path[len(curr_dir):]
    relative_dir_sub_path = relative_dir + sub_path
    arg_dir = relative_dir_sub_path.replace("/", ".")[1:]

    # If this is a remote host, all scripts are checked because they might have
    # the flag CanProcessRemote which is defined at the script level, not the directory level.
    if not entity_host:
        err_dir_node = directory_usability_error_node(relative_dir, depth_call)
        if err_dir_node:
            if flag_show_all:
                arg_dir_split = arg_dir.split(".")
                curr_dir_node = lib_util.DirDocNode(".".join(arg_dir_split[:-1]), arg_dir_split[-1])
                if not curr_dir_node:
                    curr_dir_node = lib_util.NodeLiteral("Cannot parse relative dir:%s" % arg_dir)
                callback_grph_add((grand_parent_node, pc.property_script, curr_dir_node), depth_call)
                callback_grph_add((curr_dir_node, script_error_property, err_dir_node), depth_call)
            # The directory is not usable, so leave immediately.
            return False

    contains_something = False
    for dir in dirs:
        # This directory may be generated by our Python interpreter.
        if dir == "__pycache__":
            continue
        full_sub_dir = os.path.join(curr_dir, dir)
        try:
            curr_dir_node = lib_util.DirDocNode(arg_dir, dir)
        except Exception as exc:
            logging.error("exc=%s", exc)
            raise
        if not curr_dir_node:
            logging.warning("curr_dir_node is None: arg_dir=%s dir=%s", arg_dir, dir)
            continue
        sub_relative_dir = relative_dir + "/" + dir
        sub_entity_class = ".".join(sub_relative_dir.split("/")[2:])
        onto_keys = lib_util.OntologyClassKeys(sub_entity_class)
        # TODO: Beware, if not ontology, returns empty array. Why not returning None ?
        if onto_keys != []:
            # Maybe this is a subclass with its own ontology.
            # So its scripts do not apply to the current class.
            logging.info("sub_entity_class=%s onto_keys=%s", sub_entity_class, onto_keys)
            continue
        something_added = recursive_walk_aux(
            curr_dir_node, a_parent_node, full_sub_dir, sub_relative_dir, depth_call + 1)
        # This adds the directory name only if it contains a script.
        if something_added:
            # It works both ways, possibly with different properties.
            callback_grph_add((a_parent_node, pc.property_script, curr_dir_node), depth_call)
        contains_something = contains_something | something_added

    for fil in files:
        # We want to list only the usable Python scripts.
        if not fil.endswith(".py") or fil == "__init__.py":
            continue
        script_path = relative_dir_sub_path + "/" + fil
        rdf_node = gen_obj.node_from_script_path(script_path, entity_type, encoded_entity_id)
        error_msg = None
        try:
            imported_mod = lib_util.GetScriptModule(arg_dir, fil)
        except Exception as exc:
            logging.warning("Caught:%s", exc)
            error_msg = exc
            imported_mod = None
            if not flag_show_all:
                continue
        if not error_msg:
            # Show only scripts which want to be shown. Each script can have an optional function
            # called Usable(): If it is there and returns False, the script is not displayed.
            error_msg = _test_usability(imported_mod, entity_type, entity_ids_arr)
        if error_msg:
            pass
        # If this is a local host.
        if not flag_show_all and error_msg and not entity_host:
            continue

        # If the entity is on another host, does the script run on remote entities ?
        # The concept of "CanProcessRemote" is a short-hand to avoid checking
        # if the remote is in the entity ids. This flag means:
        # "It is worth anyway investigating on a remote host, if the entity exists there."
        if entity_host:
            try:
                # Script can be used on a remote entity.
                can_process_remote = imported_mod.CanProcessRemote
            except AttributeError:
                can_process_remote = False
            if not can_process_remote:
                if not error_msg:
                    error_msg = "%s is local" % entity_host
                if not flag_show_all:
                    continue
            else:
                logging.debug("Script %s %s CAN work on remote entities", arg_dir, fil)

        # Here, we are sure that the script is added.
        # TODO: If no script is added, should not add the directory?
        callback_grph_add((a_parent_node, pc.property_script, rdf_node), depth_call)

        # Default doc text is file name minus the ".py" extension.
        nod_modu = lib_util.module_doc_string(imported_mod, fil[:-3])
        # nod_modu contains a literal of a str.
        if not isinstance(nod_modu, rdflib.term.Literal):
            logging.error("nod_modu=%s should be Literal instead of %s", nod_modu, type(nod_modu))
        callback_grph_add((rdf_node, pc.property_information, nod_modu), depth_call)
        if error_msg:
            callback_grph_add((rdf_node, script_error_property, lib_util.NodeLiteral(error_msg)), depth_call)

    # This tells if a script was added in this directory or one of the subdirs.
    return (rdf_node is not None) | contains_something
def DirToMenuAux(callbackGrphAdd, parentNode, curr_dir, relative_dir, entity_type, entity_ids_arr, encodedEntityId, entity_host, flagShowAll, depthCall=1):
    """Recursively list the scripts of a directory and add them as RDF nodes.

    :param callbackGrphAdd: Callback receiving each (subject, property, object) triple.
    :param parentNode: Node under which scripts and subdirectories are attached.
    :param curr_dir: Absolute directory being walked.
    :param relative_dir: Matching relative directory, used to derive the class name.
    :param entity_host: If set, only scripts flagged CanProcessRemote are usable.
    :param flagShowAll: If True, unusable scripts are shown with their error.
    :return: True if at least one script was added in this directory or a subdirectory.
    """
    # In case there is nothing. os.walk is only used for the immediate content.
    dirs = None
    for path, dirs, files in os.walk(curr_dir):
        break

    # Maybe this class is not defined in our ontology.
    if dirs == None:
        return False

    # Will still be None if nothing is added.
    rdfNode = None
    sub_path = path[len(curr_dir):]
    relative_dir_sub_path = relative_dir + sub_path
    argDir = relative_dir_sub_path.replace("/", ".")[1:]

    # Maybe there is a usability test in the current module.
    # The goal is to control all scripts in the subdirectories, from here.
    try:
        entity_class = ".".join(relative_dir.split("/")[2:])
        importedMod = lib_util.GetEntityModule(entity_class)
        if importedMod:
            errorMsg = TestUsability(importedMod, entity_type, entity_ids_arr)
            # if flagShowAll and errorMsg ???
            if errorMsg:
                # If set to True, the directory is displayed even if all its scripts
                # are not usable. Surprisingly, the message is not displayed as a
                # subdirectory, but in a separate square.
                if False:
                    callbackGrphAdd((parentNode, lib_common.MakeProp("Usability"), lib_common.NodeLiteral(errorMsg)), depthCall)
                return False
    except IndexError:
        # If we are at the top-level, no interest for the module.
        pass

    containsSomething = False
    for dir in dirs:
        # Might be generated by our Python interpreter.
        if dir == "__pycache__":
            continue
        full_sub_dir = curr_dir + "/" + dir
        full_sub_dir = full_sub_dir.replace("\\", "/")
        currDirNode = lib_util.DirDocNode(argDir, dir)
        if not currDirNode:
            DirMenuReport(depthCall, "currDirNode NONE: argDir=%s dir=%s\n" % (argDir, dir))
            continue
        sub_relative_dir = relative_dir + "/" + dir
        sub_entity_class = ".".join(sub_relative_dir.split("/")[2:])
        ontoKeys = lib_util.OntologyClassKeys(sub_entity_class)
        # TODO: Beware, if not ontology, returns empty array. Why not returning None ?
        if ontoKeys != []:
            # The module has an ontology so it is a class: its scripts do not
            # apply to the current class. BEWARE: NO MORE DEFAULT ONTOLOGY ["Id"]
            continue
        somethingAdded = DirToMenuAux(callbackGrphAdd, currDirNode, full_sub_dir, sub_relative_dir, entity_type, entity_ids_arr, encodedEntityId, entity_host, flagShowAll, depthCall + 1)
        # This adds the directory name only if it contains a script.
        if somethingAdded:
            callbackGrphAdd((parentNode, pc.property_script, currDirNode), depthCall)
        containsSomething = containsSomething | somethingAdded

    for fil in files:
        # We want to list only the usable Python scripts.
        if not fil.endswith(".py") or fil == "__init__.py":
            continue
        script_path = relative_dir_sub_path + "/" + fil
        if entity_host:
            genObj = lib_common.RemoteBox(entity_host)
        else:
            genObj = lib_common.gUriGen
        url_rdf = genObj.MakeTheNodeFromScript(script_path, entity_type, encodedEntityId)

        errorMsg = None
        try:
            importedMod = lib_util.GetScriptModule(argDir, fil)
        except Exception:
            errorMsg = sys.exc_info()[1]
            DirMenuReport(depthCall, "Cannot import=%s. Caught: %s\n" % (script_path, errorMsg))
            importedMod = None
            if not flagShowAll:
                continue

        if not errorMsg:
            # Show only scripts which want to be shown. Each script can have an optional function
            # called Usable(): If it is there and returns False, the script is not displayed.
            errorMsg = TestUsability(importedMod, entity_type, entity_ids_arr)
        if not flagShowAll and errorMsg:
            continue

        # If the entity is on another host, does this work on remote entities ?
        if entity_host:
            try:
                # Script can be used on a remote entity.
                can_process_remote = importedMod.CanProcessRemote
            except AttributeError:
                can_process_remote = False
            # BUG FIX: removed the debug leftover "can_process_remote = True" which
            # unconditionally overrode the flag computed just above, so every script
            # wrongly appeared usable on remote entities.
            if not can_process_remote:
                if not errorMsg:
                    errorMsg = "%s is local" % (entity_host)
                DirMenuReport(depthCall, "Script %s %s cannot work on remote entities: %s at %s\n" % (argDir, fil, encodedEntityId, entity_host))
                if not flagShowAll:
                    continue

        # Here, we are sure that the script is added.
        # TODO: If no script is added, should not add the directory?
        rdfNode = lib_common.NodeUrl(url_rdf)
        callbackGrphAdd((parentNode, pc.property_script, rdfNode), depthCall)

        # Default doc text is file name minus the ".py" extension.
        nodModu = lib_util.FromModuleToDoc(importedMod, fil[:-3])
        callbackGrphAdd((rdfNode, pc.property_information, nodModu), depthCall)
        if errorMsg:
            callbackGrphAdd((rdfNode, lib_common.MakeProp("Error"), lib_common.NodeLiteral(errorMsg)), depthCall)

    # This tells if a script was added in this directory or one of the subdirs.
    return (rdfNode is not None) | containsSomething
def DirToMenuAux(aParentNode, grandParentNode, curr_dir, relative_dir, depthCall=1):
    """Recursively list the scripts of a directory and add them as RDF nodes.

    Returns True if at least one script was added in this directory or a subdirectory.

    NOTE(review): this relies on names from an enclosing scope (entity_host,
    flagShowAll, callbackGrphAdd, genObj, entity_type, encodedEntityId,
    entity_ids_arr, TestUsability, DirectoryUsabilityErrorNode, DirMenuReport)
    — presumably a nested function; confirm against the enclosing definition.
    """
    #DirMenuReport( depthCall, "curr_dir=%s relative_dir=%s\n"%(curr_dir,relative_dir))
    # In case there is nothing. os.walk is only used for the immediate content.
    dirs = None
    for path, dirs, files in os.walk(curr_dir):
        break

    # Maybe this class is not defined in our ontology.
    if dirs == None:
        WARNING("DirToMenuAux(2) No content in %s", curr_dir)
        return False

    # Will still be None if nothing is added.
    rdfNode = None
    sub_path = path[len(curr_dir):]
    relative_dir_sub_path = relative_dir + sub_path
    argDir = relative_dir_sub_path.replace("/", ".")[1:]

    # If this is a remote host, all scripts are checked because they might have
    # the flag CanProcessRemote which is defined at the script level, not the directory level.
    if not entity_host:
        errDirNode = DirectoryUsabilityErrorNode(relative_dir, depthCall)
        if errDirNode:
            if flagShowAll:
                argDirSplit = argDir.split(".")
                currDirNode = lib_util.DirDocNode(".".join(argDirSplit[:-1]), argDirSplit[-1])
                if not currDirNode:
                    currDirNode = lib_util.NodeLiteral("Cannot parse relative dir:%s" % argDir)
                callbackGrphAdd((grandParentNode, pc.property_script, currDirNode), depthCall)
                callbackGrphAdd((currDirNode, lib_common.MakeProp("Error"), errDirNode), depthCall)
            # The directory is not usable, so leave immediately.
            return False

    containsSomething = False
    for dir in dirs:
        #DirMenuReport( depthCall, "dir=%s\n"%(dir))
        # Might be generated by our Python interpreter.
        if dir == "__pycache__":
            continue
        full_sub_dir = os.path.join(curr_dir, dir)
        currDirNode = lib_util.DirDocNode(argDir, dir)
        if not currDirNode:
            #DirMenuReport( depthCall, "currDirNode NONE: argDir=%s dir=%s\n"%(argDir,dir))
            continue
        sub_relative_dir = relative_dir + "/" + dir
        sub_entity_class = ".".join(sub_relative_dir.split("/")[2:])
        ontoKeys = lib_util.OntologyClassKeys(sub_entity_class)
        #DirMenuReport( depthCall, "Checked ontology of %s: ontoKeys=%s\n"%(sub_entity_class,str(ontoKeys)))
        # TODO: Beware, if not ontology, returns empty array. Why not returning None ?
        if ontoKeys != []:
            #DirMenuReport( depthCall, "Module %s has an ontology so it is a class. Skipping\n"%(sub_relative_dir))
            # BEWARE: NO MORE DEFAULT ONTOLOGY ["Id"]
            continue
        somethingAdded = DirToMenuAux(currDirNode, aParentNode, full_sub_dir, sub_relative_dir, depthCall + 1)
        # This adds the directory name only if it contains a script.
        if somethingAdded:
            # It works both ways, possibly with different properties.
            callbackGrphAdd((aParentNode, pc.property_script, currDirNode), depthCall)
        containsSomething = containsSomething | somethingAdded

    for fil in files:
        # We want to list only the usable Python scripts.
        if not fil.endswith(".py") or fil == "__init__.py":
            continue
        script_path = relative_dir_sub_path + "/" + fil
        #DirMenuReport( depthCall, "DirToMenu encodedEntityId=%s" % encodedEntityId)
        url_rdf = genObj.MakeTheNodeFromScript(script_path, entity_type, encodedEntityId)

        errorMsg = None
        try:
            importedMod = lib_util.GetScriptModule(argDir, fil)
        except Exception:
            errorMsg = sys.exc_info()[1]
            #DirMenuReport( depthCall, "DirToMenuAux Cannot import=%s. Caught: %s\n" % (script_path, errorMsg ) )
            importedMod = None
            if not flagShowAll:
                continue

        if not errorMsg:
            # Show only scripts which want to be shown. Each script can have an optional function
            # called Usable(): If it is there and returns False, the script is not displayed.
            errorMsg = TestUsability(importedMod, entity_type, entity_ids_arr)
        if errorMsg:
            pass
            #DEBUG("DirToMenuAux errorMsg(2)=%s",errorMsg)
        # If this is a local host.
        if not flagShowAll and errorMsg and not entity_host:
            continue

        # If the entity is on another host, does the script run on remote entities ?
        # The concept of "CanProcessRemote" is a short-hand to avoid checking
        # if the remote is in the entity ids. This flag means:
        # "It is worth anyway investigating on a remote host, if the entity exists there."
        if entity_host:
            try:
                # Script can be used on a remote entity.
                can_process_remote = importedMod.CanProcessRemote
            except AttributeError:
                can_process_remote = False
            # can_process_remote = True
            DEBUG("entity_dir_menu.py DirToMenuAux entity_host=%s can_process_remote=%d", entity_host, can_process_remote)
            if not can_process_remote:
                if not errorMsg:
                    errorMsg = "%s is local" % (entity_host)
                # DirMenuReport( depthCall, "Script %s %s cannot work on remote entities: %s at %s\n" % ( argDir, fil, encodedEntityId , entity_host ) )
                #DirMenuReport( depthCall, "Script %s %s cannot work on remote entities\n" % ( argDir, fil ) )
                if not flagShowAll:
                    continue
            else:
                DirMenuReport(depthCall, "Script %s %s CAN work on remote entities\n" % (argDir, fil))

        # Here, we are sure that the script is added.
        # TODO: If no script is added, should not add the directory?
        rdfNode = lib_common.NodeUrl(url_rdf)
        callbackGrphAdd((aParentNode, pc.property_script, rdfNode), depthCall)

        # Default doc text is file name minus the ".py" extension.
        nodModu = lib_util.FromModuleToDoc(importedMod, fil[:-3])
        callbackGrphAdd((rdfNode, pc.property_information, nodModu), depthCall)
        if errorMsg:
            callbackGrphAdd((rdfNode, lib_common.MakeProp("Error"), lib_common.NodeLiteral(errorMsg)), depthCall)

    # This tells if a script was added in this directory or one of the subdirs.
    return (rdfNode is not None) | containsSomething
def CallbackSelect(self, grph, class_name, predicate_prefix, filtered_where_key_values):
    """Generator yielding (object_path, key_value_pairs) for a Sparql SELECT.

    Two modes, chosen by predicate_prefix:
    - "survol:<Class>/<script>": run that specific script as a data source.
    - Otherwise: call the class module's SelectFromWhere enumeration.

    :param grph: Graph also receiving the triples of the script's triplestore.
    :param class_name: Class of the objects to select.
    :param predicate_prefix: e.g. "survol:CIM_DataFile/mapping_processes".
    :param filtered_where_key_values: WHERE clause attributes, passed to the source.
    """
    DEBUG("SurvolCallbackSelect class_name=%s predicate_prefix=%s where_key_values=%s",
          class_name, predicate_prefix, str(filtered_where_key_values))

    # Maybe there is a script: predicate_prefix="survol:CIM_DataFile/mapping_processes"
    prefix, colon, script_nickname = predicate_prefix.partition(":")
    DEBUG("SurvolCallbackSelect script_nickname=%s", script_nickname)

    if script_nickname:
        # For example: script_nickname="CIM_DataFile/mapping_processes"
        # Wildcards or directories are not accepted yet.
        script_name = "sources_types/" + script_nickname + ".py"
        DEBUG("SurvolCallbackSelect script_name=%s filtered_where_key_values=%s",
              script_name, str(filtered_where_key_values))
        # TODO: Check that there are enough parameters for this script ?
        my_source = lib_client.SourceLocal(script_name, class_name, **filtered_where_key_values)
        DEBUG("SurvolCallbackSelect my_source=%s", my_source)
        my_triplestore = my_source.get_triplestore()
        # This is returned anyway, as a triplestore that rdflib Sparql can work on.
        my_triplestore.copy_to_graph(grph)
        list_instances = my_triplestore.get_instances()
        # TODO: We filter only the objects of the right type,
        # TODO: ... but we lose all the other objects which could be stored in the output triplestore !!...
        DEBUG("SurvolCallbackSelect tp=%s class_name=%s", type(list_instances), class_name)
        DEBUG("SurvolCallbackSelect list_instances=%s", str(list_instances))
        for one_instance in list_instances:
            WARNING("SurvolCallbackSelect one_instance.__class__.__name__=%s",
                    one_instance.__class__.__name__)
            if one_instance.__class__.__name__ == class_name:
                # 'CIM_DataFile.Name=/usr/lib/systemd/systemd-journald'
                instance_url = one_instance.__class__.__name__ + "." + one_instance.m_entity_id
                one_instance.m_key_value_pairs[lib_kbase.PredicateIsDefinedBy] = lib_common.NodeLiteral(predicate_prefix)
                # Add it again, so the original Sparql query will work.
                one_instance.m_key_value_pairs[lib_kbase.PredicateSeeAlso] = lib_common.NodeLiteral(predicate_prefix)
                DEBUG("SurvolCallbackSelect instance_url=%s", instance_url)
                yield (instance_url, one_instance.m_key_value_pairs)
    else:
        entity_module = lib_util.GetEntityModule(class_name)
        if not entity_module:
            raise Exception("SurvolCallbackSelect: No module for class:%s" % class_name)
        try:
            enumerate_function = entity_module.SelectFromWhere
        except AttributeError:
            exc = sys.exc_info()[1]
            WARNING("No Enumerate for %s:%s", class_name, str(exc))
            return

        iter_enumeration = enumerate_function(filtered_where_key_values)
        # for one_key_value_dict in iter_enumeration:
        for one_key_value_dict_nodes in iter_enumeration:
            class_ontology = lib_util.OntologyClassKeys(class_name)
            # Keep only the attributes which belong to the class ontology,
            # converted from nodes to plain strings.
            ontology_key_values = {}
            for key_node, value_node in one_key_value_dict_nodes.items():
                key_str = lib_properties.PropToQName(key_node)
                if key_str in class_ontology:
                    ontology_key_values[key_str] = str(value_node)

            # This reorders the attributes if needed.
            key_value_path = lib_util.KWArgsToEntityId(class_name, **ontology_key_values)
            # key_value_path = ".".join( '%s="%s"' % ( lib_properties.PropToQName(key), str(value) ) for key, value in one_key_value_dict_nodes.items() )
            object_path = "SurvolLocalHost:" + class_name + "." + key_value_path
            one_key_value_dict_nodes[lib_kbase.PredicateIsDefinedBy] = lib_common.NodeLiteral(predicate_prefix)
            # Add it again, so the original Sparql query will work.
            one_key_value_dict_nodes[lib_kbase.PredicateSeeAlso] = lib_common.NodeLiteral(predicate_prefix)
            yield (object_path, one_key_value_dict_nodes)