def getFromString(obj, string, default=None):
    """Resolve a dotted attribute path against ``obj``.

    Walks each attribute named in ``string`` (e.g. ``"foo.bar.baz"``),
    stopping early as soon as any step resolves to a falsy value.

    NOTE(review): falsy-but-valid terminal values (0, "", False) are also
    replaced by ``default`` — presumably intentional; confirm with callers.

    :param obj: object to start the traversal from
    :param string: dotted attribute path
    :param default: value returned when the path cannot be resolved
    :return: the resolved value, or ``default``
    """
    current = obj
    for name in string.split('.'):
        current = api.safe_getattr(current, name, default=None)
        if not current:
            # Dead end: no point walking further down the path
            break
    return current or default
def searchable_text(instance):
    """Retrieves all the values of metadata columns in the catalog for
    wildcard searches

    :return: all metadata values joined in a string
    """
    catalog = api.get_tool(CATALOG_PATIENTS)
    # Fetch the catalog brain for this object, if it is indexed at all
    brains = catalog({"UID": api.get_uid(instance)})
    brain = brains[0] if brains else None

    entries = []
    for column in catalog.schema():
        # Prefer the brain's metadata value; fall back to the instance
        value = api.safe_getattr(brain, column, None)
        if not value:
            value = api.safe_getattr(instance, column, None)
        entries.append(api.to_searchable_text_metadata(value))

    # Concatenate all strings to one text blob
    return " ".join(entries)
def searchable_text(instance):
    """Retrieves all the values of metadata columns in the catalog for
    wildcard searches

    :return: all metadata values joined in a string
    """
    uid = api.get_uid(instance)
    catalog = api.get_tool(CATALOG_PATIENTS)
    results = catalog({"UID": uid})
    # The brain may be missing if the object is not (yet) catalogued
    brain = results[0] if results else None

    tokens = []
    for column in catalog.schema():
        brain_value = api.safe_getattr(brain, column, None)
        instance_value = api.safe_getattr(instance, column, None)
        tokens.append(
            api.to_searchable_text_metadata(brain_value or instance_value))

    # Concatenate all strings to one text blob
    return " ".join(tokens)
def generic_listing_searchable_text(instance, catalog_name,
                                    exclude_field_names=None,
                                    include_field_names=None):
    """Retrieves all the values of metadata columns in the catalog for
    wildcard searches

    :param instance: the object to retrieve metadata/values from
    :param catalog_name: the catalog to retrieve metadata from
    :param exclude_field_names: field names to exclude from the metadata
    :param include_field_names: field names to include, even if no metadata
    :return: all metadata values joined in a string
    """
    entries = set()

    # Fields to include/exclude. Work on a *copy* of the include list:
    # it is consumed below with remove(), and mutating the caller's list
    # would silently drop fields on subsequent calls
    include = list(include_field_names or [])
    exclude = exclude_field_names or []

    # Get the metadata fields from this instance and catalog
    catalog = api.get_tool(catalog_name)
    metadata = get_metadata_for(instance, catalog)
    for key, brain_value in metadata.items():
        if key in exclude:
            continue
        elif key in include:
            # A metadata field already
            include.remove(key)
        instance_value = api.safe_getattr(instance, key, None)
        parsed = api.to_searchable_text_metadata(brain_value or instance_value)
        entries.add(parsed)

    # Include values from additional fields
    for field_name in include:
        field_value = api.safe_getattr(instance, field_name, None)
        field_value = api.to_searchable_text_metadata(field_value)
        entries.add(field_value)

    # Remove empties
    entries = filter(None, entries)

    # Concatenate all strings to one text blob
    return " ".join(entries)
def listing_searchable_text(instance):
    """Retrieves all the values of metadata columns in the catalog for
    wildcard searches

    :return: all metadata values joined in a string
    """
    catalog = api.get_tool(CATALOG_ANALYSIS_REQUEST_LISTING)
    found = catalog({"UID": api.get_uid(instance)})
    brain = found[0] if found else None

    entries = set()
    for column in catalog.schema():
        # Brain metadata wins; the instance attribute is the fallback
        primary = api.safe_getattr(brain, column, None)
        fallback = api.safe_getattr(instance, column, None)
        entries.add(api.to_searchable_text_metadata(primary or fallback))

    # add metadata of all descendants
    # NOTE(review): the trailing "()" presumably relies on this function
    # being wrapped by plone.indexer's @indexer decorator (the wrapped call
    # returns a callable delegate) — confirm the decorator is present in
    # the full file
    for descendant in instance.getDescendants():
        entries.add(listing_searchable_text(descendant)())

    # Concatenate all strings to one text blob
    return " ".join(entries)
def listing_searchable_text(instance):
    """Retrieves all the values of metadata columns in the catalog for
    wildcard searches

    :return: all metadata values joined in a string
    """
    entries = set()
    catalog = api.get_tool(CATALOG_ANALYSIS_REQUEST_LISTING)
    for key, brain_value in get_metadata_for(instance, catalog).items():
        # Fall back to the instance attribute when metadata is empty
        value = brain_value or api.safe_getattr(instance, key, None)
        entries.add(api.to_searchable_text_metadata(value))

    # add metadata of all descendants
    # NOTE(review): the trailing "()" presumably relies on this function
    # being wrapped by plone.indexer's @indexer decorator (the wrapped call
    # returns a callable delegate) — confirm the decorator is present in
    # the full file
    for descendant in instance.getDescendants():
        entries.add(listing_searchable_text(descendant)())

    # Concatenate all strings to one text blob
    return " ".join(entries)
def get_value(instance, func_name):
    """Read the attribute ``func_name`` from ``instance`` and return its
    searchable-text representation.

    :return: the parsed text value, or None when the attribute is missing,
        falsy, or parses to an empty value
    """
    raw = api.safe_getattr(instance, func_name, None)
    if not raw:
        return None
    # Normalize falsy parse results to None as well
    return api.to_searchable_text_metadata(raw) or None
def get_interims_keywords(analysis):
    """Return the keywords of the interim fields of the analysis.

    Uses a list comprehension instead of ``map`` + ``lambda`` so the
    result is a concrete list under both Python 2 and Python 3 (``map``
    returns a lazy iterator in Python 3).

    :param analysis: the analysis to read interim fields from
    :return: list of interim field keywords
    """
    interims = api.safe_getattr(analysis, 'getInterimFields')
    return [item['keyword'] for item in interims]
def is_out_of_range(brain_or_object, result=_marker):
    """Checks if the result for the analysis passed in is out of range
    and/or out of shoulders range.

             warn_min  min                   max  warn_max
        ·········|---------|=====================|---------|·········
        <----- OOR -------><------ in-range -----><------ OOR ------>
                 <shoulder>                       <shoulder>

    :param brain_or_object: A single catalog brain or content object
    :param result: Tentative result. If None, use the analysis result
    :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
    :returns: Tuple of two elements. The first value is `True` if the result
    is out of range and `False` if it is in range. The second value is `True`
    if the result is out of shoulder range and `False` if it is in shoulder
    range
    :rtype: (bool, bool)
    """
    analysis = api.get_object(brain_or_object)
    supported = (IAnalysis.providedBy(analysis) or
                 IReferenceAnalysis.providedBy(analysis))
    if not supported:
        api.fail("{} is not supported. Needs to be IAnalysis or "
                 "IReferenceAnalysis".format(repr(analysis)))

    if result is _marker:
        result = api.safe_getattr(analysis, "getResult", None)

    if result in [None, '']:
        # Empty result
        return False, False

    if IDuplicateAnalysis.providedBy(analysis):
        # Result range for duplicate analyses is calculated from the original
        # result, applying a variation % in shoulders. If the analysis has
        # result options enabled or string results enabled, system returns an
        # empty result range for the duplicate: result must match %100 with
        # the original result
        source = analysis.getAnalysis()
        source_result = source.getResult()

        # Does original analysis have a valid result?
        if source_result in [None, '']:
            return False, False

        # Does original result type matches with duplicate result type?
        if api.is_floatable(result) != api.is_floatable(source_result):
            return True, True

        # Does analysis has result options enabled or non-floatable?
        if analysis.getResultOptions() or not api.is_floatable(source_result):
            # Let's always assume the result is 'out from shoulders', cause
            # we consider the shoulders are precisely the duplicate
            # variation %
            mismatch = source_result != result
            return mismatch, mismatch

    elif not api.is_floatable(result):
        # A non-duplicate with non-floatable result. There is no chance to
        # know if the result is out-of-range
        return False, False

    # Convert result to a float
    result = api.to_float(result)

    # Note that routine analyses, duplicates and reference analyses all them
    # implement the function getResultRange:
    # - For routine analyses, the function returns the valid range based on
    #   the specs assigned during the creation process.
    # - For duplicates, the valid range is the result of the analysis the
    #   duplicate was generated from +/- the duplicate variation.
    # - For reference analyses, getResultRange returns the valid range as
    #   indicated in the Reference Sample from which the analysis was created
    result_range = api.safe_getattr(analysis, "getResultsRange", None)
    if not result_range:
        # No result range defined or the passed in object does not suit
        return False, False

    # Maybe there is a custom adapter
    for name, adapter in getAdapters((analysis,), IResultOutOfRange):
        outcome = adapter(result=result, specification=result_range)
        if not outcome or not outcome.get('out_of_range', False):
            continue
        if not outcome.get('acceptable', True):
            # Out of range + out of shoulders
            return True, True
        # Out of range, but in shoulders
        return True, False

    result_range = ResultsRangeDict(result_range)

    # The assignment of result as default fallback for min and max guarantees
    # the result will be in range also if no min/max values are defined
    specs_min = api.to_float(result_range.min, result)
    specs_max = api.to_float(result_range.max, result)

    # Lower bound: strict or inclusive comparison per the range operator
    if result_range.min_operator == "geq":
        in_range = result >= specs_min
    else:
        in_range = result > specs_min

    # Upper bound, only worth checking when the lower bound passed
    if in_range:
        if result_range.max_operator == "leq":
            in_range = result <= specs_max
        else:
            in_range = result < specs_max

    # If in range, no need to check shoulders
    if in_range:
        return False, False

    # Out of range, check shoulders. If no explicit warn_min or warn_max have
    # been defined, no shoulders must be considered for this analysis. Thus,
    # use specs' min and max as default fallback values
    warn_min = api.to_float(result_range.warn_min, specs_min)
    warn_max = api.to_float(result_range.warn_max, specs_max)
    in_shoulder = warn_min <= result <= warn_max
    return True, not in_shoulder
def is_out_of_range(brain_or_object, result=_marker):
    """Checks if the result for the analysis passed in is out of range
    and/or out of shoulders range.

             warn_min  min                   max  warn_max
        ·········|---------|=====================|---------|·········
        <----- OOR -------><------ in-range -----><------ OOR ------>
                 <shoulder>                       <shoulder>

    :param brain_or_object: A single catalog brain or content object
    :param result: Tentative result. If None, use the analysis result
    :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
    :returns: Tuple of two elements. The first value is `True` if the result
    is out of range and `False` if it is in range. The second value is `True`
    if the result is out of shoulder range and `False` if it is in shoulder
    range
    :rtype: (bool, bool)
    """
    analysis = api.get_object(brain_or_object)
    if not (IAnalysis.providedBy(analysis) or
            IReferenceAnalysis.providedBy(analysis)):
        api.fail("{} is not supported. Needs to be IAnalysis or "
                 "IReferenceAnalysis".format(repr(analysis)))

    if result is _marker:
        result = api.safe_getattr(analysis, "getResult", None)
    if not api.is_floatable(result):
        # Result is empty/None or not a valid number
        return False, False

    result = api.to_float(result)

    # Note that routine analyses, duplicates and reference analyses all them
    # implement the function getResultRange:
    # - For routine analyses, the function returns the valid range based on
    #   the specs assigned during the creation process.
    # - For duplicates, the valid range is the result of the analysis the
    #   duplicate was generated from +/- the duplicate variation.
    # - For reference analyses, getResultRange returns the valid range as
    #   indicated in the Reference Sample from which the analysis was created
    result_range = api.safe_getattr(analysis, "getResultsRange", None)
    if not result_range:
        # No result range defined or the passed in object does not suit
        return False, False

    # Maybe there is a custom adapter
    for name, adapter in getAdapters((analysis, ), IResultOutOfRange):
        verdict = adapter(result=result, specification=result_range)
        if not verdict or not verdict.get('out_of_range', False):
            continue
        if not verdict.get('acceptable', True):
            # Out of range + out of shoulders
            return True, True
        # Out of range, but in shoulders
        return True, False

    # The assignment of result as default fallback for min and max guarantees
    # the result will be in range also if no min/max values are defined
    specs_min = api.to_float(result_range.get('min', result), result)
    specs_max = api.to_float(result_range.get('max', result), result)
    if specs_min <= result <= specs_max:
        # In range, no need to check shoulders
        return False, False

    # Out of range, check shoulders. If no explicit warn_min or warn_max have
    # been defined, no shoulders must be considered for this analysis. Thus,
    # use specs' min and max as default fallback values
    warn_min = api.to_float(result_range.get('warn_min', specs_min),
                            specs_min)
    warn_max = api.to_float(result_range.get('warn_max', specs_max),
                            specs_max)
    within_shoulder = warn_min <= result <= warn_max
    return True, not within_shoulder
def get_attr_from_field(instance, fieldname, attrname, default=None):
    """Dereference the object stored in ``fieldname`` of ``instance`` and
    read ``attrname`` from it.

    :return: the attribute value, or ``default`` when the field is empty or
        the attribute is missing
    """
    referenced = get_obj_from_field(instance, fieldname, None)
    if not referenced:
        return default
    return api.safe_getattr(referenced, attr=attrname, default=default)
def get_attr_from_field(instance, fieldname, attrname, default=None):
    """Read ``attrname`` from the object referenced by ``fieldname``.

    Returns ``default`` when the field holds no object or the attribute
    cannot be read.
    """
    target = get_obj_from_field(instance, fieldname, None)
    if target:
        return api.safe_getattr(target, attr=attrname, default=default)
    return default