def _to_service(self, thing):
    """Resolve ``thing`` to an Analysis Service object.

    :param thing: UID/Catalog Brain/Object/Something
    :returns: Analysis Service object or None
    """
    # A UID gets dereferenced to its object first
    if api.is_uid(thing):
        thing = api.get_object_by_uid(thing, None)

    # Anything that is not an object/brain at this point cannot be resolved
    if not api.is_object(thing):
        logger.warn("'{}' is not a valid object!".format(repr(thing)))
        return None

    # Wake up brains to their full objects
    candidate = api.get_object(thing)

    # A service can be returned directly
    if IAnalysisService.providedBy(candidate):
        return candidate

    # An analysis knows its service
    if IAnalysis.providedBy(candidate):
        return candidate.getAnalysisService()

    # Neither a service nor an analysis: refuse the object.
    # This should never happen.
    portal_type = api.get_portal_type(candidate)
    logger.error("ARAnalysesField doesn't accept objects from {} type. "
                 "The object will be dismissed.".format(portal_type))
    return None
def _to_service(self, thing):
    """Convert to Analysis Service

    :param thing: UID/Catalog Brain/Object/Something
    :returns: Analysis Service object or None
    """
    # Dereference UIDs; leave everything else untouched
    thing = api.get_object_by_uid(thing, None) if api.is_uid(thing) else thing

    # Guard: we can only work with real objects or brains
    if not api.is_object(thing):
        logger.warn("'{}' is not a valid object!".format(repr(thing)))
        return None

    # Make sure we hold the full object, not a catalog brain
    target = api.get_object(thing)
    if IAnalysisService.providedBy(target):
        return target
    if IAnalysis.providedBy(target):
        return target.getAnalysisService()

    # An object, but neither an Analysis nor AnalysisService?
    # This should never happen.
    msg = "ARAnalysesField doesn't accept objects from {} type. " \
          "The object will be dismissed.".format(api.get_portal_type(target))
    logger.warn(msg)
    return None
def after_verify(obj):
    """Event fired after the verify transition is triggered

    NOTE(review): the original docstring said "after receive (Process)";
    this handler is bound to "verify", as the promote call below shows.
    """
    logger.info("*** Custom after_verify transition ***")
    # Analyses and duplicates delegate to the shared analysis event handler
    if IAnalysis.providedBy(obj) or IDuplicateAnalysis.providedBy(obj):
        analysis_events.after_verify(obj)
    # For an Analysis Request, cascade the "verify" transition to the AR
    if IAnalysisRequest.providedBy(obj):
        _promote_transition(obj, "verify")
def after_submit(obj):
    """Event handler that runs once the submit transition took place
    """
    logger.info("*** Custom after_submit transition ***")
    # Plain analyses and duplicates share the same submit handling
    is_analysis_like = any([IAnalysis.providedBy(obj),
                            IDuplicateAnalysis.providedBy(obj)])
    if is_analysis_like:
        analysis_events.after_submit(obj)
    # Cascade the transition to the Analysis Request itself
    if IAnalysisRequest.providedBy(obj):
        _promote_transition(obj, "submit")
def _resolve_items_to_service_uids(items): portal = api.portal.get() # We need to send a list of service UIDS to setAnalyses function. # But we may have received one, or a list of: # AnalysisService instances # Analysis instances # service titles # service UIDs # service Keywords service_uids = [] # Maybe only a single item was passed if type(items) not in (list, tuple): items = [ items, ] for item in items: uid = False # service objects if IAnalysisService.providedBy(item): uid = item.UID() service_uids.append(uid) # Analysis objects (shortcut for eg copying analyses from other AR) if IAnalysis.providedBy(item): uid = item.getService().UID() service_uids.append(uid) # Maybe object UID. bsc = getToolByName(portal, 'bika_setup_catalog') brains = bsc(UID=item) if brains: uid = brains[0].UID service_uids.append(uid) # Maybe service Title bsc = getToolByName(portal, 'bika_setup_catalog') brains = bsc(portal_type='AnalysisService', title=item) if brains: uid = brains[0].UID service_uids.append(uid) # Maybe service Title bsc = getToolByName(portal, 'bika_setup_catalog') brains = bsc(portal_type='AnalysisService', getKeyword=item) if brains: uid = brains[0].UID service_uids.append(uid) if not uid: raise RuntimeError( str(item) + " should be the UID, title, keyword " " or title of an AnalysisService.") return service_uids
def _resolve_items_to_service_uids(items):
    """Translate the given items to Analysis Service UIDs.

    Accepts one item or a list of items; each item may be an
    AnalysisService instance, an Analysis instance, a service UID, a
    service title or a service keyword. Every matching check appends a
    UID, so an item that matches more than one check contributes more
    than one entry (original behavior preserved).

    :raises RuntimeError: if an item matches none of the checks
    """
    portal = api.portal.get()
    service_uids = []

    # Tolerate a single, non-list item
    if type(items) not in (list, tuple):
        items = [items, ]

    for item in items:
        uid = False

        # Direct Analysis Service object
        if IAnalysisService.providedBy(item):
            uid = item.UID()
            service_uids.append(uid)

        # Analysis object (e.g. when copying analyses from another AR)
        if IAnalysis.providedBy(item):
            uid = item.getService().UID()
            service_uids.append(uid)

        # Fall back to catalog lookups: UID, then title, then keyword
        setup_catalog = getToolByName(portal, 'bika_setup_catalog')
        catalog_queries = (
            dict(UID=item),
            dict(portal_type='AnalysisService', title=item),
            dict(portal_type='AnalysisService', getKeyword=item),
        )
        for query in catalog_queries:
            matches = setup_catalog(**query)
            if matches:
                uid = matches[0].UID
                service_uids.append(uid)

        if not uid:
            raise RuntimeError(
                str(item) + " should be the UID, title, keyword "
                " or title of an AnalysisService.")

    return service_uids
def _get_service_uid(self, item):
    """Return the Analysis Service UID for the given item, or None.

    :param item: UID, catalog brain or object pointing to a service
    """
    # Already a UID? Nothing to resolve then
    if api.is_uid(item):
        return item

    # Only objects/brains can be resolved further
    if not api.is_object(item):
        logger.warn("Not an UID: {}".format(item))
        return None

    instance = api.get_object(item)
    if IAnalysisService.providedBy(instance):
        return api.get_uid(instance)
    if IAnalysis.providedBy(instance) and IRequestAnalysis.providedBy(instance):
        return instance.getServiceUID()

    # An object, but neither an Analysis nor AnalysisService?
    # This should never happen.
    logger.warn("ARAnalysesField doesn't accept objects from {} type. "
                "The object will be dismissed."
                .format(api.get_portal_type(instance)))
    return None
def _resolve_items_to_service_uids(items):
    """ Returns a list of service uids without duplicates based on the items
    :param items: A list (or one object) of service-related info items. The
        list can be heterogeneous and each item can be:
        - Analysis Service instance
        - Analysis instance
        - Analysis Service title
        - Analysis Service UID
        - Analysis Service Keyword
        If an item that doesn't match any of the criterias above is found, the
        function will raise a RuntimeError
    """
    # portal/bsc are acquired lazily, only if a catalog lookup is needed
    portal = None
    bsc = None
    service_uids = []

    # Maybe only a single item was passed
    if type(items) not in (list, tuple):
        items = [items, ]
    for item in items:
        # service objects
        if IAnalysisService.providedBy(item):
            uid = item.UID()
            service_uids.append(uid)
            continue

        # Analysis objects (shortcut for eg copying analyses from other AR)
        if IAnalysis.providedBy(item):
            uid = item.getService().UID()
            service_uids.append(uid)
            continue

        # An object UID already there?
        if (item in service_uids):
            continue

        # Maybe object UID.
        portal = portal if portal else api.portal.get()
        bsc = bsc if bsc else getToolByName(portal, 'bika_setup_catalog')
        brains = bsc(UID=item)
        if brains:
            uid = brains[0].UID
            service_uids.append(uid)
            continue

        # Maybe service Title
        brains = bsc(portal_type='AnalysisService', title=item)
        if brains:
            uid = brains[0].UID
            service_uids.append(uid)
            continue

        # Maybe service Keyword
        brains = bsc(portal_type='AnalysisService', getKeyword=item)
        if brains:
            uid = brains[0].UID
            service_uids.append(uid)
            continue

        # Nothing matched: the item cannot be resolved to a service.
        # NOTE(review): the message reads "keyword  or title" (title appears
        # twice) -- presumably "keyword, title or UID" was intended; confirm
        # before changing the user-facing text.
        raise RuntimeError(
            str(item) + " should be the UID, title, keyword "
            " or title of an AnalysisService.")

    # De-duplicate; note the original ordering is not preserved
    return list(set(service_uids))
def _resolve_items_to_service_uids(items):
    """Return the de-duplicated list of service UIDs for the given items.

    Each item may be an Analysis Service instance, an Analysis instance,
    or a service UID/title/keyword; a single non-list item is accepted as
    well.

    :raises RuntimeError: for any item that cannot be resolved
    """
    portal = None
    bsc = None
    resolved = []

    # Normalize a single item into a list
    if type(items) not in (list, tuple):
        items = [items]

    for item in items:
        # Direct service object
        if IAnalysisService.providedBy(item):
            resolved.append(item.UID())
            continue

        # Analysis object (e.g. when copying analyses from another AR)
        if IAnalysis.providedBy(item):
            resolved.append(item.getService().UID())
            continue

        # A UID we already collected
        if item in resolved:
            continue

        # Acquire portal and setup catalog lazily, on first lookup only
        if not bsc:
            portal = portal if portal else api.portal.get()
            bsc = getToolByName(portal, 'bika_setup_catalog')

        # Try UID first, then title, then keyword
        brains = bsc(UID=item)
        if not brains:
            brains = bsc(portal_type='AnalysisService', title=item)
        if not brains:
            brains = bsc(portal_type='AnalysisService', getKeyword=item)
        if brains:
            resolved.append(brains[0].UID)
            continue

        raise RuntimeError(
            str(item) + " should be the UID, title, keyword "
            " or title of an AnalysisService.")

    return list(set(resolved))
def is_out_of_range(brain_or_object, result=_marker):
    """Checks if the result for the analysis passed in is out of range and/or
    out of shoulders range.

        min.warn        min                     max        max.warn
    ....|---------------|=====================|---------------|....
    --- out-of-range ---><----- in-range -----><--- out-of-range ---
        <-- shoulder ---><----- in-range -----><--- shoulder -->

    :param brain_or_object: A single catalog brain or content object
    :param result: Tentative result. If None, use the analysis result
    :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
    :returns: Tuple of two elements. The first value is `True` if the result
        is out of range and `False` if it is in range. The second value is
        `True` if the result is out of shoulder range and `False` if it is in
        shoulder range
    :rtype: (bool, bool)
    """
    # Only (reference) analyses are supported
    analysis = api.get_object(brain_or_object)
    if not IAnalysis.providedBy(analysis) and \
            not IReferenceAnalysis.providedBy(analysis):
        api.fail("{} is not supported. Needs to be IAnalysis or "
                 "IReferenceAnalysis".format(repr(analysis)))

    # No tentative result given: fall back to the analysis' own result
    if result is _marker:
        result = api.safe_getattr(analysis, "getResult", None)

    if result in [None, '']:
        # Empty result
        return False, False

    if IDuplicateAnalysis.providedBy(analysis):
        # Result range for duplicate analyses is calculated from the original
        # result, applying a variation % in shoulders. If the analysis has
        # result options enabled or string results enabled, system returns an
        # empty result range for the duplicate: result must match %100 with the
        # original result
        original = analysis.getAnalysis()
        original_result = original.getResult()

        # Does original analysis have a valid result?
        if original_result in [None, '']:
            return False, False

        # Does original result type matches with duplicate result type?
        if api.is_floatable(result) != api.is_floatable(original_result):
            return True, True

        # Does analysis has result options enabled or non-floatable?
        if analysis.getResultOptions() or not api.is_floatable(original_result):
            # Let's always assume the result is 'out from shoulders', cause we
            # consider the shoulders are precisely the duplicate variation %
            out_of_range = original_result != result
            return out_of_range, out_of_range

    elif not api.is_floatable(result):
        # A non-duplicate with non-floatable result. There is no chance to know
        # if the result is out-of-range
        return False, False

    # Convert result to a float
    result = api.to_float(result)

    # Note that routine analyses, duplicates and reference analyses all them
    # implement the function getResultRange:
    # - For routine analyses, the function returns the valid range based on the
    #   specs assigned during the creation process.
    # - For duplicates, the valid range is the result of the analysis the
    #   the duplicate was generated from +/- the duplicate variation.
    # - For reference analyses, getResultRange returns the valid range as
    #   indicated in the Reference Sample from which the analysis was created.
    result_range = api.safe_getattr(analysis, "getResultsRange", None)
    if not result_range:
        # No result range defined or the passed in object does not suit
        return False, False

    # Maybe there is a custom adapter
    adapters = getAdapters((analysis,), IResultOutOfRange)
    for name, adapter in adapters:
        ret = adapter(result=result, specification=result_range)
        if not ret or not ret.get('out_of_range', False):
            continue
        if not ret.get('acceptable', True):
            # Out of range + out of shoulders
            return True, True
        # Out of range, but in shoulders
        return True, False

    # Wrap in a dict-like helper that exposes min/max/operators as attributes
    result_range = ResultsRangeDict(result_range)

    # The assignment of result as default fallback for min and max guarantees
    # the result will be in range also if no min/max values are defined
    specs_min = api.to_float(result_range.min, result)
    specs_max = api.to_float(result_range.max, result)

    # Check the lower bound; "geq" means the minimum itself is acceptable
    in_range = False
    min_operator = result_range.min_operator
    if min_operator == "geq":
        in_range = result >= specs_min
    else:
        in_range = result > specs_min

    # Check the upper bound only if the lower bound already passed
    max_operator = result_range.max_operator
    if in_range:
        if max_operator == "leq":
            in_range = result <= specs_max
        else:
            in_range = result < specs_max

    # If in range, no need to check shoulders
    if in_range:
        return False, False

    # Out of range, check shoulders. If no explicit warn_min or warn_max have
    # been defined, no shoulders must be considered for this analysis. Thus, use
    # specs' min and max as default fallback values
    warn_min = api.to_float(result_range.warn_min, specs_min)
    warn_max = api.to_float(result_range.warn_max, specs_max)
    in_shoulder = warn_min <= result <= warn_max
    return True, not in_shoulder
def is_out_of_range(brain_or_object, result=_marker):
    """Checks if the result for the analysis passed in is out of range and/or
    out of shoulders range.

        warn_min        min                     max        warn_max
    ....|---------------|=====================|---------------|....
    --- out-of-range ---><----- in-range -----><--- out-of-range ---
        <-- shoulder ---><----- in-range -----><--- shoulder -->

    :param brain_or_object: A single catalog brain or content object
    :param result: Tentative result. If None, use the analysis result
    :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
    :returns: Tuple of two elements. The first value is `True` if the result
        is out of range and `False` if it is in range. The second value is
        `True` if the result is out of shoulder range and `False` if it is in
        shoulder range
    :rtype: (bool, bool)
    """
    # Only (reference) analyses are supported
    analysis = api.get_object(brain_or_object)
    if not IAnalysis.providedBy(analysis) and \
            not IReferenceAnalysis.providedBy(analysis):
        api.fail("{} is not supported. Needs to be IAnalysis or "
                 "IReferenceAnalysis".format(repr(analysis)))

    # No tentative result given: fall back to the analysis' own result
    if result is _marker:
        result = api.safe_getattr(analysis, "getResult", None)
    if not api.is_floatable(result):
        # Result is empty/None or not a valid number
        return False, False

    result = api.to_float(result)

    # Note that routine analyses, duplicates and reference analyses all them
    # implement the function getResultRange:
    # - For routine analyses, the function returns the valid range based on the
    #   specs assigned during the creation process.
    # - For duplicates, the valid range is the result of the analysis the
    #   the duplicate was generated from +/- the duplicate variation.
    # - For reference analyses, getResultRange returns the valid range as
    #   indicated in the Reference Sample from which the analysis was created.
    result_range = api.safe_getattr(analysis, "getResultsRange", None)
    if not result_range:
        # No result range defined or the passed in object does not suit
        return False, False

    # Maybe there is a custom adapter
    adapters = getAdapters((analysis, ), IResultOutOfRange)
    for name, adapter in adapters:
        ret = adapter(result=result, specification=result_range)
        if not ret or not ret.get('out_of_range', False):
            continue
        if not ret.get('acceptable', True):
            # Out of range + out of shoulders
            return True, True
        # Out of range, but in shoulders
        return True, False

    # The assignment of result as default fallback for min and max guarantees
    # the result will be in range also if no min/max values are defined
    specs_min = api.to_float(result_range.get('min', result), result)
    specs_max = api.to_float(result_range.get('max', result), result)
    if specs_min <= result <= specs_max:
        # In range, no need to check shoulders
        return False, False

    # Out of range, check shoulders. If no explicit warn_min or warn_max have
    # been defined, no shoulders must be considered for this analysis. Thus, use
    # specs' min and max as default fallback values
    warn_min = api.to_float(result_range.get('warn_min', specs_min), specs_min)
    warn_max = api.to_float(result_range.get('warn_max', specs_max), specs_max)
    in_shoulder = warn_min <= result <= warn_max
    return True, not in_shoulder
def is_analysis(self, obj):
    """Returns whether the given object provides the IAnalysis interface
    """
    provides_analysis = IAnalysis.providedBy(obj)
    return provides_analysis
def __call__(self, action, objects):
    """Apply the given workflow action (submit) to the passed-in analyses.

    Reads per-analysis form values (remarks, instrument, method, analyst,
    uncertainty, detection limit, interims, hidden flag, result), stores
    them on each analysis and triggers the transition. If a reference
    analysis with an assigned instrument yields an out-of-range result,
    all to-be-verified analyses on that instrument are retracted and a
    PDF report is generated and mailed.

    :param action: workflow action id to perform (e.g. "submit")
    :param objects: analyses-like objects selected in the listing
    :returns: redirect/success response from the base adapter
    """
    # Store invalid instruments-ref.analyses
    invalid_instrument_refs = defaultdict(set)

    # Get interims data
    interims_data = self.get_interims_data()

    for analysis in objects:
        # Using the global WF menu passes the AR as context
        # https://github.com/senaite/senaite.core/issues/1306
        if not IAnalysis.providedBy(analysis):
            continue

        uid = api.get_uid(analysis)

        # Need to save remarks?
        remarks = self.get_form_value("Remarks", uid, default="")
        analysis.setRemarks(remarks)

        # Need to save the instrument?
        instrument = self.get_form_value("Instrument", uid, None)
        if instrument is not None:
            # Could be an empty string
            instrument = instrument or None
            analysis.setInstrument(instrument)
            if instrument and IReferenceAnalysis.providedBy(analysis):
                # BUGFIX: is_out_of_range returns an (out_of_range,
                # out_of_shoulders) tuple; a bare truthiness check on the
                # tuple is always True. Use the first element explicitly.
                if is_out_of_range(analysis)[0]:
                    # This reference analysis is out of range, so we have
                    # to retract all analyses assigned to this same
                    # instrument that are awaiting for verification
                    invalid_instrument_refs[uid].add(analysis)
                else:
                    # The reference result is valid, so make the instrument
                    # available again for further analyses
                    instrument.setDisposeUntilNextCalibrationTest(False)

        # Need to save the method?
        method = self.get_form_value("Method", uid, default=None)
        if method is not None:
            method = method or None
            analysis.setMethod(method)

        # Need to save analyst?
        analyst = self.get_form_value("Analyst", uid, default=None)
        if analyst is not None:
            analysis.setAnalyst(analyst)

        # Save uncertainty
        uncertainty = self.get_form_value("Uncertainty", uid, "")
        analysis.setUncertainty(uncertainty)

        # Save detection limit
        dlimit = self.get_form_value("DetectionLimitOperand", uid, "")
        analysis.setDetectionLimitOperand(dlimit)

        # Interim fields
        interims = interims_data.get(uid, analysis.getInterimFields())
        analysis.setInterimFields(interims)

        # Save Hidden
        hidden = self.get_form_value("Hidden", uid, "")
        analysis.setHidden(hidden == "on")

        # Only set result if it differs from the actual value to preserve
        # the result capture date
        result = self.get_form_value("Result", uid,
                                     default=analysis.getResult())
        if result != analysis.getResult():
            analysis.setResult(result)

    # Submit all analyses
    transitioned = self.do_action(action, objects)
    if not transitioned:
        return self.redirect(message=_("No changes made"), level="warning")

    # If a reference analysis with an out-of-range result and instrument
    # assigned has been submitted, retract then routine analyses that are
    # awaiting for verification and with same instrument associated
    retracted = list()
    for invalid_instrument_uid in invalid_instrument_refs.keys():
        query = dict(getInstrumentUID=invalid_instrument_uid,
                     portal_type=['Analysis', 'DuplicateAnalysis'],
                     review_state='to_be_verified',)
        brains = api.search(query, CATALOG_ANALYSIS_LISTING)
        for brain in brains:
            analysis = api.get_object(brain)
            failed_msg = '{0}: {1}'.format(
                ulocalized_time(DateTime(), long_format=1),
                _("Instrument failed reference test"))
            an_remarks = analysis.getRemarks()
            analysis.setRemarks('. '.join([an_remarks, failed_msg]))
            retracted.append(analysis)

    # If some analyses have been retracted because instrument failed a
    # reference test, then generate a pdf report
    if self.do_action("retract", retracted):
        # Create the Retracted Analyses List
        portal_url = api.get_url(api.get_portal())
        report = AnalysesRetractedListReport(self.context, self.request,
                                             portal_url,
                                             'Retracted analyses',
                                             retracted)

        # Attach the pdf to all ReferenceAnalysis that failed (accessible
        # from Instrument's Internal Calibration Tests list
        pdf = report.toPdf()
        for refs in invalid_instrument_refs.values():
            # BUGFIX: the dict values are *sets* of reference analyses;
            # the report must be attached to each analysis, not to the set
            for ref in refs:
                ref.setRetractedAnalysesPdfReport(pdf)

        # Send the email
        try:
            report.sendEmail()
        except Exception as err_msg:
            message = "Unable to send email: {}".format(err_msg)
            logger.warn(message)

    # Redirect to success view
    return self.success(transitioned)