def getResultsRange(self):
    """Returns the valid result range for this analysis duplicate, based
    both on the result and the duplicate variation set in the original
    analysis.

    A duplicate is out of range when its result does not match the parent
    analysis result, allowing the duplicate variation in % as the margin
    of error.

    :return: A dictionary with the keys min and max
    :rtype: dict
    """
    ranges = ResultsRangeDict()
    source = self.getAnalysis()
    if not source:
        # No original analysis, nothing to compare against
        return ranges

    source_result = source.getResult()
    if not api.is_floatable(source_result):
        # Non-numeric result, no numeric range can be computed
        return ranges

    # Default to an exact match with the original result
    ranges.min = ranges.max = source_result

    variation = api.to_float(source.getDuplicateVariation())
    if not variation:
        # Without a variation %, an exact match is expected
        return ranges

    # Apply the variation % as the margin of error on both ends
    value = api.to_float(source_result)
    error = abs(value) * (variation / 100.0)
    ranges.min = str(value - error)
    ranges.max = str(value + error)
    return ranges
def get_specs(self, service):
    """Returns the analysis specs available in the request for the given
    service uid
    """
    uid = api.get_uid(service)
    keyword = service.getKeyword()
    # Start from the default spec keys/values for this service
    specs = ResultsRangeDict(keyword=keyword, uid=uid).copy()
    form = self.request.form
    for key in specs.keys():
        # Form values come as a one-element list holding a dict keyed by
        # service uid; keep the default when no value was submitted
        submitted = form.get(key, [{}])[0].get(uid, None)
        if submitted:
            specs[key] = submitted
    return specs
def getResultsRange(self):
    """Returns the valid result range for this routine analysis, based on
    the results ranges defined in the Analysis Request this routine
    analysis is assigned to.

    A routine analysis is considered out of range when its result falls
    outside the "min"/"max" values. When "warn_min" and "warn_max" are
    set, they define the shoulders at both ends of the range: an analysis
    can then be out of range, but still within shoulders.

    :return: A dictionary with keys "min", "max", "warn_min" and "warn_max"
    :rtype: dict
    """
    fallback = ResultsRangeDict()
    request = self.getRequest()
    if not request:
        return fallback

    # Pick the result range defined for this analysis' keyword
    keyword = self.getKeyword()
    matches = [rng for rng in request.getResultsRange()
               if rng.get('keyword', '') == keyword]

    rr = fallback
    if matches:
        candidate = matches[0].copy()
        if candidate:
            rr = candidate

    # Calculated Specification: if the range is bound to a calculation,
    # let the calculation (re)compute the range dynamically
    calc = api.get_object_by_uid(rr.get("calculation"), None)
    if calc:
        spec = rr.copy()
        spec["analysis_uid"] = self.UID()
        calc_spec = calc.calculate_result(mapping={"spec": spec}, default=rr)
        if calc_spec:
            rr.update(calc_spec)

    return rr
def get_results_range(self):
    """Get the results Range from the AR, keyed by analysis keyword
    """
    ranges = self.context.getResultsRange()
    if not ranges:
        return ResultsRangeDict()
    return dicts_to_dict(ranges, "keyword")
def get_formatted_interval(results_range, default=_marker):
    """Returns a string representation of the interval defined by the
    results range passed in

    :param results_range: a dict or a ResultsRangeDict
    """
    if not isinstance(results_range, Mapping):
        if default is not _marker:
            return default
        api.fail("Type not supported")
    results_range = ResultsRangeDict(results_range)

    # Only keep boundaries that can be rendered as numbers
    range_min = results_range.min if api.is_floatable(results_range.min) else None
    range_max = results_range.max if api.is_floatable(results_range.max) else None
    if range_min is None and range_max is None:
        if default is not _marker:
            return default
        api.fail("Min and max values are not floatable or not defined")

    min_op = results_range.min_operator
    max_op = results_range.max_operator

    # Open-ended interval: only one of the two boundaries is set
    if range_max is None:
        return "{}{}".format(MIN_OPERATORS.getValue(min_op), range_min)
    if range_min is None:
        return "{}{}".format(MAX_OPERATORS.getValue(max_op), range_max)

    # Both values set. Return an interval with bracket style reflecting
    # whether each boundary is inclusive or exclusive
    left = '[' if min_op == 'geq' else '('
    right = ']' if max_op == 'leq' else ')'
    return "{}{};{}{}".format(left, range_min, range_max, right)
def getResultsRange(self):
    """Returns the valid result range for this analysis duplicate, based
    both on the result and the duplicate variation set in the original
    analysis.

    A duplicate is out of range when its result does not match the parent
    analysis result, allowing the duplicate variation in % as the margin
    of error. When the duplicate comes from an analysis with result
    options and/or string results enabled (non-numeric value), an empty
    result range is returned.

    :return: A dictionary with the keys min and max
    :rtype: dict
    """
    source = self.getAnalysis()
    if not source:
        logger.warn("Orphan duplicate: {}".format(repr(self)))
        return {}

    # Exact match is expected when result options are enabled
    if source.getResultOptions():
        return {}

    # Exact match is expected for non-numeric (string) results
    source_result = source.getResult()
    if not api.is_floatable(source_result):
        return {}

    ranges = ResultsRangeDict(uid=self.getServiceUID())
    variation = api.to_float(source.getDuplicateVariation(), default=0)
    if not variation:
        # No variation % set, an exact match is expected
        ranges.min = ranges.max = source_result
        return ranges

    # min/max: the original result +/- the duplicate variation %
    value = api.to_float(source_result)
    error = abs(value) * (variation / 100.0)
    ranges.min = str(value - error)
    ranges.max = str(value + error)
    return ranges
def get_panic_tuple(analysis, panic_range=None):
    """Returns a tuple of min_panic and max_panic for the given analysis.
    Resolves each item to None if not found or not valid for the analysis.
    """
    if panic_range is None:
        # Fall back to the range stored on the analysis itself
        panic_range = analysis.getResultsRange() or ResultsRangeDict()
    return (to_float_or_none(panic_range.get("min_panic", None)),
            to_float_or_none(panic_range.get("max_panic", None)))
def get(self, instance, **kwargs):
    values = super(ResultsRangesField, self).get(instance, **kwargs)

    # When a keyword or an uid is specified via "search_by", return only
    # the result range for that uid or keyword
    if "search_by" in kwargs:
        uid_or_keyword = kwargs.get("search_by")
        if not uid_or_keyword:
            return {}
        return self.getResultRange(values, uid_or_keyword) or {}

    # Convert the dict items to ResultRangeDict for easy handling
    from bika.lims.content.analysisspec import ResultsRangeDict
    return map(lambda val: ResultsRangeDict(dict(val.items())), values)
def getResultRange(self, values, uid_keyword_service):
    if not uid_keyword_service:
        return None

    if api.is_object(uid_keyword_service):
        uid_keyword_service = api.get_uid(uid_keyword_service)

    # Decide whether to search by uid or by keyword. We always assume a
    # uid of "0" refers to portal, so search by keyword in that case
    key = "keyword"
    if api.is_uid(uid_keyword_service) and uid_keyword_service != "0":
        key = "uid"

    # Find out the item for the given uid/keyword
    from bika.lims.content.analysisspec import ResultsRangeDict
    matches = [val for val in values
               if val.get(key) == uid_keyword_service]
    if not matches:
        return None
    return ResultsRangeDict(dict(matches[0].items())) or None
def getResultsRange(self):
    """Returns the valid result range for this reference analysis, based
    on the results ranges defined in the Reference Sample this analysis
    was created from.

    A Reference Analysis (control or blank) is considered out of range
    when its result does not match the result defined on its parent
    Reference Sample, with the % error as the margin of error used to set
    the range's min and max values.

    :return: A dictionary with the keys min and max
    :rtype: dict
    """
    fallback = ResultsRangeDict(result="")
    sample = self.getSample()
    if not sample:
        return fallback
    # The sample keeps one result range per service uid
    ranges_by_service = sample.getResultsRangeDict()
    return ranges_by_service.get(self.getServiceUID(), fallback)
def getResultsRange(self):
    """Returns the valid result range for this routine analysis, based on
    the results ranges defined in the Analysis Request this routine
    analysis is assigned to.

    A routine analysis is considered out of range when its result falls
    outside the "min"/"max" values. When "warn_min" and "warn_max" are
    set, they define the shoulders at both ends of the range: an analysis
    can then be out of range, but still within shoulders.

    :return: A dictionary with keys "min", "max", "warn_min" and "warn_max"
    :rtype: dict
    """
    fallback = ResultsRangeDict()
    request = self.getRequest()
    if not request:
        return fallback

    # Pick the result range that matches this analysis' keyword
    keyword = self.getKeyword()
    matches = [rng for rng in request.getResultsRange()
               if rng.get('keyword', '') == keyword]
    return matches and matches[0] or fallback
def folderitem(self, obj, item, index):
    """Service triggered each time an item is iterated in folderitems.

    The use of this service prevents the extra-loops in child objects.

    :obj: the instance of the class to be foldered
    :item: dict containing the properties of the object to be used by
        the template
    :index: current index of the item
    """
    # ensure we have an object and not a brain
    obj = api.get_object(obj)
    uid = api.get_uid(obj)

    # settings for this analysis: per-AR "hidden" override takes
    # precedence over the service's own hidden flag
    service_settings = self.context.getAnalysisServiceSettings(uid)
    hidden = service_settings.get("hidden", obj.getHidden())

    # get the category (also collected for the category folding UI)
    category = obj.getCategoryTitle()
    item["category"] = category
    if category not in self.categories:
        self.categories.append(category)

    # defaults taken from the service; overridden below when an analysis
    # already exists for this service in the current context
    price = obj.getPrice()
    keyword = obj.getKeyword()

    if uid in self.analyses:
        analysis = self.analyses[uid]
        # Might differ from the service keyword
        keyword = analysis.getKeyword()
        # Mark the row as disabled if the analysis has been submitted
        item["disabled"] = ISubmitted.providedBy(analysis)
        # get the hidden status of the analysis
        hidden = analysis.getHidden()
        # get the price of the analysis
        price = analysis.getPrice()

    # get the specification of this object (result range keyed by the
    # analysis keyword; empty range when none is defined)
    rr = self.get_results_range()
    spec = rr.get(keyword, ResultsRangeDict())

    item["Title"] = obj.Title()
    item["Unit"] = obj.getUnit()
    item["Price"] = price
    item["before"]["Price"] = self.get_currency_symbol()
    item["allow_edit"] = self.get_editable_columns(obj)
    item["selected"] = uid in self.selected
    item["min"] = str(spec.get("min", ""))
    item["max"] = str(spec.get("max", ""))
    item["warn_min"] = str(spec.get("warn_min", ""))
    item["warn_max"] = str(spec.get("warn_max", ""))
    item["Hidden"] = hidden

    # Append info link before the service
    # see: bika.lims.site.coffee for the attached event handler
    item["before"]["Title"] = get_link(
        "analysisservice_info?service_uid={}".format(uid),
        value="<span class='glyphicon glyphicon-info-sign'></span>",
        css_class="service_info")

    # Icons shown after the title (accreditation / attachment policy)
    after_icons = ""
    if obj.getAccredited():
        after_icons += get_image(
            "accredited.png", title=t(_("Accredited")))
    if obj.getAttachmentOption() == "r":
        after_icons += get_image(
            "attach_reqd.png", title=t(_("Attachment required")))
    if obj.getAttachmentOption() == "n":
        after_icons += get_image(
            "attach_no.png", title=t(_('Attachment not permitted')))
    if after_icons:
        item["after"]["Title"] = after_icons

    return item
def folderitem(self, obj, item, index):
    """Service triggered each time an item is iterated in folderitems.

    The use of this service prevents the extra-loops in child objects.

    :obj: the instance of the class to be foldered
    :item: dict containing the properties of the object to be used by
        the template
    :index: current index of the item
    """
    # ensure we have an object and not a brain
    obj = api.get_object(obj)
    uid = api.get_uid(obj)

    # settings for this analysis: per-context "hidden" override takes
    # precedence over the service's own hidden flag
    service_settings = self.context.getAnalysisServiceSettings(uid)
    hidden = service_settings.get("hidden", obj.getHidden())

    # get the category (also collected for the category folding UI)
    category = obj.getCategoryTitle()
    item["category"] = category
    if category not in self.categories:
        self.categories.append(category)

    # build the selectable choices from the active sample partitions
    parts = filter(api.is_active, self.get_partitions())
    partitions = map(
        lambda part: {
            "ResultValue": part.Title(),
            "ResultText": part.getId()
        }, parts)

    keyword = obj.getKeyword()
    partition = None
    if uid in self.analyses:
        analysis = self.analyses[uid]
        # Might differ from the service keyword
        keyword = analysis.getKeyword()
        # Mark the row as disabled if the analysis is not in an open state
        item["disabled"] = not analysis.isOpen()
        # get the hidden status of the analysis
        hidden = analysis.getHidden()
        # get the partition of the analysis
        partition = self.get_partition(analysis)
    else:
        # no analysis yet: default to the first partition
        partition = self.get_partitions()[0]

    # get the specification of this object (result range keyed by the
    # analysis keyword; empty range when none is defined)
    rr = self.get_results_range()
    spec = rr.get(keyword, ResultsRangeDict())

    item["Title"] = obj.Title()
    item["Unit"] = obj.getUnit()
    item["Price"] = obj.getPrice()
    item["before"]["Price"] = self.get_currency_symbol()
    item["allow_edit"] = self.get_editable_columns(obj)
    item["selected"] = uid in self.selected
    item["min"] = str(spec.get("min", ""))
    item["max"] = str(spec.get("max", ""))
    item["warn_min"] = str(spec.get("warn_min", ""))
    item["warn_max"] = str(spec.get("warn_max", ""))
    item["Hidden"] = hidden
    item["Partition"] = partition.getId()
    item["choices"]["Partition"] = partitions

    # Icons shown after the title (accreditation / attachment policy)
    after_icons = ""
    if obj.getAccredited():
        after_icons += get_image(
            "accredited.png", title=t(_("Accredited")))
    if obj.getAttachmentOption() == "r":
        after_icons += get_image(
            "attach_reqd.png", title=t(_("Attachment required")))
    if obj.getAttachmentOption() == "n":
        after_icons += get_image(
            "attach_no.png", title=t(_('Attachment not permitted')))
    if after_icons:
        item["after"]["Title"] = after_icons

    return item
def workflow_action_save_analyses_button(self):
    """Saves the analyses selected in the AR "Manage Analyses" form.

    Persists hidden flags and result specs for the selected services,
    (re)assigns analyses to the AR, links them to sample partitions when
    provided, and syncs the workflow state of newly created analyses with
    the AR state. Redirects back to the AR when done.
    """
    form = self.request.form
    workflow = getToolByName(self.context, 'portal_workflow')
    # NOTE(review): bsc appears unused in this function
    bsc = self.context.bika_setup_catalog
    action, came_from = WorkflowAction._get_form_workflow_action(self)
    # AR Manage Analyses: save Analyses
    ar = self.context
    sample = ar.getSample()
    objects = WorkflowAction._get_selected_items(self)
    if not objects:
        # Nothing selected: inform the user and go back to the listing
        message = _("No analyses have been selected")
        self.context.plone_utils.addPortalMessage(message, 'info')
        self.destination_url = self.context.absolute_url() + "/analyses"
        self.request.response.redirect(self.destination_url)
        return
    Analyses = objects.keys()
    # prices come as a one-element list holding a dict keyed by uid
    prices = form.get("Price", [None])[0]

    # Hidden analyses?
    # https://jira.bikalabs.com/browse/LIMS-1324
    outs = []
    hiddenans = form.get('Hidden', {})
    for uid in Analyses:
        hidden = hiddenans.get(uid, '')
        # checkbox value 'on' means hidden
        hidden = True if hidden == 'on' else False
        outs.append({'uid': uid, 'hidden': hidden})
    ar.setAnalysisServicesSettings(outs)

    # Collect the result specs per service: take them from the form when
    # "min" was submitted, otherwise fall back to empty default ranges
    specs = {}
    if form.get("min", None):
        for service_uid in Analyses:
            service = objects[service_uid]
            keyword = service.getKeyword()
            specs[service_uid] = {
                "min": form["min"][0][service_uid],
                "max": form["max"][0][service_uid],
                "warn_min": form["warn_min"][0][service_uid],
                "warn_max": form["warn_max"][0][service_uid],
                "keyword": keyword,
                "uid": service_uid,
            }
    else:
        for service_uid in Analyses:
            service = objects[service_uid]
            keyword = service.getKeyword()
            specs[service_uid] = ResultsRangeDict(keyword=keyword,
                                                  uid=service_uid)

    # setAnalyses returns the analyses newly created for the AR
    new = ar.setAnalyses(Analyses, prices=prices, specs=specs.values())

    # link analyses and partitions
    # If Bika Setup > Analyses > 'Display individual sample
    # partitions' is checked, no Partitions available.
    # https://github.com/bikalabs/Bika-LIMS/issues/1030
    if 'Partition' in form:
        for service_uid, service in objects.items():
            part_id = form['Partition'][0][service_uid]
            part = sample[part_id]
            analysis = ar[service.getKeyword()]
            analysis.setSamplePartition(part)
            analysis.reindexObject()
            # keep the partition's back-reference list in sync
            partans = part.getAnalyses()
            partans.append(analysis)
            part.setAnalyses(partans)
            part.reindexObject()

    if new:
        ar_state = getCurrentState(ar)
        if wasTransitionPerformed(ar, 'to_be_verified'):
            # Apply to AR only; we don't want this transition to cascade.
            ar.REQUEST['workflow_skiplist'].append("retract all analyses")
            workflow.doActionFor(ar, 'retract')
            ar.REQUEST['workflow_skiplist'].remove("retract all analyses")
            ar_state = getCurrentState(ar)
        # align new analyses with the (possibly retracted) AR state
        for analysis in new:
            changeWorkflowState(analysis, 'bika_analysis_workflow',
                               ar_state)

    message = PMF("Changes saved.")
    self.context.plone_utils.addPortalMessage(message, 'info')
    self.destination_url = self.context.absolute_url()
    self.request.response.redirect(self.destination_url)
def is_out_of_range(brain_or_object, result=_marker):
    """Checks if the result for the analysis passed in is out of range
    and/or out of shoulders range.

            min                    max
            warn                   warn
    ·········|---------------|=====================|---------------|·········
    ----- out-of-range -----><----- in-range ------><----- out-of-range -----
             <-- shoulder --><----- in-range ------><-- shoulder -->

    :param brain_or_object: A single catalog brain or content object
    :param result: Tentative result. If None, use the analysis result
    :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
    :returns: Tuple of two elements. The first value is `True` if the
        result is out of range and `False` if it is in range. The second
        value is `True` if the result is out of shoulder range and `False`
        if it is in shoulder range
    :rtype: (bool, bool)
    """
    analysis = api.get_object(brain_or_object)
    # Only routine and reference analyses are supported
    if not IAnalysis.providedBy(analysis) and \
            not IReferenceAnalysis.providedBy(analysis):
        api.fail("{} is not supported. Needs to be IAnalysis or "
                 "IReferenceAnalysis".format(repr(analysis)))

    if result is _marker:
        result = api.safe_getattr(analysis, "getResult", None)
    if result in [None, '']:
        # Empty result
        return False, False

    if IDuplicateAnalysis.providedBy(analysis):
        # Result range for duplicate analyses is calculated from the
        # original result, applying a variation % in shoulders. If the
        # analysis has result options enabled or string results enabled,
        # system returns an empty result range for the duplicate: result
        # must match %100 with the original result
        original = analysis.getAnalysis()
        original_result = original.getResult()

        # Does original analysis have a valid result?
        if original_result in [None, '']:
            return False, False

        # Does original result type matches with duplicate result type?
        if api.is_floatable(result) != api.is_floatable(original_result):
            return True, True

        # Does analysis has result options enabled or non-floatable?
        if analysis.getResultOptions() or \
                not api.is_floatable(original_result):
            # Let's always assume the result is 'out from shoulders',
            # cause we consider the shoulders are precisely the duplicate
            # variation %
            out_of_range = original_result != result
            return out_of_range, out_of_range

    elif not api.is_floatable(result):
        # A non-duplicate with non-floatable result. There is no chance
        # to know if the result is out-of-range
        return False, False

    # Convert result to a float
    result = api.to_float(result)

    # Note that routine analyses, duplicates and reference analyses all
    # them implement the function getResultRange:
    # - For routine analyses, the function returns the valid range based
    #   on the specs assigned during the creation process.
    # - For duplicates, the valid range is the result of the analysis the
    #   the duplicate was generated from +/- the duplicate variation.
    # - For reference analyses, getResultRange returns the valid range as
    #   indicated in the Reference Sample from which the analysis was
    #   created.
    result_range = api.safe_getattr(analysis, "getResultsRange", None)
    if not result_range:
        # No result range defined or the passed in object does not suit
        return False, False

    # Maybe there is a custom adapter
    adapters = getAdapters((analysis,), IResultOutOfRange)
    for name, adapter in adapters:
        ret = adapter(result=result, specification=result_range)
        if not ret or not ret.get('out_of_range', False):
            continue
        if not ret.get('acceptable', True):
            # Out of range + out of shoulders
            return True, True
        # Out of range, but in shoulders
        return True, False

    result_range = ResultsRangeDict(result_range)

    # The assignment of result as default fallback for min and max
    # guarantees the result will be in range also if no min/max values
    # are defined
    specs_min = api.to_float(result_range.min, result)
    specs_max = api.to_float(result_range.max, result)

    in_range = False
    min_operator = result_range.min_operator
    if min_operator == "geq":
        in_range = result >= specs_min
    else:
        in_range = result > specs_min

    max_operator = result_range.max_operator
    if in_range:
        if max_operator == "leq":
            in_range = result <= specs_max
        else:
            in_range = result < specs_max

    # If in range, no need to check shoulders
    if in_range:
        return False, False

    # Out of range, check shoulders. If no explicit warn_min or warn_max
    # have been defined, no shoulders must be considered for this
    # analysis. Thus, use specs' min and max as default fallback values
    warn_min = api.to_float(result_range.warn_min, specs_min)
    warn_max = api.to_float(result_range.warn_max, specs_max)
    in_shoulder = warn_min <= result <= warn_max
    return True, not in_shoulder
def get(self, instance, **kwargs):
    """Returns the stored value as a ResultsRangeDict, or an empty dict
    when nothing is stored
    """
    from bika.lims.content.analysisspec import ResultsRangeDict
    stored = super(ResultRangeField, self).get(instance, **kwargs)
    if not stored:
        return {}
    return ResultsRangeDict(dict(stored.items()))
def get_spec_from_ar(self, ar, keyword):
    """Returns the result range set in the AR for the given keyword, or
    an empty range when the AR has no spec for it
    """
    fallback = ResultsRangeDict(keyword=keyword)
    ranges = ar.getResultsRange()
    if not ranges:
        return fallback
    return dicts_to_dict(ranges, 'keyword').get(keyword, fallback)