def process_form(self, instance, field, form, empty_marker=None,
                 emptyReturnsMarker=False):
    """Return a list of dictionaries fit for AnalysisSpecsResultsField
    consumption.

    If neither hidemin nor hidemax are specified, only services which
    have float()able entries in result, min and max field will be
    included. If hidemin and/or hidemax specified, results might
    contain empty min and/or max fields.
    """
    values = []
    if 'service' not in form:
        return values, {}

    def entry(key, uid):
        # Per-service form values arrive as {key: [{uid: value}]};
        # a missing key or uid yields an empty string.
        return form[key][0].get(uid, '') if key in form else ''

    for uid, keyword in form['keyword'][0].items():
        hidemin = entry('hidemin', uid)
        hidemax = entry('hidemax', uid)
        mins = entry('min', uid)
        maxs = entry('max', uid)
        err = entry('error', uid)
        rangecomment = entry('rangecomment', uid)
        if not isnumber(hidemin) and not isnumber(hidemax) and \
           (not isnumber(mins) or not isnumber(maxs)):
            # If neither hidemin nor hidemax have been specified,
            # min and max values are mandatory.
            continue
        values.append({'keyword': keyword,
                       'uid': uid,
                       'min': mins if isnumber(mins) else '',
                       'max': maxs if isnumber(maxs) else '',
                       'hidemin': hidemin if isnumber(hidemin) else '',
                       'hidemax': hidemax if isnumber(hidemax) else '',
                       'error': err if isnumber(err) else '0',
                       'rangecomment': rangecomment})
    return values, {}
def _analysis_data(self, analysis, decimalmark=None):
    """Return a dict with the data of the given analysis, ready for
    consumption by the publish/report templates.

    :param analysis: Analysis, DuplicateAnalysis or ReferenceAnalysis
        object to gather the data from
    :param decimalmark: decimal mark to use when formatting numeric
        values (passed through to the formatting helpers)
    :return: dict with metadata, formatted result, specs and
        out-of-range flag for the analysis
    """
    keyword = analysis.getKeyword()
    service = analysis.getService()
    andict = {'obj': analysis,
              'id': analysis.id,
              'title': analysis.Title(),
              'keyword': keyword,
              'scientific_name': service.getScientificName(),
              'accredited': service.getAccredited(),
              'point_of_capture': to_utf8(
                  POINTS_OF_CAPTURE.getValue(service.getPointOfCapture())),
              'category': to_utf8(service.getCategoryTitle()),
              'result': analysis.getResult(),
              'isnumber': isnumber(analysis.getResult()),
              'unit': to_utf8(service.getUnit()),
              'formatted_unit': format_supsub(to_utf8(service.getUnit())),
              'capture_date': analysis.getResultCaptureDate(),
              'request_id': analysis.aq_parent.getId(),
              'formatted_result': '',
              'uncertainty': analysis.getUncertainty(),
              'formatted_uncertainty': '',
              'retested': analysis.getRetested(),
              'remarks': to_utf8(analysis.getRemarks()),
              'resultdm': to_utf8(analysis.getResultDM()),
              'outofrange': False,
              'type': analysis.portal_type,
              'reftype': analysis.getReferenceType()
              if hasattr(analysis, 'getReferenceType') else None,
              'worksheet': None,
              'specs': {},
              'formatted_specs': ''}

    if analysis.portal_type == 'DuplicateAnalysis':
        andict['reftype'] = 'd'

    ws = analysis.getBackReferences('WorksheetAnalysis')
    if ws and len(ws) > 0:
        andict['worksheet'] = ws[0].id
        # Fix: absolute_url is a method; it must be called so the URL
        # string is stored instead of the bound method object.
        andict['worksheet_url'] = ws[0].absolute_url()
    else:
        andict['worksheet'] = None
        andict['worksheet_url'] = None

    andict['refsample'] = analysis.getSample().id \
        if analysis.portal_type == 'Analysis' \
        else '%s - %s' % (analysis.aq_parent.id, analysis.aq_parent.Title())

    # Which analysis specs must be used?
    # Try first with those defined at AR Publish Specs level
    if analysis.portal_type == 'ReferenceAnalysis':
        # The analysis is a Control or Blank. We might use the
        # reference results instead other specs
        uid = analysis.getServiceUID()
        specs = analysis.aq_parent.getResultsRangeDict().get(uid, {})
    elif analysis.portal_type == 'DuplicateAnalysis':
        # NOTE(review): this branch appears to leave an AnalysisSpecs
        # object (not a plain results-range dict) in `specs` — confirm
        # getAnalysisSpecs() returns something with a dict-like .get here.
        specs = analysis.getAnalysisSpecs()
    else:
        ar = analysis.aq_parent
        specs = ar.getPublicationSpecification()
        if not specs or keyword not in specs.getResultsRangeDict():
            specs = analysis.getAnalysisSpecs()
        specs = specs.getResultsRangeDict().get(keyword, {}) \
            if specs else {}

    andict['specs'] = specs
    scinot = self.context.bika_setup.getScientificNotationReport()
    andict['formatted_result'] = analysis.getFormattedResult(
        specs=specs, sciformat=int(scinot), decimalmark=decimalmark)

    # Human-readable range: "min - max", "> min" or "< max"
    fs = ''
    if specs.get('min', None) and specs.get('max', None):
        fs = '%s - %s' % (specs['min'], specs['max'])
    elif specs.get('min', None):
        fs = '> %s' % specs['min']
    elif specs.get('max', None):
        fs = '< %s' % specs['max']
    andict['formatted_specs'] = formatDecimalMark(fs, decimalmark)
    andict['formatted_uncertainty'] = format_uncertainty(
        analysis, analysis.getResult(), decimalmark=decimalmark,
        sciformat=int(scinot))

    # Out of range? Ask every registered IResultOutOfRange adapter.
    if specs:
        adapters = getAdapters((analysis, ), IResultOutOfRange)
        for name, adapter in adapters:
            ret = adapter(specification=specs)
            if ret and ret['out_of_range']:
                andict['outofrange'] = True
                break
    return andict
def calculate(self, uid=None):
    """Evaluate the calculation assigned to the analysis `uid` against the
    current form results, format the result, and cascade to dependents.

    Side effects: appends to self.results and self.uncertainties, updates
    self.current_results[uid]['result'] and self.alerts, and recursively
    recalculates analyses that depend on this one.

    :param uid: UID of the analysis to (re)calculate
    :return: None
    """
    analysis = self.analyses[uid]
    form_result = self.current_results[uid]['result']
    service = analysis.getService()
    calculation = service.getCalculation()
    if analysis.portal_type == 'ReferenceAnalysis':
        # Reference analyses (controls/blanks) have no dependencies
        deps = {}
    else:
        deps = {}
        for dep in analysis.getDependencies():
            deps[dep.UID()] = dep
    path = '++resource++bika.lims.images'
    mapping = {}

    # values to be returned to form for this UID
    Result = {'uid': uid, 'result': form_result}
    try:
        Result['result'] = float(form_result)
    except (TypeError, ValueError):
        # float() only raises these two; "0/0" marks a prior div-by-zero
        if form_result == "0/0":
            Result['result'] = ""

    if calculation:
        # We need first to create the map of available parameters
        # according to the interims, analyses and wildcards:
        #   params = {
        #       <as-1-keyword>              : <analysis_result>,
        #       <as-1-keyword>.<wildcard-1> : <wildcard_1_value>,
        #       <as-1-keyword>.<wildcard-2> : <wildcard_2_value>,
        #       <interim-1>                 : <interim_result>,
        #       ...
        #   }

        # Get dependent analyses results and wildcard values to the
        # mapping. If dependent analysis without result found,
        # break and abort calculation
        unsatisfied = False
        for dependency_uid, dependency in deps.items():
            if dependency_uid in self.ignore_uids:
                unsatisfied = True
                break

            # LIMS-1769. Allow to use LDL and UDL in calculations.
            # https://jira.bikalabs.com/browse/LIMS-1769
            if dependency_uid in self.current_results:
                analysisvalues = self.current_results[dependency_uid]
            else:
                # Retrieve the result and DLs from the analysis
                analysisvalues = {
                    'keyword': dependency.getKeyword(),
                    'result': dependency.getResult(),
                    'ldl': dependency.getLowerDetectionLimit(),
                    'udl': dependency.getUpperDetectionLimit(),
                    'belowldl': dependency.isBelowLowerDetectionLimit(),
                    'aboveudl': dependency.isAboveUpperDetectionLimit(),
                }
            if analysisvalues['result'] == '':
                unsatisfied = True
                break
            key = analysisvalues.get('keyword',
                                     dependency.getService().getKeyword())

            # Analysis result
            # All result mappings must be float, or they are ignored.
            try:
                mapping[key] = float(analysisvalues.get('result'))
                mapping['%s.%s' % (key, 'RESULT')] = float(
                    analysisvalues.get('result'))
                mapping['%s.%s' % (key, 'LDL')] = float(
                    analysisvalues.get('ldl'))
                mapping['%s.%s' % (key, 'UDL')] = float(
                    analysisvalues.get('udl'))
                mapping['%s.%s' % (key, 'BELOWLDL')] = int(
                    analysisvalues.get('belowldl'))
                mapping['%s.%s' % (key, 'ABOVEUDL')] = int(
                    analysisvalues.get('aboveudl'))
            except (TypeError, ValueError):
                # If not floatable, then abort!
                unsatisfied = True
                break
        if unsatisfied:
            # unsatisfied means that one or more result on which we depend
            # is blank or unavailable, so we set blank result and abort.
            self.results.append({'uid': uid,
                                 'result': '',
                                 'formatted_result': ''})
            return None

        # Add all interims to mapping
        for i_uid, i_data in self.item_data.items():
            for i in i_data:
                # if this interim belongs to current analysis and is blank,
                # return an empty result for this analysis.
                if i_uid == uid and i['value'] == '':
                    self.results.append({'uid': uid,
                                         'result': '',
                                         'formatted_result': ''})
                    return None
                # All interims must be float, or they are ignored.
                try:
                    i['value'] = float(i['value'])
                except (TypeError, ValueError):
                    pass
                # all interims are ServiceKeyword.InterimKeyword
                if i_uid in deps:
                    key = "%s.%s" % (deps[i_uid].getService().getKeyword(),
                                     i['keyword'])
                    mapping[key] = i['value']
                # this analysis' interims get extra reference
                # without service keyword prefix
                if uid == i_uid:
                    mapping[i['keyword']] = i['value']

        # Grab values for hidden InterimFields for only for current
        # calculation; we can't allow non-floats through here till we
        # change the eval's interpolation
        hidden_fields = []
        c_fields = calculation.getInterimFields()
        s_fields = service.getInterimFields()
        for field in c_fields:
            if field.get('hidden', False):
                hidden_fields.append(field['keyword'])
                try:
                    mapping[field['keyword']] = float(field['value'])
                except ValueError:
                    pass
        # also grab stickier defaults from AnalysisService
        for field in s_fields:
            if field['keyword'] in hidden_fields:
                try:
                    mapping[field['keyword']] = float(field['value'])
                except ValueError:
                    pass

        # convert formula to a valid python string, ready for interpolation
        formula = calculation.getMinifiedFormula()
        formula = formula.replace('[', '%(').replace(']', ')f')
        try:
            # NOTE(review): eval of a lab-defined formula. __builtins__ is
            # disabled, but this still assumes formulas are trusted input.
            formula = eval("'%s'%%mapping" % formula,
                           {"__builtins__": None,
                            'math': math,
                            'context': self.context},
                           {'mapping': mapping})
            # calculate
            result = eval(formula)
            Result['result'] = result
            self.current_results[uid]['result'] = result
        except TypeError as e:
            # non-numeric arguments in interim mapping?
            alert = {'field': 'Result',
                     'icon': path + '/exclamation.png',
                     'msg': "{0}: {1} ({2}) ".format(
                         t(_("Type Error")),
                         html_quote(str(e.args[0])),
                         formula)}
            if uid in self.alerts:
                self.alerts[uid].append(alert)
            else:
                self.alerts[uid] = [alert, ]
        except ZeroDivisionError as e:
            Result['result'] = '0/0'
            Result['formatted_result'] = '0/0'
            self.current_results[uid]['result'] = '0/0'
            self.results.append(Result)
            alert = {'field': 'Result',
                     'icon': path + '/exclamation.png',
                     'msg': "{0}: {1} ({2}) ".format(
                         t(_("Division by zero")),
                         html_quote(str(e.args[0])),
                         formula)}
            if uid in self.alerts:
                self.alerts[uid].append(alert)
            else:
                self.alerts[uid] = [alert, ]
            return None
        except KeyError as e:
            alert = {'field': 'Result',
                     'icon': path + '/exclamation.png',
                     'msg': "{0}: {1} ({2}) ".format(
                         t(_("Key Error")),
                         html_quote(str(e.args[0])),
                         formula)}
            if uid in self.alerts:
                self.alerts[uid].append(alert)
            else:
                self.alerts[uid] = [alert, ]

    # format result
    belowmin = False
    abovemax = False
    hidemin = ''
    hidemax = ''
    # Some analyses will not have AnalysisSpecs, eg, ReferenceAnalysis
    if hasattr(analysis, 'getAnalysisSpecs'):
        specs = analysis.getAnalysisSpecs()
        specs = specs.getResultsRangeDict() if specs is not None else {}
        specs = specs.get(analysis.getKeyword(), {})
        hidemin = specs.get('hidemin', '')
        hidemax = specs.get('hidemax', '')
        if Result.get('result', ''):
            fresult = Result['result']
            try:
                belowmin = hidemin and fresult < float(hidemin) or False
            except ValueError:
                belowmin = False
            try:
                abovemax = hidemax and fresult > float(hidemax) or False
            except ValueError:
                abovemax = False

    if belowmin is True:
        Result['formatted_result'] = '< %s' % hidemin
    elif abovemax is True:
        Result['formatted_result'] = '> %s' % hidemax
    else:
        try:
            Result['formatted_result'] = format_numeric_result(
                analysis, Result['result'])
        except ValueError:
            # non-float
            Result['formatted_result'] = Result['result']

    # calculate Dry Matter result
    # if parent is not an AR, it's never going to be calculable
    dm = hasattr(analysis.aq_parent, 'getReportDryMatter') and \
        analysis.aq_parent.getReportDryMatter() and \
        analysis.getService().getReportDryMatter()
    if dm:
        dry_service = self.context.bika_setup.getDryMatterService()
        # get the UID of the DryMatter Analysis from our parent AR
        dry_analysis = [a for a in
                        analysis.aq_parent.getAnalyses(full_objects=True)
                        if a.getService().UID() == dry_service.UID()]
        if dry_analysis:
            dry_analysis = dry_analysis[0]
            dry_uid = dry_analysis.UID()
            # get the current DryMatter analysis result from the form
            if dry_uid in self.current_results:
                try:
                    # Fix: current_results values are dicts keyed by
                    # 'result' (see top of this method); float() on the
                    # dict itself always raised TypeError, so the form's
                    # dry matter result was silently never used.
                    dry_result = float(
                        self.current_results[dry_uid]['result'])
                except (TypeError, ValueError):
                    dm = False
            else:
                try:
                    dry_result = float(dry_analysis.getResult())
                except (TypeError, ValueError):
                    dm = False
        else:
            dm = False
    Result['dry_result'] = dm and dry_result and \
        '%.2f' % ((Result['result'] / dry_result) * 100) or ''

    self.results.append(Result)

    # LIMS-1808 Uncertainty calculation on DL
    # https://jira.bikalabs.com/browse/LIMS-1808
    flres = Result.get('result', None)
    if flres and isnumber(flres):
        flres = float(flres)
        anvals = self.current_results[uid]
        isldl = anvals.get('isldl', False)
        isudl = anvals.get('isudl', False)
        ldl = anvals.get('ldl', 0)
        udl = anvals.get('udl', 0)
        ldl = float(ldl) if isnumber(ldl) else 0
        udl = float(udl) if isnumber(udl) else 10000000
        belowldl = (isldl or flres < ldl)
        aboveudl = (isudl or flres > udl)
        unc = '' if (belowldl or aboveudl) \
            else analysis.getUncertainty(Result.get('result'))
        if not (belowldl or aboveudl):
            self.uncertainties.append({'uid': uid, 'uncertainty': unc})

    # maybe a service who depends on us must be recalculated.
    if analysis.portal_type == 'ReferenceAnalysis':
        dependents = []
    else:
        dependents = analysis.getDependents()
    if dependents:
        for dependent in dependents:
            dependent_uid = dependent.UID()
            # ignore analyses that no longer exist.
            if dependent_uid in self.ignore_uids or \
               dependent_uid not in self.analyses:
                continue
            self.calculate(dependent_uid)

    # These self.alerts are just for the json return.
    # we're placing the entire form's results in kwargs.
    adapters = getAdapters((analysis, ), IFieldIcons)
    for name, adapter in adapters:
        alerts = adapter(result=Result['result'],
                         form_results=self.current_results)
        if alerts:
            if analysis.UID() in self.alerts:
                self.alerts[analysis.UID()].extend(alerts[analysis.UID()])
            else:
                self.alerts[analysis.UID()] = alerts[analysis.UID()]
def _analysis_data(self, analysis, decimalmark=None):
    """Return a dict with the data of the given analysis, ready for
    consumption by the publish/report templates.

    :param analysis: Analysis, DuplicateAnalysis or ReferenceAnalysis
        object to gather the data from
    :param decimalmark: decimal mark to use when formatting numeric
        values (passed through to the formatting helpers)
    :return: dict with metadata, formatted result, specs and
        out-of-range flag for the analysis
    """
    keyword = analysis.getKeyword()
    service = analysis.getService()
    andict = {'obj': analysis,
              'id': analysis.id,
              'title': analysis.Title(),
              'keyword': keyword,
              'scientific_name': service.getScientificName(),
              'accredited': service.getAccredited(),
              'point_of_capture': to_utf8(
                  POINTS_OF_CAPTURE.getValue(service.getPointOfCapture())),
              'category': to_utf8(service.getCategoryTitle()),
              'result': analysis.getResult(),
              'isnumber': isnumber(analysis.getResult()),
              'unit': to_utf8(service.getUnit()),
              'formatted_unit': format_supsub(to_utf8(service.getUnit())),
              'capture_date': analysis.getResultCaptureDate(),
              'request_id': analysis.aq_parent.getId(),
              'formatted_result': '',
              'uncertainty': analysis.getUncertainty(),
              'formatted_uncertainty': '',
              'retested': analysis.getRetested(),
              'remarks': to_utf8(analysis.getRemarks()),
              'resultdm': to_utf8(analysis.getResultDM()),
              'outofrange': False,
              'type': analysis.portal_type,
              'reftype': analysis.getReferenceType()
              if hasattr(analysis, 'getReferenceType') else None,
              'worksheet': None,
              'specs': {},
              'formatted_specs': ''}

    if analysis.portal_type == 'DuplicateAnalysis':
        andict['reftype'] = 'd'

    ws = analysis.getBackReferences('WorksheetAnalysis')
    if ws and len(ws) > 0:
        andict['worksheet'] = ws[0].id
        # Fix: absolute_url is a method; it must be called so the URL
        # string is stored instead of the bound method object.
        andict['worksheet_url'] = ws[0].absolute_url()
    else:
        andict['worksheet'] = None
        andict['worksheet_url'] = None

    andict['refsample'] = analysis.getSample().id \
        if analysis.portal_type == 'Analysis' \
        else '%s - %s' % (analysis.aq_parent.id, analysis.aq_parent.Title())

    # Which analysis specs must be used?
    # Try first with those defined at AR Publish Specs level
    if analysis.portal_type == 'ReferenceAnalysis':
        # The analysis is a Control or Blank. We might use the
        # reference results instead other specs
        uid = analysis.getServiceUID()
        specs = analysis.aq_parent.getResultsRangeDict().get(uid, {})
    elif analysis.portal_type == 'DuplicateAnalysis':
        # NOTE(review): this branch appears to leave an AnalysisSpecs
        # object (not a plain results-range dict) in `specs` — confirm
        # getAnalysisSpecs() returns something with a dict-like .get here.
        specs = analysis.getAnalysisSpecs()
    else:
        ar = analysis.aq_parent
        specs = ar.getPublicationSpecification()
        if not specs or keyword not in specs.getResultsRangeDict():
            specs = analysis.getAnalysisSpecs()
        specs = specs.getResultsRangeDict().get(keyword, {}) \
            if specs else {}

    andict['specs'] = specs
    scinot = self.context.bika_setup.getScientificNotationReport()
    andict['formatted_result'] = analysis.getFormattedResult(
        specs=specs, sciformat=int(scinot), decimalmark=decimalmark)

    # Human-readable range: "min - max", "> min" or "< max"
    fs = ''
    if specs.get('min', None) and specs.get('max', None):
        fs = '%s - %s' % (specs['min'], specs['max'])
    elif specs.get('min', None):
        fs = '> %s' % specs['min']
    elif specs.get('max', None):
        fs = '< %s' % specs['max']
    andict['formatted_specs'] = formatDecimalMark(fs, decimalmark)
    andict['formatted_uncertainty'] = format_uncertainty(
        analysis, analysis.getResult(), decimalmark=decimalmark,
        sciformat=int(scinot))

    # Out of range? Ask every registered IResultOutOfRange adapter.
    if specs:
        adapters = getAdapters((analysis, ), IResultOutOfRange)
        for name, adapter in adapters:
            ret = adapter(specification=specs)
            if ret and ret['out_of_range']:
                andict['outofrange'] = True
                break
    return andict
def calculate(self, uid=None):
    """Evaluate the calculation assigned to the analysis `uid` against the
    current form results, format the result, and cascade to dependents.

    Side effects: appends to self.results and self.uncertainties, updates
    self.current_results[uid]['result'] and self.alerts, and recursively
    recalculates analyses that depend on this one.

    :param uid: UID of the analysis to (re)calculate
    :return: None
    """
    analysis = self.analyses[uid]
    form_result = self.current_results[uid]['result']
    service = analysis.getService()
    calculation = service.getCalculation()
    if analysis.portal_type == 'ReferenceAnalysis':
        # Reference analyses (controls/blanks) have no dependencies
        deps = {}
    else:
        deps = {}
        for dep in analysis.getDependencies():
            deps[dep.UID()] = dep
    path = '++resource++bika.lims.images'
    mapping = {}

    # values to be returned to form for this UID
    Result = {'uid': uid, 'result': form_result}
    try:
        Result['result'] = float(form_result)
    except (TypeError, ValueError):
        # float() only raises these two; "0/0" marks a prior div-by-zero
        if form_result == "0/0":
            Result['result'] = ""

    if calculation:
        # We need first to create the map of available parameters
        # according to the interims, analyses and wildcards:
        #   params = {
        #       <as-1-keyword>              : <analysis_result>,
        #       <as-1-keyword>.<wildcard-1> : <wildcard_1_value>,
        #       <as-1-keyword>.<wildcard-2> : <wildcard_2_value>,
        #       <interim-1>                 : <interim_result>,
        #       ...
        #   }

        # Get dependent analyses results and wildcard values to the
        # mapping. If dependent analysis without result found,
        # break and abort calculation
        unsatisfied = False
        for dependency_uid, dependency in deps.items():
            if dependency_uid in self.ignore_uids:
                unsatisfied = True
                break

            # LIMS-1769. Allow to use LDL and UDL in calculations.
            # https://jira.bikalabs.com/browse/LIMS-1769
            if dependency_uid in self.current_results:
                analysisvalues = self.current_results[dependency_uid]
            else:
                # Retrieve the result and DLs from the analysis
                analysisvalues = {
                    'keyword': dependency.getKeyword(),
                    'result': dependency.getResult(),
                    'ldl': dependency.getLowerDetectionLimit(),
                    'udl': dependency.getUpperDetectionLimit(),
                    'belowldl': dependency.isBelowLowerDetectionLimit(),
                    'aboveudl': dependency.isAboveUpperDetectionLimit(),
                }
            if analysisvalues['result'] == '':
                unsatisfied = True
                break
            key = analysisvalues.get('keyword',
                                     dependency.getService().getKeyword())

            # Analysis result
            # All result mappings must be float, or they are ignored.
            try:
                mapping[key] = float(analysisvalues.get('result'))
                mapping['%s.%s' % (key, 'RESULT')] = float(
                    analysisvalues.get('result'))
                mapping['%s.%s' % (key, 'LDL')] = float(
                    analysisvalues.get('ldl'))
                mapping['%s.%s' % (key, 'UDL')] = float(
                    analysisvalues.get('udl'))
                mapping['%s.%s' % (key, 'BELOWLDL')] = int(
                    analysisvalues.get('belowldl'))
                mapping['%s.%s' % (key, 'ABOVEUDL')] = int(
                    analysisvalues.get('aboveudl'))
            except (TypeError, ValueError):
                # If not floatable, then abort!
                unsatisfied = True
                break
        if unsatisfied:
            # unsatisfied means that one or more result on which we depend
            # is blank or unavailable, so we set blank result and abort.
            self.results.append({'uid': uid,
                                 'result': '',
                                 'formatted_result': ''})
            return None

        # Add all interims to mapping
        for i_uid, i_data in self.item_data.items():
            for i in i_data:
                # if this interim belongs to current analysis and is blank,
                # return an empty result for this analysis.
                if i_uid == uid and i['value'] == '':
                    self.results.append({'uid': uid,
                                         'result': '',
                                         'formatted_result': ''})
                    return None
                # All interims must be float, or they are ignored.
                try:
                    i['value'] = float(i['value'])
                except (TypeError, ValueError):
                    pass
                # all interims are ServiceKeyword.InterimKeyword
                if i_uid in deps:
                    key = "%s.%s" % (deps[i_uid].getService().getKeyword(),
                                     i['keyword'])
                    mapping[key] = i['value']
                # this analysis' interims get extra reference
                # without service keyword prefix
                if uid == i_uid:
                    mapping[i['keyword']] = i['value']

        # Grab values for hidden InterimFields for only for current
        # calculation; we can't allow non-floats through here till we
        # change the eval's interpolation
        hidden_fields = []
        c_fields = calculation.getInterimFields()
        s_fields = service.getInterimFields()
        for field in c_fields:
            if field.get('hidden', False):
                hidden_fields.append(field['keyword'])
                try:
                    mapping[field['keyword']] = float(field['value'])
                except ValueError:
                    pass
        # also grab stickier defaults from AnalysisService
        for field in s_fields:
            if field['keyword'] in hidden_fields:
                try:
                    mapping[field['keyword']] = float(field['value'])
                except ValueError:
                    pass

        # convert formula to a valid python string, ready for interpolation
        formula = calculation.getMinifiedFormula()
        formula = formula.replace('[', '%(').replace(']', ')f')
        try:
            # NOTE(review): eval of a lab-defined formula. __builtins__ is
            # disabled, but this still assumes formulas are trusted input.
            formula = eval("'%s'%%mapping" % formula,
                           {"__builtins__": None,
                            'math': math,
                            'context': self.context},
                           {'mapping': mapping})
            # calculate
            result = eval(formula)
            Result['result'] = result
            self.current_results[uid]['result'] = result
        except TypeError as e:
            # non-numeric arguments in interim mapping?
            alert = {'field': 'Result',
                     'icon': path + '/exclamation.png',
                     'msg': "{0}: {1} ({2}) ".format(
                         t(_("Type Error")),
                         html_quote(str(e.args[0])),
                         formula)}
            if uid in self.alerts:
                self.alerts[uid].append(alert)
            else:
                self.alerts[uid] = [alert, ]
        except ZeroDivisionError as e:
            Result['result'] = '0/0'
            Result['formatted_result'] = '0/0'
            self.current_results[uid]['result'] = '0/0'
            self.results.append(Result)
            alert = {'field': 'Result',
                     'icon': path + '/exclamation.png',
                     'msg': "{0}: {1} ({2}) ".format(
                         t(_("Division by zero")),
                         html_quote(str(e.args[0])),
                         formula)}
            if uid in self.alerts:
                self.alerts[uid].append(alert)
            else:
                self.alerts[uid] = [alert, ]
            return None
        except KeyError as e:
            alert = {'field': 'Result',
                     'icon': path + '/exclamation.png',
                     'msg': "{0}: {1} ({2}) ".format(
                         t(_("Key Error")),
                         html_quote(str(e.args[0])),
                         formula)}
            if uid in self.alerts:
                self.alerts[uid].append(alert)
            else:
                self.alerts[uid] = [alert, ]

    # format result
    belowmin = False
    abovemax = False
    hidemin = ''
    hidemax = ''
    # Some analyses will not have AnalysisSpecs, eg, ReferenceAnalysis
    if hasattr(analysis, 'getAnalysisSpecs'):
        specs = analysis.getAnalysisSpecs()
        specs = specs.getResultsRangeDict() if specs is not None else {}
        specs = specs.get(analysis.getKeyword(), {})
        hidemin = specs.get('hidemin', '')
        hidemax = specs.get('hidemax', '')
        if Result.get('result', ''):
            fresult = Result['result']
            try:
                belowmin = hidemin and fresult < float(hidemin) or False
            except ValueError:
                belowmin = False
            try:
                abovemax = hidemax and fresult > float(hidemax) or False
            except ValueError:
                abovemax = False

    if belowmin is True:
        Result['formatted_result'] = '< %s' % hidemin
    elif abovemax is True:
        Result['formatted_result'] = '> %s' % hidemax
    else:
        try:
            Result['formatted_result'] = format_numeric_result(
                analysis, Result['result'])
        except ValueError:
            # non-float
            Result['formatted_result'] = Result['result']

    # calculate Dry Matter result
    # if parent is not an AR, it's never going to be calculable
    dm = hasattr(analysis.aq_parent, 'getReportDryMatter') and \
        analysis.aq_parent.getReportDryMatter() and \
        analysis.getService().getReportDryMatter()
    if dm:
        dry_service = self.context.bika_setup.getDryMatterService()
        # get the UID of the DryMatter Analysis from our parent AR
        dry_analysis = [a for a in
                        analysis.aq_parent.getAnalyses(full_objects=True)
                        if a.getService().UID() == dry_service.UID()]
        if dry_analysis:
            dry_analysis = dry_analysis[0]
            dry_uid = dry_analysis.UID()
            # get the current DryMatter analysis result from the form
            if dry_uid in self.current_results:
                try:
                    # Fix: current_results values are dicts keyed by
                    # 'result' (see top of this method); float() on the
                    # dict itself always raised TypeError, so the form's
                    # dry matter result was silently never used.
                    dry_result = float(
                        self.current_results[dry_uid]['result'])
                except (TypeError, ValueError):
                    dm = False
            else:
                try:
                    dry_result = float(dry_analysis.getResult())
                except (TypeError, ValueError):
                    dm = False
        else:
            dm = False
    Result['dry_result'] = dm and dry_result and \
        '%.2f' % ((Result['result'] / dry_result) * 100) or ''

    self.results.append(Result)

    # LIMS-1808 Uncertainty calculation on DL
    # https://jira.bikalabs.com/browse/LIMS-1808
    flres = Result.get('result', None)
    if flres and isnumber(flres):
        flres = float(flres)
        anvals = self.current_results[uid]
        isldl = anvals.get('isldl', False)
        isudl = anvals.get('isudl', False)
        ldl = anvals.get('ldl', 0)
        udl = anvals.get('udl', 0)
        ldl = float(ldl) if isnumber(ldl) else 0
        udl = float(udl) if isnumber(udl) else 10000000
        belowldl = (isldl or flres < ldl)
        aboveudl = (isudl or flres > udl)
        unc = '' if (belowldl or aboveudl) \
            else analysis.getUncertainty(Result.get('result'))
        if not (belowldl or aboveudl):
            self.uncertainties.append({'uid': uid, 'uncertainty': unc})

    # maybe a service who depends on us must be recalculated.
    if analysis.portal_type == 'ReferenceAnalysis':
        dependents = []
    else:
        dependents = analysis.getDependents()
    if dependents:
        for dependent in dependents:
            dependent_uid = dependent.UID()
            # ignore analyses that no longer exist.
            if dependent_uid in self.ignore_uids or \
               dependent_uid not in self.analyses:
                continue
            self.calculate(dependent_uid)

    # These self.alerts are just for the json return.
    # we're placing the entire form's results in kwargs.
    adapters = getAdapters((analysis, ), IFieldIcons)
    for name, adapter in adapters:
        alerts = adapter(result=Result['result'],
                         form_results=self.current_results)
        if alerts:
            if analysis.UID() in self.alerts:
                self.alerts[analysis.UID()].extend(alerts[analysis.UID()])
            else:
                self.alerts[analysis.UID()] = alerts[analysis.UID()]