def _folder_item_specifications(self, analysis_brain, item):
    """Set the results range to the item passed in.

    Renders the valid range as "[min, max]" in the item's Specification
    column and, when the result is out of range, appends a warning or
    exclamation icon next to the Result column.

    :param analysis_brain: catalog brain of the analysis to render
    :param item: listing row (dict) updated in place
    """
    # Everyone can see valid-ranges
    item['Specification'] = ''
    # Brain metadata attribute (no call) — presumably a dict with
    # 'min'/'max' keys; TODO confirm against the catalog schema
    results_range = analysis_brain.getResultsRange
    if not results_range:
        return

    # Keep only boundaries that are valid numbers. Conditional
    # expressions replace the fragile `cond and a or b` idiom.
    min_str = results_range.get('min', '')
    max_str = results_range.get('max', '')
    min_str = "{0}".format(min_str) if api.is_floatable(min_str) else ""
    max_str = "{0}".format(max_str) if api.is_floatable(max_str) else ""
    specs = ", ".join([val for val in [min_str, max_str] if val])
    if not specs:
        return

    item["Specification"] = "[{}]".format(specs)

    # Show an icon if out of range
    out_range, out_shoulders = is_out_of_range(analysis_brain)
    if not out_range:
        return

    # At least is out of range; shoulder range gets the softer icon
    img = get_image("exclamation.png", title=_("Result out of range"))
    if not out_shoulders:
        img = get_image("warning.png", title=_("Result in shoulder range"))
    self._append_html_element(item, "Result", img)
def _render_range_alert(self, analysis, result):
    """Appends an entry for the passed in analysis in self.alerts if the
    passed in tentative result is out of range or in shoulder range, in
    accordance with the assigned results range for the passed in analysis

    :param analysis: analysis object to be evaluated
    :param result: the tentative result to test if out of range/in shoulder
    """
    # Nothing to do without an analysis or a numeric tentative result
    if not analysis:
        return
    if not api.is_floatable(result):
        return

    out_of_range, out_of_shoulder = is_out_of_range(analysis, result)
    if not out_of_range:
        return

    # Formatted specification interval, shown next to the alert message
    rngstr = get_formatted_interval(analysis.getResultsRange(), default="")

    # Truly out of range gets the exclamation icon; a result that only
    # falls within the shoulder gets the softer warning icon
    if out_of_shoulder:
        message = "Result out of range"
        icon = "exclamation.png"
    else:
        message = "Result in shoulder range"
        icon = "warning.png"

    uid = api.get_uid(analysis)
    entry = {'icon': "++resource++bika.lims.images/{}".format(icon),
             'msg': "{0} {1}".format(t(_(message)), rngstr),
             'field': "Result"}
    self.alerts.setdefault(uid, []).append(entry)
def _folder_item_specifications(self, analysis_brain, item):
    """Set the results range to the item passed in"""
    # Everyone can see valid-ranges
    item['Specification'] = ''
    analysis = api.get_object(analysis_brain)
    results_range = analysis.getResultsRange()
    if not results_range:
        return

    # Display the specification interval
    item["Specification"] = get_formatted_interval(results_range, "")

    # Only decorate the Result column when the result is out of range
    out_range, out_shoulders = is_out_of_range(analysis_brain)
    if not out_range:
        return

    # Shoulder-only results get the softer warning icon
    if out_shoulders:
        icon = get_image("exclamation.png", title=_("Result out of range"))
    else:
        icon = get_image("warning.png", title=_("Result in shoulder range"))
    self._append_html_element(item, "Result", icon)

    # Append the grade badge, if any, after the icon
    grade = api.get_grade_number(analysis_brain)
    if grade:
        badge = " <span class='small grade_{}'>G{}</span>".format(grade, grade)
        self._append_html_element(item, "Result", badge)
def _analysis_data(self, analysis):
    """ Returns a dict that represents the analysis """
    # Decimal mark comes from the grand-parent (presumably the client or
    # the setup object two acquisition levels up — TODO confirm)
    decimalmark = analysis.aq_parent.aq_parent.getDecimalMark()
    keyword = analysis.getKeyword()
    # Base snapshot of the analysis; 'formatted_*' fields are filled below
    andict = {
        'obj': analysis,
        'id': analysis.id,
        'title': analysis.Title(),
        'keyword': keyword,
        'scientific_name': analysis.getScientificName(),
        'accredited': analysis.getAccredited(),
        'point_of_capture': to_utf8(
            POINTS_OF_CAPTURE.getValue(analysis.getPointOfCapture())),
        'category': to_utf8(analysis.getCategoryTitle()),
        'result': analysis.getResult(),
        'unit': to_utf8(analysis.getUnit()),
        'formatted_unit': format_supsub(to_utf8(analysis.getUnit())),
        'capture_date': analysis.getResultCaptureDate(),
        'request_id': analysis.aq_parent.getId(),
        'formatted_result': '',
        'uncertainty': analysis.getUncertainty(),
        'formatted_uncertainty': '',
        'retested': analysis.isRetest(),
        'remarks': to_utf8(analysis.getRemarks()),
        'outofrange': False,
        'type': analysis.portal_type,
        # Reference analyses expose getReferenceType; routine ones do not
        'reftype': analysis.getReferenceType() if hasattr(
            analysis, 'getReferenceType') else None,
        'worksheet': None,
        'specs': {},
        'formatted_specs': '',
        'review_state': api.get_workflow_status_of(analysis),
    }
    # Reference analyses point to their reference sample; routine
    # analyses point to their Analysis Request
    andict['refsample'] = analysis.getSample().id \
        if IReferenceAnalysis.providedBy(analysis) \
        else analysis.getRequestID()

    specs = analysis.getResultsRange()
    andict['specs'] = specs
    scinot = self.context.bika_setup.getScientificNotationReport()
    andict['formatted_result'] = analysis.getFormattedResult(
        specs=specs, sciformat=int(scinot), decimalmark=decimalmark)

    # Human-readable range: "min - max", "> min" or "< max"
    fs = ''
    if specs.get('min', None) and specs.get('max', None):
        fs = '%s - %s' % (specs['min'], specs['max'])
    elif specs.get('min', None):
        fs = '> %s' % specs['min']
    elif specs.get('max', None):
        fs = '< %s' % specs['max']
    andict['formatted_specs'] = formatDecimalMark(fs, decimalmark)
    andict['formatted_uncertainty'] = format_uncertainty(
        analysis, analysis.getResult(), decimalmark=decimalmark,
        sciformat=int(scinot))

    # Out of range? First element of the (out_of_range, out_of_shoulders)
    # tuple returned by is_out_of_range
    andict['outofrange'] = is_out_of_range(analysis)[0]
    return andict
def get_panic_analyses_list_message(self, ar):
    """Returns one line per non-retracted analysis of the passed-in AR
    whose result falls outside the shoulder range (panic), as a single
    newline-joined string.
    """
    translate = self.context.translate
    lines = []
    for analysis in ar.getAnalyses(full_objects=True, retracted=False):
        # Second tuple element: result is beyond the shoulders
        if not is_out_of_range(analysis)[1]:
            continue
        line = "- {0}, {1}: {2} {3}".format(analysis.Title(),
                                            translate(_("Result")),
                                            analysis.getFormattedResult(),
                                            analysis.getUnit())
        lines.append(line.strip())
    return "\n".join(lines)
def isQCValid(self):
    """ Returns True if the results of the last batch of QC Analyses
        performed against this instrument was within the valid range.

        For a given Reference Sample, more than one Reference Analyses
        assigned to this same instrument can be performed and the
        Results Capture Date might slightly differ amongst them. Thus,
        this function gets the latest QC Analysis performed, looks for
        siblings (through the Reference Analyses Group ID) and if the
        results for all of them are valid, returns True. If a single
        Reference Analysis from the group has an out-of-range result,
        the function returns False.
    """
    # Fetch only the most recent ReferenceAnalysis for this instrument
    latest = api.search({
        "portal_type": "ReferenceAnalysis",
        "getInstrumentUID": self.UID(),
        "sort_on": "getResultCaptureDate",
        "sort_order": "reverse",
        "sort_limit": 1,
    }, CATALOG_ANALYSIS_LISTING)
    if not latest:
        # There are no Reference Analyses assigned to this instrument yet
        return True

    # Look for siblings: the QC analyses created together with this last
    # ReferenceAnalysis, for the same Reference Sample (same group id)
    group_id = latest[0].getReferenceAnalysesGroupID
    siblings = api.search({
        "portal_type": "ReferenceAnalysis",
        "getInstrumentUID": self.UID(),
        "getReferenceAnalysesGroupID": group_id,
    }, CATALOG_ANALYSIS_LISTING)

    for sibling in siblings:
        analysis = api.get_object(sibling)
        # Skip analyses without an assigned results range
        if not analysis.getResultsRange():
            continue
        # First tuple element is the out-of-range flag
        if is_out_of_range(analysis)[0]:
            return False

    # By default, in range
    return True
def _folder_item_specifications(self, analysis_brain, item):
    """Set the results range to the item passed in"""
    # Everyone can see valid-ranges
    item['Specification'] = ''
    # Results range comes straight from the brain's metadata
    results_range = analysis_brain.getResultsRange
    if not results_range:
        return

    # Display the specification interval
    item["Specification"] = get_formatted_interval(results_range, "")

    # Only decorate the Result column when the result is out of range
    out_range, out_shoulders = is_out_of_range(analysis_brain)
    if not out_range:
        return

    # Shoulder-only results get the softer warning icon
    if out_shoulders:
        icon = get_image("exclamation.png", title=_("Result out of range"))
    else:
        icon = get_image("warning.png", title=_("Result in shoulder range"))
    self._append_html_element(item, "Result", icon)
def __call__(self, action, objects):
    """Applies form values (remarks, instrument, method, result, etc.) to
    each analysis, performs the requested action on all of them and, when
    a reference analysis with an out-of-range result has an instrument
    assigned, retracts the to-be-verified analyses of that instrument and
    attaches a retraction report.

    :param action: id of the workflow action to perform (e.g. "submit")
    :param objects: analyses the action is performed against
    """
    # Store invalid instruments-ref.analyses
    invalid_instrument_refs = defaultdict(set)

    # Get interims data
    interims_data = self.get_interims_data()

    for analysis in objects:
        uid = api.get_uid(analysis)

        # Need to save remarks?
        remarks = self.get_form_value("Remarks", uid, default="")
        analysis.setRemarks(remarks)

        # Need to save the instrument?
        instrument = self.get_form_value("Instrument", uid, None)
        if instrument is not None:
            # Could be an empty string
            instrument = instrument or None
            analysis.setInstrument(instrument)
            if instrument and IReferenceAnalysis.providedBy(analysis):
                # FIX: is_out_of_range returns a 2-tuple, which is always
                # truthy — index [0] for the actual out-of-range flag
                if is_out_of_range(analysis)[0]:
                    # This reference analysis is out of range, so we have
                    # to retract all analyses assigned to this same
                    # instrument that are awaiting for verification
                    invalid_instrument_refs[uid].add(analysis)
                else:
                    # The reference result is valid, so make the instrument
                    # available again for further analyses
                    instrument.setDisposeUntilNextCalibrationTest(False)

        # Need to save the method?
        method = self.get_form_value("Method", uid, default=None)
        if method is not None:
            method = method or None
            analysis.setMethod(method)

        # Need to save analyst?
        analyst = self.get_form_value("Analyst", uid, default=None)
        if analyst is not None:
            analysis.setAnalyst(analyst)

        # Save uncertainty
        uncertainty = self.get_form_value("Uncertainty", uid, "")
        analysis.setUncertainty(uncertainty)

        # Save detection limit
        dlimit = self.get_form_value("DetectionLimit", uid, "")
        analysis.setDetectionLimitOperand(dlimit)

        # Interim fields
        interims = interims_data.get(uid, analysis.getInterimFields())
        analysis.setInterimFields(interims)

        # Save Hidden
        hidden = self.get_form_value("Hidden", uid, "")
        analysis.setHidden(hidden == "on")

        # Result
        result = self.get_form_value("Result", uid,
                                     default=analysis.getResult())
        analysis.setResult(result)

    # Submit all analyses
    transitioned = self.do_action(action, objects)
    if not transitioned:
        return self.redirect(message=_("No changes made"), level="warning")

    # If a reference analysis with an out-of-range result and instrument
    # assigned has been submitted, retract then routine analyses that are
    # awaiting for verification and with same instrument associated
    retracted = list()
    for invalid_instrument_uid in invalid_instrument_refs.keys():
        query = dict(
            getInstrumentUID=invalid_instrument_uid,
            portal_type=['Analysis', 'DuplicateAnalysis'],
            review_state='to_be_verified',
            cancellation_state='active',
        )
        brains = api.search(query, CATALOG_ANALYSIS_LISTING)
        for brain in brains:
            analysis = api.get_object(brain)
            failed_msg = '{0}: {1}'.format(
                ulocalized_time(DateTime(), long_format=1),
                _("Instrument failed reference test"))
            an_remarks = analysis.getRemarks()
            # FIX: skip empty remarks so we don't prepend a dangling ". "
            analysis.setRemarks(
                '. '.join(filter(None, [an_remarks, failed_msg])))
            retracted.append(analysis)

    # If some analyses have been retracted because instrument failed a
    # reference test, then generate a pdf report
    if self.do_action("retract", retracted):
        # Create the Retracted Analyses List
        portal_url = api.get_url(api.get_portal())
        report = AnalysesRetractedListReport(self.context, self.request,
                                             portal_url,
                                             'Retracted analyses',
                                             retracted)

        # Attach the pdf to all ReferenceAnalysis that failed (accessible
        # from Instrument's Internal Calibration Tests list
        pdf = report.toPdf()
        # FIX: values() holds *sets* of reference analyses — iterate the
        # analyses inside each set instead of calling a method on the set
        for refs in invalid_instrument_refs.values():
            for ref in refs:
                ref.setRetractedAnalysesPdfReport(pdf)

        # Send the email
        try:
            report.sendEmail()
        except Exception as err_msg:
            message = "Unable to send email: {}".format(err_msg)
            logger.warn(message)

    # Redirect to success view
    return self.success(transitioned)
def has_analyses_in_panic(self):
    """Returns whether at least one non-retracted analysis has a result
    beyond the shoulder range (second element of the is_out_of_range
    tuple).
    """
    analyses = self.getAnalyses(full_objects=True, retracted=False)
    return any(is_out_of_range(an)[1] for an in analyses)
def is_out_of_range(self, analysis):
    """Check if the analysis is out of range
    """
    # Local import avoids shadowing this method's own name at module scope
    from bika.lims.api.analysis import is_out_of_range
    out_of_range, _out_of_shoulders = is_out_of_range(analysis.instance)
    return out_of_range
def workflow_action_submit(self):
    """Handles the "submit" workflow action for the selected analyses:
    stores the form values (remarks, instrument, method, result, ...) on
    each analysis, submits those whose dependencies allow it, retracts
    to-be-verified analyses bound to instruments that failed a reference
    test, and finally tries to submit the affected ARs and Worksheets.
    """
    uids = self.get_selected_uids()
    if not uids:
        message = _('No items selected.')
        self.context.plone_utils.addPortalMessage(message, 'info')
        self.request.response.redirect(self.context.absolute_url())
        return

    if not is_active(self.context):
        message = _('Item is inactive.')
        self.context.plone_utils.addPortalMessage(message, 'info')
        self.request.response.redirect(self.context.absolute_url())
        return

    form = self.request.form
    remarks = form.get('Remarks', [{}])[0]
    results = form.get('Result', [{}])[0]
    retested = form.get('retested', {})
    methods = form.get('Method', [{}])[0]
    instruments = form.get('Instrument', [{}])[0]
    analysts = self.request.form.get('Analyst', [{}])[0]
    uncertainties = self.request.form.get('Uncertainty', [{}])[0]
    dlimits = self.request.form.get('DetectionLimit', [{}])[0]

    # XXX combine data from multiple bika listing tables.
    # TODO: Is this necessary?
    item_data = {}
    if 'item_data' in form:
        if type(form['item_data']) == list:
            for i_d in form['item_data']:
                for i, d in json.loads(i_d).items():
                    item_data[i] = d
        else:
            item_data = json.loads(form['item_data'])

    # Store affected Analysis Requests
    affected_ars = set()

    # Store affected Worksheets
    affected_ws = set()

    # Store invalid instruments-ref.analyses
    invalid_instrument_refs = dict()

    # We manually query by all analyses uids at once here instead of using
    # _get_selected_items from the base class, cause that function fetches
    # the objects by uid, but sequentially one by one
    query = dict(UID=uids)
    for brain in api.search(query, CATALOG_ANALYSIS_LISTING):
        uid = api.get_uid(brain)
        analysis = api.get_object(brain)

        # If not active, do nothing
        if not is_active(brain):
            continue

        # Need to save remarks?
        if uid in remarks:
            analysis.setRemarks(remarks[uid])

        # Retested?
        if uid in retested:
            analysis.setRetested(retested[uid])

        # Need to save the instrument?
        if uid in instruments:
            instrument = instruments[uid] or None
            analysis.setInstrument(instrument)
            if instrument and IReferenceAnalysis.providedBy(analysis):
                # FIX: is_out_of_range returns a 2-tuple, which is always
                # truthy — index [0] for the actual out-of-range flag
                if is_out_of_range(analysis)[0]:
                    # This reference analysis is out of range, so we have
                    # to retract all analyses assigned to this same
                    # instrument that are awaiting for verification
                    if uid not in invalid_instrument_refs:
                        invalid_instrument_refs[uid] = set()
                    invalid_instrument_refs[uid].add(analysis)
                else:
                    # The reference result is valid, so make the instrument
                    # available again for further analyses
                    instrument.setDisposeUntilNextCalibrationTest(False)

        # Need to save the method?
        if uid in methods:
            method = methods[uid] or None
            analysis.setMethod(method)

        # Need to save the analyst?
        if uid in analysts:
            analysis.setAnalyst(analysts[uid])

        # Need to save the uncertainty?
        if uid in uncertainties:
            analysis.setUncertainty(uncertainties[uid])

        # Need to save the detection limit?
        if uid in dlimits and dlimits[uid]:
            analysis.setDetectionLimitOperand(dlimits[uid])

        # Need to save results?
        submitted = False
        if uid in results and results[uid]:
            interims = item_data.get(uid, [])
            analysis.setInterimFields(interims)
            analysis.setResult(results[uid])

            # Can the analysis be submitted?
            # An analysis can only be submitted if all its dependencies
            # are valid and have been submitted already
            can_submit = True
            invalid_states = [
                'to_be_sampled', 'to_be_preserved', 'sample_due',
                'sample_received'
            ]
            for dependency in analysis.getDependencies():
                if in_state(dependency, invalid_states):
                    can_submit = False
                    break
            if can_submit:
                # doActionFor transitions the analysis to verif pending,
                # so must only be done when results are submitted.
                doActionFor(analysis, 'submit')
                submitted = True
                if IRequestAnalysis.providedBy(analysis):
                    # Store the AR uids to be reindexed later.
                    affected_ars.add(brain.getParentUID)

                    if brain.worksheetanalysis_review_state == 'assigned':
                        worksheet_uid = analysis.getWorksheetUID()
                        if worksheet_uid:
                            affected_ws.add(worksheet_uid)

        if not submitted:
            # Analysis has not been submitted, so we need to reindex the
            # object manually, to update catalog's metadata.
            analysis.reindexObject()

    # If a reference analysis with an out-of-range result and instrument
    # assigned has been submitted, retract then routine analyses that are
    # awaiting for verification and with same instrument associated
    retracted = list()
    for invalid_instrument_uid in invalid_instrument_refs.keys():
        query = dict(
            getInstrumentUID=invalid_instrument_uid,
            portal_type=['Analysis', 'DuplicateAnalysis'],
            review_state='to_be_verified',
            cancellation_state='active',
        )
        brains = api.search(query, CATALOG_ANALYSIS_LISTING)
        for brain in brains:
            analysis = api.get_object(brain)
            failed_msg = '{0}: {1}'.format(
                ulocalized_time(DateTime(), long_format=1),
                _("Instrument failed reference test"))
            an_remarks = analysis.getRemarks()
            # FIX: skip empty remarks so we don't prepend a dangling ". "
            analysis.setRemarks(
                '. '.join(filter(None, [an_remarks, failed_msg])))
            doActionFor(analysis, 'retract')
            retracted.append(analysis)

    # If some analyses have been retracted because instrument failed a
    # reference test, then generate a pdf report
    if retracted:
        # Create the Retracted Analyses List
        report = AnalysesRetractedListReport(self.context, self.request,
                                             self.portal_url,
                                             'Retracted analyses',
                                             retracted)

        # Attach the pdf to all ReferenceAnalysis that failed (accessible
        # from Instrument's Internal Calibration Tests list
        pdf = report.toPdf()
        # FIX: values() holds *sets* of reference analyses — iterate the
        # analyses inside each set instead of calling a method on the set
        for refs in invalid_instrument_refs.values():
            for ref in refs:
                ref.setRetractedAnalysesPdfReport(pdf)

        # Send the email (best-effort; FIX: narrow the bare except)
        try:
            report.sendEmail()
        except Exception:
            pass

    # Finally, when we are done processing all applicable analyses, we must
    # attempt to initiate the submit transition on the ARs and Worksheets
    # the processed analyses belong to.
    # We stick only to affected_ars, and affected_ws

    # Reindex the Analysis Requests for which at least one Analysis has
    # been submitted. We do this here because one AR can contain multiple
    # Analyses, so better to just reindex the AR once instead of each time.
    # AR Catalog contains some metadata that that rely on the Analyses an
    # Analysis Request contains.
    if affected_ars:
        query = dict(UID=list(affected_ars), portal_type="AnalysisRequest")
        for ar_brain in api.search(query,
                                   CATALOG_ANALYSIS_REQUEST_LISTING):
            if ar_brain.review_state == 'to_be_verified':
                continue
            ar = api.get_object(ar_brain)
            if isTransitionAllowed(ar, "submit"):
                doActionFor(ar, "submit")
            else:
                ar.reindexObject()

    if affected_ws:
        query = dict(UID=list(affected_ws), portal_type="Worksheet")
        for ws_brain in api.search(query, CATALOG_WORKSHEET_LISTING):
            if ws_brain.review_state == 'to_be_verified':
                continue
            ws = api.get_object(ws_brain)
            if isTransitionAllowed(ws, "submit"):
                doActionFor(ws, "submit")

    message = PMF("Changes saved.")
    self.context.plone_utils.addPortalMessage(message, 'info')
    self.destination_url = self.request.get_header(
        "referer", self.context.absolute_url())
    self.request.response.redirect(self.destination_url)
def __call__(self):
    """Builds the "Results per sample point" report: filters analyses by
    the selection form, groups them per analysis service, renders a
    gnuplot trend chart per service (when enough distinct dates exist)
    and returns the report title and rendered template.
    """
    MinimumResults = self.context.bika_setup.getMinimumResults()
    # Inline icon markup appended next to out-of-range results
    warning_icon = "<img " + \
        "src='" + self.portal_url + "/++resource++bika.lims.images/warning.png' " + \
        "height='9' width='9'/>"
    error_icon = "<img " + \
        "src='" + self.portal_url + "/++resource++bika.lims.images/exclamation.png' " + \
        "height='9' width='9'/>"

    header = _("Results per sample point")
    subheader = _(
        "Analysis results for per sample point and analysis service")

    self.contentFilter = {
        'portal_type': 'Analysis',
        'review_state': ['verified', 'published']
    }

    parms = []
    titles = []

    # Each selection_macros.parse_* call returns a dict with
    # 'contentFilter' (index/value pair), 'parms' and 'titles', or a
    # falsy value when that filter is not in the request
    val = self.selection_macros.parse_client(self.request)
    if val:
        self.contentFilter[val['contentFilter']
                           [0]] = val['contentFilter'][1]
        parms.append(val['parms'])
        titles.append(val['titles'])

    val = self.selection_macros.parse_samplepoint(self.request)
    sp_uid = val
    if val:
        self.contentFilter[val['contentFilter']
                           [0]] = val['contentFilter'][1]
        parms.append(val['parms'])
        titles.append(val['titles'])

    val = self.selection_macros.parse_sampletype(self.request)
    st_uid = val
    if val:
        self.contentFilter[val['contentFilter']
                           [0]] = val['contentFilter'][1]
        parms.append(val['parms'])
        titles.append(val['titles'])

    # An analysis service selection is mandatory for this report
    val = self.selection_macros.parse_analysisservice(self.request)
    if val:
        self.contentFilter[val['contentFilter']
                           [0]] = val['contentFilter'][1]
        parms.append(val['parms'])
    else:
        message = _("No analysis services were selected.")
        self.context.plone_utils.addPortalMessage(message, 'error')
        return self.default_template()

    val = self.selection_macros.parse_daterange(self.request,
                                                'getDateSampled',
                                                'DateSampled')
    if val:
        self.contentFilter[val['contentFilter']
                           [0]] = val['contentFilter'][1]
        parms.append(val['parms'])
        titles.append(val['titles'])

    val = self.selection_macros.parse_state(
        self.request,
        'bika_worksheetanalysis_workflow',
        'worksheetanalysis_review_state',
        'Worksheet state')
    if val:
        self.contentFilter[val['contentFilter']
                           [0]] = val['contentFilter'][1]
        parms.append(val['parms'])

    # Query the catalog and store analysis data in a dict
    analyses = {}
    out_of_range_count = 0
    in_shoulder_range_count = 0
    analysis_count = 0

    proxies = self.bika_analysis_catalog(self.contentFilter)

    if not proxies:
        message = _("No analyses matched your query")
        self.context.plone_utils.addPortalMessage(message, 'error')
        return self.default_template()

    # Compile a list of dictionaries, with all relevant analysis data
    for analysis in proxies:
        analysis = analysis.getObject()
        result = analysis.getResult()
        client = analysis.aq_parent.aq_parent
        uid = analysis.UID()
        keyword = analysis.getKeyword()
        service_title = "%s (%s)" % (analysis.Title(), keyword)

        # NOTE(review): despite the name, this holds the *out-of-range*
        # flag (first element of the is_out_of_range tuple) — the
        # 'False'/'1' comparisons further down look like leftovers from
        # an older in-range API; confirm the intended semantics
        result_in_range = is_out_of_range(analysis)[0]

        if service_title not in analyses.keys():
            analyses[service_title] = []

        try:
            result = float(analysis.getResult())
        except:
            # XXX Unfloatable analysis results should be indicated
            continue

        analyses[service_title].append({
            # The report should not mind taking 'analysis' in place of
            # 'service' - the service field values are placed in analysis.
            'service': analysis,
            'obj': analysis,
            'Request ID': analysis.aq_parent.getId(),
            'Analyst': analysis.getAnalyst(),
            'Result': result,
            'Sampled': analysis.getDateSampled(),
            'Captured': analysis.getResultCaptureDate(),
            'Uncertainty': analysis.getUncertainty(),
            'result_in_range': result_in_range,
            'Unit': analysis.getUnit(),
            'Keyword': keyword,
            'icons': '',
        })
        analysis_count += 1

    keys = analyses.keys()
    keys.sort()

    parms += [
        {
            "title": _("Total analyses"),
            "value": analysis_count
        },
    ]

    ## This variable is output to the TAL
    self.report_data = {
        'header': header,
        'subheader': subheader,
        'parms': parms,
        'tables': [],
        'footnotes': [],
    }

    # gnuplot script template; %(...)s placeholders are filled per service
    plotscript = """
    set terminal png transparent truecolor enhanced size 700,350 font "Verdana, 8"
    set title "%(title)s"
    set xlabel "%(xlabel)s"
    set ylabel "%(ylabel)s"
    set key off
    #set logscale
    set timefmt "%(date_format_long)s"
    set xdata time
    set format x "%(date_format_short)s\\n%(time_format)s"
    set xrange ["%(x_start)s":"%(x_end)s"]
    set auto fix
    set offsets graph 0, 0, 1, 1
    set xtics border nomirror rotate by 90 font "Verdana, 5" offset 0,-3
    set ytics nomirror

    f(x) = mean_y
    fit f(x) 'gpw_DATAFILE_gpw' u 1:3 via mean_y
    stddev_y = sqrt(FIT_WSSR / (FIT_NDF + 1))

    plot mean_y-stddev_y with filledcurves y1=mean_y lt 1 lc rgb "#efefef",\
    mean_y+stddev_y with filledcurves y1=mean_y lt 1 lc rgb "#efefef",\
    mean_y with lines lc rgb '#ffffff' lw 3,\
    "gpw_DATAFILE_gpw" using 1:3 title 'data' with points pt 7 ps 1 lc rgb '#0000ee' lw 2,\
    '' using 1:3 smooth unique lc rgb '#aaaaaa' lw 2,\
    '' using 1:4 with lines lc rgb '#000000' lw 1,\
    '' using 1:5 with lines lc rgb '#000000' lw 1"""

    ## Compile plots and format data for display
    for service_title in keys:
        # used to calculate XY axis ranges
        result_values = [int(o['Result']) for o in
                         analyses[service_title]]
        result_dates = [o['Sampled'] for o in analyses[service_title]]

        parms = []
        plotdata = str()

        range_min = ''
        range_max = ''

        for a in analyses[service_title]:
            a['Sampled'] = a['Sampled'].strftime(
                self.date_format_long) if a['Sampled'] else ''
            a['Captured'] = a['Captured'].strftime(self.date_format_long) if \
                a['Captured'] else ''

            R = a['Result']
            U = a['Uncertainty']

            a['Result'] = a['obj'].getFormattedResult()

            in_range = a['result_in_range']
            # result out of range
            # NOTE(review): in_range holds a boolean from
            # is_out_of_range(...)[0]; str(True/False) can never equal
            # '1', and the 'False' comparison appears inverted relative
            # to the flag's meaning — confirm against the legacy
            # in-range API this was migrated from
            if str(in_range) == 'False':
                out_of_range_count += 1
                a['Result'] = "%s %s" % (a['Result'], error_icon)

            # result almost out of range
            if str(in_range) == '1':
                in_shoulder_range_count += 1
                a['Result'] = "%s %s" % (a['Result'], warning_icon)

            spec = {}
            if hasattr(a["obj"], 'specification') and a["obj"].specification:
                spec = a["obj"].specification

            plotdata += "%s\t%s\t%s\t%s\t%s\n" % (
                a['Sampled'],
                R,
                spec.get("min", ""),
                spec.get("max", ""),
                U and U or 0,
            )
            # NOTE(review): the encoded value is discarded — this call
            # has no effect as written
            plotdata.encode('utf-8')

        unit = analyses[service_title][0]['Unit']
        # Only plot when there are at least MinimumResults distinct dates
        if MinimumResults <= len(dict([(d, d) for d in result_dates])):
            _plotscript = str(plotscript) % {
                'title': "",
                'xlabel': t(_("Date Sampled")),
                'ylabel': unit and unit or '',
                'x_start': "%s" % min(result_dates).strftime(
                    self.date_format_long),
                'x_end': "%s" % max(result_dates).strftime(
                    self.date_format_long),
                'date_format_long': self.date_format_long,
                'date_format_short': self.date_format_short,
                'time_format': self.time_format,
            }

            plot_png = plot(str(plotdata),
                            plotscript=str(_plotscript),
                            usefifo=False)

            # Temporary PNG data file
            fh, data_fn = tempfile.mkstemp(suffix='.png')
            os.write(fh, plot_png)
            plot_url = data_fn
            self.request['to_remove'].append(data_fn)
            # NOTE(review): duplicate assignment — plot_url already set
            plot_url = data_fn
        else:
            plot_url = ""

        table = {
            'title': "%s: %s" % (
                t(_("Analysis Service")),
                service_title),
            'parms': parms,
            'columns': ['Request ID',
                        'Analyst',
                        'Result',
                        'Sampled',
                        'Captured'],
            'data': analyses[service_title],
            'plot_url': plot_url,
        }

        self.report_data['tables'].append(table)

    translate = self.context.translate

    ## footnotes
    if out_of_range_count:
        msgid = _("Analyses out of range")
        self.report_data['footnotes'].append(
            "%s %s" % (error_icon, t(msgid)))
    if in_shoulder_range_count:
        msgid = _("Analyses in error shoulder range")
        self.report_data['footnotes'].append(
            "%s %s" % (warning_icon, t(msgid)))

    self.report_data['parms'].append(
        {"title": _("Analyses out of range"),
         "value": out_of_range_count})
    self.report_data['parms'].append(
        {"title": _("Analyses in error shoulder range"),
         "value": in_shoulder_range_count})

    title = t(header)
    if titles:
        title += " (%s)" % " ".join(titles)
    return {
        'report_title': title,
        'report_data': self.template(),
    }
def workflow_action_submit(self):
    """Handles the "submit" workflow action for the selected analyses:
    stores the form values on each analysis, queues the submit action in
    an ActionsPool, retracts to-be-verified analyses bound to instruments
    that failed a reference test and attaches a retraction report.
    """
    uids = self.get_selected_uids()
    if not uids:
        message = _('No items selected.')
        self.context.plone_utils.addPortalMessage(message, 'info')
        self.request.response.redirect(self.context.absolute_url())
        return

    if not is_active(self.context):
        message = _('Item is inactive.')
        self.context.plone_utils.addPortalMessage(message, 'info')
        self.request.response.redirect(self.context.absolute_url())
        return

    form = self.request.form
    remarks = form.get('Remarks', [{}])[0]
    results = form.get('Result', [{}])[0]
    methods = form.get('Method', [{}])[0]
    instruments = form.get('Instrument', [{}])[0]
    analysts = self.request.form.get('Analyst', [{}])[0]
    uncertainties = self.request.form.get('Uncertainty', [{}])[0]
    dlimits = self.request.form.get('DetectionLimit', [{}])[0]

    # XXX combine data from multiple bika listing tables.
    # TODO: Is this necessary?
    item_data = {}
    if 'item_data' in form:
        if type(form['item_data']) == list:
            for i_d in form['item_data']:
                for i, d in json.loads(i_d).items():
                    item_data[i] = d
        else:
            item_data = json.loads(form['item_data'])

    # Store invalid instruments-ref.analyses
    invalid_instrument_refs = dict()

    # We manually query by all analyses uids at once here instead of using
    # _get_selected_items from the base class, cause that function fetches
    # the objects by uid, but sequentially one by one
    actions_pool = ActionsPool()
    query = dict(UID=uids, cancellation_state="active")
    for brain in api.search(query, CATALOG_ANALYSIS_LISTING):
        uid = api.get_uid(brain)
        analysis = api.get_object(brain)

        # Need to save remarks?
        if uid in remarks:
            analysis.setRemarks(remarks[uid])

        # Need to save the instrument?
        if uid in instruments:
            instrument = instruments[uid] or None
            analysis.setInstrument(instrument)
            if instrument and IReferenceAnalysis.providedBy(analysis):
                # FIX: is_out_of_range returns a 2-tuple, which is always
                # truthy — index [0] for the actual out-of-range flag
                if is_out_of_range(analysis)[0]:
                    # This reference analysis is out of range, so we have
                    # to retract all analyses assigned to this same
                    # instrument that are awaiting for verification
                    if uid not in invalid_instrument_refs:
                        invalid_instrument_refs[uid] = set()
                    invalid_instrument_refs[uid].add(analysis)
                else:
                    # The reference result is valid, so make the instrument
                    # available again for further analyses
                    instrument.setDisposeUntilNextCalibrationTest(False)

        # Need to save the method?
        if uid in methods:
            method = methods[uid] or None
            analysis.setMethod(method)

        # Need to save the analyst?
        if uid in analysts:
            analysis.setAnalyst(analysts[uid])

        # Need to save the uncertainty?
        if uid in uncertainties:
            analysis.setUncertainty(uncertainties[uid])

        # Need to save the detection limit?
        analysis.setDetectionLimitOperand(dlimits.get(uid, ""))

        interims = item_data.get(uid, analysis.getInterimFields())
        analysis.setInterimFields(interims)
        # FIX: lookup must use the uid variable, not the literal 'uid' —
        # the literal made every submitted result be silently ignored
        analysis.setResult(results.get(uid, analysis.getResult()))

        # Add this analysis to the actions pool. We want to submit all
        # them together, when all have values set for results, interims...
        actions_pool.add(analysis, "submit")

    # Submit all analyses
    actions_pool.resume()

    # If a reference analysis with an out-of-range result and instrument
    # assigned has been submitted, retract then routine analyses that are
    # awaiting for verification and with same instrument associated
    retracted = list()
    for invalid_instrument_uid in invalid_instrument_refs.keys():
        query = dict(
            getInstrumentUID=invalid_instrument_uid,
            portal_type=['Analysis', 'DuplicateAnalysis'],
            review_state='to_be_verified',
            cancellation_state='active',
        )
        brains = api.search(query, CATALOG_ANALYSIS_LISTING)
        for brain in brains:
            analysis = api.get_object(brain)
            failed_msg = '{0}: {1}'.format(
                ulocalized_time(DateTime(), long_format=1),
                _("Instrument failed reference test"))
            an_remarks = analysis.getRemarks()
            # FIX: skip empty remarks so we don't prepend a dangling ". "
            analysis.setRemarks(
                '. '.join(filter(None, [an_remarks, failed_msg])))
            doActionFor(analysis, 'retract')
            retracted.append(analysis)

    # If some analyses have been retracted because instrument failed a
    # reference test, then generate a pdf report
    if retracted:
        # Create the Retracted Analyses List
        report = AnalysesRetractedListReport(self.context, self.request,
                                             self.portal_url,
                                             'Retracted analyses',
                                             retracted)

        # Attach the pdf to all ReferenceAnalysis that failed (accessible
        # from Instrument's Internal Calibration Tests list
        pdf = report.toPdf()
        # FIX: values() holds *sets* of reference analyses — iterate the
        # analyses inside each set instead of calling a method on the set
        for refs in invalid_instrument_refs.values():
            for ref in refs:
                ref.setRetractedAnalysesPdfReport(pdf)

        # Send the email (best-effort; FIX: narrow the bare except)
        try:
            report.sendEmail()
        except Exception:
            pass

    message = PMF("Changes saved.")
    self.context.plone_utils.addPortalMessage(message, 'info')
    self.destination_url = self.request.get_header(
        "referer", self.context.absolute_url())
    self.request.response.redirect(self.destination_url)