def getServices(self, field, selected_only=False):
    """ Returns a list of Analysis Services keyed by POC and Category
        selected_only - set this to return only checked services (for view widget)
        returns {('poc_id', 'Point Of Capture'):
                    {('cat_id', 'Category Title'):
                        [('serviceUID', 'service Title'), ..]}}
    """
    pc = getToolByName(self, 'portal_catalog')
    allservices = [p.getObject() for p in
                   pc(portal_type="AnalysisService",
                      sort_on='sortable_title')]
    selectedservices = getattr(field, field.accessor)()
    res = {}
    for poc_id in POINTS_OF_CAPTURE.keys():
        poc_title = POINTS_OF_CAPTURE.getValue(poc_id)
        res[(poc_id, poc_title)] = {}
    if selected_only:
        services = selectedservices
    else:
        services = allservices
    for service in services:
        cat = (service.getCategoryUID(), service.getCategoryName())
        poc = (service.getPointOfCapture(),
               POINTS_OF_CAPTURE.getValue(service.getPointOfCapture()))
        srv = (service.UID(), service.Title())
        if not res[poc].has_key(cat):
            res[poc][cat] = []
        res[poc][cat].append(srv)
    return res
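# NOTE: all snippets in this collection assume the POINTS_OF_CAPTURE vocabulary
# from bika.lims.config. A minimal sketch of what they rely on is shown below;
# the exact titles and i18n wrapping may differ between versions, so treat this
# as an assumption rather than the canonical definition:
#
#     from Products.Archetypes.public import DisplayList
#     POINTS_OF_CAPTURE = DisplayList((
#         ('field', 'Field Analyses'),
#         ('lab', 'Lab Analyses'),
#     ))
#
# getValue('field') then returns 'Field Analyses', and iterating the list
# yields the ids ('field', 'lab'). A hypothetical caller could walk the nested
# dict returned by getServices like this (names are illustrative only):
#
#     services = profile.getServices(field, selected_only=True)
#     for (poc_id, poc_title), categories in services.items():
#         for (cat_uid, cat_title), items in categories.items():
#             for service_uid, service_title in items:
#                 print poc_title, cat_title, service_title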
def __call__(self):
    ar = self.context
    workflow = getToolByName(ar, 'portal_workflow')
    if workflow.getInfoFor(ar, 'cancellation_state') == "cancelled":
        self.request.response.redirect(ar.absolute_url())
    elif not (getSecurityManager().checkPermission(EditResults, ar)):
        self.request.response.redirect(ar.absolute_url())
    else:
        self.tables = {}
        show_cats = self.context.bika_setup.getCategoriseAnalysisServices()
        for poc in POINTS_OF_CAPTURE:
            if self.context.getAnalyses(getPointOfCapture=poc):
                t = self.createAnalysesView(ar,
                                            self.request,
                                            getPointOfCapture=poc,
                                            sort_on='getServiceTitle',
                                            show_categories=show_cats)
                t.form_id = "ar_manage_results_%s" % poc
                t.allow_edit = True
                t.review_states[0]['transitions'] = [{'id': 'submit'},
                                                     {'id': 'retract'},
                                                     {'id': 'verify'}]
                t.show_select_column = True
                poc_value = POINTS_OF_CAPTURE.getValue(poc)
                self.tables[poc_value] = t.contents_table()
        self.checkInstrumentsValidity()
    return self.template()
def _analysis_data(self, analysis):
    """ Returns a dict that represents the analysis
    """
    decimalmark = analysis.aq_parent.aq_parent.getDecimalMark()
    keyword = analysis.getKeyword()
    andict = {
        'obj': analysis,
        'id': analysis.id,
        'title': analysis.Title(),
        'keyword': keyword,
        'scientific_name': analysis.getScientificName(),
        'accredited': analysis.getAccredited(),
        'point_of_capture': to_utf8(
            POINTS_OF_CAPTURE.getValue(analysis.getPointOfCapture())),
        'category': to_utf8(analysis.getCategoryTitle()),
        'result': analysis.getResult(),
        'unit': to_utf8(analysis.getUnit()),
        'formatted_unit': format_supsub(to_utf8(analysis.getUnit())),
        'capture_date': analysis.getResultCaptureDate(),
        'request_id': analysis.aq_parent.getId(),
        'formatted_result': '',
        'uncertainty': analysis.getUncertainty(),
        'formatted_uncertainty': '',
        'retested': analysis.isRetest(),
        'remarks': to_utf8(analysis.getRemarks()),
        'outofrange': False,
        'type': analysis.portal_type,
        'reftype': analysis.getReferenceType() if hasattr(
            analysis, 'getReferenceType') else None,
        'worksheet': None,
        'specs': {},
        'formatted_specs': '',
        'review_state': api.get_workflow_status_of(analysis),
    }
    andict['refsample'] = analysis.getSample().id \
        if IReferenceAnalysis.providedBy(analysis) \
        else analysis.getRequestID()

    specs = analysis.getResultsRange()
    andict['specs'] = specs
    scinot = self.context.bika_setup.getScientificNotationReport()
    andict['formatted_result'] = analysis.getFormattedResult(
        specs=specs, sciformat=int(scinot), decimalmark=decimalmark)

    fs = ''
    if specs.get('min', None) and specs.get('max', None):
        fs = '%s - %s' % (specs['min'], specs['max'])
    elif specs.get('min', None):
        fs = '> %s' % specs['min']
    elif specs.get('max', None):
        fs = '< %s' % specs['max']
    andict['formatted_specs'] = formatDecimalMark(fs, decimalmark)
    andict['formatted_uncertainty'] = format_uncertainty(
        analysis, analysis.getResult(), decimalmark=decimalmark,
        sciformat=int(scinot))

    # Out of range?
    andict['outofrange'] = is_out_of_range(analysis)[0]
    return andict
def service_info(self, service):
    ret = {
        "Category": service.getCategory().Title(),
        "Category_uid": service.getCategory().UID(),
        "Service": service.Title(),
        "Service_uid": service.UID(),
        "PointOfCapture": service.getPointOfCapture(),
        "PointOfCapture_title": POINTS_OF_CAPTURE.getValue(
            service.getPointOfCapture()),
    }
    return ret
def service_info(self, service):
    ret = {
        "Category": service.getCategory().Title(),
        "Category_uid": service.getCategory().UID(),
        "Service": service.Title(),
        "Service_uid": service.UID(),
        "Keyword": service.getKeyword(),
        "PointOfCapture": service.getPointOfCapture(),
        "PointOfCapture_title": POINTS_OF_CAPTURE.getValue(
            service.getPointOfCapture()),
    }
    return ret
def walk(deps):
    for service_uid, service_deps in deps.items():
        service = rc.lookupObject(service_uid)
        category = service.getCategory()
        cat = '%s_%s' % (category.UID(), category.Title())
        poc = '%s_%s' % (service.getPointOfCapture(),
                         POINTS_OF_CAPTURE.getValue(service.getPointOfCapture()))
        srv = '%s_%s' % (service.UID(), service.Title())
        if not result.has_key(poc):
            result[poc] = {}
        if not result[poc].has_key(cat):
            result[poc][cat] = []
        result[poc][cat].append(srv)
        if service_deps:
            walk(service_deps)
def __call__(self):
    ar = self.context
    self.tables = {}
    for poc in POINTS_OF_CAPTURE:
        if self.context.getAnalyses(getPointOfCapture=poc):
            t = AnalysesView(ar, self.request, getPointOfCapture=poc)
            t.allow_edit = False
            t.show_select_column = False
            self.tables[POINTS_OF_CAPTURE.getValue(poc)] = t.contents_table()
    return self.template()
def __call__(self):
    if 'transition' in self.request.form:
        doActionFor(self.context, self.request.form['transition'])

    # Add an Analysis request creation button
    mtool = get_tool('portal_membership', context=self.context)
    if mtool.checkPermission(AddAnalysisRequest, self.context):
        self.context_actions[_('Add Analysis Request')] = \
            {'url': "ar_add?ar_count=1",
             'icon': '++resource++bika.lims.images/add.png'}

    ## render header table
    self.header_table = HeaderTableView(self.context, self.request)

    ## Create Sample Partitions table
    parts_table = None
    if not self.allow_edit:
        p = SamplePartitionsView(self.context, self.request)
        p.allow_edit = self.allow_edit
        p.show_select_column = self.allow_edit
        p.show_workflow_action_buttons = self.allow_edit
        p.show_column_toggles = False
        p.show_select_all_checkbox = False
        p.review_states[0]['transitions'] = [{'id': 'empty'}, ]  # none
        parts_table = p.contents_table()
    self.parts = parts_table

    ## Create Field and Lab Analyses tables
    self.tables = {}
    if not self.allow_edit:
        for poc in POINTS_OF_CAPTURE:
            t = SampleAnalysesView(self.context,
                                   self.request,
                                   getPointOfCapture=poc,
                                   sort_on='getId')
            t.form_id = "sample_%s_analyses" % poc
            if poc == 'field':
                t.review_states[0]['columns'].remove('DueDate')
            t.show_column_toggles = False
            t.review_states[0]['transitions'] = [{'id': 'submit'},
                                                 {'id': 'retract'},
                                                 {'id': 'verify'}]
            self.tables[POINTS_OF_CAPTURE.getValue(poc)] = t.contents_table()
    return self.template()
def __call__(self):
    ar = self.context
    workflow = getToolByName(ar, 'portal_workflow')
    if workflow.getInfoFor(ar, 'cancellation_state') == "cancelled":
        self.request.response.redirect(ar.absolute_url())
    elif not (getSecurityManager().checkPermission(EditResults, ar)):
        self.request.response.redirect(ar.absolute_url())
    else:
        self.tables = {}
        show_cats = self.context.bika_setup.getCategoriseAnalysisServices()
        for poc in POINTS_OF_CAPTURE:
            if self.context.getAnalyses(getPointOfCapture=poc):
                t = self.createAnalysesView(ar,
                                            self.request,
                                            getPointOfCapture=poc,
                                            sort_on='title',
                                            show_categories=show_cats,
                                            getAnalysisRequestUID=ar.UID())
                t.form_id = "ar_manage_results_%s" % poc
                t.allow_edit = True
                t.review_states[0]['transitions'] = [{'id': 'submit'},
                                                     {'id': 'retract'},
                                                     {'id': 'verify'}]
                t.show_select_column = True
                poc_value = POINTS_OF_CAPTURE.getValue(poc)
                self.tables[poc_value] = t.contents_table()

        # If a general retract was done, raise a warning
        if workflow.getInfoFor(ar, 'review_state') == 'sample_received':
            allstatus = list()
            for analysis in ar.getAnalyses():
                status = workflow.getInfoFor(analysis.getObject(),
                                             'review_state')
                if status not in ['retracted', 'to_be_verified', 'verified']:
                    allstatus = []
                    break
                else:
                    allstatus.append(status)
            if len(allstatus) > 0:
                message = "General Retract Done. Submit this AR manually."
                self.context.plone_utils.addPortalMessage(message, 'warning')
        self.checkInstrumentsValidity()
    return self.template()
def __call__(self):
    if 'transition' in self.request.form:
        doActionFor(self.context, self.request.form['transition'])

    ## render header table
    self.header_table = HeaderTableView(self.context, self.request)

    ## Create Sample Partitions table
    parts_table = None
    if not self.allow_edit:
        p = SamplePartitionsView(self.context, self.request)
        p.allow_edit = self.allow_edit
        p.show_select_column = self.allow_edit
        p.show_workflow_action_buttons = self.allow_edit
        p.show_column_toggles = False
        p.show_select_all_checkbox = False
        p.review_states[0]['transitions'] = [{'id': 'empty'}, ]  # none
        parts_table = p.contents_table()
    self.parts = parts_table

    ## Create Field and Lab Analyses tables
    self.tables = {}
    if not self.allow_edit:
        for poc in POINTS_OF_CAPTURE:
            if not self.context.getAnalyses({'getPointOfCapture': poc}):
                continue
            t = SampleAnalysesView(self.context,
                                   self.request,
                                   getPointOfCapture=poc,
                                   sort_on='getServiceTitle')
            t.form_id = "sample_%s_analyses" % poc
            if poc == 'field':
                t.review_states[0]['columns'].remove('DueDate')
            t.show_column_toggles = False
            t.review_states[0]['transitions'] = [{'id': 'submit'},
                                                 {'id': 'retract'},
                                                 {'id': 'verify'}]
            self.tables[POINTS_OF_CAPTURE.getValue(poc)] = t.contents_table()
    return self.template()
def walk(deps):
    for depserv_uid, depserv_deps in deps.items():
        if depserv_uid == uid:
            continue
        depserv = services[depserv_uid]
        category = depserv.getCategory()
        cat = '%s_%s' % (category.UID(), category.Title())
        poc = '%s_%s' % \
            (depserv.getPointOfCapture(),
             POINTS_OF_CAPTURE.getValue(depserv.getPointOfCapture()))
        srv = '%s_%s' % (depserv.UID(), depserv.Title())
        if not deps.has_key(poc):
            deps[poc] = {}
        if not deps[poc].has_key(cat):
            deps[poc][cat] = []
        deps[poc][cat].append(srv)
        if depserv_deps:
            walk(depserv_deps)
def __call__(self):
    ar = self.context
    workflow = getToolByName(ar, 'portal_workflow')
    if workflow.getInfoFor(ar, 'cancellation_state') == "cancelled":
        self.request.response.redirect(ar.absolute_url())
    elif not (getSecurityManager().checkPermission(EditResults, ar)):
        self.request.response.redirect(ar.absolute_url())
    else:
        self.tables = {}
        show_cats = self.context.bika_setup.getCategoriseAnalysisServices()
        for poc in POINTS_OF_CAPTURE:
            if self.context.getAnalyses(getPointOfCapture=poc):
                t = self.createAnalysesView(ar,
                                            self.request,
                                            getPointOfCapture=poc,
                                            sort_on='title',
                                            show_categories=show_cats,
                                            getRequestUID=ar.UID())
                t.form_id = "ar_manage_results_%s" % poc
                t.allow_edit = True
                t.review_states[0]['transitions'] = [{'id': 'submit'},
                                                     {'id': 'retract'},
                                                     {'id': 'verify'}]
                t.show_select_column = True
                poc_value = POINTS_OF_CAPTURE.getValue(poc)
                self.tables[poc_value] = t.contents_table()

        # If a general retract was done, raise a warning
        if workflow.getInfoFor(ar, 'review_state') == 'sample_received':
            allstatus = list()
            for analysis in ar.getAnalyses():
                status = workflow.getInfoFor(analysis.getObject(),
                                             'review_state')
                if status not in ['retracted', 'to_be_verified', 'verified']:
                    allstatus = []
                    break
                else:
                    allstatus.append(status)
            if len(allstatus) > 0:
                message = "General Retract Done. Submit this AR manually."
                self.context.plone_utils.addPortalMessage(message, 'warning')
        self.checkInstrumentsValidity()
    return self.template()
def __call__(self):
    ar = self.context
    workflow = getToolByName(ar, 'portal_workflow')
    if workflow.getInfoFor(ar, 'cancellation_state') == "cancelled":
        self.request.response.redirect(ar.absolute_url())
    elif not (getSecurityManager().checkPermission(ManageResults, ar)):
        self.request.response.redirect(ar.absolute_url())
    else:
        self.tables = {}
        for poc in POINTS_OF_CAPTURE:
            if self.context.getAnalyses(getPointOfCapture=poc):
                t = AnalysesView(ar, self.request, getPointOfCapture=poc)
                t.allow_edit = True
                t.review_states[0]['transitions'] = ['submit',
                                                     'retract',
                                                     'verify']
                t.show_select_column = True
                self.tables[POINTS_OF_CAPTURE.getValue(poc)] = \
                    t.contents_table()
    return self.template()
def _get_categorized_services(self, ar):
    self.any_accredited = False
    self.services = {}
    analyses = ar.getAnalyses(full_objects=True,
                              review_state=self.publish_states)
    analyses.sort(lambda x, y: cmp(x.Title().lower(), y.Title().lower()))
    for analysis in analyses:
        service = analysis.getService()
        poc = to_utf8(POINTS_OF_CAPTURE.getValue(service.getPointOfCapture()))
        cat = to_utf8(service.getCategoryTitle())
        if poc not in self.services:
            self.services[poc] = {}
        if cat not in self.services[poc]:
            self.services[poc][cat] = []
        if self.check_previous:
            self.get_previous_results(analysis)
        if service not in self.services[poc][cat]:
            self.services[poc][cat].append(service)
        if service.getAccredited():
            self.any_accredited = True
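# A minimal, illustrative sketch (not part of the original view) of how the
# nested self.services mapping built above -- {poc_title: {category_title:
# [service, ...]}} -- might be flattened for a report template. The helper
# name and the tuple layout are assumptions for illustration only.
def _flatten_categorized_services(services):
    """Yield (poc_title, category_title, service) tuples in a stable order."""
    for poc_title in sorted(services.keys()):
        for cat_title in sorted(services[poc_title].keys()):
            for service in services[poc_title][cat_title]:
                yield (poc_title, cat_title, service)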
def __call__(self):
    if 'transition' in self.request.form:
        doActionFor(self.context, self.request.form['transition'])

    ## render header table
    self.header_table = HeaderTableView(self.context, self.request)

    ## Create Sample Partitions table
    parts_table = None
    if not self.allow_edit:
        p = SamplePartitionsView(self.context, self.request)
        p.allow_edit = self.allow_edit
        p.show_select_column = self.allow_edit
        p.show_workflow_action_buttons = self.allow_edit
        p.show_column_toggles = False
        p.show_select_all_checkbox = False
        p.review_states[0]['transitions'] = [{'id': 'empty'}, ]  # none
        parts_table = p.contents_table()
    self.parts = parts_table

    ## Create Field and Lab Analyses tables
    self.tables = {}
    if not self.allow_edit:
        for poc in POINTS_OF_CAPTURE:
            if not self.context.getAnalyses({'getPointOfCapture': poc}):
                continue
            t = SampleAnalysesView(self.context,
                                   self.request,
                                   getPointOfCapture=poc,
                                   sort_on='getServiceTitle')
            t.form_id = "sample_%s_analyses" % poc
            if poc == 'field':
                t.review_states[0]['columns'].remove('DueDate')
            t.show_column_toggles = False
            t.review_states[0]['transitions'] = [{'id': 'submit'},
                                                 {'id': 'retract'},
                                                 {'id': 'verify'}]
            self.tables[POINTS_OF_CAPTURE.getValue(poc)] = t.contents_table()
    return self.template()
def _get_categorized_qcservices(self, ar):
    self.qcservices = {}
    for qcanalysis in ar.getQCAnalyses():
        service = qcanalysis.getService()
        qctype = ''
        if qcanalysis.portal_type == 'DuplicateAnalysis':
            qctype = "d"
        elif qcanalysis.portal_type == 'ReferenceAnalysis':
            qctype = qcanalysis.getReferenceType()
        else:
            continue

        if qctype not in self.qcservices:
            self.qcservices[qctype] = {}
        poc = to_utf8(
            POINTS_OF_CAPTURE.getValue(service.getPointOfCapture()))
        if poc not in self.qcservices[qctype]:
            self.qcservices[qctype][poc] = {}
        cat = to_utf8(service.getCategoryTitle())
        if cat not in self.qcservices[qctype][poc]:
            self.qcservices[qctype][poc][cat] = []
        # if service not in self.qcservices[qctype][poc][cat]:
        self.qcservices[qctype][poc][cat].append(
            {'service': service, 'analysis': qcanalysis})
def _analysis_data(self, analysis, decimalmark=None):
    keyword = analysis.getKeyword()
    service = analysis.getService()
    andict = {
        'obj': analysis,
        'id': analysis.id,
        'title': analysis.Title(),
        'keyword': keyword,
        'scientific_name': service.getScientificName(),
        'accredited': service.getAccredited(),
        'point_of_capture': to_utf8(
            POINTS_OF_CAPTURE.getValue(service.getPointOfCapture())),
        'category': to_utf8(service.getCategoryTitle()),
        'result': analysis.getResult(),
        'isnumber': isnumber(analysis.getResult()),
        'unit': to_utf8(service.getUnit()),
        'formatted_unit': format_supsub(to_utf8(service.getUnit())),
        'capture_date': analysis.getResultCaptureDate(),
        'request_id': analysis.aq_parent.getId(),
        'formatted_result': '',
        'uncertainty': analysis.getUncertainty(),
        'formatted_uncertainty': '',
        'retested': analysis.getRetested(),
        'remarks': to_utf8(analysis.getRemarks()),
        'resultdm': to_utf8(analysis.getResultDM()),
        'outofrange': False,
        'type': analysis.portal_type,
        'reftype': analysis.getReferenceType()
            if hasattr(analysis, 'getReferenceType') else None,
        'worksheet': None,
        'specs': {},
        'formatted_specs': '',
    }

    if analysis.portal_type == 'DuplicateAnalysis':
        andict['reftype'] = 'd'

    ws = analysis.getBackReferences('WorksheetAnalysis')
    andict['worksheet'] = ws[0].id if ws and len(ws) > 0 else None
    andict['worksheet_url'] = ws[0].absolute_url if ws and len(ws) > 0 else None
    andict['refsample'] = analysis.getSample().id \
        if analysis.portal_type == 'Analysis' \
        else '%s - %s' % (analysis.aq_parent.id, analysis.aq_parent.Title())

    if analysis.portal_type == 'ReferenceAnalysis':
        # The analysis is a Control or Blank. We might use the
        # reference results instead other specs
        uid = analysis.getServiceUID()
        specs = analysis.aq_parent.getResultsRangeDict().get(uid, {})
    else:
        # Get the specs directly from the analysis. The getResultsRange
        # function already takes care about which are the specs to be used:
        # AR, client or lab.
        specs = analysis.getResultsRange()

    andict['specs'] = specs
    scinot = self.context.bika_setup.getScientificNotationReport()
    andict['formatted_result'] = analysis.getFormattedResult(
        specs=specs, sciformat=int(scinot), decimalmark=decimalmark)

    fs = ''
    if specs.get('min', None) and specs.get('max', None):
        fs = '%s - %s' % (specs['min'], specs['max'])
    elif specs.get('min', None):
        fs = '> %s' % specs['min']
    elif specs.get('max', None):
        fs = '< %s' % specs['max']
    andict['formatted_specs'] = formatDecimalMark(fs, decimalmark)
    andict['formatted_uncertainty'] = format_uncertainty(
        analysis, analysis.getResult(), decimalmark=decimalmark,
        sciformat=int(scinot))

    # Out of range?
    if specs:
        adapters = getAdapters((analysis, ), IResultOutOfRange)
        bsc = getToolByName(self.context, "bika_setup_catalog")
        for name, adapter in adapters:
            ret = adapter(specification=specs)
            if ret and ret['out_of_range']:
                andict['outofrange'] = True
                break
    return andict
def _analysis_data(self, analysis, decimalmark=None):
    keyword = analysis.getKeyword()
    service = analysis.getService()
    andict = {
        'obj': analysis,
        'id': analysis.id,
        'title': analysis.Title(),
        'keyword': keyword,
        'scientific_name': service.getScientificName(),
        'accredited': service.getAccredited(),
        'point_of_capture': to_utf8(
            POINTS_OF_CAPTURE.getValue(service.getPointOfCapture())),
        'category': to_utf8(service.getCategoryTitle()),
        'result': analysis.getResult(),
        'isnumber': isnumber(analysis.getResult()),
        'unit': to_utf8(service.getUnit()),
        'formatted_unit': format_supsub(to_utf8(service.getUnit())),
        'capture_date': analysis.getResultCaptureDate(),
        'request_id': analysis.aq_parent.getId(),
        'formatted_result': '',
        'uncertainty': analysis.getUncertainty(),
        'formatted_uncertainty': '',
        'retested': analysis.getRetested(),
        'remarks': to_utf8(analysis.getRemarks()),
        'resultdm': to_utf8(analysis.getResultDM()),
        'outofrange': False,
        'type': analysis.portal_type,
        'reftype': analysis.getReferenceType()
            if hasattr(analysis, 'getReferenceType') else None,
        'worksheet': None,
        'specs': {},
        'formatted_specs': '',
    }

    if analysis.portal_type == 'DuplicateAnalysis':
        andict['reftype'] = 'd'

    ws = analysis.getBackReferences('WorksheetAnalysis')
    andict['worksheet'] = ws[0].id if ws and len(ws) > 0 else None
    andict['worksheet_url'] = ws[0].absolute_url if ws and len(ws) > 0 else None
    andict['refsample'] = analysis.getSample().id \
        if analysis.portal_type == 'Analysis' \
        else '%s - %s' % (analysis.aq_parent.id, analysis.aq_parent.Title())

    # Which analysis specs must be used?
    # Try first with those defined at AR Publish Specs level
    if analysis.portal_type == 'ReferenceAnalysis':
        # The analysis is a Control or Blank. We might use the
        # reference results instead other specs
        uid = analysis.getServiceUID()
        specs = analysis.aq_parent.getResultsRangeDict().get(uid, {})
    elif analysis.portal_type == 'DuplicateAnalysis':
        specs = analysis.getAnalysisSpecs()
    else:
        ar = analysis.aq_parent
        specs = ar.getPublicationSpecification()
        if not specs or keyword not in specs.getResultsRangeDict():
            specs = analysis.getAnalysisSpecs()
        specs = specs.getResultsRangeDict().get(keyword, {}) \
            if specs else {}

    andict['specs'] = specs
    scinot = self.context.bika_setup.getScientificNotationReport()
    andict['formatted_result'] = analysis.getFormattedResult(
        specs=specs, sciformat=int(scinot), decimalmark=decimalmark)

    fs = ''
    if specs.get('min', None) and specs.get('max', None):
        fs = '%s - %s' % (specs['min'], specs['max'])
    elif specs.get('min', None):
        fs = '> %s' % specs['min']
    elif specs.get('max', None):
        fs = '< %s' % specs['max']
    andict['formatted_specs'] = formatDecimalMark(fs, decimalmark)
    andict['formatted_uncertainty'] = format_uncertainty(
        analysis, analysis.getResult(), decimalmark=decimalmark,
        sciformat=int(scinot))

    # Return specs of current analysis
    andict['specs_dict'] = analysis.getSpecification() \
        .getResultsRangeDict().get(analysis.id)

    # Out of range?
    if specs:
        adapters = getAdapters((analysis, ), IResultOutOfRange)
        bsc = getToolByName(self.context, "bika_setup_catalog")
        for name, adapter in adapters:
            ret = adapter(specification=specs)
            if ret and ret['out_of_range']:
                andict['outofrange'] = True
                break
    return andict
def __call__(self):
    rc = getToolByName(self.context, REFERENCE_CATALOG)
    workflow = getToolByName(self.context, 'portal_workflow')
    laboratory = self.context.bika_setup.laboratory
    BatchEmail = self.context.bika_setup.getBatchEmail()
    BatchFax = self.context.bika_setup.getBatchFax()

    # group analysis requests by contact
    ARs_by_contact = {}
    for ar in self.analysis_requests:
        contact_uid = ar.getContact().UID()
        if contact_uid not in ARs_by_contact:
            ARs_by_contact[contact_uid] = []
        ARs_by_contact[contact_uid].append(ar)

    for contact_uid, ars in ARs_by_contact.items():
        ars.sort()
        self.contact = ars[0].getContact()
        self.pub_pref = self.contact.getPublicationPreference()
        batch_size = 'email' in self.pub_pref and BatchEmail or \
                     'fax' in self.pub_pref and BatchFax or 1

        # send batches of ARs to each contact
        for b in range(0, len(ars), batch_size):
            self.batch = ars[b:b + batch_size]
            self.any_accredited = False
            self.any_drymatter = False
            # get all services from all requests in this batch into a
            # dictionary:
            #   {'Point Of Capture': {'Category': [service,service,...]}}
            self.services = {}
            for ar in self.batch:
                if ar.getReportDryMatter():
                    self.any_drymatter = True
                states = ("verified", "published")
                for analysis in ar.getAnalyses(full_objects=True,
                                               review_state=states):
                    service = analysis.getService()
                    poc = POINTS_OF_CAPTURE.getValue(
                        service.getPointOfCapture())
                    cat = service.getCategoryTitle()
                    if poc not in self.services:
                        self.services[poc] = {}
                    if cat not in self.services[poc]:
                        self.services[poc][cat] = []
                    self.services[poc][cat].append(service)
                    if (service.getAccredited()):
                        self.any_accredited = True

            # compose and send email
            if 'email' in self.pub_pref:
                # render template to utf-8
                ar_results = self.ar_results().encode("utf-8")

                # XXX
                ar_debug_name = '%s_%s.html' % \
                    (self.analysis_requests[0].Title(), self.action)
                open(join(Globals.INSTANCE_HOME, 'var', ar_debug_name),
                     "w").write(ar_results)

                mime_msg = MIMEMultipart('related')
                mime_msg['Subject'] = self.get_mail_subject()
                mime_msg['From'] = formataddr(
                    (encode_header(laboratory.getName()),
                     laboratory.getEmailAddress()))
                mime_msg['To'] = formataddr(
                    (encode_header(self.contact.getFullname()),
                     self.contact.getEmailAddress()))
                mime_msg.preamble = 'This is a multi-part MIME message.'
                msg_txt = MIMEText(ar_results, _subtype='html')
                mime_msg.attach(msg_txt)

                try:
                    host = getToolByName(self.context, 'MailHost')
                    host.send(mime_msg.as_string(), immediate=True)
                except SMTPRecipientsRefused, msg:
                    raise WorkflowException(str(msg))

                if self.action == 'publish':
                    for ar in self.batch:
                        try:
                            workflow.doActionFor(ar, 'publish')
                        except WorkflowException:
                            pass
            else:
                raise Exception, "XXX pub_pref %s" % self.pub_pref
def __call__(self):
    form = self.request.form
    bc = getToolByName(self.context, 'bika_catalog')
    bsc = getToolByName(self.context, 'bika_setup_catalog')
    checkPermission = self.context.portal_membership.checkPermission
    getAuthenticatedMember = self.context.portal_membership.getAuthenticatedMember
    workflow = getToolByName(self.context, 'portal_workflow')
    ars = self.context.getAnalysisRequests()
    sample = self.context

    ## Create header_table data rows
    ar_links = ", ".join(
        ["<a href='%s'>%s</a>" % (ar.absolute_url(), ar.Title())
         for ar in ars])
    sp = self.context.getSamplePoint()
    st = self.context.getSampleType()

    if workflow.getInfoFor(self.context, 'cancellation_state') == "cancelled":
        allow_sample_edit = False
    else:
        edit_states = ['to_be_sampled', 'to_be_preserved', 'sample_due']
        allow_sample_edit = checkPermission(ManageSamples, self.context) \
            and workflow.getInfoFor(self.context, 'review_state') in edit_states

    SamplingWorkflowEnabled = \
        self.context.bika_setup.getSamplingWorkflowEnabled()
    samplers = getUsers(sample, ['Sampler', 'LabManager', 'Manager'])
    samplingdeviations = DisplayList(
        [(sd.UID, sd.title) for sd
         in bsc(portal_type='SamplingDeviation', inactive_state='active')])

    self.header_columns = 3
    self.header_rows = [
        {'id': 'ClientReference', 'title': _('Client Reference'),
         'allow_edit': self.allow_edit,
         'value': self.context.getClientReference(),
         'condition': True, 'type': 'text'},
        {'id': 'ClientSampleID', 'title': _('Client SID'),
         'allow_edit': self.allow_edit,
         'value': self.context.getClientSampleID(),
         'condition': True, 'type': 'text'},
        {'id': 'Requests', 'title': _('Requests'),
         'allow_edit': False,
         'value': ar_links,
         'condition': True, 'type': 'text'},
        {'id': 'SampleType', 'title': _('Sample Type'),
         'allow_edit': self.allow_edit and allow_sample_edit,
         'value': st and st.Title() or '',
         'condition': True, 'type': 'text', 'required': True},
        {'id': 'SampleMatrix', 'title': _('Sample Matrix'),
         'allow_edit': False,
         'value': st.getSampleMatrix() and st.getSampleMatrix().Title() or '',
         'condition': True, 'type': 'text'},
        {'id': 'SamplePoint', 'title': _('Sample Point'),
         'allow_edit': self.allow_edit and allow_sample_edit,
         'value': sp and sp.Title() or '',
         'condition': True, 'type': 'text'},
        {'id': 'Creator', 'title': PMF('Creator'),
         'allow_edit': False,
         'value': self.user_fullname(self.context.Creator()),
         'condition': True, 'type': 'text'},
        {'id': 'Composite', 'title': _('Composite'),
         'allow_edit': self.allow_edit and allow_sample_edit,
         'value': self.context.getComposite(),
         'condition': True, 'type': 'boolean'},
        {'id': 'AdHoc', 'title': _('Ad-Hoc'),
         'allow_edit': self.allow_edit and allow_sample_edit,
         'value': self.context.getAdHoc(),
         'condition': True, 'type': 'boolean'},
        {'id': 'DateCreated', 'title': PMF('Date Created'),
         'allow_edit': False,
         'value': self.context.created(),
         'formatted_value': self.ulocalized_time(self.context.created()),
         'condition': True, 'type': 'text'},
        {'id': 'SamplingDate', 'title': _('Sampling Date'),
         'allow_edit': self.allow_edit and allow_sample_edit,
         'value': self.ulocalized_time(self.context.getSamplingDate()),
         'formatted_value': self.ulocalized_time(self.context.getSamplingDate()),
         'condition': True, 'class': 'datepicker', 'type': 'text'},
        {'id': 'DateSampled', 'title': _('Date Sampled'),
         'allow_edit': self.allow_edit and allow_sample_edit,
         'value': sample.getDateSampled() and
                  self.ulocalized_time(sample.getDateSampled()) or '',
         'formatted_value': sample.getDateSampled() and
                  self.ulocalized_time(sample.getDateSampled()) or '',
         'condition': SamplingWorkflowEnabled,
         'class': 'datepicker', 'type': 'text', 'required': True},
        {'id': 'Sampler', 'title': _('Sampler'),
         'allow_edit': self.allow_edit and allow_sample_edit,
         'value': sample.getSampler(),
         'formatted_value': sample.getSampler(),
         'condition': SamplingWorkflowEnabled,
         'vocabulary': samplers, 'type': 'choices', 'required': True},
        {'id': 'SamplingDeviation', 'title': _('Sampling Deviation'),
         'allow_edit': self.allow_edit and allow_sample_edit,
         'value': sample.getSamplingDeviation() and
                  sample.getSamplingDeviation().UID() or '',
         'formatted_value': sample.getSamplingDeviation() and
                  sample.getSamplingDeviation().Title() or '',
         'condition': True,
         'vocabulary': samplingdeviations, 'type': 'choices'},
        {'id': 'DateReceived', 'title': _('Date Received'),
         'allow_edit': False,
         'value': self.context.getDateReceived(),
         'formatted_value': self.ulocalized_time(self.context.getDateReceived()),
         'condition': True, 'type': 'text'},
        {'id': 'DateExpired', 'title': _('Date Expired'),
         'allow_edit': False,
         'value': self.context.getDateExpired(),
         'formatted_value': self.ulocalized_time(self.context.getDateExpired()),
         'condition': True, 'type': 'text'},
        {'id': 'DisposalDate', 'title': _('Disposal Date'),
         'allow_edit': False,
         'value': self.context.getDisposalDate(),
         'formatted_value': self.ulocalized_time(self.context.getDisposalDate()),
         'condition': True, 'type': 'text'},
        {'id': 'DateDisposed', 'title': _('Date Disposed'),
         'allow_edit': False,
         'value': self.context.getDateDisposed(),
         'formatted_value': self.ulocalized_time(self.context.getDateDisposed()),
         'condition': True, 'type': 'text'},
    ]

    if self.allow_edit:
        self.header_buttons = [{'name': 'save_button', 'title': _('Save')}]
    else:
        self.header_buttons = []

    ## handle_header table submit
    if form.get('header_submitted', None):
        plone.protect.CheckAuthenticator(form)
        message = None
        values = {}
        for row in [r for r in self.header_rows if r['allow_edit']]:
            value = urllib.unquote_plus(form.get(row['id'], ''))
            if row['id'] == 'SampleType':
                if not value:
                    message = PMF(
                        u'error_required',
                        default=u'${name} is required, please correct.',
                        mapping={'name': _('Sample Type')})
                    break
                if not bsc(portal_type='SampleType', title=value):
                    message = _("${sampletype} is not a valid sample type",
                                mapping={'sampletype': value})
                    break
            if row['id'] == 'SamplePoint':
                if value and \
                   not bsc(portal_type='SamplePoint', title=value):
                    message = _("${samplepoint} is not a valid sample point",
                                mapping={'samplepoint': value})
                    break
            values[row['id']] = value

        # boolean - checkboxes are 'true'/'on' or 'false'/missing in form.
        for row in [r for r in self.header_rows
                    if r.get('type', '') == 'boolean']:
            value = form.get(row['id'], 'false')
            values[row['id']] = value == 'true' and True \
                or value == 'on' and True or False

        if not message:
            self.context.edit(**values)
            self.context.reindexObject()
            ars = self.context.getAnalysisRequests()
            # Analyses and AnalysisRequests have calculated fields
            # that are indexed; re-index all these objects.
            for ar in ars:
                ar.reindexObject()
                analyses = self.context.getAnalyses(
                    {'review_state': 'to_be_sampled'})
                for a in analyses:
                    a.getObject().reindexObject()
            message = PMF("Changes saved.")

        # If this sample was "To Be Sampled", and the
        # Sampler and DateSampled fields were completed,
        # do the Sampled transition.
        if workflow.getInfoFor(sample, "review_state") == "to_be_sampled" \
           and form.get("Sampler", None) \
           and form.get("DateSampled", None):
            # This transition does not invoke the regular WorkflowAction
            # in analysisrequest.py
            workflow.doActionFor(sample, "sample")
            sample.reindexObject()

        self.context.plone_utils.addPortalMessage(message, 'info')
        url = self.context.absolute_url().split("?")[0]
        self.request.RESPONSE.redirect(url)
        return

    ## Create Sample Partitions table
    parts_table = None
    if not self.allow_edit:
        p = SamplePartitionsView(self.context, self.request)
        p.allow_edit = self.allow_edit
        p.show_select_column = self.allow_edit
        p.show_workflow_action_buttons = self.allow_edit
        p.show_column_toggles = False
        p.show_select_all_checkbox = False
        p.review_states[0]['transitions'] = [{'id': 'empty'}, ]  # none
        parts_table = p.contents_table()
    self.parts = parts_table

    ## Create Field and Lab Analyses tables
    self.tables = {}
    if not self.allow_edit:
        for poc in POINTS_OF_CAPTURE:
            if not self.context.getAnalyses({'getPointOfCapture': poc}):
                continue
            t = SampleAnalysesView(self.context,
                                   self.request,
                                   getPointOfCapture=poc,
                                   sort_on='getServiceTitle')
            t.form_id = "sample_%s_analyses" % poc
            if poc == 'field':
                t.review_states[0]['columns'].remove('DueDate')
            t.show_column_toggles = False
            t.review_states[0]['transitions'] = [{'id': 'submit'},
                                                 {'id': 'retract'},
                                                 {'id': 'verify'}]
            self.tables[POINTS_OF_CAPTURE.getValue(poc)] = t.contents_table()
    return self.template()
def _analysis_data(self, analysis):
    keyword = analysis.getKeyword()
    service = analysis.getService()
    andict = {
        'obj': analysis,
        'id': analysis.id,
        'title': analysis.Title(),
        'keyword': keyword,
        'accredited': service.getAccredited(),
        'point_of_capture': to_utf8(
            POINTS_OF_CAPTURE.getValue(service.getPointOfCapture())),
        'category': to_utf8(service.getCategoryTitle()),
        'result': analysis.getResult(),
        'unit': to_utf8(service.getUnit()),
        'capture_date': analysis.getResultCaptureDate(),
        'request_id': analysis.aq_parent.getId(),
        'formatted_result': '',
        'uncertainty': analysis.getUncertainty(),
        'retested': analysis.getRetested(),
        'remarks': to_utf8(analysis.getRemarks()),
        'resultdm': to_utf8(analysis.getResultDM()),
        'outofrange': False,
        'type': analysis.portal_type,
        'reftype': analysis.getReferenceType()
            if hasattr(analysis, 'getReferenceType') else None,
        'worksheet': None,
        'specs': {},
        'formatted_specs': '',
    }

    if analysis.portal_type == 'DuplicateAnalysis':
        andict['reftype'] = 'd'

    ws = analysis.getBackReferences('WorksheetAnalysis')
    andict['worksheet'] = ws[0].id if ws and len(ws) > 0 else None
    andict['worksheet_url'] = ws[0].absolute_url if ws and len(ws) > 0 else None
    andict['refsample'] = analysis.getSample().id \
        if analysis.portal_type == 'Analysis' \
        else '%s - %s' % (analysis.aq_parent.id, analysis.aq_parent.Title())

    # Which analysis specs must be used?
    # Try first with those defined at AR Publish Specs level
    if analysis.portal_type == 'ReferenceAnalysis':
        # The analysis is a Control or Blank. We might use the
        # reference results instead other specs
        uid = analysis.getServiceUID()
        specs = analysis.aq_parent.getResultsRangeDict().get(uid, {})
    elif analysis.portal_type == 'DuplicateAnalysis':
        specs = analysis.getAnalysisSpecs()
    else:
        ar = analysis.aq_parent
        specs = ar.getPublicationSpecification()
        if not specs or keyword not in specs.getResultsRangeDict():
            specs = analysis.getAnalysisSpecs()
        specs = specs.getResultsRangeDict().get(keyword, {}) \
            if specs else {}

    andict['specs'] = specs
    andict['formatted_result'] = analysis.getFormattedResult(specs)
    if specs.get('min', None) and specs.get('max', None):
        andict['formatted_specs'] = '%s - %s' % (specs['min'], specs['max'])
    elif specs.get('min', None):
        andict['formatted_specs'] = '> %s' % specs['min']
    elif specs.get('max', None):
        andict['formatted_specs'] = '< %s' % specs['max']

    # Out of range?
    if specs:
        adapters = getAdapters((analysis, ), IResultOutOfRange)
        bsc = getToolByName(self.context, "bika_setup_catalog")
        for name, adapter in adapters:
            ret = adapter(specification=specs)
            if ret and ret['out_of_range']:
                andict['outofrange'] = True
                break
    return andict
def __call__(self):
    rc = getToolByName(self.context, REFERENCE_CATALOG)
    workflow = getToolByName(self.context, 'portal_workflow')
    laboratory = self.context.bika_setup.laboratory
    BatchEmail = self.context.bika_setup.getBatchEmail()
    BatchFax = self.context.bika_setup.getBatchFax()

    # group analysis requests by contact
    ARs_by_contact = {}
    for ar in self.analysis_requests:
        contact_uid = ar.getContact().UID()
        if contact_uid not in ARs_by_contact:
            ARs_by_contact[contact_uid] = []
        ARs_by_contact[contact_uid].append(ar)

    for contact_uid, ars in ARs_by_contact.items():
        ars.sort()
        self.contact = ars[0].getContact()
        self.pub_pref = self.contact.getPublicationPreference()
        batch_size = 'email' in self.pub_pref and BatchEmail or \
                     'fax' in self.pub_pref and BatchFax or 1

        # send batches of ARs to each contact
        for b in range(0, len(ars), batch_size):
            self.batch = ars[b:b + batch_size]
            self.any_accredited = False
            self.any_drymatter = False
            # get all services from all requests in this batch into a
            # dictionary:
            #   {'Point Of Capture': {'Category': [service,service,...]}}
            self.services = {}
            for ar in self.batch:
                if ar.getReportDryMatter():
                    self.any_drymatter = True
                states = ("verified", "published")
                for analysis in ar.getAnalyses(full_objects=True,
                                               review_state=states):
                    service = analysis.getService()
                    poc = POINTS_OF_CAPTURE.getValue(
                        service.getPointOfCapture())
                    cat = service.getCategoryName()
                    if poc not in self.services:
                        self.services[poc] = {}
                    if cat not in self.services[poc]:
                        self.services[poc][cat] = []
                    self.services[poc][cat].append(service)
                    if (service.getAccredited()):
                        self.any_accredited = True

            # compose and send email
            if 'email' in self.pub_pref:
                mime_msg = MIMEMultipart('related')
                mime_msg['Subject'] = self.get_mail_subject()
                mime_msg['From'] = formataddr(
                    (encode_header(laboratory.getName()),
                     laboratory.getEmailAddress()))
                mime_msg['To'] = formataddr(
                    (encode_header(self.contact.getFullname()),
                     self.contact.getEmailAddress()))
                mime_msg.preamble = 'This is a multi-part MIME message.'
                ar_results = self.ar_results()
                msg_txt = MIMEText(ar_results, _subtype='html')
                mime_msg.attach(msg_txt)
                # XXX
                open(join(Globals.INSTANCE_HOME, 'var', 'ar_results.html'),
                     "w").write(ar_results)

                try:
                    host = getToolByName(self.context, 'MailHost')
                    host.send(mime_msg.as_string(), immediate=True)
                except SMTPRecipientsRefused, msg:
                    raise WorkflowException(str(msg))

                if self.action == 'publish':
                    for ar in self.batch:
                        try:
                            workflow.doActionFor(ar, 'publish')
                        except WorkflowException:
                            pass
            else:
                raise Exception, "XXX pub_pref %s" % self.pub_pref
def __call__(self):
    ar = self.context
    workflow = getToolByName(self.context, 'portal_workflow')
    if 'transition' in self.request.form:
        doActionFor(self.context, self.request.form['transition'])

    # Contacts get expanded for view
    contact = self.context.getContact()
    contacts = []
    for cc in self.context.getCCContact():
        contacts.append(cc)
    if contact in contacts:
        contacts.remove(contact)
    ccemails = []
    for cc in contacts:
        ccemails.append("%s <<a href='mailto:%s'>%s</a>>"
                        % (cc.Title(), cc.getEmailAddress(),
                           cc.getEmailAddress()))

    # CC Emails become mailto links
    emails = self.context.getCCEmails()
    if isinstance(emails, str):
        emails = emails and [emails, ] or []
    cc_emails = []
    cc_hrefs = []
    for cc in emails:
        cc_emails.append(cc)
        cc_hrefs.append("<a href='mailto:%s'>%s</a>" % (cc, cc))

    # render header table
    self.header_table = HeaderTableView(self.context, self.request)()

    # Create Partitions View for this ARs sample
    p = SamplePartitionsView(self.context.getSample(), self.request)
    p.show_column_toggles = False
    self.parts = p.contents_table()

    # Create Field and Lab Analyses tables
    self.tables = {}
    for poc in POINTS_OF_CAPTURE:
        if self.context.getAnalyses(getPointOfCapture=poc):
            t = self.createAnalysesView(
                ar,
                self.request,
                getPointOfCapture=poc,
                show_categories=self.context.bika_setup.getCategoriseAnalysisServices())
            t.allow_edit = True
            t.form_id = "%s_analyses" % poc
            t.review_states[0]['transitions'] = [{'id': 'submit'},
                                                 {'id': 'retract'},
                                                 {'id': 'verify'}]
            t.show_workflow_action_buttons = True
            t.show_select_column = True
            if getSecurityManager().checkPermission(EditFieldResults,
                                                    self.context) \
               and poc == 'field':
                t.review_states[0]['columns'].remove('DueDate')
            self.tables[POINTS_OF_CAPTURE.getValue(poc)] = t.contents_table()

    # Create QC Analyses View for this AR
    show_cats = self.context.bika_setup.getCategoriseAnalysisServices()
    qcview = self.createQCAnalyesView(ar,
                                      self.request,
                                      show_categories=show_cats)
    qcview.allow_edit = False
    qcview.show_select_column = False
    qcview.show_workflow_action_buttons = False
    qcview.form_id = "%s_qcanalyses"
    qcview.review_states[0]['transitions'] = [{'id': 'submit'},
                                              {'id': 'retract'},
                                              {'id': 'verify'}]
    self.qctable = qcview.contents_table()

    # Create the ResultsInterpretation by department view
    from resultsinterpretation import ARResultsInterpretationView
    self.riview = ARResultsInterpretationView(ar, self.request)

    # If a general retract was done, raise a warning
    if workflow.getInfoFor(ar, 'review_state') == 'sample_received':
        allstatus = list()
        for analysis in ar.getAnalyses():
            status = workflow.getInfoFor(analysis.getObject(), 'review_state')
            if status not in ['retracted', 'to_be_verified', 'verified']:
                allstatus = []
                break
            else:
                allstatus.append(status)
        if len(allstatus) > 0:
            message = "General Retract Done. Submit this AR manually."
            self.addMessage(message, 'warning')

    # If it is a retracted AR, show the link to the child AR and a warning msg
    if workflow.getInfoFor(ar, 'review_state') == 'invalid':
        childar = hasattr(ar, 'getChildAnalysisRequest') and \
            ar.getChildAnalysisRequest() or None
        message = _('These results have been withdrawn and are '
                    'listed here for trace-ability purposes. Please follow '
                    'the link to the retest')
        if childar:
            message = (message + " %s.") % childar.getRequestID()
        else:
            message = message + "."
        self.addMessage(message, 'warning')

    # If it is an AR automatically generated due to a Retraction, show its
    # parent AR information
    if hasattr(ar, 'getParentAnalysisRequest') and \
       ar.getParentAnalysisRequest():
        par = ar.getParentAnalysisRequest()
        message = _('This Analysis Request has been '
                    'generated automatically due to '
                    'the retraction of the Analysis '
                    'Request ${retracted_request_id}.',
                    mapping={'retracted_request_id': par.getRequestID()})
        self.addMessage(message, 'info')

    self.renderMessages()
    return self.template()
def __call__(self):
    form = self.request.form
    bc = getToolByName(self.context, 'bika_catalog')
    bsc = getToolByName(self.context, 'bika_setup_catalog')
    checkPermission = self.context.portal_membership.checkPermission
    getAuthenticatedMember = self.context.portal_membership.getAuthenticatedMember
    workflow = getToolByName(self.context, 'portal_workflow')
    ars = self.context.getAnalysisRequests()
    props = getToolByName(self.context, 'portal_properties').bika_properties
    datepicker_format = props.getProperty('datepicker_format')
    sample = self.context

    ## Create header_table data rows
    ar_links = ", ".join(
        ["<a href='%s'>%s</a>" % (ar.absolute_url(), ar.Title())
         for ar in ars])
    sp = self.context.getSamplePoint()
    st = self.context.getSampleType()

    if workflow.getInfoFor(self.context, 'cancellation_state') == "cancelled":
        allow_sample_edit = False
    else:
        edit_states = ['to_be_sampled', 'to_be_preserved', 'sample_due']
        allow_sample_edit = checkPermission(ManageSamples, self.context) \
            and workflow.getInfoFor(self.context, 'review_state') in edit_states

    SamplingWorkflowEnabled = \
        self.context.bika_setup.getSamplingWorkflowEnabled()
    samplers = getUsers(sample, ['Sampler', 'LabManager', 'Manager'])

    self.header_columns = 3
    self.header_rows = [
        {'id': 'ClientReference', 'title': _('Client Reference'),
         'allow_edit': self.allow_edit,
         'value': self.context.getClientReference(),
         'condition': True, 'type': 'text'},
        {'id': 'ClientSampleID', 'title': _('Client SID'),
         'allow_edit': self.allow_edit,
         'value': self.context.getClientSampleID(),
         'condition': True, 'type': 'text'},
        {'id': 'Requests', 'title': _('Requests'),
         'allow_edit': False,
         'value': ar_links,
         'condition': True, 'type': 'text'},
        {'id': 'SampleType', 'title': _('Sample Type'),
         'allow_edit': self.allow_edit and allow_sample_edit,
         'value': st and st.Title() or '',
         'condition': True, 'type': 'text', 'required': True},
        {'id': 'SamplePoint', 'title': _('Sample Point'),
         'allow_edit': self.allow_edit and allow_sample_edit,
         'value': sp and sp.Title() or '',
         'condition': True, 'type': 'text'},
        {'id': 'Composite', 'title': _('Composite'),
         'allow_edit': self.allow_edit and allow_sample_edit,
         'value': self.context.getComposite(),
         'condition': True, 'type': 'boolean'},
        {'id': 'Creator', 'title': PMF('Creator'),
         'allow_edit': False,
         'value': pretty_user_name_or_id(self.context, self.context.Creator()),
         'condition': True, 'type': 'text'},
        {'id': 'DateCreated', 'title': PMF('Date Created'),
         'allow_edit': False,
         'value': self.context.created(),
         'formatted_value': TimeOrDate(self.context, self.context.created()),
         'condition': True, 'type': 'text'},
        {'id': 'SamplingDate', 'title': _('Sampling Date'),
         'allow_edit': self.allow_edit and allow_sample_edit,
         'value': self.context.getSamplingDate().strftime(datepicker_format),
         'formatted_value': TimeOrDate(self.context,
                                       self.context.getSamplingDate()),
         'condition': True, 'class': 'datepicker', 'type': 'text'},
        {'id': 'DateSampled', 'title': _('Date Sampled'),
         'allow_edit': self.allow_edit and allow_sample_edit,
         'value': sample.getDateSampled() and
                  sample.getDateSampled().strftime(datepicker_format) or '',
         'formatted_value': sample.getDateSampled() and
                  TimeOrDate(self.context, sample.getDateSampled()) or '',
         'condition': SamplingWorkflowEnabled,
         'class': 'datepicker', 'type': 'text', 'required': True},
        {'id': 'Sampler', 'title': _('Sampler'),
         'allow_edit': self.allow_edit and allow_sample_edit,
         'value': sample.getSampler(),
         'formatted_value': sample.getSampler(),
         'condition': SamplingWorkflowEnabled,
         'vocabulary': samplers, 'type': 'choices', 'required': True},
        {'id': 'DateReceived', 'title': _('Date Received'),
         'allow_edit': False,
         'value': self.context.getDateReceived(),
         'formatted_value': TimeOrDate(self.context,
                                       self.context.getDateReceived()),
         'condition': True, 'type': 'text'},
        {'id': 'DateExpired', 'title': _('Date Expired'),
         'allow_edit': False,
         'value': self.context.getDateExpired(),
         'formatted_value': TimeOrDate(self.context,
                                       self.context.getDateExpired()),
         'condition': True, 'type': 'text'},
        {'id': 'DisposalDate', 'title': _('Disposal Date'),
         'allow_edit': False,
         'value': self.context.getDisposalDate(),
         'formatted_value': TimeOrDate(self.context,
                                       self.context.getDisposalDate()),
         'condition': True, 'type': 'text'},
        {'id': 'DateDisposed', 'title': _('Date Disposed'),
         'allow_edit': False,
         'value': self.context.getDateDisposed(),
         'formatted_value': TimeOrDate(self.context,
                                       self.context.getDateDisposed()),
         'condition': True, 'type': 'text'},
    ]

    if self.allow_edit:
        self.header_buttons = [{'name': 'save_button', 'title': _('Save')}]
    else:
        self.header_buttons = []

    ## handle_header table submit
    if form.get('header_submitted', None):
        plone.protect.CheckAuthenticator(form)
        message = None
        values = {}
        for row in [r for r in self.header_rows if r['allow_edit']]:
            value = urllib.unquote_plus(form.get(row['id'], ''))
            if row['id'] == 'SampleType':
                if not value:
                    message = PMF(
                        u'error_required',
                        default=u'${name} is required, please correct.',
                        mapping={'name': _('Sample Type')})
                    break
                if not bsc(portal_type='SampleType', title=value):
                    message = _("${sampletype} is not a valid sample type",
                                mapping={'sampletype': value})
                    break
            if row['id'] == 'SamplePoint':
                if value and \
                   not bsc(portal_type='SamplePoint', title=value):
                    message = _("${samplepoint} is not a valid sample point",
                                mapping={'samplepoint': value})
                    break
            values[row['id']] = value

        # boolean - checkboxes are 'true' or 'false' in form.
        for row in [r for r in self.header_rows
                    if r.get('type', '') == 'boolean']:
            value = form.get(row['id'], 'false')
            values[row['id']] = value == 'true' and True or False

        if not message:
            self.context.edit(**values)
            self.context.reindexObject()
            ars = self.context.getAnalysisRequests()
            for ar in ars:
                ar.reindexObject()
            message = PMF("Changes saved.")

        # If this sample was "To Be Sampled", and the
        # Sampler and DateSampled fields were completed,
        # do the Sampled transition.
        if workflow.getInfoFor(sample, "review_state") == "to_be_sampled" \
           and form.get("Sampler", None) \
           and form.get("DateSampled", None):
            workflow.doActionFor(sample, "sample")
            sample.reindexObject()

        self.context.plone_utils.addPortalMessage(message, 'info')
        url = self.context.absolute_url().split("?")[0]
        self.request.RESPONSE.redirect(url)
        return

    ## Create Sample Partitions table
    parts_table = None
    if not self.allow_edit:
        p = SamplePartitionsView(self.context, self.request)
        p.allow_edit = self.allow_edit
        p.show_select_column = self.allow_edit
        p.show_workflow_action_buttons = self.allow_edit
        p.show_column_toggles = False
        p.show_select_all_checkbox = False
        p.review_states[0]['transitions'] = [{'id': 'empty'}, ]  # none
        parts_table = p.contents_table()
    self.parts = parts_table

    ## Create Field and Lab Analyses tables
    self.tables = {}
    if not self.allow_edit:
        for poc in POINTS_OF_CAPTURE:
            if not self.context.getAnalyses({'getPointOfCapture': poc}):
                continue
            t = SampleAnalysesView(self.context,
                                   self.request,
                                   getPointOfCapture=poc,
                                   sort_on='getServiceTitle')
            t.form_id = "sample_%s_analyses" % poc
            if poc == 'field':
                t.review_states[0]['columns'].remove('DueDate')
            t.show_column_toggles = False
            t.review_states[0]['transitions'] = [{'id': 'submit'},
                                                 {'id': 'retract'},
                                                 {'id': 'verify'}]
            self.tables[POINTS_OF_CAPTURE.getValue(poc)] = t.contents_table()
    return self.template()
def _analysis_data(self, analysis):
    """ Returns a dict that represents the analysis
    """
    decimalmark = analysis.aq_parent.aq_parent.getDecimalMark()
    keyword = analysis.getKeyword()
    service = analysis.getService()
    andict = {
        'obj': analysis,
        'id': analysis.id,
        'title': analysis.Title(),
        'keyword': keyword,
        'scientific_name': service.getScientificName(),
        'accredited': service.getAccredited(),
        'point_of_capture': to_utf8(
            POINTS_OF_CAPTURE.getValue(service.getPointOfCapture())),
        'category': to_utf8(service.getCategoryTitle()),
        'result': analysis.getResult(),
        'unit': to_utf8(service.getUnit()),
        'formatted_unit': format_supsub(to_utf8(service.getUnit())),
        'capture_date': analysis.getResultCaptureDate(),
        'request_id': analysis.aq_parent.getId(),
        'formatted_result': '',
        'uncertainty': analysis.getUncertainty(),
        'formatted_uncertainty': '',
        'retested': analysis.getRetested(),
        'remarks': to_utf8(analysis.getRemarks()),
        'resultdm': to_utf8(analysis.getResultDM()),
        'outofrange': False,
        'type': analysis.portal_type,
        'reftype': analysis.getReferenceType()
            if hasattr(analysis, 'getReferenceType') else None,
        'worksheet': None,
        'specs': {},
        'formatted_specs': '',
    }
    andict['refsample'] = analysis.getSample().id \
        if analysis.portal_type == 'Analysis' \
        else '%s - %s' % (analysis.aq_parent.id, analysis.aq_parent.Title())

    # Which analysis specs must be used?
    # Try first with those defined at AR Publish Specs level
    if analysis.portal_type == 'ReferenceAnalysis':
        # The analysis is a Control or Blank. We might use the
        # reference results instead other specs
        uid = analysis.getServiceUID()
        specs = analysis.aq_parent.getResultsRangeDict().get(uid, {})
    else:
        # Get the specs directly from the analysis. The getResultsRange
        # function already takes care about which are the specs to be used:
        # AR, client or lab.
        specs = analysis.getResultsRange()

    andict['specs'] = specs
    scinot = self.context.bika_setup.getScientificNotationReport()
    andict['formatted_result'] = analysis.getFormattedResult(
        specs=specs, sciformat=int(scinot), decimalmark=decimalmark)

    fs = ''
    if specs.get('min', None) and specs.get('max', None):
        fs = '%s - %s' % (specs['min'], specs['max'])
    elif specs.get('min', None):
        fs = '> %s' % specs['min']
    elif specs.get('max', None):
        fs = '< %s' % specs['max']
    andict['formatted_specs'] = formatDecimalMark(fs, decimalmark)
    andict['formatted_uncertainty'] = format_uncertainty(
        analysis, analysis.getResult(), decimalmark=decimalmark,
        sciformat=int(scinot))

    # Out of range?
    if specs:
        adapters = getAdapters((analysis, ), IResultOutOfRange)
        bsc = getToolByName(self.context, "bika_setup_catalog")
        for name, adapter in adapters:
            ret = adapter(specification=specs)
            if ret and ret['out_of_range']:
                andict['outofrange'] = True
                break
    return andict
def _analysis_data(self, analysis):
    """ Returns a dict that represents the analysis
    """
    decimalmark = analysis.aq_parent.aq_parent.getDecimalMark()
    keyword = analysis.getKeyword()
    andict = {
        'obj': analysis,
        'id': analysis.id,
        'title': analysis.Title(),
        'keyword': keyword,
        'scientific_name': analysis.getScientificName(),
        'accredited': analysis.getAccredited(),
        'point_of_capture': to_utf8(
            POINTS_OF_CAPTURE.getValue(analysis.getPointOfCapture())),
        'category': to_utf8(analysis.getCategoryTitle()),
        'result': analysis.getResult(),
        'unit': to_utf8(analysis.getUnit()),
        'formatted_unit': format_supsub(to_utf8(analysis.getUnit())),
        'capture_date': analysis.getResultCaptureDate(),
        'request_id': analysis.aq_parent.getId(),
        'formatted_result': '',
        'uncertainty': analysis.getUncertainty(),
        'formatted_uncertainty': '',
        'retested': analysis.getRetested(),
        'remarks': to_utf8(analysis.getRemarks()),
        'resultdm': to_utf8(analysis.getResultDM()),
        'outofrange': False,
        'type': analysis.portal_type,
        'reftype': analysis.getReferenceType() if hasattr(
            analysis, 'getReferenceType') else None,
        'worksheet': None,
        'specs': {},
        'formatted_specs': '',
        'review_state': api.get_workflow_status_of(analysis),
    }
    andict['refsample'] = analysis.getSample().id \
        if analysis.portal_type == 'Analysis' \
        else '%s - %s' % (analysis.aq_parent.id, analysis.aq_parent.Title())

    # Which analysis specs must be used?
    # Try first with those defined at AR Publish Specs level
    if analysis.portal_type == 'ReferenceAnalysis':
        # The analysis is a Control or Blank. We might use the
        # reference results instead other specs
        uid = analysis.getServiceUID()
        specs = analysis.aq_parent.getResultsRangeDict().get(uid, {})
    else:
        # Get the specs directly from the analysis. The getResultsRange
        # function already takes care about which are the specs to be used:
        # AR, client or lab.
        specs = analysis.getResultsRange()

    andict['specs'] = specs
    scinot = self.context.bika_setup.getScientificNotationReport()
    andict['formatted_result'] = analysis.getFormattedResult(
        specs=specs, sciformat=int(scinot), decimalmark=decimalmark)

    fs = ''
    if specs.get('min', None) and specs.get('max', None):
        fs = '%s - %s' % (specs['min'], specs['max'])
    elif specs.get('min', None):
        fs = '> %s' % specs['min']
    elif specs.get('max', None):
        fs = '< %s' % specs['max']
    andict['formatted_specs'] = formatDecimalMark(fs, decimalmark)
    andict['formatted_uncertainty'] = format_uncertainty(
        analysis, analysis.getResult(), decimalmark=decimalmark,
        sciformat=int(scinot))

    # Out of range?
    if specs:
        adapters = getAdapters((analysis, ), IResultOutOfRange)
        for name, adapter in adapters:
            ret = adapter(specification=specs)
            if ret and ret['out_of_range']:
                andict['outofrange'] = True
                break
    return andict
def __call__(self): rc = getToolByName(self.context, REFERENCE_CATALOG) workflow = getToolByName(self.context, "portal_workflow") BatchEmail = self.context.bika_setup.getBatchEmail() username = self.context.portal_membership.getAuthenticatedMember().getUserName() self.reporter = self.user_fullname(username) self.reporter_email = self.user_email(username) # signature image self.reporter_signature = "" c = [x for x in self.bika_setup_catalog(portal_type="LabContact") if x.getObject().getUsername() == username] if c: sf = c[0].getObject().getSignature() if sf: self.reporter_signature = sf.absolute_url() + "/Signature" # lab address self.laboratory = laboratory = self.context.bika_setup.laboratory self.lab_address = "<br/>".join(laboratory.getPrintAddress()) # group/publish analysis requests by contact ARs_by_contact = {} for ar in self.analysis_requests: contact_uid = ar.getContact().UID() if contact_uid not in ARs_by_contact: ARs_by_contact[contact_uid] = [] ARs_by_contact[contact_uid].append(ar) for contact_uid, ars in ARs_by_contact.items(): ars.sort() self.contact = ars[0].getContact() self.pub_pref = self.contact.getPublicationPreference() batch_size = "email" in self.pub_pref and BatchEmail or 5 # client address self.client = ars[0].aq_parent self.client_address = "<br/>".join(self.client.getPrintAddress()) self.Footer = self.context.bika_setup.getResultFooter() # send batches of ARs to each contact for b in range(0, len(ars), batch_size): self.batch = ars[b : b + batch_size] self.any_accredited = False self.any_drymatter = False # get all services from all requests in this batch into a # dictionary: # {'Point Of Capture': {'Category': [service,service,...]}} self.services = {} out_fn = "_".join([ar.Title() for ar in self.batch]) for ar in self.batch: if ar.getReportDryMatter(): self.any_drymatter = True states = ("verified", "published") for analysis in ar.getAnalyses(full_objects=True, review_state=states): service = analysis.getService() poc = POINTS_OF_CAPTURE.getValue(service.getPointOfCapture()) cat = service.getCategoryTitle() if poc not in self.services: self.services[poc] = {} if cat not in self.services[poc]: self.services[poc][cat] = [] if service not in self.services[poc][cat]: self.services[poc][cat].append(service) if service.getAccredited(): self.any_accredited = True # compose and send email if "email" in self.pub_pref: # render template ar_results = self.ar_results() ar_results = safe_unicode(ar_results).encode("utf-8") ar_results = self.escape(ar_results) debug_mode = App.config.getConfiguration().debug_mode if debug_mode: open(join(Globals.INSTANCE_HOME, "var", out_fn + ".html"), "w").write(ar_results) pisa.showLogging() ramdisk = StringIO() pdf = pisa.CreatePDF(ar_results, ramdisk) pdf_data = ramdisk.getvalue() ramdisk.close() if debug_mode: open(join(Globals.INSTANCE_HOME, "var", out_fn + ".pdf"), "wb").write(pdf_data) mime_msg = MIMEMultipart("related") mime_msg["Subject"] = self.get_mail_subject() mime_msg["From"] = formataddr((encode_header(laboratory.getName()), laboratory.getEmailAddress())) mime_msg["To"] = formataddr( (encode_header(self.contact.getFullname()), self.contact.getEmailAddress()) ) mime_msg.preamble = "This is a multi-part MIME message." 
                msg_txt = MIMEText(ar_results, _subtype="html")
                mime_msg.attach(msg_txt)
                if not pdf.err:
                    part = MIMEBase("application", "application/pdf")
                    part.add_header("Content-Disposition",
                                    'attachment; filename="%s.pdf"' % out_fn)
                    part.set_payload(pdf_data)
                    Encoders.encode_base64(part)
                    mime_msg.attach(part)

                try:
                    host = getToolByName(self.context, "MailHost")
                    host.send(mime_msg.as_string(), immediate=True)
                except SMTPServerDisconnected, msg:
                    if not debug_mode:
                        raise SMTPServerDisconnected(msg)
                except SMTPRecipientsRefused, msg:
                    raise WorkflowException(str(msg))

                if self.action == "publish":
                    for ar in self.batch:
                        try:
                            workflow.doActionFor(ar, "publish")
                        except WorkflowException:
                            pass

                ## if not pdf.err:
                ##    setheader = self.request.RESPONSE.setHeader
                ##    setheader('Content-Type', 'application/pdf')
                ##    setheader("Content-Disposition",
                ##              "attachment;filename=\"%s.pdf\"" % out_fn)
                ##    self.request.RESPONSE.write(pdf_data)
            else:
                raise Exception, "XXX pub_pref %s" % self.pub_pref
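# Illustration only (the helper name is hypothetical and is not used by the
# view above): the batching rule applied to each contact's AR list. With
# batch_size = 5 and seven ARs, the loop above sends two mails: ARs 0-4,
# then ARs 5-6.
def _iter_batches(items, batch_size):
    for b in range(0, len(items), batch_size):
        yield items[b:b + batch_size]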
def __call__(self):
    form = self.request.form
    bsc = getToolByName(self.context, 'bika_setup_catalog')
    pc = getToolByName(self.context, 'portal_catalog')
    checkPermission = self.context.portal_membership.checkPermission
    getAuthenticatedMember = self.context.portal_membership.getAuthenticatedMember
    workflow = getToolByName(self.context, 'portal_workflow')
    ars = self.context.getAnalysisRequests()

    props = getToolByName(self.context, 'portal_properties').bika_properties
    datepicker_format = props.getProperty('datepicker_format')

    ## Create header_table data rows
    ar_links = ", ".join(
        ["<a href='%s'>%s</a>" % (ar.absolute_url(), ar.Title())
         for ar in ars])
    sp = self.context.getSamplePoint()
    st = self.context.getSampleType()

    if workflow.getInfoFor(self.context, 'cancellation_state') == "cancelled":
        allow_sample_edit = False
    else:
        edit_states = ['to_be_sampled', 'to_be_preserved', 'sample_due']
        allow_sample_edit = checkPermission(ManageSamples, self.context) \
            and workflow.getInfoFor(self.context, 'review_state') in edit_states

    self.header_columns = 3
    self.header_rows = [
        {'id': 'ClientReference', 'title': _('Client Reference'),
         'allow_edit': True, 'value': self.context.getClientReference(),
         'condition': True, 'type': 'text'},
        {'id': 'ClientSampleID', 'title': _('Client SID'),
         'allow_edit': True, 'value': self.context.getClientSampleID(),
         'condition': True, 'type': 'text'},
        {'id': 'Requests', 'title': _('Requests'),
         'allow_edit': False, 'value': ar_links,
         'condition': True, 'type': 'text'},
        {'id': 'SampleType', 'title': _('Sample Type'),
         'allow_edit': allow_sample_edit, 'value': st and st.Title() or '',
         'condition': True, 'type': 'text', 'required': True},
        {'id': 'SamplePoint', 'title': _('Sample Point'),
         'allow_edit': allow_sample_edit, 'value': sp and sp.Title() or '',
         'condition': True, 'type': 'text'},
        {'id': 'Composite', 'title': _('Composite'),
         'allow_edit': allow_sample_edit, 'value': self.context.getComposite(),
         'condition': True, 'type': 'boolean'},
        {'id': 'Creator', 'title': PMF('Creator'),
         'allow_edit': False,
         'value': pretty_user_name_or_id(self.context, self.context.Creator()),
         'condition': True, 'type': 'text'},
        {'id': 'DateCreated', 'title': PMF('Date Created'),
         'allow_edit': False, 'value': self.context.created(),
         'formatted_value': TimeOrDate(self.context, self.context.created()),
         'condition': True, 'type': 'text'},
        {'id': 'SamplingDate', 'title': _('Sampling Date'),
         'allow_edit': allow_sample_edit,
         'value': self.context.getSamplingDate().strftime(datepicker_format),
         'formatted_value': TimeOrDate(self.context,
                                       self.context.getSamplingDate()),
         'condition': True, 'class': 'datepicker', 'type': 'text'},
        {'id': 'DateReceived', 'title': _('Date Received'),
         'allow_edit': False, 'value': self.context.getDateReceived(),
         'formatted_value': TimeOrDate(self.context,
                                       self.context.getDateReceived()),
         'condition': True, 'type': 'text'},
        {'id': 'DateExpired', 'title': _('Date Expired'),
         'allow_edit': False, 'value': self.context.getDateExpired(),
         'formatted_value': TimeOrDate(self.context,
                                       self.context.getDateExpired()),
         'condition': True, 'type': 'text'},
        {'id': 'DisposalDate', 'title': _('Disposal Date'),
         'allow_edit': False, 'value': self.context.getDisposalDate(),
         'formatted_value': TimeOrDate(self.context,
                                       self.context.getDisposalDate()),
         'condition': True, 'type': 'text'},
        {'id': 'DateDisposed', 'title': _('Date Disposed'),
         'allow_edit': False, 'value': self.context.getDateDisposed(),
         'formatted_value': TimeOrDate(self.context,
                                       self.context.getDateDisposed()),
         'condition': True, 'type': 'text'},
    ]
    self.header_buttons = [{'name': 'save_button', 'title': _('Save')}]

    ## handle header table submit
    if 'save_button' in form:
        message = None
        values = {}
        for row in [r for r in self.header_rows if r['allow_edit']]:
            value = form.get(row['id'], '')
            if row['id'] == 'SampleType':
                if not value:
                    message = _('Sample Type is required')
                    break
                if not bsc(portal_type='SampleType', title=value):
                    message = _("${sampletype} is not a valid sample type",
                                mapping={'sampletype': value})
                    break
            if row['id'] == 'SamplePoint':
                if value and \
                   not bsc(portal_type='SamplePoint', title=value):
                    message = _("${samplepoint} is not a valid sample point",
                                mapping={'samplepoint': value})
                    break
            values[row['id']] = value

        # boolean rows - checkboxes are either present in the form, or absent.
        for row in [r for r in self.header_rows
                    if r.get('type', '') == 'boolean']:
            values[row['id']] = row['id'] in form

        if not message:
            self.context.edit(**values)
            self.context.reindexObject()
            ars = self.context.getAnalysisRequests()
            for ar in ars:
                ar.reindexObject()
            message = PMF("Changes saved.")

        self.context.plone_utils.addPortalMessage(message, 'info')
        # we need to start the request again, to regenerate the header
        self.request.RESPONSE.redirect(self.context.absolute_url())
        return

    ## Create Sample Partitions table
    p = SamplePartitionsView(self.context, self.request)
    p.show_column_toggles = False
    self.parts = p.contents_table()

    ## Create Field and Lab Analyses tables
    self.tables = {}
    for poc in POINTS_OF_CAPTURE:
        if not self.context.getAnalyses({'getPointOfCapture': poc}):
            continue
        t = SampleAnalysesView(self.context,
                               self.request,
                               getPointOfCapture=poc,
                               sort_on='getServiceTitle')
        t.form_id = "sample_%s_analyses" % poc
        t.allow_edit = True
        if poc == 'field':
            t.review_states[0]['columns'].remove('DueDate')
        t.show_column_toggles = False
        t.review_states[0]['transitions'] = [{'id': 'submit'},
                                             {'id': 'retract'},
                                             {'id': 'verify'}]
        t.show_select_column = True
        self.tables[POINTS_OF_CAPTURE.getValue(poc)] = t.contents_table()

    return self.template()
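# Standalone sketch (hypothetical helper, not part of the view above) of the
# checkbox rule used for boolean header rows: a checkbox value is True exactly
# when its id appears as a key in the submitted form.
def _read_boolean_rows(header_rows, form):
    return dict((row['id'], row['id'] in form)
                for row in header_rows if row.get('type', '') == 'boolean')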
def __call__(self):
    rc = getToolByName(self.context, REFERENCE_CATALOG)
    workflow = getToolByName(self.context, 'portal_workflow')
    BatchEmail = self.context.bika_setup.getBatchEmail()
    username = self.context.portal_membership.getAuthenticatedMember().getUserName()
    self.reporter = self.user_fullname(username)
    self.reporter_email = self.user_email(username)

    # signature image
    self.reporter_signature = ""
    c = [x for x in self.bika_setup_catalog(portal_type='LabContact')
         if x.getObject().getUsername() == username]
    if c:
        sf = c[0].getObject().getSignature()
        if sf:
            self.reporter_signature = sf.absolute_url() + "/Signature"

    # lab address
    self.laboratory = laboratory = self.context.bika_setup.laboratory
    lab_address = laboratory.getPostalAddress() \
        or laboratory.getBillingAddress() \
        or laboratory.getPhysicalAddress()
    if lab_address:
        _keys = ['address', 'city', 'state', 'zip', 'country']
        _list = [lab_address.get(v) for v in _keys]
        self.lab_address = "<br/>".join(_list).replace("\n", "<br/>")
    else:
        self.lab_address = None

    # group/publish analysis requests by contact
    ARs_by_contact = {}
    for ar in self.analysis_requests:
        contact_uid = ar.getContact().UID()
        if contact_uid not in ARs_by_contact:
            ARs_by_contact[contact_uid] = []
        ARs_by_contact[contact_uid].append(ar)

    for contact_uid, ars in ARs_by_contact.items():
        ars.sort()
        self.contact = ars[0].getContact()
        self.pub_pref = self.contact.getPublicationPreference()
        batch_size = 'email' in self.pub_pref and BatchEmail or 5

        # client address
        self.client = ars[0].aq_parent
        client_address = self.client.getPostalAddress() \
            or self.contact.getBillingAddress() \
            or self.contact.getPhysicalAddress()
        if client_address:
            _keys = ['address', 'city', 'state', 'zip', 'country']
            _list = [client_address.get(v) for v in _keys]
            self.client_address = "<br/>".join(_list).replace("\n", "<br/>")
        else:
            self.client_address = None

        self.Footer = self.context.bika_setup.getResultFooter()

        # send batches of ARs to each contact
        for b in range(0, len(ars), batch_size):
            self.batch = ars[b:b + batch_size]
            self.any_accredited = False
            self.any_drymatter = False
            # get all services from all requests in this batch into a
            # dictionary:
            #   {'Point Of Capture': {'Category': [service, service, ...]}}
            self.services = {}
            out_fn = "_".join([ar.Title() for ar in self.batch])
            for ar in self.batch:
                if ar.getReportDryMatter():
                    self.any_drymatter = True
                states = ("verified", "published")
                for analysis in ar.getAnalyses(full_objects=True,
                                               review_state=states):
                    service = analysis.getService()
                    poc = POINTS_OF_CAPTURE.getValue(service.getPointOfCapture())
                    cat = service.getCategoryTitle()
                    if poc not in self.services:
                        self.services[poc] = {}
                    if cat not in self.services[poc]:
                        self.services[poc][cat] = []
                    if service not in self.services[poc][cat]:
                        self.services[poc][cat].append(service)
                    if service.getAccredited():
                        self.any_accredited = True

            # compose and send email
            if 'email' in self.pub_pref:
                # render template
                ar_results = self.ar_results()
                debug_mode = App.config.getConfiguration().debug_mode
                if debug_mode:
                    open(join(Globals.INSTANCE_HOME, 'var', out_fn + ".html"),
                         "w").write(ar_results)
                    pisa.showLogging()

                ramdisk = StringIO()
                pdf = pisa.CreatePDF(ar_results, ramdisk)
                pdf_data = ramdisk.getvalue()
                ramdisk.close()
                if debug_mode:
                    open(join(Globals.INSTANCE_HOME, 'var', out_fn + ".pdf"),
                         "wb").write(pdf_data)

                mime_msg = MIMEMultipart('related')
                mime_msg['Subject'] = self.get_mail_subject()
                mime_msg['From'] = formataddr(
                    (encode_header(laboratory.getName()),
                     laboratory.getEmailAddress()))
                mime_msg['To'] = formataddr(
                    (encode_header(self.contact.getFullname()),
                     self.contact.getEmailAddress()))
                mime_msg.preamble = 'This is a multi-part MIME message.'
                msg_txt = MIMEText(ar_results, _subtype='html')
                mime_msg.attach(msg_txt)
                if not pdf.err:
                    part = MIMEBase('application', "application/pdf")
                    part.add_header('Content-Disposition',
                                    'attachment; filename="%s.pdf"' % out_fn)
                    part.set_payload(pdf_data)
                    Encoders.encode_base64(part)
                    mime_msg.attach(part)

                try:
                    host = getToolByName(self.context, 'MailHost')
                    host.send(mime_msg.as_string(), immediate=True)
                except SMTPServerDisconnected, msg:
                    if not debug_mode:
                        raise SMTPServerDisconnected(msg)
                except SMTPRecipientsRefused, msg:
                    raise WorkflowException(str(msg))

                if self.action == 'publish':
                    for ar in self.batch:
                        try:
                            workflow.doActionFor(ar, 'publish')
                        except WorkflowException:
                            pass

                ## if not pdf.err:
                ##    setheader = self.request.RESPONSE.setHeader
                ##    setheader('Content-Type', 'application/pdf')
                ##    setheader("Content-Disposition",
                ##              "attachment;filename=\"%s.pdf\"" % out_fn)
                ##    self.request.RESPONSE.write(pdf_data)
            else:
                raise Exception, "XXX pub_pref %s" % self.pub_pref
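# Sketch only (hypothetical helper) of the address-to-HTML rule used above for
# the lab and client addresses, with a guard so that "<br/>".join() never
# receives None when a key is missing from the address record.
def _address_to_html(address):
    if not address:
        return None
    keys = ['address', 'city', 'state', 'zip', 'country']
    lines = [address.get(k) or '' for k in keys]
    return "<br/>".join(lines).replace("\n", "<br/>")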
def _analysis_data(self, analysis, decimalmark=None):
    if analysis.UID() in self._cache['_analysis_data']:
        return self._cache['_analysis_data'][analysis.UID()]

    keyword = analysis.getKeyword()
    service = analysis.getService()
    andict = {'obj': analysis,
              'id': analysis.id,
              'title': analysis.Title(),
              'keyword': keyword,
              'scientific_name': service.getScientificName(),
              'accredited': service.getAccredited(),
              'point_of_capture': to_utf8(
                  POINTS_OF_CAPTURE.getValue(service.getPointOfCapture())),
              'category': to_utf8(service.getCategoryTitle()),
              'result': analysis.getResult(),
              'isnumber': isnumber(analysis.getResult()),
              'unit': to_utf8(service.getUnit()),
              'formatted_unit': format_supsub(to_utf8(service.getUnit())),
              'capture_date': analysis.getResultCaptureDate(),
              'request_id': analysis.aq_parent.getId(),
              'formatted_result': '',
              'uncertainty': analysis.getUncertainty(),
              'formatted_uncertainty': '',
              'retested': analysis.getRetested(),
              'remarks': to_utf8(analysis.getRemarks()),
              'resultdm': to_utf8(analysis.getResultDM()),
              'outofrange': False,
              'type': analysis.portal_type,
              'reftype': analysis.getReferenceType()
                  if hasattr(analysis, 'getReferenceType') else None,
              'worksheet': None,
              'specs': {},
              'formatted_specs': ''}

    if analysis.portal_type == 'DuplicateAnalysis':
        andict['reftype'] = 'd'

    ws = analysis.getBackReferences('WorksheetAnalysis')
    andict['worksheet'] = ws[0].id if ws and len(ws) > 0 else None
    andict['worksheet_url'] = ws[0].absolute_url if ws and len(ws) > 0 else None
    andict['refsample'] = analysis.getSample().id \
        if analysis.portal_type == 'Analysis' \
        else '%s - %s' % (analysis.aq_parent.id, analysis.aq_parent.Title())

    if analysis.portal_type == 'ReferenceAnalysis':
        # The analysis is a Control or Blank. We might use the
        # reference results instead of other specs
        uid = analysis.getServiceUID()
        specs = analysis.aq_parent.getResultsRangeDict().get(uid, {})
    else:
        # Get the specs directly from the analysis. The getResultsRange
        # function already takes care of which specs must be used:
        # AR, client or lab.
        specs = analysis.getResultsRange()

    andict['specs'] = specs
    scinot = self.context.bika_setup.getScientificNotationReport()
    fresult = analysis.getFormattedResult(specs=specs,
                                          sciformat=int(scinot),
                                          decimalmark=decimalmark)

    # We don't use cgi.encode here because result fields must be rendered
    # using the 'structure' wildcard. The reason is that the result can be
    # expressed in sci notation, which may include <sup></sup> html tags.
    # Please note the default value for the 'html' parameter of the
    # getFormattedResult signature is set to True, so the service will
    # already take the LDL and UDL symbols '<' and '>' into account and
    # escape them if necessary.
    andict['formatted_result'] = fresult

    fs = ''
    if specs.get('min', None) and specs.get('max', None):
        fs = '%s - %s' % (specs['min'], specs['max'])
    elif specs.get('min', None):
        fs = '> %s' % specs['min']
    elif specs.get('max', None):
        fs = '< %s' % specs['max']
    andict['formatted_specs'] = formatDecimalMark(fs, decimalmark)
    andict['formatted_uncertainty'] = format_uncertainty(
        analysis, analysis.getResult(), decimalmark=decimalmark,
        sciformat=int(scinot))

    # Out of range?
    if specs:
        adapters = getAdapters((analysis, ), IResultOutOfRange)
        bsc = getToolByName(self.context, "bika_setup_catalog")
        for name, adapter in adapters:
            ret = adapter(specification=specs)
            if ret and ret['out_of_range']:
                andict['outofrange'] = True
                break

    self._cache['_analysis_data'][analysis.UID()] = andict
    return andict
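# Minimal sketch of the per-UID memoisation pattern used by _analysis_data
# above. It assumes, as the method implies, that self._cache is a dict of
# dicts initialised elsewhere; the helper name is hypothetical.
def _memoised(self, bucket, key, compute):
    store = self._cache.setdefault(bucket, {})
    if key not in store:
        store[key] = compute()
    return store[key]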
def __call__(self):
    ar = self.context
    workflow = getToolByName(self.context, 'portal_workflow')
    if 'transition' in self.request.form:
        doActionFor(self.context, self.request.form['transition'])

    # If the analysis request has been received and hasn't been verified
    # yet, redirect the user to the manage_results view, but only if the
    # user has privileges to Edit(Field)Results, because otherwise she/he
    # will receive an InsufficientPrivileges error!
    if (self.request.PATH_TRANSLATED.endswith(self.context.id) and
            check_permission(EditResults, self.context) and
            check_permission(EditFieldResults, self.context) and
            wasTransitionPerformed(self.context, 'receive') and
            not wasTransitionPerformed(self.context, 'verify')):
        # Redirect to manage results view if not cancelled
        if api.get_workflow_status_of(ar, 'cancellation_state') != \
                "cancelled":
            manage_results_url = "/".join(
                [self.context.absolute_url(), 'manage_results'])
            self.request.response.redirect(manage_results_url)
            return

    # Contacts get expanded for view
    contact = self.context.getContact()
    contacts = []
    for cc in self.context.getCCContact():
        contacts.append(cc)
    if contact in contacts:
        contacts.remove(contact)
    ccemails = []
    for cc in contacts:
        ccemails.append("%s <<a href='mailto:%s'>%s</a>>"
                        % (cc.Title(), cc.getEmailAddress(),
                           cc.getEmailAddress()))

    # CC Emails become mailto links
    emails = self.context.getCCEmails()
    if isinstance(emails, str):
        emails = emails and [emails, ] or []
    cc_emails = []
    cc_hrefs = []
    for cc in emails:
        cc_emails.append(cc)
        cc_hrefs.append("<a href='mailto:%s'>%s</a>" % (cc, cc))

    # render header table
    self.header_table = HeaderTableView(self.context, self.request)()

    # Create Partitions View for this AR's sample
    p = SamplePartitionsView(self.context.getSample(), self.request)
    p.show_column_toggles = False
    self.parts = p.contents_table()

    # Create Field and Lab Analyses tables
    self.tables = {}
    for poc in POINTS_OF_CAPTURE:
        if self.context.getAnalyses(getPointOfCapture=poc):
            t = self.createAnalysesView(
                ar,
                self.request,
                getPointOfCapture=poc,
                show_categories=self.context.bika_setup.getCategoriseAnalysisServices(),
                getRequestUID=self.context.UID())
            t.allow_edit = True
            t.form_id = "%s_analyses" % poc
            t.review_states[0]['transitions'] = [{'id': 'submit'},
                                                 {'id': 'retract'},
                                                 {'id': 'verify'}]
            t.show_workflow_action_buttons = True
            t.show_select_column = True
            if getSecurityManager().checkPermission(EditFieldResults,
                                                    self.context) \
               and poc == 'field':
                t.review_states[0]['columns'].remove('DueDate')
            self.tables[POINTS_OF_CAPTURE.getValue(poc)] = t.contents_table()

    # Create QC Analyses View for this AR
    show_cats = self.context.bika_setup.getCategoriseAnalysisServices()
    qcview = self.createQCAnalyesView(ar,
                                      self.request,
                                      show_categories=show_cats)
    qcview.allow_edit = False
    qcview.show_select_column = False
    qcview.show_workflow_action_buttons = False
    qcview.form_id = "%s_qcanalyses"
    qcview.review_states[0]['transitions'] = [{'id': 'submit'},
                                              {'id': 'retract'},
                                              {'id': 'verify'}]
    self.qctable = qcview.contents_table()

    # Create the ResultsInterpretation by department view
    from resultsinterpretation import ARResultsInterpretationView
    self.riview = ARResultsInterpretationView(ar, self.request)

    # If a general retract has been done, raise a warning
    if workflow.getInfoFor(ar, 'review_state') == 'sample_received':
        allstatus = list()
        for analysis in ar.getAnalyses():
            status = workflow.getInfoFor(analysis.getObject(), 'review_state')
            if status not in ['retracted', 'to_be_verified', 'verified']:
                allstatus = []
                break
            else:
                allstatus.append(status)
        if len(allstatus) > 0:
            message = "General Retract Done. Submit this AR manually."
            self.addMessage(message, 'warning')

    # If it is a retracted AR, show the link to the child AR and a warning
    if workflow.getInfoFor(ar, 'review_state') == 'invalid':
        childar = hasattr(ar, 'getChildAnalysisRequest') \
            and ar.getChildAnalysisRequest() or None
        message = _('These results have been withdrawn and are '
                    'listed here for trace-ability purposes. Please follow '
                    'the link to the retest')
        if childar:
            message = (message + " %s.") % childar.getId()
        else:
            message = message + "."
        self.addMessage(message, 'warning')

    # If this is an AR automatically generated due to a Retraction, show
    # its parent AR information
    if hasattr(ar, 'getParentAnalysisRequest') \
            and ar.getParentAnalysisRequest():
        par = ar.getParentAnalysisRequest()
        message = _('This Analysis Request has been '
                    'generated automatically due to '
                    'the retraction of the Analysis '
                    'Request ${retracted_request_id}.',
                    mapping={'retracted_request_id': par.getId()})
        self.addMessage(message, 'info')

    self.renderMessages()
    return self.template()
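# Standalone restatement (illustrative only, hypothetical name) of the
# "general retract" check above: warn only when a received AR has at least
# one analysis and every analysis is retracted, to_be_verified or verified.
def _all_analyses_settled(workflow, analyses):
    done = ('retracted', 'to_be_verified', 'verified')
    states = [workflow.getInfoFor(a.getObject(), 'review_state')
              for a in analyses]
    return bool(states) and all(s in done for s in states)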
def __call__(self):
    ar = self.context
    workflow = getToolByName(self.context, 'portal_workflow')
    if 'transition' in self.request.form:
        doActionFor(self.context, self.request.form['transition'])

    # Contacts get expanded for view
    contact = self.context.getContact()
    contacts = []
    for cc in self.context.getCCContact():
        contacts.append(cc)
    if contact in contacts:
        contacts.remove(contact)
    ccemails = []
    for cc in contacts:
        ccemails.append("%s <<a href='mailto:%s'>%s</a>>"
                        % (cc.Title(), cc.getEmailAddress(),
                           cc.getEmailAddress()))

    # CC Emails become mailto links
    emails = self.context.getCCEmails()
    if isinstance(emails, str):
        emails = emails and [emails, ] or []
    cc_emails = []
    cc_hrefs = []
    for cc in emails:
        cc_emails.append(cc)
        cc_hrefs.append("<a href='mailto:%s'>%s</a>" % (cc, cc))

    # render header table
    self.header_table = HeaderTableView(self.context, self.request)()

    # Create Partitions View for this AR's sample
    p = SamplePartitionsView(self.context.getSample(), self.request)
    p.show_column_toggles = False
    self.parts = p.contents_table()

    # Create Field and Lab Analyses tables
    self.tables = {}
    for poc in POINTS_OF_CAPTURE:
        if self.context.getAnalyses(getPointOfCapture=poc):
            t = self.createAnalysesView(
                ar,
                self.request,
                getPointOfCapture=poc,
                show_categories=self.context.bika_setup.getCategoriseAnalysisServices())
            t.allow_edit = True
            t.form_id = "%s_analyses" % poc
            t.review_states[0]['transitions'] = [{'id': 'submit'},
                                                 {'id': 'retract'},
                                                 {'id': 'verify'}]
            t.show_workflow_action_buttons = True
            t.show_select_column = True
            if getSecurityManager().checkPermission(EditFieldResults,
                                                    self.context) \
               and poc == 'field':
                t.review_states[0]['columns'].remove('DueDate')
            self.tables[POINTS_OF_CAPTURE.getValue(poc)] = t.contents_table()

    # Un-captured field analyses may cause confusion
    if ar.getAnalyses(getPointOfCapture='field',
                      review_state=['sampled', 'sample_due']):
        message = _("There are field analyses without submitted results.")
        self.addMessage(message, 'info')

    # Create QC Analyses View for this AR
    show_cats = self.context.bika_setup.getCategoriseAnalysisServices()
    qcview = self.createQCAnalyesView(ar,
                                      self.request,
                                      show_categories=show_cats)
    qcview.allow_edit = False
    qcview.show_select_column = False
    qcview.show_workflow_action_buttons = False
    qcview.form_id = "%s_qcanalyses"
    qcview.review_states[0]['transitions'] = [{'id': 'submit'},
                                              {'id': 'retract'},
                                              {'id': 'verify'}]
    self.qctable = qcview.contents_table()

    # If a general retract has been done, raise a warning
    if workflow.getInfoFor(ar, 'review_state') == 'sample_received':
        allstatus = list()
        for analysis in ar.getAnalyses():
            status = workflow.getInfoFor(analysis.getObject(), 'review_state')
            if status not in ['retracted', 'to_be_verified', 'verified']:
                allstatus = []
                break
            else:
                allstatus.append(status)
        if len(allstatus) > 0:
            self.addMessage("General Retract Done", 'warning')

    # If it is a retracted AR, show the link to the child AR and a warning
    if workflow.getInfoFor(ar, 'review_state') == 'invalid':
        childar = hasattr(ar, 'getChildAnalysisRequest') \
            and ar.getChildAnalysisRequest() or None
        message = _('These results have been withdrawn and are '
                    'listed here for trace-ability purposes. Please follow '
                    'the link to the retest')
        if childar:
            message = (message + " %s.") % childar.getRequestID()
        else:
            message = message + "."
        self.addMessage(message, 'warning')

    # If this is an AR automatically generated due to a Retraction, show
    # its parent AR information
    if hasattr(ar, 'getParentAnalysisRequest') \
            and ar.getParentAnalysisRequest():
        par = ar.getParentAnalysisRequest()
        message = _('This Analysis Request has been '
                    'generated automatically due to '
                    'the retraction of the Analysis '
                    'Request ${retracted_request_id}.',
                    mapping={'retracted_request_id': par.getRequestID()})
        self.addMessage(message, 'info')

    self.renderMessages()
    return self.template()
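# Sketch (hypothetical helper) of the CC e-mail normalisation used above: a
# plain string becomes a one-element list, and each address is also rendered
# as a mailto link for the template.
def _cc_links(emails):
    if isinstance(emails, str):
        emails = [emails] if emails else []
    return emails, ["<a href='mailto:%s'>%s</a>" % (e, e) for e in emails]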