def __call__(self, result=None, **kwargs):
    path = '++resource++bika.lims.images'
    alerts = {}
    # Look for IResultOutOfRange adapters for this object
    for name, adapter in getAdapters((self.context, ), IResultOutOfRange):
        ret = adapter(result)
        if not ret:
            continue
        spec = ret["spec_values"]
        rngstr = "{0} {1}, {2} {3}".format(
            t(_("min")), str(spec.get('min', '')),
            t(_("max")), str(spec.get('max', '')))
        if ret["out_of_range"]:
            if ret["acceptable"]:
                message = "{0} ({1})".format(
                    t(_('Result in shoulder range')), rngstr)
                icon = path + '/warning.png'
            else:
                message = "{0} ({1})".format(
                    t(_('Result out of range')), rngstr)
                icon = path + '/exclamation.png'
            alerts[self.context.UID()] = [{
                'icon': icon,
                'msg': message,
                'field': 'Result',
            }]
            break
    return alerts
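
# For context, a minimal sketch of an IResultOutOfRange adapter as consumed
# by the __call__ above. Only the returned dict shape (out_of_range,
# acceptable, spec_values) is taken from that code; the class name, the
# spec source and the numbers are illustrative assumptions.
class ResultOutOfRangeSketch(object):

    def __init__(self, context):
        self.context = context

    def __call__(self, result=None):
        spec = {'min': '5', 'max': '10'}  # assumed specification values
        try:
            value = float(result)
        except (TypeError, ValueError):
            # Non-numeric results produce no alert
            return {}
        out_of_range = (value < float(spec['min'])
                        or value > float(spec['max']))
        return {
            'out_of_range': out_of_range,
            # 'acceptable' would flag results in the shoulder range; the
            # shoulder computation is omitted in this sketch
            'acceptable': False,
            'spec_values': spec,
        }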

def render_field_view(self, field):
    fieldname = field.getName()
    field = self.context.Schema()[fieldname]
    ret = {'fieldName': fieldname, 'mode': 'view'}
    try:
        adapter = getAdapter(self.context,
                             interface=IHeaderTableFieldRenderer,
                             name=fieldname)
    except ComponentLookupError:
        adapter = None
    if adapter:
        ret = {'fieldName': fieldname,
               'mode': 'structure',
               'html': adapter(field)}
    else:
        if field.getType().find("ool") > -1:
            value = field.get(self.context)
            ret = {'fieldName': fieldname,
                   'mode': 'structure',
                   'html': t(_('Yes')) if value else t(_('No'))}
        elif field.getType().find("Reference") > -1:
            # Prioritize method retrieval over schema's field
            targets = None
            if hasattr(self.context, 'get%s' % fieldname):
                fieldaccessor = getattr(self.context, 'get%s' % fieldname)
                if callable(fieldaccessor):
                    targets = fieldaccessor()
            if not targets:
                targets = field.get(self.context)
            if targets:
                if not isinstance(targets, list):
                    targets = [targets]
                sm = getSecurityManager()
                if all([sm.checkPermission(view, ta) for ta in targets]):
                    a = ["<a href='%s'>%s</a>" % (target.absolute_url(),
                                                  target.Title())
                         for target in targets]
                    ret = {'fieldName': fieldname,
                           'mode': 'structure',
                           'html': ", ".join(a)}
                else:
                    ret = {'fieldName': fieldname,
                           'mode': 'structure',
                           'html': ", ".join(
                               [ta.Title() for ta in targets])}
            else:
                ret = {'fieldName': fieldname,
                       'mode': 'structure',
                       'html': ''}
        elif field.getType().lower().find('datetime') > -1:
            value = field.get(self.context)
            ret = {'fieldName': fieldname,
                   'mode': 'structure',
                   'html': self.ulocalized_time(value, long_format=True)}
    return ret
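
# A hedged sketch of an IHeaderTableFieldRenderer adapter as looked up by
# render_field_view above: registered under the field name, called with the
# field, and expected to return ready-made HTML. The class name and markup
# are illustrative assumptions, not an existing bika.lims adapter.
class DateSampledRendererSketch(object):

    def __init__(self, context):
        self.context = context

    def __call__(self, field):
        value = field.get(self.context)
        return "<span class='date-sampled'>%s</span>" % value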

def get_custom_fields(self):
    """Return a dictionary with custom fields to be rendered after the
    header_table, with this structure:
    {<fieldid>: {'title': <title>, 'value': <html>}}
    """
    custom = {}
    ar = self.context
    workflow = getToolByName(self.context, 'portal_workflow')
    # If this is a retracted AR, link to the child (retest) AR
    if workflow.getInfoFor(ar, 'review_state') == 'invalid':
        childar = hasattr(ar, 'getChildAnalysisRequest') \
            and ar.getChildAnalysisRequest() or None
        anchor = childar and ("<a href='%s'>%s</a>" % (
            childar.absolute_url(), childar.getRequestID())) or None
        if anchor:
            custom['ChildAR'] = {
                'title': t(_("AR for retested results")),
                'value': anchor
            }
    # If this is an AR generated automatically due to a retraction, show
    # its parent AR information
    if hasattr(ar, 'getParentAnalysisRequest') \
            and ar.getParentAnalysisRequest():
        par = ar.getParentAnalysisRequest()
        anchor = "<a href='%s'>%s</a>" % (par.absolute_url(),
                                          par.getRequestID())
        custom['ParentAR'] = {
            'title': t(_("Invalid AR retested")),
            'value': anchor
        }
    return custom

def folderitem(self, obj, item, index):
    workflow = getToolByName(self.context, 'portal_workflow')
    if item.get('review_state', 'current') == 'current':
        # Check expiry date
        exdate = obj.getExpiryDate()
        if exdate:
            expirydate = DT2dt(exdate).replace(tzinfo=None)
            if datetime.today() > expirydate:
                # Trigger expiration
                workflow.doActionFor(obj, 'expire')
                item['review_state'] = 'expired'
                item['obj'] = obj
    if self.contentFilter.get('review_state', '') \
            and item.get('review_state', '') == 'expired':
        # This item must be omitted from the list
        return None
    item['ID'] = obj.id
    item['DateSampled'] = self.ulocalized_time(obj.getDateSampled(),
                                               long_format=True)
    item['DateReceived'] = self.ulocalized_time(obj.getDateReceived())
    item['DateOpened'] = self.ulocalized_time(obj.getDateOpened())
    item['ExpiryDate'] = self.ulocalized_time(obj.getExpiryDate())
    after_icons = ''
    if obj.getBlank():
        after_icons += ("<img src='%s/++resource++bika.lims.images/"
                        "blank.png' title='%s'>"
                        % (self.portal_url, t(_('Blank'))))
    if obj.getHazardous():
        after_icons += ("<img src='%s/++resource++bika.lims.images/"
                        "hazardous.png' title='%s'>"
                        % (self.portal_url, t(_('Hazardous'))))
    item['replace']['ID'] = "<a href='%s/base_view'>%s</a> %s" % (
        item['url'], item['ID'], after_icons)
    return item

def extra(self):
    workflow = self.tools.workflow()
    state = self.context_state.workflow_state()
    stateTitle = self._currentStateTitle()
    if workflow.getInfoFor(self.context,
                           'cancellation_state', '') == 'cancelled':
        title2 = t(_('Cancelled'))
        # cater for bika_one_state_workflow (always Active)
        if not stateTitle or \
           workflow.getInfoFor(self.context,
                               'review_state', '') == 'active':
            stateTitle = t(_('Cancelled'))
        else:
            stateTitle = "%s (%s)" % (stateTitle, title2)
        return {'id': 'plone-contentmenu-workflow',
                'class': 'state-cancelled',
                'state': state,
                'stateTitle': stateTitle}
    elif workflow.getInfoFor(self.context,
                             'inactive_state', '') == 'inactive':
        title2 = t(_('Dormant'))
        # cater for bika_one_state_workflow (always Active)
        if not stateTitle or \
           (workflow.getInfoFor(self.context, 'review_state', '')
                in ('active', 'current')):
            stateTitle = t(_('Dormant'))
        else:
            stateTitle = "%s (%s)" % (stateTitle, title2)
        return {'id': 'plone-contentmenu-workflow',
                'class': 'state-inactive',
                'state': state,
                'stateTitle': stateTitle}
    else:
        return {'id': 'plone-contentmenu-workflow',
                'class': 'state-%s' % state,
                'state': state,
                'stateTitle': stateTitle}

def make_title(o):
    # the javascript uses these strings to decide if it should check the
    # blank or hazardous checkboxes when a reference definition is
    # selected (./js/referencesample.js)
    if not o:
        return ''
    title = _u(o.Title())
    if o.getBlank():
        title += " %s" % t(_('(Blank)'))
    if o.getHazardous():
        title += " %s" % t(_('(Hazardous)'))
    return title

def folderitems(self):
    workflow = getToolByName(self.context, 'portal_workflow')
    items = super(ReferenceSamplesView, self).folderitems()
    new_items = []
    for x in range(len(items)):
        if 'obj' not in items[x]:
            continue
        obj = items[x]['obj']
        if self.control_type == 'b' and not obj.getBlank():
            continue
        if self.control_type == 'c' and obj.getBlank():
            continue
        ref_services = obj.getServices()
        ws_ref_services = [rs for rs in ref_services
                           if rs.UID() in self.service_uids]
        if ws_ref_services:
            if workflow.getInfoFor(obj, 'review_state') != 'current':
                continue
            services = [rs.Title() for rs in ws_ref_services]
            items[x]['nr_services'] = len(services)
            items[x]['Definition'] = \
                (obj.getReferenceDefinition() and
                 obj.getReferenceDefinition().Title()) or ''
            services.sort(key=lambda s: s.lower())
            items[x]['Services'] = ", ".join(services)
            items[x]['replace'] = {}
            after_icons = ("<a href='%s' target='_blank'>"
                           "<img src='++resource++bika.lims.images/"
                           "referencesample.png' title='%s: %s'></a>"
                           % (obj.absolute_url(),
                              t(_("Reference sample")), obj.Title()))
            items[x]['before']['ID'] = after_icons
            new_items.append(items[x])
    new_items = sorted(new_items, key=itemgetter('nr_services'))
    new_items.reverse()
    return new_items

def __call__(self):
    self.context_actions = {}
    wf = getToolByName(self.context, 'portal_workflow')
    mtool = getToolByName(self.context, 'portal_membership')
    addPortalMessage = self.context.plone_utils.addPortalMessage
    # client contact required
    active_contacts = [c for c in self.context.objectValues('Contact')
                       if wf.getInfoFor(c, 'inactive_state', '') == 'active']
    if isActive(self.context):
        if self.context.portal_type == "Client" and not active_contacts:
            msg = _("Client contact required before request may be "
                    "submitted")
            addPortalMessage(msg)
        else:
            if mtool.checkPermission(AddAnalysisRequest, self.context):
                self.context_actions[t(_('Add'))] = {
                    'url': self.context.absolute_url() +
                        "/portal_factory/"
                        "AnalysisRequest/Request new analyses/ar_add",
                    'icon': '++resource++bika.lims.images/add.png'}
    # in the client context we can use a permission check for this
    # transition; in multi-client listings, we must rather check against
    # user roles.
    if mtool.checkPermission(ModifyPortalContent, self.context):
        review_states = []
        for review_state in self.review_states:
            review_state['custom_actions'].extend([
                {'id': 'copy_to_new',
                 'title': _('Copy to new'),
                 'url': 'workflow_action?action=copy_to_new'},
            ])
            review_states.append(review_state)
        self.review_states = review_states
    return super(ClientAnalysisRequestsView, self).__call__()

def __call__(self):
    ar = self.context
    workflow = getToolByName(ar, 'portal_workflow')
    # If this is a retracted AR, link to the child AR and show a warning
    if workflow.getInfoFor(ar, 'review_state') == 'invalid':
        childar = hasattr(ar, 'getChildAnalysisRequest') \
            and ar.getChildAnalysisRequest() or None
        childid = childar and childar.getRequestID() or None
        message = _('This Analysis Request has been withdrawn and is '
                    'shown for traceability purposes only. Retest: '
                    '${retest_child_id}.',
                    mapping={'retest_child_id':
                             safe_unicode(childid) or ''})
        self.context.plone_utils.addPortalMessage(message, 'warning')
    # If this is an AR generated automatically due to a retraction, show
    # its parent AR information
    if hasattr(ar, 'getParentAnalysisRequest') \
            and ar.getParentAnalysisRequest():
        par = ar.getParentAnalysisRequest()
        message = _('This Analysis Request has been generated '
                    'automatically due to the retraction of the '
                    'Analysis Request ${retracted_request_id}.',
                    mapping={'retracted_request_id':
                             safe_unicode(par.getRequestID())})
        self.context.plone_utils.addPortalMessage(t(message), 'info')
    return LogView.__call__(self)

def __init__(self, context, request):
    BikaListingView.__init__(self, context, request)
    self.show_sort_column = False
    self.show_select_row = False
    self.show_select_column = False
    self.show_workflow_action_buttons = False
    self.pagesize = 0
    self.icon = self.portal_url + \
        "/++resource++bika.lims.images/%s_big.png" % \
        context.portal_type.lower()
    self.title = to_utf8(self.context.Title()) + " " + t(_("Log"))
    self.description = ""
    self.columns = {
        'Version': {'title': _('Version'), 'sortable': False},
        'Date': {'title': _('Date'), 'sortable': False},
        'User': {'title': _('User'), 'sortable': False},
        'Action': {'title': _('Action'), 'sortable': False},
        'Description': {'title': _('Description'), 'sortable': False},
    }
    self.review_states = [
        {'id': 'default',
         'title': 'All',
         'contentFilter': {},
         'columns': ['Version', 'Date', 'User', 'Action', 'Description']},
    ]

def get_workflow_actions(self):
    """Compile a list of possible workflow transitions for items in
    this Table.
    """
    # cbb return empty list if we are unable to select items
    if not self.bika_listing.show_select_column:
        return []
    workflow = getToolByName(self.context, 'portal_workflow')
    # get all transitions for all items.
    transitions = {}
    actions = []
    for obj in [i.get('obj', '') for i in self.items]:
        obj = get_object(obj)
        for it in workflow.getTransitionsFor(obj):
            transitions[it['id']] = it
    # the list is restricted to and ordered by these transitions.
    if 'transitions' in self.bika_listing.review_state:
        for tdict in self.bika_listing.review_state['transitions']:
            if tdict['id'] in transitions:
                actions.append(transitions[tdict['id']])
    else:
        actions = transitions.values()
    # remove any invalid items with a warning
    new_actions = []
    for action in actions:
        if isinstance(action, dict) and 'id' in action:
            new_actions.append(action)
        else:
            logger.warning("bad action in review_state['transitions']: "
                           "%s. (complete list: %s)." % (action, actions))
    actions = new_actions
    # and these are removed
    if 'hide_transitions' in self.bika_listing.review_state:
        hidden_transitions = \
            self.bika_listing.review_state['hide_transitions']
        actions = [a for a in actions if a['id'] not in hidden_transitions]
    # cheat: until workflow_action is abolished, all URLs defined in
    # GS workflow setup will be ignored, and the default will apply.
    # (that means, the WorkflowAction-bound URL is called).
    for i, action in enumerate(actions):
        actions[i]['url'] = ''
    # if there is a review_state['some_state']['custom_transitions']
    # attribute on the BikaListingView, add these actions to the list.
    if 'custom_transitions' in self.bika_listing.review_state:
        for action in self.bika_listing.review_state['custom_transitions']:
            if isinstance(action, dict) and 'id' in action:
                actions.append(action)
    for a, action in enumerate(actions):
        actions[a]['title'] = t(PMF(actions[a]['title']))
    return actions

def getInstrumentLocations(self):
    bsc = getToolByName(self, 'bika_setup_catalog')
    items = [(c.UID, c.Title)
             for c in bsc(portal_type='InstrumentLocation',
                          inactive_state='active')]
    items.sort(key=lambda x: x[1])
    items.insert(0, ('', t(_('None'))))
    return DisplayList(items)

def extra(self):
    workflow = self.tools.workflow()
    state = self.context_state.workflow_state()
    stateTitle = self._currentStateTitle()
    if workflow.getInfoFor(self.context,
                           'cancellation_state', '') == 'cancelled':
        title2 = t(_('Cancelled'))
        # cater for bika_one_state_workflow (always Active)
        if not stateTitle or \
           workflow.getInfoFor(self.context,
                               'review_state', '') == 'active':
            stateTitle = t(_('Cancelled'))
        else:
            stateTitle = "%s (%s)" % (stateTitle, title2)
        return {'id': 'plone-contentmenu-workflow',
                'class': 'state-cancelled',
                'state': state,
                'stateTitle': stateTitle}
    elif workflow.getInfoFor(self.context,
                             'inactive_state', '') == 'inactive':
        title2 = t(_('Dormant'))
        # cater for bika_one_state_workflow (always Active)
        if not stateTitle or \
           (workflow.getInfoFor(self.context, 'review_state', '')
                in ('active', 'current')):
            stateTitle = t(_('Dormant'))
        else:
            stateTitle = "%s (%s)" % (stateTitle, title2)
        return {'id': 'plone-contentmenu-workflow',
                'class': 'state-inactive',
                'state': state,
                'stateTitle': stateTitle}
    elif workflow.getInfoFor(self.context, 'sampleprep_review_state', 0):
        prep_wf_id = self.context.getPreparationWorkflow()
        prep_wf = workflow.getWorkflowById(prep_wf_id)
        prep_state = workflow.getInfoFor(self.context,
                                         'sampleprep_review_state')
        prep_title = prep_wf.states[prep_state].title
        stateTitle = "%s (%s)" % (stateTitle, _(prep_title))
        return {'id': 'plone-contentmenu-workflow',
                'class': 'state-sampleprep',
                'state': state,
                'stateTitle': stateTitle}
    else:
        return {'id': 'plone-contentmenu-workflow',
                'class': 'state-%s' % state,
                'state': state,
                'stateTitle': stateTitle}

def ajax_form_error(errors, field=None, arnum=None, message=None):
    if not message:
        message = t(PMF('Input is required but no input given.'))
    if arnum is not None:
        # arnum is zero-based; render a 1-based column reference
        error_key = ' %s.%s' % (int(arnum) + 1, field or '')
    elif field:
        error_key = field
    else:
        error_key = 'Form Error'
    errors[error_key] = message
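
# A quick usage sketch of ajax_form_error: arnum is zero-based, so the
# error key renders a 1-based column number. Field names and messages
# here are illustrative.
errors = {}
ajax_form_error(errors, field='SampleType', arnum=0,
                message='Input is required but no input given.')
# errors == {' 1.SampleType': 'Input is required but no input given.'}
ajax_form_error(errors, message='Badly formed state')
# errors now also contains {'Form Error': 'Badly formed state'}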

def get_workflow_actions(self):
    """Compile a list of possible workflow transitions for items in
    this Table.
    """
    # cbb return empty list if we are unable to select items
    if not self.show_select_column:
        return []
    workflow = getToolByName(self.context, 'portal_workflow')
    # check POST for a specified review_state selection
    selected_state = self.request.get(
        "%s_review_state" % self.form_id, 'default')
    # get the review_state with id=selected_state
    states = [r for r in self.review_states if r['id'] == selected_state]
    review_state = states and states[0] or self.review_states[0]
    # get all transitions for all items.
    transitions = {}
    actions = []
    for obj in [i.get('obj', '') for i in self.items]:
        obj = hasattr(obj, 'getObject') and obj.getObject() or obj
        for it in workflow.getTransitionsFor(obj):
            transitions[it['id']] = it
    # the list is restricted to and ordered by these transitions.
    if 'transitions' in review_state:
        for transition_dict in review_state['transitions']:
            if transition_dict['id'] in transitions:
                actions.append(transitions[transition_dict['id']])
    else:
        actions = transitions.values()
    # and these are removed
    if 'hide_transitions' in review_state:
        actions = [a for a in actions
                   if a['id'] not in review_state['hide_transitions']]
    # cheat: until workflow_action is abolished, all URLs defined in
    # GS workflow setup will be ignored, and the default will apply.
    # (that means, the WorkflowAction-bound URL is called).
    for i, action in enumerate(actions):
        actions[i]['url'] = ''
    # if there is a review_state['some_state']['custom_actions'] attribute
    # on the BikaListingView, add these actions to the list.
    if 'custom_actions' in review_state:
        for action in review_state['custom_actions']:
            actions.append(action)
    for a, action in enumerate(actions):
        actions[a]['title'] = t(PMF(actions[a]['id'] + "_transition_title"))
    return actions

def getDataInterfaces(context):
    """Return the current list of data interfaces"""
    from bika.lims.exportimport import instruments
    exims = [('', t(_('None')))]
    for exim_id in instruments.__all__:
        exim = instruments.getExim(exim_id)
        exims.append((exim_id, exim.title))
    return DisplayList(exims)

def msg(self, array, msg, numline=None, line=None, mapping=None):
    prefix = ''
    suffix = ''
    msg = t(_(safe_unicode(msg), mapping=mapping or {}))
    if numline:
        prefix = "[%s] " % numline
    if line:
        suffix = ": %s" % line
    array.append(prefix + msg + suffix)

def __call__(self):
    uc = getToolByName(self.context, 'uid_catalog')
    if 'copy_form_submitted' not in self.request:
        uids = self.request.form.get('uids', [])
        self.services = []
        for uid in uids:
            proxies = uc(UID=uid)
            if proxies:
                self.services.append(proxies[0].getObject())
        return self.template()
    else:
        self.savepoint = savepoint()
        sources = self.request.form.get('uids', [])
        titles = self.request.form.get('dst_title', [])
        keywords = self.request.form.get('dst_keyword', [])
        self.created = []
        for i, s in enumerate(sources):
            if not titles[i]:
                message = _('Validation failed: title is required')
                self.context.plone_utils.addPortalMessage(message, 'info')
                self.savepoint.rollback()
                self.created = []
                break
            if not keywords[i]:
                message = _('Validation failed: keyword is required')
                self.context.plone_utils.addPortalMessage(message, 'info')
                self.savepoint.rollback()
                self.created = []
                break
            title = self.copy_service(s, titles[i], keywords[i])
            if title:
                self.created.append(title)
        if len(self.created) > 1:
            message = t(_('${items} were successfully created.',
                          mapping={'items': safe_unicode(
                              ', '.join(self.created))}))
        elif len(self.created) == 1:
            message = t(_('${item} was successfully created.',
                          mapping={'item':
                                   safe_unicode(self.created[0])}))
        else:
            message = _('No new items were created.')
        self.context.plone_utils.addPortalMessage(message, 'info')
        self.request.response.redirect(self.context.absolute_url())

def folderitems(self):
    from Products.ATContentTypes.utils import DT2dt
    from datetime import datetime
    items = BikaListingView.folderitems(self)
    outitems = []
    workflow = getToolByName(self.context, 'portal_workflow')
    for x in range(len(items)):
        if 'obj' not in items[x]:
            continue
        obj = items[x]['obj']
        if workflow.getInfoFor(obj, 'review_state') == 'current':
            # Check expiry date
            expirydate = DT2dt(obj.getExpiryDate()).replace(tzinfo=None)
            if datetime.today() > expirydate:
                workflow.doActionFor(obj, 'expire')
                items[x]['review_state'] = 'expired'
                items[x]['obj'] = obj
                if 'review_state' in self.contentFilter \
                        and self.contentFilter['review_state'] == 'current':
                    # Expired samples are omitted from the 'current' list
                    continue
        items[x]['ID'] = obj.id
        items[x]['Manufacturer'] = obj.getManufacturer() and \
            obj.getManufacturer().Title() or ''
        items[x]['Definition'] = obj.getReferenceDefinition() and \
            obj.getReferenceDefinition().Title() or ''
        items[x]['DateSampled'] = self.ulocalized_time(
            obj.getDateSampled(), long_format=True)
        items[x]['DateReceived'] = self.ulocalized_time(
            obj.getDateReceived())
        items[x]['DateOpened'] = self.ulocalized_time(obj.getDateOpened())
        items[x]['ExpiryDate'] = self.ulocalized_time(obj.getExpiryDate())
        after_icons = ''
        if obj.getBlank():
            after_icons += ("<img src='%s/++resource++bika.lims.images/"
                            "blank.png' title='%s'>"
                            % (self.portal_url, t(_('Blank'))))
        if obj.getHazardous():
            after_icons += ("<img src='%s/++resource++bika.lims.images/"
                            "hazardous.png' title='%s'>"
                            % (self.portal_url, t(_('Hazardous'))))
        items[x]['replace']['ID'] = "<a href='%s/base_view'>%s</a> %s" % (
            items[x]['url'], items[x]['ID'], after_icons)
        outitems.append(items[x])
    return outitems

def _getAvailableMethods(self):
    """Returns the available (active) methods.
    One method can be performed by multiple instruments, but one
    instrument can only be used with one method.
    """
    bsc = getToolByName(self, "bika_setup_catalog")
    items = [(c.UID, c.Title)
             for c in bsc(portal_type="Method", inactive_state="active")]
    items.sort(key=lambda x: x[1])
    items.insert(0, ("", t(_("None"))))
    return DisplayList(items)

def getDataInterfaces(context):
    """Return the current list of data interfaces"""
    from bika.lims.exportimport import instruments
    exims = []
    for exim_id in instruments.__all__:
        exim = instruments.getExim(exim_id)
        exims.append((exim_id, exim.title))
    exims.sort(key=lambda x: x[1].lower())
    exims.insert(0, ('', t(_('None'))))
    return DisplayList(exims)

def __call__(self, context, request):
    """Horiba Jobin-Yvon ICP analysis results"""
    self.request = request
    self.errors = []
    self.logs = []
    self.warns = []
    infile = self.request.form['data_file']
    # Load the most suitable parser according to file extension/options
    format = self.request.form['format']
    parser = None
    if not hasattr(infile, 'filename'):
        self.errors.append(_("No file selected"))
    elif format == 'csv':
        parser = HoribaJobinYvonICPCSVParser(infile)
    else:
        self.errors.append(t(_("Unrecognized file format ${format}",
                               mapping={"format": format})))
    if parser:
        ar_states = self.get_allowed_ar_states()
        over = self.get_overrides()
        crit = self.get_id_search_criteria()
        instrument = request.form.get('instrument', None)
        importer = HoribaJobinYvonICPImporter(
            parser=parser,
            context=context,
            idsearchcriteria=crit,
            allowed_ar_states=ar_states,
            allowed_analysis_states=None,
            override=over,
            instrument_uid=instrument)
        exception_string = ''
        try:
            importer.process()
        except Exception:
            exception_string = traceback.format_exc()
        self.errors = importer.errors
        self.logs = importer.logs
        self.warns = importer.warns
        if exception_string:
            self.errors.append(exception_string)
    return self.ret()

def _getCalculations(self):
    """Returns a DisplayList with the available Calculations registered
    in Bika Setup. Used to fill the Calculation ReferenceWidget.
    """
    bsc = getToolByName(self, 'bika_setup_catalog')
    items = [(c.UID, c.Title)
             for c in bsc(portal_type='Calculation',
                          inactive_state='active')]
    items.sort(key=lambda x: x[1])
    items.insert(0, ('', t(_('None'))))
    return DisplayList(list(items))

def getImportDataInterfaces(context, import_only=False):
    """Return the current list of import data interfaces"""
    from bika.lims.exportimport import instruments
    exims = []
    for exim_id in instruments.__all__:
        exim = instruments.getExim(exim_id)
        if import_only and not hasattr(exim, 'Import'):
            continue
        exims.append((exim_id, exim.title))
    exims.sort(key=lambda x: x[1].lower())
    exims.insert(0, ('', t(_('None'))))
    return DisplayList(exims)

def getDataInterfaces(self):
    """Return the current list of data interfaces"""
    from bika.equigerminal.exportimport import instruments
    from bika.lims.exportimport import instruments as blinstruments
    instrs = deepcopy(instruments.__all__)
    instrs.extend(blinstruments.__all__)
    exims = []
    for exim_id in instrs:
        exim = instruments.getExim(exim_id)
        exims.append((exim_id, exim.title))
    exims.sort(key=lambda x: x[1].lower())
    exims.insert(0, ('', t(_('None'))))
    return DisplayList(exims)

def folderitems(self):
    items = BikaListingView.folderitems(self)
    valid = [c.UID() for c in self.context.getValidCertifications()]
    latest = self.context.getLatestValidCertification()
    latest = latest.UID() if latest else ''
    for x in range(len(items)):
        if 'obj' not in items[x]:
            continue
        obj = items[x]['obj']
        items[x]['getDate'] = self.ulocalized_time(obj.getDate(),
                                                   long_format=0)
        items[x]['getValidFrom'] = self.ulocalized_time(
            obj.getValidFrom(), long_format=0)
        items[x]['getValidTo'] = self.ulocalized_time(
            obj.getValidTo(), long_format=0)
        items[x]['replace']['Title'] = "<a href='%s'>%s</a>" % (
            items[x]['url'], items[x]['Title'])
        if obj.getInternal() is True:
            items[x]['replace']['getAgency'] = ""
            items[x]['state_class'] = '%s %s' % (
                items[x]['state_class'], 'internalcertificate')
        items[x]['getDocument'] = ""
        items[x]['replace']['getDocument'] = ""
        try:
            doc = obj.getDocument()
            if doc and doc.get_size() > 0:
                anchor = "<a href='%s/at_download/Document'>%s</a>" % (
                    obj.absolute_url(), doc.filename)
                items[x]['getDocument'] = doc.filename
                items[x]['replace']['getDocument'] = anchor
        except Exception:
            # POSKeyError: 'No blob file'
            # Show the record, but not the link
            items[x]['getDocument'] = _('Not available')
            items[x]['replace']['getDocument'] = _('Not available')
        uid = obj.UID()
        if uid in valid:
            # Valid calibration
            items[x]['state_class'] = '%s %s' % (
                items[x]['state_class'], 'active')
        elif uid == latest:
            # Latest certificate, but out of date
            img = ("<img title='%s' src='%s/++resource++bika.lims.images/"
                   "exclamation.png'/> "
                   % (t(_('Out of date')), self.portal_url))
            items[x]['replace']['getValidTo'] = '%s %s' % (
                items[x]['getValidTo'], img)
            items[x]['state_class'] = '%s %s' % (
                items[x]['state_class'], 'inactive outofdate')
        else:
            # Older calibrations
            items[x]['state_class'] = '%s %s' % (
                items[x]['state_class'], 'inactive')
    return items

def __call__(self, context):
    site = getSite()
    request = aq_get(site, "REQUEST", None)
    items = []
    wf = site.portal_workflow
    for folder in self.folders:
        folder = site.restrictedTraverse(folder)
        for portal_type in self.portal_types:
            objects = list(folder.objectValues(portal_type))
            objects = [o for o in objects
                       if wf.getInfoFor(o, "inactive_state") == "active"]
            if not objects:
                continue
            objects.sort(key=lambda o: o.Title().lower())
            xitems = [(t(item.Title()), item.Title()) for item in objects]
            xitems = [SimpleTerm(i[1], i[1], i[0]) for i in xitems]
            items += xitems
    return SimpleVocabulary(items)

def emailInvoice(self, templateHTML, to=None):
    """Send the invoice via email.
    :param templateHTML: The invoice template in HTML, ready to be sent.
    :param to: A list of addresses to send the invoice to.
    """
    to = to or []
    ar = self.aq_parent
    # SMTP errors are silently ignored if the server is in debug mode
    debug_mode = App.config.getConfiguration().debug_mode
    # Useful variables
    lab = ar.bika_setup.laboratory
    # Compose and send the email
    subject = t(_('Invoice')) + ' ' + ar.getInvoice().getId()
    mime_msg = MIMEMultipart('related')
    mime_msg['Subject'] = subject
    mime_msg['From'] = formataddr(
        (encode_header(lab.getName()), lab.getEmailAddress()))
    mime_msg.preamble = 'This is a multi-part MIME message.'
    msg_txt = MIMEText(templateHTML.encode('utf-8'), _subtype='html')
    mime_msg.attach(msg_txt)
    # Build the responsibles' addresses
    mngrs = ar.getResponsible()
    for mngrid in mngrs['ids']:
        name = mngrs['dict'][mngrid].get('name', '')
        email = mngrs['dict'][mngrid].get('email', '')
        if email:
            to.append(formataddr((encode_header(name), email)))
    # Build the client's address
    caddress = ar.aq_parent.getEmailAddress()
    cname = ar.aq_parent.getName()
    if caddress:
        to.append(formataddr((encode_header(cname), caddress)))
    if to:
        # Send the emails
        mime_msg['To'] = ','.join(to)
        try:
            host = getToolByName(ar, 'MailHost')
            host.send(mime_msg.as_string(), immediate=True)
        except SMTPServerDisconnected as msg:
            if not debug_mode:
                raise SMTPServerDisconnected(msg)
        except SMTPRecipientsRefused as msg:
            raise WorkflowException(str(msg))

def __call__(self, context):
    portal = getSite()
    wftool = getToolByName(portal, 'portal_workflow', None)
    if wftool is None:
        return SimpleVocabulary([])
    # XXX This is evil. A vocabulary shouldn't be request specific.
    # The sorting should go into a separate widget.
    # we get REQUEST from wftool because context may be an adapter
    request = aq_get(wftool, 'REQUEST', None)
    items = wftool.listWFStatesByTitle(filter_similar=True)
    items_dict = dict([(i[1], t(i[0])) for i in items])
    items_list = [(k, v) for k, v in items_dict.items()]
    items_list.sort(key=lambda x: x[1])
    terms = [SimpleTerm(k, title=u'%s' % v) for k, v in items_list]
    return SimpleVocabulary(terms)

def _folder_item_verify_icons(self, analysis_brain, item):
    """Set the analysis' verification icons to the item passed in.
    :param analysis_brain: Brain that represents an analysis
    :param item: analysis' dictionary counterpart that represents a row
    """
    submitter = analysis_brain.getSubmittedBy
    if not submitter:
        # This analysis hasn't yet been submitted, no verification yet
        return
    if analysis_brain.review_state == 'retracted':
        # Don't display icons and additional info about verification
        return
    verifiers = analysis_brain.getVerificators
    in_verifiers = submitter in verifiers
    if in_verifiers:
        # If the analysis has been submitted and verified by the same
        # person, display a warning icon
        msg = t(_("Submitted and verified by the same user: {}"))
        icon = get_image('warning.png', title=msg.format(submitter))
        self._append_html_element(item, 'state_title', icon)
    num_verifications = analysis_brain.getNumberOfRequiredVerifications
    if num_verifications > 1:
        # More than one verification required, place an icon and display
        # the number of verifications done vs. total required
        done = analysis_brain.getNumberOfVerifications
        pending = num_verifications - done
        ratio = float(done) / float(num_verifications) if done > 0 else 0
        ratio = int(ratio * 100)
        scale = ratio == 0 and 0 or (ratio / 25) * 25
        anchor = "<a href='#' title='{} {} {}' " \
                 "class='multi-verification scale-{}'>{}/{}</a>"
        anchor = anchor.format(t(_("Multi-verification required")),
                               str(pending),
                               t(_("verification(s) pending")),
                               str(scale), str(done),
                               str(num_verifications))
        self._append_html_element(item, 'state_title', anchor)
    if analysis_brain.review_state != 'to_be_verified':
        # Verification is either already done or not yet possible;
        # nothing else to do
        return
    # Check if the user has "Bika: Verify" privileges
    if not self.has_permission(TransitionVerify):
        # User cannot verify, do nothing
        return
    username = api.get_current_user().id
    if username not in verifiers:
        # Current user has not verified this analysis
        if submitter != username:
            # Current user is neither the submitter nor a verifier
            return
        # Current user is the same who submitted the result
        if analysis_brain.isSelfVerificationEnabled:
            # The user who submitted may verify
            title = t(_("Can verify, but submitted by current user"))
            html = get_image('warning.png', title=title)
            self._append_html_element(item, 'state_title', html)
            return
        # The user who submitted cannot verify
        title = t(_("Cannot verify, submitted by current user"))
        html = get_image('submitted-by-current-user.png', title=title)
        self._append_html_element(item, 'state_title', html)
        return
    # This user has verified this analysis before
    multi_verif = self.context.bika_setup.getTypeOfmultiVerification()
    if multi_verif != 'self_multi_not_cons':
        # Multiple verification by the same user is not allowed
        title = t(_("Cannot verify, was verified by current user"))
        html = get_image('submitted-by-current-user.png', title=title)
        self._append_html_element(item, 'state_title', html)
        return
    # Multi-verification by the same user, but non-consecutively, is
    # allowed
    if analysis_brain.getLastVerificator != username:
        # Current user was not the last to verify
        title = t(_("Can verify, but was already verified by current "
                    "user"))
        html = get_image('warning.png', title=title)
        self._append_html_element(item, 'state_title', html)
        return
    # The last user to verify is the current user
    title = t(_("Cannot verify, last verified by current user"))
    html = get_image('submitted-by-current-user.png', title=title)
    self._append_html_element(item, 'state_title', html)
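
# For reference, the scale bucketing above maps the done/required ratio to
# one of the multi-verification CSS classes in 25% steps (Python 2 integer
# division). A standalone illustration with assumed values:
done, required = 1, 3
ratio = int(float(done) / required * 100)  # 33
scale = 0 if ratio == 0 else (ratio / 25) * 25  # 25 -> class 'scale-25'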

def translate(id):
    return t(PMF(id + "_transition_title"))
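
# Illustrative usage of the helper above, mapping workflow transition ids
# to their i18n'd titles (cf. get_workflow_actions earlier, which builds
# the same "<id>_transition_title" msgids):
#
#   buttons = [(tid, translate(tid))
#              for tid in ('receive', 'verify', 'publish')]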

def __init__(self, context, request):
    super(AnalysisRequestsView, self).__init__(context, request)
    request.set('disable_plone.rightcolumn', 1)
    self.catalog = "bika_catalog"
    self.contentFilter = {
        'portal_type': 'AnalysisRequest',
        'sort_on': 'created',
        'sort_order': 'reverse',
        'path': {"query": "/", "level": 0},
        'cancellation_state': 'active',
    }
    self.context_actions = {}
    if self.context.portal_type == "AnalysisRequestsFolder":
        self.request.set('disable_border', 1)
    if self.view_url.find("analysisrequests") == -1:
        self.view_url = self.view_url + "/analysisrequests"
    self.allow_edit = True
    self.show_sort_column = False
    self.show_select_row = False
    self.show_select_column = True
    self.form_id = "analysisrequests"
    self.icon = self.portal_url + \
        "/++resource++bika.lims.images/analysisrequest_big.png"
    self.title = self.context.translate(_("Analysis Requests"))
    self.description = ""
    SamplingWorkflowEnabled = \
        self.context.bika_setup.getSamplingWorkflowEnabled()
    mtool = getToolByName(self.context, 'portal_membership')
    member = mtool.getAuthenticatedMember()
    user_is_preserver = 'Preserver' in member.getRoles()
    self.columns = {
        'getRequestID': {'title': _('Request ID'),
                         'index': 'getRequestID'},
        'getClientOrderNumber': {'title': _('Client Order'),
                                 'index': 'getClientOrderNumber',
                                 'toggle': True},
        'Creator': {'title': PMF('Creator'),
                    'index': 'Creator',
                    'toggle': True},
        'Created': {'title': PMF('Date Created'),
                    'index': 'created',
                    'toggle': False},
        'getSample': {'title': _("Sample"), 'toggle': True},
        'BatchID': {'title': _("Batch ID"), 'toggle': True},
        'SubGroup': {'title': _('Sub-group')},
        'Client': {'title': _('Client'), 'toggle': True},
        'getClientReference': {'title': _('Client Ref'),
                               'index': 'getClientReference',
                               'toggle': True},
        'getClientSampleID': {'title': _('Client SID'),
                              'index': 'getClientSampleID',
                              'toggle': True},
        'ClientContact': {'title': _('Contact'), 'toggle': False},
        'getSampleTypeTitle': {'title': _('Sample Type'),
                               'index': 'getSampleTypeTitle',
                               'toggle': True},
        'getSamplePointTitle': {'title': _('Sample Point'),
                                'index': 'getSamplePointTitle',
                                'toggle': False},
        'getStorageLocation': {'title': _('Storage Location'),
                               'toggle': False},
        'SamplingDeviation': {'title': _('Sampling Deviation'),
                              'toggle': False},
        'Priority': {'title': _('Priority'),
                     'toggle': True,
                     'index': 'Priority',
                     'sortable': True},
        'AdHoc': {'title': _('Ad-Hoc'), 'toggle': False},
        'SamplingDate': {'title': _('Sampling Date'),
                         'index': 'getSamplingDate',
                         'toggle': True},
        'getDateSampled': {'title': _('Date Sampled'),
                           'index': 'getDateSampled',
                           'toggle': SamplingWorkflowEnabled,
                           'input_class': 'datepicker_nofuture',
                           'input_width': '10'},
        'getSampler': {'title': _('Sampler'),
                       'toggle': SamplingWorkflowEnabled},
        'getDatePreserved': {'title': _('Date Preserved'),
                             'toggle': user_is_preserver,
                             'input_class': 'datepicker_nofuture',
                             'input_width': '10',
                             # no datesort without index
                             'sortable': False},
        'getPreserver': {'title': _('Preserver'),
                         'toggle': user_is_preserver},
        'getDateReceived': {'title': _('Date Received'),
                            'index': 'getDateReceived',
                            'toggle': False},
        'getDatePublished': {'title': _('Date Published'),
                             'index': 'getDatePublished',
                             'toggle': False},
        'state_title': {'title': _('State'), 'index': 'review_state'},
        'getProfilesTitle': {'title': _('Profile'),
                             'index': 'getProfilesTitle',
                             'toggle': False},
        'getAnalysesNum': {'title': _('Number of Analyses'),
                           'index': 'getAnalysesNum',
                           'sortable': True,
                           'toggle': False},
        'getTemplateTitle': {'title': _('Template'),
                             'index': 'getTemplateTitle',
                             'toggle': False},
    }
    self.review_states = [
        {'id': 'default',
         'title': _('Active'),
         'contentFilter': {'sort_on': 'created',
                           'sort_order': 'reverse'},
         'transitions': [{'id': 'sample'}, {'id': 'preserve'},
                         {'id': 'receive'}, {'id': 'retract'},
                         {'id': 'verify'}, {'id': 'prepublish'},
                         {'id': 'publish'}, {'id': 'republish'},
                         {'id': 'cancel'}, {'id': 'reinstate'}],
         'custom_actions': [],
         'columns': ['getRequestID', 'getSample', 'BatchID', 'SubGroup',
                     'Client', 'Creator', 'Created',
                     'getClientOrderNumber', 'getClientReference',
                     'ClientContact', 'getClientSampleID',
                     'getProfilesTitle', 'getTemplateTitle',
                     'getSampleTypeTitle', 'getSamplePointTitle',
                     'getStorageLocation', 'SamplingDeviation',
                     'Priority', 'AdHoc', 'SamplingDate',
                     'getDateSampled', 'getSampler', 'getDatePreserved',
                     'getPreserver', 'getDateReceived', 'getAnalysesNum',
                     'state_title']},
        {'id': 'sample_due',
         'title': _('Due'),
         'contentFilter': {'review_state': ('to_be_sampled',
                                            'to_be_preserved',
                                            'sample_due'),
                           'sort_on': 'created',
                           'sort_order': 'reverse'},
         'transitions': [{'id': 'sample'}, {'id': 'preserve'},
                         {'id': 'receive'}, {'id': 'cancel'},
                         {'id': 'reinstate'}],
         'custom_actions': [],
         'columns': ['getRequestID', 'getSample', 'BatchID', 'SubGroup',
                     'Client', 'getProfilesTitle', 'getTemplateTitle',
                     'Creator', 'Created', 'getClientOrderNumber',
                     'getClientReference', 'getClientSampleID',
                     'ClientContact', 'getDateSampled', 'getSampler',
                     'getDatePreserved', 'getPreserver',
                     'getSampleTypeTitle', 'getSamplePointTitle',
                     'getStorageLocation', 'SamplingDeviation',
                     'Priority', 'AdHoc', 'getAnalysesNum',
                     'state_title']},
        {'id': 'sample_received',
         'title': _('Received'),
         'contentFilter': {'review_state': 'sample_received',
                           'sort_on': 'created',
                           'sort_order': 'reverse'},
         'transitions': [{'id': 'prepublish'}, {'id': 'cancel'},
                         {'id': 'reinstate'}],
         'custom_actions': [],
         'columns': ['getRequestID', 'getSample', 'BatchID', 'SubGroup',
                     'Client', 'getProfilesTitle', 'getTemplateTitle',
                     'Creator', 'Created', 'getClientOrderNumber',
                     'getClientReference', 'getClientSampleID',
                     'ClientContact', 'getSampleTypeTitle',
                     'getSamplePointTitle', 'getStorageLocation',
                     'SamplingDeviation', 'Priority', 'AdHoc',
                     'getDateSampled', 'getSampler', 'getDatePreserved',
                     'getPreserver', 'getAnalysesNum',
                     'getDateReceived']},
        {'id': 'to_be_verified',
         'title': _('To be verified'),
         'contentFilter': {'review_state': 'to_be_verified',
                           'sort_on': 'created',
                           'sort_order': 'reverse'},
         'transitions': [{'id': 'retract'}, {'id': 'verify'},
                         {'id': 'prepublish'}, {'id': 'cancel'},
                         {'id': 'reinstate'}],
         'custom_actions': [],
         'columns': ['getRequestID', 'getSample', 'BatchID', 'SubGroup',
                     'Client', 'getProfilesTitle', 'getTemplateTitle',
                     'Creator', 'Created', 'getClientOrderNumber',
                     'getClientReference', 'getClientSampleID',
                     'ClientContact', 'getSampleTypeTitle',
                     'getSamplePointTitle', 'getStorageLocation',
                     'SamplingDeviation', 'Priority', 'AdHoc',
                     'getDateSampled', 'getSampler', 'getDatePreserved',
                     'getPreserver', 'getAnalysesNum',
                     'getDateReceived']},
        {'id': 'verified',
         'title': _('Verified'),
         'contentFilter': {'review_state': 'verified',
                           'sort_on': 'created',
                           'sort_order': 'reverse'},
         'transitions': [{'id': 'publish'}],
         'custom_actions': [],
         'columns': ['getRequestID', 'getSample', 'BatchID', 'SubGroup',
                     'Client', 'getProfilesTitle', 'getTemplateTitle',
                     'Creator', 'Created', 'getClientOrderNumber',
                     'getClientReference', 'getClientSampleID',
                     'ClientContact', 'getSampleTypeTitle',
                     'getSamplePointTitle', 'getStorageLocation',
                     'SamplingDeviation', 'Priority', 'AdHoc',
                     'getDateSampled', 'getSampler', 'getDatePreserved',
                     'getPreserver', 'getAnalysesNum',
                     'getDateReceived']},
        {'id': 'published',
         'title': _('Published'),
         'contentFilter': {'review_state': ('published', 'invalid'),
                           'sort_on': 'created',
                           'sort_order': 'reverse'},
         'transitions': [{'id': 'republish'}],
         'custom_actions': [],
         'columns': ['getRequestID', 'getSample', 'BatchID', 'SubGroup',
                     'Client', 'getProfilesTitle', 'getTemplateTitle',
                     'Creator', 'Created', 'getClientOrderNumber',
                     'getClientReference', 'getClientSampleID',
                     'ClientContact', 'getSampleTypeTitle',
                     'getSamplePointTitle', 'getStorageLocation',
                     'SamplingDeviation', 'Priority', 'AdHoc',
                     'getDateSampled', 'getSampler', 'getDatePreserved',
                     'getPreserver', 'getDateReceived', 'getAnalysesNum',
                     'getDatePublished']},
        {'id': 'cancelled',
         'title': _('Cancelled'),
         'contentFilter': {'cancellation_state': 'cancelled',
                           'review_state': ('sample_registered',
                                            'to_be_sampled',
                                            'to_be_preserved',
                                            'sample_due',
                                            'sample_received',
                                            'to_be_verified',
                                            'attachment_due',
                                            'verified',
                                            'published'),
                           'sort_on': 'created',
                           'sort_order': 'reverse'},
         'transitions': [{'id': 'reinstate'}],
         'custom_actions': [],
         'columns': ['getRequestID', 'getSample', 'BatchID', 'SubGroup',
                     'Client', 'getProfilesTitle', 'getTemplateTitle',
                     'Creator', 'Created', 'getClientOrderNumber',
                     'getClientReference', 'getClientSampleID',
                     'ClientContact', 'getSampleTypeTitle',
                     'getSamplePointTitle', 'getStorageLocation',
                     'SamplingDeviation', 'Priority', 'AdHoc',
                     'getDateSampled', 'getSampler', 'getDatePreserved',
                     'getPreserver', 'getDateReceived',
                     'getDatePublished', 'getAnalysesNum',
                     'state_title']},
        {'id': 'invalid',
         'title': _('Invalid'),
         'contentFilter': {'review_state': 'invalid',
                           'sort_on': 'created',
                           'sort_order': 'reverse'},
         'transitions': [],
         'custom_actions': [],
         'columns': ['getRequestID', 'getSample', 'BatchID', 'SubGroup',
                     'Client', 'getProfilesTitle', 'getTemplateTitle',
                     'Creator', 'Created', 'getClientOrderNumber',
                     'getClientReference', 'getClientSampleID',
                     'ClientContact', 'getSampleTypeTitle',
                     'getSamplePointTitle', 'getStorageLocation',
                     'SamplingDeviation', 'Priority', 'AdHoc',
                     'getDateSampled', 'getSampler', 'getDatePreserved',
                     'getPreserver', 'getDateReceived', 'getAnalysesNum',
                     'getDatePublished']},
        {'id': 'assigned',
         'title': "<img title='%s' src='%s/++resource++bika.lims.images/"
                  "assigned.png'/>" % (t(_("Assigned")), self.portal_url),
         'contentFilter': {'worksheetanalysis_review_state': 'assigned',
                           'review_state': ('sample_received',
                                            'to_be_verified',
                                            'attachment_due',
                                            'verified',
                                            'published'),
                           'sort_on': 'created',
                           'sort_order': 'reverse'},
         'transitions': [{'id': 'retract'}, {'id': 'verify'},
                         {'id': 'prepublish'}, {'id': 'publish'},
                         {'id': 'republish'}, {'id': 'cancel'},
                         {'id': 'reinstate'}],
         'custom_actions': [],
         'columns': ['getRequestID', 'getSample', 'BatchID', 'SubGroup',
                     'Client', 'getProfilesTitle', 'getTemplateTitle',
                     'Creator', 'Created', 'getClientOrderNumber',
                     'getClientReference', 'getClientSampleID',
                     'ClientContact', 'getSampleTypeTitle',
                     'getSamplePointTitle', 'getStorageLocation',
                     'SamplingDeviation', 'Priority', 'AdHoc',
                     'getDateSampled', 'getSampler', 'getDatePreserved',
                     'getPreserver', 'getDateReceived', 'getAnalysesNum',
                     'state_title']},
        {'id': 'unassigned',
         'title': "<img title='%s' src='%s/++resource++bika.lims.images/"
                  "unassigned.png'/>" % (t(_("Unassigned")),
                                         self.portal_url),
         'contentFilter': {'worksheetanalysis_review_state': 'unassigned',
                           'review_state': ('sample_received',
                                            'to_be_verified',
                                            'attachment_due',
                                            'verified',
                                            'published'),
                           'sort_on': 'created',
                           'sort_order': 'reverse'},
         'transitions': [{'id': 'receive'}, {'id': 'retract'},
                         {'id': 'verify'}, {'id': 'prepublish'},
                         {'id': 'publish'}, {'id': 'republish'},
                         {'id': 'cancel'}, {'id': 'reinstate'}],
         'custom_actions': [],
         'columns': ['getRequestID', 'getSample', 'BatchID', 'SubGroup',
                     'Client', 'getProfilesTitle', 'getTemplateTitle',
                     'Creator', 'Created', 'getClientOrderNumber',
                     'getClientReference', 'getClientSampleID',
                     'ClientContact', 'getSampleTypeTitle',
                     'getSamplePointTitle', 'getStorageLocation',
                     'SamplingDeviation', 'Priority', 'AdHoc',
                     'SamplingDate', 'getDateSampled', 'getSampler',
                     'getDatePreserved', 'getPreserver',
                     'getDateReceived', 'getAnalysesNum',
                     'state_title']},
    ]

def Import(context, request):
    """Read BioDrop analysis results"""
    infile = request.form['filename']
    fileformat = request.form['format']
    artoapply = request.form['artoapply']
    override = request.form['override']
    instrument = request.form.get('instrument', None)
    errors = []
    logs = []
    warns = []
    # Load the suitable parser
    parser = None
    if not hasattr(infile, 'filename'):
        errors.append(_("No file selected"))
    elif fileformat == 'csv':
        analysis = request.form.get('analysis', None)
        if analysis:
            parser = BioDropCSVParser(infile, analysis)
        else:
            errors.append(t(_("No analysis selected")))
    else:
        errors.append(t(_("Unrecognized file format ${fileformat}",
                          mapping={"fileformat": fileformat})))
    if parser:
        # Load the importer
        status = ['sample_received', 'attachment_due', 'to_be_verified']
        if artoapply == 'received':
            status = ['sample_received']
        elif artoapply == 'received_tobeverified':
            status = ['sample_received', 'attachment_due',
                      'to_be_verified']
        over = [False, False]
        if override == 'nooverride':
            over = [False, False]
        elif override == 'override':
            over = [True, False]
        elif override == 'overrideempty':
            over = [True, True]
        importer = BioDropImporter(parser=parser,
                                   context=context,
                                   allowed_ar_states=status,
                                   allowed_analysis_states=None,
                                   override=over,
                                   instrument_uid=instrument)
        tbex = ''
        try:
            importer.process()
        except Exception:
            tbex = traceback.format_exc()
        errors = importer.errors
        logs = importer.logs
        warns = importer.warns
        if tbex:
            errors.append(tbex)
    results = {'errors': errors, 'log': logs, 'warns': warns}
    return json.dumps(results)

def __init__(self, context, request, **kwargs):
    self.catalog = "bika_analysis_catalog"
    self.contentFilter = dict(kwargs)
    self.contentFilter['portal_type'] = 'Analysis'
    self.contentFilter['sort_on'] = 'sortable_title'
    self.context_actions = {}
    self.show_sort_column = False
    self.show_select_row = False
    self.show_select_column = False
    self.show_column_toggles = False
    self.pagesize = 0
    self.form_id = 'analyses_form'
    self.portal = getToolByName(context, 'portal_url').getPortalObject()
    self.portal_url = self.portal.absolute_url()
    request.set('disable_plone.rightcolumn', 1)
    # each editable item needs its own allow_edit, which is a list of
    # field names.
    self.allow_edit = False
    self.columns = {
        'Service': {'title': _('Analysis'), 'sortable': False},
        'Partition': {'title': _("Partition"), 'sortable': False},
        'Method': {'title': _('Method'), 'sortable': False,
                   'toggle': True},
        'Instrument': {'title': _('Instrument'), 'sortable': False,
                       'toggle': True},
        'Analyst': {'title': _('Analyst'), 'sortable': False,
                    'toggle': True},
        'state_title': {'title': _('Status'), 'sortable': False},
        'Result': {'title': _('Result'),
                   'input_width': '6',
                   'input_class': 'ajax_calculate numeric',
                   'sortable': False},
        'Specification': {'title': _('Specification'),
                          'sortable': False},
        'ResultDM': {'title': _('Dry'), 'sortable': False},
        'Uncertainty': {'title': _('+-'), 'sortable': False},
        'retested': {'title': "<img title='%s' src='%s/++resource++"
                              "bika.lims.images/retested.png'/>"
                              % (t(_('Retested')), self.portal_url),
                     'type': 'boolean',
                     'sortable': False},
        'Attachments': {'title': _('Attachments'), 'sortable': False},
        'CaptureDate': {'title': _('Captured'),
                        'index': 'getResultCaptureDate',
                        'sortable': False},
        'DueDate': {'title': _('Due Date'),
                    'index': 'getDueDate',
                    'sortable': False},
    }
    self.review_states = [
        {'id': 'default',
         'title': _('All'),
         'contentFilter': {},
         'columns': ['Service', 'Partition', 'Result', 'Specification',
                     'Method', 'Instrument', 'Analyst', 'Uncertainty',
                     'CaptureDate', 'DueDate', 'state_title']},
    ]
    if not context.bika_setup.getShowPartitions():
        self.review_states[0]['columns'].remove('Partition')
    super(AnalysesView, self).__init__(
        context, request,
        show_categories=context.bika_setup.getCategoriseAnalysisServices(),
        expand_all_categories=True)

def __init__(self, context, request):
    super(AnalysisRequestsView, self).__init__(context, request)
    self.catalog = "portal_catalog"
    SamplingWorkflowEnabled = \
        self.context.bika_setup.getSamplingWorkflowEnabled()
    self.columns = {
        'securitySealIntact': {'title': _('Security Seal Intact'),
                               'toggle': True},
        'samplingRoundTemplate': {'title': _('Sampling Round Template'),
                                  'toggle': True},
        'getId': {'title': _('Request ID'), 'index': 'getId'},
        'getDateSampled': {'title': _('Date Sampled'),
                           'index': 'getDateSampled',
                           'toggle': True,
                           'input_class': 'datetimepicker',
                           'input_width': '10'},
        'state_title': {'title': _('State'), 'index': 'review_state'},
        'getProfilesTitle': {'title': _('Profile'), 'toggle': False},
        'getTemplateTitle': {'title': _('Template'), 'toggle': False},
    }
    self.review_states = [
        {'id': 'default',
         'title': _('Active'),
         'contentFilter': {'cancellation_state': 'active',
                           'sort_on': 'created',
                           'sort_order': 'reverse'},
         'transitions': [{'id': 'sample'}, {'id': 'preserve'},
                         {'id': 'receive'}, {'id': 'retract'},
                         {'id': 'verify'}, {'id': 'prepublish'},
                         {'id': 'publish'}, {'id': 'republish'},
                         {'id': 'cancel'}, {'id': 'reinstate'}],
         'custom_transitions': [],
         'columns': ['securitySealIntact', 'getId',
                     'samplingRoundTemplate', 'getDateSampled',
                     'state_title']},
        {'id': 'sample_due',
         'title': _('Due'),
         'contentFilter': {'review_state': ('to_be_sampled',
                                            'to_be_preserved',
                                            'sample_due'),
                           'sort_on': 'created',
                           'sort_order': 'reverse'},
         'transitions': [{'id': 'sample'}, {'id': 'preserve'},
                         {'id': 'receive'}, {'id': 'cancel'},
                         {'id': 'reinstate'}],
         'custom_transitions': [],
         'columns': ['securitySealIntact', 'getId',
                     'samplingRoundTemplate', 'getDateSampled',
                     'state_title']},
        {'id': 'sample_received',
         'title': _('Received'),
         'contentFilter': {'review_state': 'sample_received',
                           'sort_on': 'created',
                           'sort_order': 'reverse'},
         'transitions': [{'id': 'prepublish'}, {'id': 'cancel'},
                         {'id': 'reinstate'}],
         'custom_transitions': [],
         'columns': ['securitySealIntact', 'getId',
                     'samplingRoundTemplate', 'getDateSampled',
                     'state_title']},
        {'id': 'to_be_verified',
         'title': _('To be verified'),
         'contentFilter': {'review_state': 'to_be_verified',
                           'sort_on': 'created',
                           'sort_order': 'reverse'},
         'transitions': [{'id': 'retract'}, {'id': 'verify'},
                         {'id': 'prepublish'}, {'id': 'cancel'},
                         {'id': 'reinstate'}],
         'custom_transitions': [],
         'columns': ['securitySealIntact', 'getId',
                     'samplingRoundTemplate', 'getDateSampled',
                     'state_title']},
        {'id': 'verified',
         'title': _('Verified'),
         'contentFilter': {'review_state': 'verified',
                           'sort_on': 'created',
                           'sort_order': 'reverse'},
         'transitions': [{'id': 'publish'}],
         'custom_transitions': [],
         'columns': ['securitySealIntact', 'getId',
                     'samplingRoundTemplate', 'getDateSampled',
                     'state_title']},
        {'id': 'published',
         'title': _('Published'),
         'contentFilter': {'review_state': ('published', 'invalid'),
                           'sort_on': 'created',
                           'sort_order': 'reverse'},
         'transitions': [{'id': 'republish'}],
         'custom_transitions': [],
         'columns': ['securitySealIntact', 'getId',
                     'samplingRoundTemplate', 'getDateSampled',
                     'state_title']},
        {'id': 'cancelled',
         'title': _('Cancelled'),
         'contentFilter': {'cancellation_state': 'cancelled',
                           'review_state': ('to_be_sampled',
                                            'to_be_preserved',
                                            'sample_due',
                                            'sample_received',
                                            'to_be_verified',
                                            'attachment_due',
                                            'verified',
                                            'published'),
                           'sort_on': 'created',
                           'sort_order': 'reverse'},
         'transitions': [{'id': 'reinstate'}],
         'custom_transitions': [],
         'columns': ['securitySealIntact', 'getId',
                     'samplingRoundTemplate', 'getDateSampled',
                     'state_title']},
        {'id': 'invalid',
         'title': _('Invalid'),
         'contentFilter': {'review_state': 'invalid',
                           'sort_on': 'created',
                           'sort_order': 'reverse'},
         'transitions': [],
         'custom_transitions': [],
         'columns': ['securitySealIntact', 'getId',
                     'samplingRoundTemplate', 'getDateSampled',
                     'state_title']},
        {'id': 'assigned',
         'title': "<img title='%s' src='%s/++resource++bika.lims.images/"
                  "assigned.png'/>" % (t(_("Assigned")), self.portal_url),
         'contentFilter': {'assigned_state': 'assigned',
                           'review_state': ('sample_received',
                                            'to_be_verified',
                                            'attachment_due',
                                            'verified',
                                            'published'),
                           'sort_on': 'created',
                           'sort_order': 'reverse'},
         'transitions': [{'id': 'retract'}, {'id': 'verify'},
                         {'id': 'prepublish'}, {'id': 'publish'},
                         {'id': 'republish'}, {'id': 'cancel'},
                         {'id': 'reinstate'}],
         'custom_transitions': [],
         'columns': ['securitySealIntact', 'getId',
                     'samplingRoundTemplate', 'getDateSampled',
                     'state_title']},
        {'id': 'unassigned',
         'title': "<img title='%s' src='%s/++resource++bika.lims.images/"
                  "unassigned.png'/>" % (t(_("Unassigned")),
                                         self.portal_url),
         'contentFilter': {'assigned_state': 'unassigned',
                           'review_state': ('sample_received',
                                            'to_be_verified',
                                            'attachment_due',
                                            'verified',
                                            'published'),
                           'sort_on': 'created',
                           'sort_order': 'reverse'},
         'transitions': [{'id': 'receive'}, {'id': 'retract'},
                         {'id': 'verify'}, {'id': 'prepublish'},
                         {'id': 'publish'}, {'id': 'republish'},
                         {'id': 'cancel'}, {'id': 'reinstate'}],
         'custom_transitions': [],
         'columns': ['securitySealIntact', 'getId',
                     'samplingRoundTemplate', 'getDateSampled',
                     'state_title']},
    ]

def __call__(self):
    form = self.request.form
    plone.protect.CheckAuthenticator(self.request.form)
    plone.protect.PostOnly(self.request.form)
    portal_catalog = getToolByName(self.context, 'portal_catalog')
    # Load the form data from request.state. If anything goes wrong here,
    # put a bullet through the whole process.
    try:
        states = json.loads(form['state'])
    except Exception as e:
        message = t(_('Badly formed state: ${errmsg}',
                      mapping={'errmsg': e.message}))
        ajax_form_error(self.errors, message=message)
        return json.dumps({'errors': self.errors})
    # Validate incoming form data
    required = [field.getName()
                for field in AnalysisRequestSchema.fields()
                if field.required] + ["Analyses"]
    # First remove all states which are completely empty; if none of the
    # required fields are present, we assume that the current AR had no
    # data entered, and can be ignored
    nonblank_states = {}
    for arnum, state in states.items():
        for key, val in state.items():
            if val \
               and "%s_hidden" % key not in state \
               and not key.endswith('hidden'):
                nonblank_states[arnum] = state
                break
    # all ARs that pass validation are stored in valid_states
    valid_states = {}
    for arnum, state in nonblank_states.items():
        # Work on a per-AR copy: the removals below must not leak into
        # the validation of the other ARs
        required_fields = list(required)
        # Secondary ARs are a special case: these fields are not required
        if state.get('Sample', ''):
            if 'SamplingDate' in required_fields:
                required_fields.remove('SamplingDate')
            if 'SampleType' in required_fields:
                required_fields.remove('SampleType')
        # fields flagged as 'hidden' are not considered required because
        # they will already have default values inserted in them
        required_fields = [f for f in required_fields
                           if f + '_hidden' not in state]
        missing = [f for f in required_fields if not state.get(f, '')]
        # If there are required fields missing, flag an error
        if missing:
            msg = t(_('Required fields have no values: ${field_names}',
                      mapping={'field_names': ', '.join(missing)}))
            ajax_form_error(self.errors, arnum=arnum, message=msg)
            continue
        # This AR is valid!
        valid_states[arnum] = state
    # - Expand lists of UIDs returned by multiValued reference widgets
    # - Transfer _uid values into their respective fields
    for arnum in valid_states.keys():
        for field, value in valid_states[arnum].items():
            if field.endswith('_uid') and ',' in value:
                valid_states[arnum][field] = value.split(',')
            elif field.endswith('_uid'):
                valid_states[arnum][field] = value
    if self.errors:
        return json.dumps({'errors': self.errors})
    # Now create the specified ARs.
    ARs = []
    for arnum, state in valid_states.items():
        # Create the Analysis Request
        ar = crar(portal_catalog(UID=state['Client'])[0].getObject(),
                  self.request,
                  state)
        ARs.append(ar.Title())
    # Display the appropriate message after creation
    if len(ARs) > 1:
        message = _('Analysis requests ${ARs} were successfully created.',
                    mapping={'ARs': safe_unicode(', '.join(ARs))})
    else:
        message = _('Analysis request ${AR} was successfully created.',
                    mapping={'AR': safe_unicode(ARs[0])})
    self.context.plone_utils.addPortalMessage(message, 'info')
    # Automatic label printing won't print "register" labels for
    # secondary ARs
    new_ars = [ar for ar in ARs if ar[-2:] == '01']
    if 'register' in self.context.bika_setup.getAutoPrintStickers() \
            and new_ars:
        return json.dumps({
            'success': message,
            'stickers': new_ars,
            'stickertemplate':
                self.context.bika_setup.getAutoStickerTemplate()
        })
    else:
        return json.dumps({'success': message})
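
# For reference, a minimal sketch of the JSON payload expected in
# form['state']: one entry per AR column, keyed by arnum. The field names
# and values below are hypothetical; only the arnum keying, the '_uid'
# suffix convention and the 'Analyses' key are taken from the handler
# above.
state = {
    '0': {
        'Client_uid': 'uid-of-client',
        'Contact': 'Rita Mohale',
        'SampleType': 'Water',
        'SamplingDate': '2014-06-01',
        'Analyses': ['service-uid-1', 'service-uid-2'],
    },
}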
def workflow_action_retract_ar(self):
    workflow = getToolByName(self.context, 'portal_workflow')
    # AR should be retracted
    # Can't transition inactive ARs
    if not isActive(self.context):
        message = _('Item is inactive.')
        self.context.plone_utils.addPortalMessage(message, 'info')
        self.request.response.redirect(self.context.absolute_url())
        return

    # 1. Copy the AR, linking the original one and the copy to each other
    ar = self.context
    newar = self.cloneAR(ar)

    # 2. The old AR gets a status of 'invalid'
    workflow.doActionFor(ar, 'retract_ar')

    # 3. The new AR copy opens in status 'to be verified'
    changeWorkflowState(newar, 'bika_ar_workflow', 'to_be_verified')

    # 4. The system immediately alerts the client contacts who ordered
    # the results, per email and SMS, that a possible mistake has been
    # picked up and is under investigation.
    # As much information as possible is provided in the email, including
    # a link to the AR online.
    laboratory = self.context.bika_setup.laboratory
    lab_address = "<br/>".join(laboratory.getPrintAddress())
    mime_msg = MIMEMultipart('related')
    mime_msg['Subject'] = t(
        _("Erroneous result publication from ${request_id}",
          mapping={"request_id": ar.getRequestID()}))
    mime_msg['From'] = formataddr((encode_header(laboratory.getName()),
                                   laboratory.getEmailAddress()))
    to = []
    contact = ar.getContact()
    if contact:
        to.append(
            formataddr((encode_header(contact.Title()),
                        contact.getEmailAddress())))
    for cc in ar.getCCContact():
        formatted = formataddr(
            (encode_header(cc.Title()), cc.getEmailAddress()))
        if formatted not in to:
            to.append(formatted)

    managers = self.context.portal_groups.getGroupMembers('LabManagers')
    for bcc in managers:
        user = self.portal.acl_users.getUser(bcc)
        if user:
            uemail = user.getProperty('email')
            ufull = user.getProperty('fullname')
            formatted = formataddr((encode_header(ufull), uemail))
            if formatted not in to:
                to.append(formatted)
    mime_msg['To'] = ','.join(to)
    aranchor = "<a href='%s'>%s</a>" % (ar.absolute_url(),
                                        ar.getRequestID())
    naranchor = "<a href='%s'>%s</a>" % (newar.absolute_url(),
                                         newar.getRequestID())
    addremarks = ('addremarks' in self.request and ar.getRemarks()) \
        and ("<br/><br/>" + _("Additional remarks:") + "<br/>"
             + ar.getRemarks().split("===")[1].strip()
             + "<br/><br/>") \
        or ''
    sub_d = dict(request_link=aranchor,
                 new_request_link=naranchor,
                 remarks=addremarks,
                 lab_address=lab_address)
    body = Template(
        "Some errors have been detected in the results report "
        "published from the Analysis Request $request_link. The Analysis "
        "Request $new_request_link has been created automatically and the "
        "previous one has been invalidated.<br/>The possible mistake "
        "has been picked up and is under investigation.<br/><br/>"
        "$remarks $lab_address").safe_substitute(sub_d)
    msg_txt = MIMEText(safe_unicode(body).encode('utf-8'),
                       _subtype='html')
    mime_msg.preamble = 'This is a multi-part MIME message.'
    mime_msg.attach(msg_txt)
    try:
        host = getToolByName(self.context, 'MailHost')
        host.send(mime_msg.as_string(), immediate=True)
    except Exception as msg:
        message = _(
            'Unable to send an email to alert lab '
            'client contacts that the Analysis Request has been '
            'retracted: ${error}',
            mapping={'error': safe_unicode(msg)})
        self.context.plone_utils.addPortalMessage(message, 'warning')

    message = _('${items} invalidated.',
                mapping={'items': ar.getRequestID()})
    self.context.plone_utils.addPortalMessage(message, 'warning')
    self.request.response.redirect(newar.absolute_url())
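# --- Illustrative sketch (not part of the original module) --------------
# The email body above relies on string.Template.safe_substitute, which,
# unlike substitute(), leaves unknown placeholders untouched instead of
# raising KeyError. A tiny standalone demonstration:
from string import Template

tmpl = Template("Report for $request_link ($unknown is left as-is)")
print(tmpl.safe_substitute({"request_link": "AR-001-R01"}))
# -> Report for AR-001-R01 ($unknown is left as-is)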
def Import(context, request): """ Import Form """ infile = request.form['nuclisens_easyq_file'] fileformat = request.form['nuclisens_easyq_format'] artoapply = request.form['nuclisens_easyq_artoapply'] override = request.form['nuclisens_easyq_override'] instrument = request.form.get('instrument', None) errors = [] logs = [] warns = [] # Load the most suitable parser according to file extension/options/etc... parser = None if not hasattr(infile, 'filename'): errors.append(_("No file selected")) if fileformat == 'xlsx': parser = EasyQParser(infile) elif fileformat == 'xml': parser = EasyQXMLParser(infile) else: errors.append( t( _("Unrecognized file format ${fileformat}", mapping={"fileformat": fileformat}))) if parser: # Load the importer status = ['sample_received', 'attachment_due', 'to_be_verified'] if artoapply == 'received': status = ['sample_received'] elif artoapply == 'received_tobeverified': status = ['sample_received', 'attachment_due', 'to_be_verified'] over = [False, False] if override == 'nooverride': over = [False, False] elif override == 'override': over = [True, False] elif override == 'overrideempty': over = [True, True] importer = EasyQImporter(parser=parser, context=context, allowed_ar_states=status, allowed_analysis_states=None, override=over, instrument_uid=instrument) tbex = '' try: importer.process() except: tbex = traceback.format_exc() errors = importer.errors logs = importer.logs warns = importer.warns if tbex: errors.append(tbex) results = {'errors': errors, 'log': logs, 'warns': warns} return json.dumps(results)
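# --- Illustrative sketch (not part of the original module) --------------
# The three ``override`` options above always collapse to the same pair of
# flags, [override_non_empty, override_empty], which the importer receives
# as its ``override`` argument. A small helper capturing that mapping (the
# function name is an assumption for illustration):
def get_override_flags(override):
    return {
        'nooverride': [False, False],
        'override': [True, False],
        'overrideempty': [True, True],
    }.get(override, [False, False])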
def __call__(self):
    MinimumResults = self.context.bika_setup.getMinimumResults()
    warning_icon = "<img " + \
        "src='" + self.portal_url + "/++resource++bika.lims.images/warning.png' " + \
        "height='9' width='9'/>"
    error_icon = "<img " + \
        "src='" + self.portal_url + "/++resource++bika.lims.images/exclamation.png' " + \
        "height='9' width='9'/>"

    header = _("Results per sample point")
    subheader = _(
        "Analysis results per sample point and analysis service")

    self.contentFilter = {
        'portal_type': 'Analysis',
        'review_state': ['verified', 'published']
    }

    parms = []
    titles = []

    val = self.selection_macros.parse_client(self.request)
    if val:
        self.contentFilter[val['contentFilter'][0]] = val['contentFilter'][1]
        parms.append(val['parms'])
        titles.append(val['titles'])

    val = self.selection_macros.parse_samplepoint(self.request)
    sp_uid = val
    if val:
        self.contentFilter[val['contentFilter'][0]] = val['contentFilter'][1]
        parms.append(val['parms'])
        titles.append(val['titles'])

    val = self.selection_macros.parse_sampletype(self.request)
    st_uid = val
    if val:
        self.contentFilter[val['contentFilter'][0]] = val['contentFilter'][1]
        parms.append(val['parms'])
        titles.append(val['titles'])

    val = self.selection_macros.parse_analysisservice(self.request)
    if val:
        self.contentFilter[val['contentFilter'][0]] = val['contentFilter'][1]
        parms.append(val['parms'])
    else:
        message = _("No analysis services were selected.")
        self.context.plone_utils.addPortalMessage(message, 'error')
        return self.default_template()

    val = self.selection_macros.parse_daterange(self.request,
                                                'getDateSampled',
                                                'DateSampled')
    if val:
        self.contentFilter[val['contentFilter'][0]] = val['contentFilter'][1]
        parms.append(val['parms'])
        titles.append(val['titles'])

    val = self.selection_macros.parse_state(
        self.request,
        'bika_worksheetanalysis_workflow',
        'worksheetanalysis_review_state',
        'Worksheet state')
    if val:
        self.contentFilter[val['contentFilter'][0]] = val['contentFilter'][1]
        parms.append(val['parms'])

    # Query the catalog and store analysis data in a dict
    analyses = {}
    out_of_range_count = 0
    in_shoulder_range_count = 0
    analysis_count = 0

    proxies = self.bika_analysis_catalog(self.contentFilter)
    if not proxies:
        message = _("No analyses matched your query")
        self.context.plone_utils.addPortalMessage(message, 'error')
        return self.default_template()

    # Compile a list of dictionaries, with all relevant analysis data
    for analysis in proxies:
        analysis = analysis.getObject()
        result = analysis.getResult()
        client = analysis.aq_parent.aq_parent
        uid = analysis.UID()
        service = analysis.getService()
        keyword = service.getKeyword()
        service_title = "%s (%s)" % (service.Title(), keyword)
        result_in_range = self.ResultOutOfRange(analysis)

        if service_title not in analyses.keys():
            analyses[service_title] = []
        try:
            result = float(analysis.getResult())
        except (TypeError, ValueError):
            # XXX Unfloatable analysis results should be indicated
            continue
        analyses[service_title].append({
            'service': service,
            'obj': analysis,
            'Request ID': analysis.aq_parent.getId(),
            'Analyst': analysis.getAnalyst(),
            'Result': result,
            'Sampled': analysis.getDateSampled(),
            'Captured': analysis.getResultCaptureDate(),
            'Uncertainty': analysis.getUncertainty(),
            'result_in_range': result_in_range,
            'Unit': service.getUnit(),
            'Keyword': keyword,
            'icons': '',
        })
        analysis_count += 1

    keys = analyses.keys()
    keys.sort()

    parms += [
        {"title": _("Total analyses"), "value": analysis_count},
    ]

    ## This variable is output to the TAL
    self.report_data = {
        'header': header,
        'subheader': subheader,
        'parms': parms,
        'tables': [],
        'footnotes': [],
    }
plotscript = """ set terminal png transparent truecolor enhanced size 700,350 font "Verdana, 8" set title "%(title)s" set xlabel "%(xlabel)s" set ylabel "%(ylabel)s" set key off #set logscale set timefmt "%(date_format_long)s" set xdata time set format x "%(date_format_short)s\\n%(time_format)s" set xrange ["%(x_start)s":"%(x_end)s"] set auto fix set offsets graph 0, 0, 1, 1 set xtics border nomirror rotate by 90 font "Verdana, 5" offset 0,-3 set ytics nomirror f(x) = mean_y fit f(x) 'gpw_DATAFILE_gpw' u 1:3 via mean_y stddev_y = sqrt(FIT_WSSR / (FIT_NDF + 1)) plot mean_y-stddev_y with filledcurves y1=mean_y lt 1 lc rgb "#efefef",\ mean_y+stddev_y with filledcurves y1=mean_y lt 1 lc rgb "#efefef",\ mean_y with lines lc rgb '#ffffff' lw 3,\ "gpw_DATAFILE_gpw" using 1:3 title 'data' with points pt 7 ps 1 lc rgb '#0000ee' lw 2,\ '' using 1:3 smooth unique lc rgb '#aaaaaa' lw 2,\ '' using 1:4 with lines lc rgb '#000000' lw 1,\ '' using 1:5 with lines lc rgb '#000000' lw 1""" ## Compile plots and format data for display for service_title in keys: # used to calculate XY axis ranges result_values = [int(o['Result']) for o in analyses[service_title]] result_dates = [o['Sampled'] for o in analyses[service_title]] parms = [] plotdata = str() range_min = '' range_max = '' for a in analyses[service_title]: a['Sampled'] = a['Sampled'].strftime( self.date_format_long) if a['Sampled'] else '' a['Captured'] = a['Captured'].strftime(self.date_format_long) if \ a['Captured'] else '' R = a['Result'] U = a['Uncertainty'] a['Result'] = a['obj'].getFormattedResult() in_range = a['result_in_range'] # result out of range if str(in_range[0]) == 'False': out_of_range_count += 1 a['Result'] = "%s %s" % (a['Result'], error_icon) # result almost out of range if str(in_range[0]) == '1': in_shoulder_range_count += 1 a['Result'] = "%s %s" % (a['Result'], warning_icon) spec = {} if hasattr(a["obj"], 'specification') and a["obj"].specification: spec = a["obj"].specification plotdata += "%s\t%s\t%s\t%s\t%s\n" % ( a['Sampled'], R, spec.get("min", ""), spec.get("max", ""), U and U or 0, ) plotdata.encode('utf-8') unit = analyses[service_title][0]['Unit'] if MinimumResults <= len(dict([(d, d) for d in result_dates])): _plotscript = str(plotscript) % { 'title': "", 'xlabel': t(_("Date Sampled")), 'ylabel': unit and unit or '', 'x_start': "%s" % min(result_dates).strftime(self.date_format_long), 'x_end': "%s" % max(result_dates).strftime(self.date_format_long), 'date_format_long': self.date_format_long, 'date_format_short': self.date_format_short, 'time_format': self.time_format, } plot_png = plot(str(plotdata), plotscript=str(_plotscript), usefifo=False) # Temporary PNG data file fh, data_fn = tempfile.mkstemp(suffix='.png') os.write(fh, plot_png) plot_url = data_fn self.request['to_remove'].append(data_fn) plot_url = data_fn else: plot_url = "" table = { 'title': "%s: %s" % (t(_("Analysis Service")), service_title), 'parms': parms, 'columns': ['Request ID', 'Analyst', 'Result', 'Sampled', 'Captured'], 'data': analyses[service_title], 'plot_url': plot_url, } self.report_data['tables'].append(table) translate = self.context.translate ## footnotes if out_of_range_count: msgid = _("Analyses out of range") self.report_data['footnotes'].append("%s %s" % (error_icon, t(msgid))) if in_shoulder_range_count: msgid = _("Analyses in error shoulder range") self.report_data['footnotes'].append("%s %s" % (warning_icon, t(msgid))) self.report_data['parms'].append({ "title": _("Analyses out of range"), "value": out_of_range_count }) 
self.report_data['parms'].append({ "title": _("Analyses in error shoulder range"), "value": in_shoulder_range_count }) title = t(header) if titles: title += " (%s)" % " ".join(titles) return { 'report_title': title, 'report_data': self.template(), }
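# --- Illustrative sketch (not part of the original module) --------------
# Each row handed to gnuplot above is a tab-separated line of sampled
# date, result, spec min, spec max and uncertainty; the plot script reads
# columns 1:3 as date vs. result and 1:4/1:5 as the spec boundaries. A
# hypothetical two-row datafile (the actual date format depends on
# self.date_format_long):
example_plotdata = (
    "2015-06-01 10:00\t12.3\t10\t20\t0.5\n"
    "2015-06-08 10:00\t14.1\t10\t20\t0.5\n"
)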
def __call__(self): # get all the data into datalines sc = getToolByName(self.context, 'bika_setup_catalog') bac = getToolByName(self.context, 'bika_analysis_catalog') rc = getToolByName(self.context, 'reference_catalog') self.report_content = {} parm_lines = {} parms = [] headings = {} headings['header'] = _("Analyses per sample type") headings['subheader'] = _("Number of analyses requested per sample type") count_all = 0 query = {'portal_type': 'Analysis'} client_title = None if 'ClientUID' in self.request.form: client_uid = self.request.form['ClientUID'] query['getClientUID'] = client_uid client = rc.lookupObject(client_uid) client_title = client.Title() else: client = logged_in_client(self.context) if client: client_title = client.Title() query['getClientUID'] = client.UID() if client_title: parms.append( {'title': _('Client'), 'value': client_title, 'type': 'text'}) date_query = formatDateQuery(self.context, 'Requested') if date_query: query['created'] = date_query requested = formatDateParms(self.context, 'Requested') parms.append( {'title': _('Requested'), 'value': requested, 'type': 'text'}) workflow = getToolByName(self.context, 'portal_workflow') if 'bika_analysis_workflow' in self.request.form: query['review_state'] = self.request.form['bika_analysis_workflow'] review_state = workflow.getTitleForStateOnType( self.request.form['bika_analysis_workflow'], 'Analysis') parms.append( {'title': _('Status'), 'value': review_state, 'type': 'text'}) if 'bika_cancellation_workflow' in self.request.form: query['cancellation_state'] = self.request.form[ 'bika_cancellation_workflow'] cancellation_state = workflow.getTitleForStateOnType( self.request.form['bika_cancellation_workflow'], 'Analysis') parms.append( {'title': _('Active'), 'value': cancellation_state, 'type': 'text'}) # and now lets do the actual report lines formats = {'columns': 2, 'col_heads': [_('Sample type'), _('Number of analyses')], 'class': '', } datalines = [] for sampletype in sc(portal_type="SampleType", sort_on='sortable_title'): query['getSampleTypeUID'] = sampletype.UID analyses = bac(query) count_analyses = len(analyses) dataline = [] dataitem = {'value': sampletype.Title} dataline.append(dataitem) dataitem = {'value': count_analyses} dataline.append(dataitem) datalines.append(dataline) count_all += count_analyses # footer data footlines = [] footline = [] footitem = {'value': _('Total'), 'class': 'total_label'} footline.append(footitem) footitem = {'value': count_all} footline.append(footitem) footlines.append(footline) self.report_content = { 'headings': headings, 'parms': parms, 'formats': formats, 'datalines': datalines, 'footings': footlines} if self.request.get('output_format', '') == 'CSV': import csv import StringIO import datetime fieldnames = [ 'Sample Type', 'Analyses', ] output = StringIO.StringIO() dw = csv.DictWriter(output, extrasaction='ignore', fieldnames=fieldnames) dw.writerow(dict((fn, fn) for fn in fieldnames)) for row in datalines: dw.writerow({ 'Sample Type': row[0]['value'], 'Analyses': row[1]['value'], }) report_data = output.getvalue() output.close() date = datetime.datetime.now().strftime("%Y%m%d%H%M") setheader = self.request.RESPONSE.setHeader setheader('Content-Type', 'text/csv') setheader("Content-Disposition", "attachment;filename=\"analysespersampletype_%s.csv\"" % date) self.request.RESPONSE.write(report_data) else: return {'report_title': t(headings['header']), 'report_data': self.template()}
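# --- Illustrative sketch (not part of the original module) --------------
# The reports in this module all render ``datalines`` as a list of rows,
# each row being a list of cell dicts with a 'value' and optional
# presentation keys such as 'class' or 'colspan'. A minimal example for
# the two-column table above:
example_datalines = [
    [{'value': 'Water'}, {'value': 42}],
    [{'value': 'Soil'}, {'value': 17}],
]
example_footline = [
    {'value': 'Total', 'class': 'total_label'},
    {'value': 59},
]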
def __call__(self): # get all the data into datalines pc = getToolByName(self.context, 'portal_catalog') rc = getToolByName(self.context, 'reference_catalog') self.report_content = {} parms = [] headings = {} headings['header'] = _("Attachments") headings['subheader'] = _( "The attachments linked to analysis requests and analyses") count_all = 0 query = {'portal_type': 'Attachment'} if 'ClientUID' in self.request.form: client_uid = self.request.form['ClientUID'] query['getClientUID'] = client_uid client = rc.lookupObject(client_uid) client_title = client.Title() else: client = logged_in_client(self.context) if client: client_title = client.Title() query['getClientUID'] = client.UID() else: client_title = 'All' parms.append({ 'title': _('Client'), 'value': client_title, 'type': 'text' }) date_query = formatDateQuery(self.context, 'Loaded') if date_query: query['getDateLoaded'] = date_query loaded = formatDateParms(self.context, 'Loaded') parms.append({ 'title': _('Loaded'), 'value': loaded, 'type': 'text' }) # and now lets do the actual report lines formats = { 'columns': 6, 'col_heads': [ _('Request'), _('File'), _('Attachment type'), _('Content type'), _('Size'), _('Loaded'), ], 'class': '', } datalines = [] attachments = pc(query) for a_proxy in attachments: attachment = a_proxy.getObject() attachment_file = attachment.getAttachmentFile() icon = attachment_file.icon filename = attachment_file.filename filesize = attachment_file.get_size() filesize = filesize / 1024 sizeunit = "Kb" if filesize > 1024: filesize = filesize / 1024 sizeunit = "Mb" dateloaded = attachment.getDateLoaded() dataline = [] dataitem = {'value': attachment.getTextTitle()} dataline.append(dataitem) dataitem = {'value': filename, 'img_before': icon} dataline.append(dataitem) dataitem = { 'value': attachment.getAttachmentType().Title() if attachment.getAttachmentType() else '' } dataline.append(dataitem) dataitem = { 'value': self.context.lookupMime(attachment_file.getContentType()) } dataline.append(dataitem) dataitem = {'value': '%s%s' % (filesize, sizeunit)} dataline.append(dataitem) dataitem = {'value': self.ulocalized_time(dateloaded)} dataline.append(dataitem) datalines.append(dataline) count_all += 1 # footer data footlines = [] footline = [] footitem = {'value': _('Total'), 'colspan': 5, 'class': 'total_label'} footline.append(footitem) footitem = {'value': count_all} footline.append(footitem) footlines.append(footline) self.report_content = { 'headings': headings, 'parms': parms, 'formats': formats, 'datalines': datalines, 'footings': footlines } if self.request.get('output_format', '') == 'CSV': import csv import StringIO import datetime fieldnames = [ _('Request'), _('File'), _('Attachment type'), _('Content type'), _('Size'), _('Loaded'), ] output = StringIO.StringIO() dw = csv.DictWriter(output, fieldnames=fieldnames) dw.writerow(dict((fn, fn) for fn in fieldnames)) for row in datalines: dw.writerow(row) report_data = output.getvalue() output.close() date = datetime.datetime.now().strftime("%Y%m%d%H%M") setheader = self.request.RESPONSE.setHeader setheader('Content-Type', 'text/csv') setheader( "Content-Disposition", "attachment;filename=\"analysesattachments_%s.csv\"" % date) self.request.RESPONSE.write(report_data) else: return { 'report_title': t(headings['header']), 'report_data': self.template() }
def folderitem(self, obj, item, index): """Service triggered each time an item is iterated in folderitems. The use of this service prevents the extra-loops in child objects. :obj: the instance of the class to be foldered :item: dict containing the properties of the object to be used by the template :index: current index of the item """ # ensure we have an object and not a brain obj = api.get_object(obj) uid = api.get_uid(obj) url = api.get_url(obj) title = api.get_title(obj) # get the category category = obj.getCategoryTitle() item["category"] = category if category not in self.categories: self.categories.append(category) config = self.configuration.get(uid, {}) partition = config.get("partition", "part-1") hidden = config.get("hidden", False) item["replace"]["Title"] = get_link(url, value=title) item["Price"] = self.format_price(obj.Price) item["allow_edit"] = self.get_editable_columns() item["required"].append("Partition") item["choices"]["Partition"] = self.partition_choices item["Partition"] = partition item["Hidden"] = hidden item["selected"] = uid in self.configuration # Add methods methods = obj.getMethods() if methods: links = map( lambda m: get_link( m.absolute_url(), value=m.Title(), css_class="link"), methods) item["replace"]["Methods"] = ", ".join(links) else: item["methods"] = "" # Icons after_icons = "" if obj.getAccredited(): after_icons += get_image( "accredited.png", title=t(_("Accredited"))) if obj.getAttachmentOption() == "r": after_icons += get_image( "attach_reqd.png", title=t(_("Attachment required"))) if obj.getAttachmentOption() == "n": after_icons += get_image( "attach_no.png", title=t(_('Attachment not permitted'))) if after_icons: item["after"]["Title"] = after_icons return item
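# --- Illustrative sketch (not part of the original module) --------------
# folderitem() methods in this codebase decorate listing cells through the
# item's "before", "after" and "replace" dicts, keyed by column id:
# "replace" swaps the rendered cell for raw HTML, while "before"/"after"
# prepend/append snippets such as the icons above. A sketch:
item = {"Title": "Calcium", "before": {}, "after": {}, "replace": {}}
item["replace"]["Title"] = "<a href='...'>Calcium</a>"  # link instead of text
item["after"]["Title"] = "<img src='accredited.png'/>"  # icon appended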
def workflow_action_submit(self):
    form = self.request.form
    rc = getToolByName(self.context, REFERENCE_CATALOG)
    action, came_from = WorkflowAction._get_form_workflow_action(self)
    checkPermission = self.context.portal_membership.checkPermission
    if not isActive(self.context):
        message = _('Item is inactive.')
        self.context.plone_utils.addPortalMessage(message, 'info')
        self.request.response.redirect(self.context.absolute_url())
        return

    # calcs.js has kept item_data and form input interim values synced,
    # so the json strings from item_data will be the same as the form values
    item_data = {}
    if 'item_data' in form:
        if isinstance(form['item_data'], list):
            for i_d in form['item_data']:
                for i, d in json.loads(i_d).items():
                    item_data[i] = d
        else:
            item_data = json.loads(form['item_data'])

    selected_analyses = WorkflowAction._get_selected_items(self)
    results = {}
    hasInterims = {}

    # check that the form values match the database;
    # save them if they do not.
    for uid, result in self.request.form.get('Result', [{}])[0].items():
        # Do not save data for analyses that are not selected.
        if uid not in selected_analyses:
            continue
        analysis = selected_analyses[uid]
        # never save any part of rows with empty result values.
        # https://jira.bikalabs.com/browse/LIMS-1944:
        if not result:
            continue
        # ignore result if analysis object no longer exists
        if not analysis:
            continue
        # Prevent saving data if the analysis is already transitioned
        if not (checkPermission(EditResults, analysis) or
                checkPermission(EditFieldResults, analysis)):
            title = safe_unicode(analysis.getService().Title())
            msgid = _(
                'Result for ${analysis} could not be saved because '
                'it was already submitted by another user.',
                mapping={'analysis': title})
            message = safe_unicode(t(msgid))
            self.context.plone_utils.addPortalMessage(message)
            continue
        # if the AR has ReportDryMatter set, get dry_result from form.
        dry_result = ''
        if hasattr(self.context, 'getReportDryMatter') \
                and self.context.getReportDryMatter():
            for k, v in self.request.form['ResultDM'][0].items():
                if uid == k:
                    dry_result = v
                    break
        results[uid] = result
        interimFields = item_data[uid]
        if len(interimFields) > 0:
            hasInterims[uid] = True
        else:
            hasInterims[uid] = False
        retested = 'retested' in form and uid in form['retested']
        remarks = form.get('Remarks', [{}, ])[0].get(uid, '')
        # Don't save unnecessary things
        # https://github.com/bikalabs/Bika-LIMS/issues/766:
        # Somehow, using analysis.edit() fails silently when
        # logged in as Analyst.
        if analysis.getInterimFields() != interimFields or \
                analysis.getRetested() != retested or \
                analysis.getRemarks() != remarks:
            analysis.setInterimFields(interimFields)
            analysis.setRetested(retested)
            analysis.setRemarks(remarks)
        # save results separately, otherwise capture date is rewritten
        if analysis.getResult() != result or \
                analysis.getResultDM() != dry_result:
            analysis.setResultDM(dry_result)
            analysis.setResult(result)

    methods = self.request.form.get('Method', [{}])[0]
    instruments = self.request.form.get('Instrument', [{}])[0]
    analysts = self.request.form.get('Analyst', [{}])[0]
    uncertainties = self.request.form.get('Uncertainty', [{}])[0]
    dlimits = self.request.form.get('DetectionLimit', [{}])[0]

    # discover which items may be submitted
    submissable = []
    for uid, analysis in selected_analyses.items():
        analysis_active = isActive(analysis)

        # Need to save the instrument?
        if uid in instruments and analysis_active:
            # TODO: Add SetAnalysisInstrument permission
            # allow_setinstrument = sm.checkPermission(SetAnalysisInstrument)
            allow_setinstrument = True
            # ---8<-----
            if allow_setinstrument:
                # Does the current analysis allow this instrument,
                # according to its analysis service and method?
                if instruments[uid] == '':
                    previnstr = analysis.getInstrument()
                    if previnstr:
                        previnstr.removeAnalysis(analysis)
                    analysis.setInstrument(None)
                elif analysis.isInstrumentAllowed(instruments[uid]):
                    previnstr = analysis.getInstrument()
                    if previnstr:
                        previnstr.removeAnalysis(analysis)
                    analysis.setInstrument(instruments[uid])
                    instrument = analysis.getInstrument()
                    instrument.addAnalysis(analysis)

        # Need to save the method?
        if uid in methods and analysis_active:
            # TODO: Add SetAnalysisMethod permission
            # allow_setmethod = sm.checkPermission(SetAnalysisMethod)
            allow_setmethod = True
            # ---8<-----
            if allow_setmethod and analysis.isMethodAllowed(methods[uid]):
                analysis.setMethod(methods[uid])

        # Need to save the analyst?
        if uid in analysts and analysis_active:
            analysis.setAnalyst(analysts[uid])

        # Need to save the uncertainty?
        if uid in uncertainties and analysis_active:
            analysis.setUncertainty(uncertainties[uid])

        # Need to save the detection limit?
        if analysis_active:
            analysis.setDetectionLimitOperand(dlimits.get(uid, None))

        if uid not in results or not results[uid]:
            continue

        can_submit = True
        # guard_submit does a lot of the same stuff, too.
        # the code there has also been commented.
        # we must find a better way to allow dependencies to control
        # this process.
        # for dependency in analysis.getDependencies():
        #     dep_state = workflow.getInfoFor(dependency, 'review_state')
        #     if hasInterims[uid]:
        #         if dep_state in ('to_be_sampled', 'to_be_preserved',
        #                          'sample_due', 'sample_received',
        #                          'attachment_due', 'to_be_verified',):
        #             can_submit = False
        #             break
        #     else:
        #         if dep_state in ('to_be_sampled', 'to_be_preserved',
        #                          'sample_due', 'sample_received',):
        #             can_submit = False
        #             break
        if can_submit and analysis not in submissable:
            submissable.append(analysis)

    # and then submit them.
    for analysis in submissable:
        doActionFor(analysis, 'submit')

    # LIMS-2366: Finally, when we are done processing all applicable
    # analyses, we must attempt to initiate the submit transition on the
    # AR itself. This is for the case where "general retraction" has been
    # done, or where the last "received" analysis has been removed, and
    # the AR is in state "received" while there are no "received" analyses
    # left to trigger the parent transition.
    if self.context.portal_type == 'Sample':
        ar = self.context.getAnalysisRequests()[0]
    elif self.context.portal_type == 'Analysis':
        ar = self.context.aq_parent
    else:
        ar = self.context
    doActionFor(ar, 'submit')

    message = PMF("Changes saved.")
    self.context.plone_utils.addPortalMessage(message, 'info')
    if checkPermission(EditResults, self.context):
        self.destination_url = self.context.absolute_url() + \
            "/manage_results"
    else:
        self.destination_url = self.context.absolute_url()
    self.request.response.redirect(self.destination_url)
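# --- Illustrative sketch (not part of the original module) --------------
# The submit handler above reads per-analysis values from list-wrapped
# form dicts keyed by analysis UID (e.g. ``form.get('Result', [{}])[0]``).
# A hypothetical ``request.form`` fragment for two analyses; all UIDs and
# values are assumptions for illustration:
example_form = {
    'Result': [{'uid-1': '12.3', 'uid-2': '7.1'}],
    'Method': [{'uid-1': 'uid-of-method'}],
    'Instrument': [{'uid-1': 'uid-of-instrument'}],
    'Remarks': [{'uid-1': 'diluted 1:10'}],
}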
def _folderitems(self, full_objects=False):
    """WARNING: :full_objects: could create a big performance hit.
    """
    # Setting up some attributes
    plone_layout = getMultiAdapter((self.context.aq_inner, self.request),
                                   name=u'plone_layout')
    plone_utils = getToolByName(self.context.aq_inner, 'plone_utils')
    portal_types = getToolByName(self.context.aq_inner, 'portal_types')
    if self.request.form.get('show_all', '').lower() == 'true' \
            or self.show_all is True \
            or self.pagesize == 0:
        show_all = True
    else:
        show_all = False

    # idx increases one unit each time an object is added to the 'items'
    # dictionary to be returned. Note that if the item is not rendered,
    # the idx will not increase.
    idx = 0
    results = []
    self.show_more = False
    brains = self._fetch_brains(self.limit_from)
    for obj in brains:
        # avoid creating unnecessary info for items outside the current
        # batch; only the path is needed for the "select all" case...
        # we only take allowed items into account
        if not show_all and idx >= self.pagesize:
            # Maximum number of items to be shown reached!
            self.show_more = True
            break

        # we don't know yet if it's a brain or an object
        path = hasattr(obj, 'getPath') and obj.getPath() or \
            "/".join(obj.getPhysicalPath())

        # This item must be rendered, we need the object instead of a brain
        obj = obj.getObject() if hasattr(obj, 'getObject') else obj

        # check if the item must be rendered or not (prevents from
        # doing it later in folderitems) and dealing with paging
        if not obj or not self.isItemAllowed(obj):
            continue

        uid = obj.UID()
        title = obj.Title()
        description = obj.Description()
        icon = plone_layout.getIcon(obj)
        url = obj.absolute_url()
        relative_url = obj.absolute_url(relative=True)

        fti = portal_types.get(obj.portal_type)
        if fti is not None:
            type_title_msgid = fti.Title()
        else:
            type_title_msgid = obj.portal_type

        url_href_title = '%s at %s: %s' % (
            t(type_title_msgid),
            path,
            to_utf8(description))

        # no trailing comma here: it would turn the value into a tuple
        modified = self.ulocalized_time(obj.modified())

        # element css classes
        type_class = 'contenttype-' + \
            plone_utils.normalizeString(obj.portal_type)

        state_class = ''
        states = {}
        for w in self.workflow.getWorkflowsFor(obj):
            state = w._getWorkflowStateOf(obj).id
            states[w.state_var] = state
            state_class += "state-%s " % state

        results_dict = dict(
            obj=obj,
            id=obj.getId(),
            title=title,
            uid=uid,
            path=path,
            url=url,
            fti=fti,
            item_data=json.dumps([]),
            url_href_title=url_href_title,
            obj_type=obj.Type,
            size=obj.getObjSize,
            modified=modified,
            icon=icon.html_tag(),
            type_class=type_class,
            # a list of lookups for single-value-select fields
            choices={},
            state_class=state_class,
            relative_url=relative_url,
            view_url=url,
            table_row_class="",
            category='None',

            # a list of names of fields that may be edited on this item
            allow_edit=[],

            # a list of names of fields that are compulsory (if editable)
            required=[],

            # a dict where the column name works as a key and the value is
            # the name of the field related with the column. It is used
            # when the name given to the column and the content field it
            # represents diverges. bika_listing_table_items.pt defines an
            # attribute for each item, this attribute is named 'field' and
            # the system fills it taking advantage of this dictionary or
            # the name of the column if it isn't defined in the dict.
            field={},

            # "before", "after" and replace: dictionary (key is column ID)
            # A snippet of HTML which will be rendered
            # before/after/instead of the table cell content.
            before={},  # { before : "<a href=..>" }
            after={},
            replace={},
        )

        rs = None
        st_title = None
        wf_state_var = None

        workflows = self.workflow.getWorkflowsFor(obj)
        for wf in workflows:
            if wf.state_var:
                wf_state_var = wf.state_var
                break

        if wf_state_var is not None:
            rs = self.workflow.getInfoFor(obj, wf_state_var)
            st_title = self.workflow.getTitleForStateOnType(
                rs, obj.portal_type)
            st_title = t(_(st_title))

        if rs:
            results_dict['review_state'] = rs

        for state_var, state in states.items():
            if not st_title:
                st_title = self.workflow.getTitleForStateOnType(
                    state, obj.portal_type)
            results_dict[state_var] = state
        results_dict['state_title'] = st_title

        results_dict['class'] = {}

        # As far as I am concerned, adapters for IFieldIcons are only used
        # for Analysis content types. Since AnalysesView is not using this
        # "classic" folderitems from bikalisting anymore, this logic has
        # been added in AnalysesView. Even so, it hasn't been removed from
        # here, because this _folderitems function is marked as
        # deprecated, so it will eventually be removed altogether.
        for name, adapter in getAdapters((obj,), IFieldIcons):
            auid = obj.UID() if hasattr(obj, 'UID') and callable(
                obj.UID) else None
            if not auid:
                continue
            alerts = adapter()
            # logger.info(str(alerts))
            if alerts and auid in alerts:
                if auid in self.field_icons:
                    self.field_icons[auid].extend(alerts[auid])
                else:
                    self.field_icons[auid] = alerts[auid]

        # Search for values for all columns in obj
        for key in self.columns.keys():
            # if the key is already in the results dict
            # then we don't replace its value
            value = results_dict.get(key, '')
            if key not in results_dict:
                attrobj = getFromString(obj, key)
                value = attrobj if attrobj else value

                # Custom attribute? Inspect to set the value
                # for the current column dynamically
                vattr = self.columns[key].get('attr', None)
                if vattr:
                    attrobj = getFromString(obj, vattr)
                    value = attrobj if attrobj else value
                results_dict[key] = value

            # Replace with an url?
            replace_url = self.columns[key].get('replace_url', None)
            if replace_url:
                attrobj = getFromString(obj, replace_url)
                if attrobj:
                    results_dict['replace'][key] = \
                        '<a href="%s">%s</a>' % (attrobj, value)

        # The item basics filled. Delegate additional actions to folderitem
        # service. folderitem service is frequently overridden by child
        # objects
        item = self.folderitem(obj, results_dict, idx)

        # Call folder_item from subscriber adapters
        for subscriber in self.get_listing_view_adapters():
            subscriber.folder_item(obj, item, idx)

        if item:
            results.append(item)
            idx += 1

    return results
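# --- Illustrative sketch (not part of the original module) --------------
# _folderitems() resolves column values with ``getFromString(obj, key)``,
# which, as used above, walks a dotted accessor path on the object. A
# simplified reimplementation under that assumption (the real helper may
# differ in details):
def get_from_string(obj, path):
    value = obj
    for name in path.split("."):
        value = getattr(value, name, None)
        if callable(value):
            value = value()
        if value is None:
            return None
    return value

# e.g. get_from_string(obj, "getSample.getSampleType.Title")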
def Import(context, request, instrumentname='sysmex_xs_500i'):
    """ Sysmex XS - 500i analysis results
    """
    # The exact layout of this file is not fully documented, which is why
    # an 'Analysis Service selector' was added. If no Analysis Service is
    # selected, each 'data' column is interpreted as a different Analysis
    # Service. If an Analysis Service is selected, all data columns are
    # interpreted as different data points of a single Analysis Service.
    formitems = getForm(instrumentname, request)
    infile = formitems['infile']
    fileformat = formitems['fileformat']
    artoapply = formitems['artoapply']
    override = formitems['override']
    instrument = formitems['instrument']
    errors = []
    logs = []
    warns = []

    # Load the most suitable parser according to file extension/options/etc...
    parser = None
    if not hasattr(infile, 'filename'):
        errors.append(_("No file selected"))
    if fileformat == 'csv':
        # Get the Analysis Service selected, if there is one.
        analysis = request.form.get('analysis_service', None)
        if analysis:
            # Get the default result key
            defaultresult = request.form.get('default_result', None)
            # Raise an error if the default result key is missing.
            parser = SysmexXS500iCSVParser(infile, analysis, defaultresult) \
                if defaultresult \
                else errors.append(t(_("A default result key is required.")))
        else:
            parser = SysmexXS500iCSVParser(infile)
    else:
        errors.append(
            t(
                _("Unrecognized file format ${fileformat}",
                  mapping={"fileformat": fileformat})))

    if parser:
        # Load the importer
        status = ['sample_received', 'attachment_due', 'to_be_verified']
        if artoapply == 'received':
            status = ['sample_received']
        elif artoapply == 'received_tobeverified':
            status = ['sample_received', 'attachment_due', 'to_be_verified']
        over = [False, False]
        if override == 'nooverride':
            over = [False, False]
        elif override == 'override':
            over = [True, False]
        elif override == 'overrideempty':
            over = [True, True]
        importer = SysmexXS500iImporter(parser=parser,
                                        context=context,
                                        allowed_ar_states=status,
                                        allowed_analysis_states=None,
                                        override=over,
                                        instrument_uid=instrument)
        tbex = ''
        try:
            importer.process()
        except Exception:
            tbex = traceback.format_exc()
        errors = importer.errors
        logs = importer.logs
        warns = importer.warns
        if tbex:
            errors.append(tbex)

    results = {'errors': errors, 'log': logs, 'warns': warns}

    return json.dumps(results)
def folderitem(self, obj, item, index): """Service triggered each time an item is iterated in folderitems. The use of this service prevents the extra-loops in child objects. :obj: the instance of the class to be foldered :item: dict containing the properties of the object to be used by the template :index: current index of the item """ # ensure we have an object and not a brain obj = api.get_object(obj) uid = api.get_uid(obj) # settings for this analysis service_settings = self.context.getAnalysisServiceSettings(uid) hidden = service_settings.get("hidden", obj.getHidden()) # get the category category = obj.getCategoryTitle() item["category"] = category if category not in self.categories: self.categories.append(category) parts = filter(api.is_active, self.get_partitions()) partitions = map( lambda part: { "ResultValue": part.Title(), "ResultText": part.getId() }, parts) keyword = obj.getKeyword() partition = None if uid in self.analyses: analysis = self.analyses[uid] # Might differ from the service keyword keyword = analysis.getKeyword() # Mark the row as disabled if the analysis is not in an open state item["disabled"] = not analysis.isOpen() # get the hidden status of the analysis hidden = analysis.getHidden() # get the partition of the analysis partition = self.get_partition(analysis) else: partition = self.get_partitions()[0] # get the specification of this object rr = self.get_results_range() spec = rr.get(keyword, ResultsRangeDict()) item["Title"] = obj.Title() item["Unit"] = obj.getUnit() item["Price"] = obj.getPrice() item["before"]["Price"] = self.get_currency_symbol() item["allow_edit"] = self.get_editable_columns(obj) item["selected"] = uid in self.selected item["min"] = str(spec.get("min", "")) item["max"] = str(spec.get("max", "")) item["warn_min"] = str(spec.get("warn_min", "")) item["warn_max"] = str(spec.get("warn_max", "")) item["Hidden"] = hidden item["Partition"] = partition.getId() item["choices"]["Partition"] = partitions # Icons after_icons = "" if obj.getAccredited(): after_icons += get_image("accredited.png", title=t(_("Accredited"))) if obj.getAttachmentOption() == "r": after_icons += get_image("attach_reqd.png", title=t(_("Attachment required"))) if obj.getAttachmentOption() == "n": after_icons += get_image("attach_no.png", title=t(_('Attachment not permitted'))) if after_icons: item["after"]["Title"] = after_icons return item
def __call__(self): self.service_uids = self.request.get('service_uids', '').split(",") self.control_type = self.request.get('control_type', '') if not self.control_type: return t(_("No control type specified")) return super(ReferenceSamplesView, self).contents_table()
def __call__(self):
    # get all the data into datalines
    sc = getToolByName(self.context, 'bika_setup_catalog')
    bc = getToolByName(self.context, 'bika_analysis_catalog')
    rc = getToolByName(self.context, 'reference_catalog')

    self.report_content = {}
    parms = []
    headings = {}
    headings['header'] = _("Analysis turnaround times")
    headings['subheader'] = _("The turnaround time of analyses")

    query = {'portal_type': 'Analysis'}
    client_title = None
    if 'ClientUID' in self.request.form:
        client_uid = self.request.form['ClientUID']
        query['getClientUID'] = client_uid
        client = rc.lookupObject(client_uid)
        client_title = client.Title()
    else:
        client = logged_in_client(self.context)
        if client:
            client_title = client.Title()
            query['getClientUID'] = client.UID()
    if client_title:
        parms.append(
            {'title': _('Client'), 'value': client_title, 'type': 'text'})

    date_query = formatDateQuery(self.context, 'Received')
    if date_query:
        query['created'] = date_query
        received = formatDateParms(self.context, 'Received')
        parms.append(
            {'title': _('Received'), 'value': received, 'type': 'text'})

    query['review_state'] = 'published'

    workflow = getToolByName(self.context, 'portal_workflow')
    if 'bika_worksheetanalysis_workflow' in self.request.form:
        query['worksheetanalysis_review_state'] = self.request.form[
            'bika_worksheetanalysis_workflow']
        ws_review_state = workflow.getTitleForStateOnType(
            self.request.form['bika_worksheetanalysis_workflow'], 'Analysis')
        parms.append(
            {'title': _('Assigned to worksheet'),
             'value': ws_review_state,
             'type': 'text'})

    # query all the analyses and increment the counts
    count_early = 0
    mins_early = 0
    count_late = 0
    mins_late = 0
    count_undefined = 0
    services = {}
    analyses = bc(query)
    for a in analyses:
        analysis = a.getObject()
        service_uid = analysis.getServiceUID()
        if service_uid not in services:
            services[service_uid] = {'count_early': 0,
                                     'count_late': 0,
                                     'mins_early': 0,
                                     'mins_late': 0,
                                     'count_undefined': 0, }
        earliness = analysis.getEarliness()
        if earliness < 0:
            count_late = services[service_uid]['count_late']
            mins_late = services[service_uid]['mins_late']
            count_late += 1
            mins_late -= earliness
            services[service_uid]['count_late'] = count_late
            services[service_uid]['mins_late'] = mins_late
        if earliness > 0:
            count_early = services[service_uid]['count_early']
            mins_early = services[service_uid]['mins_early']
            count_early += 1
            mins_early += earliness
            services[service_uid]['count_early'] = count_early
            services[service_uid]['mins_early'] = mins_early
        if earliness == 0:
            count_undefined = services[service_uid]['count_undefined']
            count_undefined += 1
            services[service_uid]['count_undefined'] = count_undefined

    # calculate averages
    for service_uid in services.keys():
        count_early = services[service_uid]['count_early']
        mins_early = services[service_uid]['mins_early']
        if count_early == 0:
            services[service_uid]['ave_early'] = ''
        else:
            avemins = mins_early / count_early
            services[service_uid]['ave_early'] = \
                api.to_dhm_format(minutes=avemins)
        count_late = services[service_uid]['count_late']
        mins_late = services[service_uid]['mins_late']
        if count_late == 0:
            services[service_uid]['ave_late'] = ''
        else:
            avemins = mins_late / count_late
            # pass minutes by keyword, as above; a positional argument
            # would be taken as days
            services[service_uid]['ave_late'] = \
                api.to_dhm_format(minutes=avemins)

    # and now let's do the actual report lines
    formats = {'columns': 7,
               'col_heads': [_('Analysis'),
                             _('Count'),
                             _('Undefined'),
                             _('Late'),
                             _('Average late'),
                             _('Early'),
                             _('Average early'),
                             ],
               'class': '',
               }

    total_count_early = 0
    total_count_late = 0
    total_mins_early = 0
    total_mins_late = 0
    total_count_undefined = 0
    datalines = []

    for
cat in sc(portal_type='AnalysisCategory', sort_on='sortable_title'): catline = [{'value': cat.Title, 'class': 'category_heading', 'colspan': 7}, ] first_time = True cat_count_early = 0 cat_count_late = 0 cat_count_undefined = 0 cat_mins_early = 0 cat_mins_late = 0 for service in sc(portal_type="AnalysisService", getCategoryUID=cat.UID, sort_on='sortable_title'): dataline = [{'value': service.Title, 'class': 'testgreen'}, ] if service.UID not in services: continue if first_time: datalines.append(catline) first_time = False # analyses found cat_count_early += services[service.UID]['count_early'] cat_count_late += services[service.UID]['count_late'] cat_count_undefined += services[service.UID]['count_undefined'] cat_mins_early += services[service.UID]['mins_early'] cat_mins_late += services[service.UID]['mins_late'] count = services[service.UID]['count_early'] + \ services[service.UID]['count_late'] + \ services[service.UID]['count_undefined'] dataline.append({'value': count, 'class': 'number'}) dataline.append( {'value': services[service.UID]['count_undefined'], 'class': 'number'}) dataline.append({'value': services[service.UID]['count_late'], 'class': 'number'}) dataline.append({'value': services[service.UID]['ave_late'], 'class': 'number'}) dataline.append({'value': services[service.UID]['count_early'], 'class': 'number'}) dataline.append({'value': services[service.UID]['ave_early'], 'class': 'number'}) datalines.append(dataline) # category totals dataline = [{'value': '%s - total' % (cat.Title), 'class': 'subtotal_label'}, ] dataline.append({'value': cat_count_early + cat_count_late + cat_count_undefined, 'class': 'subtotal_number'}) dataline.append({'value': cat_count_undefined, 'class': 'subtotal_number'}) dataline.append({'value': cat_count_late, 'class': 'subtotal_number'}) if cat_count_late: dataitem = {'value': cat_mins_late / cat_count_late, 'class': 'subtotal_number'} else: dataitem = {'value': 0, 'class': 'subtotal_number'} dataline.append(dataitem) dataline.append({'value': cat_count_early, 'class': 'subtotal_number'}) if cat_count_early: dataitem = {'value': cat_mins_early / cat_count_early, 'class': 'subtotal_number'} else: dataitem = {'value': 0, 'class': 'subtotal_number'} dataline.append(dataitem) total_count_early += cat_count_early total_count_late += cat_count_late total_count_undefined += cat_count_undefined total_mins_early += cat_mins_early total_mins_late += cat_mins_late # footer data footlines = [] footline = [] footline = [{'value': _('Total'), 'class': 'total'}, ] footline.append({'value': total_count_early + total_count_late + total_count_undefined, 'class': 'total number'}) footline.append({'value': total_count_undefined, 'class': 'total number'}) footline.append({'value': total_count_late, 'class': 'total number'}) if total_count_late: ave_mins = total_mins_late / total_count_late footline.append({'value': api.to_dhm_format(minutes=ave_mins), 'class': 'total number'}) else: footline.append({'value': ''}) footline.append({'value': total_count_early, 'class': 'total number'}) if total_count_early: ave_mins = total_mins_early / total_count_early footline.append({'value': api.to_dhm_format(minutes=ave_mins), 'class': 'total number'}) else: footline.append({'value': '', 'class': 'total number'}) footlines.append(footline) self.report_content = { 'headings': headings, 'parms': parms, 'formats': formats, 'datalines': datalines, 'footings': footlines} if self.request.get('output_format', '') == 'CSV': import csv import StringIO import datetime fieldnames = [ 
'Analysis', 'Count', 'Undefined', 'Late', 'Average late', 'Early', 'Average early', ] output = StringIO.StringIO() dw = csv.DictWriter(output, extrasaction='ignore', fieldnames=fieldnames) dw.writerow(dict((fn, fn) for fn in fieldnames)) for row in datalines: if len(row) == 1: # category heading thingy continue dw.writerow({ 'Analysis': row[0]['value'], 'Count': row[1]['value'], 'Undefined': row[2]['value'], 'Late': row[3]['value'], 'Average late': row[4]['value'], 'Early': row[5]['value'], 'Average early': row[6]['value'], }) report_data = output.getvalue() output.close() date = datetime.datetime.now().strftime("%Y%m%d%H%M") setheader = self.request.RESPONSE.setHeader setheader('Content-Type', 'text/csv') setheader("Content-Disposition", "attachment;filename=\"analysestats_%s.csv\"" % date) self.request.RESPONSE.write(report_data) else: return {'report_title': t(headings['header']), 'report_data': self.template()}
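# --- Illustrative sketch (not part of the original module) --------------
# ``getEarliness()`` above is read as signed minutes: negative means the
# analysis was late, positive means early, and zero is counted as
# undefined. Averages are plain divisions of accumulated minutes by the
# matching count, e.g.:
mins_late, count_late = 150, 3  # three analyses, 150 minutes late in total
ave_late_minutes = mins_late / count_late  # 50, rendered via api.to_dhm_format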
def __call__(self): # get all the data into datalines catalog = api.get_tool(CATALOG_ANALYSIS_LISTING, self.context) self.report_content = {} parms = [] headings = {} headings['header'] = "" count_all = 0 query = {} # Getting the query filters val = self.selection_macros.parse_client(self.request) if val: query[val['contentFilter'][0]] = val['contentFilter'][1] parms.append(val['parms']) val = self.selection_macros.parse_sampletype(self.request) if val: query[val['contentFilter'][0]] = val['contentFilter'][1] parms.append(val['parms']) val = self.selection_macros.parse_analysisservice(self.request) if val: query[val['contentFilter'][0]] = val['contentFilter'][1] parms.append(val['parms']) val = self.selection_macros.parse_daterange(self.request, 'created', 'Created') if val: query[val['contentFilter'][0]] = val['contentFilter'][1] parms.append(val['parms']) val = self.selection_macros.parse_daterange(self.request, 'getDatePublished', 'Date Published') if val: query[val['contentFilter'][0]] = val['contentFilter'][1] parms.append(val['parms']) val = self.selection_macros.parse_daterange(self.request, 'getDateReceived', 'Date Received') if val: query[val['contentFilter'][0]] = val['contentFilter'][1] parms.append(val['parms']) formats = { 'columns': 25, 'col_heads': [ _('Lab Number'), _('Testing Lab'), _('First Name'), _('Middle Name'), _('Last Name'), _('Gender'), _('Age'), _('Age Type'), _('Town'), _('Reporting County'), _('Reporting District'), _('Reporting Facility'), _('Date Onset'), _('Date of Reporting'), _('Was Specimen collected? '), _('Date specimen collected'), _('Type of Specimen'), _('Date Specimen Sent to Lab'), _('Date Specimen Received in Lab'), _('Date Published'), _('Condition of Specimen'), _('Comment'), _('Test Result') ], } # and now lets do the actual report lines datalines = [] laboratory = self.context.bika_setup.laboratory # Get analyses brains logger.info("Searching Analyses: {}".format(repr(query))) brains = catalog(query) # Collect all AR uids and Patient UIDs so only one query to get all # them will be needed ar_uids = list(set([brain.getParentUID for brain in brains])) ar_uids = filter(None, ar_uids) self.map_uids_to_brains(ar_uids) logger.info("Filling datalines with {} Analyses".format(len(brains))) for analysis in brains: # We get the AR and the patient of the # analysis here to avoid having to get them # inside each of the following method calls. 
# If they are not found its value will be None ar_brain = self.get_ar_brain(analysis) patient_brain = self.get_patient_brain(analysis) dataline = [] # Lab Number dataitem = self.get_lab_number(analysis) dataline.append(dataitem) # Testing Lab dataline.append({'value': laboratory.Title()}) #First Name dataitem = self.get_firstname(patient_brain) dataline.append(dataitem) #Middle Name dataitem = self.get_middlename(patient_brain) dataline.append(dataitem) #Last Name dataitem = self.get_lastname(patient_brain) dataline.append(dataitem) #Gender dataitem = self.get_gender(patient_brain) dataline.append(dataitem) #Age dataitem = self.get_age(patient_brain) dataline.append(dataitem) #AgeType dataitem = self.get_agetype(patient_brain) dataline.append(dataitem) # Facility Province dataitem = self.get_facility_province(ar_brain) dataline.append(dataitem) # Facility District dataitem = self.get_facility_district(ar_brain) dataline.append(dataitem) # Facility dataitem = self.get_client_name(ar_brain) dataline.append(dataitem) # Date of Collection - Onset dataitem = self.get_date_of_collection(ar_brain) dataline.append(dataitem) # Date Reporting dataitem = self.get_date_of_dispatch(ar_brain) dataline.append(dataitem) # Specimen Collected dataitem = self.get_date_of_collection(ar_brain) dataline.append(dataitem) # Date of Collection - Onset dataitem = self.get_date_of_collection(ar_brain) dataline.append(dataitem) # Specimen Type dataitem = self.get_specimentype(ar_brain) dataline.append(dataitem) # Date of Dispatch dataitem = self.get_date_of_dispatch(ar_brain) dataline.append(dataitem) # Date of Receiving dataitem = self.get_date_of_receiving(ar_brain) dataline.append(dataitem) # Date of Publication dataitem = self.get_date_published(analysis) dataline.append(dataitem) # Condition of Specimen #dataitem = self.get_date_published(analysis) #dataline.append(dataitem) # Comment #dataitem = self.get_date_published(analysis) ##dataline.append(dataitem) # Sex #dataitem = self.get_patient_sex(patient_brain) #dataline.append(dataitem) # Date Of Birth #dataitem = self.get_patient_dob(patient_brain) #dataline.append(dataitem) # Date of Testing #dataitem = self.get_date_of_testing(analysis) #dataline.append(dataitem) #Test Result dataitem = self.get_result(analysis) dataline.append(dataitem) count_all += 1 datalines.append(dataline) logger.info("Generating output") # footer data footlines = [] footline = [] footitem = {'value': _('Total'), 'class': 'total_label'} footline.append(footitem) footitem = {'value': count_all} footline.append(footitem) footlines.append(footline) self.report_content = { 'headings': headings, 'parms': parms, 'formats': formats, 'datalines': datalines, 'footings': footlines } if self.request.get('output_format', '') == 'CSV': fieldnames = formats.get('col_heads') output = StringIO.StringIO() dw = csv.DictWriter(output, extrasaction='ignore', fieldnames=fieldnames) dw.writerow(dict((fn, fn) for fn in fieldnames)) for row in datalines: dict_row = {} row_idx = 0 for column in fieldnames: dict_row[column] = row[row_idx]['value'] row_idx += 1 dw.writerow(dict_row) report_data = output.getvalue() output.close() date = datetime.datetime.now().strftime("%Y%m%d%H%M") setheader = self.request.RESPONSE.setHeader setheader('Content-Type', 'text/csv') setheader( "Content-Disposition", "attachment;filename=\"analysisresultbyclient_%s.csv" "\"" % date) self.request.RESPONSE.write(report_data) else: return { 'report_title': t(headings['header']), 'report_data': self.template() }
def __init__(self, context, request): super(AccreditationView, self).__init__(context, request) self.contentFilter = { 'portal_type': 'AnalysisService', 'sort_on': 'sortable_title', 'getAccredited': True, 'is_active': True } self.context_actions = {} self.title = self.context.translate(_("Accreditation")) self.icon = self.portal_url + \ "/++resource++bika.lims.images/accredited_big.png" lab = context.bika_setup.laboratory accredited = lab.getLaboratoryAccredited() self.mapping = { 'lab_is_accredited': accredited, 'lab_name': safe_unicode(lab.getName()), 'lab_country': safe_unicode(lab.getPhysicalAddress().get('country', '')), 'confidence': safe_unicode(lab.getConfidence()), 'accreditation_body_abbr': safe_unicode(lab.getAccreditationBody()), 'accreditation_body_name': safe_unicode(lab.getAccreditationBodyURL()), 'accreditation_standard': safe_unicode(lab.getAccreditation()), 'accreditation_reference': safe_unicode(lab.getAccreditationReference()) } if accredited: self.description = t( _(safe_unicode(lab.getAccreditationPageHeader()), mapping=self.mapping)) else: self.description = t( _("The lab is not accredited, or accreditation has not been " "configured. ")) msg = t(_("All Accredited analysis services are listed here.")) self.description = "%s<p><br/>%s</p>" % (self.description, msg) self.show_select_column = False request.set('disable_border', 1) self.review_states = [ { 'id': 'default', 'title': _('All'), 'contentFilter': {}, 'transitions': [ { 'id': 'empty' }, ], # none 'columns': [ 'Title', 'Keyword', 'Price', 'MaxTimeAllowed', 'DuplicateVariation', ], }, ] if not self.context.bika_setup.getShowPrices(): self.review_states[0]['columns'].remove('Price')
def process(self):
    self._parser.parse()
    parsed = self._parser.resume()
    self._errors = self._parser.errors
    self._warns = self._parser.warns
    self._logs = self._parser.logs
    self._priorizedsearchcriteria = ''

    if parsed is False:
        return False

    # Allowed analysis states
    allowed_ar_states_msg = [t(_(s)) for s in self.getAllowedARStates()]
    allowed_an_states_msg = [
        t(_(s)) for s in self.getAllowedAnalysisStates()]
    self.log("Allowed Analysis Request states: ${allowed_states}",
             mapping={'allowed_states': ', '.join(allowed_ar_states_msg)})
    self.log("Allowed analysis states: ${allowed_states}",
             mapping={'allowed_states': ', '.join(allowed_an_states_msg)})

    # Exclude non existing ACODEs
    acodes = []
    ancount = 0
    instprocessed = []
    importedars = {}
    importedinsts = {}
    rawacodes = self._parser.getAnalysisKeywords()
    exclude = self.getKeywordsToBeExcluded()
    for acode in rawacodes:
        if acode in exclude or not acode:
            continue
        service = self.bsc(getKeyword=acode)
        if not service:
            self.warn('Service keyword ${analysis_keyword} not found',
                      mapping={"analysis_keyword": acode})
        else:
            acodes.append(acode)
    if len(acodes) == 0:
        self.warn("Service keywords: no matches found")

    # Attachments will be created in any worksheet that contains
    # analyses that are updated by this import
    attachments = {}
    infile = self._parser.getInputFile()

    # searchcriteria = self.getIdSearchCriteria()
    # self.log(_("Search criteria: %s") % (', '.join(searchcriteria)))
    for objid, results in self._parser.getRawResults().iteritems():
        # More than one result is allowed for the same sample and
        # analysis; needed for calibration tests
        for result in results:
            analyses = self._getZODBAnalyses(objid)
            inst = None
            if len(analyses) == 0 and self.instrument_uid:
                # No registered analyses found, but maybe we need to
                # create them first, if an instrument id has been set in
                # the import form
                insts = self.bsc(portal_type='Instrument',
                                 UID=self.instrument_uid)
                if len(insts) == 0:
                    # No instrument found
                    self.warn("No Analysis Request with "
                              "'${allowed_ar_states}' "
                              "states found, and no QC "
                              "analyses found for ${object_id}",
                              mapping={"allowed_ar_states": ', '.join(
                                  allowed_ar_states_msg),
                                  "object_id": objid})
                    self.warn("Instrument not found")
                    continue

                inst = insts[0].getObject()

                # Create a new ReferenceAnalysis and link it to
                # the Instrument.
                # Here we have an objid (i.e. R01200012) and
                # a dict with results (the key is the AS keyword).
                # How can we create a ReferenceAnalysis if we don't know
                # which ReferenceSample we might use?
                # Ok. The objid HAS to be the ReferenceSample code.
                refsample = self.bc(portal_type='ReferenceSample', id=objid)
                if refsample and len(refsample) == 1:
                    refsample = refsample[0].getObject()
                elif refsample and len(refsample) > 1:
                    # More than one reference sample found!
                    self.warn("More than one reference sample found for "
                              "'${object_id}'",
                              mapping={"object_id": objid})
                    continue
                else:
                    # No reference sample found!
self.warn("No Reference Sample found for ${object_id}", mapping={"object_id": objid}) continue # For each acode, create a ReferenceAnalysis and attach it # to the Reference Sample services = self.bsc(portal_type='AnalysisService') service_uids = [service.UID for service in services if service.getObject().getKeyword() in result.keys()] analyses = inst.addReferences(refsample, service_uids) elif len(analyses) == 0: # No analyses found self.warn("No Analysis Request with " "'${allowed_ar_states}' " "states neither QC analyses found " "for ${object_id}", mapping={ "allowed_ar_states": ', '.join( allowed_ar_states_msg), "object_id": objid}) continue # Look for timestamp capturedate = result.get('DateTime', {}).get('DateTime', None) if capturedate: del result['DateTime'] for acode, values in result.iteritems(): if acode not in acodes: # Analysis keyword doesn't exist continue ans = [analysis for analysis in analyses if analysis.getKeyword() == acode] if len(ans) > 1: self.warn("More than one analysis found for " "${object_id} and ${analysis_keyword}", mapping={"object_id": objid, "analysis_keyword": acode}) continue elif len(ans) == 0: self.warn("No analyses found for ${object_id} " "and ${analysis_keyword}", mapping={"object_id": objid, "analysis_keyword": acode}) continue analysis = ans[0] # Create attachment in worksheet linked to this analysis. # Only if this import has not already created the # attachment # And only if the filename of the attachment is unique in # this worksheet. Otherwise we will attempt to use # existing attachment. wss = analysis.getBackReferences('WorksheetAnalysis') ws = wss[0] if wss else None if ws: if ws.getId() not in attachments: fn = infile.filename fn_attachments = self.get_attachment_filenames(ws) if fn in fn_attachments.keys(): attachments[ws.getId()] = fn_attachments[fn] else: attachments[ws.getId()] = \ self.create_attachment(ws, infile) if capturedate: values['DateTime'] = capturedate processed = self._process_analysis(objid, analysis, values) if processed: ancount += 1 if inst: # Calibration Test (import to Instrument) instprocessed.append(inst.UID()) importedinst = inst.title in importedinsts.keys() \ and importedinsts[inst.title] or [] if acode not in importedinst: importedinst.append(acode) importedinsts[inst.title] = importedinst else: ar = analysis.portal_type == 'Analysis' \ and analysis.aq_parent or None if ar and ar.UID: importedar = ar.getId() in importedars.keys() \ and importedars[ar.getId()] or [] if acode not in importedar: importedar.append(acode) importedars[ar.getId()] = importedar if ws: self.attach_attachment( analysis, attachments[ws.getId()]) else: self.warn( "Attachment cannot be linked to analysis as " "it is not assigned to a worksheet (%s)" % analysis) for arid, acodes in importedars.iteritems(): acodesmsg = ["Analysis %s" % acod for acod in acodes] self.log( "${request_id}: ${analysis_keywords} imported sucessfully", mapping={"request_id": arid, "analysis_keywords": acodesmsg}) for instid, acodes in importedinsts.iteritems(): acodesmsg = ["Analysis %s" % acod for acod in acodes] msg = "%s: %s %s" % (instid, ", ".join(acodesmsg), "imported sucessfully") self.log(msg) if self.instrument_uid: self.log( "Import finished successfully: ${nr_updated_ars} ARs, " "${nr_updated_instruments} Instruments and " "${nr_updated_results} " "results updated", mapping={"nr_updated_ars": str(len(importedars)), "nr_updated_instruments": str(len(importedinsts)), "nr_updated_results": str(ancount)}) else: self.log( "Import finished successfully: 
${nr_updated_ars} ARs and " "${nr_updated_results} results updated", mapping={"nr_updated_ars": str(len(importedars)), "nr_updated_results": str(ancount)})
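# Illustrative sketch only (not part of the importer): the raw-results
# shape this process() loop expects from the parser. The ids, keyword
# and value keys below are invented for illustration.
#
#   self._parser.getRawResults() == {
#       'AR-0001': [              # objid: AR id, Sample id or
#           {                     # ReferenceSample code
#               'Ca': {'DefaultResult': 'Reading', 'Reading': '9.81'},
#               'DateTime': {'DateTime': '2014-01-01 10:00'},
#           },
#       ],
#   }
#
# Each objid may carry several result dicts (needed for calibration
# tests), and each keyword entry is handed to
# self._process_analysis(objid, analysis, values) once a matching
# analysis has been resolved.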
def Import(context, request):
    """ Horiba Jobin-Yvon ICP analysis results
    """
    infile = request.form['data_file']
    fileformat = request.form['format']
    artoapply = request.form['artoapply']
    override = request.form['override']
    sample = request.form.get('sample', 'requestid')
    instrument = request.form.get('instrument', None)
    errors = []
    logs = []
    warns = []

    # Load the most suitable parser according to file extension/options/etc...
    parser = None
    if not hasattr(infile, 'filename'):
        errors.append(_("No file selected"))
    if fileformat == 'csv':
        parser = HoribaJobinYvonICPCSVParser(infile)
    else:
        errors.append(t(_("Unrecognized file format ${fileformat}",
                          mapping={"fileformat": fileformat})))

    if parser:
        # Load the importer
        status = ['sample_received', 'attachment_due', 'to_be_verified']
        if artoapply == 'received':
            status = ['sample_received']
        elif artoapply == 'received_tobeverified':
            status = ['sample_received', 'attachment_due', 'to_be_verified']

        over = [False, False]
        if override == 'nooverride':
            over = [False, False]
        elif override == 'override':
            over = [True, False]
        elif override == 'overrideempty':
            over = [True, True]

        sam = ['getRequestID', 'getSampleID', 'getClientSampleID']
        if sample == 'requestid':
            sam = ['getRequestID']
        elif sample == 'sampleid':
            sam = ['getSampleID']
        elif sample == 'clientsid':
            sam = ['getClientSampleID']
        elif sample == 'sample_clientsid':
            sam = ['getSampleID', 'getClientSampleID']

        importer = HoribaJobinYvonICPImporter(parser=parser,
                                              context=context,
                                              idsearchcriteria=sam,
                                              allowed_ar_states=status,
                                              allowed_analysis_states=None,
                                              override=over,
                                              instrument_uid=instrument)
        tbex = ''
        try:
            importer.process()
        except Exception:
            tbex = traceback.format_exc()
        errors = importer.errors
        logs = importer.logs
        warns = importer.warns
        if tbex:
            errors.append(tbex)

    results = {'errors': errors, 'log': logs, 'warns': warns}

    return json.dumps(results)
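# Illustrative only: Import() reads everything from request.form, so the
# expected payload looks roughly like this. The field values are the
# literal option tokens handled above; 'data_file' must expose a
# 'filename' attribute (e.g. a Zope FileUpload).
#
#   request.form = {
#       'data_file': uploaded_file,
#       'format': 'csv',
#       'artoapply': 'received_tobeverified',
#       'override': 'nooverride',
#       'sample': 'requestid',
#       'instrument': '',
#   }
#   Import(context, request)
#   # -> '{"errors": [...], "log": [...], "warns": [...]}'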
def folderitems(self):
    rc = getToolByName(self.context, REFERENCE_CATALOG)
    bsc = getToolByName(self.context, 'bika_setup_catalog')
    workflow = getToolByName(self.context, 'portal_workflow')
    mtool = getToolByName(self.context, 'portal_membership')
    checkPermission = mtool.checkPermission
    if not self.allow_edit:
        can_edit_analyses = False
    else:
        if self.contentFilter.get('getPointOfCapture', '') == 'field':
            can_edit_analyses = checkPermission(EditFieldResults,
                                                self.context)
        else:
            can_edit_analyses = checkPermission(EditResults, self.context)
        self.allow_edit = can_edit_analyses
    self.show_select_column = self.allow_edit
    context_active = isActive(self.context)

    self.categories = []
    items = super(AnalysesView, self).folderitems(full_objects=True)

    # manually skim retracted analyses from the list
    new_items = []
    for i, item in enumerate(items):
        # self.contentsMethod may return brains or objects.
        if 'obj' not in items[i]:
            continue
        obj = hasattr(items[i]['obj'], 'getObject') and \
            items[i]['obj'].getObject() or \
            items[i]['obj']
        if workflow.getInfoFor(obj, 'review_state') == 'retracted' \
                and not checkPermission(ViewRetractedAnalyses, self.context):
            continue
        new_items.append(item)
    items = new_items

    methods = self.get_methods_vocabulary()

    self.interim_fields = {}
    self.interim_columns = {}
    self.specs = {}
    show_methodinstr_columns = False
    for i, item in enumerate(items):
        # self.contentsMethod may return brains or objects.
        obj = hasattr(items[i]['obj'], 'getObject') and \
            items[i]['obj'].getObject() or \
            items[i]['obj']
        if workflow.getInfoFor(obj, 'review_state') == 'retracted' \
                and not checkPermission(ViewRetractedAnalyses, self.context):
            continue

        result = obj.getResult()
        service = obj.getService()
        calculation = service.getCalculation()
        unit = service.getUnit()
        keyword = service.getKeyword()

        if self.show_categories:
            cat = obj.getService().getCategoryTitle()
            items[i]['category'] = cat
            if cat not in self.categories:
                self.categories.append(cat)

        # Check for InterimFields attribute on our object,
        interim_fields = hasattr(obj, 'getInterimFields') \
            and obj.getInterimFields() or []
        # kick some pretty display values in.
        for x in range(len(interim_fields)):
            interim_fields[x]['formatted_value'] = \
                format_numeric_result(obj, interim_fields[x]['value'])
        self.interim_fields[obj.UID()] = interim_fields

        items[i]['service_uid'] = service.UID()
        items[i]['Service'] = service.Title()
        items[i]['Keyword'] = keyword
        items[i]['Unit'] = format_supsub(unit) if unit else ''
        items[i]['Result'] = ''
        items[i]['formatted_result'] = ''
        items[i]['interim_fields'] = interim_fields
        items[i]['Remarks'] = obj.getRemarks()
        items[i]['Uncertainty'] = ''
        items[i]['retested'] = obj.getRetested()
        items[i]['class']['retested'] = 'center'
        items[i]['result_captured'] = self.ulocalized_time(
            obj.getResultCaptureDate(), long_format=0)
        items[i]['calculation'] = calculation and True or False
        try:
            items[i]['Partition'] = obj.getSamplePartition().getId()
        except AttributeError:
            items[i]['Partition'] = ''
        if obj.portal_type == "ReferenceAnalysis":
            items[i]['DueDate'] = self.ulocalized_time(
                obj.aq_parent.getExpiryDate(), long_format=0)
        else:
            items[i]['DueDate'] = self.ulocalized_time(
                obj.getDueDate(), long_format=1)
        cd = obj.getResultCaptureDate()
        items[i]['CaptureDate'] = cd and \
            self.ulocalized_time(cd, long_format=1) or ''
        items[i]['Attachments'] = ''

        item['allow_edit'] = []
        client_or_lab = ""

        tblrowclass = items[i].get('table_row_class', '')
        if obj.portal_type == 'ReferenceAnalysis':
            items[i]['st_uid'] = obj.aq_parent.UID()
            items[i]['table_row_class'] = \
                ' '.join([tblrowclass, 'qc-analysis'])
        elif obj.portal_type == 'DuplicateAnalysis' and \
                obj.getAnalysis().portal_type == 'ReferenceAnalysis':
            items[i]['st_uid'] = obj.aq_parent.UID()
            items[i]['table_row_class'] = \
                ' '.join([tblrowclass, 'qc-analysis'])
        else:
            if self.context.portal_type == 'AnalysisRequest':
                sample = self.context.getSample()
                st_uid = sample.getSampleType().UID()
                items[i]['st_uid'] = st_uid
                if st_uid not in self.specs:
                    proxies = bsc(portal_type='AnalysisSpec',
                                  getSampleTypeUID=st_uid)
            elif self.context.portal_type == "Worksheet":
                if obj.portal_type == "DuplicateAnalysis":
                    sample = obj.getAnalysis().getSample()
                elif obj.portal_type == "RejectAnalysis":
                    sample = obj.getAnalysis().getSample()
                else:
                    sample = obj.aq_parent.getSample()
                st_uid = sample.getSampleType().UID()
                items[i]['st_uid'] = st_uid
                if st_uid not in self.specs:
                    proxies = bsc(portal_type='AnalysisSpec',
                                  getSampleTypeUID=st_uid)
            elif self.context.portal_type == 'Sample':
                st_uid = self.context.getSampleType().UID()
                items[i]['st_uid'] = st_uid
                if st_uid not in self.specs:
                    proxies = bsc(portal_type='AnalysisSpec',
                                  getSampleTypeUID=st_uid)
            else:
                st_uid = None
                proxies = []
            if st_uid and st_uid not in self.specs:
                for spec in (p.getObject() for p in proxies):
                    if spec.getClientUID() == obj.getClientUID():
                        client_or_lab = 'client'
                    elif spec.getClientUID() == \
                            self.context.bika_setup.bika_analysisspecs.UID():
                        client_or_lab = 'lab'
                    else:
                        continue
                    for keyword, results_range in \
                            spec.getResultsRangeDict().items():
                        # hidden form field 'specs' keyed by sampletype uid:
                        # {st_uid: {'lab/client':{keyword:{min,max,error}}}}
                        if st_uid in self.specs:
                            if client_or_lab in self.specs[st_uid]:
                                self.specs[st_uid][client_or_lab][keyword] = \
                                    results_range
                            else:
                                self.specs[st_uid][client_or_lab] = \
                                    {keyword: results_range}
                        else:
                            self.specs[st_uid] = \
                                {client_or_lab: {keyword: results_range}}

        if checkPermission(ManageBika, self.context):
            service_uid = service.UID()
            latest = rc.lookupObject(service_uid).version_id
            items[i]['Service'] = service.Title()
            items[i]['class']['Service'] = "service_title"

        # Show version number of out-of-date objects
        # No: This should be done in another column, if at all.
        # The (vX) value confuses some more fragile forms.
        # if hasattr(obj, 'reference_versions') and \
        #    service_uid in obj.reference_versions and \
        #    latest != obj.reference_versions[service_uid]:
        #     items[i]['after']['Service'] = "(v%s)" % \
        #         (obj.reference_versions[service_uid])

        # choices defined on Service apply to result fields.
        choices = service.getResultOptions()
        if choices:
            item['choices']['Result'] = choices

        # permission to view this item's results
        can_view_result = \
            getSecurityManager().checkPermission(ViewResults, obj)

        # permission to edit this item's results
        # Editing Field Results is possible while in Sample Due.
        poc = self.contentFilter.get("getPointOfCapture", 'lab')
        can_edit_analysis = self.allow_edit and context_active and \
            ((poc == 'field' and
              getSecurityManager().checkPermission(EditFieldResults, obj)) or
             (poc != 'field' and
              getSecurityManager().checkPermission(EditResults, obj)))

        allowed_method_states = ['to_be_sampled',
                                 'to_be_preserved',
                                 'sample_received',
                                 'sample_registered',
                                 'sampled',
                                 'assigned']

        # Prevent from being edited if the instrument assigned
        # is not valid (out-of-date or uncalibrated), except if
        # the analysis is a QC with assigned status
        can_edit_analysis = can_edit_analysis \
            and (obj.isInstrumentValid()
                 or (obj.portal_type == 'ReferenceAnalysis'
                     and item['review_state'] in allowed_method_states))

        if can_edit_analysis:
            items[i]['allow_edit'].extend(['Analyst', 'Result', 'Remarks'])
            # if the Result field is editable, our interim fields are too
            for f in self.interim_fields[obj.UID()]:
                items[i]['allow_edit'].append(f['keyword'])

        # if there isn't a calculation then result must be re-testable,
        # and if there are interim fields, they too must be re-testable.
        if not items[i]['calculation'] or \
           (items[i]['calculation'] and self.interim_fields[obj.UID()]):
            items[i]['allow_edit'].append('retested')

        # TODO: Only the labmanager must be able to change the method
        # can_set_method = getSecurityManager().checkPermission(SetAnalysisMethod, obj)
        can_set_method = can_edit_analysis \
            and item['review_state'] in allowed_method_states
        method = obj.getMethod() \
            if hasattr(obj, 'getMethod') and obj.getMethod() \
            else service.getMethod()

        # Display the methods selector if the AS has at least one
        # method assigned
        item['Method'] = ''
        item['replace']['Method'] = ''
        if can_set_method:
            voc = self.get_methods_vocabulary(obj)
            if voc:
                # The service has at least one method available
                item['Method'] = method.UID() if method else ''
                item['choices']['Method'] = voc
                item['allow_edit'].append('Method')
                show_methodinstr_columns = True
            elif method:
                # This should never happen
                # The analysis has a method set, but its parent
                # service hasn't any method available O_o
                item['Method'] = method.Title()
                item['replace']['Method'] = "<a href='%s'>%s</a>" % \
                    (method.absolute_url(), method.Title())
                show_methodinstr_columns = True
        elif method:
            # Edition not allowed, but method set
            item['Method'] = method.Title()
            item['replace']['Method'] = "<a href='%s'>%s</a>" % \
                (method.absolute_url(), method.Title())
            show_methodinstr_columns = True

        # TODO: Instrument selector dynamic behavior in worksheet Results
        # Only the labmanager must be able to change the instrument to be
        # used. Also, the instrument selection should be done in
        # accordance with the method selected
        # can_set_instrument = service.getInstrumentEntryOfResults() and
        #     getSecurityManager().checkPermission(SetAnalysisInstrument, obj)
        can_set_instrument = service.getInstrumentEntryOfResults() \
            and can_edit_analysis \
            and item['review_state'] in allowed_method_states

        item['Instrument'] = ''
        item['replace']['Instrument'] = ''
        if service.getInstrumentEntryOfResults():
            # If the analysis has an instrument already assigned, use it;
            # otherwise, use the Service's default instrument
            if hasattr(obj, 'getInstrument') and obj.getInstrument():
                instrument = obj.getInstrument()
            else:
                instrument = service.getInstrument()

            if can_set_instrument:
                # Edition allowed
                voc = self.get_instruments_vocabulary(obj)
                if voc:
                    # The service has at least one instrument available
                    item['Instrument'] = \
                        instrument.UID() if instrument else ''
                    item['choices']['Instrument'] = voc
                    item['allow_edit'].append('Instrument')
                    show_methodinstr_columns = True
                elif instrument:
                    # This should never happen
                    # The analysis has an instrument set, but the
                    # service hasn't any available instrument
                    item['Instrument'] = instrument.Title()
                    item['replace']['Instrument'] = "<a href='%s'>%s</a>" % \
                        (instrument.absolute_url(), instrument.Title())
                    show_methodinstr_columns = True
            elif instrument:
                # Edition not allowed, but instrument set
                item['Instrument'] = instrument.Title()
                item['replace']['Instrument'] = "<a href='%s'>%s</a>" % \
                    (instrument.absolute_url(), instrument.Title())
                show_methodinstr_columns = True
        else:
            # Manual entry of results, instrument not allowed
            item['Instrument'] = _('Manual')
            msgtitle = t(_(
                "Instrument entry of results not allowed for ${service}",
                mapping={"service": safe_unicode(service.Title())},
            ))
            item['replace']['Instrument'] = \
                '<a href="#" title="%s">%s</a>' % (msgtitle, t(_('Manual')))

        # Sets the analyst assigned to this analysis
        if can_edit_analysis:
            analyst = obj.getAnalyst()
            # widget default: current user
            if not analyst:
                analyst = mtool.getAuthenticatedMember().getUserName()
            items[i]['Analyst'] = analyst
            item['choices']['Analyst'] = self.getAnalysts()
        else:
            items[i]['Analyst'] = obj.getAnalystName()

        # If the user can attach files to analyses, show the attachment col
        can_add_attachment = \
            getSecurityManager().checkPermission(AddAttachment, obj)
        if can_add_attachment or can_view_result:
            attachments = ""
            if hasattr(obj, 'getAttachment'):
                for attachment in obj.getAttachment():
                    af = attachment.getAttachmentFile()
                    icon = af.getBestIcon()
                    attachments += \
                        "<span class='attachment' attachment_uid='%s'>" % \
                        (attachment.UID())
                    if icon:
                        attachments += "<img src='%s/%s'/>" % \
                            (self.portal_url, icon)
                    attachments += \
                        '<a href="%s/at_download/AttachmentFile">%s</a>' % \
                        (attachment.absolute_url(), af.filename)
                    if can_edit_analysis:
                        attachments += \
                            "<img class='deleteAttachmentButton' " \
                            "attachment_uid='%s' src='%s'/>" % \
                            (attachment.UID(),
                             "++resource++bika.lims.images/delete.png")
                    attachments += "<br/></span>"
            # strip the trailing "<br/></span>" (12 chars) and re-close
            # the last span
            items[i]['replace']['Attachments'] = \
                attachments[:-12] + "</span>"

        # Only display data bearing fields if we have ViewResults
        # permission, otherwise just put an icon in Result column.
        if can_view_result:
            items[i]['Result'] = result
            scinot = self.context.bika_setup.getScientificNotationResults()
            dmk = self.context.bika_setup.getResultsDecimalMark()
            items[i]['formatted_result'] = obj.getFormattedResult(
                sciformat=int(scinot), decimalmark=dmk)
            items[i]['Uncertainty'] = format_uncertainty(
                obj, result, decimalmark=dmk, sciformat=int(scinot))
        else:
            items[i]['Specification'] = ""
            if 'Result' in items[i]['allow_edit']:
                items[i]['allow_edit'].remove('Result')
            items[i]['before']['Result'] = \
                '<img width="16" height="16" ' + \
                'src="%s/++resource++bika.lims.images/to_follow.png"/>' % \
                (self.portal_url)

        # Everyone can see valid-ranges
        spec = self.get_analysis_spec(obj)
        if spec:
            min_val = spec.get('min', '')
            min_str = ">{0}".format(min_val) if min_val else ''
            max_val = spec.get('max', '')
            max_str = "<{0}".format(max_val) if max_val else ''
            error_val = spec.get('error', '')
            error_str = "{0}%".format(error_val) if error_val else ''
            rngstr = ",".join(
                [x for x in [min_str, max_str, error_str] if x])
        else:
            rngstr = ""
        items[i]['Specification'] = rngstr

        # Add this analysis' interim fields to the interim_columns list
        for f in self.interim_fields[obj.UID()]:
            if f['keyword'] not in self.interim_columns and \
               not f.get('hidden', False):
                self.interim_columns[f['keyword']] = f['title']
            # and to the item itself
            items[i][f['keyword']] = f
            items[i]['class'][f['keyword']] = 'interim'

        # check if this analysis is late/overdue
        resultdate = obj.aq_parent.getDateSampled() \
            if obj.portal_type == 'ReferenceAnalysis' \
            else obj.getResultCaptureDate()
        duedate = obj.aq_parent.getExpiryDate() \
            if obj.portal_type == 'ReferenceAnalysis' \
            else obj.getDueDate()
        items[i]['replace']['DueDate'] = \
            self.ulocalized_time(duedate, long_format=1)
        if items[i]['review_state'] not in ['to_be_sampled',
                                            'to_be_preserved',
                                            'sample_due',
                                            'published']:
            if (resultdate and resultdate > duedate) \
               or (not resultdate and DateTime() > duedate):
                items[i]['replace']['DueDate'] = \
                    '%s <img width="16" height="16" ' \
                    'src="%s/++resource++bika.lims.images/late.png" ' \
                    'title="%s"/>' % \
                    (self.ulocalized_time(duedate, long_format=1),
                     self.portal_url,
                     t(_("Late Analysis")))

        # Submitting user may not verify results (admin can though)
        if items[i]['review_state'] == 'to_be_verified' and \
           not checkPermission(VerifyOwnResults, obj):
            user_id = getSecurityManager().getUser().getId()
            self_submitted = False
            try:
                review_history = list(
                    workflow.getInfoFor(obj, 'review_history'))
                review_history.reverse()
                for event in review_history:
                    if event.get('action') == 'submit':
                        if event.get('actor') == user_id:
                            self_submitted = True
                        break
                if self_submitted:
                    items[i]['after']['state_title'] = \
                        "<img src='++resource++bika.lims.images/submitted-by-current-user.png' title='%s'/>" % \
                        (t(_("Cannot verify: Submitted by current user")))
            except WorkflowException:
                pass

        # add icon for assigned analyses in AR views
        if self.context.portal_type == 'AnalysisRequest':
            obj = items[i]['obj']
            if obj.portal_type in ['ReferenceAnalysis',
                                   'DuplicateAnalysis'] or \
               workflow.getInfoFor(
                    obj, 'worksheetanalysis_review_state') == 'assigned':
                br = obj.getBackReferences('WorksheetAnalysis')
                if len(br) > 0:
                    ws = br[0]
                    items[i]['after']['state_title'] = \
                        "<a href='%s'><img src='++resource++bika.lims.images/worksheet.png' title='%s'/></a>" % \
                        (ws.absolute_url(),
                         t(_("Assigned to: ${worksheet_id}",
                             mapping={'worksheet_id':
                                      safe_unicode(ws.id)})))

    # the TAL requires values for all interim fields on all
    # items, so we set blank values in unused cells
    for item in items:
        for field in self.interim_columns:
            if field not in item:
                item[field] = ''

    # XXX order the list of interim columns
    interim_keys = self.interim_columns.keys()
    interim_keys.reverse()

    # add InterimFields keys to columns
    for col_id in interim_keys:
        if col_id not in self.columns:
            self.columns[col_id] = {
                'title': self.interim_columns[col_id],
                'input_width': '6',
                'input_class': 'ajax_calculate numeric',
                'sortable': False}

    if can_edit_analyses:
        new_states = []
        for state in self.review_states:
            # InterimFields are displayed in review_state
            # They are anyway available through View.columns though.
            # In case of hidden fields, the calcs.py should check
            # calcs/services for additional InterimFields!!
            pos = 'Result' in state['columns'] and \
                state['columns'].index('Result') or len(state['columns'])
            for col_id in interim_keys:
                if col_id not in state['columns']:
                    state['columns'].insert(pos, col_id)
            # retested column is added after Uncertainty.
            pos = 'Uncertainty' in state['columns'] and \
                state['columns'].index('Uncertainty') + 1 or \
                len(state['columns'])
            state['columns'].insert(pos, 'retested')
            new_states.append(state)
        self.review_states = new_states
        # Allow selecting individual analyses
        self.show_select_column = True

    # Dry Matter.
    # The Dry Matter column is never enabled for reference sample contexts
    # and refers to getReportDryMatter in ARs.
    if items and \
       (hasattr(self.context, 'getReportDryMatter') and
            self.context.getReportDryMatter()):
        # look through all items
        # if the item's Service supports ReportDryMatter, add getResultDM().
        for item in items:
            if item['obj'].getService().getReportDryMatter():
                item['ResultDM'] = item['obj'].getResultDM()
            else:
                item['ResultDM'] = ''
            if item['ResultDM']:
                item['after']['ResultDM'] = "<em class='discreet'>%</em>"

        # modify the review_states list to include the ResultDM column
        new_states = []
        for state in self.review_states:
            pos = 'Uncertainty' in state['columns'] and \
                state['columns'].index('Uncertainty') + 1 or \
                len(state['columns'])
            state['columns'].insert(pos, 'ResultDM')
            new_states.append(state)
        self.review_states = new_states

    self.categories.sort()

    # self.json_specs = json.dumps(self.specs)
    self.json_interim_fields = json.dumps(self.interim_fields)
    self.items = items

    # Method and Instrument columns must be shown or hidden at the
    # same time, because the value assigned to one causes
    # a value reassignment to the other (one method can be performed
    # by different instruments)
    self.columns['Method']['toggle'] = show_methodinstr_columns
    self.columns['Instrument']['toggle'] = show_methodinstr_columns

    return items
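# Illustrative only: the two payloads folderitems() prepares for the page
# template. The nesting follows the code and comments above; the concrete
# uids and values are invented.
#
#   self.specs = {
#       '<sampletype-uid>': {
#           'lab': {'Ca': {'min': '5', 'max': '10', 'error': '10'}},
#           'client': {'Ca': {'min': '4', 'max': '12', 'error': '5'}},
#       },
#   }
#   self.json_interim_fields = json.dumps({
#       '<analysis-uid>': [
#           {'keyword': 'dilution', 'title': 'Dilution',
#            'value': '2', 'formatted_value': '2'},
#       ],
#   })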
def __call__(self):
    bac = getToolByName(self.context, 'bika_analysis_catalog')
    self.report_content = {}
    parm_lines = {}
    parms = []
    headings = {}
    headings['header'] = _("Analyses retested")
    headings['subheader'] = _("Analyses which have been retested")

    count_all = 0

    query = {'portal_type': 'Analysis',
             'getRetested': True,
             'sort_order': 'reverse'}

    date_query = formatDateQuery(self.context, 'Received')
    if date_query:
        query['getDateReceived'] = date_query
        received = formatDateParms(self.context, 'Received')
    else:
        received = 'Undefined'
    parms.append(
        {'title': _('Received'),
         'value': received,
         'type': 'text'})

    wf_tool = getToolByName(self.context, 'portal_workflow')
    if 'bika_analysis_workflow' in self.request.form:
        query['review_state'] = self.request.form['bika_analysis_workflow']
        review_state = wf_tool.getTitleForStateOnType(
            self.request.form['bika_analysis_workflow'], 'Analysis')
    else:
        review_state = 'Undefined'
    parms.append(
        {'title': _('Status'),
         'value': review_state,
         'type': 'text'})

    if 'bika_cancellation_workflow' in self.request.form:
        query['cancellation_state'] = self.request.form[
            'bika_cancellation_workflow']
        cancellation_state = wf_tool.getTitleForStateOnType(
            self.request.form['bika_cancellation_workflow'], 'Analysis')
    else:
        cancellation_state = 'Undefined'
    parms.append(
        {'title': _('Active'),
         'value': cancellation_state,
         'type': 'text'})

    if 'bika_worksheetanalysis_workflow' in self.request.form:
        query['worksheetanalysis_review_state'] = self.request.form[
            'bika_worksheetanalysis_workflow']
        ws_review_state = wf_tool.getTitleForStateOnType(
            self.request.form['bika_worksheetanalysis_workflow'],
            'Analysis')
    else:
        ws_review_state = 'Undefined'
    parms.append(
        {'title': _('Assigned to worksheet'),
         'value': ws_review_state,
         'type': 'text'})

    # and now let's do the actual report lines
    formats = {'columns': 8,
               'col_heads': [_('Client'),
                             _('Request'),
                             _('Sample type'),
                             _('Sample point'),
                             _('Category'),
                             _('Analysis'),
                             _('Received'),
                             _('Status'),
                             ],
               'class': '',
               }

    datalines = []
    clients = {}
    sampletypes = {}
    samplepoints = {}
    categories = {}
    services = {}

    for a_proxy in bac(query):
        analysis = a_proxy.getObject()
        dataline = []
        dataitem = {'value': analysis.getClientTitle()}
        dataline.append(dataitem)
        dataitem = {'value': analysis.getRequestID()}
        dataline.append(dataitem)
        dataitem = {'value': analysis.aq_parent.getSampleTypeTitle()}
        dataline.append(dataitem)
        dataitem = {'value': analysis.aq_parent.getSamplePointTitle()}
        dataline.append(dataitem)
        dataitem = {'value': analysis.getCategoryTitle()}
        dataline.append(dataitem)
        dataitem = {'value': analysis.getServiceTitle()}
        dataline.append(dataitem)
        dataitem = {'value': self.ulocalized_time(
            analysis.getDateReceived())}
        dataline.append(dataitem)
        state = wf_tool.getInfoFor(analysis, 'review_state', '')
        review_state = wf_tool.getTitleForStateOnType(state, 'Analysis')
        dataitem = {'value': review_state}
        dataline.append(dataitem)

        datalines.append(dataline)
        count_all += 1

    # table footer data
    footlines = []
    footline = []
    footitem = {'value': _('Number of analyses retested for period'),
                'colspan': 7,
                'class': 'total_label'}
    footline.append(footitem)
    footitem = {'value': count_all}
    footline.append(footitem)
    footlines.append(footline)

    self.report_content = {
        'headings': headings,
        'parms': parms,
        'formats': formats,
        'datalines': datalines,
        'footings': footlines}

    title = t(headings['header'])

    return {'report_title': title,
            'report_data': self.template()}
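# Illustrative only: a fully-populated catalog query as assembled above,
# with all optional workflow filters present. The exact dict returned by
# formatDateQuery is an assumption here; the other keys and values are
# the ones set in the code.
#
#   query = {'portal_type': 'Analysis',
#            'getRetested': True,
#            'sort_order': 'reverse',
#            'getDateReceived': <dict returned by formatDateQuery>,
#            'review_state': 'verified',
#            'cancellation_state': 'active',
#            'worksheetanalysis_review_state': 'assigned'}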
def __call__(self):
    header = _("Reference analysis QC")
    subheader = _("Reference analysis quality control graphs")

    MinimumResults = self.context.bika_setup.getMinimumResults()

    warning_icon = "<img src='" + self.portal_url + \
        "/++resource++bika.lims.images/warning.png' height='9' width='9'/>"
    error_icon = "<img src='" + self.portal_url + \
        "/++resource++bika.lims.images/exclamation.png' height='9' width='9'/>"

    self.parms = []
    titles = []

    sample_uid = self.request.form.get('ReferenceSampleUID', '')
    sample = self.reference_catalog.lookupObject(sample_uid)
    if not sample:
        message = _("No reference sample was selected.")
        self.context.plone_utils.addPortalMessage(message, 'error')
        return self.default_template()

    self.parms.append(
        {'title': _("Reference Sample"), 'value': sample.Title()})
    titles.append(sample.Title())

    service_uid = self.request.form.get('ReferenceServiceUID', '')
    service = self.reference_catalog.lookupObject(service_uid)
    if not service:
        message = _("No analysis services were selected.")
        self.context.plone_utils.addPortalMessage(message, 'error')
        return self.default_template()

    self.contentFilter = {
        'portal_type': 'ReferenceAnalysis',
        'review_state': ['verified', 'published'],
        'path': {"query": "/".join(sample.getPhysicalPath()),
                 "level": 0}}

    self.parms.append(
        {'title': _("Analysis Service"), 'value': service.Title()})
    titles.append(service.Title())

    val = self.selection_macros.parse_daterange(self.request,
                                                'getDateVerified',
                                                'DateVerified')
    if val:
        self.contentFilter[val['contentFilter'][0]] = \
            val['contentFilter'][1]
        self.parms.append(val['parms'])
        titles.append(val['titles'])

    proxies = self.bika_analysis_catalog(self.contentFilter)
    if not proxies:
        message = _("No analyses matched your query")
        self.context.plone_utils.addPortalMessage(message, 'error')
        return self.default_template()

    # Compile a list with all relevant analysis data
    analyses = []
    out_of_range_count = 0
    results = []
    capture_dates = []

    plotdata = ""
    tabledata = []

    for analysis in proxies:
        analysis = analysis.getObject()
        service = analysis.getService()
        resultsrange = [x for x in sample.getReferenceResults()
                        if x['uid'] == service_uid][0]
        try:
            result = float(analysis.getResult())
            results.append(result)
        except (TypeError, ValueError):
            result = analysis.getResult()
        capture_dates.append(analysis.getResultCaptureDate())
        # Only numeric results can be checked against the valid range
        if isinstance(result, float) and \
           (result < float(resultsrange['min']) or
                result > float(resultsrange['max'])):
            out_of_range_count += 1
        try:
            precision = str(service.getPrecision())
        except Exception:
            precision = "2"
        try:
            formatted_result = ("%." + precision + "f") % result
        except (TypeError, ValueError):
            formatted_result = result
        tabledata.append({
            _("Analysis"): analysis.getId(),
            _("Result"): formatted_result,
            _("Analyst"): analysis.getAnalyst(),
            _("Captured"): analysis.getResultCaptureDate().strftime(
                self.date_format_long)})
        plotdata += "%s\t%s\t%s\t%s\n" % (
            analysis.getResultCaptureDate().strftime(
                self.date_format_long),
            result,
            resultsrange['min'],
            resultsrange['max'])
    plotdata = plotdata.encode('utf-8')

    result_values = [int(r) for r in results]
    result_dates = [c for c in capture_dates]

    self.parms += [
        {"title": _("Total analyses"), "value": len(proxies)},
    ]

    # This variable is output to the TAL
    self.report_data = {
        'header': header,
        'subheader': subheader,
        'parms': self.parms,
        'tables': [],
        'footnotes': [],
    }

    if MinimumResults <= len(proxies):
        plotscript = """
        set terminal png transparent truecolor enhanced size 700,350 font "Verdana, 8"
        set title "%(title)s"
        set xlabel "%(xlabel)s"
        set ylabel "%(ylabel)s"
        set key off
        #set logscale
        set timefmt "%(timefmt)s"
        set xdata time
        set format x "%(xformat)s"
        set xrange ["%(x_start)s":"%(x_end)s"]
        set auto fix
        set offsets graph 0, 0, 1, 1
        set xtics border nomirror rotate by 90 font "Verdana, 5" offset 0,-3
        set ytics nomirror

        f(x) = mean_y
        fit f(x) 'gpw_DATAFILE_gpw' u 1:3 via mean_y
        stddev_y = sqrt(FIT_WSSR / (FIT_NDF + 1))

        plot mean_y-stddev_y with filledcurves y1=mean_y lt 1 lc rgb "#efefef",\
             mean_y+stddev_y with filledcurves y1=mean_y lt 1 lc rgb "#efefef",\
             mean_y with lines lc rgb '#ffffff' lw 3,\
             "gpw_DATAFILE_gpw" using 1:3 title 'data' with points pt 7 ps 1 lc rgb '#0000ee' lw 2,\
             '' using 1:3 smooth unique lc rgb '#aaaaaa' lw 2,\
             '' using 1:4 with lines lc rgb '#000000' lw 1,\
             '' using 1:5 with lines lc rgb '#000000' lw 1""" % \
            {
                'title': "",
                'xlabel': "",
                'ylabel': service.getUnit(),
                'x_start': "%s" % min(result_dates).strftime(
                    self.date_format_short),
                'x_end': "%s" % max(result_dates).strftime(
                    self.date_format_short),
                'timefmt': r'%Y-%m-%d %H:%M',
                'xformat': '%%Y-%%m-%%d\n%%H:%%M',
            }

        plot_png = plot(str(plotdata),
                        plotscript=str(plotscript),
                        usefifo=False)

        # Temporary PNG data file
        fh, data_fn = tempfile.mkstemp(suffix='.png')
        os.write(fh, plot_png)
        plot_url = data_fn
        self.request['to_remove'].append(data_fn)
    else:
        plot_url = ""

    table = {
        'title': "%s: %s (%s)" % (
            t(_("Analysis Service")),
            service.Title(),
            service.getKeyword()),
        'columns': [_('Analysis'),
                    _('Result'),
                    _('Analyst'),
                    _('Captured')],
        'parms': [],
        'data': tabledata,
        'plot_url': plot_url,
    }

    self.report_data['tables'].append(table)

    translate = self.context.translate

    # footnotes
    if out_of_range_count:
        msgid = _("Analyses out of range")
        self.report_data['footnotes'].append(
            "%s %s" % (error_icon, t(msgid)))
        self.report_data['parms'].append(
            {"title": _("Analyses out of range"),
             "value": out_of_range_count})

    title = t(header)
    if titles:
        title += " (%s)" % " ".join(titles)
    return {
        'report_title': title,
        'report_data': self.template(),
    }
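# Illustrative note on the gnuplot column numbers used above: each
# plotdata row is "<date> <time>\t<result>\t<min>\t<max>", e.g.
#
#   2014-01-01 10:00    9.81    9.00    11.00
#
# Because the timestamp itself contains a space, gnuplot sees five
# whitespace-separated fields per row, which is why the plotscript reads
# the result as column 3 ('u 1:3') and the min/max limits as columns 4
# and 5.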
def pretty_title_or_id(context, obj, empty_value=_marker, domain='plone'):
    _ = MessageFactory(domain)
    title = _pretty_title_or_id(context, obj, empty_value=empty_value)
    return t(context.translate(_(safe_unicode(title))))
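# Illustrative usage (the object is hypothetical): returns the translated
# title when the object has one, otherwise falls back to its id,
# translated through the given i18n domain.
#
#   title = pretty_title_or_id(context, some_obj, domain='plone')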
def folderitem(self, obj, item, index):
    # Additional info from AnalysisRequest to be added to the item
    # generated by default by bikalisting.

    # Call the folderitem method from the base class
    item = BikaListingView.folderitem(self, obj, item, index)
    if not item:
        return None

    member = self.mtool.getAuthenticatedMember()
    roles = member.getRoles()
    hideclientlink = 'RegulatoryInspector' in roles \
        and 'Manager' not in roles \
        and 'LabManager' not in roles \
        and 'LabClerk' not in roles

    sample = obj.getSample()
    url = obj.absolute_url()
    if getSecurityManager().checkPermission(EditResults, obj):
        url += "/manage_results"

    item['Client'] = obj.aq_parent.Title()
    if not hideclientlink:
        item['replace']['Client'] = "<a href='%s'>%s</a>" % \
            (obj.aq_parent.absolute_url(), obj.aq_parent.Title())
    item['Creator'] = self.user_fullname(obj.Creator())
    item['getRequestID'] = obj.getRequestID()
    item['replace']['getRequestID'] = "<a href='%s'>%s</a>" % \
        (url, item['getRequestID'])
    item['getSample'] = sample
    item['replace']['getSample'] = \
        "<a href='%s'>%s</a>" % (sample.absolute_url(), sample.Title())

    item['replace']['getProfilesTitle'] = ", ".join(
        [p.Title() for p in obj.getProfiles()])

    analysesnum = obj.getAnalysesNum()
    if analysesnum:
        item['getAnalysesNum'] = \
            str(analysesnum[0]) + '/' + str(analysesnum[1])
    else:
        item['getAnalysesNum'] = ''

    batch = obj.getBatch()
    if batch:
        item['BatchID'] = batch.getBatchID()
        item['replace']['BatchID'] = "<a href='%s'>%s</a>" % \
            (batch.absolute_url(), item['BatchID'])
    else:
        item['BatchID'] = ''

    val = obj.Schema().getField('SubGroup').get(obj)
    item['SubGroup'] = val.Title() if val else ''

    samplingdate = obj.getSample().getSamplingDate()
    item['SamplingDate'] = self.ulocalized_time(
        samplingdate, long_format=1)
    item['getDateReceived'] = self.ulocalized_time(obj.getDateReceived())
    item['getDatePublished'] = self.ulocalized_time(
        obj.getDatePublished())

    deviation = sample.getSamplingDeviation()
    item['SamplingDeviation'] = deviation and deviation.Title() or ''
    priority = obj.getPriority()
    item['Priority'] = ''  # priority.Title()

    item['getStorageLocation'] = sample.getStorageLocation() and \
        sample.getStorageLocation().Title() or ''
    item['AdHoc'] = sample.getAdHoc() and True or ''

    after_icons = ""
    state = self.workflow.getInfoFor(obj, 'worksheetanalysis_review_state')
    if state == 'assigned':
        after_icons += "<img src='%s/++resource++bika.lims.images/worksheet.png' title='%s'/>" % \
            (self.portal_url, t(_("All analyses assigned")))
    if self.workflow.getInfoFor(obj, 'review_state') == 'invalid':
        after_icons += "<img src='%s/++resource++bika.lims.images/delete.png' title='%s'/>" % \
            (self.portal_url, t(_("Results have been withdrawn")))
    if obj.getLate():
        after_icons += "<img src='%s/++resource++bika.lims.images/late.png' title='%s'>" % \
            (self.portal_url, t(_("Late Analyses")))
    if samplingdate > DateTime():
        after_icons += "<img src='%s/++resource++bika.lims.images/calendar.png' title='%s'>" % \
            (self.portal_url, t(_("Future dated sample")))
    if obj.getInvoiceExclude():
        after_icons += "<img src='%s/++resource++bika.lims.images/invoice_exclude.png' title='%s'>" % \
            (self.portal_url, t(_("Exclude from invoice")))
    if sample.getSampleType().getHazardous():
        after_icons += "<img src='%s/++resource++bika.lims.images/hazardous.png' title='%s'>" % \
            (self.portal_url, t(_("Hazardous")))
    if after_icons:
        item['after']['getRequestID'] = after_icons

    item['Created'] = self.ulocalized_time(obj.created())

    contact = obj.getContact()
    if contact:
        item['ClientContact'] = contact.Title()
        item['replace']['ClientContact'] = "<a href='%s'>%s</a>" % \
            (contact.absolute_url(), contact.Title())
    else:
        item['ClientContact'] = ""

    SamplingWorkflowEnabled = sample.getSamplingWorkflowEnabled()
    if SamplingWorkflowEnabled and not samplingdate > DateTime():
        datesampled = self.ulocalized_time(sample.getDateSampled())
        if not datesampled:
            datesampled = self.ulocalized_time(DateTime(), long_format=1)
            item['class']['getDateSampled'] = 'provisional'
        sampler = sample.getSampler().strip()
        if sampler:
            item['replace']['getSampler'] = self.user_fullname(sampler)
        if 'Sampler' in member.getRoles() and not sampler:
            sampler = member.id
            item['class']['getSampler'] = 'provisional'
    else:
        datesampled = ''
        sampler = ''
    item['getDateSampled'] = datesampled
    item['getSampler'] = sampler

    # sampling workflow - inline edits for Sampler and Date Sampled
    checkPermission = self.context.portal_membership.checkPermission
    state = self.workflow.getInfoFor(obj, 'review_state')
    if state == 'to_be_sampled' \
       and checkPermission(SampleSample, obj) \
       and not samplingdate > DateTime():
        item['required'] = ['getSampler', 'getDateSampled']
        item['allow_edit'] = ['getSampler', 'getDateSampled']
        samplers = getUsers(sample, ['Sampler', 'LabManager', 'Manager'])
        username = member.getUserName()
        users = [({'ResultValue': u,
                   'ResultText': samplers.getValue(u)})
                 for u in samplers]
        item['choices'] = {'getSampler': users}
        sampler = sampler or \
            (username in samplers.keys() and username) or ''
        item['getSampler'] = sampler

    # These don't exist on ARs
    # XXX This should be a list of preservers...
    item['getPreserver'] = ''
    item['getDatePreserved'] = ''

    # inline edits for Preserver and Date Preserved
    checkPermission = self.context.portal_membership.checkPermission
    if checkPermission(PreserveSample, obj):
        item['required'] = ['getPreserver', 'getDatePreserved']
        item['allow_edit'] = ['getPreserver', 'getDatePreserved']
        preservers = getUsers(obj, ['Preserver', 'LabManager', 'Manager'])
        username = member.getUserName()
        users = [({'ResultValue': u,
                   'ResultText': preservers.getValue(u)})
                 for u in preservers]
        item['choices'] = {'getPreserver': users}
        preserver = username in preservers.keys() and username or ''
        item['getPreserver'] = preserver
        item['getDatePreserved'] = self.ulocalized_time(
            DateTime(), long_format=1)
        item['class']['getPreserver'] = 'provisional'
        item['class']['getDatePreserved'] = 'provisional'

    # Submitting user may not verify results
    if item['review_state'] == 'to_be_verified' and \
       not checkPermission(VerifyOwnResults, obj):
        self_submitted = False
        try:
            review_history = list(
                self.workflow.getInfoFor(obj, 'review_history'))
            review_history.reverse()
            for event in review_history:
                if event.get('action') == 'submit':
                    if event.get('actor') == member.getId():
                        self_submitted = True
                    break
            if self_submitted:
                item['after']['state_title'] = \
                    "<img src='++resource++bika.lims.images/submitted-by-current-user.png' title='%s'/>" % \
                    t(_("Cannot verify: Submitted by current user"))
        except Exception:
            pass

    return item
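# Illustrative only: the inline-edit contract used above. bika_listing
# renders an editable selector for any column listed in
# item['allow_edit'] whose options are supplied via item['choices']:
#
#   item['allow_edit'] = ['getSampler', 'getDateSampled']
#   item['choices'] = {'getSampler': [
#       {'ResultValue': 'analyst1', 'ResultText': 'Analyst One'},
#   ]}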
def process(self):
    self._parser.parse()
    parsed = self._parser.resume()
    self._errors = self._parser.errors
    self._warns = self._parser.warns
    self._logs = self._parser.logs
    self._priorizedsearchcriteria = ''

    if parsed is False:
        return False

    # Allowed analysis states
    allowed_ar_states_msg = [t(_(s)) for s in self.getAllowedARStates()]
    allowed_an_states_msg = [
        t(_(s)) for s in self.getAllowedAnalysisStates()
    ]
    self.log("Allowed Analysis Request states: ${allowed_states}",
             mapping={'allowed_states': ', '.join(allowed_ar_states_msg)})
    self.log("Allowed analysis states: ${allowed_states}",
             mapping={'allowed_states': ', '.join(allowed_an_states_msg)})

    # Exclude non-existing ACODEs
    acodes = []
    ancount = 0
    arprocessed = []
    instprocessed = []
    importedars = {}
    importedinsts = {}
    rawacodes = self._parser.getAnalysisKeywords()
    exclude = self.getKeywordsToBeExcluded()
    for acode in rawacodes:
        if acode in exclude:
            continue
        service = self.bsc(getKeyword=acode)
        if not service:
            self.warn('Service keyword ${analysis_keyword} not found',
                      mapping={"analysis_keyword": acode})
        else:
            acodes.append(acode)
    if len(acodes) == 0:
        self.err("Service keywords: no matches found")

    searchcriteria = self.getIdSearchCriteria()
    # self.log(_("Search criteria: %s") % (', '.join(searchcriteria)))
    for objid, results in self._parser.getRawResults().iteritems():
        # Allow more than one result for the same sample and analysis.
        # Needed for calibration tests.
        for result in results:
            analyses = self._getZODBAnalyses(objid)
            inst = None
            if len(analyses) == 0 and self.instrument_uid:
                # No registered analyses found, but maybe we need to
                # create them first if an instrument id has been set
                insts = self.bsc(portal_type='Instrument',
                                 UID=self.instrument_uid)
                if len(insts) == 0:
                    # No instrument found
                    self.err(
                        "No Analysis Request with '${allowed_ar_states}' "
                        "states found, and no QC analyses found for ${object_id}",
                        mapping={
                            "allowed_ar_states": ', '.join(
                                allowed_ar_states_msg),
                            "object_id": objid
                        })
                    self.err("Instrument not found")
                    continue

                inst = insts[0].getObject()

                # Create a new ReferenceAnalysis and link it to the
                # Instrument.
                # Here we have an objid (i.e. R01200012) and
                # a dict with results (the key is the AS keyword).
                # How can we create a ReferenceAnalysis if we don't know
                # which ReferenceSample we might use?
                # Ok. The objid HAS to be the ReferenceSample code.
                refsample = self.bc(portal_type='ReferenceSample',
                                    id=objid)
                if refsample and len(refsample) == 1:
                    refsample = refsample[0].getObject()
                elif refsample and len(refsample) > 1:
                    # More than one reference sample found!
                    self.err(
                        "More than one reference sample found for '${object_id}'",
                        mapping={"object_id": objid})
                    continue
                else:
                    # No reference sample found!
                    self.err("No Reference Sample found for ${object_id}",
                             mapping={"object_id": objid})
                    continue

                # For each acode, create a ReferenceAnalysis and attach it
                # to the Reference Sample
                reference_type = 'b' if refsample.getBlank() else 'c'
                services = self.bsc(portal_type='AnalysisService')
                service_uids = [service.UID for service in services
                                if service.getObject().getKeyword()
                                in result.keys()]
                analyses = inst.addReferences(refsample, service_uids)

            elif len(analyses) == 0:
                # No analyses found
                self.err(
                    "No Analysis Request with '${allowed_ar_states}' "
                    "states nor QC analyses found for ${object_id}",
                    mapping={
                        "allowed_ar_states": ', '.join(
                            allowed_ar_states_msg),
                        "object_id": objid
                    })
                continue

            # Look for timestamp
            capturedate = result.get('DateTime', {}).get('DateTime', None)
            if capturedate:
                del result['DateTime']
            for acode, values in result.iteritems():
                if acode not in acodes:
                    # Analysis keyword doesn't exist
                    continue

                ans = [analysis for analysis in analyses
                       if analysis.getKeyword() == acode]
                if len(ans) > 1:
                    self.err(
                        "More than one analysis found for ${object_id} and ${analysis_keyword}",
                        mapping={
                            "object_id": objid,
                            "analysis_keyword": acode
                        })
                    continue
                elif len(ans) == 0:
                    self.err(
                        "No analyses found for ${object_id} and ${analysis_keyword}",
                        mapping={
                            "object_id": objid,
                            "analysis_keyword": acode
                        })
                    continue

                analysis = ans[0]

                if capturedate:
                    values['DateTime'] = capturedate
                processed = self._process_analysis(objid, analysis, values)
                if processed:
                    ancount += 1
                    if inst:
                        # Calibration Test (import to Instrument)
                        instprocessed.append(inst.UID())
                        importedinst = inst.title in importedinsts.keys() \
                            and importedinsts[inst.title] or []
                        if acode not in importedinst:
                            importedinst.append(acode)
                        importedinsts[inst.title] = importedinst
                    else:
                        ar = analysis.portal_type == 'Analysis' \
                            and analysis.aq_parent or None
                        if ar and ar.UID:
                            # Set AR imported info
                            arprocessed.append(ar.UID())
                            importedar = \
                                ar.getRequestID() in importedars.keys() \
                                and importedars[ar.getRequestID()] or []
                            if acode not in importedar:
                                importedar.append(acode)
                            importedars[ar.getRequestID()] = importedar

                    # Create the AttachmentType for mime type if not exists
                    attuid = None
                    attachmentType = self.bsc(
                        portal_type="AttachmentType",
                        title=self._parser.getAttachmentFileType())
                    if len(attachmentType) == 0:
                        try:
                            folder = \
                                self.context.bika_setup.bika_attachmenttypes
                            obj = _createObjectByType(
                                "AttachmentType", folder, tmpID())
                            obj.edit(
                                title=self._parser.getAttachmentFileType(),
                                description="Autogenerated file type")
                            obj.unmarkCreationFlag()
                            renameAfterCreation(obj)
                            attuid = obj.UID()
                        except Exception:
                            attuid = None
                            self.err(
                                "Unable to create the Attachment Type ${mime_type}",
                                mapping={
                                    "mime_type":
                                        self._parser.getFileMimeType()
                                })
                    else:
                        attuid = attachmentType[0].UID

                    if attuid is not None:
                        try:
                            # Attach the file to the Analysis
                            wss = analysis.getBackReferences(
                                'WorksheetAnalysis')
                            if wss and len(wss) > 0:
                                # TODO: See if we can avoid using the WS
                                # and use the Analysis directly instead
                                # (useful for CalibrationTest)
                                ws = wss[0]
                                attachment = _createObjectByType(
                                    "Attachment", ws, tmpID())
                                attachment.edit(
                                    AttachmentFile=self._parser.getInputFile(),
                                    AttachmentType=attuid,
                                    AttachmentKeys='Results, Automatic import')
                                attachment.reindexObject()
                                others = analysis.getAttachment()
                                attachments = []
                                for other in others:
                                    if other.getAttachmentFile().filename != \
                                       attachment.getAttachmentFile().filename:
                                        attachments.append(other.UID())
                                attachments.append(attachment.UID())
                                analysis.setAttachment(attachments)
                        except Exception:
                            # self.err(_("Unable to attach results file '${file_name}' to AR ${request_id}",
                            #            mapping={"file_name": self._parser.getInputFile().filename,
                            #                     "request_id": ar.getRequestID()}))
                            pass

    # Calculate analysis dependencies
    for aruid in arprocessed:
        ar = self.bc(portal_type='AnalysisRequest', UID=aruid)
        ar = ar[0].getObject()
        analyses = ar.getAnalyses()
        for analysis in analyses:
            analysis = analysis.getObject()
            if analysis.calculateResult(True, True):
                self.log(
                    "${request_id} calculated result for "
                    "'${analysis_keyword}': '${analysis_result}'",
                    mapping={
                        "request_id": ar.getRequestID(),
                        "analysis_keyword": analysis.getKeyword(),
                        "analysis_result": str(analysis.getResult())
                    })

    # Not sure if there's any reason why ReferenceAnalyses have not
    # defined the method calculateResult...
    # Needs investigation.
    # for instuid in instprocessed:
    #     inst = self.bsc(portal_type='Instrument', UID=instuid)[0].getObject()
    #     analyses = inst.getAnalyses()
    #     for analysis in analyses:
    #         if (analysis.calculateResult(True, True)):
    #             self.log(_("%s calculated result for '%s': '%s'") %
    #                      (inst.title, analysis.getKeyword(),
    #                       str(analysis.getResult())))

    for arid, acodes in importedars.iteritems():
        acodesmsg = ["Analysis %s" % acod for acod in acodes]
        self.log(
            "${request_id}: ${analysis_keywords} imported successfully",
            mapping={
                "request_id": arid,
                "analysis_keywords": acodesmsg
            })

    for instid, acodes in importedinsts.iteritems():
        acodesmsg = ["Analysis %s" % acod for acod in acodes]
        msg = "%s: %s %s" % (instid, ", ".join(acodesmsg),
                             "imported successfully")
        self.log(msg)

    if self.instrument_uid:
        self.log(
            "Import finished successfully: ${nr_updated_ars} ARs, "
            "${nr_updated_instruments} Instruments and ${nr_updated_results} "
            "results updated",
            mapping={
                "nr_updated_ars": str(len(importedars)),
                "nr_updated_instruments": str(len(importedinsts)),
                "nr_updated_results": str(ancount)
            })
    else:
        self.log(
            "Import finished successfully: ${nr_updated_ars} ARs and "
            "${nr_updated_results} results updated",
            mapping={
                "nr_updated_ars": str(len(importedars)),
                "nr_updated_results": str(ancount)
            })