def __call__(self):
    """Render the AR log view, first adding status messages for
    invalidated ARs and for ARs auto-generated by a retraction.
    """
    ar = self.context
    workflow = getToolByName(ar, 'portal_workflow')
    # If is a retracted AR, show the link to child AR and show a warn msg
    if workflow.getInfoFor(ar, 'review_state') == 'invalid':
        # hasattr guard: not every context type provides the accessor
        childar = hasattr(ar, 'getChildAnalysisRequest') \
            and ar.getChildAnalysisRequest() or None
        childid = childar and childar.getRequestID() or None
        message = _('This Analysis Request has been withdrawn and is shown '
                    'for trace-ability purposes only. Retest: '
                    '${retest_child_id}.',
                    mapping={'retest_child_id': safe_unicode(childid) or ''})
        self.context.plone_utils.addPortalMessage(message, 'warning')
    # If is an AR automatically generated due to a Retraction, show it's
    # parent AR information
    if hasattr(ar, 'getParentAnalysisRequest') \
            and ar.getParentAnalysisRequest():
        par = ar.getParentAnalysisRequest()
        message = _('This Analysis Request has been '
                    'generated automatically due to '
                    'the retraction of the Analysis '
                    'Request ${retracted_request_id}.',
                    mapping={'retracted_request_id':
                             safe_unicode(par.getRequestID())})
        self.context.plone_utils.addPortalMessage(t(message), 'info')
    # Delegate the actual rendering to the base LogView
    template = LogView.__call__(self)
    return template
def Title(self):
    """Return the title when its widget is visible, the Batch ID
    (object id) otherwise, utf-8 encoded.
    """
    title_field = self.Schema().getField('title')
    if not title_field.widget.visible:
        return safe_unicode(self.id).encode('utf-8')
    return safe_unicode(self.title).encode('utf-8')
def getMaintenanceTypes(self):
    """Return the current list of maintenance types as a DisplayList
    of (value, translated utf-8 title) pairs.
    """
    values = ('Preventive', 'Repair', 'Enhancement')
    pairs = [(v, safe_unicode(_(v)).encode('utf-8')) for v in values]
    return DisplayList(pairs)
def workflow_action_preserve(self):
    """Handle the 'preserve' workflow action submitted from the
    partition table shown in AR and Sample views.

    For each selected, active partition the user has PreserveSample
    permission on, the Preserver and DatePreserved values are read
    from the form and written to the partition; the workflow action is
    only fired when both values are present.  Portal messages
    summarise transitioned vs incomplete partitions, then the browser
    is redirected back to the referer.
    """
    form = self.request.form
    workflow = getToolByName(self.context, 'portal_workflow')
    action, came_from = WorkflowAction._get_form_workflow_action(self)
    checkPermission = self.context.portal_membership.checkPermission
    # Partition Preservation
    # the partition table shown in AR and Sample views sends it's
    # action button submits here.
    objects = WorkflowAction._get_selected_items(self)
    transitioned = []
    incomplete = []
    for obj_uid, obj in objects.items():
        part = obj
        # can't transition inactive items
        if workflow.getInfoFor(part, 'inactive_state', '') == 'inactive':
            continue
        if not checkPermission(PreserveSample, part):
            continue
        # grab this object's Preserver and DatePreserved from the form
        Preserver = form['getPreserver'][0][obj_uid].strip()
        Preserver = Preserver and Preserver or ''
        DatePreserved = form['getDatePreserved'][0][obj_uid].strip()
        DatePreserved = DatePreserved and DateTime(DatePreserved) or ''
        # write them to the sample
        part.setPreserver(Preserver)
        part.setDatePreserved(DatePreserved)
        # transition the object if both values are present
        if Preserver and DatePreserved:
            workflow.doActionFor(part, action)
            transitioned.append(part.id)
        else:
            incomplete.append(part.id)
        part.reindexObject()
        part.aq_parent.reindexObject()
    message = None
    if len(transitioned) > 1:
        message = _('${items} are waiting to be received.',
                    mapping={'items': safe_unicode(', '.join(transitioned))})
        self.context.plone_utils.addPortalMessage(message, 'info')
    elif len(transitioned) == 1:
        message = _('${item} is waiting to be received.',
                    mapping={'item': safe_unicode(', '.join(transitioned))})
        self.context.plone_utils.addPortalMessage(message, 'info')
    if not message:
        message = _('No changes made.')
        self.context.plone_utils.addPortalMessage(message, 'info')
    if len(incomplete) > 1:
        message = _('${items} are missing Preserver or Date Preserved',
                    mapping={'items': safe_unicode(', '.join(incomplete))})
        self.context.plone_utils.addPortalMessage(message, 'error')
    elif len(incomplete) == 1:
        message = _('${item} is missing Preserver or Preservation Date',
                    mapping={'item': safe_unicode(', '.join(incomplete))})
        self.context.plone_utils.addPortalMessage(message, 'error')
    # Redirect back to where the form was submitted from
    self.destination_url = self.request.get_header(
        "referer", self.context.absolute_url())
    self.request.response.redirect(self.destination_url)
def getTaskTypes(self):
    """Return the current list of task types as a DisplayList of
    (value, translated utf-8 title) pairs.
    """
    values = ('Calibration', 'Enhancement', 'Preventive',
              'Repair', 'Validation')
    pairs = [(v, safe_unicode(_(v)).encode('utf-8')) for v in values]
    return DisplayList(pairs)
def __call__(self):
    """JSON autocomplete source for Sample Point titles.

    Reads 'term' (search text) and optionally 'sampletype' from the
    request.  When a sample type is given, its linked sample points
    are offered; otherwise client-local and lab-wide sample points
    matching the term are returned, lab ones prefixed with "Lab: ".
    """
    CheckAuthenticator(self.request)
    bsc = getToolByName(self.context, 'bika_setup_catalog')
    term = safe_unicode(self.request.get('term', '')).lower()
    items = []
    if not term:
        return json.dumps(items)
    # Strip "Lab: " from sample point title
    term = term.replace("%s: " % _("Lab"), '')
    sampletype = safe_unicode(self.request.get('sampletype', ''))
    if sampletype and len(sampletype) > 1:
        st = bsc(portal_type = "SampleType",
                 title = sampletype,
                 inactive_state = 'active')
        if not st:
            return json.dumps([])
        st = st[0].getObject()
        items = [o.Title() for o in st.getSamplePoints()]
    if not items:
        client_items = lab_items = []
        # User (client) sample points
        if self.context.portal_type in ('Client', 'AnalysisRequest'):
            if self.context.portal_type == 'Client':
                client_path = self.context.getPhysicalPath()
            else:
                # AnalysisRequest: the client is the parent container
                client_path = self.context.aq_parent.getPhysicalPath()
            client_items = list(
                bsc(portal_type = "SamplePoint",
                    path = {"query": "/".join(client_path), "level" : 0 },
                    inactive_state = 'active',
                    sort_on='sortable_title'))
        # Global (lab) sample points
        lab_path = self.context.bika_setup.bika_samplepoints.getPhysicalPath()
        lab_items = list(
            bsc(portal_type = "SamplePoint",
                path = {"query": "/".join(lab_path), "level" : 0 },
                inactive_state = 'active',
                sort_on='sortable_title'))
        # Brains expose 'title'; objects expose callable 'Title'
        client_items = [callable(s.Title) and s.Title() or s.title
                        for s in self.filter_list(client_items, term)]
        lab_items = [callable(s.Title) and s.Title() or s.title
                     for s in self.filter_list(lab_items, term)]
        lab_items = ["%s: %s" % (_("Lab"), safe_unicode(i))
                     for i in lab_items]
        items = client_items + lab_items
    return json.dumps(items)
def __call__(self, value, *args, **kwargs):
    """Validate that *value* is unique for this field among siblings.

    Compares the value against the same-named field of every other
    object in the parent container; returns True when unique (or when
    the value is unchanged), otherwise a translated utf-8 error
    message string.
    """
    instance = kwargs['instance']
    fieldname = kwargs['field'].getName()
    # request = kwargs.get('REQUEST', {})
    # form = request.get('form', {})
    translate = getToolByName(instance, 'translation_service').translate
    # Unchanged value: nothing to check
    if value == instance.get(fieldname):
        return True
    for item in aq_parent(instance).objectValues():
        if hasattr(item, 'UID') and item.UID() != instance.UID() and \
           fieldname in item.Schema() and \
           str(item.Schema()[fieldname].get(item)) == str(value):
            # We have to compare them as strings because
            # even if a number (as an id) is saved inside
            # a string widget and string field, it will be
            # returned as an int. I don't know if it is
            # caused because is called with
            # <item.Schema()[fieldname].get(item)>,
            # but it happens...
            msg = _("Validation failed: '${value}' is not unique",
                    mapping={'value': safe_unicode(value)})
            return to_utf8(translate(msg))
    return True
def __call__(self, value, *args, **kwargs): instance = kwargs['instance'] # fieldname = kwargs['field'].getName() # request = kwargs.get('REQUEST', {}) # form = request.get('form', {}) translate = getToolByName(instance, 'translation_service').translate bsc = getToolByName(instance, 'bika_setup_catalog') # uc = getToolByName(instance, 'uid_catalog') failures = [] for category in value: if not category: continue services = bsc(portal_type="AnalysisService", getCategoryUID=category) for service in services: service = service.getObject() calc = service.getCalculation() deps = calc and calc.getDependentServices() or [] for dep in deps: if dep.getCategoryUID() not in value: title = dep.getCategoryTitle() if title not in failures: failures.append(title) if failures: msg = _("Validation failed: The selection requires the following " "categories to be selected: ${categories}", mapping={'categories': safe_unicode(','.join(failures))}) return to_utf8(translate(msg)) return True
def __call__(self, value, *args, **kwargs):
    """Validate a calculation formula string.

    Every "[keyword]" referenced in *value* must be either the keyword
    of an active AnalysisService or one of the interim field keywords
    submitted in the form.  "[keyword.wildcard]" suffixes are only
    allowed on service keywords (not interims) and must be one of
    LDL / UDL / BELOWLDL / ABOVEUDL.  Returns True when valid,
    otherwise a translated utf-8 error message.
    """
    if not value:
        return True
    instance = kwargs['instance']
    # fieldname = kwargs['field'].getName()
    request = kwargs.get('REQUEST', {})
    form = request.form
    interim_fields = form.get('InterimFields')
    translate = getToolByName(instance, 'translation_service').translate
    bsc = getToolByName(instance, 'bika_setup_catalog')
    interim_keywords = interim_fields and \
        [f['keyword'] for f in interim_fields] or []
    # Plain keywords: bracketed tokens without a '.' wildcard part
    keywords = re.compile(r"\[([^\.^\]]+)\]").findall(value)
    for keyword in keywords:
        # Check if the service keyword exists and is active.
        dep_service = bsc(getKeyword=keyword, inactive_state="active")
        if not dep_service and \
           not keyword in interim_keywords:
            msg = _("Validation failed: Keyword '${keyword}' is invalid",
                    mapping={'keyword': safe_unicode(keyword)})
            return to_utf8(translate(msg))
    # Wildcards
    # LIMS-1769 Allow to use LDL and UDL in calculations
    # https://jira.bikalabs.com/browse/LIMS-1769
    allowedwds = ['LDL', 'UDL', 'BELOWLDL', 'ABOVEUDL']
    keysandwildcards = re.compile(r"\[([^\]]+)\]").findall(value)
    keysandwildcards = [k for k in keysandwildcards if '.' in k]
    keysandwildcards = [k.split('.',1) for k in keysandwildcards]
    # Wildcards attached to non-service (interim) keywords are illegal
    errwilds = [k[1] for k in keysandwildcards if k[0] not in keywords]
    if len(errwilds) > 0:
        msg = _("Wildcards for interims are not allowed: ${wildcards}",
                mapping={'wildcards': safe_unicode(', '.join(errwilds))})
        return to_utf8(translate(msg))
    wildcards = [k[1] for k in keysandwildcards if k[0] in keywords]
    wildcards = [wd for wd in wildcards if wd not in allowedwds]
    if len(wildcards) > 0:
        msg = _("Invalid wildcards found: ${wildcards}",
                mapping={'wildcards': safe_unicode(', '.join(wildcards))})
        return to_utf8(translate(msg))
    return True
def __call__(self):
    """Copy-service form controller.

    First pass (no 'copy_form_submitted' in the request): resolve the
    selected service UIDs into objects and render the form template.
    Second pass: create one copy per (title, keyword) row; on the
    first row missing a title or keyword, roll back the transaction
    savepoint and abort.  Finally reports what was created and
    redirects to the context.
    """
    uc = getToolByName(self.context, 'uid_catalog')
    if 'copy_form_submitted' not in self.request:
        uids = self.request.form.get('uids', [])
        self.services = []
        for uid in uids:
            proxies = uc(UID=uid)
            if proxies:
                self.services.append(proxies[0].getObject())
        return self.template()
    else:
        # Savepoint lets us undo partially-created copies on failure
        self.savepoint = savepoint()
        sources = self.request.form.get('uids', [])
        titles = self.request.form.get('dst_title', [])
        keywords = self.request.form.get('dst_keyword', [])
        self.created = []
        for i, s in enumerate(sources):
            if not titles[i]:
                # NOTE(review): validation failures are reported with
                # the 'info' message type; 'error' may be intended.
                message = _('Validation failed: title is required')
                self.context.plone_utils.addPortalMessage(message, 'info')
                self.savepoint.rollback()
                self.created = []
                break
            if not keywords[i]:
                message = _('Validation failed: keyword is required')
                self.context.plone_utils.addPortalMessage(message, 'info')
                self.savepoint.rollback()
                self.created = []
                break
            title = self.copy_service(s, titles[i], keywords[i])
            if title:
                self.created.append(title)
        if len(self.created) > 1:
            message = t(_(
                '${items} were successfully created.',
                mapping={'items': safe_unicode(', '.join(self.created))}))
        elif len(self.created) == 1:
            message = t(_(
                '${item} was successfully created.',
                mapping={'item': safe_unicode(self.created[0])}))
        else:
            message = _('No new items were created.')
        self.context.plone_utils.addPortalMessage(message, 'info')
        self.request.response.redirect(self.context.absolute_url())
def __call__(self, value, *args, **kwargs): instance = kwargs['instance'] # fieldname = kwargs['field'].getName() # request = kwargs.get('REQUEST', {}) # form = request.get('form', {}) translate = getToolByName(instance, 'translation_service').translate if re.findall(r"[^A-Za-z\w\d\-\_]", value): return _("Validation failed: keyword contains invalid characters") # check the value against all AnalysisService keywords # this has to be done from catalog so we don't # clash with ourself bsc = getToolByName(instance, 'bika_setup_catalog') services = bsc(portal_type='AnalysisService', getKeyword=value) for service in services: if service.UID != instance.UID(): msg = _("Validation failed: '${title}': This keyword " "is already in use by service '${used_by}'", mapping={'title': safe_unicode(value), 'used_by': safe_unicode(service.Title)}) return to_utf8(translate(msg)) calc = hasattr(instance, 'getCalculation') and \ instance.getCalculation() or None our_calc_uid = calc and calc.UID() or '' # check the value against all Calculation Interim Field ids calcs = [c for c in bsc(portal_type='Calculation')] for calc in calcs: calc = calc.getObject() interim_fields = calc.getInterimFields() if not interim_fields: continue for field in interim_fields: if field['keyword'] == value and our_calc_uid != calc.UID(): msg = _("Validation failed: '${title}': This keyword " "is already in use by calculation '${used_by}'", mapping={'title': safe_unicode(value), 'used_by': safe_unicode(calc.Title())}) return to_utf8(translate(msg)) return True
def checkInstrumentsValidity(self):
    """ Checks the validity of the instruments used in the Analyses
        If an analysis with an invalid instrument (out-of-date or
        with calibration tests failed) is found, a warn message will
        be displayed.
    """
    invalid = []
    # getAnalyses() returns catalog brains; fetch the real objects
    ans = [a.getObject() for a in self.context.getAnalyses()]
    for an in ans:
        valid = an.isInstrumentValid()
        if not valid:
            inv = '%s (%s)' % (safe_unicode(an.Title()),
                               safe_unicode(an.getInstrument().Title()))
            # de-duplicate analysis/instrument pairs
            if inv not in invalid:
                invalid.append(inv)
    if len(invalid) > 0:
        message = _("Some analyses use out-of-date or uncalibrated "
                    "instruments. Results edition not allowed")
        message = "%s: %s" % (message, (', '.join(invalid)))
        # Fix: use 'warning' (the standard Plone status-message type,
        # used by every other addPortalMessage call in this module)
        # instead of the unrecognised 'warn'.
        self.context.plone_utils.addPortalMessage(message, 'warning')
def __call__(self):
    """JSON autocomplete source for Sample Type titles.

    Reads 'term' and optionally 'samplepoint' from the request; when a
    sample point is given its linked sample types are offered, falling
    back to all active sample types filtered by the term.
    """
    CheckAuthenticator(self.request)
    bsc = getToolByName(self.context, 'bika_setup_catalog')
    term = safe_unicode(self.request.get('term', '')).lower()
    items = []
    if not term:
        return json.dumps(items)
    samplepoint = safe_unicode(self.request.get('samplepoint', ''))
    # Strip "Lab: " from sample point titles
    samplepoint = samplepoint.replace("%s: " % _("Lab"), '')
    if samplepoint and len(samplepoint) > 1:
        sp = bsc(portal_type = "SamplePoint",
                 inactive_state = 'active',
                 title=samplepoint)
        if not sp:
            return json.dumps([])
        sp = sp[0].getObject()
        items = sp.getSampleTypes()
    if not items:
        items = bsc(portal_type = "SampleType",
                    inactive_state = 'active',
                    sort_on='sortable_title')
        if term and len(term) < 3:
            # Items that start with A or AA
            items = [s.getObject() for s in items
                     if s.title.lower().startswith(term)]
            if not items:
                # or, items that contain A or AA
                # NOTE(review): 'items' was just emptied by the filter
                # above, so this fallback always yields [] — confirm
                # whether the original brain list was meant here.
                items = [s.getObject() for s in items
                         if s.title.lower().find(term) > -1]
        else:
            # or, items that contain term.
            items = [s.getObject() for s in items
                     if s.title.lower().find(term) > -1]
    # Brains expose 'title'; objects expose callable 'Title'
    items = [callable(s.Title) and s.Title() or s.title for s in items]
    return json.dumps(items)
def sendEmail(self):
    """Email the rendered template to the manager of every department
    involved in self.analyses.

    Each department and manager is processed at most once (tracked in
    'added' by UID); managers without an email address are skipped.
    Raises SMTPServerDisconnected unchanged and converts
    SMTPRecipientsRefused into a WorkflowException.
    """
    added = []
    recipients = []
    for analysis in self.analyses:
        department = analysis.getService().getDepartment()
        if department is None:
            continue
        department_id = department.UID()
        if department_id in added:
            continue
        added.append(department_id)
        manager = department.getManager()
        if manager is None:
            continue
        manager_id = manager.UID()
        if manager_id not in added and manager.getEmailAddress():
            added.append(manager_id)
            name = safe_unicode(manager.getFullname()).encode('utf-8')
            email = safe_unicode(manager.getEmailAddress()).encode('utf-8')
            recipients.append(formataddr((encode_header(name), email)))
    html = safe_unicode(self.template()).encode('utf-8')
    lab = self.context.bika_setup.laboratory
    mime_msg = MIMEMultipart('related')
    mime_msg['Subject'] = self.title
    mime_msg['From'] = formataddr(
        (encode_header(lab.getName()), lab.getEmailAddress()))
    # Fix: join collected addresses instead of string-prepending ', '
    # in the loop, which produced a To: header starting with ", ".
    mime_msg['To'] = ', '.join(recipients)
    mime_msg.preamble = 'This is a multi-part MIME message.'
    msg_txt = MIMEText(html, _subtype='html')
    mime_msg.attach(msg_txt)
    # Send the email
    try:
        host = getToolByName(self.context, 'MailHost')
        host.send(mime_msg.as_string(), immediate=True)
    except SMTPServerDisconnected as msg:
        raise SMTPServerDisconnected(msg)
    except SMTPRecipientsRefused as msg:
        raise WorkflowException(str(msg))
def __call__(self):
    """JSON autocomplete source for Storage Location titles.

    Returns client-local locations first, then lab-wide ones prefixed
    with "Lab: ", both filtered by the request's 'term'.
    """
    CheckAuthenticator(self.request)
    bsc = getToolByName(self.context, 'bika_setup_catalog')
    term = safe_unicode(self.request.get('term', '')).lower()
    if not term:
        return json.dumps([])
    client_items = lab_items = []
    # User (client) storage locations
    if self.context.portal_type == 'Client':
        client_path = self.context.getPhysicalPath()
        client_items = list(
            bsc(portal_type = "StorageLocation",
                path = {"query": "/".join(client_path), "level" : 0 },
                inactive_state = 'active',
                sort_on='sortable_title'))
    # Global (lab) storage locations
    lab_path = \
        self.context.bika_setup.bika_storagelocations.getPhysicalPath()
    lab_items = list(
        bsc(portal_type = "StorageLocation",
            path = {"query": "/".join(lab_path), "level" : 0 },
            inactive_state = 'active',
            sort_on='sortable_title'))
    # Brains expose 'title'; objects expose callable 'Title'
    client_items = [callable(s.Title) and s.Title() or s.title
                    for s in self.filter_list(client_items, term)]
    lab_items = [callable(s.Title) and s.Title() or s.title
                 for s in self.filter_list(lab_items, term)]
    lab_items = ["%s: %s" % (_("Lab"), safe_unicode(i))
                 for i in lab_items]
    items = client_items + lab_items
    return json.dumps(items)
def workflow_script_activate(self):
    """Guard re-activation of a calculation: abort with an error
    message when any service this calculation depends on is inactive.
    """
    wf = getToolByName(self, 'portal_workflow')
    pu = getToolByName(self, 'plone_utils')
    # A calculation cannot be re-activated if services it depends on
    # are deactivated.
    inactive_services = [
        service.Title()
        for service in self.getDependentServices()
        if wf.getInfoFor(service, "inactive_state") == "inactive"
    ]
    if inactive_services:
        msg = _("Cannot activate calculation, because the following "
                "service dependencies are inactive: ${inactive_services}",
                mapping={'inactive_services':
                         safe_unicode(", ".join(inactive_services))})
        pu.addPortalMessage(msg, 'error')
        transaction.get().abort()
        raise WorkflowException
def workflow_script_deactivate(self):
    """Guard deactivation of a calculation: abort with an error
    message when any active service still uses this calculation.
    """
    bsc = getToolByName(self, 'bika_setup_catalog')
    pu = getToolByName(self, 'plone_utils')
    # A calculation cannot be deactivated if active services are using it.
    own_uid = self.UID()
    calc_services = []
    brains = bsc(portal_type="AnalysisService", inactive_state="active")
    for brain in brains:
        service = brain.getObject()
        calc = service.getCalculation()
        if calc and calc.UID() == own_uid:
            calc_services.append(service.Title())
    if calc_services:
        msg = _('Cannot deactivate calculation, because it is in use by the '
                'following services: ${calc_services}',
                mapping={'calc_services':
                         safe_unicode(", ".join(calc_services))})
        pu.addPortalMessage(msg, 'error')
        transaction.get().abort()
        raise WorkflowException
def publish(self):
    """ Publish the AR report/s. Generates a results pdf file
        associated to each AR, sends an email with the report to
        the lab manager and sends a notification (usually an email
        with the PDF attached) to the AR's contact and CCs.
        Transitions each published AR to statuses 'published',
        'prepublished' or 'republished'. Returns a list with the AR
        identifiers that have been published/prepublished/republished
        (only those 'verified', 'published' or at least have one
        'verified' result).
    """
    if len(self._ars) > 1:
        # Multiple ARs: delegate each one to its own publish view and
        # collect the ids of the ARs it reports as published.
        published_ars = []
        for ar in self._ars:
            arpub = AnalysisRequestPublishView(ar, self.request, publish=True)
            ar = arpub.publish()
            published_ars.extend(ar)
        published_ars = [par.id for par in published_ars]
        return published_ars
    # Single AR: render the report template and publish from its HTML
    results_html = safe_unicode(self.template()).encode('utf-8')
    return self.publishFromHTML(results_html)
def sortable_title(portal, title):
    """Convert title to sortable title

    Lowercases and strips the title, zero-pads embedded numbers so
    numeric runs sort naturally, then truncates to 30 characters,
    trying the site charset first and falling back to latin-1/utf-8.
    Returns '' for a falsy title.
    """
    if not title:
        return ''
    def_charset = portal.plone_utils.getSiteEncoding()
    sortabletitle = title.lower().strip()
    # Replace numbers with zero filled numbers
    sortabletitle = num_sort_regex.sub(zero_fill, sortabletitle)
    # Truncate to prevent bloat
    for charset in [def_charset, 'latin-1', 'utf-8']:
        try:
            sortabletitle = safe_unicode(sortabletitle, charset)[:30]
            sortabletitle = sortabletitle.encode(def_charset or 'utf-8')
            break
        except UnicodeError:
            # try the next charset in the fallback list
            pass
        except TypeError:
            # If we get a TypeError if we already have a unicode string
            sortabletitle = sortabletitle[:30]
            break
    return sortabletitle
def __init__(self, context, request):
    """Listing view for accredited analysis services.

    Configures the catalog content filter, the title/description
    (including the lab's accreditation details mapping, interpolated
    into the configurable accreditation page header) and the listing
    columns / review states.
    """
    super(AccreditationView, self).__init__(context, request)
    self.contentFilter = {
        'portal_type': 'AnalysisService',
        'sort_on': 'sortable_title',
        'getAccredited': True,
        'inactive_state': 'active'
    }
    self.context_actions = {}
    self.icon = self.portal_url + \
        "/++resource++bika.lims.images/accredited_big.png"
    self.title = self.context.translate(_("Accreditation"))
    lab = context.bika_setup.laboratory
    accredited = lab.getLaboratoryAccredited()
    # Values available for interpolation in the page header text
    self.mapping = {
        'lab_is_accredited': accredited,
        'lab_name': safe_unicode(lab.getName()),
        'lab_country': safe_unicode(lab.getPhysicalAddress().get('country', '')),
        'confidence': safe_unicode(lab.getConfidence()),
        'accreditation_body_abbr': safe_unicode(lab.getAccreditationBody()),
        'accreditation_body_name': safe_unicode(lab.getAccreditationBodyURL()),
        'accreditation_standard': safe_unicode(lab.getAccreditation()),
        'accreditation_reference': safe_unicode(lab.getAccreditationReference())
    }
    if accredited:
        self.description = t(
            _(safe_unicode(lab.getAccreditationPageHeader()),
              mapping=self.mapping))
    else:
        self.description = t(
            _("The lab is not accredited, or accreditation has "
              "not been configured. "))
    msg = t(_("All Accredited analysis services are listed here."))
    self.description = "%s<p><br/>%s</p>" % (self.description, msg)
    self.show_select_column = False
    request.set('disable_border', 1)
    self.columns = {
        'Title': {
            'title': _('Service'),
            'sortable': False
        },
        'Keyword': {
            'title': _('Keyword'),
            'sortable': False
        },
        'Category': {
            'title': _('Category'),
            'sortable': False
        },
        'Department': {
            'title': _('Department'),
            'sortable': False
        },
        'Instrument': {
            'title': _('Instrument'),
            'sortable': False
        },
        'Unit': {
            'title': _('Unit'),
            'sortable': False
        },
        'Price': {
            'title': _('Price'),
            'sortable': False
        },
        'MaxTimeAllowed': {
            'title': _('Max Time'),
            'sortable': False
        },
        'DuplicateVariation': {
            'title': _('Dup Var'),
            'sortable': False
        },
        'Calculation': {
            'title': _('Calculation'),
            'sortable': False
        },
    }
    self.review_states = [
        {
            'id': 'default',
            'title': _('All'),
            'contentFilter': {},
            'transitions': [
                {
                    'id': 'empty'
                },
            ],  # none
            'columns': [
                'Title',
                'Keyword',
                'Category',
                'Price',
                'MaxTimeAllowed',
                'DuplicateVariation',
            ],
        },
    ]
def getOrderNumber(self):
    """Return the order number (the object id), utf-8 encoded."""
    order_id = self.getId()
    return safe_unicode(order_id).encode('utf-8')
def getCurrentStateI18n(self):
    """Return the translated current state as an utf-8 string.

    Fix: the parentheses were previously misplaced —
    safe_unicode(_(x).encode('utf-8')) encoded the message and then
    decoded it back, returning unicode instead of the utf-8 bytes the
    other helpers in this module return; encode after safe_unicode.
    """
    return safe_unicode(_(self.getCurrentState())).encode('utf-8')
def Title(self):
    """Return the object's title as an utf-8 string."""
    unicode_title = safe_unicode(self.title)
    return to_utf8(unicode_title)
def read(context, request):
    """JSON API 'read' endpoint: query a catalog and return matching
    objects as dictionaries.

    Builds a catalog contentFilter from any request keys matching the
    catalog's indexes (plus sort/limit parameters), batches results
    with page_nr/page_size, and for each brain in the page merges
    brain metadata, schema field values and IJSONReadExtender adapter
    output into the response dict.
    """
    tag = AuthenticatorView(context, request).authenticator()
    pattern = '<input .*name="(\w+)".*value="(\w+)"'
    # extract the CSRF token value from the rendered hidden input
    _authenticator = re.match(pattern, tag).groups()[1]
    ret = {
        "url": router.url_for("read", force_external=True),
        "success": True,
        "error": False,
        "objects": [],
        "_authenticator": _authenticator,
    }
    # Hardcoded on; App.config.getConfiguration().debug_mode lookup
    # was commented out by Yasir.
    debug_mode = True
    catalog_name = request.get("catalog_name", "portal_catalog")
    if not catalog_name:
        raise ValueError("bad or missing catalog_name: " + catalog_name)
    catalog = getToolByName(context, catalog_name)
    indexes = catalog.indexes()
    contentFilter = {}
    for index in indexes:
        if index in request:
            # skip JSON-encoded review_state structures
            if index == 'review_state' and "{" in request[index]:
                continue
            contentFilter[index] = safe_unicode(request[index])
        # list-valued parameters arrive with a trailing '[]'
        if "%s[]"%index in request:
            value = request["%s[]"%index]
            contentFilter[index] = [safe_unicode(v) for v in value]
    if 'limit' in request:
        try:
            contentFilter['sort_limit'] = int(request["limit"])
        except ValueError:
            pass
    sort_on = request.get('sort_on', 'id')
    contentFilter['sort_on'] = sort_on
    # sort order
    sort_order = request.get('sort_order', '')
    if sort_order:
        contentFilter['sort_order'] = sort_order
    else:
        sort_order = 'ascending'
        contentFilter['sort_order'] = 'ascending'
    include_fields = get_include_fields(request)
    if debug_mode:
        logger.info("contentFilter: " + str(contentFilter))
    # Get matching objects from catalog
    proxies = catalog(**contentFilter)
    # batching items
    page_nr = int(request.get("page_nr", 0))
    try:
        page_size = int(request.get("page_size", 10))
    except ValueError:
        page_size = 10
    # page_size == 0: show all
    if page_size == 0:
        page_size = len(proxies)
    first_item_nr = page_size * page_nr
    if first_item_nr > len(proxies):
        first_item_nr = 0
    page_proxies = proxies[first_item_nr:first_item_nr + page_size]
    for proxy in page_proxies:
        obj_data = {}
        # Place all proxy attributes into the result.
        obj_data.update(load_brain_metadata(proxy, include_fields))
        # Place all schema fields ino the result.
        obj = proxy.getObject()
        obj_data.update(load_field_values(obj, include_fields))
        obj_data['path'] = "/".join(obj.getPhysicalPath())
        # call any adapters that care to modify this data.
        adapters = getAdapters((obj, ), IJSONReadExtender)
        for name, adapter in adapters:
            adapter(request, obj_data)
        ret['objects'].append(obj_data)
    ret['total_objects'] = len(proxies)
    ret['first_object_nr'] = first_item_nr
    last_object_nr = first_item_nr + len(page_proxies)
    if last_object_nr > ret['total_objects']:
        last_object_nr = ret['total_objects']
    ret['last_object_nr'] = last_object_nr
    if debug_mode:
        logger.info("{0} objects returned".format(len(ret['objects'])))
    return ret
def to_utf8(text):
    """Encode *text* as an utf-8 bytestring; None is treated as ''."""
    value = '' if text is None else text
    return safe_unicode(value).encode('utf-8')
def Title(self):
    """Return the schema 'title' field value, utf-8 encoded."""
    title = self.getField('title').get(self)
    return safe_unicode(title).encode('utf-8')
def Title(self):
    """ Return the Organisation's Name as its title """
    name_field = self.getField('Name')
    name = name_field.get(self) if name_field else ''
    return safe_unicode(name or '').encode('utf-8')
def Title(self):
    """ Return the Service ID as title """
    service = self.getService()
    title = service.Title() if service else ''
    return safe_unicode(title or '').encode('utf-8')
def Title(self):
    """Return the lab name, falling back to 'Laboratory' when unset,
    utf-8 encoded.
    """
    title = self.getName() or _("Laboratory")
    return safe_unicode(title).encode('utf-8')
def Title(self):
    """ Return the Id """
    object_id = self.getId()
    return safe_unicode(object_id).encode('utf-8')
def workflow_action_retract_ar(self):
    """Invalidate (retract) an AR and notify interested parties.

    Clones the AR (linking both copies), transitions the original to
    'invalid', forces the clone into 'to_be_verified', then emails the
    client contact, CC contacts and lab managers about the erroneous
    publication, linking both ARs.  Finally redirects to the new AR.
    """
    workflow = getToolByName(self.context, 'portal_workflow')
    # AR should be retracted
    # Can't transition inactive ARs
    if not isActive(self.context):
        message = _('Item is inactive.')
        self.context.plone_utils.addPortalMessage(message, 'info')
        self.request.response.redirect(self.context.absolute_url())
        return
    # 1. Copies the AR linking the original one and viceversa
    ar = self.context
    newar = self.cloneAR(ar)
    # 2. The old AR gets a status of 'invalid'
    workflow.doActionFor(ar, 'retract_ar')
    # 3. The new AR copy opens in status 'to be verified'
    changeWorkflowState(newar, 'bika_ar_workflow', 'to_be_verified')
    # 4. The system immediately alerts the client contacts who ordered
    # the results, per email and SMS, that a possible mistake has been
    # picked up and is under investigation.
    # As much information as possible is provided in the email, linking
    # to the AR online.
    laboratory = self.context.bika_setup.laboratory
    lab_address = "<br/>".join(laboratory.getPrintAddress())
    mime_msg = MIMEMultipart('related')
    # NOTE(review): 'Erroneus' is a typo in the msgid; left unchanged
    # because existing translations key on the literal string.
    mime_msg['Subject'] = t(
        _("Erroneus result publication from ${request_id}",
          mapping={"request_id": ar.getRequestID()}))
    mime_msg['From'] = formataddr((encode_header(laboratory.getName()),
                                   laboratory.getEmailAddress()))
    to = []
    contact = ar.getContact()
    if contact:
        to.append(formataddr((encode_header(contact.Title()),
                              contact.getEmailAddress())))
    for cc in ar.getCCContact():
        formatted = formataddr(
            (encode_header(cc.Title()), cc.getEmailAddress()))
        if formatted not in to:
            to.append(formatted)
    # Lab managers are notified as well (de-duplicated against 'to')
    managers = self.context.portal_groups.getGroupMembers('LabManagers')
    for bcc in managers:
        user = self.portal.acl_users.getUser(bcc)
        if user:
            uemail = user.getProperty('email')
            ufull = user.getProperty('fullname')
            formatted = formataddr((encode_header(ufull), uemail))
            if formatted not in to:
                to.append(formatted)
    mime_msg['To'] = ','.join(to)
    aranchor = "<a href='%s'>%s</a>" % (ar.absolute_url(),
                                        ar.getRequestID())
    naranchor = "<a href='%s'>%s</a>" % (newar.absolute_url(),
                                         newar.getRequestID())
    # Only include remarks when requested via the 'addremarks' flag;
    # the text after the '===' separator is used.
    addremarks = ('addremarks' in self.request and ar.getRemarks()) \
        and ("<br/><br/>" + _("Additional remarks:") + "<br/>"
             + ar.getRemarks().split("===")[1].strip()
             + "<br/><br/>") \
        or ''
    sub_d = dict(request_link=aranchor,
                 new_request_link=naranchor,
                 remarks=addremarks,
                 lab_address=lab_address)
    body = Template(
        "Some errors have been detected in the results report "
        "published from the Analysis Request $request_link. The Analysis "
        "Request $new_request_link has been created automatically and the "
        "previous has been invalidated.<br/>The possible mistake "
        "has been picked up and is under investigation.<br/><br/>"
        "$remarks $lab_address").safe_substitute(sub_d)
    msg_txt = MIMEText(safe_unicode(body).encode('utf-8'),
                       _subtype='html')
    mime_msg.preamble = 'This is a multi-part MIME message.'
    mime_msg.attach(msg_txt)
    try:
        host = getToolByName(self.context, 'MailHost')
        host.send(mime_msg.as_string(), immediate=True)
    except Exception as msg:
        # best-effort notification: warn, but do not block the retract
        message = _(
            'Unable to send an email to alert lab '
            'client contacts that the Analysis Request has been '
            'retracted: ${error}',
            mapping={'error': safe_unicode(msg)})
        self.context.plone_utils.addPortalMessage(message, 'warning')
    message = _('${items} invalidated.',
                mapping={'items': ar.getRequestID()})
    self.context.plone_utils.addPortalMessage(message, 'warning')
    self.request.response.redirect(newar.absolute_url())
def workflow_action_preserve(self):
    """Handle the 'preserve' workflow action submitted from the
    partition table shown in AR and Sample views.

    For each selected, active partition the user has PreserveSample
    permission on, the Preserver and DatePreserved values are read
    from the form and written to the partition; the workflow action is
    only fired when both values are present.  Portal messages
    summarise transitioned vs incomplete partitions, then the browser
    is redirected back to the referer.
    """
    form = self.request.form
    workflow = getToolByName(self.context, 'portal_workflow')
    action, came_from = WorkflowAction._get_form_workflow_action(self)
    checkPermission = self.context.portal_membership.checkPermission
    # Partition Preservation
    # the partition table shown in AR and Sample views sends it's
    # action button submits here.
    objects = WorkflowAction._get_selected_items(self)
    transitioned = []
    incomplete = []
    for obj_uid, obj in objects.items():
        part = obj
        # can't transition inactive items
        if workflow.getInfoFor(part, 'inactive_state', '') == 'inactive':
            continue
        if not checkPermission(PreserveSample, part):
            continue
        # grab this object's Preserver and DatePreserved from the form
        Preserver = form['getPreserver'][0][obj_uid].strip()
        Preserver = Preserver and Preserver or ''
        DatePreserved = form['getDatePreserved'][0][obj_uid].strip()
        DatePreserved = DatePreserved and DateTime(DatePreserved) or ''
        # write them to the sample
        part.setPreserver(Preserver)
        part.setDatePreserved(DatePreserved)
        # transition the object if both values are present
        if Preserver and DatePreserved:
            workflow.doActionFor(part, action)
            transitioned.append(part.id)
        else:
            incomplete.append(part.id)
        part.reindexObject()
        part.aq_parent.reindexObject()
    message = None
    if len(transitioned) > 1:
        message = _(
            '${items} are waiting to be received.',
            mapping={'items': safe_unicode(', '.join(transitioned))})
        self.context.plone_utils.addPortalMessage(message, 'info')
    elif len(transitioned) == 1:
        message = _(
            '${item} is waiting to be received.',
            mapping={'item': safe_unicode(', '.join(transitioned))})
        self.context.plone_utils.addPortalMessage(message, 'info')
    if not message:
        message = _('No changes made.')
        self.context.plone_utils.addPortalMessage(message, 'info')
    if len(incomplete) > 1:
        message = _('${items} are missing Preserver or Date Preserved',
                    mapping={'items': safe_unicode(', '.join(incomplete))})
        self.context.plone_utils.addPortalMessage(message, 'error')
    elif len(incomplete) == 1:
        message = _('${item} is missing Preserver or Preservation Date',
                    mapping={'item': safe_unicode(', '.join(incomplete))})
        self.context.plone_utils.addPortalMessage(message, 'error')
    # Redirect back to where the form was submitted from
    self.destination_url = self.request.get_header(
        "referer", self.context.absolute_url())
    self.request.response.redirect(self.destination_url)
def to_unicode(text):
    """Return *text* as unicode; None is treated as ''."""
    value = '' if text is None else text
    return safe_unicode(value)
def folderitems(self):
    """Build the analyses listing rows for AR / Sample / Worksheet views.

    Extends the base folderitems() with analysis-specific columns:
    result, uncertainty, detection limits, method/instrument selectors,
    analyst, attachments, due-date lateness and QC decorations.  Rows for
    retracted analyses are dropped unless the user has the
    ViewRetractedAnalyses permission.  Also mutates several view
    attributes (interim_fields, interim_columns, categories, columns,
    review_states, items) as a side effect.  Returns the list of row
    dictionaries.
    """
    rc = getToolByName(self.context, REFERENCE_CATALOG)
    bsc = getToolByName(self.context, 'bika_setup_catalog')
    workflow = getToolByName(self.context, 'portal_workflow')
    mtool = getToolByName(self.context, 'portal_membership')
    checkPermission = mtool.checkPermission
    # Editability depends on point of capture: field vs lab results
    # carry different permissions.
    if not self.allow_edit:
        can_edit_analyses = False
    else:
        if self.contentFilter.get('getPointOfCapture', '') == 'field':
            can_edit_analyses = checkPermission(EditFieldResults, self.context)
        else:
            can_edit_analyses = checkPermission(EditResults, self.context)
        self.allow_edit = can_edit_analyses
    self.show_select_column = self.allow_edit
    context_active = isActive(self.context)
    self.categories = []
    items = super(AnalysesView, self).folderitems(full_objects=True)
    # manually skim retracted analyses from the list
    new_items = []
    for i, item in enumerate(items):
        # self.contentsMethod may return brains or objects.
        if not ('obj' in items[i]):
            continue
        obj = hasattr(items[i]['obj'], 'getObject') and \
            items[i]['obj'].getObject() or \
            items[i]['obj']
        if workflow.getInfoFor(obj, 'review_state') == 'retracted' \
                and not checkPermission(ViewRetractedAnalyses, self.context):
            continue
        new_items.append(item)
    items = new_items
    methods = self.get_methods_vocabulary()
    self.interim_fields = {}
    self.interim_columns = {}
    self.specs = {}
    show_methodinstr_columns = False
    for i, item in enumerate(items):
        # self.contentsMethod may return brains or objects.
        obj = hasattr(items[i]['obj'], 'getObject') and \
            items[i]['obj'].getObject() or \
            items[i]['obj']
        if workflow.getInfoFor(obj, 'review_state') == 'retracted' \
                and not checkPermission(ViewRetractedAnalyses, self.context):
            continue
        result = obj.getResult()
        service = obj.getService()
        calculation = service.getCalculation()
        unit = service.getUnit()
        keyword = service.getKeyword()
        if self.show_categories:
            cat = obj.getService().getCategoryTitle()
            items[i]['category'] = cat
            if cat not in self.categories:
                self.categories.append(cat)
        # Check for InterimFields attribute on our object,
        interim_fields = hasattr(obj, 'getInterimFields') \
            and obj.getInterimFields() or []
        # kick some pretty display values in.
        for x in range(len(interim_fields)):
            interim_fields[x]['formatted_value'] = \
                format_numeric_result(obj, interim_fields[x]['value'])
        self.interim_fields[obj.UID()] = interim_fields
        # default/blank values for every analysis-specific column;
        # filled in further below when permissions allow.
        items[i]['service_uid'] = service.UID()
        items[i]['Service'] = service.Title()
        items[i]['Keyword'] = keyword
        items[i]['Unit'] = format_supsub(unit) if unit else ''
        items[i]['Result'] = ''
        items[i]['formatted_result'] = ''
        items[i]['interim_fields'] = interim_fields
        items[i]['Remarks'] = obj.getRemarks()
        items[i]['Uncertainty'] = ''
        items[i]['DetectionLimit'] = ''
        items[i]['retested'] = obj.getRetested()
        items[i]['class']['retested'] = 'center'
        items[i]['result_captured'] = self.ulocalized_time(
            obj.getResultCaptureDate(), long_format=0)
        items[i]['calculation'] = calculation and True or False
        try:
            items[i]['Partition'] = obj.getSamplePartition().getId()
        except AttributeError:
            items[i]['Partition'] = ''
        # reference analyses are "due" when their reference sample expires
        if obj.portal_type == "ReferenceAnalysis":
            items[i]['DueDate'] = self.ulocalized_time(
                obj.aq_parent.getExpiryDate(), long_format=0)
        else:
            items[i]['DueDate'] = self.ulocalized_time(obj.getDueDate(),
                                                       long_format=1)
        cd = obj.getResultCaptureDate()
        items[i]['CaptureDate'] = cd and self.ulocalized_time(
            cd, long_format=1) or ''
        items[i]['Attachments'] = ''
        item['allow_edit'] = []
        client_or_lab = ""
        # QC rows (reference and reference-duplicate analyses) get a
        # CSS marker class; everything else gets a SampleType UID for
        # spec lookups.
        tblrowclass = items[i].get('table_row_class')
        if obj.portal_type == 'ReferenceAnalysis':
            items[i]['st_uid'] = obj.aq_parent.UID()
            items[i]['table_row_class'] = ' '.join(
                [tblrowclass, 'qc-analysis'])
        elif obj.portal_type == 'DuplicateAnalysis' and \
                obj.getAnalysis().portal_type == 'ReferenceAnalysis':
            items[i]['st_uid'] = obj.aq_parent.UID()
            items[i]['table_row_class'] = ' '.join(
                [tblrowclass, 'qc-analysis'])
        else:
            # find the Sample whichever context we are rendered in
            sample = None
            if self.context.portal_type == 'AnalysisRequest':
                sample = self.context.getSample()
            elif self.context.portal_type == 'Worksheet':
                if obj.portal_type in ('DuplicateAnalysis',
                                       'RejectAnalysis'):
                    sample = obj.getAnalysis().getSample()
                else:
                    sample = obj.aq_parent.getSample()
            elif self.context.portal_type == 'Sample':
                sample = self.context
            st_uid = sample.getSampleType().UID() if sample else ''
            items[i]['st_uid'] = st_uid
        if checkPermission(ManageBika, self.context):
            service_uid = service.UID()
            latest = rc.lookupObject(service_uid).version_id
            items[i]['Service'] = service.Title()
            items[i]['class']['Service'] = "service_title"
            # Show version number of out-of-date objects
            # No: This should be done in another column, if at all.
            # The (vX) value confuses some more fragile forms.
            # if hasattr(obj, 'reference_versions') and \
            #    service_uid in obj.reference_versions and \
            #    latest != obj.reference_versions[service_uid]:
            #     items[i]['after']['Service'] = "(v%s)" % \
            #         (obj.reference_versions[service_uid])
        # choices defined on Service apply to result fields.
        choices = service.getResultOptions()
        if choices:
            item['choices']['Result'] = choices
        # permission to view this item's results
        can_view_result = \
            getSecurityManager().checkPermission(ViewResults, obj)
        # permission to edit this item's results
        # Editing Field Results is possible while in Sample Due.
        poc = self.contentFilter.get("getPointOfCapture", 'lab')
        can_edit_analysis = self.allow_edit and context_active and \
            ((poc == 'field' and
              getSecurityManager().checkPermission(EditFieldResults, obj))
             or
             (poc != 'field' and
              getSecurityManager().checkPermission(EditResults, obj)))
        allowed_method_states = [
            'to_be_sampled', 'to_be_preserved', 'sample_received',
            'sample_registered', 'sampled', 'assigned'
        ]
        # Prevent from being edited if the instrument assigned
        # is not valid (out-of-date or uncalibrated), except if
        # the analysis is a QC with assigned status
        can_edit_analysis = can_edit_analysis \
            and (obj.isInstrumentValid()
                 or (obj.portal_type == 'ReferenceAnalysis'
                     and item['review_state'] in allowed_method_states))
        if can_edit_analysis:
            items[i]['allow_edit'].extend(['Analyst', 'Result', 'Remarks'])
            # if the Result field is editable, our interim fields are too
            for f in self.interim_fields[obj.UID()]:
                items[i]['allow_edit'].append(f['keyword'])
        # if there isn't a calculation then result must be re-testable,
        # and if there are interim fields, they too must be re-testable.
        if not items[i]['calculation'] or \
           (items[i]['calculation'] and self.interim_fields[obj.UID()]):
            items[i]['allow_edit'].append('retested')
        # TODO: Only the labmanager must be able to change the method
        # can_set_method = getSecurityManager().checkPermission(SetAnalysisMethod, obj)
        can_set_method = can_edit_analysis \
            and item['review_state'] in allowed_method_states
        # prefer the method set on the analysis; fall back to the service
        method = obj.getMethod() \
            if hasattr(obj, 'getMethod') and obj.getMethod() \
            else service.getMethod()
        # Display the methods selector if the AS has at least one
        # method assigned
        item['Method'] = ''
        item['replace']['Method'] = ''
        if can_set_method:
            voc = self.get_methods_vocabulary(obj)
            if voc:
                # The service has at least one method available
                item['Method'] = method.UID() if method else ''
                item['choices']['Method'] = voc
                item['allow_edit'].append('Method')
                show_methodinstr_columns = True
            elif method:
                # This should never happen
                # The analysis has set a method, but its parent
                # service hasn't any method available O_o
                item['Method'] = method.Title()
                item['replace']['Method'] = "<a href='%s'>%s</a>" % \
                    (method.absolute_url(), method.Title())
                show_methodinstr_columns = True
        elif method:
            # Edition not allowed, but method set
            item['Method'] = method.Title()
            item['replace']['Method'] = "<a href='%s'>%s</a>" % \
                (method.absolute_url(), method.Title())
            show_methodinstr_columns = True
        # TODO: Instrument selector dynamic behavior in worksheet Results
        # Only the labmanager must be able to change the instrument to be used. Also,
        # the instrument selection should be done in accordance with the method selected
        # can_set_instrument = service.getInstrumentEntryOfResults() and getSecurityManager().checkPermission(SetAnalysisInstrument, obj)
        can_set_instrument = service.getInstrumentEntryOfResults() \
            and can_edit_analysis \
            and item['review_state'] in allowed_method_states
        item['Instrument'] = ''
        item['replace']['Instrument'] = ''
        if service.getInstrumentEntryOfResults():
            instrument = None
            # If the analysis has an instrument already assigned, use it
            if service.getInstrumentEntryOfResults() \
                    and hasattr(obj, 'getInstrument') \
                    and obj.getInstrument():
                instrument = obj.getInstrument()
            # Otherwise, use the Service's default instrument
            elif service.getInstrumentEntryOfResults():
                instrument = service.getInstrument()
            if can_set_instrument:
                # Edition allowed
                voc = self.get_instruments_vocabulary(obj)
                if voc:
                    # The service has at least one instrument available
                    item['Instrument'] = instrument.UID(
                    ) if instrument else ''
                    item['choices']['Instrument'] = voc
                    item['allow_edit'].append('Instrument')
                    show_methodinstr_columns = True
                elif instrument:
                    # This should never happen
                    # The analysis has an instrument set, but the
                    # service hasn't any available instrument
                    item['Instrument'] = instrument.Title()
                    item['replace']['Instrument'] = "<a href='%s'>%s</a>" % \
                        (instrument.absolute_url(), instrument.Title())
                    show_methodinstr_columns = True
            elif instrument:
                # Edition not allowed, but instrument set
                item['Instrument'] = instrument.Title()
                item['replace']['Instrument'] = "<a href='%s'>%s</a>" % \
                    (instrument.absolute_url(), instrument.Title())
                show_methodinstr_columns = True
        else:
            # Manual entry of results, instrument not allowed
            item['Instrument'] = _('Manual')
            msgtitle = t(
                _(
                    "Instrument entry of results not allowed for ${service}",
                    mapping={"service": safe_unicode(service.Title())},
                ))
            item['replace']['Instrument'] = \
                '<a href="#" title="%s">%s</a>' % (msgtitle, t(_('Manual')))
        # Sets the analyst assigned to this analysis
        if can_edit_analysis:
            analyst = obj.getAnalyst()
            # widget default: current user
            if not analyst:
                analyst = mtool.getAuthenticatedMember().getUserName()
            items[i]['Analyst'] = analyst
            item['choices']['Analyst'] = self.getAnalysts()
        else:
            items[i]['Analyst'] = obj.getAnalystName()
        # If the user can attach files to analyses, show the attachment col
        can_add_attachment = \
            getSecurityManager().checkPermission(AddAttachment, obj)
        if can_add_attachment or can_view_result:
            attachments = ""
            if hasattr(obj, 'getAttachment'):
                for attachment in obj.getAttachment():
                    af = attachment.getAttachmentFile()
                    icon = af.getBestIcon()
                    attachments += "<span class='attachment' attachment_uid='%s'>" % (
                        attachment.UID())
                    if icon:
                        attachments += "<img src='%s/%s'/>" % (
                            self.portal_url, icon)
                    attachments += '<a href="%s/at_download/AttachmentFile"/>%s</a>' % (
                        attachment.absolute_url(), af.filename)
                    if can_edit_analysis:
                        attachments += "<img class='deleteAttachmentButton' attachment_uid='%s' src='%s'/>" % (
                            attachment.UID(),
                            "++resource++bika.lims.images/delete.png")
                    attachments += "</br></span>"
            # strip the trailing "</br></span>" and re-close the span
            items[i]['replace'][
                'Attachments'] = attachments[:-12] + "</span>"
        # Only display data bearing fields if we have ViewResults
        # permission, otherwise just put an icon in Result column.
        if can_view_result:
            items[i]['Result'] = result
            scinot = self.context.bika_setup.getScientificNotationResults()
            dmk = self.context.bika_setup.getResultsDecimalMark()
            items[i]['formatted_result'] = obj.getFormattedResult(
                sciformat=int(scinot), decimalmark=dmk)
            # LIMS-1379 Allow manual uncertainty value input
            # https://jira.bikalabs.com/browse/LIMS-1379
            fu = format_uncertainty(obj, result, decimalmark=dmk,
                                    sciformat=int(scinot))
            fu = fu if fu else ''
            if can_edit_analysis and service.getAllowManualUncertainty(
            ) == True:
                unc = obj.getUncertainty(result)
                item['allow_edit'].append('Uncertainty')
                items[i]['Uncertainty'] = unc if unc else ''
                items[i]['before']['Uncertainty'] = '± '
                items[i]['after'][
                    'Uncertainty'] = '<em class="discreet" style="white-space:nowrap;"> %s</em>' % items[
                        i]['Unit']
            elif fu:
                items[i]['Uncertainty'] = fu
                items[i]['before']['Uncertainty'] = '± '
                items[i]['after'][
                    'Uncertainty'] = '<em class="discreet" style="white-space:nowrap;"> %s</em>' % items[
                        i]['Unit']
            # LIMS-1700. Allow manual input of Detection Limits
            # LIMS-1775. Allow to select LDL or UDL defaults in results with readonly mode
            # https://jira.bikalabs.com/browse/LIMS-1700
            # https://jira.bikalabs.com/browse/LIMS-1775
            if can_edit_analysis and \
                    hasattr(obj, 'getDetectionLimitOperand') and \
                    hasattr(service, 'getDetectionLimitSelector') and \
                    service.getDetectionLimitSelector() == True:
                isldl = obj.isBelowLowerDetectionLimit()
                isudl = obj.isAboveUpperDetectionLimit()
                dlval = ''
                if isldl or isudl:
                    dlval = '<' if isldl else '>'
                item['allow_edit'].append('DetectionLimit')
                item['DetectionLimit'] = dlval
                choices = [{
                    'ResultValue': '<',
                    'ResultText': '<'
                }, {
                    'ResultValue': '>',
                    'ResultText': '>'
                }]
                item['choices']['DetectionLimit'] = choices
                self.columns['DetectionLimit']['toggle'] = True
                srv = obj.getService()
                defdls = {
                    'min': srv.getLowerDetectionLimit(),
                    'max': srv.getUpperDetectionLimit(),
                    'manual': srv.getAllowManualDetectionLimit()
                }
                # hidden input: default DL settings consumed by the
                # client-side results JS
                defin = '<input type="hidden" id="DefaultDLS.%s" value=\'%s\'/>'
                defin = defin % (obj.UID(), json.dumps(defdls))
                item['after']['DetectionLimit'] = defin
            # LIMS-1769. Allow to use LDL and UDL in calculations.
            # https://jira.bikalabs.com/browse/LIMS-1769
            # Since LDL, UDL, etc. are wildcards that can be used
            # in calculations, these fields must be loaded always
            # for 'live' calculations.
            if can_edit_analysis:
                dls = {
                    'default_ldl': 'none',
                    'default_udl': 'none',
                    'below_ldl': False,
                    'above_udl': False,
                    'is_ldl': False,
                    'is_udl': False,
                    'manual_allowed': False,
                    'dlselect_allowed': False
                }
                if hasattr(obj, 'getDetectionLimits'):
                    dls['below_ldl'] = obj.isBelowLowerDetectionLimit()
                    # NOTE(review): 'above_udl' is populated from
                    # isBelowLowerDetectionLimit() — this looks like a
                    # copy-paste bug (isAboveUpperDetectionLimit()
                    # expected); confirm before changing.
                    dls['above_udl'] = obj.isBelowLowerDetectionLimit()
                    dls['is_ldl'] = obj.isLowerDetectionLimit()
                    dls['is_udl'] = obj.isUpperDetectionLimit()
                    dls['default_ldl'] = service.getLowerDetectionLimit()
                    dls['default_udl'] = service.getUpperDetectionLimit()
                    dls['manual_allowed'] = service.getAllowManualDetectionLimit(
                    )
                    dls['dlselect_allowed'] = service.getDetectionLimitSelector(
                    )
                dlsin = '<input type="hidden" id="AnalysisDLS.%s" value=\'%s\'/>'
                dlsin = dlsin % (obj.UID(), json.dumps(dls))
                item['after']['Result'] = dlsin
        else:
            # no ViewResults: replace the Result cell with an icon
            items[i]['Specification'] = ""
            if 'Result' in items[i]['allow_edit']:
                items[i]['allow_edit'].remove('Result')
            items[i]['before']['Result'] = \
                '<img width="16" height="16" ' + \
                'src="%s/++resource++bika.lims.images/to_follow.png"/>' % \
                (self.portal_url)
        # Everyone can see valid-ranges
        spec = self.get_analysis_spec(obj)
        if spec:
            min_val = spec.get('min', '')
            min_str = ">{0}".format(min_val) if min_val else ''
            max_val = spec.get('max', '')
            max_str = "<{0}".format(max_val) if max_val else ''
            error_val = spec.get('error', '')
            error_str = "{0}%".format(error_val) if error_val else ''
            rngstr = ",".join(
                [x for x in [min_str, max_str, error_str] if x])
        else:
            rngstr = ""
        items[i]['Specification'] = rngstr
        # Add this analysis' interim fields to the interim_columns list
        for f in self.interim_fields[obj.UID()]:
            if f['keyword'] not in self.interim_columns and not f.get(
                    'hidden', False):
                self.interim_columns[f['keyword']] = f['title']
            # and to the item itself
            items[i][f['keyword']] = f
            items[i]['class'][f['keyword']] = 'interim'
        # check if this analysis is late/overdue
        resultdate = obj.aq_parent.getDateSampled() \
            if obj.portal_type == 'ReferenceAnalysis' \
            else obj.getResultCaptureDate()
        duedate = obj.aq_parent.getExpiryDate() \
            if obj.portal_type == 'ReferenceAnalysis' \
            else obj.getDueDate()
        items[i]['replace']['DueDate'] = \
            self.ulocalized_time(duedate, long_format=1)
        if items[i]['review_state'] not in [
                'to_be_sampled', 'to_be_preserved', 'sample_due',
                'published'
        ]:
            if (resultdate and resultdate > duedate) \
               or (not resultdate and DateTime() > duedate):
                items[i]['replace']['DueDate'] = '%s <img width="16" height="16" src="%s/++resource++bika.lims.images/late.png" title="%s"/>' % \
                    (self.ulocalized_time(duedate, long_format=1),
                     self.portal_url, t(_("Late Analysis")))
        # Submitting user may not verify results (admin can though)
        if items[i]['review_state'] == 'to_be_verified' and \
           not checkPermission(VerifyOwnResults, obj):
            user_id = getSecurityManager().getUser().getId()
            self_submitted = False
            try:
                # walk history newest-first, looking at the latest
                # 'submit' event only
                review_history = list(
                    workflow.getInfoFor(obj, 'review_history'))
                review_history.reverse()
                for event in review_history:
                    if event.get('action') == 'submit':
                        if event.get('actor') == user_id:
                            self_submitted = True
                        break
                if self_submitted:
                    items[i]['after']['state_title'] = \
                        "<img src='++resource++bika.lims.images/submitted-by-current-user.png' title='%s'/>" % \
                        (t(_("Cannot verify: Submitted by current user")))
            except WorkflowException:
                pass
        # add icon for assigned analyses in AR views
        if self.context.portal_type == 'AnalysisRequest':
            obj = items[i]['obj']
            if obj.portal_type in ['ReferenceAnalysis',
                                   'DuplicateAnalysis'] or \
               workflow.getInfoFor(obj, 'worksheetanalysis_review_state') == 'assigned':
                br = obj.getBackReferences('WorksheetAnalysis')
                if len(br) > 0:
                    ws = br[0]
                    items[i]['after']['state_title'] = \
                        "<a href='%s'><img src='++resource++bika.lims.images/worksheet.png' title='%s'/></a>" % \
                        (ws.absolute_url(),
                         t(_("Assigned to: ${worksheet_id}",
                             mapping={'worksheet_id': safe_unicode(ws.id)})))
    # the TAL requires values for all interim fields on all
    # items, so we set blank values in unused cells
    for item in items:
        for field in self.interim_columns:
            if field not in item:
                item[field] = ''
    # XXX order the list of interim columns
    interim_keys = self.interim_columns.keys()
    interim_keys.reverse()
    # add InterimFields keys to columns
    for col_id in interim_keys:
        if col_id not in self.columns:
            self.columns[col_id] = {
                'title': self.interim_columns[col_id],
                'input_width': '6',
                'input_class': 'ajax_calculate numeric',
                'sortable': False
            }
    if can_edit_analyses:
        new_states = []
        for state in self.review_states:
            # InterimFields are displayed in review_state
            # They are anyway available through View.columns though.
            # In case of hidden fields, the calcs.py should check calcs/services
            # for additional InterimFields!!
            pos = 'Result' in state['columns'] and \
                state['columns'].index('Result') or len(state['columns'])
            for col_id in interim_keys:
                if col_id not in state['columns']:
                    state['columns'].insert(pos, col_id)
            # retested column is added after Result.
            pos = 'Result' in state['columns'] and \
                state['columns'].index('Uncertainty') + 1 or len(state['columns'])
            state['columns'].insert(pos, 'retested')
            new_states.append(state)
        self.review_states = new_states
        # Allow selecting individual analyses
        self.show_select_column = True
    # Dry Matter.
    # The Dry Matter column is never enabled for reference sample contexts
    # and refers to getReportDryMatter in ARs.
    if items and \
       (hasattr(self.context, 'getReportDryMatter') and
            self.context.getReportDryMatter()):
        # look through all items
        # if the item's Service supports ReportDryMatter, add getResultDM().
        for item in items:
            if item['obj'].getService().getReportDryMatter():
                item['ResultDM'] = item['obj'].getResultDM()
            else:
                item['ResultDM'] = ''
            if item['ResultDM']:
                item['after']['ResultDM'] = "<em class='discreet'>%</em>"
        # modify the review_states list to include the ResultDM column
        new_states = []
        for state in self.review_states:
            pos = 'Result' in state['columns'] and \
                state['columns'].index('Uncertainty') + 1 or len(state['columns'])
            state['columns'].insert(pos, 'ResultDM')
            new_states.append(state)
        self.review_states = new_states
    self.categories.sort()
    # self.json_specs = json.dumps(self.specs)
    self.json_interim_fields = json.dumps(self.interim_fields)
    self.items = items
    # Method and Instrument columns must be shown or hidden at the
    # same time, because the value assigned to one causes
    # a value reassignment to the other (one method can be performed
    # by different instruments)
    self.columns['Method']['toggle'] = show_methodinstr_columns
    self.columns['Instrument']['toggle'] = show_methodinstr_columns
    return items
def Title(self):
    """Return the Organisation's Name field as its UTF-8 encoded title."""
    name = self.getField('Name').get(self)
    return safe_unicode(name).encode('utf-8')
def workflow_action_retract_ar(self):
    """Invalidate the current Analysis Request and open a retest copy.

    Clones the AR (linked both ways), transitions the original to
    'invalid', forces the clone into 'to_be_verified', and emails the
    client contacts, CC contacts and lab managers that the published
    results are under investigation.  Finally redirects to the new AR.
    """
    workflow = getToolByName(self.context, 'portal_workflow')
    # AR should be retracted
    # Can't transition inactive ARs
    if not isActive(self.context):
        message = _('Item is inactive.')
        self.context.plone_utils.addPortalMessage(message, 'info')
        self.request.response.redirect(self.context.absolute_url())
        return
    # 1. Copies the AR linking the original one and viceversa
    ar = self.context
    newar = self.cloneAR(ar)
    # 2. The old AR gets a status of 'invalid'
    workflow.doActionFor(ar, 'retract_ar')
    # 3. The new AR copy opens in status 'to be verified'
    changeWorkflowState(newar, 'bika_ar_workflow', 'to_be_verified')
    # 4. The system immediately alerts the client contacts who ordered
    # the results, per email and SMS, that a possible mistake has been
    # picked up and is under investigation.
    # A much possible information is provided in the email, linking
    # to the AR online.
    laboratory = self.context.bika_setup.laboratory
    lab_address = "<br/>".join(laboratory.getPrintAddress())
    mime_msg = MIMEMultipart('related')
    mime_msg['Subject'] = t(_("Erroneus result publication from ${request_id}",
                              mapping={"request_id": ar.getRequestID()}))
    mime_msg['From'] = formataddr(
        (encode_header(laboratory.getName()), laboratory.getEmailAddress()))
    # build the deduplicated recipient list: contact, CC contacts,
    # then all LabManagers group members
    to = []
    contact = ar.getContact()
    if contact:
        to.append(formataddr((encode_header(contact.Title()),
                              contact.getEmailAddress())))
    for cc in ar.getCCContact():
        formatted = formataddr((encode_header(cc.Title()),
                                cc.getEmailAddress()))
        if formatted not in to:
            to.append(formatted)
    managers = self.context.portal_groups.getGroupMembers('LabManagers')
    for bcc in managers:
        user = self.portal.acl_users.getUser(bcc)
        if user:
            uemail = user.getProperty('email')
            ufull = user.getProperty('fullname')
            formatted = formataddr((encode_header(ufull), uemail))
            if formatted not in to:
                to.append(formatted)
    mime_msg['To'] = ','.join(to)
    aranchor = "<a href='%s'>%s</a>" % (ar.absolute_url(),
                                        ar.getRequestID())
    naranchor = "<a href='%s'>%s</a>" % (newar.absolute_url(),
                                         newar.getRequestID())
    # Optionally append the text after the '===' marker in the AR's
    # remarks, when 'addremarks' was checked in the form.
    # NOTE(review): _("Additional remarks:") is concatenated untranslated
    # into the body here — confirm whether t() was intended.
    addremarks = ('addremarks' in self.request and ar.getRemarks()) \
        and ("<br/><br/>" + _("Additional remarks:") + "<br/>"
             + ar.getRemarks().split("===")[1].strip()
             + "<br/><br/>") \
        or ''
    sub_d = dict(request_link=aranchor,
                 new_request_link=naranchor,
                 remarks=addremarks,
                 lab_address=lab_address)
    body = Template("Some errors have been detected in the results report "
                    "published from the Analysis Request $request_link. The Analysis "
                    "Request $new_request_link has been created automatically and the "
                    "previous has been invalidated.<br/>The possible mistake "
                    "has been picked up and is under investigation.<br/><br/>"
                    "$remarks $lab_address").safe_substitute(sub_d)
    msg_txt = MIMEText(safe_unicode(body).encode('utf-8'), _subtype='html')
    mime_msg.preamble = 'This is a multi-part MIME message.'
    mime_msg.attach(msg_txt)
    # best-effort send: a mail failure must not abort the retraction,
    # so it is reported as a warning instead
    try:
        host = getToolByName(self.context, 'MailHost')
        host.send(mime_msg.as_string(), immediate=True)
    except Exception as msg:
        message = _('Unable to send an email to alert lab '
                    'client contacts that the Analysis Request has been '
                    'retracted: ${error}',
                    mapping={'error': safe_unicode(msg)})
        self.context.plone_utils.addPortalMessage(message, 'warning')
    message = _('${items} invalidated.',
                mapping={'items': ar.getRequestID()})
    self.context.plone_utils.addPortalMessage(message, 'warning')
    self.request.response.redirect(newar.absolute_url())
def __call__(self):
    """Create Analysis Requests from the batched AR-add form (AJAX).

    Parses the JSON 'state' payload from the form, validates each AR's
    required fields, creates the valid ARs and returns a JSON response
    with either a success message (plus sticker-printing info) or the
    collected per-AR errors.

    Fixes over the previous revision:
    - the shared ``required`` list was mutated inside the per-AR loop
      (secondary-AR and '_hidden' removals), so removals made while
      validating one AR leaked into the validation of every later AR;
    - it was also modified while being iterated, which silently skipped
      elements;
    - an empty ``ARs`` list crashed with IndexError when composing the
      success message.
    """
    form = self.request.form
    CheckAuthenticator(self.request.form)
    PostOnly(self.request.form)
    uc = getToolByName(self.context, 'uid_catalog')
    bsc = getToolByName(self.context, 'bika_setup_catalog')
    portal_catalog = getToolByName(self.context, 'portal_catalog')
    # Load the form data from request.state. If anything goes wrong here,
    # put a bullet through the whole process.
    try:
        states = json.loads(form['state'])
    except Exception as e:
        message = t(_('Badly formed state: ${errmsg}',
                      mapping={'errmsg': e.message}))
        ajax_form_error(self.errors, message=message)
        return json.dumps({'errors': self.errors})
    # Validate incoming form data: base list of required field names,
    # shared (read-only) across all ARs.
    required = [field.getName() for field in AnalysisRequestSchema.fields()
                if field.required] + ["Analyses"]
    # First remove all states which are completely empty; if all
    # required fields are not present, we assume that the current
    # AR had no data entered, and can be ignored
    nonblank_states = {}
    for arnum, state in states.items():
        for key, val in state.items():
            if val \
               and "%s_hidden" % key not in state \
               and not key.endswith('hidden'):
                nonblank_states[arnum] = state
                break
    # in valid_states, all ars that pass validation will be stored
    valid_states = {}
    for arnum, state in nonblank_states.items():
        # Work on a per-AR copy so removals never leak between ARs and
        # we never mutate a list we are iterating.
        ar_required = list(required)
        # Secondary ARs are a special case, these fields are not required
        if state.get('Sample', ''):
            ar_required = [f for f in ar_required
                           if f not in ('SamplingDate', 'SampleType')]
        # fields flagged as 'hidden' are not considered required because
        # they will already have default values inserted in them
        ar_required = [f for f in ar_required
                       if f + '_hidden' not in state]
        missing = [f for f in ar_required if not state.get(f, '')]
        # If there are required fields missing, flag an error
        if missing:
            msg = t(_('Required fields have no values: '
                      '${field_names}',
                      mapping={'field_names': ', '.join(missing)}))
            ajax_form_error(self.errors, arnum=arnum, message=msg)
            continue
        # This ar is valid!
        valid_states[arnum] = state
    # - Expand lists of UIDs returned by multiValued reference widgets
    # - Transfer _uid values into their respective fields
    for arnum in valid_states.keys():
        for field, value in valid_states[arnum].items():
            if field.endswith('_uid') and ',' in value:
                valid_states[arnum][field] = value.split(',')
            elif field.endswith('_uid'):
                valid_states[arnum][field] = value
    if self.errors:
        return json.dumps({'errors': self.errors})
    # Now, we will create the specified ARs.
    ARs = []
    for arnum, state in valid_states.items():
        # Create the Analysis Request
        ar = create_analysisrequest(
            portal_catalog(UID=state['Client'])[0].getObject(),
            self.request,
            state
        )
        ARs.append(ar.Title())
    # Guard: nothing submitted / nothing valid — report instead of
    # crashing on ARs[0] below.
    if not ARs:
        msg = t(_('Required fields have no values: '
                  '${field_names}',
                  mapping={'field_names': ', '.join(required)}))
        ajax_form_error(self.errors, message=msg)
        return json.dumps({'errors': self.errors})
    # Display the appropriate message after creation
    if len(ARs) > 1:
        message = _('Analysis requests ${ARs} were successfully created.',
                    mapping={'ARs': safe_unicode(', '.join(ARs))})
    else:
        message = _('Analysis request ${AR} was successfully created.',
                    mapping={'AR': safe_unicode(ARs[0])})
    self.context.plone_utils.addPortalMessage(message, 'info')
    # Automatic label printing won't print "register" labels for Secondary. ARs
    new_ars = [ar for ar in ARs if ar[-2:] == '01']
    if 'register' in self.context.bika_setup.getAutoPrintStickers() \
       and new_ars:
        return json.dumps({
            'success': message,
            'stickers': new_ars,
            'stickertemplate': self.context.bika_setup.getAutoStickerTemplate()
        })
    else:
        return json.dumps({'success': message})
def __call__(self, value, *args, **kwargs):
    """Archetypes validator for a Calculation's InterimFields.

    ``value`` is the keyword of the interim field being validated.
    Returns True on success, or a translated error string on failure.
    The result is cached on ``instance.REQUEST`` under a per-field key
    so the validator body runs only once per form submit even though
    Archetypes invokes it once per subfield value.
    """
    instance = kwargs['instance']
    fieldname = kwargs['field'].getName()
    request = kwargs.get('REQUEST', {})
    form = request.form
    interim_fields = form.get(fieldname, [])
    translate = getToolByName(instance, 'translation_service').translate
    bsc = getToolByName(instance, 'bika_setup_catalog')
    # We run through the validator once per form submit, and check all values
    # this value in request prevents running once per subfield value.
    key = instance.id + fieldname
    if instance.REQUEST.get(key, False):
        return True
    # per-row checks: title and keyword present, keyword well-formed
    for x in range(len(interim_fields)):
        row = interim_fields[x]
        keys = row.keys()
        if 'title' not in keys:
            instance.REQUEST[key] = to_utf8(
                translate(_("Validation failed: title is required")))
            return instance.REQUEST[key]
        if 'keyword' not in keys:
            instance.REQUEST[key] = to_utf8(
                translate(_("Validation failed: keyword is required")))
            return instance.REQUEST[key]
        # NOTE(review): unlike the other branches, this error is cached
        # untranslated — confirm whether to_utf8(translate(...)) was
        # intended here too.
        if not re.match(r"^[A-Za-z\w\d\-\_]+$", row['keyword']):
            instance.REQUEST[key] = _(
                "Validation failed: keyword contains invalid characters")
            return instance.REQUEST[key]
    # keywords and titles used once only in the submitted form
    keywords = {}
    titles = {}
    for field in interim_fields:
        if 'keyword' in field:
            if field['keyword'] in keywords:
                keywords[field['keyword']] += 1
            else:
                keywords[field['keyword']] = 1
        if 'title' in field:
            if field['title'] in titles:
                titles[field['title']] += 1
            else:
                titles[field['title']] = 1
    for k in [k for k in keywords.keys() if keywords[k] > 1]:
        msg = _("Validation failed: '${keyword}': duplicate keyword",
                mapping={'keyword': safe_unicode(k)})
        instance.REQUEST[key] = to_utf8(translate(msg))
        return instance.REQUEST[key]
    # NOTE(review): the loop variable 't' shadows the module-level 't'
    # translation helper used elsewhere in this file; harmless here
    # because the function returns inside the loop, but worth renaming.
    for t in [t for t in titles.keys() if titles[t] > 1]:
        msg = _("Validation failed: '${title}': duplicate title",
                mapping={'title': safe_unicode(t)})
        instance.REQUEST[key] = to_utf8(translate(msg))
        return instance.REQUEST[key]
    # check all keywords against all AnalysisService keywords for dups
    services = bsc(portal_type='AnalysisService', getKeyword=value)
    if services:
        msg = _("Validation failed: '${title}': "
                "This keyword is already in use by service '${used_by}'",
                mapping={'title': safe_unicode(value),
                         'used_by': safe_unicode(services[0].Title)})
        instance.REQUEST[key] = to_utf8(translate(msg))
        return instance.REQUEST[key]
    # any duplicated interimfield titles must share the same keyword
    # any duplicated interimfield keywords must share the same title
    calcs = bsc(portal_type='Calculation')
    keyword_titles = {}
    title_keywords = {}
    for calc in calcs:
        # skip the calculation being edited
        if calc.UID == instance.UID():
            continue
        calc = calc.getObject()
        for field in calc.getInterimFields():
            keyword_titles[field['keyword']] = field['title']
            title_keywords[field['title']] = field['keyword']
    for field in interim_fields:
        if field['keyword'] != value:
            continue
        if 'title' in field and \
           field['title'] in title_keywords.keys() and \
           title_keywords[field['title']] != field['keyword']:
            msg = _("Validation failed: column title '${title}' "
                    "must have keyword '${keyword}'",
                    mapping={'title': safe_unicode(field['title']),
                             'keyword': safe_unicode(
                                 title_keywords[field['title']])})
            instance.REQUEST[key] = to_utf8(translate(msg))
            return instance.REQUEST[key]
        if 'keyword' in field and \
           field['keyword'] in keyword_titles.keys() and \
           keyword_titles[field['keyword']] != field['title']:
            msg = _("Validation failed: keyword '${keyword}' "
                    "must have column title '${title}'",
                    mapping={'keyword': safe_unicode(field['keyword']),
                             'title': safe_unicode(
                                 keyword_titles[field['keyword']])})
            instance.REQUEST[key] = to_utf8(translate(msg))
            return instance.REQUEST[key]
    # all checks passed; cache success for the remaining subfield calls
    instance.REQUEST[key] = True
    return True
def Title(self):
    """Return the Sample ID, UTF-8 encoded, as this object's title."""
    sample_id = self.getId()
    return safe_unicode(sample_id).encode('utf-8')
def toPdf(self):
    """Render this view's template and return it converted to PDF data."""
    rendered = self.template()
    html = safe_unicode(rendered).encode('utf-8')
    return createPdf(html)
def read(context, request):
    """JSON API 'read' endpoint: query a catalog and return matching objects.

    Builds a catalog query from request parameters that match catalog index
    names, applies sorting and batching, and returns a dict with the page of
    serialized objects plus batching metadata and a CSRF authenticator.
    """
    # Extract the plone.protect authenticator token from the rendered
    # <input> tag so clients can replay it on subsequent requests.
    tag = AuthenticatorView(context, request).authenticator()
    pattern = '<input .*name="(\w+)".*value="(\w+)"'
    _authenticator = re.match(pattern, tag).groups()[1]
    ret = {
        "url": router.url_for("read", force_external=True),
        "success": True,
        "error": False,
        "objects": [],
        "_authenticator": _authenticator,
    }
    # NOTE(review): debug logging is hard-coded on; the original read the
    # flag from App.config.getConfiguration().debug_mode (see the trailing
    # comment). Consider restoring the config lookup.
    debug_mode = True #App.config.getConfiguration().debug_mode "Commented by Yasir"
    catalog_name = request.get("catalog_name", "portal_catalog")
    # NOTE(review): this guard is unreachable — request.get() above always
    # falls back to "portal_catalog".
    if not catalog_name:
        raise ValueError("bad or missing catalog_name: " + catalog_name)
    catalog = getToolByName(context, catalog_name)
    indexes = catalog.indexes()
    # Any request parameter whose name matches a catalog index becomes a
    # query term; 'index[]' variants are treated as multi-valued.
    contentFilter = {}
    for index in indexes:
        if index in request:
            # Skip JSON-looking review_state values (handled elsewhere,
            # presumably — TODO confirm against callers).
            if index == 'review_state' and "{" in request[index]:
                continue
            contentFilter[index] = safe_unicode(request[index])
        if "%s[]" % index in request:
            value = request["%s[]" % index]
            contentFilter[index] = [safe_unicode(v) for v in value]
    # Optional catalog-level result cap; a non-numeric limit is ignored.
    if 'limit' in request:
        try:
            contentFilter['sort_limit'] = int(request["limit"])
        except ValueError:
            pass
    sort_on = request.get('sort_on', 'id')
    contentFilter['sort_on'] = sort_on
    # sort order: default to ascending when not supplied.
    sort_order = request.get('sort_order', '')
    if sort_order:
        contentFilter['sort_order'] = sort_order
    else:
        sort_order = 'ascending'
        contentFilter['sort_order'] = 'ascending'
    include_fields = get_include_fields(request)
    if debug_mode:
        logger.info("contentFilter: " + str(contentFilter))
    # Get matching objects (brains) from the catalog.
    proxies = catalog(**contentFilter)
    # Batching: page_nr selects the page, page_size the page length.
    page_nr = int(request.get("page_nr", 0))
    try:
        page_size = int(request.get("page_size", 10))
    except ValueError:
        page_size = 10
    # page_size == 0 means "show all results".
    if page_size == 0:
        page_size = len(proxies)
    first_item_nr = page_size * page_nr
    # An out-of-range page falls back to the first page.
    if first_item_nr > len(proxies):
        first_item_nr = 0
    page_proxies = proxies[first_item_nr:first_item_nr + page_size]
    for proxy in page_proxies:
        obj_data = {}
        # Place all brain (catalog metadata) attributes into the result.
        obj_data.update(load_brain_metadata(proxy, include_fields))
        # Place all schema field values into the result — note this wakes
        # up the full object, which is the expensive part of this loop.
        obj = proxy.getObject()
        obj_data.update(load_field_values(obj, include_fields))
        obj_data['path'] = "/".join(obj.getPhysicalPath())
        # Let any registered IJSONReadExtender adapters amend the record.
        adapters = getAdapters((obj, ), IJSONReadExtender)
        for name, adapter in adapters:
            adapter(request, obj_data)
        ret['objects'].append(obj_data)
    # Batching metadata for the client.
    ret['total_objects'] = len(proxies)
    ret['first_object_nr'] = first_item_nr
    last_object_nr = first_item_nr + len(page_proxies)
    if last_object_nr > ret['total_objects']:
        last_object_nr = ret['total_objects']
    ret['last_object_nr'] = last_object_nr
    if debug_mode:
        logger.info("{0} objects returned".format(len(ret['objects'])))
    return ret
def Title(self): """ Return the OrderNumber as title """ return safe_unicode(self.getOrderNumber()).encode('utf-8')
def Title(self): """ Return the contact's Fullname as title """ return safe_unicode(self.getFullname()).encode('utf-8')
def __call__(self):
    """Handle the 'register contact as portal member' form.

    On submit: validates username/email/password, creates the member via
    portal_registration, links the member to this contact, grants Client
    Owner role and group membership where applicable, and optionally sends
    the registration mail.  Always ends by re-rendering the template.
    """
    if self.request.form.has_key("submitted"):
        # Local helper: record an error portal message (prefixed with the
        # offending field name, if any) and re-render the form.
        def error(field, message):
            if field:
                message = "%s: %s" % (field, message)
            self.context.plone_utils.addPortalMessage(message, 'error')
            return self.template()
        form = self.request.form
        contact = self.context
        password = safe_unicode(form.get('password', '')).encode('utf-8')
        username = safe_unicode(form.get('username', '')).encode('utf-8')
        confirm = form.get('confirm', '')
        email = safe_unicode(form.get('email', '')).encode('utf-8')
        if not username:
            return error('username', PMF("Input is required but not given."))
        if not email:
            return error('email', PMF("Input is required but not given."))
        reg_tool = self.context.portal_registration
        properties = self.context.portal_properties.site_properties
        ## if properties.validate_email:
        ##     password = reg_tool.generatePassword()
        ## else:
        if password != confirm:
            return error('password', PMF("Passwords do not match."))
        if not password:
            return error('password', PMF("Input is required but not given."))
        # NOTE(review): with password == confirm already enforced above,
        # an empty confirm implies an empty password, so this branch is
        # effectively unreachable; its message also mismatches the check.
        if not confirm:
            return error('password', PMF("Passwords do not match."))
        if len(password) < 5:
            return error('password', PMF("Passwords must contain at least 5 letters."))
        try:
            reg_tool.addMember(username,
                               password,
                               properties={
                                   'username': username,
                                   'email': email,
                                   'fullname': username
                               })
        # Python 2 syntax; addMember raises ValueError for duplicate or
        # otherwise rejected registrations.
        except ValueError, msg:
            return error(None, msg)
        # Link the freshly created member to this contact object.
        contact.setUsername(username)
        contact.setEmailAddress(email)
        # If we're being created in a Client context, then give
        # the contact an Owner local role on client.
        if contact.aq_parent.portal_type == 'Client':
            contact.aq_parent.manage_setLocalRoles(username, ['Owner', ])
            if hasattr(aq_base(contact.aq_parent), 'reindexObjectSecurity'):
                contact.aq_parent.reindexObjectSecurity()
            # add user to Clients group
            group = self.context.portal_groups.getGroupById('Clients')
            group.addMember(username)
        # Additional groups for LabContact users.
        # not required (not available for client Contact)
        if 'groups' in self.request and self.request['groups']:
            groups = self.request['groups']
            if not type(groups) in (list, tuple):
                groups = [groups, ]
            for group in groups:
                group = self.portal_groups.getGroupById(group)
                group.addMember(username)
        contact.reindexObject()
        # Optionally send the registration notification; an SMTP failure
        # aborts the transaction so the member creation is rolled back.
        if properties.validate_email or self.request.get('mail_me', 0):
            try:
                reg_tool.registeredNotify(username)
            except:
                import transaction
                transaction.abort()
                return error(None, PMF("SMTP server disconnected."))
        message = PMF("Member registered.")
        self.context.plone_utils.addPortalMessage(message, 'info')
    return self.template()
def Title(self): """ Return the Product as title """ return safe_unicode(self.getSampleName()).encode('utf-8')