Example #1
 def get_custom_fields(self):
     """ Returns a dictionary with custom fields to be rendered after
         header_table with this structure:
         {<fieldid>:{title:<title>, value:<html>}
     """
     custom = {}
     ar = self.context
     workflow = getToolByName(self.context, 'portal_workflow')
     # If this is a retracted AR, show the link to the child AR and a warning msg
     if workflow.getInfoFor(ar, 'review_state') == 'invalid':
         childar = hasattr(ar, 'getChildAnalysisRequest') \
                     and ar.getChildAnalysisRequest() or None
         anchor = childar and (
             "<a href='%s'>%s</a>" %
             (childar.absolute_url(), childar.getRequestID())) or None
         if anchor:
             custom['ChildAR'] = {
                 'title': t(_("AR for retested results")),
                 'value': anchor
             }
     # If this is an AR automatically generated due to a Retraction, show its
     # parent AR information
     if hasattr(ar, 'getParentAnalysisRequest') \
         and ar.getParentAnalysisRequest():
         par = ar.getParentAnalysisRequest()
         anchor = "<a href='%s'>%s</a>" % (par.absolute_url(),
                                           par.getRequestID())
         custom['ParentAR'] = {
             'title': t(_("Invalid AR retested")),
             'value': anchor
         }
     return custom
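
All of these snippets share the same i18n idiom: `_()` builds a zope.i18n message id and `t()` renders it to a plain string for the current request. Below is a minimal sketch of that idiom, assuming the 'bika' translation domain; the `t()` shown here is a simplified stand-in for the project's own helper, not its exact implementation.

from zope.i18n import translate
from zope.i18nmessageid import MessageFactory

_ = MessageFactory('bika')  # message ids in the (assumed) 'bika' domain


def t(msg, request=None):
    # Simplified stand-in for the project's t() helper: translate a zope.i18n
    # Message (or plain string) for the given request, falling back to the
    # message's default text when no translation catalog is available.
    return translate(msg, context=request)


title = t(_("AR for retested results"))  # the string stored in custom['ChildAR']['title']
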
Example #2
 def __call__(self, result=None, specification=None, **kwargs):
     translate = self.context.translate
     path = '++resource++bika.lims.images'
     alerts = {}
     # We look for IResultOutOfRange adapters for this object
     for name, adapter in getAdapters((self.context, ), IResultOutOfRange):
         ret = adapter(result)
         if not ret:
             continue
         spec = ret["spec_values"]
         if spec:
             rngstr = " ({0} {1}, {2}, {3})".format(t(_("min")),
                                                    str(spec['min']),
                                                    t(_("max")),
                                                    str(spec['max']))
         else:
             rngstr = ""
         if ret["out_of_range"]:
             if ret["acceptable"]:
                 message = "{0}{1}".format(t(_('Result in shoulder range')),
                                           rngstr)
                 icon = path + '/warning.png'
             else:
                 message = "{0}{1}".format(t(_('Result out of range')),
                                           rngstr)
                 icon = path + '/exclamation.png'
             alerts[self.context.UID()] = [
                 {
                     'icon': icon,
                     'msg': message,
                     'field': 'Result',
                 },
             ]
         break
     return alerts
Example #3
File: menu.py  Project: nafwa03/olims
    def extra(self):
        workflow = self.tools.workflow()
        state = self.context_state.workflow_state()
        stateTitle = self._currentStateTitle()

        if workflow.getInfoFor(self.context, 'cancellation_state', '') == 'cancelled':
            title2 = t(_('Cancelled'))
            # cater for bika_one_state_workflow (always Active)
            if not stateTitle or \
               workflow.getInfoFor(self.context, 'review_state', '') == 'active':
                stateTitle = t(_('Cancelled'))
            else:
                stateTitle = "%s (%s)" % (stateTitle, _(title2))
            return {'id': 'plone-contentmenu-workflow',
                    'class': 'state-cancelled',
                    'state': state,
                    'stateTitle': stateTitle, }
        elif workflow.getInfoFor(self.context, 'inactive_state', '') == 'inactive':
            title2 = t(_('Dormant'))
            # cater for bika_one_state_workflow (always Active)
            if not stateTitle or \
               (workflow.getInfoFor(self.context, 'review_state', '') in
                                                    ('active', 'current')):
                stateTitle = t(_('Dormant'))
            else:
                stateTitle = "%s (%s)" % (stateTitle, _(title2))
            return {'id': 'plone-contentmenu-workflow',
                    'class': 'state-inactive',
                    'state': state,
                    'stateTitle': stateTitle, }
        else:
            return {'id': 'plone-contentmenu-workflow',
                    'class': 'state-%s' % state,
                    'state': state,
                    'stateTitle': stateTitle, }
Example #4
File: analysis.py  Project: nafwa03/olims
 def __call__(self, result=None, **kwargs):
     translate = self.context.translate
     path = '++resource++bika.lims.images'
     alerts = {}
     # We look for IResultOutOfRange adapters for this object
     for name, adapter in getAdapters((self.context, ), IResultOutOfRange):
         ret = adapter(result)
         if not ret:
             continue
         spec = ret["spec_values"]
         rngstr = "{0} {1}, {2} {3}".format(
             t(_("min")), str(spec['min']),
             t(_("max")), str(spec['max']))
         if ret["out_of_range"]:
             if ret["acceptable"]:
                 message = "{0} ({1})".format(
                     t(_('Result in shoulder range')),
                     rngstr
                 )
                 icon = path + '/warning.png'
             else:
                 message = "{0} ({1})".format(
                     t(_('Result out of range')),
                     rngstr
                 )
                 icon = path + '/exclamation.png'
             alerts[self.context.UID()] = [
                 {
                     'icon': icon,
                     'msg': message,
                     'field': 'Result',
                 },
             ]
             break
     return alerts
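
Example #2 and Example #4 only consume IResultOutOfRange adapters; they expect each adapter to return either a falsy value or a dict with 'out_of_range', 'acceptable' and 'spec_values' keys. Below is a toy adapter sketch under those assumptions; the interface is declared locally as a placeholder (in the project it is assumed to live in bika.lims.interfaces), and the spec values are invented for illustration.

from zope.component import getAdapters, provideAdapter
from zope.interface import Interface, implementer


class IResultOutOfRange(Interface):
    """Placeholder for the project's real interface (import path assumed)."""


@implementer(IResultOutOfRange)
class DummyRangeChecker(object):
    # Adapts an analysis-like object and checks a hard-coded spec.
    def __init__(self, analysis):
        self.analysis = analysis

    def __call__(self, result=None, **kwargs):
        spec = {'min': 5, 'max': 10}
        try:
            value = float(result)
        except (TypeError, ValueError):
            return {}  # no verdict -> the calling view skips this adapter
        return {
            'out_of_range': not (spec['min'] <= value <= spec['max']),
            'acceptable': False,  # True would mean "in shoulder range"
            'spec_values': spec,
        }


provideAdapter(DummyRangeChecker, adapts=(Interface,),
               provides=IResultOutOfRange, name='dummy')

for name, adapter in getAdapters((object(),), IResultOutOfRange):
    ret = adapter(result='12')
    # ret -> {'out_of_range': True, 'acceptable': False, 'spec_values': {...}}
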
Example #5
File: view.py  Project: nafwa03/olims
 def get_custom_fields(self):
     """ Returns a dictionary with custom fields to be rendered after
         header_table with this structure:
         {<fieldid>:{title:<title>, value:<html>}
     """
     custom = {}
     ar = self.context
     workflow = getToolByName(self.context, 'portal_workflow')
     # If this is a retracted AR, show the link to the child AR and a warning msg
     if workflow.getInfoFor(ar, 'review_state') == 'invalid':
         childar = hasattr(ar, 'getChildAnalysisRequest') \
                     and ar.getChildAnalysisRequest() or None
         anchor = childar and ("<a href='%s'>%s</a>" % (childar.absolute_url(), childar.getRequestID())) or None
         if anchor:
             custom['ChildAR'] = {
                 'title': t(_("AR for retested results")),
                 'value': anchor
             }
     # If this is an AR automatically generated due to a Retraction, show its
     # parent AR information
     if hasattr(ar, 'getParentAnalysisRequest') \
         and ar.getParentAnalysisRequest():
         par = ar.getParentAnalysisRequest()
         anchor = "<a href='%s'>%s</a>" % (par.absolute_url(), par.getRequestID())
         custom['ParentAR'] = {
             'title': t(_("Invalid AR retested")),
             'value': anchor
         }
     return custom
Example #6
    def render_field_view(self, field):
        fieldname = field.getName()
        field = self.context.Schema()[fieldname]
        ret = {'fieldName': fieldname, 'mode': 'view'}
        try:
            adapter = getAdapter(self.context,
                                 interface=IHeaderTableFieldRenderer,
                                 name=fieldname)
        except ComponentLookupError:
            adapter = None
        if adapter:
            ret = {'fieldName': fieldname,
                   'mode': 'structure',
                   'html': adapter(field)}
        else:
            if field.getType().find("ool") > -1:
                value = field.get(self.context)
                ret = {'fieldName': fieldname,
                       'mode': 'structure',
                       'html': t(_('Yes')) if value else t(_('No'))
                }
            elif field.getType().find("Reference") > -1:
                # Prioritize method retrieval over schema's field
                targets = None
                if hasattr(self.context, 'get%s' % fieldname):
                    fieldaccessor = getattr(self.context, 'get%s' % fieldname)
                    if callable(fieldaccessor):
                        targets = fieldaccessor()
                if not targets:
                    targets = field.get(self.context)

                if targets:
                    if not type(targets) == list:
                        targets = [targets,]
                    sm = getSecurityManager()
                    if all([sm.checkPermission(view, ta) for ta in targets]):
                        a = ["<a href='%s'>%s</a>" % (target.absolute_url(),
                                                      target.Title())
                             for target in targets]
                        ret = {'fieldName': fieldname,
                               'mode': 'structure',
                               'html': ", ".join(a)}
                    else:
                        ret = {'fieldName': fieldname,
                               'mode': 'structure',
                               'html': ", ".join([ta.Title() for ta in targets])}
                else:
                    ret = {'fieldName': fieldname,
                           'mode': 'structure',
                           'html': ''}
            elif field.getType().lower().find('datetime') > -1:
                value = field.get(self.context)
                ret = {'fieldName': fieldname,
                       'mode': 'structure',
                       'html': self.ulocalized_time(value, long_format=True)
                }
        return ret
Example #7
        def make_title(o):
            # the javascript uses these strings to decide if it should
            # check the blank or hazardous checkboxes when a reference
            # definition is selected (./js/referencesample.js)
            if not o:
                return ''
            title = _u(o.Title())
            if o.getBlank():
                title += " %s" % t(_('(Blank)'))
            if o.getHazardous():
                title += " %s" % t(_('(Hazardous)'))

            return title
Example #8
        def make_title(o):
            # the javascript uses these strings to decide if it should
            # check the blank or hazardous checkboxes when a reference
            # definition is selected (./js/referencesample.js)
            if not o:
                return ''
            title = _u(o.Title())
            if o.getBlank():
                title += " %s" % t(_('(Blank)'))
            if o.getHazardous():
                title += " %s" % t(_('(Hazardous)'))

            return title
Example #9
 def __call__(self):
     ar = self.context
     workflow = getToolByName(ar, 'portal_workflow')
     # If this is a retracted AR, show the link to the child AR and a warning msg
     if workflow.getInfoFor(ar, 'review_state') == 'invalid':
         childar = hasattr(ar, 'getChildAnalysisRequest') \
                     and ar.getChildAnalysisRequest() or None
         childid = childar and childar.getRequestID() or None
         message = _(
             'This Analysis Request has been withdrawn and is shown '
             'for trace-ability purposes only. Retest: '
             '${retest_child_id}.',
             mapping={'retest_child_id': safe_unicode(childid) or ''})
         self.context.plone_utils.addPortalMessage(message, 'warning')
     # If this is an AR automatically generated due to a Retraction, show its
     # parent AR information
     if hasattr(ar, 'getParentAnalysisRequest') \
         and ar.getParentAnalysisRequest():
         par = ar.getParentAnalysisRequest()
         message = _(
             'This Analysis Request has been '
             'generated automatically due to '
             'the retraction of the Analysis '
             'Request ${retracted_request_id}.',
             mapping={
                 'retracted_request_id': safe_unicode(par.getRequestID())
             })
         self.context.plone_utils.addPortalMessage(t(message), 'info')
     template = LogView.__call__(self)
     return template
Example #10
File: log.py  Project: nafwa03/olims
    def __init__(self, context, request):
        BikaListingView.__init__(self, context, request)

        self.show_sort_column = False
        self.show_select_row = False
        self.show_select_column = False
        self.show_workflow_action_buttons = False
        self.pagesize = 999999

        self.icon = self.portal_url + "/++resource++bika.lims.images/%s_big.png" % \
            context.portal_type.lower()
        self.title = to_utf8(self.context.Title()) + " " + t(_("Log"))
        self.description = ""

        self.columns = {
            'Version': {'title': _('Version'), 'sortable': False},
            'Date': {'title': _('Date'), 'sortable': False},
            'User': {'title': _('User'), 'sortable': False},
            'Action': {'title': _('Action'), 'sortable': False},
            'Description': {'title': _('Description'), 'sortable': False},
        }
        self.review_states = [
            {'id': 'default',
             'title': 'All',
             'contentFilter': {},
             'columns': ['Version',
                         'Date',
                         'User',
                         'Action',
                         'Description']},
        ]
Example #11
File: log.py  Project: nafwa03/olims
 def __call__(self):
     ar = self.context
     workflow = getToolByName(ar, 'portal_workflow')
     # If this is a retracted AR, show the link to the child AR and a warning msg
     if workflow.getInfoFor(ar, 'review_state') == 'invalid':
         childar = hasattr(ar, 'getChildAnalysisRequest') \
                     and ar.getChildAnalysisRequest() or None
         childid = childar and childar.getRequestID() or None
         message = _('This Analysis Request has been withdrawn and is shown '
                       'for trace-ability purposes only. Retest: '
                       '${retest_child_id}.',
                       mapping={'retest_child_id': safe_unicode(childid) or ''})
         self.context.plone_utils.addPortalMessage(message, 'warning')
     # If this is an AR automatically generated due to a Retraction, show its
     # parent AR information
     if hasattr(ar, 'getParentAnalysisRequest') \
         and ar.getParentAnalysisRequest():
         par = ar.getParentAnalysisRequest()
         message = _('This Analysis Request has been '
                     'generated automatically due to '
                     'the retraction of the Analysis '
                     'Request ${retracted_request_id}.',
                     mapping={'retracted_request_id': safe_unicode(par.getRequestID())})
         self.context.plone_utils.addPortalMessage(
             t(message), 'info')
     template = LogView.__call__(self)
     return template
Example #12
 def __call__(self, result=None, specification=None, **kwargs):
     workflow = getToolByName(self.context, 'portal_workflow')
     astate = workflow.getInfoFor(self.context, 'review_state')
     if astate == 'retracted':
         return {}
     result = self.context.getResult() if result is None else result
     alerts = {}
     path = '++resource++bika.lims.images'
     uid = self.context.UID()
     try:
         indet = result.startswith("<") or result.startswith(">")
     except AttributeError:
         indet = False
     if indet:
         alert = {
             'field': 'Result',
             'icon': path + '/exclamation.png',
             'msg': t(_("Indeterminate result"))
         }
         if uid in alerts:
             alerts[uid].append(alert)
         else:
             alerts[uid] = [
                 alert,
             ]
     return alerts
Example #13
File: add.py  Project: nafwa03/olims
def ajax_form_error(errors, field=None, arnum=None, message=None):
    if not message:
        message = t(PMF('Input is required but no input given.'))
    if (arnum or field):
        error_key = ' %s.%s' % (int(arnum) + 1, field or '')
    else:
        error_key = 'Form Error'
    errors[error_key] = message
Example #14
def ajax_form_error(errors, field=None, arnum=None, message=None):
    if not message:
        message = t(PMF('Input is required but no input given.'))
    if (arnum or field):
        error_key = ' %s.%s' % (int(arnum) + 1, field or '')
    else:
        error_key = 'Form Error'
    errors[error_key] = message
Example #15
    def get_workflow_actions(self):
        """ Compile a list of possible workflow transitions for items
            in this Table.
        """

        # cbb return empty list if we are unable to select items
        if not self.show_select_column:
            return []

        workflow = getToolByName(self.context, 'portal_workflow')

        # get all transitions for all items.
        transitions = {}
        actions = []
        for obj in [i.get('obj', '') for i in self.items]:
            obj = hasattr(obj, 'getObject') and obj.getObject() or obj
            for it in workflow.getTransitionsFor(obj):
                transitions[it['id']] = it

        # the list is restricted to and ordered by these transitions.
        if 'transitions' in self.review_state:
            for transition_dict in self.review_state['transitions']:
                if transition_dict['id'] in transitions:
                    actions.append(transitions[transition_dict['id']])
        else:
            actions = transitions.values()

        new_actions = []
        # remove any invalid items with a warning
        for a,action in enumerate(actions):
            if isinstance(action, dict) \
                    and 'id' in action:
                new_actions.append(action)
            else:
                logger.warning("bad action in custom_actions: %s. (complete list: %s)."%(action,actions))

        # and these are removed
        if 'hide_transitions' in self.review_state:
            actions = [a for a in actions
                       if a['id'] not in self.review_state['hide_transitions']]

        # cheat: until workflow_action is abolished, all URLs defined in
        # GS workflow setup will be ignored, and the default will apply.
        # (that means, WorkflowAction-bound URL is called).
        for i, action in enumerate(actions):
            actions[i]['url'] = ''

        # if there is a self.review_state['some_state']['custom_actions'] attribute
        # on the BikaListingView, add these actions to the list.
        if 'custom_actions' in self.review_state:
            for action in self.review_state['custom_actions']:
                if isinstance(action, dict) \
                        and 'id' in action:
                    actions.append(action)

        for a,action in enumerate(actions):
            actions[a]['title'] = t(PMF(actions[a]['id'] + "_transition_title"))
        return actions
Example #16
 def __call__(self):
     uc = getToolByName(self.context, 'uid_catalog')
     if 'copy_form_submitted' not in self.request:
         uids = self.request.form.get('uids', [])
         self.services = []
         for uid in uids:
             proxies = uc(UID=uid)
             if proxies:
                 self.services.append(proxies[0].getObject())
         return self.template()
     else:
         self.savepoint = savepoint()
         sources = self.request.form.get('uids', [])
         titles = self.request.form.get('dst_title', [])
         keywords = self.request.form.get('dst_keyword', [])
         self.created = []
         for i, s in enumerate(sources):
             if not titles[i]:
                 message = _('Validation failed: title is required')
                 self.context.plone_utils.addPortalMessage(message, 'info')
                 self.savepoint.rollback()
                 self.created = []
                 break
             if not keywords[i]:
                 message = _('Validation failed: keyword is required')
                 self.context.plone_utils.addPortalMessage(message, 'info')
                 self.savepoint.rollback()
                 self.created = []
                 break
             title = self.copy_service(s, titles[i], keywords[i])
             if title:
                 self.created.append(title)
         if len(self.created) > 1:
             message = t(
                 _('${items} were successfully created.',
                   mapping={'items':
                            safe_unicode(', '.join(self.created))}))
         elif len(self.created) == 1:
             message = t(
                 _('${item} was successfully created.',
                   mapping={'item': safe_unicode(self.created[0])}))
         else:
             message = _('No new items were created.')
         self.context.plone_utils.addPortalMessage(message, 'info')
         self.request.response.redirect(self.context.absolute_url())
Example #17
    def folderitems(self):
        items = BikaListingView.folderitems(self)
        valid = [c.UID() for c in self.context.getValidCertifications()]
        latest = self.context.getLatestValidCertification()
        latest = latest.UID() if latest else ''
        for x in range(len(items)):
            if not items[x].has_key('obj'): continue
            obj = items[x]['obj']
            # items[x]['getAgency'] = obj.getAgency()
            items[x]['getDate'] = self.ulocalized_time(obj.getDate(),
                                                       long_format=0)
            items[x]['getValidFrom'] = self.ulocalized_time(obj.getValidFrom(),
                                                            long_format=0)
            items[x]['getValidTo'] = self.ulocalized_time(obj.getValidTo(),
                                                          long_format=0)
            items[x]['replace']['Title'] = "<a href='%s'>%s</a>" % \
                 (items[x]['url'], items[x]['Title'])
            if obj.getInternal() == True:
                items[x]['replace']['getAgency'] = ""
                items[x]['state_class'] = '%s %s' % (items[x]['state_class'],
                                                     'internalcertificate')

            items[x]['getDocument'] = ""
            items[x]['replace']['getDocument'] = ""
            try:
                doc = obj.getDocument()
                if doc and doc.get_size() > 0:
                    anchor = "<a href='%s/at_download/Document'>%s</a>" % \
                            (obj.absolute_url(), doc.filename)
                    items[x]['getDocument'] = doc.filename
                    items[x]['replace']['getDocument'] = anchor
            except:
                # POSKeyError: 'No blob file'
                # Show the record, but not the link
                title = _('Not available')
                items[x]['getDocument'] = _('Not available')
                items[x]['replace']['getDocument'] = _('Not available')

            uid = obj.UID()
            if uid in valid:
                # Valid calibration.
                items[x]['state_class'] = '%s %s' % (items[x]['state_class'],
                                                     'active')
            elif uid == latest:
                # Latest valid certificate
                img = "<img title='%s' src='%s/++resource++bika.lims.images/exclamation.png'/>&nbsp;" \
                % (t(_('Out of date')), self.portal_url)
                items[x]['replace']['getValidTo'] = '%s %s' % (
                    items[x]['getValidTo'], img)
                items[x]['state_class'] = '%s %s' % (items[x]['state_class'],
                                                     'inactive outofdate')
            else:
                # Old and further calibrations
                items[x]['state_class'] = '%s %s' % (items[x]['state_class'],
                                                     'inactive')

        return items
Example #18
 def msg(self, array, msg, numline=None, line=None, mapping={}):
     prefix = ''
     suffix = ''
     msg = t(_(safe_unicode(msg), mapping=mapping))
     if numline:
         prefix = "[%s] " % numline
     if line:
         suffix = ": %s" % line
     array.append(prefix + msg + suffix)
Example #19
File: supplier.py  Project: pureboy8/OLiMS
    def folderitems(self):
        items = BikaListingView.folderitems(self)
        outitems = []
        workflow = getToolByName(self.context, 'portal_workflow')
        for x in range(len(items)):
            if not items[x].has_key('obj'): continue
            obj = items[x]['obj']
            if workflow.getInfoFor(obj, 'review_state') == 'current':
                # Check expiry date
                from dependencies.dependency import DT2dt
                from dependencies.dependency import datetime
                expirydate = DT2dt(obj.getExpiryDate()).replace(tzinfo=None)
                if (datetime.today() > expirydate):
                    workflow.doActionFor(obj, 'expire')
                    items[x]['review_state'] = 'expired'
                    items[x]['obj'] = obj
                    if 'review_state' in self.contentFilter \
                        and self.contentFilter['review_state'] == 'current':
                        continue
            items[x]['ID'] = obj.id
            items[x]['Manufacturer'] = obj.getReferenceManufacturer() and \
                 obj.getReferenceManufacturer().Title() or ''
            items[x]['Definition'] = obj.getReferenceDefinition() and \
                 obj.getReferenceDefinition().Title() or ''
            items[x]['DateSampled'] = self.ulocalized_time(
                obj.getDateSampled())
            items[x]['DateReceived'] = self.ulocalized_time(
                obj.getDateReceived())
            items[x]['DateOpened'] = self.ulocalized_time(obj.getDateOpened())
            items[x]['ExpiryDate'] = self.ulocalized_time(obj.getExpiryDate())

            after_icons = ''
            if obj.getBlank():
                after_icons += "<img\
                src='%s/++resource++bika.lims.images/blank.png' \
                title='%s'>" % (self.portal_url, t(_('Blank')))
            if obj.getHazardous():
                after_icons += "<img\
                src='%s/++resource++bika.lims.images/hazardous.png' \
                title='%s'>" % (self.portal_url, t(_('Hazardous')))
            items[x]['replace']['ID'] = "<a href='%s/base_view'>%s</a>&nbsp;%s" % \
                 (items[x]['url'], items[x]['ID'], after_icons)
            outitems.append(items[x])
        return outitems
Example #20
 def __call__(self):
     uc = getToolByName(self.context, 'uid_catalog')
     if 'copy_form_submitted' not in self.request:
         uids = self.request.form.get('uids', [])
         self.services = []
         for uid in uids:
             proxies = uc(UID=uid)
             if proxies:
                 self.services.append(proxies[0].getObject())
         return self.template()
     else:
         self.savepoint = savepoint()
         sources = self.request.form.get('uids', [])
         titles = self.request.form.get('dst_title', [])
         keywords = self.request.form.get('dst_keyword', [])
         self.created = []
         for i, s in enumerate(sources):
             if not titles[i]:
                 message = _('Validation failed: title is required')
                 self.context.plone_utils.addPortalMessage(message, 'info')
                 self.savepoint.rollback()
                 self.created = []
                 break
             if not keywords[i]:
                 message = _('Validation failed: keyword is required')
                 self.context.plone_utils.addPortalMessage(message, 'info')
                 self.savepoint.rollback()
                 self.created = []
                 break
             title = self.copy_service(s, titles[i], keywords[i])
             if title:
                 self.created.append(title)
         if len(self.created) > 1:
             message = t(_(
                 '${items} were successfully created.',
                 mapping={'items': safe_unicode(', '.join(self.created))}))
         elif len(self.created) == 1:
             message = t(_(
                 '${item} was successfully created.',
                 mapping={'item': safe_unicode(self.created[0])}))
         else:
             message = _('No new items were created.')
         self.context.plone_utils.addPortalMessage(message, 'info')
         self.request.response.redirect(self.context.absolute_url())
Example #21
    def extra(self):
        workflow = self.tools.workflow()
        state = self.context_state.workflow_state()
        stateTitle = self._currentStateTitle()

        if workflow.getInfoFor(self.context, 'cancellation_state',
                               '') == 'cancelled':
            title2 = t(_('Cancelled'))
            # cater for bika_one_state_workflow (always Active)
            if not stateTitle or \
               workflow.getInfoFor(self.context, 'review_state', '') == 'active':
                stateTitle = t(_('Cancelled'))
            else:
                stateTitle = "%s (%s)" % (stateTitle, _(title2))
            return {
                'id': 'plone-contentmenu-workflow',
                'class': 'state-cancelled',
                'state': state,
                'stateTitle': stateTitle,
            }
        elif workflow.getInfoFor(self.context, 'inactive_state',
                                 '') == 'inactive':
            title2 = t(_('Dormant'))
            # cater for bika_one_state_workflow (always Active)
            if not stateTitle or \
               (workflow.getInfoFor(self.context, 'review_state', '') in
                                                    ('active', 'current')):
                stateTitle = t(_('Dormant'))
            else:
                stateTitle = "%s (%s)" % (stateTitle, _(title2))
            return {
                'id': 'plone-contentmenu-workflow',
                'class': 'state-inactive',
                'state': state,
                'stateTitle': stateTitle,
            }
        else:
            return {
                'id': 'plone-contentmenu-workflow',
                'class': 'state-%s' % state,
                'state': state,
                'stateTitle': stateTitle,
            }
Example #22
File: supplier.py  Project: nafwa03/olims
    def folderitems(self):
        items = BikaListingView.folderitems(self)
        outitems = []
        workflow = getToolByName(self.context, 'portal_workflow')
        for x in range(len(items)):
            if not items[x].has_key('obj'): continue
            obj = items[x]['obj']
            if workflow.getInfoFor(obj, 'review_state') == 'current':
                # Check expiry date
                from dependencies.dependency import DT2dt
                from dependencies.dependency import datetime
                expirydate = DT2dt(obj.getExpiryDate()).replace(tzinfo=None)
                if (datetime.today() > expirydate):
                    workflow.doActionFor(obj, 'expire')
                    items[x]['review_state'] = 'expired'
                    items[x]['obj'] = obj
                    if 'review_state' in self.contentFilter \
                        and self.contentFilter['review_state'] == 'current':
                        continue
            items[x]['ID'] = obj.id
            items[x]['Manufacturer'] = obj.getReferenceManufacturer() and \
                 obj.getReferenceManufacturer().Title() or ''
            items[x]['Definition'] = obj.getReferenceDefinition() and \
                 obj.getReferenceDefinition().Title() or ''
            items[x]['DateSampled'] = self.ulocalized_time(obj.getDateSampled())
            items[x]['DateReceived'] = self.ulocalized_time(obj.getDateReceived())
            items[x]['DateOpened'] = self.ulocalized_time(obj.getDateOpened())
            items[x]['ExpiryDate'] = self.ulocalized_time(obj.getExpiryDate())

            after_icons = ''
            if obj.getBlank():
                after_icons += "<img\
                src='%s/++resource++bika.lims.images/blank.png' \
                title='%s'>" % (self.portal_url, t(_('Blank')))
            if obj.getHazardous():
                after_icons += "<img\
                src='%s/++resource++bika.lims.images/hazardous.png' \
                title='%s'>" % (self.portal_url, t(_('Hazardous')))
            items[x]['replace']['ID'] = "<a href='%s/base_view'>%s</a>&nbsp;%s" % \
                 (items[x]['url'], items[x]['ID'], after_icons)
            outitems.append(items[x])
        return outitems
Example #23
def getDataInterfaces(context):
    """ Return the current list of data interfaces
    """
    from lims.exportimport import instruments
    exims = []
    for exim_id in instruments.__all__:
        exim = instruments.getExim(exim_id)
        exims.append((exim_id, exim.title))
    exims.sort(lambda x, y: cmp(x[1].lower(), y[1].lower()))
    exims.insert(0, ('', t(_('None'))))
    return DisplayList(exims)
Example #24
 def _getAvailableMethods(self):
     """ Returns the available (active) methods.
         One method can be done by multiple instruments, but one
         instrument can only be used in one method.
     """
     bsc = getToolByName(self, 'bika_setup_catalog')
     items = [(c.UID, c.Title) \
             for c in bsc(portal_type='Method',
                          inactive_state = 'active')]
     items.sort(lambda x,y:cmp(x[1], y[1]))
     items.insert(0, ('', t(_('None'))))
     return DisplayList(items)
Example #25
 def _getCalculations(self):
     """ Returns a DisplayList with the available Calculations
         registered in Bika-Setup. Used to fill the Calculation
         ReferenceWidget.
     """
     bsc = getToolByName(self, 'bika_setup_catalog')
     items = [(c.UID, c.Title) \
             for c in bsc(portal_type='Calculation',
                          inactive_state = 'active')]
     items.sort(lambda x, y: cmp(x[1], y[1]))
     items.insert(0, ('', t(_('None'))))
     return DisplayList(list(items))
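
The `items.sort(lambda x, y: cmp(x[1], y[1]))` calls in the three examples above are Python 2 idioms; `cmp` and comparison-function sorting are gone in Python 3. A key-based equivalent, shown with made-up items rather than code from the project:

items = [('uid-2', 'Titration'), ('uid-1', 'Gravimetry')]
items.sort(key=lambda item: item[1].lower())  # sort by title, case-insensitively
items.insert(0, ('', 'None'))  # leading empty choice; the project uses t(_('None')) here
# items -> [('', 'None'), ('uid-1', 'Gravimetry'), ('uid-2', 'Titration')]
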
Example #26
File: method.py  Project: nafwa03/olims
 def _getCalculations(self):
     """ Returns a DisplayList with the available Calculations
         registered in Bika-Setup. Used to fill the Calculation
         ReferenceWidget.
     """
     bsc = getToolByName(self, 'bika_setup_catalog')
     items = [(c.UID, c.Title) \
             for c in bsc(portal_type='Calculation',
                          inactive_state = 'active')]
     items.sort(lambda x,y: cmp(x[1], y[1]))
     items.insert(0, ('', t(_('None'))))
     return DisplayList(list(items))
Example #27
File: invoice.py  Project: nafwa03/olims
    def emailInvoice(self, templateHTML, to=[]):
        """
        Send the invoice via email.
        :param templateHTML: The invoice template in HTML, ready to be send.
        :param to: A list with the addresses to send the invoice.
        """
        ar = self.aq_parent
        # SMTP errors are silently ignored if server is in debug mode
#         debug_mode = App.config.getConfiguration().debug_mode "By Yasir"
        # Useful variables
        lab = ar.bika_setup.laboratory
        # Compose and send email.
        subject = t(_('Invoice')) + ' ' + ar.getInvoice().getId()
        mime_msg = MIMEMultipart('related')
        mime_msg['Subject'] = subject
        mime_msg['From'] = formataddr(
            (encode_header(lab.getName()), lab.getEmailAddress()))
        mime_msg.preamble = 'This is a multi-part MIME message.'
        msg_txt_t = MIMEText(templateHTML.encode('utf-8'), _subtype='html')
        mime_msg.attach(msg_txt_t)

        # Build the responsible's addresses
        mngrs = ar.getResponsible()
        for mngrid in mngrs['ids']:
            name = mngrs['dict'][mngrid].get('name', '')
            email = mngrs['dict'][mngrid].get('email', '')
            if (email != ''):
                to.append(formataddr((encode_header(name), email)))
        # Build the client's address
        caddress = ar.aq_parent.getEmailAddress()
        cname = ar.aq_parent.getName()
        if (caddress != ''):
                to.append(formataddr((encode_header(cname), caddress)))
        if len(to) > 0:
            # Send the emails
            mime_msg['To'] = ','.join(to)
            try:
                host = getToolByName(ar, 'MailHost')
                host.send(mime_msg.as_string(), immediate=True)
            except SMTPServerDisconnected as msg:
                pass
                if not debug_mode:
                    raise SMTPServerDisconnected(msg)
            except SMTPRecipientsRefused as msg:
                raise WorkflowException(str(msg))
Example #28
File: invoice.py  Project: pureboy8/OLiMS
    def emailInvoice(self, templateHTML, to=[]):
        """
        Send the invoice via email.
        :param templateHTML: The invoice template in HTML, ready to be send.
        :param to: A list with the addresses to send the invoice.
        """
        ar = self.aq_parent
        # SMTP errors are silently ignored if server is in debug mode
        #         debug_mode = App.config.getConfiguration().debug_mode "By Yasir"
        # Useful variables
        lab = ar.bika_setup.laboratory
        # Compose and send email.
        subject = t(_('Invoice')) + ' ' + ar.getInvoice().getId()
        mime_msg = MIMEMultipart('related')
        mime_msg['Subject'] = subject
        mime_msg['From'] = formataddr(
            (encode_header(lab.getName()), lab.getEmailAddress()))
        mime_msg.preamble = 'This is a multi-part MIME message.'
        msg_txt_t = MIMEText(templateHTML.encode('utf-8'), _subtype='html')
        mime_msg.attach(msg_txt_t)

        # Build the responsible's addresses
        mngrs = ar.getResponsible()
        for mngrid in mngrs['ids']:
            name = mngrs['dict'][mngrid].get('name', '')
            email = mngrs['dict'][mngrid].get('email', '')
            if (email != ''):
                to.append(formataddr((encode_header(name), email)))
        # Build the client's address
        caddress = ar.aq_parent.getEmailAddress()
        cname = ar.aq_parent.getName()
        if (caddress != ''):
            to.append(formataddr((encode_header(cname), caddress)))
        if len(to) > 0:
            # Send the emails
            mime_msg['To'] = ','.join(to)
            try:
                host = getToolByName(ar, 'MailHost')
                host.send(mime_msg.as_string(), immediate=True)
            except SMTPServerDisconnected as msg:
                pass
                if not debug_mode:
                    raise SMTPServerDisconnected(msg)
            except SMTPRecipientsRefused as msg:
                raise WorkflowException(str(msg))
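
In both invoice examples the `debug_mode` lookup is commented out ("By Yasir") while the name is still referenced in the SMTPServerDisconnected handler, so that branch would raise a NameError before it could re-raise. A guarded sketch of restoring the lookup, assuming Zope's App.config module is importable in this environment:

try:
    from App.config import getConfiguration
    debug_mode = getConfiguration().debug_mode  # True when Zope runs in debug/foreground mode
except ImportError:
    debug_mode = False  # conservative default outside a Zope process
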
Example #29
File: log.py  Project: pureboy8/OLiMS
    def __init__(self, context, request):
        BikaListingView.__init__(self, context, request)

        self.show_sort_column = False
        self.show_select_row = False
        self.show_select_column = False
        self.show_workflow_action_buttons = False
        self.pagesize = 999999

        self.icon = self.portal_url + "/++resource++bika.lims.images/%s_big.png" % \
            context.portal_type.lower()
        self.title = to_utf8(self.context.Title()) + " " + t(_("Log"))
        self.description = ""

        self.columns = {
            'Version': {
                'title': _('Version'),
                'sortable': False
            },
            'Date': {
                'title': _('Date'),
                'sortable': False
            },
            'User': {
                'title': _('User'),
                'sortable': False
            },
            'Action': {
                'title': _('Action'),
                'sortable': False
            },
            'Description': {
                'title': _('Description'),
                'sortable': False
            },
        }
        self.review_states = [
            {
                'id': 'default',
                'title': 'All',
                'contentFilter': {},
                'columns':
                ['Version', 'Date', 'User', 'Action', 'Description']
            },
        ]
Example #30
File: __init__.py  Project: nafwa03/olims
 def __call__(self, context):
     site = getSite()
     request = aq_get(site, 'REQUEST', None)
     items = []
     wf = site.portal_workflow
     for folder in self.folders:
         folder = site.restrictedTraverse(folder)
         for portal_type in self.portal_types:
             objects = list(folder.objectValues(portal_type))
             objects = [o for o in objects if
                        wf.getInfoFor(o, 'inactive_state') == 'active']
             if not objects:
                 continue
             objects.sort(lambda x, y: cmp(x.Title().lower(),
                                           y.Title().lower()))
             xitems = [(t(item.Title()), item.Title()) for item in objects]
             xitems = [SimpleTerm(i[1], i[1], i[0]) for i in xitems]
             items += xitems
     return SimpleVocabulary(items)
Example #31
File: __init__.py  Project: nafwa03/olims
    def __call__(self, context):
        portal = getSite()
        wftool = getToolByName(portal, 'portal_workflow', None)
        if wftool is None:
            return SimpleVocabulary([])

        # XXX This is evil. A vocabulary shouldn't be request specific.
        # The sorting should go into a separate widget.

        # we get REQUEST from wftool because context may be an adapter
        request = aq_get(wftool, 'REQUEST', None)

        wf = wftool.getWorkflowById('bika_ar_workflow')
        items = wftool.listWFStatesByTitle(filter_similar=True)
        items_dict = dict([(i[1], t(i[0])) for i in items])
        items_list = [(k, v) for k, v in items_dict.items()]
        items_list.sort(lambda x, y: cmp(x[1], y[1]))
        terms = [SimpleTerm(k, title=u'%s' % v) for k, v in items_list]
        return SimpleVocabulary(terms)
Example #32
File: __init__.py  Project: pureboy8/OLiMS
    def __call__(self, context):
        portal = getSite()
        wftool = getToolByName(portal, 'portal_workflow', None)
        if wftool is None:
            return SimpleVocabulary([])

        # XXX This is evil. A vocabulary shouldn't be request specific.
        # The sorting should go into a separate widget.

        # we get REQUEST from wftool because context may be an adapter
        request = aq_get(wftool, 'REQUEST', None)

        wf = wftool.getWorkflowById('bika_ar_workflow')
        items = wftool.listWFStatesByTitle(filter_similar=True)
        items_dict = dict([(i[1], t(i[0])) for i in items])
        items_list = [(k, v) for k, v in items_dict.items()]
        items_list.sort(lambda x, y: cmp(x[1], y[1]))
        terms = [SimpleTerm(k, title=u'%s' % v) for k, v in items_list]
        return SimpleVocabulary(terms)
Example #33
File: __init__.py  Project: pureboy8/OLiMS
    def __call__(self, **kwargs):
        site = getSite()
        request = aq_get(site, 'REQUEST', None)
        catalog = getToolByName(site, self.catalog)
        if 'inactive_state' in catalog.indexes():
            self.contentFilter['inactive_state'] = 'active'
        if 'cancellation_state' in catalog.indexes():
            self.contentFilter['cancellation_state'] = 'active'
        self.contentFilter.update(**kwargs)
        objects = (b.getObject() for b in catalog(self.contentFilter))

        items = []
        for obj in objects:
            key = obj[self.key]
            key = callable(key) and key() or key
            value = obj[self.value]
            value = callable(value) and value() or value
            items.append((key, t(value)))

        return DisplayList(items)
Example #34
File: __init__.py  Project: nafwa03/olims
    def __call__(self, **kwargs):
        site = getSite()
        request = aq_get(site, 'REQUEST', None)
        catalog = getToolByName(site, self.catalog)
        if 'inactive_state' in catalog.indexes():
            self.contentFilter['inactive_state'] = 'active'
        if 'cancellation_state' in catalog.indexes():
            self.contentFilter['cancellation_state'] = 'active'
        self.contentFilter.update(**kwargs)
        objects = (b.getObject() for b in catalog(self.contentFilter))

        items = []
        for obj in objects:
            key = obj[self.key]
            key = callable(key) and key() or key
            value = obj[self.value]
            value = callable(value) and value() or value
            items.append((key, t(value)))

        return DisplayList(items)
Example #35
File: __init__.py  Project: pureboy8/OLiMS
 def __call__(self, context):
     site = getSite()
     request = aq_get(site, 'REQUEST', None)
     items = []
     wf = site.portal_workflow
     for folder in self.folders:
         folder = site.restrictedTraverse(folder)
         for portal_type in self.portal_types:
             objects = list(folder.objectValues(portal_type))
             objects = [
                 o for o in objects
                 if wf.getInfoFor(o, 'inactive_state') == 'active'
             ]
             if not objects:
                 continue
             objects.sort(lambda x, y: cmp(x.Title().lower(),
                                           y.Title().lower()))
             xitems = [(t(item.Title()), item.Title()) for item in objects]
             xitems = [SimpleTerm(i[1], i[1], i[0]) for i in xitems]
             items += xitems
     return SimpleVocabulary(items)
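
Both vocabulary factories above return a zope.schema SimpleVocabulary whose terms use the raw Title() as value and token and the translated title as the display label. A minimal consumption sketch with invented terms:

from zope.schema.vocabulary import SimpleTerm, SimpleVocabulary

# SimpleTerm(value, token, title) mirrors the (i[1], i[1], i[0]) tuples above.
vocab = SimpleVocabulary([
    SimpleTerm('Water', 'Water', u'Water'),
    SimpleTerm('Soil', 'Soil', u'Soil'),
])
term = vocab.getTerm('Water')  # look up by value
# term.token -> 'Water', term.title -> u'Water'
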
Example #36
File: view.py  Project: nafwa03/olims
 def artemplates(self):
     """ Return applicable client and Lab ARTemplate records
     """
     res = []
     templates = []
     client = self.context.portal_type == 'AnalysisRequest' \
         and self.context.aq_parent or self.context
     for template in client.objectValues("ARTemplate"):
         if isActive(template):
             templates.append((template.Title(), template))
     templates.sort(lambda x, y: cmp(x[0], y[0]))
     res += templates
     templates = []
     for template in self.context.bika_setup.bika_artemplates.objectValues("ARTemplate"):
         if isActive(template):
             lab = t(_('Lab'))
             title = to_utf8(template.Title())
             templates.append(("%s: %s" % (lab, title), template))
     templates.sort(lambda x, y: cmp(x[0], y[0]))
     res += templates
     return res
Example #37
    def __call__(self):
        self.context_actions = {}
        wf = getToolByName(self.context, 'portal_workflow')
        mtool = getToolByName(self.context, 'portal_membership')
        addPortalMessage = self.context.plone_utils.addPortalMessage
        translate = self.context.translate
        # client contact required
        active_contacts = [c for c in self.context.objectValues('Contact') if
                           wf.getInfoFor(c, 'inactive_state', '') == 'active']
        if isActive(self.context):
            if self.context.portal_type == "Client" and not active_contacts:
                msg = _("Client contact required before request may be submitted")
                addPortalMessage(msg)
            else:
                if mtool.checkPermission(AddAnalysisRequest, self.context):
                    self.context_actions[t(_('Add'))] = {
                        'url': self.context.absolute_url() + "/portal_factory/"
                        "AnalysisRequest/Request new analyses/ar_add",
                        'icon': '++resource++bika.lims.images/add.png'}

        return super(ClientAnalysisRequestsView, self).__call__()
Example #38
File: view.py  Project: nafwa03/olims
 def analysisprofiles(self):
     """ Return applicable client and Lab AnalysisProfile records
     """
     res = []
     profiles = []
     client = self.context.portal_type == 'AnalysisRequest' \
         and self.context.aq_parent or self.context
     for profile in client.objectValues("AnalysisProfile"):
         if isActive(profile):
             profiles.append((profile.Title(), profile))
     profiles.sort(lambda x, y: cmp(x[0], y[0]))
     res += profiles
     profiles = []
     for profile in self.context.bika_setup.bika_analysisprofiles.objectValues("AnalysisProfile"):
         if isActive(profile):
             lab = t(_('Lab'))
             title = to_utf8(profile.Title())
             profiles.append(("%s: %s" % (lab, title), profile))
     profiles.sort(lambda x, y: cmp(x[0], y[0]))
     res += profiles
     return res
Example #39
    def __call__(self, result=None, **kwargs):

        translate = self.context.translate
        path = "++resource++bika.lims.images"
        alerts = {}
        for name, adapter in getAdapters((self.context,), IResultOutOfRange):
            ret = adapter(result, **kwargs)
            if not ret:
                continue
            out_of_range = ret["out_of_range"]
            spec = ret["spec_values"]
            if out_of_range:
                message = t(
                    _(
                        "Relative percentage difference, ${variation_here} %, is out of valid range (${variation} %))",
                        mapping={"variation_here": ret["variation_here"], "variation": ret["variation"]},
                    )
                )
                alerts[self.context.UID()] = [{"msg": message, "field": "Result", "icon": path + "/exclamation.png"}]
                break
        return alerts
Example #40
File: calcs.py  Project: nafwa03/olims
 def __call__(self, result=None, specification=None, **kwargs):
     workflow = getToolByName(self.context, 'portal_workflow')
     astate = workflow.getInfoFor(self.context, 'review_state')
     if astate == 'retracted':
         return {}
     result = self.context.getResult() if result is None else result
     alerts = {}
     path = '++resource++bika.lims.images'
     uid = self.context.UID()
     try:
         indet = result.startswith("<") or result.startswith(">")
     except AttributeError:
         indet = False
     if indet:
         alert = {'field': 'Result',
                  'icon': path + '/exclamation.png',
                  'msg': t(_("Indeterminate result"))}
         if uid in alerts:
             alerts[uid].append(alert)
         else:
             alerts[uid] = [alert, ]
     return alerts
Example #41
 def artemplates(self):
     """ Return applicable client and Lab ARTemplate records
     """
     res = []
     templates = []
     client = self.context.portal_type == 'AnalysisRequest' \
         and self.context.aq_parent or self.context
     for template in client.objectValues("ARTemplate"):
         if isActive(template):
             templates.append((template.Title(), template))
     templates.sort(lambda x, y: cmp(x[0], y[0]))
     res += templates
     templates = []
     for template in self.context.bika_setup.bika_artemplates.objectValues(
             "ARTemplate"):
         if isActive(template):
             lab = t(_('Lab'))
             title = to_utf8(template.Title())
             templates.append(("%s: %s" % (lab, title), template))
     templates.sort(lambda x, y: cmp(x[0], y[0]))
     res += templates
     return res
Example #42
 def analysisprofiles(self):
     """ Return applicable client and Lab AnalysisProfile records
     """
     res = []
     profiles = []
     client = self.context.portal_type == 'AnalysisRequest' \
         and self.context.aq_parent or self.context
     for profile in client.objectValues("AnalysisProfile"):
         if isActive(profile):
             profiles.append((profile.Title(), profile))
     profiles.sort(lambda x, y: cmp(x[0], y[0]))
     res += profiles
     profiles = []
     for profile in self.context.bika_setup.bika_analysisprofiles.objectValues(
             "AnalysisProfile"):
         if isActive(profile):
             lab = t(_('Lab'))
             title = to_utf8(profile.Title())
             profiles.append(("%s: %s" % (lab, title), profile))
     profiles.sort(lambda x, y: cmp(x[0], y[0]))
     res += profiles
     return res
Example #43
    def __call__(self, result=None, **kwargs):

        translate = self.context.translate
        path = "++resource++bika.lims.images"
        alerts = {}
        for name, adapter in getAdapters((self.context, ), IResultOutOfRange):
            ret = adapter(result, **kwargs)
            if not ret:
                continue
            out_of_range = ret["out_of_range"]
            spec = ret["spec_values"]
            if out_of_range:
                message = t(_("Relative percentage difference, ${variation_here} %, is out of valid range (${variation} %))",
                      mapping={'variation_here': ret['variation_here'], 'variation': ret['variation'], } ))
                alerts[self.context.UID()] = [
                    {
                        'msg': message,
                        'field': 'Result',
                        'icon': path + '/exclamation.png',
                    },
                ]
                break
        return alerts
Example #44
def pretty_title_or_id(context, obj, empty_value=_marker, domain='plone'):
    _ = MessageFactory(domain)
    title = _pretty_title_or_id(context, obj, empty_value=_marker)
    return t(context.translate(_(safe_unicode(title))))
Example #45
    def __call__(self):

        # get all the data into datalines
        sc = getToolByName(self.context, 'bika_setup_catalog')
        bac = getToolByName(self.context, 'bika_analysis_catalog')
        rc = getToolByName(self.context, 'reference_catalog')
        self.report_content = {}
        parm_lines = {}
        parms = []
        headings = {}
        headings['header'] = _("Analyses per sample type")
        headings['subheader'] = _(
            "Number of analyses requested per sample type")

        count_all = 0
        query = {'portal_type': 'Analysis'}
        client_title = None
        if 'ClientUID' in self.request.form:
            client_uid = self.request.form['ClientUID']
            query['getClientUID'] = client_uid
            client = rc.lookupObject(client_uid)
            client_title = client.Title()
        else:
            client = logged_in_client(self.context)
            if client:
                client_title = client.Title()
                query['getClientUID'] = client.UID()
        if client_title:
            parms.append({
                'title': _('Client'),
                'value': client_title,
                'type': 'text'
            })

        date_query = formatDateQuery(self.context, 'Requested')
        if date_query:
            query['created'] = date_query
            requested = formatDateParms(self.context, 'Requested')
            parms.append({
                'title': _('Requested'),
                'value': requested,
                'type': 'text'
            })

        workflow = getToolByName(self.context, 'portal_workflow')
        if 'bika_analysis_workflow' in self.request.form:
            query['review_state'] = self.request.form['bika_analysis_workflow']
            review_state = workflow.getTitleForStateOnType(
                self.request.form['bika_analysis_workflow'], 'Analysis')
            parms.append({
                'title': _('Status'),
                'value': review_state,
                'type': 'text'
            })

        if 'bika_cancellation_workflow' in self.request.form:
            query['cancellation_state'] = self.request.form[
                'bika_cancellation_workflow']
            cancellation_state = workflow.getTitleForStateOnType(
                self.request.form['bika_cancellation_workflow'], 'Analysis')
            parms.append({
                'title': _('Active'),
                'value': cancellation_state,
                'type': 'text'
            })

        if 'bika_worksheetanalysis_workflow' in self.request.form:
            query['worksheetanalysis_review_state'] = self.request.form[
                'bika_worksheetanalysis_workflow']
            ws_review_state = workflow.getTitleForStateOnType(
                self.request.form['bika_worksheetanalysis_workflow'],
                'Analysis')
            parms.append({
                'title': _('Assigned to worksheet'),
                'value': ws_review_state,
                'type': 'text'
            })

        # and now lets do the actual report lines
        formats = {
            'columns': 2,
            'col_heads': [_('Sample type'),
                          _('Number of analyses')],
            'class': '',
        }

        datalines = []
        for sampletype in sc(portal_type="SampleType",
                             sort_on='sortable_title'):
            query['getSampleTypeUID'] = sampletype.UID
            analyses = bac(query)
            count_analyses = len(analyses)

            dataline = []
            dataitem = {'value': sampletype.Title}
            dataline.append(dataitem)
            dataitem = {'value': count_analyses}

            dataline.append(dataitem)

            datalines.append(dataline)

            count_all += count_analyses

        # footer data
        footlines = []
        footline = []
        footitem = {'value': _('Total'), 'class': 'total_label'}
        footline.append(footitem)
        footitem = {'value': count_all}
        footline.append(footitem)
        footlines.append(footline)

        self.report_content = {
            'headings': headings,
            'parms': parms,
            'formats': formats,
            'datalines': datalines,
            'footings': footlines
        }

        if self.request.get('output_format', '') == 'CSV':
            import csv
            import StringIO
            import datetime

            fieldnames = [
                'Sample Type',
                'Analyses',
            ]
            output = StringIO.StringIO()
            dw = csv.DictWriter(output,
                                extrasaction='ignore',
                                fieldnames=fieldnames)
            dw.writerow(dict((fn, fn) for fn in fieldnames))
            for row in datalines:
                dw.writerow({
                    'Sample Type': row[0]['value'],
                    'Analyses': row[1]['value'],
                })
            report_data = output.getvalue()
            output.close()
            date = datetime.datetime.now().strftime("%Y%m%d%H%M")
            setheader = self.request.RESPONSE.setHeader
            setheader('Content-Type', 'text/csv')
            setheader(
                "Content-Disposition",
                "attachment;filename=\"analysespersampletype_%s.csv\"" % date)
            self.request.RESPONSE.write(report_data)
        else:
            return {
                'report_title': t(headings['header']),
                'report_data': self.template()
            }
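The report above collects everything into report_content (headings, parms, formats, datalines, footings) and, when output_format=CSV is requested, replays the same datalines through csv.DictWriter. Below is a minimal standalone sketch of that CSV step; it is an illustration rather than the Bika LIMS code, it uses Python 3's io.StringIO in place of the Plone response object, and the name datalines_to_csv is made up for the example.

# Minimal sketch (hypothetical helper, Python 3): turn datalines shaped like
# the report above -- a list of rows, each row a list of {'value': ...} cells --
# into the same two-column CSV text.
import csv
import io

def datalines_to_csv(datalines, fieldnames=('Sample Type', 'Analyses')):
    output = io.StringIO()
    writer = csv.DictWriter(output, fieldnames=list(fieldnames),
                            extrasaction='ignore')
    writer.writeheader()
    for row in datalines:
        writer.writerow(dict(zip(fieldnames,
                                 (cell['value'] for cell in row))))
    return output.getvalue()

# Example with made-up data:
print(datalines_to_csv([[{'value': 'Water'}, {'value': 12}],
                        [{'value': 'Soil'}, {'value': 7}]]))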
Example #46
0
    def __init__(self, context, request):
        super(AnalysisRequestsView, self).__init__(context, request)

        request.set('disable_plone.rightcolumn', 1)

        self.catalog = "bika_catalog"
        self.contentFilter = {
            'portal_type': 'AnalysisRequest',
            'sort_on': 'created',
            'sort_order': 'reverse',
            'path': {
                "query": "/",
                "level": 0
            }
        }

        self.context_actions = {}

        if self.context.portal_type == "AnalysisRequestsFolder":
            self.request.set('disable_border', 1)

        if self.view_url.find("analysisrequests") == -1:
            self.view_url = self.view_url + "/analysisrequests"

        self.allow_edit = True

        self.show_sort_column = False
        self.show_select_row = False
        self.show_select_column = True
        self.form_id = "analysisrequests"

        self.icon = self.portal_url + "/++resource++bika.lims.images/analysisrequest_big.png"
        self.title = self.context.translate(_("Analysis Requests"))
        self.description = ""

        SamplingWorkflowEnabled = \
            self.context.bika_setup.getSamplingWorkflowEnabled()

        mtool = getToolByName(self.context, 'portal_membership')
        member = mtool.getAuthenticatedMember()
        user_is_preserver = 'Preserver' in member.getRoles()

        self.columns = {
            'getRequestID': {
                'title': _('Request ID'),
                'index': 'getRequestID'
            },
            'getClientOrderNumber': {
                'title': _('Client Order'),
                'index': 'getClientOrderNumber',
                'toggle': True
            },
            'Creator': {
                'title': PMF('Creator'),
                'index': 'Creator',
                'toggle': True
            },
            'Created': {
                'title': PMF('Date Created'),
                'index': 'created',
                'toggle': False
            },
            'getSample': {
                'title': _("Sample"),
                'toggle': True,
            },
            'BatchID': {
                'title': _("Batch ID"),
                'toggle': True
            },
            'SubGroup': {
                'title': _('Sub-group')
            },
            'Client': {
                'title': _('Client'),
                'toggle': True
            },
            'getClientReference': {
                'title': _('Client Ref'),
                'index': 'getClientReference',
                'toggle': True
            },
            'getClientSampleID': {
                'title': _('Client SID'),
                'index': 'getClientSampleID',
                'toggle': True
            },
            'ClientContact': {
                'title': _('Contact'),
                'toggle': False
            },
            'getSampleTypeTitle': {
                'title': _('Sample Type'),
                'index': 'getSampleTypeTitle',
                'toggle': True
            },
            'getSamplePointTitle': {
                'title': _('Sample Point'),
                'index': 'getSamplePointTitle',
                'toggle': False
            },
            'getStorageLocation': {
                'title': _('Storage Location'),
                'toggle': False
            },
            'SamplingDeviation': {
                'title': _('Sampling Deviation'),
                'toggle': False
            },
            'Priority': {
                'title': _('Priority'),
                'toggle': True,
                'index': 'Priority',
                'sortable': True
            },
            'AdHoc': {
                'title': _('Ad-Hoc'),
                'toggle': False
            },
            'SamplingDate': {
                'title': _('Sampling Date'),
                'index': 'getSamplingDate',
                'toggle': True
            },
            'getDateSampled': {
                'title': _('Date Sampled'),
                'index': 'getDateSampled',
                'toggle': SamplingWorkflowEnabled,
                'input_class': 'datepicker_nofuture',
                'input_width': '10'
            },
            'getSampler': {
                'title': _('Sampler'),
                'toggle': SamplingWorkflowEnabled
            },
            'getDatePreserved': {
                'title': _('Date Preserved'),
                'toggle': user_is_preserver,
                'input_class': 'datepicker_nofuture',
                'input_width': '10',
                'sortable': False
            },  # no datesort without index
            'getPreserver': {
                'title': _('Preserver'),
                'toggle': user_is_preserver
            },
            'getDateReceived': {
                'title': _('Date Received'),
                'index': 'getDateReceived',
                'toggle': False
            },
            'getDatePublished': {
                'title': _('Date Published'),
                'index': 'getDatePublished',
                'toggle': False
            },
            'state_title': {
                'title': _('State'),
                'index': 'review_state'
            },
            'getProfilesTitle': {
                'title': _('Profile'),
                'index': 'getProfilesTitle',
                'toggle': False
            },
            'getAnalysesNum': {
                'title': _('Number of Analyses'),
                'index': 'getAnalysesNum',
                'sortable': True,
                'toggle': False
            },
            'getTemplateTitle': {
                'title': _('Template'),
                'index': 'getTemplateTitle',
                'toggle': False
            },
        }
        self.review_states = [
            {
                'id':
                'default',
                'title':
                _('Active'),
                'contentFilter': {
                    'cancellation_state': 'active',
                    'sort_on': 'created',
                    'sort_order': 'reverse'
                },
                'transitions': [{
                    'id': 'sample'
                }, {
                    'id': 'preserve'
                }, {
                    'id': 'receive'
                }, {
                    'id': 'retract'
                }, {
                    'id': 'verify'
                }, {
                    'id': 'prepublish'
                }, {
                    'id': 'publish'
                }, {
                    'id': 'republish'
                }, {
                    'id': 'cancel'
                }, {
                    'id': 'reinstate'
                }],
                'custom_actions': [],
                'columns': [
                    'getRequestID', 'getSample', 'BatchID', 'SubGroup',
                    'Client', 'Creator', 'Created', 'getClientOrderNumber',
                    'getClientReference', 'ClientContact', 'getClientSampleID',
                    'getProfilesTitle', 'getTemplateTitle',
                    'getSampleTypeTitle', 'getSamplePointTitle',
                    'getStorageLocation', 'SamplingDeviation', 'Priority',
                    'AdHoc', 'SamplingDate', 'getDateSampled', 'getSampler',
                    'getDatePreserved', 'getPreserver', 'getDateReceived',
                    'getAnalysesNum', 'state_title'
                ]
            },
            {
                'id':
                'sample_due',
                'title':
                _('Due'),
                'contentFilter': {
                    'review_state':
                    ('to_be_sampled', 'to_be_preserved', 'sample_due'),
                    'sort_on':
                    'created',
                    'sort_order':
                    'reverse'
                },
                'transitions': [{
                    'id': 'sample'
                }, {
                    'id': 'preserve'
                }, {
                    'id': 'receive'
                }, {
                    'id': 'cancel'
                }, {
                    'id': 'reinstate'
                }],
                'custom_actions': [],
                'columns': [
                    'getRequestID', 'getSample', 'BatchID', 'SubGroup',
                    'Client', 'getProfilesTitle', 'getTemplateTitle',
                    'Creator', 'Created', 'getClientOrderNumber',
                    'getClientReference', 'getClientSampleID', 'ClientContact',
                    'getDateSampled', 'getSampler', 'getDatePreserved',
                    'getPreserver', 'getSampleTypeTitle',
                    'getSamplePointTitle', 'getStorageLocation',
                    'SamplingDeviation', 'Priority', 'AdHoc', 'getAnalysesNum',
                    'state_title'
                ]
            },
            {
                'id':
                'sample_received',
                'title':
                _('Received'),
                'contentFilter': {
                    'review_state': 'sample_received',
                    'sort_on': 'created',
                    'sort_order': 'reverse'
                },
                'transitions': [{
                    'id': 'prepublish'
                }, {
                    'id': 'cancel'
                }, {
                    'id': 'reinstate'
                }],
                'custom_actions': [],
                'columns': [
                    'getRequestID', 'getSample', 'BatchID', 'SubGroup',
                    'Client', 'getProfilesTitle', 'getTemplateTitle',
                    'Creator', 'Created', 'getClientOrderNumber',
                    'getClientReference', 'getClientSampleID', 'ClientContact',
                    'getSampleTypeTitle', 'getSamplePointTitle',
                    'getStorageLocation', 'SamplingDeviation', 'Priority',
                    'AdHoc', 'getDateSampled', 'getSampler',
                    'getDatePreserved', 'getPreserver', 'getAnalysesNum',
                    'getDateReceived'
                ]
            },
            {
                'id':
                'to_be_verified',
                'title':
                _('To be verified'),
                'contentFilter': {
                    'review_state': 'to_be_verified',
                    'sort_on': 'created',
                    'sort_order': 'reverse'
                },
                'transitions': [{
                    'id': 'retract'
                }, {
                    'id': 'verify'
                }, {
                    'id': 'prepublish'
                }, {
                    'id': 'cancel'
                }, {
                    'id': 'reinstate'
                }],
                'custom_actions': [],
                'columns': [
                    'getRequestID', 'getSample', 'BatchID', 'SubGroup',
                    'Client', 'getProfilesTitle', 'getTemplateTitle',
                    'Creator', 'Created', 'getClientOrderNumber',
                    'getClientReference', 'getClientSampleID', 'ClientContact',
                    'getSampleTypeTitle', 'getSamplePointTitle',
                    'getStorageLocation', 'SamplingDeviation', 'Priority',
                    'AdHoc', 'getDateSampled', 'getSampler',
                    'getDatePreserved', 'getPreserver', 'getAnalysesNum',
                    'getDateReceived'
                ]
            },
            {
                'id':
                'verified',
                'title':
                _('Verified'),
                'contentFilter': {
                    'review_state': 'verified',
                    'sort_on': 'created',
                    'sort_order': 'reverse'
                },
                'transitions': [{
                    'id': 'publish'
                }],
                'custom_actions': [],
                'columns': [
                    'getRequestID', 'getSample', 'BatchID', 'SubGroup',
                    'Client', 'getProfilesTitle', 'getTemplateTitle',
                    'Creator', 'Created', 'getClientOrderNumber',
                    'getClientReference', 'getClientSampleID', 'ClientContact',
                    'getSampleTypeTitle', 'getSamplePointTitle',
                    'getStorageLocation', 'SamplingDeviation', 'Priority',
                    'AdHoc', 'getDateSampled', 'getSampler',
                    'getDatePreserved', 'getPreserver', 'getAnalysesNum',
                    'getDateReceived'
                ]
            },
            {
                'id':
                'published',
                'title':
                _('Published'),
                'contentFilter': {
                    'review_state': ('published', 'invalid'),
                    'sort_on': 'created',
                    'sort_order': 'reverse'
                },
                'transitions': [{
                    'id': 'republish'
                }],
                'custom_actions': [],
                'columns': [
                    'getRequestID', 'getSample', 'BatchID', 'SubGroup',
                    'Client', 'getProfilesTitle', 'getTemplateTitle',
                    'Creator', 'Created', 'getClientOrderNumber',
                    'getClientReference', 'getClientSampleID', 'ClientContact',
                    'getSampleTypeTitle', 'getSamplePointTitle',
                    'getStorageLocation', 'SamplingDeviation', 'Priority',
                    'AdHoc', 'getDateSampled', 'getSampler',
                    'getDatePreserved', 'getPreserver', 'getDateReceived',
                    'getAnalysesNum', 'getDatePublished'
                ]
            },
            {
                'id':
                'cancelled',
                'title':
                _('Cancelled'),
                'contentFilter': {
                    'cancellation_state':
                    'cancelled',
                    'review_state':
                    ('to_be_sampled', 'to_be_preserved', 'sample_due',
                     'sample_received', 'to_be_verified', 'attachment_due',
                     'verified', 'published'),
                    'sort_on':
                    'created',
                    'sort_order':
                    'reverse'
                },
                'transitions': [{
                    'id': 'reinstate'
                }],
                'custom_actions': [],
                'columns': [
                    'getRequestID', 'getSample', 'BatchID', 'SubGroup',
                    'Client', 'getProfilesTitle', 'getTemplateTitle',
                    'Creator', 'Created', 'getClientOrderNumber',
                    'getClientReference', 'getClientSampleID', 'ClientContact',
                    'getSampleTypeTitle', 'getSamplePointTitle',
                    'getStorageLocation', 'SamplingDeviation', 'Priority',
                    'AdHoc', 'getDateSampled', 'getSampler',
                    'getDatePreserved', 'getPreserver', 'getDateReceived',
                    'getDatePublished', 'getAnalysesNum', 'state_title'
                ]
            },
            {
                'id':
                'invalid',
                'title':
                _('Invalid'),
                'contentFilter': {
                    'review_state': 'invalid',
                    'sort_on': 'created',
                    'sort_order': 'reverse'
                },
                'transitions': [],
                'custom_actions': [],
                'columns': [
                    'getRequestID', 'getSample', 'BatchID', 'SubGroup',
                    'Client', 'getProfilesTitle', 'getTemplateTitle',
                    'Creator', 'Created', 'getClientOrderNumber',
                    'getClientReference', 'getClientSampleID', 'ClientContact',
                    'getSampleTypeTitle', 'getSamplePointTitle',
                    'getStorageLocation', 'SamplingDeviation', 'Priority',
                    'AdHoc', 'getDateSampled', 'getSampler',
                    'getDatePreserved', 'getPreserver', 'getDateReceived',
                    'getAnalysesNum', 'getDatePublished'
                ]
            },
            {
                'id':
                'assigned',
                'title':
                "<img title='%s'\
                       src='%s/++resource++bika.lims.images/assigned.png'/>" %
                (t(_("Assigned")), self.portal_url),
                'contentFilter': {
                    'worksheetanalysis_review_state':
                    'assigned',
                    'review_state':
                    ('sample_received', 'to_be_verified', 'attachment_due',
                     'verified', 'published'),
                    'sort_on':
                    'created',
                    'sort_order':
                    'reverse'
                },
                'transitions': [{
                    'id': 'retract'
                }, {
                    'id': 'verify'
                }, {
                    'id': 'prepublish'
                }, {
                    'id': 'publish'
                }, {
                    'id': 'republish'
                }, {
                    'id': 'cancel'
                }, {
                    'id': 'reinstate'
                }],
                'custom_actions': [],
                'columns': [
                    'getRequestID', 'getSample', 'BatchID', 'SubGroup',
                    'Client', 'getProfilesTitle', 'getTemplateTitle',
                    'Creator', 'Created', 'getClientOrderNumber',
                    'getClientReference', 'getClientSampleID', 'ClientContact',
                    'getSampleTypeTitle', 'getSamplePointTitle',
                    'getStorageLocation', 'SamplingDeviation', 'Priority',
                    'AdHoc', 'getDateSampled', 'getSampler',
                    'getDatePreserved', 'getPreserver', 'getDateReceived',
                    'getAnalysesNum', 'state_title'
                ]
            },
            {
                'id':
                'unassigned',
                'title':
                "<img title='%s'\
                       src='%s/++resource++bika.lims.images/unassigned.png'/>"
                % (t(_("Unassigned")), self.portal_url),
                'contentFilter': {
                    'worksheetanalysis_review_state':
                    'unassigned',
                    'review_state':
                    ('sample_received', 'to_be_verified', 'attachment_due',
                     'verified', 'published'),
                    'sort_on':
                    'created',
                    'sort_order':
                    'reverse'
                },
                'transitions': [{
                    'id': 'receive'
                }, {
                    'id': 'retract'
                }, {
                    'id': 'verify'
                }, {
                    'id': 'prepublish'
                }, {
                    'id': 'publish'
                }, {
                    'id': 'republish'
                }, {
                    'id': 'cancel'
                }, {
                    'id': 'reinstate'
                }],
                'custom_actions': [],
                'columns': [
                    'getRequestID', 'getSample', 'BatchID', 'SubGroup',
                    'Client', 'getProfilesTitle', 'getTemplateTitle',
                    'Creator', 'Created', 'getClientOrderNumber',
                    'getClientReference', 'getClientSampleID', 'ClientContact',
                    'getSampleTypeTitle', 'getSamplePointTitle',
                    'getStorageLocation', 'SamplingDeviation', 'Priority',
                    'AdHoc', 'SamplingDate', 'getDateSampled', 'getSampler',
                    'getDatePreserved', 'getPreserver', 'getDateReceived',
                    'getAnalysesNum', 'state_title'
                ]
            },
        ]

    def __call__(self):
        bsc = getToolByName(self.context, 'bika_setup_catalog')
        bac = getToolByName(self.context, 'bika_analysis_catalog')
        self.report_content = {}
        parms = []
        headings = {}
        headings['header'] = _("Analyses out of range")
        headings['subheader'] = _("Analyses results out of specified range")

        count_all = 0

        query = {"portal_type": "Analysis",
                 "sort_order": "reverse"}

        spec_uid = self.request.form.get("spec", False)
        spec_obj = None
        spec_title = ""
        if spec_uid:
            brains = bsc(UID=spec_uid)
            if brains:
                spec_obj = brains[0].getObject()
                spec_title = spec_obj.Title()
        parms.append(
            {"title": _("Range spec"),
             "value": spec_title,
             "type": "text"})

        date_query = formatDateQuery(self.context, 'Received')
        if date_query:
            query['getDateReceived'] = date_query
            received = formatDateParms(self.context, 'Received')
        else:
            received = 'Undefined'
        parms.append(
            {'title': _('Received'),
             'value': received,
             'type': 'text'})

        wf_tool = getToolByName(self.context, 'portal_workflow')
        if 'bika_analysis_workflow' in self.request.form:
            query['review_state'] = self.request.form['bika_analysis_workflow']
            review_state = wf_tool.getTitleForStateOnType(
                self.request.form['bika_analysis_workflow'], 'Analysis')
        else:
            review_state = 'Undefined'
        parms.append(
            {'title': _('Status'),
             'value': review_state,
             'type': 'text'})

        if 'bika_cancellation_workflow' in self.request.form:
            query['cancellation_state'] = self.request.form[
                'bika_cancellation_workflow']
            cancellation_state = wf_tool.getTitleForStateOnType(
                self.request.form['bika_cancellation_workflow'], 'Analysis')
        else:
            cancellation_state = 'Undefined'
        parms.append(
            {'title': _('Active'),
             'value': cancellation_state,
             'type': 'text'})

        if 'bika_worksheetanalysis_workflow' in self.request.form:
            query['worksheetanalysis_review_state'] = self.request.form[
                'bika_worksheetanalysis_workflow']
            ws_review_state = wf_tool.getTitleForStateOnType(
                self.request.form['bika_worksheetanalysis_workflow'], 'Analysis')
        else:
            ws_review_state = 'Undefined'
        parms.append(
            {'title': _('Assigned to worksheet'),
             'value': ws_review_state,
             'type': 'text'})

        # and now lets do the actual report lines
        col_heads = [_('Client'),
                     _('Request'),
                     _('Sample type'),
                     _('Sample point'),
                     _('Category'),
                     _('Analysis'),
                     _('Result'),
                     _('Min'),
                     _('Max'),
                     _('Status'),
        ]
        if isAttributeHidden('Sample', 'SamplePoint'):
            col_heads.remove(_('Sample point'))

        formats = {'columns': len(col_heads),
                   'col_heads': col_heads,
                   'class': '',
        }

        datalines = []

        for a_proxy in bac(query):
            analysis = a_proxy.getObject()
            if analysis.getResult():
                try:
                    result = float(analysis.getResult())
                except (TypeError, ValueError):
                    continue
            else:
                continue

            keyword = analysis.getKeyword()

            # determine which specs to use for this particular analysis
            # 1) if a spec is given in the query form, use it.
            # 2) if a spec is entered directly on the analysis, use it.
            # otherwise just continue to the next object.
            spec_dict = False
            if spec_obj:
                rr = spec_obj.getResultsRangeDict()
                if keyword in rr:
                    spec_dict = rr[keyword]
            else:
                ar = analysis.aq_parent
                rr = dicts_to_dict(ar.getResultsRange(), 'keyword')
                if keyword in rr:
                    spec_dict = rr[keyword]
                else:
                    continue
            if not spec_dict:
                continue
            try:
                spec_min = float(spec_dict['min'])
                spec_max = float(spec_dict['max'])
            except (KeyError, TypeError, ValueError):
                continue
            if spec_min <= result <= spec_max:
                continue

            # check if in shoulder: out of range, but in acceptable
            # error percentage
            shoulder = False
            try:
                error = float(spec_dict.get('error', '0'))
            except (TypeError, ValueError):
                error = 0
            error_amount = (result / 100) * error
            error_min = result - error_amount
            error_max = result + error_amount
            if ((result < spec_min) and (error_max >= spec_min)) or \
                    ((result > spec_max) and (error_min <= spec_max)):
                shoulder = True

            dataline = []

            dataitem = {'value': analysis.getClientTitle()}
            dataline.append(dataitem)

            dataitem = {'value': analysis.getRequestID()}
            dataline.append(dataitem)

            dataitem = {'value': analysis.aq_parent.getSampleTypeTitle()}
            dataline.append(dataitem)

            # keep data rows aligned with col_heads: the sample point column
            # is only present when the attribute is not hidden
            if not isAttributeHidden('Sample', 'SamplePoint'):
                dataitem = {'value': analysis.aq_parent.getSamplePointTitle()}
                dataline.append(dataitem)

            dataitem = {'value': analysis.getCategoryTitle()}
            dataline.append(dataitem)

            dataitem = {'value': analysis.getServiceTitle()}
            dataline.append(dataitem)

            if shoulder:
                dataitem = {'value': analysis.getResult(),
                            'img_after': '++resource++bika.lims.images/exclamation.png'}
            else:
                dataitem = {'value': analysis.getResult()}

            dataline.append(dataitem)

            dataitem = {'value': spec_dict['min']}
            dataline.append(dataitem)

            dataitem = {'value': spec_dict['max']}
            dataline.append(dataitem)

            state = wf_tool.getInfoFor(analysis, 'review_state', '')
            review_state = wf_tool.getTitleForStateOnType(
                state, 'Analysis')
            dataitem = {'value': review_state}
            dataline.append(dataitem)

            datalines.append(dataline)

            count_all += 1

        # table footer data
        footlines = []
        footline = []
        footitem = {'value': _('Number of analyses out of range for period'),
                    'colspan': 9,
                    'class': 'total_label'}
        footline.append(footitem)
        footitem = {'value': count_all}
        footline.append(footitem)
        footlines.append(footline)

        # report footer data
        footnotes = []
        footline = []
        footitem = {'value': _('Analysis result within error range'),
                    'img_before': '++resource++bika.lims.images/exclamation.png'
        }
        footline.append(footitem)
        footnotes.append(footline)

        self.report_content = {
            'headings': headings,
            'parms': parms,
            'formats': formats,
            'datalines': datalines,
            'footings': footlines,
            'footnotes': footnotes}

        title = t(headings['header'])

        return {'report_title': title,
                'report_data': self.template()}
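A result counts as being "in the shoulder" above when it falls outside [min, max] but its plus/minus error-percentage band still reaches back into the range. The helper below restates just that arithmetic as a standalone function; in_shoulder is a made-up name for the illustration and not part of Bika LIMS.

# Minimal sketch of the shoulder check (hypothetical helper): a result is in
# the shoulder when it is out of range but its error band touches the range.
def in_shoulder(result, spec_min, spec_max, error_pct):
    if spec_min <= result <= spec_max:
        return False  # within range, not a shoulder case
    error_amount = (result / 100.0) * error_pct
    error_min = result - error_amount
    error_max = result + error_amount
    return ((result < spec_min and error_max >= spec_min) or
            (result > spec_max and error_min <= spec_max))

# e.g. for a 10-20 range with a 10% error band, 9.5 is a shoulder, 5.0 is not
assert in_shoulder(9.5, 10, 20, 10) is True
assert in_shoulder(5.0, 10, 20, 10) is False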
Example #48
0
    def __call__(self):
        # get all the data into datalines

        sc = getToolByName(self.context, 'bika_setup_catalog')
        bc = getToolByName(self.context, 'bika_analysis_catalog')
        rc = getToolByName(self.context, 'reference_catalog')
        self.report_content = {}
        parms = []
        headings = {}
        headings['header'] = _("Analysis turnaround times")
        headings['subheader'] = _("The turnaround time of analyses")

        query = {'portal_type': 'Analysis'}
        client_title = None
        if 'ClientUID' in self.request.form:
            client_uid = self.request.form['ClientUID']
            query['getClientUID'] = client_uid
            client = rc.lookupObject(client_uid)
            client_title = client.Title()
        else:
            client = logged_in_client(self.context)
            if client:
                client_title = client.Title()
                query['getClientUID'] = client.UID()
        if client_title:
            parms.append({
                'title': _('Client'),
                'value': client_title,
                'type': 'text'
            })

        date_query = formatDateQuery(self.context, 'Received')
        if date_query:
            query['created'] = date_query
            received = formatDateParms(self.context, 'Received')
            parms.append({
                'title': _('Received'),
                'value': received,
                'type': 'text'
            })

        query['review_state'] = 'published'

        workflow = getToolByName(self.context, 'portal_workflow')
        if 'bika_worksheetanalysis_workflow' in self.request.form:
            query['worksheetanalysis_review_state'] = self.request.form[
                'bika_worksheetanalysis_workflow']
            ws_review_state = workflow.getTitleForStateOnType(
                self.request.form['bika_worksheetanalysis_workflow'],
                'Analysis')
            parms.append({
                'title': _('Assigned to worksheet'),
                'value': ws_review_state,
                'type': 'text'
            })

        # query all the analyses and increment the counts
        count_early = 0
        mins_early = 0
        count_late = 0
        mins_late = 0
        count_undefined = 0
        services = {}

        analyses = bc(query)
        for a in analyses:
            analysis = a.getObject()
            service_uid = analysis.getServiceUID()
            if service_uid not in services:
                services[service_uid] = {
                    'count_early': 0,
                    'count_late': 0,
                    'mins_early': 0,
                    'mins_late': 0,
                    'count_undefined': 0,
                }
            earliness = analysis.getEarliness()
            if earliness < 0:
                count_late = services[service_uid]['count_late']
                mins_late = services[service_uid]['mins_late']
                count_late += 1
                mins_late -= earliness
                services[service_uid]['count_late'] = count_late
                services[service_uid]['mins_late'] = mins_late
            if earliness > 0:
                count_early = services[service_uid]['count_early']
                mins_early = services[service_uid]['mins_early']
                count_early += 1
                mins_early += earliness
                services[service_uid]['count_early'] = count_early
                services[service_uid]['mins_early'] = mins_early
            if earliness == 0:
                count_undefined = services[service_uid]['count_undefined']
                count_undefined += 1
                services[service_uid]['count_undefined'] = count_undefined

        # calculate averages
        for service_uid in services.keys():
            count_early = services[service_uid]['count_early']
            mins_early = services[service_uid]['mins_early']
            if count_early == 0:
                services[service_uid]['ave_early'] = ''
            else:
                avemins = (mins_early) / count_early
                services[service_uid]['ave_early'] = formatDuration(
                    self.context, avemins)
            count_late = services[service_uid]['count_late']
            mins_late = services[service_uid]['mins_late']
            if count_late == 0:
                services[service_uid]['ave_late'] = ''
            else:
                avemins = mins_late / count_late
                services[service_uid]['ave_late'] = formatDuration(
                    self.context, avemins)

        # and now lets do the actual report lines
        formats = {
            'columns':
            7,
            'col_heads': [
                _('Analysis'),
                _('Count'),
                _('Undefined'),
                _('Late'),
                _('Average late'),
                _('Early'),
                _('Average early'),
            ],
            'class':
            '',
        }

        total_count_early = 0
        total_count_late = 0
        total_mins_early = 0
        total_mins_late = 0
        total_count_undefined = 0
        datalines = []

        for cat in sc(portal_type='AnalysisCategory',
                      sort_on='sortable_title'):
            catline = [
                {
                    'value': cat.Title,
                    'class': 'category_heading',
                    'colspan': 7
                },
            ]
            first_time = True
            cat_count_early = 0
            cat_count_late = 0
            cat_count_undefined = 0
            cat_mins_early = 0
            cat_mins_late = 0
            for service in sc(portal_type="AnalysisService",
                              getCategoryUID=cat.UID,
                              sort_on='sortable_title'):

                dataline = [
                    {
                        'value': service.Title,
                        'class': 'testgreen'
                    },
                ]
                if service.UID not in services:
                    continue

                if first_time:
                    datalines.append(catline)
                    first_time = False

                # analyses found
                cat_count_early += services[service.UID]['count_early']
                cat_count_late += services[service.UID]['count_late']
                cat_count_undefined += services[service.UID]['count_undefined']
                cat_mins_early += services[service.UID]['mins_early']
                cat_mins_late += services[service.UID]['mins_late']

                count = services[service.UID]['count_early'] + \
                        services[service.UID]['count_late'] + \
                        services[service.UID]['count_undefined']

                dataline.append({'value': count, 'class': 'number'})
                dataline.append({
                    'value':
                    services[service.UID]['count_undefined'],
                    'class':
                    'number'
                })
                dataline.append({
                    'value': services[service.UID]['count_late'],
                    'class': 'number'
                })
                dataline.append({
                    'value': services[service.UID]['ave_late'],
                    'class': 'number'
                })
                dataline.append({
                    'value': services[service.UID]['count_early'],
                    'class': 'number'
                })
                dataline.append({
                    'value': services[service.UID]['ave_early'],
                    'class': 'number'
                })

                datalines.append(dataline)

            # category totals
            dataline = [
                {
                    'value': '%s - total' % (cat.Title),
                    'class': 'subtotal_label'
                },
            ]

            dataline.append({
                'value':
                cat_count_early + cat_count_late + cat_count_undefined,
                'class': 'subtotal_number'
            })

            dataline.append({
                'value': cat_count_undefined,
                'class': 'subtotal_number'
            })

            dataline.append({
                'value': cat_count_late,
                'class': 'subtotal_number'
            })

            if cat_count_late:
                dataitem = {
                    'value': cat_mins_late / cat_count_late,
                    'class': 'subtotal_number'
                }
            else:
                dataitem = {'value': 0, 'class': 'subtotal_number'}

            dataline.append(dataitem)

            dataline.append({
                'value': cat_count_early,
                'class': 'subtotal_number'
            })

            if cat_count_early:
                dataitem = {
                    'value': cat_mins_early / cat_count_early,
                    'class': 'subtotal_number'
                }
            else:
                dataitem = {'value': 0, 'class': 'subtotal_number'}

            dataline.append(dataitem)

            total_count_early += cat_count_early
            total_count_late += cat_count_late
            total_count_undefined += cat_count_undefined
            total_mins_early += cat_mins_early
            total_mins_late += cat_mins_late

        # footer data
        footlines = []
        footline = [
            {
                'value': _('Total'),
                'class': 'total'
            },
        ]

        footline.append({
            'value':
            total_count_early + total_count_late + total_count_undefined,
            'class': 'total number'
        })

        footline.append({
            'value': total_count_undefined,
            'class': 'total number'
        })

        footline.append({'value': total_count_late, 'class': 'total number'})

        if total_count_late:
            ave_mins = total_mins_late / total_count_late
            footline.append({
                'value': formatDuration(self.context, ave_mins),
                'class': 'total number'
            })
        else:
            footline.append({'value': ''})

        footline.append({'value': total_count_early, 'class': 'total number'})

        if total_count_early:
            ave_mins = total_mins_early / total_count_early
            footline.append({
                'value': formatDuration(self.context, ave_mins),
                'class': 'total number'
            })
        else:
            footline.append({'value': '', 'class': 'total number'})

        footlines.append(footline)

        self.report_content = {
            'headings': headings,
            'parms': parms,
            'formats': formats,
            'datalines': datalines,
            'footings': footlines
        }

        if self.request.get('output_format', '') == 'CSV':
            import csv
            import StringIO
            import datetime

            fieldnames = [
                'Analysis',
                'Count',
                'Undefined',
                'Late',
                'Average late',
                'Early',
                'Average early',
            ]
            output = StringIO.StringIO()
            dw = csv.DictWriter(output,
                                extrasaction='ignore',
                                fieldnames=fieldnames)
            dw.writerow(dict((fn, fn) for fn in fieldnames))
            for row in datalines:
                if len(row) == 1:
                    # category heading thingy
                    continue
                dw.writerow({
                    'Analysis': row[0]['value'],
                    'Count': row[1]['value'],
                    'Undefined': row[2]['value'],
                    'Late': row[3]['value'],
                    'Average late': row[4]['value'],
                    'Early': row[5]['value'],
                    'Average early': row[6]['value'],
                })
            report_data = output.getvalue()
            output.close()
            date = datetime.datetime.now().strftime("%Y%m%d%H%M")
            setheader = self.request.RESPONSE.setHeader
            setheader('Content-Type', 'text/csv')
            setheader("Content-Disposition",
                      "attachment;filename=\"analysestats_%s.csv\"" % date)
            self.request.RESPONSE.write(report_data)
        else:
            return {
                'report_title': t(headings['header']),
                'report_data': self.template()
            }
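The turnaround report above walks every published analysis, sorts its earliness value into early (positive), late (negative) or undefined (zero) buckets per service, and then averages the accumulated minutes. The sketch below condenses that bookkeeping into one made-up helper (tally_earliness) so it is easier to follow; it is an illustration, not the Bika LIMS implementation.

# Minimal sketch (hypothetical helper) of the per-service tallying above.
def tally_earliness(earliness_values):
    tally = {'count_early': 0, 'mins_early': 0,
             'count_late': 0, 'mins_late': 0,
             'count_undefined': 0}
    for mins in earliness_values:
        if mins > 0:
            tally['count_early'] += 1
            tally['mins_early'] += mins
        elif mins < 0:
            tally['count_late'] += 1
            tally['mins_late'] -= mins  # store late minutes as positive
        else:
            tally['count_undefined'] += 1
    # averages mirror the report: empty string when there is nothing to average
    tally['ave_early'] = (tally['mins_early'] / tally['count_early']
                          if tally['count_early'] else '')
    tally['ave_late'] = (tally['mins_late'] / tally['count_late']
                         if tally['count_late'] else '')
    return tally

# e.g. two late analyses (90 and 30 minutes late) and one 60 minutes early
print(tally_earliness([-90, -30, 60]))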
Example #49
0
File: ulite.py Project: pureboy8/OLiMS
def Import(context, request):
    """ Read biodrop analysis results
    """
    infile = request.form['filename']
    fileformat = request.form['format']
    artoapply = request.form['artoapply']
    override = request.form['override']
    sample = request.form.get('sample', 'requestid')
    instrument = request.form.get('instrument', None)
    errors = []
    logs = []
    warns = []

    # Load the suitable parser
    parser = None
    if not hasattr(infile, 'filename'):
        errors.append(_("No file selected"))
    elif fileformat == 'csv':
        analysis = request.form.get('analysis', None)
        if analysis:
            parser = BioDropCSVParser(infile, analysis)
        else:
            errors.append(t(_("No analysis selected")))
    else:
        errors.append(
            t(
                _("Unrecognized file format ${fileformat}",
                  mapping={"fileformat": fileformat})))

    if parser:
        # Load the importer
        status = ['sample_received', 'attachment_due', 'to_be_verified']
        if artoapply == 'received':
            status = ['sample_received']
        elif artoapply == 'received_tobeverified':
            status = ['sample_received', 'attachment_due', 'to_be_verified']

        over = [False, False]
        if override == 'nooverride':
            over = [False, False]
        elif override == 'override':
            over = [True, False]
        elif override == 'overrideempty':
            over = [True, True]

        sam = ['getRequestID', 'getSampleID', 'getClientSampleID']
        if sample == 'requestid':
            sam = ['getRequestID']
        elif sample == 'sampleid':
            sam = ['getSampleID']
        elif sample == 'clientsid':
            sam = ['getClientSampleID']
        elif sample == 'sample_clientsid':
            sam = ['getSampleID', 'getClientSampleID']

        importer = BioDropImporter(parser=parser,
                                   context=context,
                                   idsearchcriteria=sam,
                                   allowed_ar_states=status,
                                   allowed_analysis_states=None,
                                   override=over,
                                   instrument_uid=instrument)

        tbex = ''
        try:
            importer.process()
        except Exception:
            tbex = traceback.format_exc()
        errors = importer.errors
        logs = importer.logs
        warns = importer.warns
        if tbex:
            errors.append(tbex)

    results = {'errors': errors, 'log': logs, 'warns': warns}
    return json.dumps(results)
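Before the importer runs, the Import function above translates plain form values into importer settings: artoapply selects which AR workflow states may receive results, override selects the (override, override-with-empty) pair, and sample selects which ID fields are searched. The two small functions below restate that mapping on its own; allowed_ar_states and override_flags are made-up names for the illustration and not part of the Bika LIMS API.

# Minimal sketch (hypothetical helpers) of the form-value translation above.
def allowed_ar_states(artoapply):
    states = {
        'received': ['sample_received'],
        'received_tobeverified': ['sample_received', 'attachment_due',
                                  'to_be_verified'],
    }
    # default mirrors the snippet: all three states are allowed
    return states.get(artoapply,
                      ['sample_received', 'attachment_due', 'to_be_verified'])

def override_flags(override):
    # (override existing results, override them with empty values)
    return {'nooverride': [False, False],
            'override': [True, False],
            'overrideempty': [True, True]}.get(override, [False, False])

print(allowed_ar_states('received'))    # ['sample_received']
print(override_flags('overrideempty'))  # [True, True]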
Example #50
0
    def folderitems(self, full_objects = False):
        """
        >>> portal = layer['portal']
        >>> portal_url = portal.absolute_url()
        >>> from plone.app.testing import SITE_OWNER_NAME
        >>> from plone.app.testing import SITE_OWNER_PASSWORD

        Test page batching https://github.com/bikalabs/Bika-LIMS/issues/1276
        When visiting the second page, the Water sampletype should be displayed:

        >>> browser = layer['getBrowser'](portal, loggedIn=True, username=SITE_OWNER_NAME, password=SITE_OWNER_PASSWORD)
        >>> browser.open(portal_url+"/bika_setup/bika_sampletypes/folder_view?",
        ... "list_pagesize=10&list_review_state=default&list_pagenumber=2")
        >>> browser.contents
        '...Water...'
        """

        #self.contentsMethod = self.context.getFolderContents
        if not hasattr(self, 'contentsMethod'):
            self.contentsMethod = getToolByName(self.context, self.catalog)

        context = aq_inner(self.context)
        plone_layout = getMultiAdapter((context, self.request), name = u'plone_layout')
        plone_utils = getToolByName(context, 'plone_utils')
        plone_view = getMultiAdapter((context, self.request), name = u'plone')
        portal_properties = getToolByName(context, 'portal_properties')
        portal_types = getToolByName(context, 'portal_types')
        workflow = getToolByName(context, 'portal_workflow')
        site_properties = portal_properties.site_properties
        norm = getUtility(IIDNormalizer).normalize
        if self.request.get('show_all', '').lower() == 'true' \
                or self.show_all \
                or self.pagesize == 0:
            show_all = True
        else:
            show_all = False

        pagenumber = int(self.request.get('pagenumber', 1) or 1)
        pagesize = self.pagesize
        start = (pagenumber - 1) * pagesize
        end = start + pagesize - 1

        if (hasattr(self, 'And') and self.And) \
           or (hasattr(self, 'Or') and self.Or):
            # if contentsMethod is capable, we do an AdvancedQuery.
            if hasattr(self.contentsMethod, 'makeAdvancedQuery'):
                aq = self.contentsMethod.makeAdvancedQuery(self.contentFilter)
                if hasattr(self, 'And') and self.And:
                    tmpAnd = And()
                    for q in self.And:
                        tmpAnd.addSubquery(q)
                    aq &= tmpAnd
                if hasattr(self, 'Or') and self.Or:
                    tmpOr = Or()
                    for q in self.Or:
                        tmpOr.addSubquery(q)
                    aq &= tmpOr
                brains = self.contentsMethod.evalAdvancedQuery(aq)
            else:
                # otherwise, self.contentsMethod must handle contentFilter
                brains = self.contentsMethod(self.contentFilter)
        else:
            brains = self.contentsMethod(self.contentFilter)

        results = []
        self.page_start_index = 0
        current_index = -1
        for i, obj in enumerate(brains):
            # we don't know yet if it's a brain or an object
            path = hasattr(obj, 'getPath') and obj.getPath() or \
                 "/".join(obj.getPhysicalPath())

            if hasattr(obj, 'getObject'):
                obj = obj.getObject()

            # check if the item must be rendered or not (prevents from
            # doing it later in folderitems) and dealing with paging
            if not self.isItemAllowed(obj):
                continue

            # avoid creating unnecessary info for items outside the current
            # batch;  only the path is needed for the "select all" case...
            # we only take allowed items into account
            current_index += 1
            if not show_all and not (start <= current_index <= end):
                results.append(dict(path = path, uid = obj.UID()))
                continue

            uid = obj.UID()
            title = obj.Title()
            description = obj.Description()
            icon = plone_layout.getIcon(obj)
            url = obj.absolute_url()
            relative_url = obj.absolute_url(relative = True)

            fti = portal_types.get(obj.portal_type)
            if fti is not None:
                type_title_msgid = fti.Title()
            else:
                type_title_msgid = obj.portal_type

            url_href_title = '%s at %s: %s' % (
                t(type_title_msgid),
                path,
                to_utf8(description))

            modified = self.ulocalized_time(obj.modified())

            # element css classes
            type_class = 'contenttype-' + \
                plone_utils.normalizeString(obj.portal_type)

            state_class = ''
            states = {}
            for w in workflow.getWorkflowsFor(obj):
                state = w._getWorkflowStateOf(obj).id
                states[w.state_var] = state
                state_class += "state-%s " % state

            results_dict = dict(
                obj = obj,
                id = obj.getId(),
                title = title,
                uid = uid,
                path = path,
                url = url,
                fti = fti,
                item_data = json.dumps([]),
                url_href_title = url_href_title,
                obj_type = obj.Type,
                size = obj.getObjSize,
                modified = modified,
                icon = icon.html_tag(),
                type_class = type_class,
                # a list of lookups for single-value-select fields
                choices = {},
                state_class = state_class,
                relative_url = relative_url,
                view_url = url,
                table_row_class = "",
                category = 'None',

                # a list of names of fields that may be edited on this item
                allow_edit = [],

                # a list of names of fields that are compulsory (if editable)
                required = [],

                # "before", "after" and replace: dictionary (key is column ID)
                # A snippet of HTML which will be rendered
                # before/after/instead of the table cell content.
                before = {}, # { before : "<a href=..>" }
                after = {},
                replace = {},
            )
            try:
                rs = workflow.getInfoFor(obj, 'review_state')
                st_title = workflow.getTitleForStateOnType(rs, obj.portal_type)
                st_title = t(PMF(st_title))
            except Exception:
                rs = 'active'
                st_title = None
            if rs:
                results_dict['review_state'] = rs
            for state_var, state in states.items():
                if not st_title:
                    st_title = workflow.getTitleForStateOnType(
                        state, obj.portal_type)
                results_dict[state_var] = state
            results_dict['state_title'] = st_title

            # extra classes for individual fields on this item { field_id : "css classes" }
            results_dict['class'] = {}
            for name, adapter in getAdapters((obj, ), IFieldIcons):
                auid = obj.UID() if hasattr(obj, 'UID') and callable(obj.UID) else None
                if not auid:
                    continue
                alerts = adapter()
                # logger.info(str(alerts))
                if alerts and auid in alerts:
                    if auid in self.field_icons:
                        self.field_icons[auid].extend(alerts[auid])
                    else:
                        self.field_icons[auid] = alerts[auid]

            # Search for values for all columns in obj
            for key in self.columns.keys():
                if hasattr(obj, key):
                    # if the key is already in the results dict
                    # then we don't replace its value
                    if key in results_dict:
                        continue
                    value = getattr(obj, key)
                    if callable(value):
                        value = value()
                    results_dict[key] = value
            results.append(results_dict)

        return results
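folderitems above pages its results by computing a start/end window from pagenumber and pagesize; rows outside the window only get a lightweight path/uid entry so that "select all" still works. The helpers below isolate that window arithmetic; paging_window and in_window are made-up names for the illustration, not Bika LIMS functions.

# Minimal sketch (hypothetical helpers) of the paging window computed above.
def paging_window(pagenumber, pagesize):
    start = (pagenumber - 1) * pagesize
    end = start + pagesize - 1
    return start, end

def in_window(index, pagenumber, pagesize, show_all=False):
    start, end = paging_window(pagenumber, pagesize)
    return show_all or start <= index <= end

# e.g. with 10 rows per page, page 2 covers indices 10..19
assert paging_window(2, 10) == (10, 19)
assert in_window(12, 2, 10)
assert not in_window(25, 2, 10)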
Example #51
0
    def folderitems(self, full_objects=False):
        workflow = getToolByName(self.context, "portal_workflow")
        items = BikaListingView.folderitems(self)
        mtool = getToolByName(self.context, 'portal_membership')
        member = mtool.getAuthenticatedMember()
        roles = member.getRoles()
        hideclientlink = 'RegulatoryInspector' in roles \
            and 'Manager' not in roles \
            and 'LabManager' not in roles \
            and 'LabClerk' not in roles

        for x in range(len(items)):
            if 'obj' not in items[x]:
                continue
            obj = items[x]['obj']
            sample = obj.getSample()

            if getSecurityManager().checkPermission(EditResults, obj):
                url = obj.absolute_url() + "/manage_results"
            else:
                url = obj.absolute_url()

            items[x]['Client'] = obj.aq_parent.Title()
            if not hideclientlink:
                items[x]['replace']['Client'] = "<a href='%s'>%s</a>" % \
                    (obj.aq_parent.absolute_url(), obj.aq_parent.Title())
            items[x]['Creator'] = self.user_fullname(obj.Creator())
            items[x]['getRequestID'] = obj.getRequestID()
            items[x]['replace']['getRequestID'] = "<a href='%s'>%s</a>" % \
                 (url, items[x]['getRequestID'])
            items[x]['getSample'] = sample
            items[x]['replace']['getSample'] = \
                "<a href='%s'>%s</a>" % (sample.absolute_url(), sample.Title())

            analyses_num = obj.getAnalysesNum()
            if analyses_num:
                items[x]['getAnalysesNum'] = "%s/%s" % (analyses_num[0],
                                                        analyses_num[1])
            else:
                items[x]['getAnalysesNum'] = ''

            batch = obj.getBatch()
            if batch:
                items[x]['BatchID'] = batch.getBatchID()
                items[x]['replace']['BatchID'] = "<a href='%s'>%s</a>" % \
                     (batch.absolute_url(), items[x]['BatchID'])
            else:
                items[x]['BatchID'] = ''

            val = obj.Schema().getField('SubGroup').get(obj)
            items[x]['SubGroup'] = val.Title() if val else ''

            samplingdate = obj.getSample().getSamplingDate()
            items[x]['SamplingDate'] = self.ulocalized_time(samplingdate,
                                                            long_format=1)
            items[x]['getDateReceived'] = self.ulocalized_time(
                obj.getDateReceived())
            items[x]['getDatePublished'] = self.ulocalized_time(
                obj.getDatePublished())

            deviation = sample.getSamplingDeviation()
            items[x]['SamplingDeviation'] = \
                deviation.Title() if deviation else ''
            priority = obj.getPriority()
            items[x]['Priority'] = ''  # priority.Title()

            storage_location = sample.getStorageLocation()
            items[x]['getStorageLocation'] = \
                storage_location.Title() if storage_location else ''
            items[x]['AdHoc'] = sample.getAdHoc() and True or ''

            after_icons = ""
            state = workflow.getInfoFor(obj, 'worksheetanalysis_review_state')
            if state == 'assigned':
                after_icons += "<img src='%s/++resource++bika.lims.images/worksheet.png' title='%s'/>" % \
                    (self.portal_url, t(_("All analyses assigned")))
            if workflow.getInfoFor(obj, 'review_state') == 'invalid':
                after_icons += "<img src='%s/++resource++bika.lims.images/delete.png' title='%s'/>" % \
                    (self.portal_url, t(_("Results have been withdrawn")))
            if obj.getLate():
                after_icons += "<img src='%s/++resource++bika.lims.images/late.png' title='%s'>" % \
                    (self.portal_url, t(_("Late Analyses")))
            if samplingdate > DateTime():
                after_icons += "<img src='%s/++resource++bika.lims.images/calendar.png' title='%s'>" % \
                    (self.portal_url, t(_("Future dated sample")))
            if obj.getInvoiceExclude():
                after_icons += "<img src='%s/++resource++bika.lims.images/invoice_exclude.png' title='%s'>" % \
                    (self.portal_url, t(_("Exclude from invoice")))
            if sample.getSampleType().getHazardous():
                after_icons += "<img src='%s/++resource++bika.lims.images/hazardous.png' title='%s'>" % \
                    (self.portal_url, t(_("Hazardous")))
            if after_icons:
                items[x]['after']['getRequestID'] = after_icons

            items[x]['Created'] = self.ulocalized_time(obj.created())

            contact = obj.getContact()
            if contact:
                items[x]['ClientContact'] = contact.Title()
                items[x]['replace']['ClientContact'] = "<a href='%s'>%s</a>" % \
                    (contact.absolute_url(), contact.Title())
            else:
                items[x]['ClientContact'] = ""

            SamplingWorkflowEnabled = sample.getSamplingWorkflowEnabled()
            if SamplingWorkflowEnabled and not samplingdate > DateTime():
                datesampled = self.ulocalized_time(sample.getDateSampled())
                if not datesampled:
                    datesampled = self.ulocalized_time(DateTime(),
                                                       long_format=1)
                    items[x]['class']['getDateSampled'] = 'provisional'
                sampler = sample.getSampler().strip()
                if sampler:
                    items[x]['replace']['getSampler'] = self.user_fullname(
                        sampler)
                if 'Sampler' in member.getRoles() and not sampler:
                    sampler = member.id
                    items[x]['class']['getSampler'] = 'provisional'
            else:
                datesampled = ''
                sampler = ''
            items[x]['getDateSampled'] = datesampled
            items[x]['getSampler'] = sampler

            # sampling workflow - inline edits for Sampler and Date Sampled
            checkPermission = self.context.portal_membership.checkPermission
            state = workflow.getInfoFor(obj, 'review_state')
            if state == 'to_be_sampled' \
                    and checkPermission(SampleSample, obj) \
                    and not samplingdate > DateTime():
                items[x]['required'] = ['getSampler', 'getDateSampled']
                items[x]['allow_edit'] = ['getSampler', 'getDateSampled']
                samplers = getUsers(sample,
                                    ['Sampler', 'LabManager', 'Manager'])
                username = member.getUserName()
                users = [{'ResultValue': u,
                          'ResultText': samplers.getValue(u)}
                         for u in samplers]
                items[x]['choices'] = {'getSampler': users}
                if not sampler and username in samplers.keys():
                    sampler = username
                items[x]['getSampler'] = sampler

            # These don't exist on ARs
            # XXX This should be a list of preservers...
            items[x]['getPreserver'] = ''
            items[x]['getDatePreserved'] = ''

            # inline edits for Preserver and Date Preserved
            checkPermission = self.context.portal_membership.checkPermission
            if checkPermission(PreserveSample, obj):
                items[x]['required'] = ['getPreserver', 'getDatePreserved']
                items[x]['allow_edit'] = ['getPreserver', 'getDatePreserved']
                preservers = getUsers(obj,
                                      ['Preserver', 'LabManager', 'Manager'])
                username = member.getUserName()
                users = [{'ResultValue': u,
                          'ResultText': preservers.getValue(u)}
                         for u in preservers]
                items[x]['choices'] = {'getPreserver': users}
                preserver = username if username in preservers.keys() else ''
                items[x]['getPreserver'] = preserver
                items[x]['getDatePreserved'] = self.ulocalized_time(
                    DateTime(), long_format=1)
                items[x]['class']['getPreserver'] = 'provisional'
                items[x]['class']['getDatePreserved'] = 'provisional'

            # Submitting user may not verify results
            if items[x]['review_state'] == 'to_be_verified' and \
               not checkPermission(VerifyOwnResults, obj):
                self_submitted = False
                try:
                    review_history = list(
                        workflow.getInfoFor(obj, 'review_history'))
                    review_history.reverse()
                    for event in review_history:
                        if event.get('action') == 'submit':
                            if event.get('actor') == member.getId():
                                self_submitted = True
                            break
                    if self_submitted:
                        items[x]['after']['state_title'] = \
                             "<img src='++resource++bika.lims.images/submitted-by-current-user.png' title='%s'/>" % \
                             t(_("Cannot verify: Submitted by current user"))
                except Exception:
                    pass

        # Hide Preservation/Sampling workflow actions if the edit columns
        # are not displayed.
        toggle_cols = self.get_toggle_cols()
        new_states = []
        for state in self.review_states:
            if state['id'] == self.review_state:
                if 'getSampler' not in toggle_cols \
                   or 'getDateSampled' not in toggle_cols:
                    state.setdefault('hide_transitions', []).append('sample')
                if 'getPreserver' not in toggle_cols \
                   or 'getDatePreserved' not in toggle_cols:
                    state.setdefault('hide_transitions', []).append('preserve')
            new_states.append(state)
        self.review_states = new_states

        return items
예제 #52
0
파일: i500.py 프로젝트: nafwa03/olims
def Import(context, request, instrumentname='sysmex_xs_500i'):
    """ Sysmex XS - 500i analysis results
    """
    # The exact file layout is not fully documented, so an 'Analysis Service
    # selector' was added. If no Analysis Service is selected, each 'data'
    # column is interpreted as a different Analysis Service; if one is
    # selected, all data columns are interpreted as different data belonging
    # to a single Analysis Service.
    formitems = getForm(instrumentname, request)
    infile = formitems['infile']
    fileformat = formitems['fileformat']
    artoapply = formitems['artoapply']
    override = formitems['override']
    sample = formitems['sample']
    instrument = formitems['instrument']
    errors = []
    logs = []
    warns = []

    # Load the most suitable parser according to file extension/options/etc...
    parser = None
    if not hasattr(infile, 'filename'):
        errors.append(_("No file selected"))
    if fileformat == 'csv':
        # Get the Analysis Service selected, if there is one.
        analysis = request.form.get('analysis_service', None)
        if analysis:
            # Get the default result key
            defaultresult = request.form.get('default_result', None)
            # Flag an error if the default result key is missing.
            if defaultresult:
                parser = SysmexXS500iCSVParser(infile, analysis, defaultresult)
            else:
                errors.append(t(_("You should introduce a default result key.")))
        else:
            parser = SysmexXS500iCSVParser(infile)
    else:
        errors.append(t(_("Unrecognized file format ${fileformat}",
                          mapping={"fileformat": fileformat})))

    if parser:
        # Load the importer
        status = ['sample_received', 'attachment_due', 'to_be_verified']
        if artoapply == 'received':
            status = ['sample_received']
        elif artoapply == 'received_tobeverified':
            status = ['sample_received', 'attachment_due', 'to_be_verified']

        over = [False, False]
        if override == 'nooverride':
            over = [False, False]
        elif override == 'override':
            over = [True, False]
        elif override == 'overrideempty':
            over = [True, True]

        sam = ['getRequestID', 'getSampleID', 'getClientSampleID']
        if sample == 'requestid':
            sam = ['getRequestID']
        elif sample == 'sampleid':
            sam = ['getSampleID']
        elif sample == 'clientsid':
            sam = ['getClientSampleID']
        elif sample == 'sample_clientsid':
            sam = ['getSampleID', 'getClientSampleID']
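
        # Summary of the form-option -> importer-argument mapping built above
        # (semantics inferred from the option names; treat this as an
        # assumption rather than documented behaviour):
        #   artoapply -> allowed_ar_states: AR review states allowed to receive results
        #   override  -> [override_non_empty, override_empty]: whether existing results
        #                are overwritten, and whether empty values count as overrides
        #   sample    -> idsearchcriteria: catalog getters used to match ARs/Samples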

        importer = SysmexXS500iImporter(parser=parser,
                                        context=context,
                                        idsearchcriteria=sam,
                                        allowed_ar_states=status,
                                        allowed_analysis_states=None,
                                        override=over,
                                        instrument_uid=instrument)
        tbex = ''
        try:
            importer.process()
        except Exception:
            tbex = traceback.format_exc()
        errors = importer.errors
        logs = importer.logs
        warns = importer.warns
        if tbex:
            errors.append(tbex)

    results = {'errors': errors, 'log': logs, 'warns': warns}

    return json.dumps(results)
예제 #53
0
파일: vs2.py 프로젝트: nafwa03/olims
def Import(context, request):
    """ Abaxix VetScan VS2 analysis results
    """
    infile = request.form['data_file']
    fileformat = request.form['format']
    artoapply = request.form['artoapply']
    override = request.form['override']
    sample = request.form.get('sample',
                              'requestid')
    instrument = request.form.get('instrument', None)
    errors = []
    logs = []
    warns = []

    # Load the most suitable parser according to file extension/options/etc...
    parser = None
    if not hasattr(infile, 'filename'):
        errors.append(_("No file selected"))
    if fileformat == 'csv':
        parser = AbaxisVetScanCSVVS2Parser(infile)
    else:
        errors.append(t(_("Unrecognized file format ${fileformat}",
                          mapping={"fileformat": fileformat})))

    if parser:
        # Load the importer
        status = ['sample_received', 'attachment_due', 'to_be_verified']
        if artoapply == 'received':
            status = ['sample_received']
        elif artoapply == 'received_tobeverified':
            status = ['sample_received', 'attachment_due', 'to_be_verified']

        over = [False, False]
        if override == 'nooverride':
            over = [False, False]
        elif override == 'override':
            over = [True, False]
        elif override == 'overrideempty':
            over = [True, True]

        sam = ['getRequestID', 'getSampleID', 'getClientSampleID']
        if sample == 'requestid':
            sam = ['getRequestID']
        elif sample == 'sampleid':
            sam = ['getSampleID']
        elif sample == 'clientsid':
            sam = ['getClientSampleID']
        elif sample == 'sample_clientsid':
            sam = ['getSampleID', 'getClientSampleID']

        importer = AbaxisVetScanVS2Importer(parser=parser,
                                            context=context,
                                            idsearchcriteria=sam,
                                            allowed_ar_states=status,
                                            allowed_analysis_states=None,
                                            override=over,
                                            instrument_uid=instrument)
        tbex = ''
        try:
            importer.process()
        except Exception:
            tbex = traceback.format_exc()
        errors = importer.errors
        logs = importer.logs
        warns = importer.warns
        if tbex:
            errors.append(tbex)

    results = {'errors': errors, 'log': logs, 'warns': warns}

    return json.dumps(results)
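
Because this Import view reads everything it needs from request.form, it can also be driven programmatically, for example from a test. A minimal sketch, assuming a Plone/Zope test layer where the portal object and its REQUEST are available, that Import refers to the function above, and that the uploaded file can be faked with a small object exposing the filename attribute the code checks for:

import json

class FakeUpload(object):
    """Stand-in for the uploaded instrument file: Import() only checks the
    'filename' attribute and hands the object to the CSV parser, which then
    reads it like a file."""
    def __init__(self, path):
        self.filename = path
        self._file = open(path)
    def read(self, size=-1):
        return self._file.read(size)
    def readlines(self):
        return self._file.readlines()

def run_vs2_import(portal, csv_path):
    request = portal.REQUEST
    # The keys below mirror exactly what Import() reads from request.form.
    request.form.update({
        'data_file': FakeUpload(csv_path),
        'format': 'csv',
        'artoapply': 'received_tobeverified',
        'override': 'nooverride',
        'sample': 'requestid',
        'instrument': None,
    })
    results = json.loads(Import(portal, request))
    return results['errors'], results['log'], results['warns']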
예제 #54
0
    def __call__(self):
        form = self.request.form
        CheckAuthenticator(self.request.form)
        PostOnly(self.request.form)
        uc = getToolByName(self.context, 'uid_catalog')
        bsc = getToolByName(self.context, 'bika_setup_catalog')
        portal_catalog = getToolByName(self.context, 'portal_catalog')

        # Load the form data from request.state.  If anything goes wrong here,
        # put a bullet through the whole process.
        try:
            states = json.loads(form['state'])
        except Exception as e:
            message = t(
                _('Badly formed state: ${errmsg}',
                  mapping={'errmsg': e.message}))
            ajax_form_error(self.errors, message=message)
            return json.dumps({'errors': self.errors})
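
        # Illustrative sketch (not taken from the codebase) of the shape
        # `states` is expected to have after parsing: a dict keyed by AR
        # column number, each value a flat mapping of field names to values,
        # where "<field>_uid" companions carry catalog UIDs and
        # "<field>_hidden" flags mark pre-filled fields, e.g.:
        #   {"0": {"Client": "<client uid>",
        #          "Contact": "<contact name>", "Contact_uid": "<contact uid>",
        #          "SampleType": "<sample type>", "SampleType_uid": "<uid>",
        #          "SamplingDate": "<date>",
        #          "Analyses": ["<service uid>", "<service uid>"]},
        #    "1": {}}   # completely blank columns are discarded below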

        # Validate incoming form data
        required = [
            field.getName()
            for field in AnalysisRequestSchema.fields() if field.required
        ] + ["Analyses"]

        # First remove all states which are completely empty; if all
        # required fields are not present, we assume that the current
        # AR had no data entered, and can be ignored
        nonblank_states = {}
        for arnum, state in states.items():
            for key, val in state.items():
                if val \
                        and "%s_hidden" % key not in state \
                        and not key.endswith('hidden'):
                    nonblank_states[arnum] = state
                    break

        # in valid_states, all ars that pass validation will be stored
        valid_states = {}
        for arnum, state in nonblank_states.items():
            # Secondary ARs are a special case, these fields are not required
            if state.get('Sample', ''):
                if 'SamplingDate' in required:
                    required.remove('SamplingDate')
                if 'SampleType' in required:
                    required.remove('SampleType')
            # fields flagged as 'hidden' are not considered required because
            # they will already have default values inserted in them
            # (iterate over a copy: removing entries from the list being
            # iterated over would otherwise skip fields)
            for fieldname in list(required):
                if fieldname + '_hidden' in state:
                    required.remove(fieldname)
            missing = [f for f in required if not state.get(f, '')]
            # If there are required fields missing, flag an error
            if missing:
                msg = t(
                    _('Required fields have no values: '
                      '${field_names}',
                      mapping={'field_names': ', '.join(missing)}))
                ajax_form_error(self.errors, arnum=arnum, message=msg)
                continue
            # This ar is valid!
            valid_states[arnum] = state

        # - Expand lists of UIDs returned by multiValued reference widgets
        # - Transfer _uid values into their respective fields
        for arnum in valid_states.keys():
            for field, value in valid_states[arnum].items():
                if field.endswith('_uid') and ',' in value:
                    valid_states[arnum][field] = value.split(',')
                elif field.endswith('_uid'):
                    valid_states[arnum][field] = value

        if self.errors:
            return json.dumps({'errors': self.errors})

        # Now, we will create the specified ARs.
        ARs = []
        for arnum, state in valid_states.items():
            # Create the Analysis Request
            ar = create_analysisrequest(
                portal_catalog(UID=state['Client'])[0].getObject(),
                self.request, state)
            ARs.append(ar.Title())

        # Display the appropriate message after creation
        if len(ARs) > 1:
            message = _('Analysis requests ${ARs} were successfully created.',
                        mapping={'ARs': safe_unicode(', '.join(ARs))})
        else:
            message = _('Analysis request ${AR} was successfully created.',
                        mapping={'AR': safe_unicode(ARs[0])})
        self.context.plone_utils.addPortalMessage(message, 'info')
        # Automatic label printing won't print "register" labels for secondary ARs.
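        # (Presumably AR IDs end with a per-sample counter, '01' marking the
        # first, primary AR on a sample, so the filter below keeps only
        # primary ARs.)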
        new_ars = [ar for ar in ARs if ar[-2:] == '01']
        if 'register' in self.context.bika_setup.getAutoPrintStickers() \
                and new_ars:
            return json.dumps({
                'success':
                message,
                'stickers':
                new_ars,
                'stickertemplate':
                self.context.bika_setup.getAutoStickerTemplate()
            })
        else:
            return json.dumps({'success': message})
예제 #55
0
    def __call__(self):
        # get all the data into datalines

        sc = getToolByName(self.context, "bika_setup_catalog")
        bc = getToolByName(self.context, "bika_analysis_catalog")
        rc = getToolByName(self.context, "reference_catalog")
        self.report_content = {}
        parms = []
        headings = {}
        headings["header"] = _("Analyses per analysis service")
        headings["subheader"] = _("Number of analyses requested per analysis service")

        query = {"portal_type": "Analysis"}
        client_title = None
        if "ClientUID" in self.request.form:
            client_uid = self.request.form["ClientUID"]
            query["getClientUID"] = client_uid
            client = rc.lookupObject(client_uid)
            client_title = client.Title()
        else:
            client = logged_in_client(self.context)
            if client:
                client_title = client.Title()
                query["getClientUID"] = client.UID()
        if client_title:
            parms.append({"title": _("Client"), "value": client_title, "type": "text"})

        date_query = formatDateQuery(self.context, "Requested")
        if date_query:
            query["created"] = date_query
            requested = formatDateParms(self.context, "Requested")
            parms.append({"title": _("Requested"), "value": requested, "type": "text"})

        date_query = formatDateQuery(self.context, "Published")
        if date_query:
            query["getDatePublished"] = date_query
            published = formatDateParms(self.context, "Published")
            parms.append({"title": _("Published"), "value": published, "type": "text"})

        workflow = getToolByName(self.context, "portal_workflow")
        if "bika_analysis_workflow" in self.request.form:
            query["review_state"] = self.request.form["bika_analysis_workflow"]
            review_state = workflow.getTitleForStateOnType(self.request.form["bika_analysis_workflow"], "Analysis")
            parms.append({"title": _("Status"), "value": review_state, "type": "text"})

        if "bika_cancellation_workflow" in self.request.form:
            query["cancellation_state"] = self.request.form["bika_cancellation_workflow"]
            cancellation_state = workflow.getTitleForStateOnType(
                self.request.form["bika_cancellation_workflow"], "Analysis"
            )
            parms.append({"title": _("Active"), "value": cancellation_state, "type": "text"})

        if "bika_worksheetanalysis_workflow" in self.request.form:
            query["worksheetanalysis_review_state"] = self.request.form["bika_worksheetanalysis_workflow"]
            ws_review_state = workflow.getTitleForStateOnType(
                self.request.form["bika_worksheetanalysis_workflow"], "Analysis"
            )
            parms.append({"title": _("Assigned to worksheet"), "value": ws_review_state, "type": "text"})

        # and now lets do the actual report lines
        formats = {"columns": 2, "col_heads": [_("Analysis service"), _("Number of analyses")], "class": ""}

        datalines = []
        count_all = 0
        for cat in sc(portal_type="AnalysisCategory", sort_on="sortable_title"):
            dataline = [{"value": cat.Title, "class": "category_heading", "colspan": 2}]
            datalines.append(dataline)
            for service in sc(portal_type="AnalysisService", getCategoryUID=cat.UID, sort_on="sortable_title"):
                query["getServiceUID"] = service.UID
                analyses = bc(query)
                count_analyses = len(analyses)

                dataline = []
                dataitem = {"value": service.Title}
                dataline.append(dataitem)
                dataitem = {"value": count_analyses}

                dataline.append(dataitem)

                datalines.append(dataline)

                count_all += count_analyses

        # footer data
        footlines = []
        footline = []
        footitem = {"value": _("Total"), "class": "total_label"}
        footline.append(footitem)
        footitem = {"value": count_all}
        footline.append(footitem)
        footlines.append(footline)

        self.report_content = {
            "headings": headings,
            "parms": parms,
            "formats": formats,
            "datalines": datalines,
            "footings": footlines,
        }

        title = t(headings["header"])

        if self.request.get("output_format", "") == "CSV":
            import csv
            import StringIO
            import datetime

            fieldnames = ["Analysis Service", "Analyses"]
            output = StringIO.StringIO()
            dw = csv.DictWriter(output, extrasaction="ignore", fieldnames=fieldnames)
            dw.writerow(dict((fn, fn) for fn in fieldnames))
            for row in datalines:
                if len(row) == 1:
                    # category heading thingy
                    continue
                dw.writerow({"Analysis Service": row[0]["value"], "Analyses": row[1]["value"]})
            report_data = output.getvalue()
            output.close()
            date = datetime.datetime.now().strftime("%Y%m%d%H%M")
            setheader = self.request.RESPONSE.setHeader
            setheader("Content-Type", "text/csv")
            setheader("Content-Disposition", 'attachment;filename="analysesperservice_%s.csv"' % date)
            self.request.RESPONSE.write(report_data)
        else:
            return {"report_title": title, "report_data": self.template()}
예제 #56
0
    def __call__(self):
        # get all the data into datalines

        sc = getToolByName(self.context, "bika_setup_catalog")
        bc = getToolByName(self.context, "bika_analysis_catalog")
        rc = getToolByName(self.context, "reference_catalog")
        self.report_content = {}
        parms = []
        headings = {}
        headings["header"] = _("Analysis turnaround times")
        headings["subheader"] = _("The turnaround time of analyses")

        query = {"portal_type": "Analysis"}
        client_title = None
        if "ClientUID" in self.request.form:
            client_uid = self.request.form["ClientUID"]
            query["getClientUID"] = client_uid
            client = rc.lookupObject(client_uid)
            client_title = client.Title()
        else:
            client = logged_in_client(self.context)
            if client:
                client_title = client.Title()
                query["getClientUID"] = client.UID()
        if client_title:
            parms.append({"title": _("Client"), "value": client_title, "type": "text"})

        date_query = formatDateQuery(self.context, "Received")
        if date_query:
            query["created"] = date_query
            received = formatDateParms(self.context, "Received")
            parms.append({"title": _("Received"), "value": received, "type": "text"})

        query["review_state"] = "published"

        workflow = getToolByName(self.context, "portal_workflow")
        if "bika_worksheetanalysis_workflow" in self.request.form:
            query["worksheetanalysis_review_state"] = self.request.form["bika_worksheetanalysis_workflow"]
            ws_review_state = workflow.getTitleForStateOnType(
                self.request.form["bika_worksheetanalysis_workflow"], "Analysis"
            )
            parms.append({"title": _("Assigned to worksheet"), "value": ws_review_state, "type": "text"})

        # query all the analyses and increment the counts
        count_early = 0
        mins_early = 0
        count_late = 0
        mins_late = 0
        count_undefined = 0
        services = {}

        analyses = bc(query)
        for a in analyses:
            analysis = a.getObject()
            service_uid = analysis.getServiceUID()
            if service_uid not in services:
                services[service_uid] = {
                    "count_early": 0,
                    "count_late": 0,
                    "mins_early": 0,
                    "mins_late": 0,
                    "count_undefined": 0,
                }
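            # Sign convention as used below: getEarliness() returns minutes,
            # positive when the analysis was completed early, negative when
            # late; a value of zero is counted as "undefined".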
            earliness = analysis.getEarliness()
            if earliness < 0:
                count_late = services[service_uid]["count_late"]
                mins_late = services[service_uid]["mins_late"]
                count_late += 1
                mins_late -= earliness
                services[service_uid]["count_late"] = count_late
                services[service_uid]["mins_late"] = mins_late
            if earliness > 0:
                count_early = services[service_uid]["count_early"]
                mins_early = services[service_uid]["mins_early"]
                count_early += 1
                mins_early += earliness
                services[service_uid]["count_early"] = count_early
                services[service_uid]["mins_early"] = mins_early
            if earliness == 0:
                count_undefined = services[service_uid]["count_undefined"]
                count_undefined += 1
                services[service_uid]["count_undefined"] = count_undefined

        # calculate averages
        for service_uid in services.keys():
            count_early = services[service_uid]["count_early"]
            mins_early = services[service_uid]["mins_early"]
            if count_early == 0:
                services[service_uid]["ave_early"] = ""
            else:
                avemins = (mins_early) / count_early
                services[service_uid]["ave_early"] = formatDuration(self.context, avemins)
            count_late = services[service_uid]["count_late"]
            mins_late = services[service_uid]["mins_late"]
            if count_late == 0:
                services[service_uid]["ave_late"] = ""
            else:
                avemins = mins_late / count_late
                services[service_uid]["ave_late"] = formatDuration(self.context, avemins)

        # and now lets do the actual report lines
        formats = {
            "columns": 7,
            "col_heads": [
                _("Analysis"),
                _("Count"),
                _("Undefined"),
                _("Late"),
                _("Average late"),
                _("Early"),
                _("Average early"),
            ],
            "class": "",
        }

        total_count_early = 0
        total_count_late = 0
        total_mins_early = 0
        total_mins_late = 0
        total_count_undefined = 0
        datalines = []

        for cat in sc(portal_type="AnalysisCategory", sort_on="sortable_title"):
            catline = [{"value": cat.Title, "class": "category_heading", "colspan": 7}]
            first_time = True
            cat_count_early = 0
            cat_count_late = 0
            cat_count_undefined = 0
            cat_mins_early = 0
            cat_mins_late = 0
            for service in sc(portal_type="AnalysisService", getCategoryUID=cat.UID, sort_on="sortable_title"):

                dataline = [{"value": service.Title, "class": "testgreen"}]
                if service.UID not in services:
                    continue

                if first_time:
                    datalines.append(catline)
                    first_time = False

                # analyses found
                cat_count_early += services[service.UID]["count_early"]
                cat_count_late += services[service.UID]["count_late"]
                cat_count_undefined += services[service.UID]["count_undefined"]
                cat_mins_early += services[service.UID]["mins_early"]
                cat_mins_late += services[service.UID]["mins_late"]

                count = (
                    services[service.UID]["count_early"]
                    + services[service.UID]["count_late"]
                    + services[service.UID]["count_undefined"]
                )

                dataline.append({"value": count, "class": "number"})
                dataline.append({"value": services[service.UID]["count_undefined"], "class": "number"})
                dataline.append({"value": services[service.UID]["count_late"], "class": "number"})
                dataline.append({"value": services[service.UID]["ave_late"], "class": "number"})
                dataline.append({"value": services[service.UID]["count_early"], "class": "number"})
                dataline.append({"value": services[service.UID]["ave_early"], "class": "number"})

                datalines.append(dataline)

            # category totals
            dataline = [{"value": "%s - total" % (cat.Title), "class": "subtotal_label"}]

            dataline.append(
                {"value": cat_count_early + cat_count_late + cat_count_undefined, "class": "subtotal_number"}
            )

            dataline.append({"value": cat_count_undefined, "class": "subtotal_number"})

            dataline.append({"value": cat_count_late, "class": "subtotal_number"})

            if cat_count_late:
                dataitem = {"value": formatDuration(self.context, cat_mins_late / cat_count_late),
                            "class": "subtotal_number"}
            else:
                dataitem = {"value": 0, "class": "subtotal_number"}

            dataline.append(dataitem)

            dataline.append({"value": cat_count_early, "class": "subtotal_number"})

            if cat_count_early:
                dataitem = {"value": formatDuration(self.context, cat_mins_early / cat_count_early),
                            "class": "subtotal_number"}
            else:
                dataitem = {"value": 0, "class": "subtotal_number"}

            dataline.append(dataitem)

            total_count_early += cat_count_early
            total_count_late += cat_count_late
            total_count_undefined += cat_count_undefined
            total_mins_early += cat_mins_early
            total_mins_late += cat_mins_late

        # footer data
        footlines = []
        footline = []
        footline = [{"value": _("Total"), "class": "total"}]

        footline.append(
            {"value": total_count_early + total_count_late + total_count_undefined, "class": "total number"}
        )

        footline.append({"value": total_count_undefined, "class": "total number"})

        footline.append({"value": total_count_late, "class": "total number"})

        if total_count_late:
            ave_mins = total_mins_late / total_count_late
            footline.append({"value": formatDuration(self.context, ave_mins), "class": "total number"})
        else:
            footline.append({"value": ""})

        footline.append({"value": total_count_early, "class": "total number"})

        if total_count_early:
            ave_mins = total_mins_early / total_count_early
            footline.append({"value": formatDuration(self.context, ave_mins), "class": "total number"})
        else:
            footline.append({"value": "", "class": "total number"})

        footlines.append(footline)

        self.report_content = {
            "headings": headings,
            "parms": parms,
            "formats": formats,
            "datalines": datalines,
            "footings": footlines,
        }

        if self.request.get("output_format", "") == "CSV":
            import csv
            import StringIO
            import datetime

            fieldnames = ["Analysis", "Count", "Undefined", "Late", "Average late", "Early", "Average early"]
            output = StringIO.StringIO()
            dw = csv.DictWriter(output, extrasaction="ignore", fieldnames=fieldnames)
            dw.writerow(dict((fn, fn) for fn in fieldnames))
            for row in datalines:
                if len(row) == 1:
                    # category heading thingy
                    continue
                dw.writerow(
                    {
                        "Analysis": row[0]["value"],
                        "Count": row[1]["value"],
                        "Undefined": row[2]["value"],
                        "Late": row[3]["value"],
                        "Average late": row[4]["value"],
                        "Early": row[5]["value"],
                        "Average early": row[6]["value"],
                    }
                )
            report_data = output.getvalue()
            output.close()
            date = datetime.datetime.now().strftime("%Y%m%d%H%M")
            setheader = self.request.RESPONSE.setHeader
            setheader("Content-Type", "text/csv")
            setheader("Content-Disposition", 'attachment;filename="analysestats_%s.csv"' % date)
            self.request.RESPONSE.write(report_data)
        else:
            return {"report_title": t(headings["header"]), "report_data": self.template()}
예제 #57
0
파일: workflow.py 프로젝트: nafwa03/olims
    def workflow_action_retract_ar(self):
        workflow = getToolByName(self.context, 'portal_workflow')
        # AR should be retracted
        # Can't transition inactive ARs
        if not isActive(self.context):
            message = _('Item is inactive.')
            self.context.plone_utils.addPortalMessage(message, 'info')
            self.request.response.redirect(self.context.absolute_url())
            return

        # 1. Copy the AR, linking the original and the copy to each other
        ar = self.context
        newar = self.cloneAR(ar)

        # 2. The old AR gets a status of 'invalid'
        workflow.doActionFor(ar, 'retract_ar')

        # 3. The new AR copy opens in status 'to be verified'
        changeWorkflowState(newar, 'bika_ar_workflow', 'to_be_verified')

        # 4. The system immediately alerts, by email, the client contacts who
        # ordered the results that a possible mistake has been picked up and
        # is under investigation.
        # As much information as possible is provided in the email, including
        # a link to the AR online.
        laboratory = self.context.bika_setup.laboratory
        lab_address = "<br/>".join(laboratory.getPrintAddress())
        mime_msg = MIMEMultipart('related')
        mime_msg['Subject'] = t(_("Erroneus result publication from ${request_id}",
                                mapping={"request_id": ar.getRequestID()}))
        mime_msg['From'] = formataddr(
            (encode_header(laboratory.getName()),
             laboratory.getEmailAddress()))
        to = []
        contact = ar.getContact()
        if contact:
            to.append(formataddr((encode_header(contact.Title()),
                                   contact.getEmailAddress())))
        for cc in ar.getCCContact():
            formatted = formataddr((encode_header(cc.Title()),
                                   cc.getEmailAddress()))
            if formatted not in to:
                to.append(formatted)

        managers = self.context.portal_groups.getGroupMembers('LabManagers')
        for bcc in managers:
            user = self.portal.acl_users.getUser(bcc)
            if user:
                uemail = user.getProperty('email')
                ufull = user.getProperty('fullname')
                formatted = formataddr((encode_header(ufull), uemail))
                if formatted not in to:
                    to.append(formatted)
        mime_msg['To'] = ','.join(to)
        aranchor = "<a href='%s'>%s</a>" % (ar.absolute_url(),
                                            ar.getRequestID())
        naranchor = "<a href='%s'>%s</a>" % (newar.absolute_url(),
                                             newar.getRequestID())
        if 'addremarks' in self.request and ar.getRemarks():
            addremarks = ("<br/><br/>"
                          + _("Additional remarks:")
                          + "<br/>"
                          + ar.getRemarks().split("===")[1].strip()
                          + "<br/><br/>")
        else:
            addremarks = ''
        sub_d = dict(request_link=aranchor,
                     new_request_link=naranchor,
                     remarks=addremarks,
                     lab_address=lab_address)
        body = Template("Some errors have been detected in the results report "
                        "published from the Analysis Request $request_link. The Analysis "
                        "Request $new_request_link has been created automatically and the "
                        "previous has been invalidated.<br/>The possible mistake "
                        "has been picked up and is under investigation.<br/><br/>"
                        "$remarks $lab_address").safe_substitute(sub_d)
        msg_txt = MIMEText(safe_unicode(body).encode('utf-8'),
                           _subtype='html')
        mime_msg.preamble = 'This is a multi-part MIME message.'
        mime_msg.attach(msg_txt)
        try:
            host = getToolByName(self.context, 'MailHost')
            host.send(mime_msg.as_string(), immediate=True)
        except Exception as msg:
            message = _('Unable to send an email to alert lab '
                        'client contacts that the Analysis Request has been '
                        'retracted: ${error}',
                        mapping={'error': safe_unicode(msg)})
            self.context.plone_utils.addPortalMessage(message, 'warning')

        message = _('${items} invalidated.',
                    mapping={'items': ar.getRequestID()})
        self.context.plone_utils.addPortalMessage(message, 'warning')
        self.request.response.redirect(newar.absolute_url())
예제 #58
0
파일: add.py 프로젝트: nafwa03/olims
    def __call__(self):
        form = self.request.form
        CheckAuthenticator(self.request.form)
        PostOnly(self.request.form)
        uc = getToolByName(self.context, 'uid_catalog')
        bsc = getToolByName(self.context, 'bika_setup_catalog')
        portal_catalog = getToolByName(self.context, 'portal_catalog')

        # Load the form data from request.state.  If anything goes wrong here,
        # put a bullet through the whole process.
        try:
            states = json.loads(form['state'])
        except Exception as e:
            message = t(_('Badly formed state: ${errmsg}',
                          mapping={'errmsg': e.message}))
            ajax_form_error(self.errors, message=message)
            return json.dumps({'errors': self.errors})

        # Validate incoming form data
        required = [field.getName() for field
                    in AnalysisRequestSchema.fields()
                    if field.required] + ["Analyses"]

        # First remove all states which are completely empty; if all
        # required fields are not present, we assume that the current
        # AR had no data entered, and can be ignored
        nonblank_states = {}
        for arnum, state in states.items():
            for key, val in state.items():
                if val \
                        and "%s_hidden" % key not in state \
                        and not key.endswith('hidden'):
                    nonblank_states[arnum] = state
                    break

        # in valid_states, all ars that pass validation will be stored
        valid_states = {}
        for arnum, state in nonblank_states.items():
            # Secondary ARs are a special case, these fields are not required
            if state.get('Sample', ''):
                if 'SamplingDate' in required:
                    required.remove('SamplingDate')
                if 'SampleType' in required:
                    required.remove('SampleType')
            # fields flagged as 'hidden' are not considered required because
            # they will already have default values inserted in them
            # (iterate over a copy: removing entries from the list being
            # iterated over would otherwise skip fields)
            for fieldname in list(required):
                if fieldname + '_hidden' in state:
                    required.remove(fieldname)
            missing = [f for f in required if not state.get(f, '')]
            # If there are required fields missing, flag an error
            if missing:
                msg = t(_('Required fields have no values: '
                          '${field_names}',
                          mapping={'field_names': ', '.join(missing)}))
                ajax_form_error(self.errors, arnum=arnum, message=msg)
                continue
            # This ar is valid!
            valid_states[arnum] = state

        # - Expand lists of UIDs returned by multiValued reference widgets
        # - Transfer _uid values into their respective fields
        for arnum in valid_states.keys():
            for field, value in valid_states[arnum].items():
                if field.endswith('_uid') and ',' in value:
                    valid_states[arnum][field] = value.split(',')
                elif field.endswith('_uid'):
                    valid_states[arnum][field] = value

        if self.errors:
            return json.dumps({'errors': self.errors})

        # Now, we will create the specified ARs.
        ARs = []
        for arnum, state in valid_states.items():
            # Create the Analysis Request
            ar = create_analysisrequest(
                portal_catalog(UID=state['Client'])[0].getObject(),
                self.request,
                state
            )
            ARs.append(ar.Title())

        # Display the appropriate message after creation
        if len(ARs) > 1:
            message = _('Analysis requests ${ARs} were successfully created.',
                        mapping={'ARs': safe_unicode(', '.join(ARs))})
        else:
            message = _('Analysis request ${AR} was successfully created.',
                        mapping={'AR': safe_unicode(ARs[0])})
        self.context.plone_utils.addPortalMessage(message, 'info')
        # Automatic label printing won't print "register" labels for secondary ARs.
        new_ars = [ar for ar in ARs if ar[-2:] == '01']
        if 'register' in self.context.bika_setup.getAutoPrintStickers() \
                and new_ars:
            return json.dumps({
                'success': message,
                'stickers': new_ars,
                'stickertemplate': self.context.bika_setup.getAutoStickerTemplate()
            })
        else:
            return json.dumps({'success': message})