Example #1
 def _sampler_data(self, sample=None):
     data = {}
     if not sample or not sample.getSampler():
         return data
     sampler = sample.getSampler()
     mtool = getToolByName(self, 'portal_membership')
     member = mtool.getMemberById(sampler)
     if member:
         mfullname = member.getProperty('fullname')
         memail = member.getProperty('email')
         mhomepage = member.getProperty('home_page')
         pc = getToolByName(self, 'portal_catalog')
         c = pc(portal_type='Contact', getUsername=member.id)
         c = c[0].getObject() if c else None
         cfullname = c.getFullname() if c else None
         cemail = c.getEmailAddress() if c else None
         data = {'id': member.id,
                 'fullname': to_utf8(cfullname) if cfullname else to_utf8(mfullname),
                 'email': cemail if cemail else memail,
                 'business_phone': c.getBusinessPhone() if c else '',
                 'business_fax': c.getBusinessFax() if c else '',
                 'home_phone': c.getHomePhone() if c else '',
                 'mobile_phone': c.getMobilePhone() if c else '',
                 'job_title': to_utf8(c.getJobTitle()) if c else '',
                 'department': to_utf8(c.getDepartment()) if c else '',
                 'physical_address': to_utf8(c.getPhysicalAddress()) if c else '',
                 'postal_address': to_utf8(c.getPostalAddress()) if c else '',
                 'home_page': to_utf8(mhomepage)}
     return data
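All of these snippets share the same lookup idiom: fetch a tool with getToolByName and query it. Below is a minimal sketch of the member-to-contact resolution used above, assuming a hypothetical helper that receives any context object living inside the Plone site (the tool names are the standard ones shown in the example):

    from Products.CMFCore.utils import getToolByName

    def sampler_email(context, userid):
        """Return the sampler's email, preferring a linked Contact object."""
        mtool = getToolByName(context, 'portal_membership')
        member = mtool.getMemberById(userid)
        if member is None:
            return None
        pc = getToolByName(context, 'portal_catalog')
        brains = pc(portal_type='Contact', getUsername=member.id)
        contact = brains[0].getObject() if brains else None
        # fall back to the member property when no Contact is linked
        return contact.getEmailAddress() if contact else member.getProperty('email')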
Example #2
 def __init__(self, parser, context,
              idsearchcriteria=None,
              override=[False, False],
              allowed_ar_states=None,
              allowed_analysis_states=None,
              instrument_uid=None):
     Logger.__init__(self)
     self._parser = parser
     self.context = context
     self._allowed_ar_states = allowed_ar_states
     self._allowed_analysis_states = allowed_analysis_states
     self._override = override
     self._idsearch = idsearchcriteria
     self._priorizedsearchcriteria = ''
     self.bsc = getToolByName(self.context, 'bika_setup_catalog')
     self.bac = getToolByName(self.context, 'bika_analysis_catalog')
     self.pc = getToolByName(self.context, 'portal_catalog')
     self.bc = getToolByName(self.context, 'bika_catalog')
     self.wf = getToolByName(self.context, 'portal_workflow')
     if not self._allowed_ar_states:
         self._allowed_ar_states = ['sample_received',
                                    'attachment_due',
                                    'to_be_verified']
     if not self._allowed_analysis_states:
         self._allowed_analysis_states = ['sampled',
                                          'sample_received',
                                          'attachment_due',
                                          'to_be_verified']
     if not self._idsearch:
         self._idsearch = ['getRequestID']
     self.instrument_uid = instrument_uid
Example #3
    def __call__(self, value, *args, **kwargs):
        instance = kwargs['instance']
        # fieldname = kwargs['field'].getName()
        # request = kwargs.get('REQUEST', {})
        # form = request.get('form', {})

        translate = getToolByName(instance, 'translation_service').translate
        bsc = getToolByName(instance, 'bika_setup_catalog')
        # uc = getToolByName(instance, 'uid_catalog')

        failures = []

        for category in value:
            if not category:
                continue
            services = bsc(portal_type="AnalysisService",
                           getCategoryUID=category)
            for service in services:
                service = service.getObject()
                calc = service.getCalculation()
                deps = calc and calc.getDependentServices() or []
                for dep in deps:
                    if dep.getCategoryUID() not in value:
                        title = dep.getCategoryTitle()
                        if title not in failures:
                            failures.append(title)
        if failures:
            msg = _("Validation failed: The selection requires the following "
                    "categories to be selected: ${categories}",
                    mapping={'categories': safe_unicode(','.join(failures))})
            return to_utf8(translate(msg))

        return True
Example #4
        def next_id(prefix):
            # normalize before anything
            prefix = fn_normalize(prefix)
            plone = context.portal_url.getPortalObject()
            # grab the first catalog we are indexed in.
            at = getToolByName(plone, 'archetype_tool')
            if context.portal_type in at.catalog_map:
                catalog_name = at.catalog_map[context.portal_type][0]
            else:
                catalog_name = 'portal_catalog'
            catalog = getToolByName(plone, catalog_name)

            # get all IDs that start with prefix
            # this must specifically exclude AR IDs (two -'s)
            rr = re.compile(r"^" + prefix + separator + r"\d+$")
            ids = [int(i.split(prefix+separator)[1]) \
                   for i in catalog.Indexes['id'].uniqueValues() \
                   if rr.match(i)]

            #plone_tool = getToolByName(context, 'plone_utils')
            #if not plone_tool.isIDAutoGenerated(l.id):
            ids.sort()
            _id = ids and ids[-1] or 0
            new_id = _id + 1
            return str(new_id)
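The sequencing logic above can be exercised in isolation. Here is a small, self-contained sketch of the same idea that works on a plain list of IDs instead of a real catalog; the prefix, separator and IDs are made up for illustration:

    import re

    def next_numeric_id(prefix, separator, existing_ids):
        """Return the next suffix for IDs shaped like '<prefix><separator><number>'."""
        pattern = re.compile(r"^%s%s(\d+)$" % (re.escape(prefix), re.escape(separator)))
        numbers = [int(m.group(1))
                   for m in (pattern.match(i) for i in existing_ids) if m]
        return str(max(numbers) + 1 if numbers else 1)

    # next_numeric_id('WS', '-', ['WS-1', 'WS-7', 'AR-3']) -> '8'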
Example #5
 def get_analysisrequest_verifier(self, analysisrequest):
     """Get the name of the member who last verified this AR
     """
     wtool = getToolByName(self.context, 'portal_workflow')
     mtool = getToolByName(self.context, 'portal_membership')
     verifier = None
     try:
         review_history = wtool.getInfoFor(analysisrequest, 'review_history')
     except Exception:
         return 'access denied'
     review_history = [review for review in review_history if review.get('action', '')]
     if not review_history:
         return 'no history'
     for items in review_history:
         action = items.get('action')
         if action != 'verify':
             continue
         actor = items.get('actor')
         member = mtool.getMemberById(actor)
         if not member:
             verifier = actor
             continue
         verifier = member.getProperty('fullname')
         if verifier is None or verifier == '':
             verifier = actor
     return verifier
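The same "last verifier" lookup can be expressed over a plain review_history list. A minimal sketch with fabricated history entries, independent of the workflow and membership tools:

    def last_verifier(review_history):
        """Return the actor of the most recent 'verify' entry, or None."""
        verifiers = [entry.get('actor') for entry in review_history
                     if entry.get('action') == 'verify']
        return verifiers[-1] if verifiers else None

    history = [{'action': 'receive', 'actor': 'clerk'},
               {'action': 'verify', 'actor': 'labmanager'}]
    assert last_verifier(history) == 'labmanager'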
Example #6
    def workflow_script_unassign(self):
        if skip(self, "unassign"):
            return
        workflow = getToolByName(self, 'portal_workflow')
        self.reindexObject(idxs=["review_state", ])
        rc = getToolByName(self, REFERENCE_CATALOG)
        wsUID = self.REQUEST['context_uid']
        ws = rc.lookupObject(wsUID)

        # May need to promote the Worksheet's review_state
        #  if all other analyses are at a higher state than this one was.
        # (or maybe retract it if there are no analyses left)
        # Note: duplicates, controls and blanks have 'assigned' as a review_state.
        can_submit = True
        can_attach = True
        can_verify = True
        ws_empty = False

        analyses = ws.getAnalyses()

        # We flag this worksheet as empty if there is ONE UNASSIGNED
        # analysis left: worksheet.removeAnalysis() hasn't removed it from
        # the layout yet at this stage.
        if len(analyses) == 1 \
           and workflow.getInfoFor(analyses[0], 'review_state') == 'unassigned':
            ws_empty = True

        for a in analyses:
            a_state = workflow.getInfoFor(a, 'review_state')
            if a_state in \
               ('assigned', 'sample_due', 'sample_received',):
                can_submit = False
            else:
                if not ws.getAnalyst():
                    can_submit = False
            if a_state in \
               ('assigned', 'sample_due', 'sample_received', 'attachment_due',):
                can_attach = False
            if a_state in \
               ('assigned', 'sample_due', 'sample_received', 'attachment_due', 'to_be_verified',):
                can_verify = False

        if not ws_empty:
            # Note: WS adds itself to the skiplist so we have to take it off again
            #       to allow multiple promotions (maybe by more than one self).
            if can_submit and workflow.getInfoFor(ws, 'review_state') == 'open':
                workflow.doActionFor(ws, 'submit')
                skip(ws, 'submit', unskip=True)
            if can_attach and workflow.getInfoFor(ws, 'review_state') == 'attachment_due':
                workflow.doActionFor(ws, 'attach')
                skip(ws, 'attach', unskip=True)
            if can_verify and workflow.getInfoFor(ws, 'review_state') == 'to_be_verified':
                self.REQUEST["workflow_skiplist"].append('verify all analyses')
                workflow.doActionFor(ws, 'verify')
                skip(ws, 'verify', unskip=True)
        else:
            if workflow.getInfoFor(ws, 'review_state') != 'open':
                workflow.doActionFor(ws, 'retract')
                skip(ws, 'retract', unskip=True)
Example #7
    def get(self, instance, aslist=False, **kwargs):
        """get() returns the list of objects referenced under the relationship.
        """
        uc = getToolByName(instance, "uid_catalog")

        try:
            res = instance.getRefs(relationship=self.relationship)
        except Exception:
            # if the reference lookup fails, behave as if there were no refs
            res = []

        pr = getToolByName(instance, 'portal_repository')

        rd = {}
        for r in res:
            if r is None:
                continue
            uid = r.UID()
            r = uc(UID=uid)[0].getObject()
            if hasattr(instance, 'reference_versions') and \
               hasattr(r, 'version_id') and \
               uid in instance.reference_versions and \
               instance.reference_versions[uid] != r.version_id and \
               r.version_id is not None:

                version_id = instance.reference_versions[uid]
                try:
                    o = pr._retrieve(r,
                                     selector=version_id,
                                     preserve=(),
                                     countPurged=True).object
                except ArchivistRetrieveError:
                    o = r
            else:
                o = r
            rd[uid] = o

        # single-valued ref fields return only the object, not a list,
        # unless explicitly specified by the aslist option

        if not self.multiValued:
            if len(rd) > 1:
                log("%s references for non multivalued field %s of %s" %
                    (len(rd), self.getName(), instance))
            if not aslist:
                if rd:
                    rd = [rd[uid] for uid in rd.keys()][0]
                else:
                    rd = None

        if not self.referencesSortable or not hasattr(aq_base(instance),
                                                      'at_ordered_refs'):
            if isinstance(rd, dict):
                return [rd[uid] for uid in rd.keys()]
            else:
                return rd
        refs = instance.at_ordered_refs
        order = refs[self.relationship]

        return [rd[uid] for uid in order if uid in rd.keys()]
Example #8
    def addReferences(self, reference, service_uids):
        """ Add reference analyses to reference
        """
        addedanalyses = []
        wf = getToolByName(self, 'portal_workflow')
        bsc = getToolByName(self, 'bika_setup_catalog')
        bac = getToolByName(self, 'bika_analysis_catalog')
        ref_type = reference.getBlank() and 'b' or 'c'
        ref_uid = reference.UID()
        postfix = 1
        for refa in reference.getReferenceAnalyses():
            grid = refa.getReferenceAnalysesGroupID()
            try:
                cand = int(grid.split('-')[2])
                if cand >= postfix:
                    postfix = cand + 1
            except:
                pass
        postfix = str(postfix).zfill(3)
        refgid = 'I%s-%s' % (reference.id, postfix)
        for service_uid in service_uids:
            # services with dependents don't belong in references
            service = bsc(portal_type='AnalysisService', UID=service_uid)[0].getObject()
            calc = service.getCalculation()
            if calc and calc.getDependentServices():
                continue
            ref_uid = reference.addReferenceAnalysis(service_uid, ref_type)
            ref_analysis = bac(portal_type='ReferenceAnalysis', UID=ref_uid)[0].getObject()

            # Set ReferenceAnalysesGroupID (same id for the analyses from
            # the same Reference Sample and same Worksheet)
            # https://github.com/bikalabs/Bika-LIMS/issues/931
            ref_analysis.setReferenceAnalysesGroupID(refgid)
            ref_analysis.reindexObject()

            # copy the interim fields from the service's calculation
            if calc:
                ref_analysis.setInterimFields(calc.getInterimFields())

            # Comes from a worksheet or has been attached directly?
            ws = ref_analysis.getBackReferences('WorksheetAnalysis')
            if not ws or len(ws) == 0:
                # This is a reference analysis attached directly to the
                # Instrument, we apply the assign state
                wf.doActionFor(ref_analysis, 'assign')
            addedanalyses.append(ref_analysis)

        self.setAnalyses(self.getAnalyses() + addedanalyses)

        # Initialize LatestReferenceAnalyses cache
        self.cleanReferenceAnalysesCache()

        # Set DisposeUntilNextCalibrationTest to False
        if (len(addedanalyses) > 0):
            self.getField('DisposeUntilNextCalibrationTest').set(self, False)

        return addedanalyses
Example #9
    def addReferences(self, position, reference, service_uids):
        """ Add reference analyses to reference, and add to worksheet layout
        """
        workflow = getToolByName(self, 'portal_workflow')
        rc = getToolByName(self, REFERENCE_CATALOG)
        layout = self.getLayout()
        wst = self.getWorksheetTemplate()
        wstlayout = wst and wst.getLayout() or []
        ref_type = reference.getBlank() and 'b' or 'c'
        ref_uid = reference.UID()

        if position == 'new':
            highest_existing_position = len(wstlayout)
            for pos in [int(slot['position']) for slot in layout]:
                if pos > highest_existing_position:
                    highest_existing_position = pos
            position = highest_existing_position + 1

        postfix = 1
        for refa in reference.getReferenceAnalyses():
            grid = refa.getReferenceAnalysesGroupID()
            try:
                cand = int(grid.split('-')[2])
                if cand >= postfix:
                    postfix = cand + 1
            except:
                pass
        postfix = str(postfix).zfill(3)
        refgid = '%s-%s' % (reference.id, postfix)
        for service_uid in service_uids:
            # services with dependents don't belong in references
            service = rc.lookupObject(service_uid)
            calc = service.getCalculation()
            if calc and calc.getDependentServices():
                continue
            ref_uid = reference.addReferenceAnalysis(service_uid, ref_type)
            ref_analysis = rc.lookupObject(ref_uid)

            # Set ReferenceAnalysesGroupID (same id for the analyses from
            # the same Reference Sample and same Worksheet)
            # https://github.com/bikalabs/Bika-LIMS/issues/931
            ref_analysis.setReferenceAnalysesGroupID(refgid)
            ref_analysis.reindexObject(idxs=["getReferenceAnalysesGroupID"])

            # copy the interimfields
            if calc:
                ref_analysis.setInterimFields(calc.getInterimFields())

            self.setLayout(
                self.getLayout() + [{'position': position,
                                     'type': ref_type,
                                     'container_uid': reference.UID(),
                                     'analysis_uid': ref_analysis.UID()}])
            self.setAnalyses(
                self.getAnalyses() + [ref_analysis, ])
            workflow.doActionFor(ref_analysis, 'assign')
Example #10
 def getContacts(self):
     pc = getToolByName(self, 'portal_catalog')
     bc = getToolByName(self, 'bika_catalog')
     bsc = getToolByName(self, 'bika_setup_catalog')
     # fallback - all Lab Contacts
     pairs = []
     for contact in bsc(portal_type = 'LabContact',
                        inactive_state = 'active',
                        sort_on = 'sortable_title'):
         pairs.append((contact.UID, contact.Title))
     return DisplayList(pairs)
Example #11
 def getDefaultSpec(self):
     """ Returns 'lab' or 'client' to set the initial value of the
         specification radios
     """
     mt = getToolByName(self.context, 'portal_membership')
     pg = getToolByName(self.context, 'portal_groups')
     member = mt.getAuthenticatedMember()
     member_groups = [pg.getGroupById(group.id).getGroupName()
                      for group in pg.getGroupsByUserId(member.id)]
     default_spec = ('Clients' in member_groups) and 'client' or 'lab'
     return default_spec
Example #12
 def workflow_script_deactivat(self):
     # An instance cannot be deactivated if it contains services
     pu = getToolByName(self, 'plone_utils')
     bsc = getToolByName(self, 'bika_setup_catalog')
     ars = bsc(portal_type='AnalysisService', getCategoryUID=self.UID())
     if ars:
         message = _("Category cannot be deactivated because "
                     "it contains Analysis Services")
         pu.addPortalMessage(message, 'error')
         transaction.get().abort()
         raise WorkflowException
Example #13
 def render(self):
     portal_factory = getToolByName(self.context, 'portal_factory')
     if portal_factory.isTemporary(self.context):
         return self.index()
     self.actions = []
     portal_actions = getToolByName(self.context, 'portal_actions')
     actions = portal_actions.listFilteredActionsFor(self.context)
     if 'document_actions' in actions:
         for action in actions['document_actions']:
             self.actions.append(action)
     return self.index()
Example #14
    def __call__(self):
        CheckAuthenticator(self.request)
        uid = self.request.get('UID', '')
        title = self.request.get('Title', '')
        ret = {
               'UID': '',
               'Title': '',
               'Prefix': '',
               'Hazardous': '',
               'SampleMatrixUID': '',
               'SampleMatrixTitle': '',
               'MinimumVolume':  '',
               'ContainerTypeUID': '',
               'ContainerTypeTitle': '',
               'SamplePoints': ('',),
               'StorageLocations': ('',),
               }
        proxies = None
        if uid:
            try:
                bsc = getToolByName(self.context, 'bika_setup_catalog')
                proxies = bsc(UID=uid)
            except ParseError:
                pass
        elif title:
            try:
                bsc = getToolByName(self.context, 'bika_setup_catalog')
                proxies = bsc(portal_type='SampleType', title=to_unicode(title))
            except ParseError:
                pass

        if proxies and len(proxies) == 1:
            st = proxies[0].getObject()
            ret = {
               'UID': st.UID(),
               'Title': st.Title(),
               'Prefix': st.getPrefix(),
               'Hazardous': st.getHazardous(),
               'SampleMatrixUID': st.getSampleMatrix() and \
                                  st.getSampleMatrix().UID() or '',
               'SampleMatrixTitle': st.getSampleMatrix() and \
                                  st.getSampleMatrix().Title() or '',
               'MinimumVolume':  st.getMinimumVolume(),
               'ContainerTypeUID': st.getContainerType() and \
                                   st.getContainerType().UID() or '',
               'ContainerTypeTitle': st.getContainerType() and \
                                     st.getContainerType().Title() or '',
               'SamplePoints': dict((sp.UID(),sp.Title()) for sp in st.getSamplePoints()),
               'StorageLocations': dict((sp.UID(),sp.Title()) for sp in st.getStorageLocations()),
               }

        return json.dumps(ret)
Example #15
def upgrade(tool):
    """Added bika.lims.analysisservice.edit.js
    """
    # Hack to prevent out-of-date upgrading
    # Related: PR #1484
    # https://github.com/bikalabs/Bika-LIMS/pull/1484
    from lims.upgrade import skip_pre315
    if skip_pre315(aq_parent(aq_inner(tool))):
        return True

    portal = aq_parent(aq_inner(tool))
    setup = portal.portal_setup

    # update affected tools
    setup.runImportStepFromProfile('profile-bika.lims:default', 'jsregistry')

    # apply new method/calculation data to old Analysis Services
    portal = aq_parent(aq_inner(tool))
    pc = getToolByName(portal, 'portal_catalog')
    bsc = getToolByName(portal, 'bika_setup_catalog')
    rc = getToolByName(portal, REFERENCE_CATALOG)
    proxies = pc(portal_type="AnalysisService")
    for proxy in proxies:
        an = proxy.getObject()
        rels = an.at_references.objectValues()
        oldmethod = None
        oldinstrument = None
        oldcalc = None
        for rel in rels:
            if rel.relationship == 'AnalysisServiceCalculation':
                oldcalc = rc.lookupObject(rel.targetUID)
            elif rel.relationship == 'AnalysisServiceInstrument':
                oldinstrument = rc.lookupObject(rel.targetUID)
            elif rel.relationship == 'AnalysisServiceMethod':
                oldmethod = rc.lookupObject(rel.targetUID)
            if oldmethod and oldcalc and oldinstrument:
                break

        # Reset the method, instrument and calculations
        if oldmethod:
            an.Schema().getField('Methods').set(an, [oldmethod.UID(),])
            an.Schema().getField('_Method').set(an, oldmethod.UID())
        if oldinstrument:
            an.Schema().getField('Instruments').set(an, [oldinstrument.UID(),])
            an.Schema().getField('Instrument').set(an, oldinstrument.UID())
        if oldcalc:
            an.setUseDefaultCalculation(False)
            an.Schema().getField('DeferredCalculation').set(an, oldcalc.UID())

    return True
Example #16
def isBasicTransitionAllowed(context, permission=None):
    """Most transition guards need to check the same conditions:

    - Is the object active? (cancelled or inactive objects can't transition)
    - Does the user have the permission required for the transition? This should
      normally be set as the guard_permission in the workflow definition.

    """
    workflow = getToolByName(context, "portal_workflow")
    mtool = getToolByName(context, "portal_membership")
    if workflow.getInfoFor(context, "cancellation_state", "") == "cancelled" \
            or workflow.getInfoFor(context, "inactive_state", "") == "inactive" \
            or (permission and not mtool.checkPermission(permission, context)):
        return False
    return True
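For context, a helper like this is normally called from per-transition guard handlers. A hedged sketch of a hypothetical guard that layers an extra permission on top of isBasicTransitionAllowed (the permission is illustrative, not taken from any workflow definition):

    from Products.CMFCore.permissions import ModifyPortalContent

    def guard_verify(obj):
        # the object must be active and the current user must hold the permission
        return isBasicTransitionAllowed(obj, ModifyPortalContent)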
Example #17
 def lookupMime(self, name):
     mimetool = getToolByName(self, 'mimetypes_registry')
     mimetypes = mimetool.lookup(name)
     if len(mimetypes):
         return mimetypes[0].name()
     else:
         return name
Example #18
    def __call__(self, value, *args, **kwargs):
        instance = kwargs['instance']
        translate = getToolByName(instance, 'translation_service').translate

        # remove spaces from formatted
        IBAN = ''.join(c for c in value if c.isalnum())

        IBAN = IBAN[4:] + IBAN[:4]
        country = IBAN[-4:-2]

        if country not in country_dic:
            msg = _('Unknown IBAN country %s' % country)
            return to_utf8(translate(msg))

        length_c, name_c = country_dic[country]

        if len(IBAN) != length_c:
            diff = len(IBAN) - length_c
            msg = _('Wrong IBAN length by %s: %s' % (
                ('short by %i' % -diff) if diff < 0 else ('too long by %i' % diff),
                value))
            return to_utf8(translate(msg))
        # Validating procedure
        elif int("".join(str(letter_dic[x]) for x in IBAN)) % 97 != 1:
            msg = _('Incorrect IBAN number: %s' % value)
            return to_utf8(translate(msg))

        else:
            # Accepted:
            return True
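The validator depends on module-level country_dic and letter_dic tables that are not shown here. For reference, a self-contained sketch of the same mod-97 check (letters map to 10..35, as in the standard IBAN algorithm), without the Archetypes validator machinery:

    def iban_checksum_ok(value):
        """Move the first four chars to the end, map letters to 10..35,
        and check that the resulting number is 1 modulo 97."""
        iban = ''.join(c for c in value if c.isalnum()).upper()
        rearranged = iban[4:] + iban[:4]
        digits = ''.join(str(int(c, 36)) for c in rearranged)
        return int(digits) % 97 == 1

    assert iban_checksum_ok('GB82 WEST 1234 5698 7654 32')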
Example #19
    def submitTransition(self, action, came_from, items):
        """ Performs the action's transition for the specified items
            Returns (numtransitions, destination), where:
            - numtransitions: the number of objects successfully transitioned.
                If no objects have been successfully transitioned, gets 0 value
            - destination: the destination url to be loaded immediately
        """
        dest = None
        transitioned = []
        workflow = getToolByName(self.context, 'portal_workflow')

        # transition selected items from the bika_listing/Table.
        for item in items:
            # the only actions allowed on inactive/cancelled
            # items are "reinstate" and "activate"
            if not isActive(item) and action not in ('reinstate', 'activate'):
                continue
            if not skip(item, action, peek=True):
                allowed_transitions = [it['id'] for it in \
                                       workflow.getTransitionsFor(item)]
                if action in allowed_transitions:
                    success, message = doActionFor(item, action)
                    if success:
                        transitioned.append(item.id)
                    else:
                        self.context.plone_utils.addPortalMessage(message, 'error')
        # automatic label printing
        if transitioned and action == 'receive' \
            and 'receive' in self.portal.bika_setup.getAutoPrintStickers():
            q = "/sticker?autoprint=1&template=%s&items=" % (self.portal.bika_setup.getAutoStickerTemplate())
            # selected_items is a list of UIDs (stickers for AR_add use IDs)
            q += ",".join(transitioned)
            dest = self.context.absolute_url() + q

        return len(transitioned), dest
Example #20
    def setSamplePoints(self, value, **kw):
        """ For the moment, we're manually trimming the sampletype<>samplepoint
            relation to be equal on both sides, here.
            It's done strangely, because it may be required to behave strangely.
        """
        bsc = getToolByName(self, 'bika_setup_catalog')
        ## convert value to objects
        if value and type(value) == str:
            value = [bsc(UID=value)[0].getObject(),]
        elif value and type(value) in (list, tuple) and type(value[0]) == str:
            value = [bsc(UID=uid)[0].getObject() for uid in value if uid]
        ## Find all SamplePoints that were removed
        existing = self.Schema()['SamplePoints'].get(self)
        removed = existing and [s for s in existing if s not in value] or []
        added = value and [s for s in value if s not in existing] or []
        ret = self.Schema()['SamplePoints'].set(self, value)

        for sp in removed:
            sampletypes = sp.getSampleTypes()
            if self in sampletypes:
                sampletypes.remove(self)
                sp.setSampleTypes(sampletypes)

        for sp in added:
            sp.setSampleTypes(list(sp.getSampleTypes()) + [self,])

        return ret
Example #21
    def __call__(self, value, *args, **kwargs):
        instance = kwargs['instance']
        fieldname = kwargs['field'].getName()
        # request = kwargs.get('REQUEST', {})
        # form = request.get('form', {})

        translate = getToolByName(instance, 'translation_service').translate

        if value == instance.get(fieldname):
            return True

        # Compare values as strings: even when a numeric id is entered through a
        # string widget and field, item.Schema()[fieldname].get(item) may hand it
        # back as an int, so a plain equality test would miss the duplicate.
        for item in aq_parent(instance).objectValues():
            if hasattr(item, 'UID') and item.UID() != instance.UID() and \
               fieldname in item.Schema() and \
               str(item.Schema()[fieldname].get(item)) == str(value):
                msg = _("Validation failed: '${value}' is not unique",
                        mapping={'value': safe_unicode(value)})
                return to_utf8(translate(msg))
        return True
Example #22
 def getInstrumentTypes(self):
     bsc = getToolByName(self, 'bika_setup_catalog')
     items = [(c.UID, c.Title) \
             for c in bsc(portal_type='InstrumentType',
                          inactive_state = 'active')]
     items.sort(lambda x,y:cmp(x[1], y[1]))
     return DisplayList(items)
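The cmp-based sort above is a Python 2 idiom. An equivalent, hedged sketch that builds the same DisplayList with a key-based sort (assuming the usual Archetypes import path for DisplayList):

    from Products.Archetypes.utils import DisplayList
    from Products.CMFCore.utils import getToolByName

    def getInstrumentTypes(self):
        bsc = getToolByName(self, 'bika_setup_catalog')
        items = [(c.UID, c.Title)
                 for c in bsc(portal_type='InstrumentType', inactive_state='active')]
        # sort by title without relying on the Python 2 cmp() builtin
        items.sort(key=lambda item: item[1])
        return DisplayList(items)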
Example #23
    def __init__(self, context, request, **kwargs):
        self.field_icons = {}
        super(BikaListingView, self).__init__(context, request)
        path = hasattr(context, 'getPath') and context.getPath() \
            or "/".join(context.getPhysicalPath())
        if hasattr(self, 'contentFilter'):
            if 'path' not in self.contentFilter:
                self.contentFilter['path'] = {"query": path, "level": 0}
        else:
            # no contentFilter attribute yet; create one that only restricts the path
            self.contentFilter = {'path': {"query": path, "level": 0}}

        if 'show_categories' in kwargs:
            self.show_categories = kwargs['show_categories']

        if 'expand_all_categories' in kwargs:
            self.expand_all_categories = kwargs['expand_all_categories']

        self.portal = getToolByName(context, 'portal_url').getPortalObject()
        self.portal_url = self.portal.absolute_url()

        self.base_url = context.absolute_url()
        self.view_url = self.base_url

        self.translate = self.context.translate
        self.show_all = False
Example #24
    def folderitems(self):
        mtool = getToolByName(self.context, 'portal_membership')
        if mtool.checkPermission(ManageBika, self.context):
            del self.review_states[0]['transitions']
            self.show_select_column = True
            self.review_states.append(
                {'id':'active',
                 'title': _('Active'),
                 'contentFilter': {'inactive_state': 'active'},
                 'transitions': [{'id':'deactivate'}, ],
                 'columns': ['Title', 'Description']})
            self.review_states.append(
                {'id':'inactive',
                 'title': _('Dormant'),
                 'contentFilter': {'inactive_state': 'inactive'},
                 'transitions': [{'id':'activate'}, ],
                 'columns': ['Title', 'Description']})

        items = BikaListingView.folderitems(self)
        for x in range(len(items)):
            if 'obj' not in items[x]:
                continue
            items[x]['replace']['Title'] = "<a href='%s'>%s</a>" % \
                (items[x]['url'], items[x]['Title'])

        return items
Example #25
File: add.py Project: nafwa03/olims
 def __call__(self):
     uid = self.request.get('Sample_uid', False)
     if not uid:
         return []
     uc = getToolByName(self.context, "uid_catalog")
     proxies = uc(UID=uid)
     if not proxies:
         return []
     sample = proxies[0].getObject()
     sample_schema = sample.Schema()
     sample_fields = dict([(f.getName(), f) for f in sample_schema.fields()])
     ar_schema = self.context.Schema()
     ar_fields = [f.getName() for f in ar_schema.fields()
                  if f.widget.isVisible(self.context, 'secondary') == 'disabled']
     ret = []
     for fieldname in ar_fields:
         if fieldname in sample_fields:
             fieldvalue = sample_fields[fieldname].getAccessor(sample)()
             if fieldvalue is None:
                 fieldvalue = ''
             if hasattr(fieldvalue, 'Title'):
                 fieldvalue = fieldvalue.Title()
             if hasattr(fieldvalue, 'year'):
                 fieldvalue = fieldvalue.strftime(self.date_format_short)
         else:
             fieldvalue = ''
         ret.append([fieldname, fieldvalue])
     return json.dumps(ret)
Example #26
def upgrade(tool):
    """ Sort by Type in instruments
    """
    # Hack to prevent out-of-date upgrading
    # Related: PR #1484
    # https://github.com/bikalabs/Bika-LIMS/pull/1484
    from lims.upgrade import skip_pre315
    if skip_pre315(aq_parent(aq_inner(tool))):
        return True


    portal = aq_parent(aq_inner(tool))
    bsc = getToolByName(portal, 'bika_setup_catalog', None)

    if 'getInstrumentType' not in bsc.indexes():
        bsc.addIndex('getInstrumentType', 'FieldIndex')
        bsc.addColumn('getInstrumentType')

        bsc.addIndex('getInstrumentTypeName', 'FieldIndex')
        bsc.addColumn('getInstrumentTypeName')

    # Delete the old 'getType' index and column; they are not used anymore.
    if 'getType' in bsc.indexes():
        bsc.delIndex('getType')
    if 'getType' in bsc.schema():
        bsc.delColumn('getType')

    setup = portal.portal_setup

    logger.info("Reindex added indexes in bika_setup_catalog")
    bsc.manage_reindexIndex(
        ids=['getInstrumentType', 'getInstrumentTypeName', ])

    return True
Example #27
def upgrade(tool):
    # Hack to prevent out-of-date upgrading
    # Related: PR #1484
    # https://github.com/bikalabs/Bika-LIMS/pull/1484
    from lims.upgrade import skip_pre315
    if skip_pre315(aq_parent(aq_inner(tool))):
        return True

    portal = aq_parent(aq_inner(tool))
    setup = portal.portal_setup

    # update affected tools
    setup.runImportStepFromProfile('profile-bika.lims:default', 'workflow-csv')

    # /supplyorders folder permissions
    mp = portal.supplyorders.manage_permission
    mp(CancelAndReinstate, ['Manager', 'LabManager', 'LabClerk'], 0)
    mp(ManagePricelists, ['Manager', 'LabManager', 'Owner'], 1)
    mp(permissions.ListFolderContents, ['Member'], 1)
    mp(permissions.AddPortalContent, ['Manager', 'LabManager', 'Owner'], 0)
    mp(permissions.DeleteObjects, ['Manager', 'LabManager', 'Owner'], 0)
    mp(permissions.View, ['Manager', 'LabManager'], 0)
    portal.supplyorders.reindexObject()

    wf = getToolByName(portal, 'portal_workflow')
    wf.updateRoleMappings()

    return True
Example #28
 def copy_to_new_allowed(self):
     mtool = getToolByName(self.context, 'portal_membership')
     if mtool.checkPermission(ManageAnalysisRequests, self.context) \
             or mtool.checkPermission(ModifyPortalContent, self.context) \
             or mtool.checkPermission(AddAnalysisRequest, self.portal):
         return True
     return False
Example #29
    def addReferenceAnalysis(self, service_uid, reference_type):
        """ add an analysis to the sample """
        rc = getToolByName(self, REFERENCE_CATALOG)
        service = rc.lookupObject(service_uid)

        analysis = _createObjectByType("ReferenceAnalysis", self, tmpID())
        analysis.unmarkCreationFlag()

        calculation = service.getCalculation()
        interim_fields = calculation and calculation.getInterimFields() or []
        renameAfterCreation(analysis)

        # maxtime = service.getMaxTimeAllowed() and service.getMaxTimeAllowed() \
        #     or {'days':0, 'hours':0, 'minutes':0}
        # starttime = DateTime()
        # max_days = float(maxtime.get('days', 0)) + \
        #          (
        #              (float(maxtime.get('hours', 0)) * 3600 + \
        #               float(maxtime.get('minutes', 0)) * 60)
        #              / 86400
        #          )
        # duetime = starttime + max_days

        analysis.setReferenceType(reference_type)
        analysis.setService(service_uid)
        analysis.setInterimFields(interim_fields)
        return analysis.UID()
Example #30
 def getSuppliers(self):
     bsc = getToolByName(self, 'bika_setup_catalog')
     items = [(c.UID, c.getName) \
             for c in bsc(portal_type='Supplier',
                          inactive_state = 'active')]
     items.sort(lambda x,y:cmp(x[1], y[1]))
     return DisplayList(items)
Example #31
    def __init__(self, context, request):
        super(WorksheetFolderListingView, self).__init__(context, request)
        self.catalog = 'bika_catalog'
        self.contentFilter = {
            'portal_type': 'Worksheet',
            'review_state': ['open', 'to_be_verified', 'verified', 'rejected'],
            'sort_on': 'id',
            'sort_order': 'reverse'
        }
        self.context_actions = {
            _('Add'): {
                'url': 'worksheet_add',
                'icon': '++resource++bika.lims.images/add.png',
                'class': 'worksheet_add'
            }
        }
        self.show_table_only = False
        self.show_sort_column = False
        self.show_select_row = False
        self.show_select_all_checkbox = True
        self.show_select_column = True
        self.pagesize = 25
        self.restrict_results = False

        request.set('disable_border', 1)

        self.icon = self.portal_url + "/++resource++bika.lims.images/worksheet_big.png"
        self.title = self.context.translate(_("Worksheets"))
        self.description = ""

        pm = getToolByName(context, "portal_membership")
        # this is a property of self, because self.getAnalysts returns it
        self.analysts = getUsers(self, ['Manager', 'LabManager', 'Analyst'])
        self.analysts = self.analysts.sortedByKey()

        bsc = getToolByName(context, 'bika_setup_catalog')
        templates = [
            t for t in bsc(portal_type='WorksheetTemplate',
                           inactive_state='active')
        ]

        self.templates = [(t.UID, t.Title) for t in templates]
        self.templates.sort(lambda x, y: cmp(x[1], y[1]))

        self.instruments = [
            (i.UID, i.Title)
            for i in bsc(portal_type='Instrument', inactive_state='active')
        ]
        self.instruments.sort(lambda x, y: cmp(x[1], y[1]))

        self.templateinstruments = {}
        for t in templates:
            i = t.getObject().getInstrument()
            if i:
                self.templateinstruments[t.UID] = i.UID()
            else:
                self.templateinstruments[t.UID] = ''

        self.columns = {
            'Title': {
                'title': _('Worksheet'),
                'index': 'sortable_title'
            },
            'Priority': {
                'title': _('Priority'),
                'index': 'Priority',
                'toggle': True
            },
            'Analyst': {
                'title': _('Analyst'),
                'index': 'getAnalyst',
                'toggle': True
            },
            'Template': {
                'title': _('Template'),
                'toggle': True
            },
            'Services': {
                'title': _('Services'),
                'sortable': False,
                'toggle': False
            },
            'SampleTypes': {
                'title': _('Sample Types'),
                'sortable': False,
                'toggle': False
            },
            'Instrument': {
                'title': _('Instrument'),
                'sortable': False,
                'toggle': False
            },
            'QC': {
                'title': _('QC'),
                'sortable': False,
                'toggle': False
            },
            'QCTotals': {
                'title': _('QC Samples(Analyses)'),
                'sortable': False,
                'toggle': False
            },
            'RoutineTotals': {
                'title': _('Routine Samples(Analyses)'),
                'sortable': False,
                'toggle': False
            },
            'CreationDate': {
                'title': PMF('Date Created'),
                'toggle': True,
                'index': 'created'
            },
            'state_title': {
                'title': _('State'),
                'index': 'review_state'
            },
        }
        self.review_states = [
            {
                'id': 'default',
                'title': _('All'),
                'contentFilter': {
                    'portal_type': 'Worksheet',
                    'review_state': ['open', 'to_be_verified', 'verified', 'rejected'],
                    'sort_on': 'id',
                    'sort_order': 'reverse'
                },
                'transitions': [{'id': 'retract'}, {'id': 'verify'}, {'id': 'reject'}],
                'columns': [
                    'Title', 'Priority', 'Analyst', 'Template', 'Services',
                    'SampleTypes', 'Instrument', 'QC', 'QCTotals',
                    'RoutineTotals', 'CreationDate', 'state_title'
                ]
            },
            # getAuthenticatedMember does not work in __init__
            # so 'mine' is configured further in 'folderitems' below.
            {
                'id': 'mine',
                'title': _('Mine'),
                'contentFilter': {
                    'portal_type': 'Worksheet',
                    'review_state': ['open', 'to_be_verified', 'verified', 'rejected'],
                    'sort_on': 'id',
                    'sort_order': 'reverse'
                },
                'transitions': [{'id': 'retract'}, {'id': 'verify'}, {'id': 'reject'}],
                'columns': [
                    'Title', 'Priority', 'Analyst', 'Template', 'Services',
                    'SampleTypes', 'Instrument', 'QC', 'QCTotals',
                    'RoutineTotals', 'CreationDate', 'state_title'
                ]
            },
            {
                'id': 'open',
                'title': _('Open'),
                'contentFilter': {
                    'portal_type': 'Worksheet',
                    'review_state': 'open',
                    'sort_on': 'id',
                    'sort_order': 'reverse'
                },
                'transitions': [],
                'columns': [
                    'Title', 'Priority', 'Analyst', 'Template', 'Services',
                    'SampleTypes', 'Instrument', 'QC', 'QCTotals',
                    'RoutineTotals', 'CreationDate', 'state_title'
                ]
            },
            {
                'id': 'to_be_verified',
                'title': _('To be verified'),
                'contentFilter': {
                    'portal_type': 'Worksheet',
                    'review_state': 'to_be_verified',
                    'sort_on': 'id',
                    'sort_order': 'reverse'
                },
                'transitions': [{'id': 'retract'}, {'id': 'verify'}, {'id': 'reject'}],
                'columns': [
                    'Title', 'Priority', 'Analyst', 'Template', 'Services',
                    'SampleTypes', 'Instrument', 'QC', 'QCTotals',
                    'RoutineTotals', 'CreationDate', 'state_title'
                ]
            },
            {
                'id': 'verified',
                'title': _('Verified'),
                'contentFilter': {
                    'portal_type': 'Worksheet',
                    'review_state': 'verified',
                    'sort_on': 'id',
                    'sort_order': 'reverse'
                },
                'transitions': [],
                'columns': [
                    'Title', 'Priority', 'Analyst', 'Template', 'Services',
                    'SampleTypes', 'Instrument', 'QC', 'QCTotals',
                    'RoutineTotals', 'CreationDate', 'state_title'
                ]
            },
        ]
Example #32
 def isSubmitted(self):
     wftool = getToolByName(self, 'portal_workflow')
     return wftool.getInfoFor(
             self.context.aq_parent, 'review_state') == 'submitted'
Example #33
    def __call__(self):

        parms = []
        titles = []

        rt = getToolByName(self.context, 'portal_repository')
        mt = getToolByName(self.context, 'portal_membership')

        # Apply filters
        self.contentFilter = {
            'portal_type': (
                'Analysis', 'AnalysisCategory', 'AnalysisProfile',
                'AnalysisRequest', 'AnalysisService', 'AnalysisSpec',
                'ARTemplate', 'Attachment', 'Batch', 'Calculation', 'Client',
                'Contact', 'Container', 'ContainerType', 'Department',
                'DuplicateAnalysis', 'Instrument', 'InstrumentCalibration',
                'InstrumentCertification', 'InstrumentMaintenanceTask',
                'InstrumentScheduledTask', 'InstrumentType',
                'InstrumentValidation', 'Manufacturer',
                'Method', 'Preservation', 'Pricelist', 'ReferenceAnalysis',
                'ReferenceDefinition', 'ReferenceSample', 'Sample',
                'SampleMatrix', 'SamplePoint', 'SampleType', 'Supplier',
                'SupplierContact', 'Worksheet', 'WorksheetTemplate')
        }

        val = self.selection_macros.parse_daterange(self.request,
                                                    'getModificationDate',
                                                    _('Modification date'))
        if val:
            self.contentFilter['modified'] = val['contentFilter'][1]
            parms.append(val['parms'])
            titles.append(val['titles'])

        user = ''
        userfullname = ''
        titles.append(user)
        if self.request.form.get('User', '') != '':
            user = self.request.form['User']
            userobj = mt.getMemberById(user)
            userfullname = userobj.getProperty('fullname') \
                           if userobj else ''
            parms.append({
                'title': _('User'),
                'value': ("%s (%s)" % (userfullname, user))
            })

        # Query the catalog and store results in a dictionary
        entities = self.bika_setup_catalog(self.contentFilter)

        if not entities:
            message = _("No historical actions matched your query")
            self.context.plone_utils.addPortalMessage(message, "error")
            return self.default_template()

        datalines = []
        tmpdatalines = {}
        footlines = {}

        for entity in entities:
            entity = entity.getObject()
            entitytype = _(entity.__class__.__name__)

            # Workflow states retrieval
            for workflowid, workflow in entity.workflow_history.iteritems():
                for action in workflow:
                    actiontitle = _('Created')
                    if not action['action'] or (action['action'] and
                                                action['action'] == 'create'):
                        if workflowid == 'bika_inactive_workflow':
                            continue
                        actiontitle = _('Created')
                    else:
                        actiontitle = _(action['action'])

                    if (user == '' or action['actor'] == user):
                        actorfullname = userfullname == '' and mt.getMemberById(
                            user) or userfullname
                        dataline = {
                            'EntityNameOrId': entity.title_or_id(),
                            'EntityAbsoluteUrl': entity.absolute_url(),
                            'EntityCreationDate': entity.CreationDate(),
                            'EntityModificationDate': entity.ModificationDate(),
                            'EntityType': entitytype,
                            'Workflow': _(workflowid),
                            'Action': actiontitle,
                            'ActionDate': action['time'],
                            'ActionDateStr': self.ulocalized_time(action['time'], 1),
                            'ActionActor': action['actor'],
                            'ActionActorFullName': actorfullname,
                            'ActionComments': action['comments']
                        }
                        tmpdatalines[action['time']] = dataline

            # History versioning retrieval
            history = rt.getHistoryMetadata(entity)
            if history:
                hislen = history.getLength(countPurged=False)
                for index in range(hislen):
                    meta = history.retrieve(index)['metadata']['sys_metadata']
                    metatitle = _(meta['comment'])
                    if (user == '' or meta['principal'] == user):
                        actorfullname = userfullname == '' and \
                            mt.getMemberById(user) or userfullname
                        dataline = {
                            'EntityNameOrId': entity.title_or_id(),
                            'EntityAbsoluteUrl': entity.absolute_url(),
                            'EntityCreationDate': entity.CreationDate(),
                            'EntityModificationDate':
                            entity.ModificationDate(),
                            'EntityType': entitytype,
                            'Workflow': '',
                            'Action': metatitle,
                            'ActionDate': meta['timestamp'],
                            'ActionDateStr': meta['timestamp'],
                            'ActionActor': meta['principal'],
                            'ActionActorFullName': actorfullname,
                            'ActionComments': ''
                        }
                        tmpdatalines[meta['timestamp']] = dataline
        if len(tmpdatalines) == 0:
            message = _("No actions found for user ${user}",
                        mapping={"user": userfullname})
            self.context.plone_utils.addPortalMessage(message, "error")
            return self.default_template()
        else:
            # Sort datalines
            tmpkeys = tmpdatalines.keys()
            tmpkeys.sort(reverse=True)
            for index in range(len(tmpkeys)):
                datalines.append(tmpdatalines[tmpkeys[index]])

            self.report_data = {
                'parameters': parms,
                'datalines': datalines,
                'footlines': footlines
            }

            return {
                'report_title': _('Users history'),
                'report_data': self.template()
            }
Example #34
    def folderitems(self):
        rc = getToolByName(self.context, REFERENCE_CATALOG)
        bsc = getToolByName(self.context, 'bika_setup_catalog')
        workflow = getToolByName(self.context, 'portal_workflow')
        mtool = getToolByName(self.context, 'portal_membership')
        checkPermission = mtool.checkPermission
        if not self.allow_edit:
            can_edit_analyses = False
        else:
            if self.contentFilter.get('getPointOfCapture', '') == 'field':
                can_edit_analyses = checkPermission(EditFieldResults,
                                                    self.context)
            else:
                can_edit_analyses = checkPermission(EditResults, self.context)
            self.allow_edit = can_edit_analyses
        self.show_select_column = self.allow_edit
        context_active = isActive(self.context)

        self.categories = []
        items = super(AnalysesView, self).folderitems(full_objects=True)

        # manually skim retracted analyses from the list
        new_items = []
        for i, item in enumerate(items):
            # self.contentsMethod may return brains or objects.
            if not ('obj' in items[i]):
                continue
            obj = hasattr(items[i]['obj'], 'getObject') and \
                items[i]['obj'].getObject() or \
                items[i]['obj']
            if workflow.getInfoFor(obj, 'review_state') == 'retracted' \
                and not checkPermission(ViewRetractedAnalyses, self.context):
                continue
            new_items.append(item)
        items = new_items

        methods = self.get_methods_vocabulary()

        self.interim_fields = {}
        self.interim_columns = {}
        self.specs = {}
        show_methodinstr_columns = False
        for i, item in enumerate(items):
            # self.contentsMethod may return brains or objects.
            obj = hasattr(items[i]['obj'], 'getObject') and \
                items[i]['obj'].getObject() or \
                items[i]['obj']
            if workflow.getInfoFor(obj, 'review_state') == 'retracted' \
                and not checkPermission(ViewRetractedAnalyses, self.context):
                continue

            result = obj.getResult()
            service = obj.getService()
            calculation = service.getCalculation()
            unit = service.getUnit()
            keyword = service.getKeyword()

            if self.show_categories:
                cat = obj.getService().getCategoryTitle()
                items[i]['category'] = cat
                if cat not in self.categories:
                    self.categories.append(cat)

            # Check for InterimFields attribute on our object,
            interim_fields = hasattr(obj, 'getInterimFields') \
                and obj.getInterimFields() or []
            # kick some pretty display values in.
            for x in range(len(interim_fields)):
                interim_fields[x]['formatted_value'] = \
                    format_numeric_result(obj, interim_fields[x]['value'])
            self.interim_fields[obj.UID()] = interim_fields
            items[i]['service_uid'] = service.UID()
            items[i]['Service'] = service.Title()
            items[i]['Keyword'] = keyword
            items[i]['Unit'] = format_supsub(unit) if unit else ''
            items[i]['Result'] = ''
            items[i]['formatted_result'] = ''
            items[i]['interim_fields'] = interim_fields
            items[i]['Remarks'] = obj.getRemarks()
            items[i]['Uncertainty'] = ''
            items[i]['DetectionLimit'] = ''
            items[i]['retested'] = obj.getRetested()
            items[i]['class']['retested'] = 'center'
            items[i]['result_captured'] = self.ulocalized_time(
                obj.getResultCaptureDate(), long_format=0)
            items[i]['calculation'] = calculation and True or False
            try:
                items[i]['Partition'] = obj.getSamplePartition().getId()
            except AttributeError:
                items[i]['Partition'] = ''
            if obj.portal_type == "ReferenceAnalysis":
                items[i]['DueDate'] = self.ulocalized_time(
                    obj.aq_parent.getExpiryDate(), long_format=0)
            else:
                items[i]['DueDate'] = self.ulocalized_time(obj.getDueDate(),
                                                           long_format=1)
            cd = obj.getResultCaptureDate()
            items[i]['CaptureDate'] = cd and self.ulocalized_time(
                cd, long_format=1) or ''
            items[i]['Attachments'] = ''

            item['allow_edit'] = []
            client_or_lab = ""

            tblrowclass = items[i].get('table_row_class')
            if obj.portal_type == 'ReferenceAnalysis':
                items[i]['st_uid'] = obj.aq_parent.UID()
                items[i]['table_row_class'] = ' '.join(
                    [tblrowclass, 'qc-analysis'])
            elif obj.portal_type == 'DuplicateAnalysis' and \
                obj.getAnalysis().portal_type == 'ReferenceAnalysis':
                items[i]['st_uid'] = obj.aq_parent.UID()
                items[i]['table_row_class'] = ' '.join(
                    [tblrowclass, 'qc-analysis'])
            else:
                sample = None
                if self.context.portal_type == 'AnalysisRequest':
                    sample = self.context.getSample()
                elif self.context.portal_type == 'Worksheet':
                    if obj.portal_type in ('DuplicateAnalysis',
                                           'RejectAnalysis'):
                        sample = obj.getAnalysis().getSample()
                    else:
                        sample = obj.aq_parent.getSample()
                elif self.context.portal_type == 'Sample':
                    sample = self.context
                st_uid = sample.getSampleType().UID() if sample else ''
                items[i]['st_uid'] = st_uid

            if checkPermission(ManageBika, self.context):
                service_uid = service.UID()
                latest = rc.lookupObject(service_uid).version_id
                items[i]['Service'] = service.Title()
                items[i]['class']['Service'] = "service_title"

            # Show version number of out-of-date objects
            # No: This should be done in another column, if at all.
            # The (vX) value confuses some more fragile forms.
            #     if hasattr(obj, 'reference_versions') and \
            #        service_uid in obj.reference_versions and \
            #        latest != obj.reference_versions[service_uid]:
            #         items[i]['after']['Service'] = "(v%s)" % \
            #              (obj.reference_versions[service_uid])

            # choices defined on Service apply to result fields.
            choices = service.getResultOptions()
            if choices:
                item['choices']['Result'] = choices

            # permission to view this item's results
            can_view_result = \
                getSecurityManager().checkPermission(ViewResults, obj)

            # permission to edit this item's results
            # Editing Field Results is possible while in Sample Due.
            poc = self.contentFilter.get("getPointOfCapture", 'lab')
            can_edit_analysis = self.allow_edit and context_active and \
                ( (poc == 'field' and getSecurityManager().checkPermission(EditFieldResults, obj))
                  or
                  (poc != 'field' and getSecurityManager().checkPermission(EditResults, obj)) )

            allowed_method_states = [
                'to_be_sampled', 'to_be_preserved', 'sample_received',
                'sample_registered', 'sampled', 'assigned'
            ]

            # Prevent from being edited if the instrument assigned
            # is not valid (out-of-date or uncalibrated), except if
            # the analysis is a QC with assigned status
            can_edit_analysis = can_edit_analysis \
                and (obj.isInstrumentValid() \
                    or (obj.portal_type == 'ReferenceAnalysis' \
                        and item['review_state'] in allowed_method_states))

            if can_edit_analysis:
                items[i]['allow_edit'].extend(['Analyst', 'Result', 'Remarks'])
                # if the Result field is editable, our interim fields are too
                for f in self.interim_fields[obj.UID()]:
                    items[i]['allow_edit'].append(f['keyword'])

                # if there isn't a calculation then result must be re-testable,
                # and if there are interim fields, they too must be re-testable.
                if not items[i]['calculation'] or \
                   (items[i]['calculation'] and self.interim_fields[obj.UID()]):
                    items[i]['allow_edit'].append('retested')

            # TODO: Only the labmanager must be able to change the method
            # can_set_method = getSecurityManager().checkPermission(SetAnalysisMethod, obj)
            can_set_method = can_edit_analysis \
                and item['review_state'] in allowed_method_states
            method = obj.getMethod() \
                        if hasattr(obj, 'getMethod') and obj.getMethod() \
                        else service.getMethod()

            # Display the methods selector if the AS has at least one
            # method assigned
            item['Method'] = ''
            item['replace']['Method'] = ''
            if can_set_method:
                voc = self.get_methods_vocabulary(obj)
                if voc:
                    # The service has at least one method available
                    item['Method'] = method.UID() if method else ''
                    item['choices']['Method'] = voc
                    item['allow_edit'].append('Method')
                    show_methodinstr_columns = True

                elif method:
                    # This should never happen
                    # The analysis has set a method, but its parent
                    # service hasn't any method available O_o
                    item['Method'] = method.Title()
                    item['replace']['Method'] = "<a href='%s'>%s</a>" % \
                        (method.absolute_url(), method.Title())
                    show_methodinstr_columns = True

            elif method:
                # Edition not allowed, but method set
                item['Method'] = method.Title()
                item['replace']['Method'] = "<a href='%s'>%s</a>" % \
                    (method.absolute_url(), method.Title())
                show_methodinstr_columns = True

            # TODO: Instrument selector dynamic behavior in worksheet Results
            # Only the labmanager must be able to change the instrument to be used. Also,
            # the instrument selection should be done in accordance with the method selected
            # can_set_instrument = service.getInstrumentEntryOfResults() and getSecurityManager().checkPermission(SetAnalysisInstrument, obj)
            can_set_instrument = service.getInstrumentEntryOfResults() \
                and can_edit_analysis \
                and item['review_state'] in allowed_method_states

            item['Instrument'] = ''
            item['replace']['Instrument'] = ''
            if service.getInstrumentEntryOfResults():
                instrument = None

                # If the analysis has an instrument already assigned, use it
                if service.getInstrumentEntryOfResults() \
                    and hasattr(obj, 'getInstrument') \
                    and obj.getInstrument():
                    instrument = obj.getInstrument()

                # Otherwise, use the Service's default instrument
                elif service.getInstrumentEntryOfResults():
                    instrument = service.getInstrument()

                if can_set_instrument:
                    # Edition allowed
                    voc = self.get_instruments_vocabulary(obj)
                    if voc:
                        # The service has at least one instrument available
                        item['Instrument'] = \
                            instrument.UID() if instrument else ''
                        item['choices']['Instrument'] = voc
                        item['allow_edit'].append('Instrument')
                        show_methodinstr_columns = True

                    elif instrument:
                        # This should never happen
                        # The analysis has an instrument set, but the
                        # service hasn't any available instrument
                        item['Instrument'] = instrument.Title()
                        item['replace']['Instrument'] = "<a href='%s'>%s</a>" % \
                            (instrument.absolute_url(), instrument.Title())
                        show_methodinstr_columns = True

                elif instrument:
                    # Edition not allowed, but instrument set
                    item['Instrument'] = instrument.Title()
                    item['replace']['Instrument'] = "<a href='%s'>%s</a>" % \
                        (instrument.absolute_url(), instrument.Title())
                    show_methodinstr_columns = True

            else:
                # Manual entry of results, instrument not allowed
                item['Instrument'] = _('Manual')
                msgtitle = t(
                    _(
                        "Instrument entry of results not allowed for ${service}",
                        mapping={"service": safe_unicode(service.Title())},
                    ))
                item['replace']['Instrument'] = \
                    '<a href="#" title="%s">%s</a>' % (msgtitle, t(_('Manual')))

            # Sets the analyst assigned to this analysis
            if can_edit_analysis:
                analyst = obj.getAnalyst()
                # widget default: current user
                if not analyst:
                    analyst = mtool.getAuthenticatedMember().getUserName()
                items[i]['Analyst'] = analyst
                item['choices']['Analyst'] = self.getAnalysts()
            else:
                items[i]['Analyst'] = obj.getAnalystName()

            # If the user can attach files to analyses, show the attachment col
            can_add_attachment = \
                getSecurityManager().checkPermission(AddAttachment, obj)
            if can_add_attachment or can_view_result:
                attachments = ""
                if hasattr(obj, 'getAttachment'):
                    for attachment in obj.getAttachment():
                        af = attachment.getAttachmentFile()
                        icon = af.getBestIcon()
                        attachments += "<span class='attachment' attachment_uid='%s'>" % (
                            attachment.UID())
                        if icon:
                            attachments += "<img src='%s/%s'/>" % (
                                self.portal_url, icon)
                        attachments += '<a href="%s/at_download/AttachmentFile">%s</a>' % (
                            attachment.absolute_url(), af.filename)
                        if can_edit_analysis:
                            attachments += "<img class='deleteAttachmentButton' attachment_uid='%s' src='%s'/>" % (
                                attachment.UID(),
                                "++resource++bika.lims.images/delete.png")
                        attachments += "<br/></span>"
                items[i]['replace']['Attachments'] = \
                    attachments[:-12] + "</span>"

            # Only display data bearing fields if we have ViewResults
            # permission, otherwise just put an icon in Result column.
            if can_view_result:
                items[i]['Result'] = result
                scinot = self.context.bika_setup.getScientificNotationResults()
                dmk = self.context.bika_setup.getResultsDecimalMark()
                items[i]['formatted_result'] = obj.getFormattedResult(
                    sciformat=int(scinot), decimalmark=dmk)

                # LIMS-1379 Allow manual uncertainty value input
                # https://jira.bikalabs.com/browse/LIMS-1379
                fu = format_uncertainty(obj,
                                        result,
                                        decimalmark=dmk,
                                        sciformat=int(scinot))
                fu = fu if fu else ''
                if can_edit_analysis and service.getAllowManualUncertainty():
                    unc = obj.getUncertainty(result)
                    item['allow_edit'].append('Uncertainty')
                    items[i]['Uncertainty'] = unc if unc else ''
                    items[i]['before']['Uncertainty'] = '&plusmn;&nbsp;'
                    items[i]['after']['Uncertainty'] = \
                        '<em class="discreet" style="white-space:nowrap;">' \
                        ' %s</em>' % items[i]['Unit']
                elif fu:
                    items[i]['Uncertainty'] = fu
                    items[i]['before']['Uncertainty'] = '&plusmn;&nbsp;'
                    items[i]['after']['Uncertainty'] = \
                        '<em class="discreet" style="white-space:nowrap;">' \
                        ' %s</em>' % items[i]['Unit']

                # LIMS-1700. Allow manual input of Detection Limits
                # LIMS-1775. Allow to select LDL or UDL defaults in results with readonly mode
                # https://jira.bikalabs.com/browse/LIMS-1700
                # https://jira.bikalabs.com/browse/LIMS-1775
                if can_edit_analysis and \
                        hasattr(obj, 'getDetectionLimitOperand') and \
                        hasattr(service, 'getDetectionLimitSelector') and \
                        service.getDetectionLimitSelector():
                    isldl = obj.isBelowLowerDetectionLimit()
                    isudl = obj.isAboveUpperDetectionLimit()
                    dlval = ''
                    if isldl or isudl:
                        dlval = '<' if isldl else '>'
                    item['allow_edit'].append('DetectionLimit')
                    item['DetectionLimit'] = dlval
                    choices = [{
                        'ResultValue': '<',
                        'ResultText': '<'
                    }, {
                        'ResultValue': '>',
                        'ResultText': '>'
                    }]
                    item['choices']['DetectionLimit'] = choices
                    self.columns['DetectionLimit']['toggle'] = True
                    srv = obj.getService()
                    defdls = {
                        'min': srv.getLowerDetectionLimit(),
                        'max': srv.getUpperDetectionLimit(),
                        'manual': srv.getAllowManualDetectionLimit()
                    }
                    defin = '<input type="hidden" id="DefaultDLS.%s" value=\'%s\'/>'
                    defin = defin % (obj.UID(), json.dumps(defdls))
                    item['after']['DetectionLimit'] = defin

                # LIMS-1769. Allow to use LDL and UDL in calculations.
                # https://jira.bikalabs.com/browse/LIMS-1769
                # Since LDL, UDL, etc. are wildcards that can be used
                # in calculations, these fields must be loaded always
                # for 'live' calculations.
                if can_edit_analysis:
                    dls = {
                        'default_ldl': 'none',
                        'default_udl': 'none',
                        'below_ldl': False,
                        'above_udl': False,
                        'is_ldl': False,
                        'is_udl': False,
                        'manual_allowed': False,
                        'dlselect_allowed': False
                    }
                    if hasattr(obj, 'getDetectionLimits'):
                        dls['below_ldl'] = obj.isBelowLowerDetectionLimit()
                        dls['above_udl'] = obj.isAboveUpperDetectionLimit()
                        dls['is_ldl'] = obj.isLowerDetectionLimit()
                        dls['is_udl'] = obj.isUpperDetectionLimit()
                        dls['default_ldl'] = service.getLowerDetectionLimit()
                        dls['default_udl'] = service.getUpperDetectionLimit()
                        dls['manual_allowed'] = \
                            service.getAllowManualDetectionLimit()
                        dls['dlselect_allowed'] = \
                            service.getDetectionLimitSelector()
                    dlsin = '<input type="hidden" id="AnalysisDLS.%s" value=\'%s\'/>'
                    dlsin = dlsin % (obj.UID(), json.dumps(dls))
                    item['after']['Result'] = dlsin

            else:
                items[i]['Specification'] = ""
                if 'Result' in items[i]['allow_edit']:
                    items[i]['allow_edit'].remove('Result')
                items[i]['before']['Result'] = \
                    '<img width="16" height="16" ' + \
                    'src="%s/++resource++bika.lims.images/to_follow.png"/>' % \
                    (self.portal_url)
            # Everyone can see valid-ranges
            spec = self.get_analysis_spec(obj)
            if spec:
                min_val = spec.get('min', '')
                min_str = ">{0}".format(min_val) if min_val else ''
                max_val = spec.get('max', '')
                max_str = "<{0}".format(max_val) if max_val else ''
                error_val = spec.get('error', '')
                error_str = "{0}%".format(error_val) if error_val else ''
                rngstr = ",".join(
                    [x for x in [min_str, max_str, error_str] if x])
            else:
                rngstr = ""
            items[i]['Specification'] = rngstr
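            # For instance (illustrative values only): a spec of min=5,
            # max=10 and error=10 is rendered here as ">5,<10,10%".
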
            # Add this analysis' interim fields to the interim_columns list
            for f in self.interim_fields[obj.UID()]:
                if f['keyword'] not in self.interim_columns and not f.get(
                        'hidden', False):
                    self.interim_columns[f['keyword']] = f['title']
                # and to the item itself
                items[i][f['keyword']] = f
                items[i]['class'][f['keyword']] = 'interim'

            # check if this analysis is late/overdue

            resultdate = obj.aq_parent.getDateSampled() \
                if obj.portal_type == 'ReferenceAnalysis' \
                else obj.getResultCaptureDate()

            duedate = obj.aq_parent.getExpiryDate() \
                if obj.portal_type == 'ReferenceAnalysis' \
                else obj.getDueDate()

            items[i]['replace']['DueDate'] = \
                self.ulocalized_time(duedate, long_format=1)

            if items[i]['review_state'] not in [
                    'to_be_sampled', 'to_be_preserved', 'sample_due',
                    'published'
            ]:

                if (resultdate and resultdate > duedate) \
                    or (not resultdate and DateTime() > duedate):

                    items[i]['replace']['DueDate'] = '%s <img width="16" height="16" src="%s/++resource++bika.lims.images/late.png" title="%s"/>' % \
                        (self.ulocalized_time(duedate, long_format=1),
                         self.portal_url,
                         t(_("Late Analysis")))

            # Submitting user may not verify results (admin can though)
            if items[i]['review_state'] == 'to_be_verified' and \
               not checkPermission(VerifyOwnResults, obj):
                user_id = getSecurityManager().getUser().getId()
                self_submitted = False
                try:
                    review_history = list(
                        workflow.getInfoFor(obj, 'review_history'))
                    review_history.reverse()
                    for event in review_history:
                        if event.get('action') == 'submit':
                            if event.get('actor') == user_id:
                                self_submitted = True
                            break
                    if self_submitted:
                        items[i]['after']['state_title'] = \
                             "<img src='++resource++bika.lims.images/submitted-by-current-user.png' title='%s'/>" % \
                             (t(_("Cannot verify: Submitted by current user")))
                except WorkflowException:
                    pass

            # add icon for assigned analyses in AR views
            if self.context.portal_type == 'AnalysisRequest':
                obj = items[i]['obj']
                if obj.portal_type in ['ReferenceAnalysis',
                                       'DuplicateAnalysis'] or \
                   workflow.getInfoFor(obj, 'worksheetanalysis_review_state') == 'assigned':
                    br = obj.getBackReferences('WorksheetAnalysis')
                    if len(br) > 0:
                        ws = br[0]
                        items[i]['after']['state_title'] = \
                             "<a href='%s'><img src='++resource++bika.lims.images/worksheet.png' title='%s'/></a>" % \
                             (ws.absolute_url(),
                              t(_("Assigned to: ${worksheet_id}",
                                  mapping={'worksheet_id': safe_unicode(ws.id)})))

        # the TAL requires values for all interim fields on all
        # items, so we set blank values in unused cells
        for item in items:
            for field in self.interim_columns:
                if field not in item:
                    item[field] = ''

        # XXX order the list of interim columns
        interim_keys = self.interim_columns.keys()
        interim_keys.reverse()

        # add InterimFields keys to columns
        for col_id in interim_keys:
            if col_id not in self.columns:
                self.columns[col_id] = {
                    'title': self.interim_columns[col_id],
                    'input_width': '6',
                    'input_class': 'ajax_calculate numeric',
                    'sortable': False
                }

        if can_edit_analyses:
            new_states = []
            for state in self.review_states:
                # InterimFields are displayed in each review_state's columns.
                # They are also available through View.columns anyway.
                # In case of hidden fields, calcs.py should check
                # calcs/services for additional InterimFields.
                pos = 'Result' in state['columns'] and \
                    state['columns'].index('Result') or len(state['columns'])
                for col_id in interim_keys:
                    if col_id not in state['columns']:
                        state['columns'].insert(pos, col_id)
                # The retested column is inserted right after Uncertainty.
                pos = 'Result' in state['columns'] and \
                    state['columns'].index('Uncertainty') + 1 or len(state['columns'])
                state['columns'].insert(pos, 'retested')
                new_states.append(state)
            self.review_states = new_states
            # Allow selecting individual analyses
            self.show_select_column = True

        # Dry Matter.
        # The Dry Matter column is never enabled for reference sample contexts
        # and refers to getReportDryMatter in ARs.
        if items and \
            (hasattr(self.context, 'getReportDryMatter') and \
             self.context.getReportDryMatter()):

            # look through all items
            # if the item's Service supports ReportDryMatter, add getResultDM().
            for item in items:
                if item['obj'].getService().getReportDryMatter():
                    item['ResultDM'] = item['obj'].getResultDM()
                else:
                    item['ResultDM'] = ''
                if item['ResultDM']:
                    item['after']['ResultDM'] = "<em class='discreet'>%</em>"

            # modify the review_states list to include the ResultDM column
            new_states = []
            for state in self.review_states:
                pos = 'Result' in state['columns'] and \
                    state['columns'].index('Uncertainty') + 1 or len(state['columns'])
                state['columns'].insert(pos, 'ResultDM')
                new_states.append(state)
            self.review_states = new_states

        self.categories.sort()

        # self.json_specs = json.dumps(self.specs)
        self.json_interim_fields = json.dumps(self.interim_fields)
        self.items = items

        # Method and Instrument columns must be shown or hidden at the
        # same time, because the value assigned to one causes
        # a value reassignment to the other (one method can be performed
        # by different instruments)
        self.columns['Method']['toggle'] = show_methodinstr_columns
        self.columns['Instrument']['toggle'] = show_methodinstr_columns

        return items
Ejemplo n.º 35
0
    def _create_ar(self, context, request):
        """Creates AnalysisRequest object, with supporting Sample, Partition
        and Analysis objects.  The client is retrieved from the obj_path
        key in the request.

        Required request parameters:

            - Contact: One client contact Fullname.  The contact must exist
              in the specified client.  The first Contact with the specified
              value in its Fullname field will be used.

            - SampleType_<index> - Must be an existing sample type.

        Optional request parameters:

        - CCContacts: A list of contact Fullnames, which will be copied on
          all messages related to this AR and its sample or results.

        - CCEmails: A list of email addresses to include as above.

        - Sample_id: Create a secondary AR with an existing sample.  If
          unspecified, a new sample is created.

        - Specification: a lookup used to set default analysis spec values
          for all analyses

        - Analysis_Specification: specs (or overrides) per analysis, using
          a special lookup format.

            &Analysis_Specification:list=<Keyword>:min:max:error&...


        """

        wftool = getToolByName(context, 'portal_workflow')
        bc = getToolByName(context, 'bika_catalog')
        bsc = getToolByName(context, 'bika_setup_catalog')
        pc = getToolByName(context, 'portal_catalog')
        ret = {
            "url": router.url_for("create", force_external=True),
            "success": True,
            "error": False,
        }
        SamplingWorkflowEnabled = context.bika_setup.getSamplingWorkflowEnabled()
        for field in [
            'Client',
            'SampleType',
            'Contact',
            'SamplingDate',
            'Services']:
            self.require(field)
            self.used(field)

        try:
            client = resolve_request_lookup(context, request, 'Client')[0].getObject()
        except IndexError:
            raise Exception("Client not found")

        # Sample_id
        if 'Sample' in request:
            try:
                sample = resolve_request_lookup(context, request, 'Sample')[0].getObject()
            except IndexError:
                raise Exception("Sample not found")
        else:
            # Primary AR
            sample = _createObjectByType("Sample", client, tmpID())
            sample.unmarkCreationFlag()
            fields = set_fields_from_request(sample, request)
            for field in fields:
                self.used(field)
            sample._renameAfterCreation()
            sample.setSampleID(sample.getId())
            event.notify(ObjectInitializedEvent(sample))
            sample.at_post_create_script()

            if SamplingWorkflowEnabled:
                wftool.doActionFor(sample, 'sampling_workflow')
            else:
                wftool.doActionFor(sample, 'no_sampling_workflow')

        ret['sample_id'] = sample.getId()

        parts = [{'services': [],
                  'container': [],
                  'preservation': '',
                  'separate': False}]

        specs = self.get_specs_from_request()
        ar = _createObjectByType("AnalysisRequest", client, tmpID())
        ar.unmarkCreationFlag()
        fields = set_fields_from_request(ar, request)
        for field in fields:
            self.used(field)
        ar.setSample(sample.UID())
        ar._renameAfterCreation()
        ret['ar_id'] = ar.getId()
        brains = resolve_request_lookup(context, request, 'Services')
        service_uids = [p.UID for p in brains]
        new_analyses = ar.setAnalyses(service_uids, specs=specs)
        ar.setRequestID(ar.getId())
        ar.reindexObject()
        event.notify(ObjectInitializedEvent(ar))
        ar.at_post_create_script()

        # Create sample partitions
        parts_and_services = {}
        for _i in range(len(parts)):
            p = parts[_i]
            part_prefix = sample.getId() + "-P"
            if '%s%s' % (part_prefix, _i + 1) in sample.objectIds():
                parts[_i]['object'] = sample['%s%s' % (part_prefix, _i + 1)]
                parts_and_services['%s%s' % (part_prefix, _i + 1)] = p['services']
                part = parts[_i]['object']
            else:
                part = _createObjectByType("SamplePartition", sample, tmpID())
                parts[_i]['object'] = part
                container = None
                preservation = p['preservation']
                parts[_i]['prepreserved'] = False
                part.edit(
                    Container=container,
                    Preservation=preservation,
                )
                part.processForm()
                if SamplingWorkflowEnabled:
                    wftool.doActionFor(part, 'sampling_workflow')
                else:
                    wftool.doActionFor(part, 'no_sampling_workflow')
                parts_and_services[part.id] = p['services']

        if SamplingWorkflowEnabled:
            wftool.doActionFor(ar, 'sampling_workflow')
        else:
            wftool.doActionFor(ar, 'no_sampling_workflow')

        # Add analyses to sample partitions
        # XXX jsonapi create AR: right now, all new analyses are linked to the first samplepartition
        if new_analyses:
            analyses = list(part.getAnalyses())
            analyses.extend(new_analyses)
            part.edit(
                Analyses=analyses,
            )
            for analysis in new_analyses:
                analysis.setSamplePartition(part)

        # If Preservation is required for some partitions,
        # and the SamplingWorkflow is disabled, we need
        # to transition to to_be_preserved manually.
        if not SamplingWorkflowEnabled:
            to_be_preserved = []
            sample_due = []
            lowest_state = 'sample_due'
            for p in sample.objectValues('SamplePartition'):
                if p.getPreservation():
                    lowest_state = 'to_be_preserved'
                    to_be_preserved.append(p)
                else:
                    sample_due.append(p)
            for p in to_be_preserved:
                doActionFor(p, 'to_be_preserved')
            for p in sample_due:
                doActionFor(p, 'sample_due')
            doActionFor(sample, lowest_state)
            for analysis in ar.objectValues('Analysis'):
                doActionFor(analysis, lowest_state)
            doActionFor(ar, lowest_state)

        # receive secondary AR
        if request.get('Sample_id', ''):
            doActionFor(ar, 'sampled')
            doActionFor(ar, 'sample_due')
            not_receive = ['to_be_sampled', 'sample_due', 'sampled',
                           'to_be_preserved']
            sample_state = wftool.getInfoFor(sample, 'review_state')
            if sample_state not in not_receive:
                doActionFor(ar, 'receive')
            for analysis in ar.getAnalyses(full_objects=1):
                doActionFor(analysis, 'sampled')
                doActionFor(analysis, 'sample_due')
                if sample_state not in not_receive:
                    doActionFor(analysis, 'receive')

        if self.unused:
            raise BadRequest("The following request fields were not used: %s.  Request aborted." % self.unused)

        return ret
Ejemplo n.º 36
0
    def workflow_action_retract_ar(self):
        workflow = getToolByName(self.context, 'portal_workflow')
        # AR should be retracted
        # Can't transition inactive ARs
        if not isActive(self.context):
            message = _('Item is inactive.')
            self.context.plone_utils.addPortalMessage(message, 'info')
            self.request.response.redirect(self.context.absolute_url())
            return

        # 1. Copy the AR, linking the original one and vice versa
        ar = self.context
        newar = self.cloneAR(ar)

        # 2. The old AR gets a status of 'invalid'
        workflow.doActionFor(ar, 'retract_ar')

        # 3. The new AR copy opens in status 'to be verified'
        changeWorkflowState(newar, 'bika_ar_workflow', 'to_be_verified')

        # 4. The system immediately alerts the client contacts who ordered
        # the results, per email and SMS, that a possible mistake has been
        # picked up and is under investigation.
        # As much information as possible is provided in the email,
        # including a link to the AR online.
        laboratory = self.context.bika_setup.laboratory
        lab_address = "<br/>".join(laboratory.getPrintAddress())
        mime_msg = MIMEMultipart('related')
        mime_msg['Subject'] = t(
            _("Erroneus result publication from ${request_id}",
              mapping={"request_id": ar.getRequestID()}))
        mime_msg['From'] = formataddr((encode_header(laboratory.getName()),
                                       laboratory.getEmailAddress()))
        to = []
        contact = ar.getContact()
        if contact:
            to.append(
                formataddr((encode_header(contact.Title()),
                            contact.getEmailAddress())))
        for cc in ar.getCCContact():
            formatted = formataddr(
                (encode_header(cc.Title()), cc.getEmailAddress()))
            if formatted not in to:
                to.append(formatted)

        managers = self.context.portal_groups.getGroupMembers('LabManagers')
        for bcc in managers:
            user = self.portal.acl_users.getUser(bcc)
            if user:
                uemail = user.getProperty('email')
                ufull = user.getProperty('fullname')
                formatted = formataddr((encode_header(ufull), uemail))
                if formatted not in to:
                    to.append(formatted)
        mime_msg['To'] = ','.join(to)
        aranchor = "<a href='%s'>%s</a>" % (ar.absolute_url(),
                                            ar.getRequestID())
        naranchor = "<a href='%s'>%s</a>" % (newar.absolute_url(),
                                             newar.getRequestID())
        addremarks = ('addremarks' in self.request
                      and ar.getRemarks()) \
                    and ("<br/><br/>"
                         + _("Additional remarks:")
                         + "<br/>"
                         + ar.getRemarks().split("===")[1].strip()
                         + "<br/><br/>") \
                    or ''
        sub_d = dict(request_link=aranchor,
                     new_request_link=naranchor,
                     remarks=addremarks,
                     lab_address=lab_address)
        body = Template(
            "Some errors have been detected in the results report "
            "published from the Analysis Request $request_link. The Analysis "
            "Request $new_request_link has been created automatically and the "
            "previous has been invalidated.<br/>The possible mistake "
            "has been picked up and is under investigation.<br/><br/>"
            "$remarks $lab_address").safe_substitute(sub_d)
        msg_txt = MIMEText(safe_unicode(body).encode('utf-8'), _subtype='html')
        mime_msg.preamble = 'This is a multi-part MIME message.'
        mime_msg.attach(msg_txt)
        try:
            host = getToolByName(self.context, 'MailHost')
            host.send(mime_msg.as_string(), immediate=True)
        except Exception as msg:
            message = _(
                'Unable to send an email to alert lab '
                'client contacts that the Analysis Request has been '
                'retracted: ${error}',
                mapping={'error': safe_unicode(msg)})
            self.context.plone_utils.addPortalMessage(message, 'warning')

        message = _('${items} invalidated.',
                    mapping={'items': ar.getRequestID()})
        self.context.plone_utils.addPortalMessage(message, 'warning')
        self.request.response.redirect(newar.absolute_url())
Ejemplo n.º 37
0
    def __call__(self):
        context = self.context
        workflow = getToolByName(context, 'portal_workflow')
        # Collect related data and objects
        invoice = context.getInvoice()
        sample = context.getSample()
        samplePoint = sample.getSamplePoint()
        reviewState = workflow.getInfoFor(context, 'review_state')
        # Collect invoice information
        if invoice:
            self.invoiceId = invoice.getId()
        else:
            self.invoiceId = _('Proforma (Not yet invoiced)')
        # Collect verified invoice information
        verified = reviewState in VERIFIED_STATES
        if verified:
            self.verifiedBy = context.getVerifier()
        self.verified = verified
        self.request['verified'] = verified
        # Collect published date
        datePublished = context.getDatePublished()
        if datePublished is not None:
            datePublished = self.ulocalized_time(datePublished, long_format=1)
        self.datePublished = datePublished
        # Collect received date
        dateReceived = context.getDateReceived()
        if dateReceived is not None:
            dateReceived = self.ulocalized_time(dateReceived, long_format=1)
        self.dateReceived = dateReceived
        # Collect general information
        self.reviewState = reviewState
        contact = context.getContact()
        self.contact = contact.Title() if contact else ""
        self.clientOrderNumber = context.getClientOrderNumber()
        self.clientReference = context.getClientReference()
        self.clientSampleId = sample.getClientSampleID()
        self.sampleType = sample.getSampleType().Title()
        self.samplePoint = samplePoint and samplePoint.Title()
        self.requestId = context.getRequestID()
        self.headers = [
            {
                'title': 'Invoice ID',
                'value': self.invoiceId
            },
            {
                'title': 'Client Reference',
                'value': self.clientReference
            },
            {
                'title': 'Sample Type',
                'value': self.sampleType
            },
            {
                'title': 'Request ID',
                'value': self.requestId
            },
            {
                'title': 'Date Received',
                'value': self.dateReceived
            },
        ]
        if not isAttributeHidden('AnalysisRequest', 'ClientOrderNumber'):
            self.headers.append({
                'title': 'Client Order Number',
                'value': self.clientOrderNumber
            })
        if not isAttributeHidden('AnalysisRequest', 'SamplePoint'):
            self.headers.append({
                'title': 'Sample Point',
                'value': self.samplePoint
            })
        if self.verified:
            self.headers.append({
                'title': 'Verified By',
                'value': self.verifiedBy
            })
            if self.datePublished:
                self.headers.append({
                    'title': 'Date Published',
                    'value': self.datePublished
                })

        #   <tal:published tal:condition="view/datePublished">
        #       <th i18n:translate="">Date Published</th>
        #       <td tal:content="view/datePublished"></td>
        #   </tal:published>
        #</tr>
        analyses = []
        profiles = []
        # Retrieve required data from analyses collection
        all_analyses, all_profiles, analyses_from_profiles = \
            context.getServicesAndProfiles()
        # Relating category with solo analysis
        for analysis in all_analyses:
            service = analysis.getService()
            categoryName = service.getCategory().Title()
            # Find the category
            category = next(
                (o for o in analyses if o['name'] == categoryName), None)
            if category is None:
                category = {'name': categoryName, 'analyses': []}
                analyses.append(category)
            # Append the analysis to the category
            category['analyses'].append({
                'title': analysis.Title(),
                'price': analysis.getPrice(),
                'priceVat': "%.2f" % analysis.getVATAmount(),
                'priceTotal': "%.2f" % analysis.getTotalPrice(),
            })
        # Relating analysis services with their profiles
        # We'll take the analysis contained on each profile
        for profile in all_profiles:
            # If profile's checkbox "Use Analysis Profile Price" is enabled, only the profile price will be displayed.
            # Otherwise each analysis will display its own price.
            pservices = []
            if profile.getUseAnalysisProfilePrice():
                # We have to use the profiles price only
                for pservice in profile.getService():
                    pservices.append({
                        'title': pservice.Title(),
                        'price': None,
                        'priceVat': None,
                        'priceTotal': None,
                    })
                profiles.append({
                    'name': profile.title,
                    'price': profile.getAnalysisProfilePrice(),
                    'priceVat': profile.getVATAmount(),
                    'priceTotal': profile.getTotalPrice(),
                    'analyses': pservices
                })
            else:
                # We need the analyses prices instead of profile price
                for pservice in profile.getService():
                    # We want the analysis instead of the service, because we want the price for the client
                    # (for instance the bulk price)
                    panalysis = self._getAnalysisForProfileService(
                        pservice.getKeyword(), analyses_from_profiles)
                    pservices.append({
                        'title': pservice.Title(),
                        'price': panalysis.getPrice()
                            if panalysis else pservice.getPrice(),
                        'priceVat': "%.2f" % panalysis.getVATAmount()
                            if panalysis else pservice.getVATAmount(),
                        'priceTotal': "%.2f" % panalysis.getTotalPrice()
                            if panalysis else pservice.getTotalPrice(),
                    })
                profiles.append({
                    'name': profile.title,
                    'price': None,
                    'priceVat': None,
                    'priceTotal': None,
                    'analyses': pservices
                })
        self.analyses = analyses
        self.profiles = profiles
        # Get subtotals
        self.subtotal = context.getSubtotal()
        self.subtotalVATAmount = "%.2f" % context.getSubtotalVATAmount()
        self.subtotalTotalPrice = "%.2f" % context.getSubtotalTotalPrice()
        # Get totals
        self.memberDiscount = Decimal(context.getDefaultMemberDiscount())
        self.discountAmount = context.getDiscountAmount()
        self.VATAmount = "%.2f" % context.getVATAmount()
        self.totalPrice = "%.2f" % context.getTotalPrice()
        # Render the template
        return self.template()
Ejemplo n.º 38
0
    def __call__(self):
        bsc = getToolByName(self.context, 'bika_setup_catalog')
        bac = getToolByName(self.context, 'bika_analysis_catalog')
        self.report_content = {}
        parms = []
        headings = {}
        headings['header'] = _("Analyses out of range")
        headings['subheader'] = _("Analyses results out of specified range")

        count_all = 0

        query = {"portal_type": "Analysis", "sort_order": "reverse"}

        spec_uid = self.request.form.get("spec", False)
        spec_obj = None
        spec_title = ""
        if spec_uid:
            brains = bsc(UID=spec_uid)
            if brains:
                spec_obj = brains[0].getObject()
                spec_title = spec_obj.Title()
        parms.append({
            "title": _("Range spec"),
            "value": spec_title,
            "type": "text"
        })

        date_query = formatDateQuery(self.context, 'Received')
        if date_query:
            query['getDateReceived'] = date_query
            received = formatDateParms(self.context, 'Received')
        else:
            received = 'Undefined'
        parms.append({
            'title': _('Received'),
            'value': received,
            'type': 'text'
        })

        wf_tool = getToolByName(self.context, 'portal_workflow')
        if 'bika_analysis_workflow' in self.request.form:
            query['review_state'] = self.request.form['bika_analysis_workflow']
            review_state = wf_tool.getTitleForStateOnType(
                self.request.form['bika_analysis_workflow'], 'Analysis')
        else:
            review_state = 'Undefined'
        parms.append({
            'title': _('Status'),
            'value': review_state,
            'type': 'text'
        })

        if 'bika_cancellation_workflow' in self.request.form:
            query['cancellation_state'] = self.request.form[
                'bika_cancellation_workflow']
            cancellation_state = wf_tool.getTitleForStateOnType(
                self.request.form['bika_cancellation_workflow'], 'Analysis')
        else:
            cancellation_state = 'Undefined'
        parms.append({
            'title': _('Active'),
            'value': cancellation_state,
            'type': 'text'
        })

        if 'bika_worksheetanalysis_workflow' in self.request.form:
            query['worksheetanalysis_review_state'] = self.request.form[
                'bika_worksheetanalysis_workflow']
            ws_review_state = wf_tool.getTitleForStateOnType(
                self.request.form['bika_worksheetanalysis_workflow'],
                'Analysis')
        else:
            ws_review_state = 'Undefined'
        parms.append({
            'title': _('Assigned to worksheet'),
            'value': ws_review_state,
            'type': 'text'
        })

        # and now lets do the actual report lines
        col_heads = [
            _('Client'),
            _('Request'),
            _('Sample type'),
            _('Sample point'),
            _('Category'),
            _('Analysis'),
            _('Result'),
            _('Min'),
            _('Max'),
            _('Status'),
        ]
        if isAttributeHidden('Sample', 'SamplePoint'):
            col_heads.remove(_('Sample point'))

        formats = {
            'columns': 10,
            'col_heads': col_heads,
            'class': '',
        }

        datalines = []

        for a_proxy in bac(query):
            analysis = a_proxy.getObject()
            if analysis.getResult():
                try:
                    result = float(analysis.getResult())
                except (TypeError, ValueError):
                    continue
            else:
                continue

            keyword = analysis.getKeyword()

            # determine which specs to use for this particular analysis:
            # 1) if a spec is given in the query form, use it.
            # 2) otherwise, use the results range set on the analysis' AR.
            # If no spec is found for this keyword, skip the analysis.
            spec_dict = False
            if spec_obj:
                rr = spec_obj.getResultsRangeDict()
                if keyword in rr:
                    spec_dict = rr[keyword]
            else:
                ar = analysis.aq_parent
                rr = dicts_to_dict(ar.getResultsRange(), 'keyword')
                if keyword in rr:
                    spec_dict = rr[keyword]
                else:
                    continue
            if not spec_dict:
                continue
            try:
                spec_min = float(spec_dict['min'])
                spec_max = float(spec_dict['max'])
            except ValueError:
                continue
            if spec_min <= result <= spec_max:
                continue

            # check if in shoulder: out of range, but in acceptable
            # error percentage
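            # Worked example (illustrative numbers only): with result = 9.5,
            # spec_min = 10 and error = 10%, error_max = 9.5 + 0.95 = 10.45,
            # which reaches spec_min, so the result counts as a shoulder.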
            shoulder = False
            error = 0
            try:
                error = float(spec_dict.get('error', '0'))
            except (TypeError, ValueError):
                error = 0
            error_amount = (result / 100) * error
            error_min = result - error_amount
            error_max = result + error_amount
            if ((result < spec_min) and (error_max >= spec_min)) or \
                    ((result > spec_max) and (error_min <= spec_max)):
                shoulder = True

            dataline = []

            dataitem = {'value': analysis.getClientTitle()}
            dataline.append(dataitem)

            dataitem = {'value': analysis.getRequestID()}
            dataline.append(dataitem)

            dataitem = {'value': analysis.aq_parent.getSampleTypeTitle()}
            dataline.append(dataitem)

            if not isAttributeHidden('Sample', 'SamplePoint'):
                dataitem = {'value': analysis.aq_parent.getSamplePointTitle()}
                dataline.append(dataitem)

            dataitem = {'value': analysis.getCategoryTitle()}
            dataline.append(dataitem)

            dataitem = {'value': analysis.getServiceTitle()}
            dataline.append(dataitem)

            if shoulder:
                dataitem = {
                    'value': analysis.getResult(),
                    'img_after': '++resource++bika.lims.images/exclamation.png'
                }
            else:
                dataitem = {'value': analysis.getResult()}

            dataline.append(dataitem)

            dataitem = {'value': spec_dict['min']}
            dataline.append(dataitem)

            dataitem = {'value': spec_dict['max']}
            dataline.append(dataitem)

            state = wf_tool.getInfoFor(analysis, 'review_state', '')
            review_state = wf_tool.getTitleForStateOnType(state, 'Analysis')
            dataitem = {'value': review_state}
            dataline.append(dataitem)

            datalines.append(dataline)

            count_all += 1

        # table footer data
        footlines = []
        footline = []
        footitem = {
            'value': _('Number of analyses out of range for period'),
            'colspan': 9,
            'class': 'total_label'
        }
        footline.append(footitem)
        footitem = {'value': count_all}
        footline.append(footitem)
        footlines.append(footline)

        # report footer data
        footnotes = []
        footline = []
        footitem = {
            'value': _('Analysis result within error range'),
            'img_before': '++resource++bika.lims.images/exclamation.png'
        }
        footline.append(footitem)
        footnotes.append(footline)

        self.report_content = {
            'headings': headings,
            'parms': parms,
            'formats': formats,
            'datalines': datalines,
            'footings': footlines,
            'footnotes': footnotes
        }

        title = t(headings['header'])

        return {'report_title': title, 'report_data': self.template()}
Ejemplo n.º 39
0
    def set(self, instance, value, **kwargs):
        """Mutator.  Sets this field's references on ``instance`` to the
        given value (a UID, an object, or a list of either), recording
        each target's version_id."""

        rc = getToolByName(instance, REFERENCE_CATALOG)
        targetUIDs = [
            ref.targetUID
            for ref in rc.getReferences(instance, self.relationship)
        ]

        # empty value
        if not value:
            value = ()
        # list with one empty item
        if type(value) in (list, tuple) and len(value) == 1 and not value[0]:
            value = ()

        if not value and not targetUIDs:
            return

        if not isinstance(value, (list, tuple)):
            value = value,
        elif not self.multiValued and len(value) > 1:
            raise ValueError(
                "Multiple values given for single valued field %r" % self)

        ts = getToolByName(instance, "translation_service").translate

        # Normalize the given value: collect the target UIDs and resolve
        # the corresponding objects.
        uids = []
        targets = {}
        for v in value:
            if isinstance(v, basestring):
                uids.append(v)
                targets[v] = rc.lookupObject(v)
            elif hasattr(v, 'UID'):
                target_uid = callable(v.UID) and v.UID() or v.UID
                uids.append(target_uid)
                targets[target_uid] = v
            else:
                logger.info("Target has no UID: %s/%s" % (v, value))

        sub = [t for t in targetUIDs if t not in uids]
        add = [v for v in uids if v and v not in targetUIDs]

        newuids = [t for t in list(targetUIDs) + list(uids) if t not in sub]
        for uid in newuids:
            # update version_id of all existing references that aren't
            # about to be removed anyway (contents of sub)
            version_id = hasattr(targets[uid], 'version_id') and \
                       targets[uid].version_id or None
            if version_id is None:
                # attempt initial save of unversioned targets
                pr = getToolByName(instance, 'portal_repository')
                if pr.isVersionable(targets[uid]):
                    pr.save(obj=targets[uid],
                            comment=to_utf8(ts(_("Initial revision"))))
            if not hasattr(instance, 'reference_versions'):
                instance.reference_versions = {}
            if not hasattr(targets[uid], 'version_id'):
                targets[uid].version_id = None
            instance.reference_versions[uid] = targets[uid].version_id

        # tweak keyword arguments for addReference
        addRef_kw = kwargs.copy()
        addRef_kw.setdefault('referenceClass', self.referenceClass)
        if 'schema' in addRef_kw:
            del addRef_kw['schema']
        for uid in add:
            __traceback_info__ = (instance, uid, value, targetUIDs)
            # throws IndexError if uid is invalid
            rc.addReference(instance, uid, self.relationship, **addRef_kw)

        for uid in sub:
            rc.deleteReference(instance, uid, self.relationship)

        if self.referencesSortable:
            if not hasattr(aq_base(instance), 'at_ordered_refs'):
                instance.at_ordered_refs = {}

            instance.at_ordered_refs[self.relationship] = \
                tuple(filter(None, uids))

        if self.callStorageOnSet:
            # If this option is set, the reference field's values get written
            # to the storage even if the reference field never uses the
            # storage itself, e.g. to store the reference UIDs in an SQL field.
            ObjectField.set(self, instance, self.getRaw(instance), **kwargs)
Ejemplo n.º 40
0
    def __call__(self):
        # get all the data into datalines

        pc = getToolByName(self.context, 'portal_catalog')
        rc = getToolByName(self.context, 'reference_catalog')
        self.report_content = {}
        parms = []
        headings = {}
        headings['header'] = _("Attachments")
        headings['subheader'] = _(
            "The attachments linked to analysis requests and analyses")

        count_all = 0
        query = {'portal_type': 'Attachment'}
        if 'ClientUID' in self.request.form:
            client_uid = self.request.form['ClientUID']
            query['getClientUID'] = client_uid
            client = rc.lookupObject(client_uid)
            client_title = client.Title()
        else:
            client = logged_in_client(self.context)
            if client:
                client_title = client.Title()
                query['getClientUID'] = client.UID()
            else:
                client_title = 'All'
        parms.append(
            {'title': _('Client'),
             'value': client_title,
             'type': 'text'})

        date_query = formatDateQuery(self.context, 'Loaded')
        if date_query:
            query['getDateLoaded'] = date_query
            loaded = formatDateParms(self.context, 'Loaded')
            parms.append(
                {'title': _('Loaded'),
                 'value': loaded,
                 'type': 'text'})

        # and now lets do the actual report lines
        formats = {'columns': 6,
                   'col_heads': [_('Request'),
                                 _('File'),
                                 _('Attachment type'),
                                 _('Content type'),
                                 _('Size'),
                                 _('Loaded'),
                   ],
                   'class': '',
        }

        datalines = []
        attachments = pc(query)
        for a_proxy in attachments:
            attachment = a_proxy.getObject()
            attachment_file = attachment.getAttachmentFile()
            icon = attachment_file.getBestIcon()
            filename = attachment_file.filename
            filesize = attachment_file.get_size()
            filesize = filesize / 1024
            sizeunit = "Kb"
            if filesize > 1024:
                filesize = filesize / 1024
                sizeunit = "Mb"
            dateloaded = attachment.getDateLoaded()
            dataline = []
            dataitem = {'value': attachment.getTextTitle()}
            dataline.append(dataitem)
            dataitem = {'value': filename,
                        'img_before': icon}
            dataline.append(dataitem)
            dataitem = {
                'value': attachment.getAttachmentType().Title()
                if attachment.getAttachmentType() else ''}
            dataline.append(dataitem)
            dataitem = {
                'value': self.context.lookupMime(
                    attachment_file.getContentType())}
            dataline.append(dataitem)
            dataitem = {'value': '%s%s' % (filesize, sizeunit)}
            dataline.append(dataitem)
            dataitem = {'value': self.ulocalized_time(dateloaded)}
            dataline.append(dataitem)

            datalines.append(dataline)

            count_all += 1

        # footer data
        footlines = []
        footline = []
        footitem = {'value': _('Total'),
                    'colspan': 5,
                    'class': 'total_label'}
        footline.append(footitem)
        footitem = {'value': count_all}
        footline.append(footitem)
        footlines.append(footline)

        self.report_content = {
            'headings': headings,
            'parms': parms,
            'formats': formats,
            'datalines': datalines,
            'footings': footlines}

        if self.request.get('output_format', '') == 'CSV':
            import csv
            import StringIO
            import datetime

            fieldnames = [
                _('Request'),
                _('File'),
                _('Attachment type'),
                _('Content type'),
                _('Size'),
                _('Loaded'),
            ]
            output = StringIO.StringIO()
            dw = csv.DictWriter(output, fieldnames=fieldnames)
            dw.writerow(dict((fn, fn) for fn in fieldnames))
            for row in datalines:
                dw.writerow(dict(zip(fieldnames,
                                     [item['value'] for item in row])))
            report_data = output.getvalue()
            output.close()
            date = datetime.datetime.now().strftime("%Y%m%d%H%M")
            setheader = self.request.RESPONSE.setHeader
            setheader('Content-Type', 'text/csv')
            setheader("Content-Disposition",
                      "attachment;filename=\"analysesattachments_%s.csv\"" % date)
            self.request.RESPONSE.write(report_data)
        else:
            return {'report_title': t(headings['header']),
                    'report_data': self.template()}
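For reference, csv.DictWriter (used for the CSV output above) expects each row as a dict keyed by the fieldnames; a self-contained Python 2 sketch with illustrative values:

    import csv
    import StringIO

    fieldnames = ['Request', 'File', 'Size']
    rows = [{'Request': 'AR-001', 'File': 'report.pdf', 'Size': '12KB'}]
    output = StringIO.StringIO()
    writer = csv.DictWriter(output, fieldnames=fieldnames)
    writer.writerow(dict((fn, fn) for fn in fieldnames))  # header row
    for row in rows:
        writer.writerow(row)  # one dict per data row
    csv_text = output.getvalue()
    output.close()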
Ejemplo n.º 41
0
    def __call__(self):
        # get all the data into datalines

        pc = getToolByName(self.context, 'portal_catalog')
        bac = getToolByName(self.context, 'bika_analysis_catalog')
        bc = getToolByName(self.context, 'bika_catalog')
        rc = getToolByName(self.context, 'reference_catalog')

        self.report_content = {}
        parm_lines = {}
        parms = []
        headings = {}
        count_all_ars = 0
        count_all_analyses = 0
        query = {}

        this_client = logged_in_client(self.context)

        if not this_client and 'ClientUID' in self.request.form:
            client_uid = self.request.form['ClientUID']
            this_client = rc.lookupObject(client_uid)
            parms.append({
                'title': _('Client'),
                'value': this_client.Title(),
                'type': 'text'
            })

        if this_client:
            headings['header'] = _("Analysis requests and analyses")
            headings['subheader'] = _(
                "Number of Analysis requests and analyses")
        else:
            headings['header'] = _("Analysis requests and analyses per client")
            headings['subheader'] = _(
                "Number of Analysis requests and analyses per client")

        date_query = formatDateQuery(self.context, 'Requested')
        if date_query:
            query['created'] = date_query
            requested = formatDateParms(self.context, 'Requested')
            parms.append({
                'title': _('Requested'),
                'value': requested,
                'type': 'text'
            })

        workflow = getToolByName(self.context, 'portal_workflow')
        if 'bika_analysis_workflow' in self.request.form:
            query['review_state'] = self.request.form['bika_analysis_workflow']
            review_state = workflow.getTitleForStateOnType(
                self.request.form['bika_analysis_workflow'], 'Analysis')
            parms.append({
                'title': _('Status'),
                'value': review_state,
                'type': 'text'
            })

        if 'bika_cancellation_workflow' in self.request.form:
            query['cancellation_state'] = self.request.form[
                'bika_cancellation_workflow']
            cancellation_state = workflow.getTitleForStateOnType(
                self.request.form['bika_cancellation_workflow'], 'Analysis')
            parms.append({
                'title': _('Active'),
                'value': cancellation_state,
                'type': 'text'
            })

        if 'bika_worksheetanalysis_workflow' in self.request.form:
            query['worksheetanalysis_review_state'] = self.request.form[
                'bika_worksheetanalysis_workflow']
            ws_review_state = workflow.getTitleForStateOnType(
                self.request.form['bika_worksheetanalysis_workflow'],
                'Analysis')
            parms.append({
                'title': _('Assigned to worksheet'),
                'value': ws_review_state,
                'type': 'text'
            })

        # and now let's do the actual report lines
        formats = {
            'columns': 3,
            'col_heads': [_('Client'),
                          _('Number of requests'),
                          _('Number of analyses')],
            'class': '',
        }

        datalines = []

        if this_client:
            c_proxies = pc(portal_type="Client", UID=this_client.UID())
        else:
            c_proxies = pc(portal_type="Client", sort_on='sortable_title')

        for client in c_proxies:
            query['getClientUID'] = client.UID
            dataline = [
                {
                    'value': client.Title
                },
            ]
            query['portal_type'] = 'AnalysisRequest'
            ars = bc(query)
            count_ars = len(ars)
            dataitem = {'value': count_ars}
            dataline.append(dataitem)

            query['portal_type'] = 'Analysis'
            analyses = bac(query)
            count_analyses = len(analyses)
            dataitem = {'value': count_analyses}
            dataline.append(dataitem)

            datalines.append(dataline)

            count_all_analyses += count_analyses
            count_all_ars += count_ars

        # footer data
        footlines = []
        if not this_client:
            footline = []
            footitem = {'value': _('Total'), 'class': 'total_label'}
            footline.append(footitem)

            footitem = {'value': count_all_ars}
            footline.append(footitem)
            footitem = {'value': count_all_analyses}
            footline.append(footitem)

            footlines.append(footline)

        self.report_content = {
            'headings': headings,
            'parms': parms,
            'formats': formats,
            'datalines': datalines,
            'footings': footlines
        }

        if self.request.get('output_format', '') == 'CSV':
            import csv
            import StringIO
            import datetime

            fieldnames = [
                'Client',
                'Analysis Requests',
                'Analyses',
            ]
            output = StringIO.StringIO()
            dw = csv.DictWriter(output,
                                extrasaction='ignore',
                                fieldnames=fieldnames)
            dw.writerow(dict((fn, fn) for fn in fieldnames))
            for row in datalines:
                dw.writerow({
                    'Client': row[0]['value'],
                    'Analysis Requests': row[1]['value'],
                    'Analyses': row[2]['value'],
                })
            report_data = output.getvalue()
            output.close()
            date = datetime.datetime.now().strftime("%Y%m%d%H%M")
            setheader = self.request.RESPONSE.setHeader
            setheader('Content-Type', 'text/csv')
            setheader(
                "Content-Disposition",
                "attachment;filename=\"analysesperclient_%s.csv\"" % date)
            self.request.RESPONSE.write(report_data)
        else:
            return {
                'report_title': t(headings['header']),
                'report_data': self.template()
            }
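The per-client loop above reuses one query dict, swapping getClientUID and portal_type between catalog calls. A standalone sketch of that tallying pattern (catalogs and brains are assumed to behave as in the code above):

    def count_per_client(client_brains, bc, bac, query):
        # Returns ([(client title, nr ARs, nr analyses), ...], total ARs,
        # total analyses), mirroring the datalines/footline built above.
        lines, total_ars, total_analyses = [], 0, 0
        for client in client_brains:
            query['getClientUID'] = client.UID
            query['portal_type'] = 'AnalysisRequest'
            nr_ars = len(bc(query))
            query['portal_type'] = 'Analysis'
            nr_analyses = len(bac(query))
            lines.append((client.Title, nr_ars, nr_analyses))
            total_ars += nr_ars
            total_analyses += nr_analyses
        return lines, total_ars, total_analyses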
Ejemplo n.º 42
0
def ObjectModifiedEventHandler(obj, event):
    """ Various types need automation on edit.
    """
    if not hasattr(obj, 'portal_type'):
        return

    if obj.portal_type == 'Calculation':
        pr = getToolByName(obj, 'portal_repository')
        uc = getToolByName(obj, 'uid_catalog')
        obj = uc(UID=obj.UID())[0].getObject()

        backrefs = obj.getBackReferences('AnalysisServiceCalculation')
        for i, target in enumerate(backrefs):
            target = uc(UID=target.UID())[0].getObject()
            pr.save(obj=target,
                    comment="Calculation updated to version %s" %
                    (obj.version_id + 1, ))
            reference_versions = getattr(target, 'reference_versions', {})
            reference_versions[obj.UID()] = obj.version_id + 1
            target.reference_versions = reference_versions

        backrefs = obj.getBackReferences('MethodCalculation')
        for i, target in enumerate(backrefs):
            target = uc(UID=target.UID())[0].getObject()
            pr.save(obj=target,
                    comment="Calculation updated to version %s" %
                    (obj.version_id + 1, ))
            reference_versions = getattr(target, 'reference_versions', {})
            reference_versions[obj.UID()] = obj.version_id + 1
            target.reference_versions = reference_versions

    elif obj.portal_type == 'Client':
        mp = obj.manage_permission
        mp(permissions.ListFolderContents, [
            'Manager', 'LabManager', 'LabClerk', 'Analyst', 'Sampler',
            'Preserver', 'Owner'
        ], 0)
        mp(permissions.View, [
            'Manager', 'LabManager', 'LabClerk', 'Analyst', 'Sampler',
            'Preserver', 'Owner'
        ], 0)
        mp(permissions.ModifyPortalContent, ['Manager', 'LabManager', 'Owner'],
           0)
        mp(AddSupplyOrder, ['Manager', 'LabManager', 'Owner'], 0)
        mp('Access contents information', [
            'Manager', 'LabManager', 'Member', 'LabClerk', 'Analyst',
            'Sampler', 'Preserver', 'Owner'
        ], 0)

    elif obj.portal_type == 'Contact':
        mp = obj.manage_permission
        mp(permissions.View, [
            'Manager', 'LabManager', 'LabClerk', 'Owner', 'Analyst', 'Sampler',
            'Preserver'
        ], 0)
        mp(permissions.ModifyPortalContent, ['Manager', 'LabManager', 'Owner'],
           0)
    elif obj.portal_type == 'AnalysisCategory':
        for analysis in obj.getBackReferences(
                'AnalysisServiceAnalysisCategory'):
            analysis.reindexObject(idxs=[
                "getCategoryTitle",
                "getCategoryUID",
            ])
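The Calculation branch above repeats the same back-reference bookkeeping for the 'AnalysisServiceCalculation' and 'MethodCalculation' relationships. A possible consolidation, sketched under the same assumptions (the helper name is hypothetical):

    def _bump_backref_versions(obj, uc, pr, relationship):
        # For every object referencing obj via `relationship`, save a new
        # revision and pin it to obj's next version, as done inline above.
        for target in obj.getBackReferences(relationship):
            target = uc(UID=target.UID())[0].getObject()
            pr.save(obj=target,
                    comment="Calculation updated to version %s" %
                    (obj.version_id + 1,))
            reference_versions = getattr(target, 'reference_versions', {})
            reference_versions[obj.UID()] = obj.version_id + 1
            target.reference_versions = reference_versions

    # e.g. _bump_backref_versions(obj, uc, pr, 'AnalysisServiceCalculation')
    #      _bump_backref_versions(obj, uc, pr, 'MethodCalculation')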
Ejemplo n.º 43
0
    def getDepartments(self):
        bsc = getToolByName(self, 'bika_setup_catalog')
        deps = []
        for d in bsc(portal_type='Department', inactive_state='active'):
            deps.append((d.UID, d.Title))
        return DisplayList(deps)
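DisplayList here comes from Products.Archetypes and maps stored values to display titles; a minimal usage sketch of a vocabulary like the one returned above (the UIDs and titles are illustrative):

    from Products.Archetypes.utils import DisplayList

    deps = DisplayList([('uid-1', 'Chemistry'), ('uid-2', 'Microbiology')])
    deps.getValue('uid-1')   # 'Chemistry'
    deps.keys()              # ['uid-1', 'uid-2']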
Ejemplo n.º 44
0
    def folderitems(self):
        rc = getToolByName(self.context, REFERENCE_CATALOG)
        wf = getToolByName(self.context, 'portal_workflow')
        pr = getToolByName(self.context, 'portal_repository')

        isVersionable = pr.isVersionable(aq_inner(self.context))
        try:
            review_history = wf.getInfoFor(self.context, 'review_history')
            review_history = list(review_history)
            review_history.reverse()
        except WorkflowException:
            review_history = []
        items = []
        for entry in review_history:
            # this folderitems doesn't subclass from the bika_listing.py
            # so we create items from scratch
            review_state = entry.get('review_state')
            state_title = wf.getTitleForStateOnType(review_state,
                                                    self.context.portal_type)
            item = {
                'obj': self.context,
                'id': self.context.id,
                'uid': self.context.UID(),
                'title': self.context.title_or_id(),
                'type_class': '',
                'url': self.context.absolute_url(),
                'relative_url': self.context.absolute_url(),
                'view_url': self.context.absolute_url(),
                'path': "/".join(self.context.getPhysicalPath()),
                'replace': {},
                'before': {},
                'after': {},
                'choices': {},
                'class': {},
                'state_class': '',
                'allow_edit': [],
                'required': [],
                'Version':
                isVersionable and self.context.get('version_id', '') or '0',
                'Date': self.ulocalized_time(entry.get('time')),
                'sortable_date': entry.get('time'),
                'User': entry.get('actor'),
                'Action': entry.get('action') and entry.get('action')
                or 'Create',
                'Description': "review state: %s" % state_title,
            }
            items.append(item)

        if isVersionable:
            request = TestRequest()
            chv = ContentHistoryViewlet(self.context, request, None, None)
            chv.navigation_root_url = chv.site_url = 'http://localhost:8080/bikas'
            version_history = chv.revisionHistory()
        else:
            version_history = []

        for entry in version_history:
            # this folderitems doesn't subclass from the bika_listing.py
            # so we create items from scratch
            # disregard the first entry of version history, as it is
            # represented by the first entry in review_history
            if not entry.get('version_id'):
                continue
            item = {
                'obj': self.context,
                'id': self.context.id,
                'uid': self.context.UID(),
                'title': self.context.title_or_id(),
                'type_class': '',
                'url': self.context.absolute_url(),
                'relative_url': self.context.absolute_url(),
                'view_url': self.context.absolute_url(),
                'path': "/".join(self.context.getPhysicalPath()),
                'replace': {},
                'before': {},
                'after': {},
                'choices': {},
                'class': {},
                'state_class': '',
                'allow_edit': [],
                'required': [],
                'Version': entry.get('version_id'),
                'Date': self.ulocalized_time(DateTime(entry.get('time'))),
                'sortable_date': entry.get('time'),
                'User': entry.get('actor').get('fullname'),
                'Action': entry.get('action') and entry.get('action')
                or 'Create',
                'Description': entry.get('comments'),
            }
            items.append(item)

        items = sorted(items, key=itemgetter('sortable_date'))
        items.reverse()

        return items
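A small note on the final ordering above: the sort followed by an in-place reverse can be collapsed into one call, keeping the newest history entries first.

    from operator import itemgetter

    # Equivalent to sorted(...) followed by items.reverse().
    items = sorted(items, key=itemgetter('sortable_date'), reverse=True)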
Ejemplo n.º 45
0
    def getDefaultLifetime(self):
        """ get the default retention period """
        settings = getToolByName(self, 'bika_setup')
        return settings.getDefaultSampleLifetime()
Ejemplo n.º 46
0
    def workflow_action_save_analyses_button(self):
        form = self.request.form
        workflow = getToolByName(self.context, 'portal_workflow')
        bsc = self.context.bika_setup_catalog
        action, came_from = WorkflowAction._get_form_workflow_action(self)
        # AR Manage Analyses: save Analyses
        ar = self.context
        sample = ar.getSample()
        objects = WorkflowAction._get_selected_items(self)
        if not objects:
            message = _("No analyses have been selected")
            self.context.plone_utils.addPortalMessage(message, 'info')
            self.destination_url = self.context.absolute_url() + "/analyses"
            self.request.response.redirect(self.destination_url)
            return
        Analyses = objects.keys()
        prices = form.get("Price", [None])[0]

        # Hidden analyses?
        # https://jira.bikalabs.com/browse/LIMS-1324
        outs = []
        hiddenans = form.get('Hidden', {})
        for uid in Analyses:
            hidden = hiddenans.get(uid, '')
            hidden = True if hidden == 'on' else False
            outs.append({'uid': uid, 'hidden': hidden})
        ar.setAnalysisServicesSettings(outs)

        specs = {}
        if form.get("min", None):
            for service_uid in Analyses:
                service = bsc(UID=service_uid)[0].getObject()
                keyword = service.getKeyword()
                specs[service_uid] = {
                    "min": form["min"][0][service_uid],
                    "max": form["max"][0][service_uid],
                    "error": form["error"][0][service_uid],
                    "keyword": keyword,
                    "uid": service_uid,
                }
        else:
            for service_uid in Analyses:
                service = bsc(UID=service_uid)[0].getObject()
                keyword = service.getKeyword()
                specs[service_uid] = {
                    "min": "",
                    "max": "",
                    "error": "",
                    "keyword": keyword,
                    "uid": service_uid
                }
        new = ar.setAnalyses(Analyses, prices=prices, specs=specs.values())
        # link analyses and partitions
        # If Bika Setup > Analyses > 'Display individual sample
        # partitions' is checked, no Partitions available.
        # https://github.com/bikalabs/Bika-LIMS/issues/1030
        if 'Partition' in form:
            for service_uid, service in objects.items():
                part_id = form['Partition'][0][service_uid]
                part = sample[part_id]
                analysis = ar[service.getKeyword()]
                analysis.setSamplePartition(part)
                analysis.reindexObject()

        if new:
            for analysis in new:
                # if the AR has progressed past sample_received, we need to bring it back.
                ar_state = workflow.getInfoFor(ar, 'review_state')
                if ar_state in ('attachment_due', 'to_be_verified'):
                    # Apply to AR only; we don't want this transition to cascade.
                    ar.REQUEST['workflow_skiplist'].append(
                        "retract all analyses")
                    workflow.doActionFor(ar, 'retract')
                    ar.REQUEST['workflow_skiplist'].remove(
                        "retract all analyses")
                    ar_state = workflow.getInfoFor(ar, 'review_state')
                # Then we need to forward new analyses state
                analysis.updateDueDate()
                changeWorkflowState(analysis, 'bika_analysis_workflow',
                                    ar_state)

        message = PMF("Changes saved.")
        self.context.plone_utils.addPortalMessage(message, 'info')
        self.destination_url = self.context.absolute_url()
        self.request.response.redirect(self.destination_url)
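The two specs-building branches above differ only in whether the min/max/error values come from the form; a sketch of a consolidated version, assuming the same form layout:

    specs = {}
    has_ranges = bool(form.get("min", None))
    for service_uid in Analyses:
        service = bsc(UID=service_uid)[0].getObject()
        specs[service_uid] = {
            "min": form["min"][0][service_uid] if has_ranges else "",
            "max": form["max"][0][service_uid] if has_ranges else "",
            "error": form["error"][0][service_uid] if has_ranges else "",
            "keyword": service.getKeyword(),
            "uid": service_uid,
        }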
Ejemplo n.º 47
0
    def folderitems(self):
        wf = getToolByName(self, 'portal_workflow')
        rc = getToolByName(self, REFERENCE_CATALOG)
        pm = getToolByName(self.context, "portal_membership")

        self.member = pm.getAuthenticatedMember()
        roles = self.member.getRoles()
        self.restrict_results = 'Manager' not in roles \
                and 'LabManager' not in roles \
                and 'LabClerk' not in roles \
                and 'RegulatoryInspector' not in roles \
                and self.context.bika_setup.getRestrictWorksheetUsersAccess()

        if self.restrict_results == True:
            # Remove 'Mine' button and hide 'Analyst' column
            del self.review_states[1]  # Mine
            self.columns['Analyst']['toggle'] = False

        can_manage = pm.checkPermission(ManageWorksheets, self.context)

        self.selected_state = self.request.get(
            "%s_review_state" % self.form_id, 'default')

        items = BikaListingView.folderitems(self)
        new_items = []
        analyst_choices = []
        for a in self.analysts:
            analyst_choices.append({
                'ResultValue': a,
                'ResultText': self.analysts.getValue(a)
            })
        can_reassign = False
        self.allow_edit = self.isEditionAllowed()

        for x in range(len(items)):
            if not items[x].has_key('obj'):
                new_items.append(items[x])
                continue

            obj = items[x]['obj']

            analyst = obj.getAnalyst().strip()
            creator = obj.Creator().strip()
            items[x]['Analyst'] = analyst

            priority = obj.getPriority()
            items[x]['Priority'] = ''

            instrument = obj.getInstrument()
            items[x]['Instrument'] = instrument and instrument.Title() or ''

            items[x]['Title'] = obj.Title()
            wst = obj.getWorksheetTemplate()
            items[x]['Template'] = wst and wst.Title() or ''
            if wst:
                items[x]['replace']['Template'] = "<a href='%s'>%s</a>" % \
                    (wst.absolute_url(), wst.Title())

            items[x]['getPriority'] = ''
            items[x]['CreationDate'] = self.ulocalized_time(obj.creation_date)

            nr_analyses = len(obj.getAnalyses())
            if nr_analyses == 0:
                # give empties pretty classes.
                items[x]['table_row_class'] = 'state-empty-worksheet'

            layout = obj.getLayout()

            if len(layout) > 0:
                items[x]['replace']['Title'] = "<a href='%s/manage_results'>%s</a>" % \
                    (items[x]['url'], items[x]['Title'])
            else:
                items[x]['replace']['Title'] = "<a href='%s/add_analyses'>%s</a>" % \
                    (items[x]['url'], items[x]['Title'])

            # set Services
            ws_services = {}
            for slot in [s for s in layout if s['type'] == 'a']:
                analysis = rc.lookupObject(slot['analysis_uid'])
                service = analysis.getService()
                title = service.Title()
                if title not in ws_services:
                    ws_services[title] = "<a href='%s'>%s,</a>" % \
                        (service.absolute_url(), title)
            keys = list(ws_services.keys())
            keys.sort()
            services = []
            for key in keys:
                services.append(ws_services[key])
            if services:
                services[-1] = services[-1].replace(",", "")
            items[x]['Services'] = ""
            items[x]['replace']['Services'] = " ".join(services)

            # set Sample Types
            pos_parent = {}
            for slot in layout:
                # compensate for bad data caused by a stupid bug.
                if type(slot['position']) in (list, tuple):
                    slot['position'] = slot['position'][0]
                if slot['position'] == 'new':
                    continue
                if slot['position'] in pos_parent:
                    continue
                pos_parent[slot['position']] = rc.lookupObject(
                    slot['container_uid'])

            sampletypes = {}
            blanks = {}
            controls = {}
            for container in pos_parent.values():
                if container.portal_type == 'AnalysisRequest':
                    sampletypes["<a href='%s'>%s,</a>" % \
                               (container.getSample().getSampleType().absolute_url(),
                                container.getSample().getSampleType().Title())] = 1
                if container.portal_type == 'ReferenceSample' and container.getBlank(
                ):
                    blanks["<a href='%s'>%s,</a>" % \
                           (container.absolute_url(),
                            container.Title())] = 1
                if container.portal_type == 'ReferenceSample' and not container.getBlank(
                ):
                    controls["<a href='%s'>%s,</a>" % \
                           (container.absolute_url(),
                            container.Title())] = 1
            sampletypes = list(sampletypes.keys())
            sampletypes.sort()
            blanks = list(blanks.keys())
            blanks.sort()
            controls = list(controls.keys())
            controls.sort()

            # remove trailing commas
            if sampletypes:
                sampletypes[-1] = sampletypes[-1].replace(",", "")
            if controls:
                controls[-1] = controls[-1].replace(",", "")
            else:
                if blanks:
                    blanks[-1] = blanks[-1].replace(",", "")

            items[x]['SampleTypes'] = ""
            items[x]['replace']['SampleTypes'] = " ".join(sampletypes)
            items[x]['QC'] = ""
            items[x]['replace']['QC'] = " ".join(blanks + controls)
            items[x]['QCTotals'] = ''

            # Get all Worksheet QC Analyses
            totalQCAnalyses = [
                a for a in obj.getAnalyses()
                if a.portal_type == 'ReferenceAnalysis'
                or a.portal_type == 'DuplicateAnalysis'
            ]
            totalQCSamples = []
            # Get all Worksheet QC samples
            for analysis in totalQCAnalyses:
                if analysis.getSample().UID() not in totalQCSamples:
                    totalQCSamples.append(analysis.getSample().UID())
            # Total QC Samples (Total Routine Analyses)
            items[x]['QCTotals'] = str(len(totalQCSamples)) + '(' + str(
                len(totalQCAnalyses)) + ')'

            totalRoutineAnalyses = [
                a for a in obj.getAnalyses() if a not in totalQCAnalyses
            ]
            totalRoutineSamples = []
            for analysis in totalRoutineAnalyses:
                if analysis.getSample().UID() not in totalRoutineSamples:
                    totalRoutineSamples.append(analysis.getSample().UID())

            # Total Routine Samples (Total Routine Analyses)
            items[x]['RoutineTotals'] = str(
                len(totalRoutineSamples)) + '(' + str(
                    len(totalRoutineAnalyses)) + ')'

            if items[x]['review_state'] == 'open' \
                and self.allow_edit \
                and self.restrict_results == False \
                and can_manage == True:
                items[x]['allow_edit'] = [
                    'Analyst',
                ]
                items[x]['required'] = [
                    'Analyst',
                ]
                can_reassign = True
                items[x]['choices'] = {'Analyst': analyst_choices}

            new_items.append(items[x])

        if can_reassign:
            for x in range(len(self.review_states)):
                if self.review_states[x]['id'] in ['default', 'mine', 'open']:
                    self.review_states[x]['custom_actions'] = [
                        {
                            'id': 'reassign',
                            'title': _('Reassign')
                        },
                    ]

        self.show_select_column = can_reassign
        self.show_workflow_action_buttons = can_reassign

        return new_items
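The "embed a trailing comma, then strip it from the last element" pattern used for Services and SampleTypes above can be avoided by joining plain anchors once; a sketch (titles_to_services stands for a hypothetical {title: service} mapping collected from the layout):

    links = ["<a href='%s'>%s</a>" % (service.absolute_url(), title)
             for title, service in sorted(titles_to_services.items())]
    services_html = ", ".join(links)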
Ejemplo n.º 48
0
    def workflow_action_preserve(self):
        form = self.request.form
        workflow = getToolByName(self.context, 'portal_workflow')
        action, came_from = WorkflowAction._get_form_workflow_action(self)
        checkPermission = self.context.portal_membership.checkPermission
        # Partition Preservation
        # the partition table shown in AR and Sample views sends its
        # action button submits here.
        objects = WorkflowAction._get_selected_items(self)
        transitioned = []
        incomplete = []
        for obj_uid, obj in objects.items():
            part = obj
            # can't transition inactive items
            if workflow.getInfoFor(part, 'inactive_state', '') == 'inactive':
                continue
            if not checkPermission(PreserveSample, part):
                continue
            # grab this object's Preserver and DatePreserved from the form
            Preserver = form['getPreserver'][0][obj_uid].strip()
            Preserver = Preserver and Preserver or ''
            DatePreserved = form['getDatePreserved'][0][obj_uid].strip()
            DatePreserved = DatePreserved and DateTime(DatePreserved) or ''
            # write them to the sample
            part.setPreserver(Preserver)
            part.setDatePreserved(DatePreserved)
            # transition the object if both values are present
            if Preserver and DatePreserved:
                workflow.doActionFor(part, action)
                transitioned.append(part.id)
            else:
                incomplete.append(part.id)
            part.reindexObject()
            part.aq_parent.reindexObject()
        message = None
        if len(transitioned) > 1:
            message = _(
                '${items} are waiting to be received.',
                mapping={'items': safe_unicode(', '.join(transitioned))})
            self.context.plone_utils.addPortalMessage(message, 'info')
        elif len(transitioned) == 1:
            message = _(
                '${item} is waiting to be received.',
                mapping={'item': safe_unicode(', '.join(transitioned))})
            self.context.plone_utils.addPortalMessage(message, 'info')
        if not message:
            message = _('No changes made.')
            self.context.plone_utils.addPortalMessage(message, 'info')

        if len(incomplete) > 1:
            message = _('${items} are missing Preserver or Date Preserved',
                        mapping={'items': safe_unicode(', '.join(incomplete))})
            self.context.plone_utils.addPortalMessage(message, 'error')
        elif len(incomplete) == 1:
            message = _('${item} is missing Preserver or Date Preserved',
                        mapping={'item': safe_unicode(', '.join(incomplete))})
            self.context.plone_utils.addPortalMessage(message, 'error')

        self.destination_url = self.request.get_header(
            "referer", self.context.absolute_url())
        self.request.response.redirect(self.destination_url)
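The per-partition form handling above normalizes Preserver and DatePreserved before deciding whether to transition; an equivalent sketch of that normalization:

    preserver = form['getPreserver'][0][obj_uid].strip() or ''
    date_str = form['getDatePreserved'][0][obj_uid].strip()
    date_preserved = DateTime(date_str) if date_str else ''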
Ejemplo n.º 49
0
    def isEditionAllowed(self):
        pm = getToolByName(self.context, "portal_membership")
        return pm.checkPermission(EditWorksheet, self.context)
Ejemplo n.º 50
0
    def workflow_action_submit(self):
        form = self.request.form
        rc = getToolByName(self.context, REFERENCE_CATALOG)
        action, came_from = WorkflowAction._get_form_workflow_action(self)
        checkPermission = self.context.portal_membership.checkPermission
        if not isActive(self.context):
            message = _('Item is inactive.')
            self.context.plone_utils.addPortalMessage(message, 'info')
            self.request.response.redirect(self.context.absolute_url())
            return
        # calcs.js has kept item_data and form input interim values synced,
        # so the json strings from item_data will be the same as the form values
        item_data = {}
        if 'item_data' in form:
            if isinstance(form['item_data'], list):
                for i_d in form['item_data']:
                    for i, d in json.loads(i_d).items():
                        item_data[i] = d
            else:
                item_data = json.loads(form['item_data'])
        selected_analyses = WorkflowAction._get_selected_items(self)
        results = {}
        hasInterims = {}
        # check that the form values match the database
        # save them if not.
        for uid, result in self.request.form.get('Result', [{}])[0].items():
            # if the AR has ReportDryMatter set, get dry_result from form.
            dry_result = ''
            if hasattr(self.context, 'getReportDryMatter') \
               and self.context.getReportDryMatter():
                for k, v in self.request.form['ResultDM'][0].items():
                    if uid == k:
                        dry_result = v
                        break
            if uid in selected_analyses:
                analysis = selected_analyses[uid]
            else:
                analysis = rc.lookupObject(uid)
            if not analysis:
                # ignore result if analysis object no longer exists
                continue
            results[uid] = result
            interimFields = item_data[uid]
            if len(interimFields) > 0:
                hasInterims[uid] = True
            else:
                hasInterims[uid] = False
            retested = 'retested' in form and uid in form['retested']
            remarks = form.get('Remarks', [
                {},
            ])[0].get(uid, '')
            # Don't save unnecessary things
            # https://github.com/bikalabs/Bika-LIMS/issues/766:
            #    Somehow, using analysis.edit() fails silently when
            #    logged in as Analyst.
            if analysis.getInterimFields() != interimFields or \
               analysis.getRetested() != retested or \
               analysis.getRemarks() != remarks:
                analysis.setInterimFields(interimFields)
                analysis.setRetested(retested)
                analysis.setRemarks(remarks)
            # save results separately, otherwise capture date is rewritten
            if analysis.getResult() != result or \
               analysis.getResultDM() != dry_result:
                analysis.setResultDM(dry_result)
                analysis.setResult(result)
        methods = self.request.form.get('Method', [{}])[0]
        instruments = self.request.form.get('Instrument', [{}])[0]
        analysts = self.request.form.get('Analyst', [{}])[0]
        uncertainties = self.request.form.get('Uncertainty', [{}])[0]
        dlimits = self.request.form.get('DetectionLimit', [{}])[0]
        # discover which items may be submitted
        submissable = []
        for uid, analysis in selected_analyses.items():
            analysis_active = isActive(analysis)

            # Need to save the instrument?
            if uid in instruments and analysis_active:
                # TODO: Add SetAnalysisInstrument permission
                # allow_setinstrument = sm.checkPermission(SetAnalysisInstrument)
                allow_setinstrument = True
                # ---8<-----
                if allow_setinstrument == True:
                    # Does the current analysis allow this instrument,
                    # according to its analysis service and method?
                    if (instruments[uid] == ''):
                        previnstr = analysis.getInstrument()
                        if previnstr:
                            previnstr.removeAnalysis(analysis)
                        analysis.setInstrument(None)
                    elif analysis.isInstrumentAllowed(instruments[uid]):
                        previnstr = analysis.getInstrument()
                        if previnstr:
                            previnstr.removeAnalysis(analysis)
                        analysis.setInstrument(instruments[uid])
                        instrument = analysis.getInstrument()
                        instrument.addAnalysis(analysis)

            # Need to save the method?
            if uid in methods and analysis_active:
                # TODO: Add SetAnalysisMethod permission
                # allow_setmethod = sm.checkPermission(SetAnalysisMethod)
                allow_setmethod = True
                # ---8<-----
                if allow_setmethod == True and analysis.isMethodAllowed(
                        methods[uid]):
                    analysis.setMethod(methods[uid])

            # Need to save the analyst?
            if uid in analysts and analysis_active:
                analysis.setAnalyst(analysts[uid])

            # Need to save the uncertainty?
            if uid in uncertainties and analysis_active:
                analysis.setUncertainty(uncertainties[uid])

            # Need to save the detection limit?
            if analysis_active:
                analysis.setDetectionLimitOperand(dlimits.get(uid, None))

            if uid not in results or not results[uid]:
                continue
            can_submit = True
            # guard_submit does a lot of the same stuff, too.
            # the code there has also been commented.
            # we must find a better way to allow dependencies to control
            # this process.
            # for dependency in analysis.getDependencies():
            #     dep_state = workflow.getInfoFor(dependency, 'review_state')
            #     if hasInterims[uid]:
            #         if dep_state in ('to_be_sampled', 'to_be_preserved',
            #                          'sample_due', 'sample_received',
            #                          'attachment_due', 'to_be_verified',):
            #             can_submit = False
            #             break
            #     else:
            #         if dep_state in ('to_be_sampled', 'to_be_preserved',
            #                          'sample_due', 'sample_received',):
            #             can_submit = False
            #             break
            if can_submit and analysis not in submissable:
                submissable.append(analysis)
        # and then submit them.
        for analysis in submissable:
            doActionFor(analysis, 'submit')
        message = PMF("Changes saved.")
        self.context.plone_utils.addPortalMessage(message, 'info')
        if checkPermission(EditResults, self.context):
            self.destination_url = self.context.absolute_url(
            ) + "/manage_results"
        else:
            self.destination_url = self.context.absolute_url()
        self.request.response.redirect(self.destination_url)
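Several per-analysis fields above (Result, Method, Instrument, Analyst, Uncertainty, DetectionLimit) are posted as a one-element list wrapping a dict keyed by analysis UID; a small helper sketch (the helper itself is hypothetical):

    def _form_mapping(form, name):
        # Each per-analysis field arrives as [ {uid: value, ...} ];
        # return the inner mapping, or {} if the field is absent.
        return form.get(name, [{}])[0]

    methods = _form_mapping(form, 'Method')
    instruments = _form_mapping(form, 'Instrument')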
Ejemplo n.º 51
0
    def __init__(self, context, request):
        super(ReferenceSamplesView, self).__init__(context, request)
        portal = getToolByName(context, 'portal_url').getPortalObject()
        self.icon = self.portal_url + "/++resource++bika.lims.images/referencesample_big.png"
        self.title = self.context.translate(_("Reference Samples"))
        self.description = self.context.translate(
            _("All reference samples in the system are displayed here."))
        self.catalog = 'bika_catalog'
        self.contentFilter = {
            'portal_type': 'ReferenceSample',
            'sort_on': 'id',
            'sort_order': 'reverse',
            'path': {
                "query": ["/"],
                "level": 0
            },
        }
        self.context_actions = {}
        self.show_sort_column = False
        self.show_select_row = False
        self.show_select_column = True
        self.pagesize = 50

        request.set('disable_border', 1)

        self.columns = {
            'ID': {
                'title': _('ID'),
                'index': 'id'
            },
            'Title': {
                'title': _('Title'),
                'index': 'sortable_title',
                'toggle': True
            },
            'Supplier': {
                'title': _('Supplier'),
                'toggle': True
            },
            'Definition': {
                'title': _('Reference Definition'),
                'toggle': True
            },
            'DateSampled': {
                'title': _('Date Sampled'),
                'index': 'getDateSampled',
                'toggle': True
            },
            'DateReceived': {
                'title': _('Date Received'),
                'index': 'getDateReceived',
                'toggle': True
            },
            'ExpiryDate': {
                'title': _('Expiry Date'),
                'index': 'getExpiryDate',
                'toggle': True
            },
            'state_title': {
                'title': _('State'),
                'toggle': True
            },
        }
        self.review_states = [
            {
                'id':
                'default',
                'title':
                _('Current'),
                'contentFilter': {
                    'review_state': 'current'
                },
                'columns': [
                    'ID', 'Title', 'Supplier', 'Definition', 'DateSampled',
                    'DateReceived', 'ExpiryDate'
                ]
            },
            {
                'id':
                'expired',
                'title':
                _('Expired'),
                'contentFilter': {
                    'review_state': 'expired'
                },
                'columns': [
                    'ID', 'Title', 'Supplier', 'Definition', 'DateSampled',
                    'DateReceived', 'ExpiryDate'
                ]
            },
            {
                'id':
                'disposed',
                'title':
                _('Disposed'),
                'contentFilter': {
                    'review_state': 'disposed'
                },
                'columns': [
                    'ID', 'Title', 'Supplier', 'Definition', 'DateSampled',
                    'DateReceived', 'ExpiryDate'
                ]
            },
            {
                'id':
                'all',
                'title':
                _('All'),
                'contentFilter': {},
                'columns': [
                    'ID', 'Title', 'Supplier', 'Definition', 'DateSampled',
                    'DateReceived', 'ExpiryDate', 'state_title'
                ]
            },
        ]
Ejemplo n.º 52
0
    def __call__(self):
        wf = getToolByName(self.context, 'portal_workflow')
        mtool = getToolByName(self.context, 'portal_membership')
        self.allow_edit = mtool.checkPermission('Modify portal content',
                                                self.context)
        return self.template()
Ejemplo n.º 53
0
    def __call__(self):
        ar = self.context
        workflow = getToolByName(self.context, 'portal_workflow')
        if 'transition' in self.request.form:
            doActionFor(self.context, self.request.form['transition'])
        # Contacts get expanded for view
        contact = self.context.getContact()
        contacts = []
        for cc in self.context.getCCContact():
            contacts.append(cc)
        if contact in contacts:
            contacts.remove(contact)
        ccemails = []
        for cc in contacts:
            ccemails.append(
                "%s &lt;<a href='mailto:%s'>%s</a>&gt;" %
                (cc.Title(), cc.getEmailAddress(), cc.getEmailAddress()))
        # CC Emails become mailto links
        emails = self.context.getCCEmails()
        if isinstance(emails, str):
            emails = emails and [
                emails,
            ] or []
        cc_emails = []
        cc_hrefs = []
        for cc in emails:
            cc_emails.append(cc)
            cc_hrefs.append("<a href='mailto:%s'>%s</a>" % (cc, cc))
        # render header table
        self.header_table = HeaderTableView(self.context, self.request)()
        # Create Partitions View for this ARs sample
        p = SamplePartitionsView(self.context.getSample(), self.request)
        p.show_column_toggles = False
        self.parts = p.contents_table()
        # Create Field and Lab Analyses tables
        self.tables = {}
        for poc in POINTS_OF_CAPTURE:
            if self.context.getAnalyses(getPointOfCapture=poc):
                t = self.createAnalysesView(
                    ar,
                    self.request,
                    getPointOfCapture=poc,
                    show_categories=self.context.bika_setup.
                    getCategoriseAnalysisServices())
                t.allow_edit = True
                t.form_id = "%s_analyses" % poc
                t.review_states[0]['transitions'] = [{
                    'id': 'submit'
                }, {
                    'id': 'retract'
                }, {
                    'id': 'verify'
                }]
                t.show_workflow_action_buttons = True
                t.show_select_column = True
                if getSecurityManager().checkPermission(EditFieldResults, self.context) \
                   and poc == 'field':
                    t.review_states[0]['columns'].remove('DueDate')
                self.tables[POINTS_OF_CAPTURE.getValue(
                    poc)] = t.contents_table()
        # Un-captured field analyses may cause confusion
        if ar.getAnalyses(getPointOfCapture='field',
                          review_state=['sampled', 'sample_due']):
            message = _("There are field analyses without submitted results.")
            self.addMessage(message, 'info')
        # Create QC Analyses View for this AR
        show_cats = self.context.bika_setup.getCategoriseAnalysisServices()
        qcview = self.createQCAnalyesView(ar,
                                          self.request,
                                          show_categories=show_cats)
        qcview.allow_edit = False
        qcview.show_select_column = False
        qcview.show_workflow_action_buttons = False
        qcview.form_id = "%s_qcanalyses"
        qcview.review_states[0]['transitions'] = [{
            'id': 'submit'
        }, {
            'id': 'retract'
        }, {
            'id': 'verify'
        }]
        self.qctable = qcview.contents_table()

        # Create the ResultsInterpretation by department view
        from dependencies.dependency import ARResultsInterpretationView
        self.riview = ARResultsInterpretationView(ar, self.request)

        # If a general retract has been done, raise a warning
        if workflow.getInfoFor(ar, 'review_state') == 'sample_received':
            allstatus = list()
            for analysis in ar.getAnalyses():
                status = workflow.getInfoFor(analysis.getObject(),
                                             'review_state')
                if status not in ['retracted', 'to_be_verified', 'verified']:
                    allstatus = []
                    break
                else:
                    allstatus.append(status)
            if len(allstatus) > 0:
                self.addMessage("General Retract Done", 'warning')

        # If this is a retracted AR, show a warning and the link to the child AR
        if workflow.getInfoFor(ar, 'review_state') == 'invalid':
            childar = hasattr(ar, 'getChildAnalysisRequest') \
                        and ar.getChildAnalysisRequest() or None
            message = _(
                'These results have been withdrawn and are '
                'listed here for traceability purposes. Please follow '
                'the link to the retest')
            if childar:
                message = (message + " %s.") % childar.getRequestID()
            else:
                message = message + "."
            self.addMessage(message, 'warning')
        # If this AR was automatically generated due to a retraction, show
        # its parent AR information
        if hasattr(ar, 'getParentAnalysisRequest') \
            and ar.getParentAnalysisRequest():
            par = ar.getParentAnalysisRequest()
            message = _(
                'This Analysis Request has been '
                'generated automatically due to '
                'the retraction of the Analysis '
                'Request ${retracted_request_id}.',
                mapping={'retracted_request_id': par.getRequestID()})
            self.addMessage(message, 'info')
        self.renderMessages()
        return self.template()
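The "general retract" check above warns only when every analysis of the AR is retracted, to_be_verified or verified; an equivalent sketch using all():

    states = [workflow.getInfoFor(an.getObject(), 'review_state')
              for an in ar.getAnalyses()]
    general_retract = bool(states) and all(
        s in ('retracted', 'to_be_verified', 'verified') for s in states)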
Ejemplo n.º 54
0
    def __init__(self, context, request, **kwargs):
        self.catalog = "bika_analysis_catalog"
        self.contentFilter = dict(kwargs)
        self.contentFilter['portal_type'] = 'Analysis'
        self.contentFilter['sort_on'] = 'sortable_title'
        self.context_actions = {}
        self.show_sort_column = False
        self.show_select_row = False
        self.show_select_column = False
        self.show_column_toggles = False
        self.pagesize = 999999
        self.form_id = 'analyses_form'

        self.portal = getToolByName(context, 'portal_url').getPortalObject()
        self.portal_url = self.portal.absolute_url()

        request.set('disable_plone.rightcolumn', 1)

        # each editable item needs its own allow_edit,
        # which is a list of field names.
        self.allow_edit = False

        self.columns = {
            'Service': {
                'title': _('Analysis'),
                'sortable': False},
            'Partition': {
                'title': _("Partition"),
                'sortable':False},
            'Method': {
                'title': _('Method'),
                'sortable': False,
                'toggle': True},
            'Instrument': {
                'title': _('Instrument'),
                'sortable': False,
                'toggle': True},
            'Analyst': {
                'title': _('Analyst'),
                'sortable': False,
                'toggle': True},
            'state_title': {
                'title': _('Status'),
                'sortable': False},
            'DetectionLimit': {
                'title': _('DL'),
                'sortable': False,
                'toggle': False},
            'Result': {
                'title': _('Result'),
                'input_width': '6',
                'input_class': 'ajax_calculate numeric',
                'sortable': False},
            'Specification': {
                'title': _('Specification'),
                'sortable': False},
            'ResultDM': {
                'title': _('Dry'),
                'sortable': False},
            'Uncertainty': {
                'title': _('+-'),
                'sortable': False},
            'retested': {
                'title': "<img title='%s' src='%s/++resource++bika.lims.images/retested.png'/>"%\
                    (t(_('Retested')), self.portal_url),
                'type':'boolean',
                'sortable': False},
            'Attachments': {
                'title': _('Attachments'),
                'sortable': False},
            'CaptureDate': {
                'title': _('Captured'),
                'index': 'getResultCaptureDate',
                'sortable':False},
            'DueDate': {
                'title': _('Due Date'),
                'index': 'getDueDate',
                'sortable':False},
        }

        self.review_states = [
            {
                'id':
                'default',
                'title':
                _('All'),
                'contentFilter': {},
                'columns': [
                    'Service', 'Partition', 'DetectionLimit', 'Result',
                    'Specification', 'Method', 'Instrument', 'Analyst',
                    'Uncertainty', 'CaptureDate', 'DueDate', 'state_title'
                ]
            },
        ]
        if not context.bika_setup.getShowPartitions():
            self.review_states[0]['columns'].remove('Partition')

        super(AnalysesView, self).__init__(
            context,
            request,
            show_categories=context.bika_setup.getCategoriseAnalysisServices(),
            expand_all_categories=True)
Ejemplo n.º 55
0
    def setupCatalogs(self, portal):
        # An item should belong to only one catalog; that way, looking it up
        # means first looking up *the* catalog in which it is indexed, and it
        # is also cheaper to index.

        def addIndex(cat, *args):
            try:
                cat.addIndex(*args)
            except:
                pass

        def addColumn(cat, col):
            try:
                cat.addColumn(col)
            except:
                pass

        # create lexicon
        wordSplitter = Empty()
        wordSplitter.group = 'Word Splitter'
        wordSplitter.name = 'Unicode Whitespace splitter'
        caseNormalizer = Empty()
        caseNormalizer.group = 'Case Normalizer'
        caseNormalizer.name = 'Unicode Case Normalizer'
        stopWords = Empty()
        stopWords.group = 'Stop Words'
        stopWords.name = 'Remove listed and single char words'
        elem = [wordSplitter, caseNormalizer, stopWords]
        zc_extras = Empty()
        zc_extras.index_type = 'Okapi BM25 Rank'
        zc_extras.lexicon_id = 'Lexicon'

        # bika_analysis_catalog

        bac = getToolByName(portal, 'bika_analysis_catalog', None)
        if bac == None:
            logger.warning('Could not find the bika_analysis_catalog tool.')
            return

        try:
            bac.manage_addProduct['ZCTextIndex'].manage_addLexicon(
                'Lexicon', 'Lexicon', elem)
        except:
            logger.warning(
                'Could not add ZCTextIndex to bika_analysis_catalog')
            pass

        at = getToolByName(portal, 'archetype_tool')
        at.setCatalogsByType('Analysis', ['bika_analysis_catalog'])
        at.setCatalogsByType('ReferenceAnalysis', ['bika_analysis_catalog'])
        at.setCatalogsByType('DuplicateAnalysis', ['bika_analysis_catalog'])

        addIndex(bac, 'path', 'ExtendedPathIndex', ('getPhysicalPath'))
        addIndex(bac, 'allowedRolesAndUsers', 'KeywordIndex')
        addIndex(bac, 'UID', 'FieldIndex')
        addIndex(bac, 'Title', 'FieldIndex')
        addIndex(bac, 'Description', 'ZCTextIndex', zc_extras)
        addIndex(bac, 'id', 'FieldIndex')
        addIndex(bac, 'Type', 'FieldIndex')
        addIndex(bac, 'portal_type', 'FieldIndex')
        addIndex(bac, 'created', 'DateIndex')
        addIndex(bac, 'Creator', 'FieldIndex')
        addIndex(bac, 'title', 'FieldIndex', 'Title')
        addIndex(bac, 'sortable_title', 'FieldIndex')
        addIndex(bac, 'description', 'FieldIndex', 'Description')
        addIndex(bac, 'review_state', 'FieldIndex')
        addIndex(bac, 'worksheetanalysis_review_state', 'FieldIndex')
        addIndex(bac, 'cancellation_state', 'FieldIndex')

        addIndex(bac, 'getDueDate', 'DateIndex')
        addIndex(bac, 'getDateSampled', 'DateIndex')
        addIndex(bac, 'getDateReceived', 'DateIndex')
        addIndex(bac, 'getResultCaptureDate', 'DateIndex')
        addIndex(bac, 'getDateAnalysisPublished', 'DateIndex')

        addIndex(bac, 'getClientUID', 'FieldIndex')
        addIndex(bac, 'getAnalyst', 'FieldIndex')
        addIndex(bac, 'getClientTitle', 'FieldIndex')
        addIndex(bac, 'getRequestID', 'FieldIndex')
        addIndex(bac, 'getClientOrderNumber', 'FieldIndex')
        addIndex(bac, 'getKeyword', 'FieldIndex')
        addIndex(bac, 'getServiceTitle', 'FieldIndex')
        addIndex(bac, 'getServiceUID', 'FieldIndex')
        addIndex(bac, 'getCategoryUID', 'FieldIndex')
        addIndex(bac, 'getCategoryTitle', 'FieldIndex')
        addIndex(bac, 'getPointOfCapture', 'FieldIndex')
        addIndex(bac, 'getSampleTypeUID', 'FieldIndex')
        addIndex(bac, 'getSamplePointUID', 'FieldIndex')
        addIndex(bac, 'getRawSamplePoints', 'KeywordIndex')
        addIndex(bac, 'getRawSampleTypes', 'KeywordIndex')
        addIndex(bac, 'getRetested', 'FieldIndex')
        addIndex(bac, 'getReferenceAnalysesGroupID', 'FieldIndex')

        addColumn(bac, 'path')
        addColumn(bac, 'UID')
        addColumn(bac, 'id')
        addColumn(bac, 'Type')
        addColumn(bac, 'portal_type')
        addColumn(bac, 'getObjPositionInParent')
        addColumn(bac, 'Title')
        addColumn(bac, 'Description')
        addColumn(bac, 'title')
        addColumn(bac, 'sortable_title')
        addColumn(bac, 'description')
        addColumn(bac, 'review_state')
        addColumn(bac, 'cancellation_state')
        addColumn(bac, 'getRequestID')
        addColumn(bac, 'getReferenceAnalysesGroupID')
        addColumn(bac, 'getResultCaptureDate')
        addColumn(bac, 'Priority')

        # bika_catalog

        bc = getToolByName(portal, 'bika_catalog', None)
        if bc is None:
            logger.warning('Could not find the bika_catalog tool.')
            return

        try:
            bc.manage_addProduct['ZCTextIndex'].manage_addLexicon(
                'Lexicon', 'Lexicon', elem)
        except Exception:
            # The lexicon may already exist from a previous run.
            logger.warning('Could not add ZCTextIndex lexicon to bika_catalog')

        at = getToolByName(portal, 'archetype_tool')
        at.setCatalogsByType('Batch', ['bika_catalog', 'portal_catalog'])
        at.setCatalogsByType('AnalysisRequest',
                             ['bika_catalog', 'portal_catalog'])
        at.setCatalogsByType('Sample', ['bika_catalog', 'portal_catalog'])
        at.setCatalogsByType('SamplePartition',
                             ['bika_catalog', 'portal_catalog'])
        at.setCatalogsByType('ReferenceSample',
                             ['bika_catalog', 'portal_catalog'])
        at.setCatalogsByType('Report', [
            'bika_catalog',
        ])
        at.setCatalogsByType('Worksheet', ['bika_catalog', 'portal_catalog'])

        addIndex(bc, 'path', 'ExtendedPathIndex', ('getPhysicalPath'))
        addIndex(bc, 'allowedRolesAndUsers', 'KeywordIndex')
        addIndex(bc, 'UID', 'FieldIndex')
        addIndex(bc, 'SearchableText', 'ZCTextIndex', zc_extras)
        addIndex(bc, 'Title', 'ZCTextIndex', zc_extras)
        addIndex(bc, 'Description', 'ZCTextIndex', zc_extras)
        addIndex(bc, 'id', 'FieldIndex')
        addIndex(bc, 'getId', 'FieldIndex')
        addIndex(bc, 'Type', 'FieldIndex')
        addIndex(bc, 'portal_type', 'FieldIndex')
        addIndex(bc, 'created', 'DateIndex')
        addIndex(bc, 'Creator', 'FieldIndex')
        addIndex(bc, 'getObjPositionInParent', 'GopipIndex')
        addIndex(bc, 'title', 'FieldIndex', 'Title')
        addIndex(bc, 'sortable_title', 'FieldIndex')
        addIndex(bc, 'description', 'FieldIndex', 'Description')
        addIndex(bc, 'review_state', 'FieldIndex')
        addIndex(bc, 'inactive_state', 'FieldIndex')
        addIndex(bc, 'worksheetanalysis_review_state', 'FieldIndex')
        addIndex(bc, 'cancellation_state', 'FieldIndex')

        addIndex(bc, 'getAnalysisCategory', 'KeywordIndex')
        addIndex(bc, 'getAnalysisService', 'KeywordIndex')
        addIndex(bc, 'getAnalyst', 'FieldIndex')
        addIndex(bc, 'getAnalysts', 'KeywordIndex')
        addIndex(bc, 'BatchDate', 'DateIndex')
        addIndex(bc, 'getClientOrderNumber', 'FieldIndex')
        addIndex(bc, 'getClientReference', 'FieldIndex')
        addIndex(bc, 'getClientSampleID', 'FieldIndex')
        addIndex(bc, 'getClientTitle', 'FieldIndex')
        addIndex(bc, 'getClientUID', 'FieldIndex')
        addIndex(bc, 'getContactTitle', 'FieldIndex')
        addIndex(bc, 'getDateDisposed', 'DateIndex')
        addIndex(bc, 'getDateExpired', 'DateIndex')
        addIndex(bc, 'getDateOpened', 'DateIndex')
        addIndex(bc, 'getDatePublished', 'DateIndex')
        addIndex(bc, 'getDateReceived', 'DateIndex')
        addIndex(bc, 'getDateSampled', 'DateIndex')
        addIndex(bc, 'getDisposalDate', 'DateIndex')
        addIndex(bc, 'getDueDate', 'DateIndex')
        addIndex(bc, 'getExpiryDate', 'DateIndex')
        addIndex(bc, 'getInvoiced', 'FieldIndex')
        addIndex(bc, 'getPreserver', 'FieldIndex')
        addIndex(bc, 'getProfilesTitle', 'FieldIndex')
        addIndex(bc, 'getReferenceDefinitionUID', 'FieldIndex')
        addIndex(bc, 'getRequestID', 'FieldIndex')
        addIndex(bc, 'getSampleID', 'FieldIndex')
        addIndex(bc, 'getSamplePointTitle', 'FieldIndex')
        addIndex(bc, 'getSamplePointUID', 'FieldIndex')
        addIndex(bc, 'getSampler', 'FieldIndex')
        addIndex(bc, 'getSampleTypeTitle', 'FieldIndex')
        addIndex(bc, 'getSampleTypeUID', 'FieldIndex')
        addIndex(bc, 'getSampleUID', 'FieldIndex')
        addIndex(bc, 'getSamplingDate', 'DateIndex')
        addIndex(bc, 'getServiceTitle', 'FieldIndex')
        addIndex(bc, 'getWorksheetTemplateTitle', 'FieldIndex')
        addIndex(bc, 'Priority', 'FieldIndex')
        addIndex(bc, 'BatchUID', 'FieldIndex')
        addColumn(bc, 'path')
        addColumn(bc, 'UID')
        addColumn(bc, 'id')
        addColumn(bc, 'Type')
        addColumn(bc, 'portal_type')
        addColumn(bc, 'creator')
        addColumn(bc, 'Created')
        addColumn(bc, 'Title')
        addColumn(bc, 'Description')
        addColumn(bc, 'sortable_title')
        addColumn(bc, 'review_state')
        addColumn(bc, 'inactive_state')
        addColumn(bc, 'cancellation_state')
        addColumn(bc, 'getAnalysts')
        addColumn(bc, 'getSampleID')
        addColumn(bc, 'getRequestID')
        addColumn(bc, 'getClientOrderNumber')
        addColumn(bc, 'getClientReference')
        addColumn(bc, 'getClientSampleID')
        addColumn(bc, 'getContactTitle')
        addColumn(bc, 'getClientTitle')
        addColumn(bc, 'getProfilesTitle')
        addColumn(bc, 'getSamplePointTitle')
        addColumn(bc, 'getSampleTypeTitle')
        addColumn(bc, 'getAnalysisCategory')
        addColumn(bc, 'getAnalysisService')
        addColumn(bc, 'getDatePublished')
        addColumn(bc, 'getDateReceived')
        addColumn(bc, 'getDateSampled')

        # bika_setup_catalog

        bsc = getToolByName(portal, 'bika_setup_catalog', None)
        if bsc is None:
            logger.warning('Could not find the bika_setup_catalog tool.')
            return

        try:
            bsc.manage_addProduct['ZCTextIndex'].manage_addLexicon(
                'Lexicon', 'Lexicon', elem)
        except Exception:
            # The lexicon may already exist from a previous run.
            logger.warning(
                'Could not add ZCTextIndex lexicon to bika_setup_catalog')

        at = getToolByName(portal, 'archetype_tool')
        at.setCatalogsByType('Department', [
            'bika_setup_catalog',
        ])
        at.setCatalogsByType('Container', [
            'bika_setup_catalog',
        ])
        at.setCatalogsByType('ContainerType', [
            'bika_setup_catalog',
        ])
        at.setCatalogsByType('AnalysisCategory', [
            'bika_setup_catalog',
        ])
        at.setCatalogsByType('AnalysisService',
                             ['bika_setup_catalog', 'portal_catalog'])
        at.setCatalogsByType('AnalysisSpec', [
            'bika_setup_catalog',
        ])
        at.setCatalogsByType('SampleCondition', ['bika_setup_catalog'])
        at.setCatalogsByType('SampleMatrix', [
            'bika_setup_catalog',
        ])
        at.setCatalogsByType('SampleType',
                             ['bika_setup_catalog', 'portal_catalog'])
        at.setCatalogsByType('SamplePoint',
                             ['bika_setup_catalog', 'portal_catalog'])
        at.setCatalogsByType('StorageLocation',
                             ['bika_setup_catalog', 'portal_catalog'])
        at.setCatalogsByType('SamplingDeviation', [
            'bika_setup_catalog',
        ])
        at.setCatalogsByType('Instrument', [
            'bika_setup_catalog',
        ])
        at.setCatalogsByType('InstrumentType', [
            'bika_setup_catalog',
        ])
        at.setCatalogsByType('Method',
                             ['bika_setup_catalog', 'portal_catalog'])
        at.setCatalogsByType('Multifile', ['bika_setup_catalog'])
        at.setCatalogsByType('AttachmentType', [
            'bika_setup_catalog',
        ])
        at.setCatalogsByType('Calculation',
                             ['bika_setup_catalog', 'portal_catalog'])
        at.setCatalogsByType('AnalysisProfile',
                             ['bika_setup_catalog', 'portal_catalog'])
        at.setCatalogsByType('ARTemplate',
                             ['bika_setup_catalog', 'portal_catalog'])
        at.setCatalogsByType('LabProduct',
                             ['bika_setup_catalog', 'portal_catalog'])
        at.setCatalogsByType('LabContact',
                             ['bika_setup_catalog', 'portal_catalog'])
        at.setCatalogsByType('Manufacturer',
                             ['bika_setup_catalog', 'portal_catalog'])
        at.setCatalogsByType('Preservation', [
            'bika_setup_catalog',
        ])
        at.setCatalogsByType('ReferenceDefinition',
                             ['bika_setup_catalog', 'portal_catalog'])
        at.setCatalogsByType('SRTemplate',
                             ['bika_setup_catalog', 'portal_catalog'])
        at.setCatalogsByType('SubGroup', [
            'bika_setup_catalog',
        ])
        at.setCatalogsByType('Supplier',
                             ['bika_setup_catalog', 'portal_catalog'])
        at.setCatalogsByType('Unit', [
            'bika_setup_catalog',
        ])
        at.setCatalogsByType('WorksheetTemplate',
                             ['bika_setup_catalog', 'portal_catalog'])
        at.setCatalogsByType('BatchLabel', [
            'bika_setup_catalog',
        ])
        at.setCatalogsByType('ARPriority', [
            'bika_setup_catalog',
        ])

        addIndex(bsc, 'path', 'ExtendedPathIndex', ('getPhysicalPath'))
        addIndex(bsc, 'allowedRolesAndUsers', 'KeywordIndex')
        addIndex(bsc, 'UID', 'FieldIndex')
        addIndex(bsc, 'SearchableText', 'ZCTextIndex', zc_extras)
        addIndex(bsc, 'Title', 'ZCTextIndex', zc_extras)
        addIndex(bsc, 'Description', 'ZCTextIndex', zc_extras)
        addIndex(bsc, 'id', 'FieldIndex')
        addIndex(bsc, 'getId', 'FieldIndex')
        addIndex(bsc, 'Type', 'FieldIndex')
        addIndex(bsc, 'portal_type', 'FieldIndex')
        addIndex(bsc, 'created', 'DateIndex')
        addIndex(bsc, 'Creator', 'FieldIndex')
        addIndex(bsc, 'getObjPositionInParent', 'GopipIndex')

        addIndex(bsc, 'title', 'FieldIndex', 'Title')
        addIndex(bsc, 'sortable_title', 'FieldIndex')
        addIndex(bsc, 'description', 'FieldIndex', 'Description')

        addIndex(bsc, 'review_state', 'FieldIndex')
        addIndex(bsc, 'inactive_state', 'FieldIndex')
        addIndex(bsc, 'cancellation_state', 'FieldIndex')

        addIndex(bsc, 'getAccredited', 'FieldIndex')
        addIndex(bsc, 'getAnalyst', 'FieldIndex')
        addIndex(bsc, 'getInstrumentType', 'FieldIndex')
        addIndex(bsc, 'getInstrumentTypeName', 'FieldIndex')
        addIndex(bsc, 'getBlank', 'FieldIndex')
        addIndex(bsc, 'getCalculationTitle', 'FieldIndex')
        addIndex(bsc, 'getCalculationUID', 'FieldIndex')
        addIndex(bsc, 'getCalibrationExpiryDate', 'FieldIndex')
        addIndex(bsc, 'getCategoryTitle', 'FieldIndex')
        addIndex(bsc, 'getCategoryUID', 'FieldIndex')
        addIndex(bsc, 'getClientUID', 'FieldIndex')
        addIndex(bsc, 'getDepartmentTitle', 'FieldIndex')
        addIndex(bsc, 'getDuplicateVariation', 'FieldIndex')
        addIndex(bsc, 'getFormula', 'FieldIndex')
        addIndex(bsc, 'getFullname', 'FieldIndex')
        addIndex(bsc, 'getHazardous', 'FieldIndex')
        addIndex(bsc, 'getInstrumentTitle', 'FieldIndex')
        addIndex(bsc, 'getKeyword', 'FieldIndex')
        addIndex(bsc, 'getManagerName', 'FieldIndex')
        addIndex(bsc, 'getManagerPhone', 'FieldIndex')
        addIndex(bsc, 'getManagerEmail', 'FieldIndex')
        addIndex(bsc, 'getMaxTimeAllowed', 'FieldIndex')
        addIndex(bsc, 'getModel', 'FieldIndex')
        addIndex(bsc, 'getName', 'FieldIndex')
        addIndex(bsc, 'getPointOfCapture', 'FieldIndex')
        addIndex(bsc, 'getPrice', 'FieldIndex')
        addIndex(bsc, 'getSamplePointTitle', 'KeywordIndex')
        addIndex(bsc, 'getSamplePointUID', 'FieldIndex')
        addIndex(bsc, 'getSampleTypeTitle', 'KeywordIndex')
        addIndex(bsc, 'getSampleTypeUID', 'FieldIndex')
        addIndex(bsc, 'getServiceTitle', 'FieldIndex')
        addIndex(bsc, 'getServiceUID', 'FieldIndex')
        addIndex(bsc, 'getTotalPrice', 'FieldIndex')
        addIndex(bsc, 'getUnit', 'FieldIndex')
        addIndex(bsc, 'getVATAmount', 'FieldIndex')
        addIndex(bsc, 'getVolume', 'FieldIndex')
        addIndex(bsc, 'sortKey', 'FieldIndex')
        addIndex(bsc, 'getMethodID', 'FieldIndex')
        addIndex(bsc, 'getDocumentID', 'FieldIndex')

        addColumn(bsc, 'path')
        addColumn(bsc, 'UID')
        addColumn(bsc, 'id')
        addColumn(bsc, 'getId')
        addColumn(bsc, 'Type')
        addColumn(bsc, 'portal_type')
        addColumn(bsc, 'getObjPositionInParent')

        addColumn(bsc, 'Title')
        addColumn(bsc, 'Description')
        addColumn(bsc, 'title')
        addColumn(bsc, 'sortable_title')
        addColumn(bsc, 'description')

        addColumn(bsc, 'review_state')
        addColumn(bsc, 'inactive_state')
        addColumn(bsc, 'cancellation_state')

        addColumn(bsc, 'getAccredited')
        addColumn(bsc, 'getInstrumentType')
        addColumn(bsc, 'getInstrumentTypeName')
        addColumn(bsc, 'getBlank')
        addColumn(bsc, 'getCalculationTitle')
        addColumn(bsc, 'getCalculationUID')
        addColumn(bsc, 'getCalibrationExpiryDate')
        addColumn(bsc, 'getCategoryTitle')
        addColumn(bsc, 'getCategoryUID')
        addColumn(bsc, 'getClientUID')
        addColumn(bsc, 'getDepartmentTitle')
        addColumn(bsc, 'getDuplicateVariation')
        addColumn(bsc, 'getFormula')
        addColumn(bsc, 'getFullname')
        addColumn(bsc, 'getHazardous')
        addColumn(bsc, 'getInstrumentTitle')
        addColumn(bsc, 'getKeyword')
        addColumn(bsc, 'getManagerName')
        addColumn(bsc, 'getManagerPhone')
        addColumn(bsc, 'getManagerEmail')
        addColumn(bsc, 'getMaxTimeAllowed')
        addColumn(bsc, 'getModel')
        addColumn(bsc, 'getName')
        addColumn(bsc, 'getPointOfCapture')
        addColumn(bsc, 'getPrice')
        addColumn(bsc, 'getSamplePointTitle')
        addColumn(bsc, 'getSamplePointUID')
        addColumn(bsc, 'getSampleTypeTitle')
        addColumn(bsc, 'getSampleTypeUID')
        addColumn(bsc, 'getServiceTitle')
        addColumn(bsc, 'getServiceUID')
        addColumn(bsc, 'getTotalPrice')
        addColumn(bsc, 'getUnit')
        addColumn(bsc, 'getVATAmount')
        addColumn(bsc, 'getVolume')
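
As a quick sanity check, the same idempotent pattern can be turned around to report which of the indexes and metadata columns registered above actually exist in a catalog. This is a minimal sketch, not part of the product; it assumes a Plone site object is available as portal, and report_missing_indexes is a hypothetical helper name.

from Products.CMFCore.utils import getToolByName


def report_missing_indexes(portal, catalog_name, wanted_indexes, wanted_columns):
    """Return the indexes and metadata columns that are still missing
    from the named catalog (hypothetical verification helper)."""
    catalog = getToolByName(portal, catalog_name, None)
    if catalog is None:
        return wanted_indexes, wanted_columns
    missing_indexes = [i for i in wanted_indexes if i not in catalog.indexes()]
    missing_columns = [c for c in wanted_columns if c not in catalog.schema()]
    return missing_indexes, missing_columns


# e.g. check a few of the bika_analysis_catalog entries added above:
# report_missing_indexes(portal, 'bika_analysis_catalog',
#                        ['getRequestID', 'getServiceUID'],
#                        ['getRequestID', 'Priority'])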
Ejemplo n.º 56
0
    def setupPortalContent(self, portal):
        """ Setup Bika site structure """

        wf = getToolByName(portal, 'portal_workflow')

        obj = portal._getOb('front-page')
        alsoProvides(obj, IHaveNoBreadCrumbs)
        mp = obj.manage_permission
        mp(permissions.View, ['Anonymous'], 1)

        # remove undesired content objects
        del_ids = []
        for obj_id in ['Members', 'news', 'events']:
            if obj_id in portal.objectIds():
                del_ids.append(obj_id)
        if del_ids:
            portal.manage_delObjects(ids=del_ids)

        # Reindex these objects here: importing them through GenericSetup does not.
        for obj_id in (
                'clients',
                'batches',
                'invoices',
                'pricelists',
                'bika_setup',
                'methods',
                'analysisrequests',
                'referencesamples',
                'samples',
                'supplyorders',
                'worksheets',
                'reports',
                'queries',
                'arimports',
        ):
            try:
                obj = portal._getOb(obj_id)
                obj.unmarkCreationFlag()
                obj.reindexObject()
            except Exception:
                # The folder may be absent in this site; skip it.
                pass

        bika_setup = portal._getOb('bika_setup')
        for obj_id in ('bika_analysiscategories', 'bika_analysisservices',
                       'bika_arpriorities', 'bika_attachmenttypes',
                       'bika_batchlabels', 'bika_calculations',
                       'bika_departments', 'bika_containers',
                       'bika_containertypes', 'bika_preservations',
                       'bika_instruments', 'bika_instrumenttypes',
                       'bika_analysisspecs', 'bika_analysisprofiles',
                       'bika_artemplates', 'bika_labcontacts',
                       'bika_labproducts', 'bika_manufacturers',
                       'bika_sampleconditions', 'bika_samplematrices',
                       'bika_samplingdeviations', 'bika_samplepoints',
                       'bika_sampletypes', 'bika_srtemplates',
                       'bika_storagelocations', 'bika_subgroups',
                       'bika_suppliers', 'bika_referencedefinitions',
                       'bika_worksheettemplates'):
            try:
                obj = bika_setup._getOb(obj_id)
                obj.unmarkCreationFlag()
                obj.reindexObject()
            except Exception:
                # The setup folder may be absent in this site; skip it.
                pass

        lab = bika_setup.laboratory
        lab.edit(title=_('Laboratory'))
        lab.unmarkCreationFlag()
        lab.reindexObject()
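
For context, methods like setupCatalogs and setupPortalContent are typically driven from a GenericSetup "import various" step. The sketch below only illustrates that wiring; the marker filename and the SiteSetup class name are assumptions for illustration and do not come from this listing.

def setupVarious(context):
    # Run only when this product's profile is being imported.
    if context.readDataFile('bika.lims_various.txt') is None:
        return
    portal = context.getSite()
    gen = SiteSetup()  # hypothetical class holding the methods shown above
    gen.setupCatalogs(portal)
    gen.setupPortalContent(portal)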
Ejemplo n.º 57
0
    def isManagementAllowed(self):
        mtool = getToolByName(self.context, 'portal_membership')
        return mtool.checkPermission(ManageWorksheets, self.context)
Ejemplo n.º 58
0
def read(context, request):
    # Extract the value of the CSRF authenticator from the hidden input tag.
    tag = AuthenticatorView(context, request).authenticator()
    pattern = r'<input .*name="(\w+)".*value="(\w+)"'
    _authenticator = re.match(pattern, tag).groups()[1]

    ret = {
        "url": router.url_for("read", force_external=True),
        "success": True,
        "error": False,
        "objects": [],
        "_authenticator": _authenticator,
    }
    debug_mode = True  # hard-coded; originally App.config.getConfiguration().debug_mode
    catalog_name = request.get("catalog_name", "portal_catalog")
    if not catalog_name:
        raise ValueError("bad or missing catalog_name: %r" % catalog_name)
    catalog = getToolByName(context, catalog_name)
    indexes = catalog.indexes()

    contentFilter = {}
    for index in indexes:
        if index in request:
            if index == 'review_state' and "{" in request[index]:
                continue
            contentFilter[index] = safe_unicode(request[index])
        if "%s[]" % index in request:
            value = request["%s[]" % index]
            contentFilter[index] = [safe_unicode(v) for v in value]

    if 'limit' in request:
        try:
            contentFilter['sort_limit'] = int(request["limit"])
        except ValueError:
            pass
    contentFilter['sort_on'] = request.get('sort_on', 'id')
    # sort order defaults to ascending
    contentFilter['sort_order'] = request.get('sort_order') or 'ascending'

    include_fields = get_include_fields(request)
    if debug_mode:
        logger.info("contentFilter: " + str(contentFilter))

    # Get matching objects from catalog
    proxies = catalog(**contentFilter)

    # batching items
    try:
        page_nr = int(request.get("page_nr", 0))
    except ValueError:
        page_nr = 0
    try:
        page_size = int(request.get("page_size", 10))
    except ValueError:
        page_size = 10
    # page_size == 0: show all
    if page_size == 0:
        page_size = len(proxies)
    first_item_nr = page_size * page_nr
    if first_item_nr > len(proxies):
        first_item_nr = 0
    page_proxies = proxies[first_item_nr:first_item_nr + page_size]
    for proxy in page_proxies:
        obj_data = {}

        # Place all proxy attributes into the result.
        obj_data.update(load_brain_metadata(proxy, include_fields))

        # Place all schema fields into the result.
        obj = proxy.getObject()
        obj_data.update(load_field_values(obj, include_fields))

        obj_data['path'] = "/".join(obj.getPhysicalPath())

        # call any adapters that care to modify this data.
        adapters = getAdapters((obj, ), IJSONReadExtender)
        for name, adapter in adapters:
            adapter(request, obj_data)

        ret['objects'].append(obj_data)

    ret['total_objects'] = len(proxies)
    ret['first_object_nr'] = first_item_nr
    last_object_nr = first_item_nr + len(page_proxies)
    if last_object_nr > ret['total_objects']:
        last_object_nr = ret['total_objects']
    ret['last_object_nr'] = last_object_nr

    if debug_mode:
        logger.info("{0} objects returned".format(len(ret['objects'])))
    return ret
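
The batching rules used above (page_size == 0 means "return everything"; an out-of-range page falls back to the first item) can be captured in a small pure helper. This is just an illustrative sketch of the arithmetic, not part of the JSON API, and batch_slice is a name chosen here for the example.

def batch_slice(total, page_nr=0, page_size=10):
    """Return the (start, end) slice indices for one page of total results,
    mirroring the batching arithmetic in read() above."""
    if page_size == 0:
        # 0 means "show all"
        page_size = total
    start = page_size * page_nr
    if start > total:
        # Out-of-range page: fall back to the first page.
        start = 0
    end = min(start + page_size, total)
    return start, end


# e.g. batch_slice(25, page_nr=2, page_size=10) returns (20, 25)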
Ejemplo n.º 59
0
    def _transitions(self):
        workflow = getToolByName(self.context, 'portal_workflow')
        return workflow.listActionInfos(object=self.context, max=1)
Ejemplo n.º 60
0
    def workflow_script_unassign(self):
        if skip(self, "unassign"):
            return
        workflow = getToolByName(self, 'portal_workflow')
        self.reindexObject(idxs=[
            "review_state",
        ])
        rc = getToolByName(self, REFERENCE_CATALOG)
        wsUID = self.REQUEST['context_uid']
        ws = rc.lookupObject(wsUID)

        # May need to promote the Worksheet's review_state
        #  if all other analyses are at a higher state than this one was.
        # (or maybe retract it if there are no analyses left)
        # Note: duplicates, controls and blanks have 'assigned' as a review_state.
        can_submit = True
        can_attach = True
        can_verify = True
        analyses = ws.getAnalyses()
        # The worksheet is flagged as empty if no analyses remain, or if the
        # ONE UNASSIGNED analysis left is this one: worksheet.removeAnalysis()
        # hasn't removed it from the layout yet at this stage.
        ws_empty = len(analyses) == 0
        if len(analyses) == 1 \
           and workflow.getInfoFor(analyses[0], 'review_state') == 'unassigned':
            ws_empty = True
        for a in analyses:
            a_state = workflow.getInfoFor(a, 'review_state')
            if a_state in \
               ('assigned', 'sample_due', 'sample_received',):
                can_submit = False
            else:
                if not ws.getAnalyst():
                    can_submit = False
            if a_state in \
               ('assigned', 'sample_due', 'sample_received', 'attachment_due',):
                can_attach = False
            if a_state in \
               ('assigned', 'sample_due', 'sample_received', 'attachment_due', 'to_be_verified',):
                can_verify = False
        if not ws_empty:
            # Note: the worksheet adds itself to the skiplist, so we take it
            #       off again to allow further promotions triggered by other
            #       analyses.
            if can_submit and workflow.getInfoFor(ws,
                                                  'review_state') == 'open':
                workflow.doActionFor(ws, 'submit')
                skip(ws, 'submit', unskip=True)
            if can_attach and workflow.getInfoFor(
                    ws, 'review_state') == 'attachment_due':
                workflow.doActionFor(ws, 'attach')
                skip(ws, 'attach', unskip=True)
            if can_verify and workflow.getInfoFor(
                    ws, 'review_state') == 'to_be_verified':
                self.REQUEST["workflow_skiplist"].append('verify all analyses')
                workflow.doActionFor(ws, 'verify')
                skip(ws, 'verify', unskip=True)
        else:
            if workflow.getInfoFor(ws, 'review_state') != 'open':
                workflow.doActionFor(ws, 'retract')
                skip(ws, 'retract', unskip=True)
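
The promotion rules applied in the loop above can be summarised as a small pure function over the remaining analyses' review states. This sketch only mirrors the checks visible in this method; worksheet_transitions is an illustrative name, not part of the product.

def worksheet_transitions(analysis_states, has_analyst=True):
    """Given the review_states of the analyses left on a worksheet, return
    which promotions (submit, attach, verify) would still be allowed."""
    can_submit = can_attach = can_verify = True
    for state in analysis_states:
        if state in ('assigned', 'sample_due', 'sample_received'):
            can_submit = False
        elif not has_analyst:
            # Submission also requires an analyst on the worksheet.
            can_submit = False
        if state in ('assigned', 'sample_due', 'sample_received',
                     'attachment_due'):
            can_attach = False
        if state in ('assigned', 'sample_due', 'sample_received',
                     'attachment_due', 'to_be_verified'):
            can_verify = False
    return can_submit, can_attach, can_verify


# e.g. worksheet_transitions(['verified', 'verified']) returns (True, True, True)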