Code Example #1
File: analysis.py Project: doun/Bika-LIMS
def ObjectInitializedEventHandler(instance, event):

    # This handler fires for DuplicateAnalysis because
    # DuplicateAnalysis also provides IAnalysis.
    # DuplicateAnalysis doesn't have analysis_workflow.
    if instance.portal_type == "DuplicateAnalysis":
        return

    workflow = getToolByName(instance, 'portal_workflow')

    ar = instance.aq_parent
    ar_state = workflow.getInfoFor(ar, 'review_state')
    ar_ws_state = workflow.getInfoFor(ar, 'worksheetanalysis_review_state')

    # Set the state of the analysis depending on the state of the AR.
    if ar_state in ('sample_registered', 'to_be_sampled', 'sampled',
                    'to_be_preserved', 'sample_due', 'sample_received'):
        changeWorkflowState(instance, "bika_analysis_workflow", ar_state)
    elif ar_state == 'to_be_verified':
        # Apply to AR only; we don't want this transition to cascade.
        if 'workflow_skiplist' not in ar.REQUEST:
            ar.REQUEST['workflow_skiplist'] = []
        ar.REQUEST['workflow_skiplist'].append("retract all analyses")
        workflow.doActionFor(ar, 'retract')
        ar.REQUEST['workflow_skiplist'].remove("retract all analyses")

    if ar_ws_state == 'assigned':
        workflow.doActionFor(ar, 'unassign')
        skip(ar, 'unassign', unskip=True)

    instance.updateDueDate()

    return
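Every example on this page calls changeWorkflowState to force an object into a target workflow state without firing a transition. As a minimal sketch of the idea (an assumption built on the standard CMF portal_workflow API, not the verbatim bika.lims helper), such a function rewrites the object's workflow status directly and then re-applies the security settings of the new state:

from DateTime import DateTime
from Products.CMFCore.utils import getToolByName


def changeWorkflowState(instance, wf_id, state_id, **kw):
    # Force 'instance' into 'state_id' of workflow 'wf_id', bypassing
    # transition guards (sketch only; the real helper records more metadata)
    wf_tool = getToolByName(instance, 'portal_workflow')
    workflow = wf_tool.getWorkflowById(wf_id)
    if workflow is None:
        return
    # Extra keyword arguments override the defaults, e.g. action="reinstate"
    status = {'action': kw.get('action', None),
              'actor': kw.get('actor', None),
              'comments': kw.get('comments', 'State set directly'),
              'review_state': state_id,
              'time': DateTime()}
    wf_tool.setStatusOf(wf_id, instance, status)
    # Re-apply the role mappings of the new state and reindex security
    workflow.updateRoleMappingsFor(instance)
    instance.reindexObject(idxs=['review_state', 'allowedRolesAndUsers'])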
Code Example #2
File: test_barcode_entry.py Project: xispa/bika.lims
    def test_ar_states_without_batch(self):
        wf = getToolByName(self.portal, 'portal_workflow')
        self.portal.REQUEST['entry'] = self.ar1.id
        self.portal.REQUEST['_authenticator'] = self.getAuthenticator()

        value = json.loads(barcode_entry(self.portal, self.portal.REQUEST)())
        if value.get('failure', False):
            self.fail('failure code in json return: ' + value['error'])
        state = wf.getInfoFor(self.ar1, 'review_state')
        self.assertEqual(state, 'sample_received',
                         'AR is in %s state; should be sample_received' % state)

        value = json.loads(barcode_entry(self.portal, self.portal.REQUEST)())
        if value.get('failure', False):
            self.fail('failure code in json return: ' + value['error'])
        expected = self.ar1.absolute_url() + "/manage_results"
        self.assertEqual(value['url'], expected,
                         "AR redirect should be  %s but it's %s" % (
                             expected, value['url']))

        changeWorkflowState(self.ar1, 'bika_ar_workflow', 'verified')
        wf.getWorkflowById('bika_ar_workflow').updateRoleMappingsFor(self.ar1)
        self.ar1.reindexObject(idxs=['allowedRolesAndUsers'])

        value = json.loads(barcode_entry(self.portal, self.portal.REQUEST)())
        if value.get('failure', False):
            self.fail('failure code in json return: ' + value['error'])
        expected = self.ar1.absolute_url()
        self.assertEqual(value['url'], expected,
                         "AR redirect should be  %s but it's %s" % (
                             expected, value['url']))
Code Example #3
File: events.py Project: senaite/senaite.storage
def after_recover(sample):
    """Unassigns the sample from its storage container and "recover". It also
    transitions the sample to its previous state before it was stored
    """
    container = _api.get_storage_sample(api.get_uid(sample))
    if container:
        container.remove_object(sample)
    else:
        logger.warn("Container for Sample {} not found".format(sample.getId()))

    # Transition the sample to the state before it was stored
    previous_state = get_previous_state(sample) or "sample_due"
    changeWorkflowState(sample, "bika_ar_workflow", previous_state)

    # Notify that the sample has been modified
    modified(sample)

    # Reindex the sample
    sample.reindexObject()

    # If the sample is a partition, try to promote to the primary
    primary = sample.getParentAnalysisRequest()
    if not primary:
        return

    # Recover primary sample if all its partitions have been recovered
    parts = primary.getDescendants()

    # Only partitions that are still in one of these states keep the
    # primary sample from being recovered.
    pending = ['stored']
    parts = filter(lambda part: api.get_review_status(part) in pending, parts)
    if not parts:
        # There are no partitions left, transition the primary
        do_action_for(primary, "recover")
Code Example #4
File: analysis.py Project: Espurna/senaite.core
def ObjectInitializedEventHandler(instance, event):

    wf_tool = getToolByName(instance, 'portal_workflow')

    ar = instance.getRequest()
    ar_state = wf_tool.getInfoFor(ar, 'review_state')
    ar_ws_state = wf_tool.getInfoFor(ar, 'worksheetanalysis_review_state')

    # Set the state of the analysis depending on the state of the AR.
    if ar_state in ('sample_registered', 'to_be_sampled', 'sampled',
                    'to_be_preserved', 'sample_due', 'sample_received'):
        changeWorkflowState(instance, "bika_analysis_workflow", ar_state)
    elif ar_state == 'to_be_verified':
        # Apply to AR only; we don't want this transition to cascade.
        if 'workflow_skiplist' not in ar.REQUEST:
            ar.REQUEST['workflow_skiplist'] = []
        ar.REQUEST['workflow_skiplist'].append("retract all analyses")
        wf_tool.doActionFor(ar, 'retract')
        ar.REQUEST['workflow_skiplist'].remove("retract all analyses")

    if ar_ws_state == 'assigned':
        # TODO workflow: analysis request can be 'assigned'?
        wf_tool.doActionFor(ar, 'unassign')
        skip(ar, 'unassign', unskip=True)

    return
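Several handlers above push a marker onto the request-local 'workflow_skiplist' before firing a transition so that cooperating event handlers do not cascade it. A rough sketch of that contract (an assumption for illustration; the real bika.lims skip() helper is richer and keys entries by object UID):

def is_on_skiplist(request, entry):
    # Cooperating transition handlers call this first and return early when
    # the entry is present, so the transition fired above does not cascade
    return entry in request.get('workflow_skiplist', [])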
Code Example #5
def create_retest(ar):
    """Creates a retest (Analysis Request) from an invalidated Analysis Request
    :param ar: The invalidated Analysis Request
    :type ar: IAnalysisRequest
    :rtype: IAnalysisRequest
    """
    if not ar:
        raise ValueError("Source Analysis Request cannot be None")

    if not IAnalysisRequest.providedBy(ar):
        raise ValueError("Type not supported: {}".format(repr(type(ar))))

    if ar.getRetest():
        # Do not allow the creation of another retest!
        raise ValueError("Retest already set")

    if not ar.isInvalid():
        # Analysis Request must be in 'invalid' state
        raise ValueError("Cannot create a retest from an Analysis Request "
                         "that is not invalid: {}".format(repr(ar)))

    # 0. Open the actions pool
    actions_pool = ActionHandlerPool.get_instance()
    actions_pool.queue_pool()

    # 1. Create the Retest (Analysis Request)
    ignore = ['Analyses', 'DatePublished', 'Invalidated', 'Sample']
    retest = _createObjectByType("AnalysisRequest", ar.aq_parent, tmpID())
    retest.setSample(ar.getSample())
    copy_field_values(ar, retest, ignore_fieldnames=ignore)
    renameAfterCreation(retest)

    # 2. Copy the analyses from the source
    intermediate_states = ['retracted', 'reflexed']
    for an in ar.getAnalyses(full_objects=True):
        if api.get_workflow_status_of(an) in intermediate_states:
            # Exclude intermediate analyses
            continue

        nan = _createObjectByType("Analysis", retest, an.getKeyword())

        # Make a copy
        ignore_fieldnames = ['DataAnalysisPublished']
        copy_field_values(an, nan, ignore_fieldnames=ignore_fieldnames)
        nan.unmarkCreationFlag()
        push_reindex_to_actions_pool(nan)

    # 3. Assign the source to retest
    retest.setInvalidated(ar)

    # 4. Transition the retest to "sample_received"!
    changeWorkflowState(retest, 'bika_ar_workflow', 'sample_received')

    # 5. Reindex and other stuff
    push_reindex_to_actions_pool(retest)
    push_reindex_to_actions_pool(retest.aq_parent)

    # 6. Resume the actions pool
    actions_pool.resume()
    return retest
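create_retest brackets its work between queue_pool() and resume() so that the reindex operations pushed to the pool are deferred and coalesced instead of being executed one by one. The pattern in outline (the import path is an assumption; the names appear in senaite.core under bika.lims):

from bika.lims.workflow import ActionHandlerPool, push_reindex_to_actions_pool


def update_and_reindex(objects):
    # Defer reindexing while a batch of changes is applied
    pool = ActionHandlerPool.get_instance()
    pool.queue_pool()
    try:
        for obj in objects:
            push_reindex_to_actions_pool(obj)  # queued, not executed yet
    finally:
        pool.resume()  # flush the pool: each object is reindexed once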
Code Example #6
File: analysis.py Project: xispa/bika.lims
def ObjectInitializedEventHandler(instance, event):

    wf_tool = getToolByName(instance, 'portal_workflow')

    ar = instance.getRequest()
    ar_state = wf_tool.getInfoFor(ar, 'review_state')
    ar_ws_state = wf_tool.getInfoFor(ar, 'worksheetanalysis_review_state')

    # Set the state of the analysis depending on the state of the AR.
    if ar_state in ('sample_registered',
                    'to_be_sampled',
                    'sampled',
                    'to_be_preserved',
                    'sample_due',
                    'sample_received'):
        changeWorkflowState(instance, "bika_analysis_workflow", ar_state)
    elif ar_state == 'to_be_verified':
        # Apply to AR only; we don't want this transition to cascade.
        if 'workflow_skiplist' not in ar.REQUEST:
            ar.REQUEST['workflow_skiplist'] = []
        ar.REQUEST['workflow_skiplist'].append("retract all analyses")
        wf_tool.doActionFor(ar, 'retract')
        ar.REQUEST['workflow_skiplist'].remove("retract all analyses")

    if ar_ws_state == 'assigned':
        # TODO workflow: analysis request can be 'assigned'?
        wf_tool.doActionFor(ar, 'unassign')
        skip(ar, 'unassign', unskip=True)

    return
Code Example #7
File: __init__.py Project: xispa/bika.lims
def SamplePrepTransitionEventHandler(instance, event):
    """Sample preparation is considered complete when the sampleprep workflow
    reaches a state which has no exit transitions.

    If the final state's ID is the same as any AnalysisRequest primary
    workflow ID, then the AnalysisRequest will be sent directly to that state.

    If the final state's ID is not found in the AR workflow, the AR will be
    transitioned to 'sample_received'.
    """
    if not event.transition:
        # creation doesn't have a 'transition'
        return

    if not event.new_state.getTransitions():
        # Is this the final (No exit transitions) state?
        wftool = getToolByName(instance, 'portal_workflow')
        primary_wf_name = list(ToolWorkflowChain(instance, wftool))[0]
        primary_wf = wftool.getWorkflowById(primary_wf_name)
        primary_wf_states = primary_wf.states.keys()
        if event.new_state.id in primary_wf_states:
            # final state name matches review_state in primary workflow:
            dst_state = event.new_state.id
        else:
            # fallback state:
            dst_state = 'sample_received'
        changeWorkflowState(instance, primary_wf_name, dst_state)
Code Example #8
    def test_ar_states_without_batch(self):
        wf = getToolByName(self.portal, 'portal_workflow')
        self.portal.REQUEST['entry'] = self.ar1.id
        self.portal.REQUEST['_authenticator'] = self.getAuthenticator()

        value = json.loads(barcode_entry(self.portal, self.portal.REQUEST)())
        if value.get('failure', False):
            self.fail('failure code in json return: ' + value['error'])
        state = wf.getInfoFor(self.ar1, 'review_state')
        self.assertEqual(state, 'sample_received',
                         'AR is in %s state; should be sample_received' % state)

        value = json.loads(barcode_entry(self.portal, self.portal.REQUEST)())
        if value.get('failure', False):
            self.fail('failure code in json return: ' + value['error'])
        expected = self.ar1.absolute_url() + "/manage_results"
        self.assertEqual(
            value['url'], expected,
            "AR redirect should be  %s but it's %s" % (expected, value['url']))

        changeWorkflowState(self.ar1, 'bika_ar_workflow', 'verified')
        wf.getWorkflowById('bika_ar_workflow').updateRoleMappingsFor(self.ar1)
        self.ar1.reindexObject(idxs=['allowedRolesAndUsers'])

        value = json.loads(barcode_entry(self.portal, self.portal.REQUEST)())
        if value.get('failure', False):
            self.fail('failure code in json return: ' + value['error'])
        expected = self.ar1.absolute_url()
        self.assertEqual(
            value['url'], expected,
            "AR redirect should be  %s but it's %s" % (expected, value['url']))
Code Example #9
    def workflow_action_retract_ar(self):

        # AR should be retracted
        # Can't transition inactive ARs
        if not api.is_active(self.context):
            message = _('Item is inactive.')
            self.context.plone_utils.addPortalMessage(message, 'info')
            self.request.response.redirect(self.context.absolute_url())
            return

        # 1. Copies the AR, linking the original one and vice versa
        ar = self.context
        newar = self.cloneAR(ar)

        # 2. The old AR gets a status of 'invalid'
        api.do_transition_for(ar, 'retract_ar')

        # 3. The new AR copy opens in status 'to be verified'
        changeWorkflowState(newar, 'bika_ar_workflow', 'to_be_verified')

        # 4. The system immediately alerts the client contacts who ordered
        # the results, per email and SMS, that a possible mistake has been
        # picked up and is under investigation.
        # As much information as possible is provided in the email,
        # including a link to the AR online.
        bika_setup = api.get_bika_setup()
        if bika_setup.getNotifyOnARRetract():
            self.notify_ar_retract(ar, newar)

        message = _('${items} invalidated.', mapping={'items': ar.getId()})
        self.context.plone_utils.addPortalMessage(message, 'warning')
        self.request.response.redirect(newar.absolute_url())
Code Example #10
def SamplePrepTransitionEventHandler(instance, event):
    """Sample preparation is considered complete when the sampleprep workflow
    reaches a state which has no exit transitions.

    If the final state's ID is the same as any AnalysisRequest primary
    workflow ID, then the AnalysisRequest will be sent directly to that state.

    If the final state's ID is not found in the AR workflow, the AR will be
    transitioned to 'sample_received'.
    """
    if not event.transition:
        # creation doesn't have a 'transition'
        return

    if not event.new_state.getTransitions():
        # Is this the final (No exit transitions) state?
        workflow = ploneapi.portal.get_tool("portal_workflow")
        primary_wf_name = list(ToolWorkflowChain(instance, workflow))[0]
        primary_wf = workflow.getWorkflowById(primary_wf_name)
        primary_wf_states = primary_wf.states.keys()
        if event.new_state.id in primary_wf_states:
            # final state name matches review_state in primary workflow:
            dst_state = event.new_state.id
        else:
            # fallback state:
            dst_state = 'sample_received'
        changeWorkflowState(instance, primary_wf_name, dst_state)
Code Example #11
File: workflow.py Project: bikalabs/bika.lims
    def workflow_action_retract_ar(self):

        # AR should be retracted
        # Can't transition inactive ARs
        if not api.is_active(self.context):
            message = _('Item is inactive.')
            self.context.plone_utils.addPortalMessage(message, 'info')
            self.request.response.redirect(self.context.absolute_url())
            return

        # 1. Copies the AR, linking the original one and vice versa
        ar = self.context
        newar = self.cloneAR(ar)

        # 2. The old AR gets a status of 'invalid'
        api.do_transition_for(ar, 'retract_ar')

        # 3. The new AR copy opens in status 'to be verified'
        changeWorkflowState(newar, 'bika_ar_workflow', 'to_be_verified')

        # 4. The system immediately alerts the client contacts who ordered
        # the results, per email and SMS, that a possible mistake has been
        # picked up and is under investigation.
        # As much information as possible is provided in the email,
        # including a link to the AR online.
        bika_setup = api.get_bika_setup()
        if bika_setup.getNotifyOnARRetract():
            self.notify_ar_retract(ar, newar)

        message = _('${items} invalidated.',
                    mapping={'items': ar.getRequestID()})
        self.context.plone_utils.addPortalMessage(message, 'warning')
        self.request.response.redirect(newar.absolute_url())
Code Example #12
File: analysis.py Project: cpang2/bikalims
def ObjectInitializedEventHandler(instance, event):

    # This is the easiest place to assign IRoutineAnalysis,
    # since other analysis types subclass Analysis.
    # (noLongerProvides cannot un-provide interfaces on the class itself)
    if instance.portal_type == 'Analysis':
        alsoProvides(instance, IRoutineAnalysis)

    wf_tool = getToolByName(instance, 'portal_workflow')

    ar = instance.aq_parent
    ar_state = wf_tool.getInfoFor(ar, 'review_state')
    ar_ws_state = wf_tool.getInfoFor(ar, 'worksheetanalysis_review_state')

    # Set the state of the analysis depending on the state of the AR.
    if ar_state in ('sample_registered', 'to_be_sampled', 'sampled',
                    'to_be_preserved', 'sample_due', 'sample_received'):
        changeWorkflowState(instance, "bika_analysis_workflow", ar_state)
    elif ar_state == 'to_be_verified':
        # Apply to AR only; we don't want this transition to cascade.
        if 'workflow_skiplist' not in ar.REQUEST:
            ar.REQUEST['workflow_skiplist'] = []
        ar.REQUEST['workflow_skiplist'].append("retract all analyses")
        wf_tool.doActionFor(ar, 'retract')
        ar.REQUEST['workflow_skiplist'].remove("retract all analyses")

    if ar_ws_state == 'assigned':
        wf_tool.doActionFor(ar, 'unassign')
        skip(ar, 'unassign', unskip=True)

    instance.updateDueDate()

    return
Code Example #13
File: workflow.py Project: doun/Bika-LIMS
    def workflow_action_save_analyses_button(self):
        form = self.request.form
        workflow = getToolByName(self.context, 'portal_workflow')
        action, came_from = WorkflowAction._get_form_workflow_action(self)
        # AR Manage Analyses: save Analyses
        ar = self.context
        sample = ar.getSample()
        objects = WorkflowAction._get_selected_items(self)
        if not objects:
            message = _("No analyses have been selected")
            self.context.plone_utils.addPortalMessage(message, 'info')
            self.destination_url = self.context.absolute_url() + "/analyses"
            self.request.response.redirect(self.destination_url)
            return
        Analyses = objects.keys()
        prices = form.get("Price", [None])[0]
        specs = {}
        if form.get("min", None):
            for service_uid in Analyses:
                specs[service_uid] = {
                    "min": form["min"][0][service_uid],
                    "max": form["max"][0][service_uid],
                    "error": form["error"][0][service_uid]
                }
        else:
            for service_uid in Analyses:
                specs[service_uid] = {"min": "", "max": "", "error": ""}
        new = ar.setAnalyses(Analyses, prices=prices, specs=specs)
        # link analyses and partitions
        # If Bika Setup > Analyses > 'Display individual sample
        # partitions' is checked, no Partitions available.
        # https://github.com/bikalabs/Bika-LIMS/issues/1030
        if 'Partition' in form:
            for service_uid, service in objects.items():
                part_id = form['Partition'][0][service_uid]
                part = sample[part_id]
                analysis = ar[service.getKeyword()]
                analysis.setSamplePartition(part)
                analysis.reindexObject()
        if new:
            for analysis in new:
                # if the AR has progressed past sample_received, we need to bring it back.
                ar_state = workflow.getInfoFor(ar, 'review_state')
                if ar_state in ('attachment_due', 'to_be_verified'):
                    # Apply to AR only; we don't want this transition to cascade.
                    ar.REQUEST['workflow_skiplist'].append(
                        "retract all analyses")
                    workflow.doActionFor(ar, 'retract')
                    ar.REQUEST['workflow_skiplist'].remove(
                        "retract all analyses")
                    ar_state = workflow.getInfoFor(ar, 'review_state')
                # Then we need to forward new analyses state
                analysis.updateDueDate()
                changeWorkflowState(analysis, 'bika_analysis_workflow',
                                    ar_state)

        message = PMF("Changes saved.")
        self.context.plone_utils.addPortalMessage(message, 'info')
        self.destination_url = self.context.absolute_url()
        self.request.response.redirect(self.destination_url)
Code Example #14
File: workflow.py Project: Adam-Brown/Bika-LIMS
    def workflow_action_save_analyses_button(self):
        form = self.request.form
        workflow = getToolByName(self.context, 'portal_workflow')
        action, came_from = WorkflowAction._get_form_workflow_action(self)
        # AR Manage Analyses: save Analyses
        ar = self.context
        sample = ar.getSample()
        objects = WorkflowAction._get_selected_items(self)
        if not objects:
            message = _("No analyses have been selected")
            self.context.plone_utils.addPortalMessage(message, 'info')
            self.destination_url = self.context.absolute_url() + "/analyses"
            self.request.response.redirect(self.destination_url)
            return
        Analyses = objects.keys()
        prices = form.get("Price", [None])[0]
        specs = {}
        if form.get("min", None):
            for service_uid in Analyses:
                specs[service_uid] = {
                    "min": form["min"][0][service_uid],
                    "max": form["max"][0][service_uid],
                    "error": form["error"][0][service_uid]
                }
        else:
            for service_uid in Analyses:
                specs[service_uid] = {"min": "", "max": "", "error": ""}
        new = ar.setAnalyses(Analyses, prices=prices, specs=specs)
        # link analyses and partitions
        # If Bika Setup > Analyses > 'Display individual sample
        # partitions' is checked, no Partitions available.
        # https://github.com/bikalabs/Bika-LIMS/issues/1030
        if 'Partition' in form:
            for service_uid, service in objects.items():
                part_id = form['Partition'][0][service_uid]
                part = sample[part_id]
                analysis = ar[service.getKeyword()]
                analysis.setSamplePartition(part)
                analysis.reindexObject()
        if new:
            for analysis in new:
                # if the AR has progressed past sample_received, we need to bring it back.
                ar_state = workflow.getInfoFor(ar, 'review_state')
                if ar_state in ('attachment_due', 'to_be_verified'):
                    # Apply to AR only; we don't want this transition to cascade.
                    ar.REQUEST['workflow_skiplist'].append("retract all analyses")
                    workflow.doActionFor(ar, 'retract')
                    ar.REQUEST['workflow_skiplist'].remove("retract all analyses")
                    ar_state = workflow.getInfoFor(ar, 'review_state')
                # Then we need to forward new analyses state
                analysis.updateDueDate()
                changeWorkflowState(analysis, 'bika_analysis_workflow', ar_state)

        message = PMF("Changes saved.")
        self.context.plone_utils.addPortalMessage(message, 'info')
        self.destination_url = self.context.absolute_url()
        self.request.response.redirect(self.destination_url)
Code Example #15
def after_retract(obj):
    """Function triggered after a 'retract' transition for the analysis passed
    in is performed. Retracting an analysis cause its transition to 'retracted'
    state and the creation of a new copy of the same analysis as a retest.
    Note that retraction only affects to single Analysis and has no other
    effect in the status of the Worksheet to which the Analysis is assigned or
    to the Analysis Request to which belongs (transition is never proomoted)
    This function is called automatically by
    bika.lims.workflow.AfterTransitionEventHandler
    """
    # TODO Workflow Analysis - review this function
    # Rename the analysis to make way for its successor.
    # Support multiple retractions by renaming to *-0, *-1, etc.
    parent = obj.aq_parent
    kw = obj.getKeyword()
    analyses = [
        x for x in parent.objectValues("Analysis")
        if x.getId().startswith(obj.getId())
    ]

    # LIMS-1290 - Analyst must be able to retract, which creates a new
    # Analysis.  So, _verifyObjectPaste permission check must be cancelled:
    parent._verifyObjectPaste = str
    # This is needed for tests:
    # https://docs.plone.org/develop/plone/content/rename.html
    # Testing warning: the rename mechanism relies on the persistent
    # attribute _p_jar being present on the content object. By default,
    # this is not the case in unit tests; you need to call
    # transaction.savepoint() to make _p_jar appear on persistent objects.
    # If you don't, you'll get a "CopyError" from manage_renameObject
    # saying the operation is not supported.
    transaction.savepoint()
    parent.manage_renameObject(kw, "{0}-{1}".format(kw, len(analyses)))
    delattr(parent, '_verifyObjectPaste')

    # Create new analysis from the retracted obj
    analysis = create_analysis(parent, obj)
    changeWorkflowState(analysis, "bika_analysis_workflow", "sample_received")

    # Assign the new analysis to this same worksheet, if any.
    ws = obj.getWorksheet()
    if ws:
        ws.addAnalysis(analysis)
    analysis.reindexObject()

    # retract our dependencies
    dependencies = obj.getDependencies()
    for dependency in dependencies:
        doActionFor(dependency, 'retract')

    # Retract our dependents
    dependents = obj.getDependents()
    for dependent in dependents:
        doActionFor(dependent, 'retract')

    _reindex_request(obj)
Code Example #16
File: events.py Project: xispa/bika.lims
def after_retract(obj):
    """Function triggered after a 'retract' transition for the analysis passed
    in is performed. Retracting an analysis cause its transition to 'retracted'
    state and the creation of a new copy of the same analysis as a retest.
    Note that retraction only affects to single Analysis and has no other
    effect in the status of the Worksheet to which the Analysis is assigned or
    to the Analysis Request to which belongs (transition is never proomoted)
    This function is called automatically by
    bika.lims.workflow.AfterTransitionEventHandler
    """
    # TODO Workflow Analysis - review this function
    # Rename the analysis to make way for its successor.
    # Support multiple retractions by renaming to *-0, *-1, etc.
    parent = obj.aq_parent
    kw = obj.getKeyword()
    analyses = [x for x in parent.objectValues("Analysis")
                if x.getId().startswith(obj.getId())]

    # LIMS-1290 - Analyst must be able to retract, which creates a new
    # Analysis.  So, _verifyObjectPaste permission check must be cancelled:
    parent._verifyObjectPaste = str
    # This is needed for tests:
    # https://docs.plone.org/develop/plone/content/rename.html
    # Testing warning: the rename mechanism relies on the persistent
    # attribute _p_jar being present on the content object. By default,
    # this is not the case in unit tests; you need to call
    # transaction.savepoint() to make _p_jar appear on persistent objects.
    # If you don't, you'll get a "CopyError" from manage_renameObject
    # saying the operation is not supported.
    transaction.savepoint()
    parent.manage_renameObject(kw, "{0}-{1}".format(kw, len(analyses)))
    delattr(parent, '_verifyObjectPaste')

    # Create new analysis from the retracted obj
    analysis = create_analysis(parent, obj)
    changeWorkflowState(
        analysis, "bika_analysis_workflow", "sample_received")

    # Assign the new analysis to this same worksheet, if any.
    ws = obj.getWorksheet()
    if ws:
        ws.addAnalysis(analysis)
    analysis.reindexObject()

    # retract our dependencies
    dependencies = obj.getDependencies()
    for dependency in dependencies:
        doActionFor(dependency, 'retract')

    # Retract our dependents
    dependents = obj.getDependents()
    for dependent in dependents:
        doActionFor(dependent, 'retract')

    _reindex_request(obj)
Code Example #17
def create_retest(ar):
    """Creates a retest (Analysis Request) from an invalidated Analysis Request
    :param ar: The invalidated Analysis Request
    :type ar: IAnalysisRequest
    :rtype: IAnalysisRequest
    """
    if not ar:
        raise ValueError("Source Analysis Request cannot be None")

    if not IAnalysisRequest.providedBy(ar):
        raise ValueError("Type not supported: {}".format(repr(type(ar))))

    if ar.getRetest():
        # Do not allow the creation of another retest!
        raise ValueError("Retest already set")

    if not ar.isInvalid():
        # Analysis Request must be in 'invalid' state
        raise ValueError(
            "Cannot create a retest from an Analysis Request that is not "
            "invalid: {}".format(repr(ar)))

    # 1. Create the Retest (Analysis Request)
    ignore = ['Analyses', 'DatePublished', 'Invalidated', 'Sample']
    retest = _createObjectByType("AnalysisRequest", ar.aq_parent, tmpID())
    retest.setSample(ar.getSample())
    copy_field_values(ar, retest, ignore_fieldnames=ignore)
    renameAfterCreation(retest)

    # 2. Copy the analyses from the source
    criteria = dict(full_objects=True, retracted=False, reflexed=False)
    for an in ar.getAnalyses(**criteria):
        nan = _createObjectByType("Analysis", retest, an.getKeyword())

        # Make a copy
        ignore_fieldnames = ['Verificators', 'DataAnalysisPublished']
        copy_field_values(an, nan, ignore_fieldnames=ignore_fieldnames)
        nan.unmarkCreationFlag()

        # Set the workflow state of the analysis to 'sample_received'. Since we
        # keep the results of the previous analyses, these will be preserved,
        # only awaiting for their submission
        changeWorkflowState(nan, 'bika_analysis_workflow', 'sample_received')
        nan.reindexObject()

    # 3. Assign the source to retest
    retest.setInvalidated(ar)

    # 4. Transition the retest to "sample_received"!
    changeWorkflowState(retest, 'bika_ar_workflow', 'sample_received')

    # 5. Reindex and other stuff
    retest.reindexObject()
    retest.aq_parent.reindexObject()
    return retest
Code Example #18
File: sample.py Project: Ammy2/Bika-LIMS
    def __call__(self):
        wf = getToolByName(self.context, 'portal_workflow')
        part = _createObjectByType("SamplePartition", self.context, tmpID())
        part.processForm()
        SamplingWorkflowEnabled = part.bika_setup.getSamplingWorkflowEnabled()
        # We force the object to have the same state as the parent
        sample_state = wf.getInfoFor(self.context, 'review_state')
        changeWorkflowState(part, "bika_sample_workflow", sample_state)
        self.request.RESPONSE.redirect(self.context.absolute_url() +
                                       "/partitions")
        return
Code Example #19
File: partitions.py Project: Lunga001/bika.lims-1
    def __call__(self):
        wf = getToolByName(self.context, 'portal_workflow')
        part = _createObjectByType("SamplePartition", self.context, tmpID())
        part.processForm()
        SamplingWorkflowEnabled = part.bika_setup.getSamplingWorkflowEnabled()
        # We force the object to have the same state as the parent
        sample_state = wf.getInfoFor(self.context, 'review_state')
        changeWorkflowState(part, "bika_sample_workflow", sample_state)
        self.request.RESPONSE.redirect(self.context.absolute_url() +
                                       "/partitions")
        return
Code Example #20
    def __call__(self):
        wf = getToolByName(self.context, 'portal_workflow')
        _id = self.context.invokeFactory(type_name='SamplePartition', id='tmp')
        part = self.context[_id]
        part.processForm()
        SamplingWorkflowEnabled = part.bika_setup.getSamplingWorkflowEnabled()
        # We force the object to have the same state as the parent
        sample_state = wf.getInfoFor(self.context, 'review_state')
        changeWorkflowState(part, "bika_sample_workflow", sample_state)
        self.request.RESPONSE.redirect(self.context.absolute_url() +
                                       "/partitions")
        return
Code Example #21
File: sample.py Project: socheathly/Bika-LIMS
    def __call__(self):
        wf = getToolByName(self.context, 'portal_workflow')
        _id = self.context.invokeFactory(type_name='SamplePartition',
                                         id='tmp')
        part = self.context[_id]
        part.processForm()
        SamplingWorkflowEnabled = part.bika_setup.getSamplingWorkflowEnabled()
        # We force the object to have the same state as the parent
        sample_state = wf.getInfoFor(self.context, 'review_state')
        changeWorkflowState(part, "bika_sample_workflow", sample_state)
        self.request.RESPONSE.redirect(self.context.absolute_url() +
                                       "/partitions")
        return
Code Example #22
def after_recover(sample):
    """Unassigns the sample from its storage container and "recover". It also
    transitions the sample to its previous state before it was stored
    """
    container = _api.get_storage_sample(api.get_uid(sample))
    if container:
        container.remove_object(sample)
    else:
        logger.warn("Container for Sample {} not found".format(sample.getId()))

    # Transition the sample to the state before it was stored
    previous_state = get_previous_state(sample, "stored") or "sample_received"
    changeWorkflowState(sample, "bika_ar_workflow", previous_state)
Code Example #23
File: analysis.py Project: tumiemosweu/senaite.core
def ObjectInitializedEventHandler(instance, event):

    wf_tool = getToolByName(instance, 'portal_workflow')

    ar = instance.getRequest()
    ar_state = wf_tool.getInfoFor(ar, 'review_state')

    # Set the state of the analysis depending on the state of the AR.
    if ar_state in ('sample_registered', 'to_be_sampled', 'sampled',
                    'to_be_preserved', 'sample_due', 'sample_received'):
        changeWorkflowState(instance, "bika_analysis_workflow", ar_state)
    elif ar_state == 'to_be_verified':
        # Apply to AR only; we don't want this transition to cascade.
        changeWorkflowState(ar, "bika_ar_workflow", "sample_received")

    return
Code Example #24
def after_reinstate(analysis_request):
    """Method triggered after a 'reinstate' transition for the Analysis Request
    passed in is performed. Sets its status to the last status before it was
    cancelled. Reinstates the descendant partitions and all the analyses
    associated to the analysis request as well.
    """
    do_action_to_descendants(analysis_request, "reinstate")
    do_action_to_analyses(analysis_request, "reinstate")

    # Force the transition to previous state before the request was cancelled
    prev_status = get_prev_status_from_history(analysis_request, "cancelled")
    changeWorkflowState(analysis_request,
                        AR_WORKFLOW_ID,
                        prev_status,
                        action="reinstate")
    analysis_request.reindexObject()
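after_reinstate restores the status the request had before it was cancelled. A sketch of what a history lookup such as get_prev_status_from_history presumably does (an assumption for illustration; the real helper may differ):

from Products.CMFCore.utils import getToolByName


def get_prev_status_from_history(instance, status):
    # Walk the workflow history backwards and return the most recent state
    # recorded before the given one
    wf_tool = getToolByName(instance, 'portal_workflow')
    history = wf_tool.getInfoFor(instance, 'review_history')
    states = [entry.get('review_state') for entry in history]
    for state in reversed(states):
        if state and state != status:
            return state
    return None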
Code Example #25
    def cloneAR(self, ar):
        newar = _createObjectByType("AnalysisRequest", ar.aq_parent, tmpID())
        newar.setSample(ar.getSample())
        ignore_fieldnames = [
            'Analyses', 'DatePublished', 'DatePublishedViewer',
            'ParentAnalysisRequest', 'ChildAnalysisRequest', 'Digest', 'Sample'
        ]
        copy_field_values(ar, newar, ignore_fieldnames=ignore_fieldnames)

        # Set the results for each AR analysis
        ans = ar.getAnalyses(full_objects=True)
        # If a whole AR is retracted and contains retracted Analyses, these
        # retracted analyses won't be created/shown in the new AR
        workflow = getToolByName(self, "portal_workflow")
        analyses = [
            x for x in ans
            if workflow.getInfoFor(x, "review_state") not in ("retracted",)
        ]
        for an in analyses:
            if hasattr(an, 'IsReflexAnalysis') and an.IsReflexAnalysis:
                # We don't want reflex analyses to be copied
                continue
            try:
                nan = _createObjectByType("Analysis", newar, an.getKeyword())
            except Exception as e:
                from bika.lims import logger
                logger.warn(
                    'Cannot create analysis %s inside %s (%s)' %
                    (an.getAnalysisService().Title(), newar, e))
                continue
            # Make a copy
            ignore_fieldnames = ['Verificators', 'DataAnalysisPublished']
            copy_field_values(an, nan, ignore_fieldnames=ignore_fieldnames)
            nan.unmarkCreationFlag()
            zope.event.notify(ObjectInitializedEvent(nan))
            changeWorkflowState(nan, 'bika_analysis_workflow',
                                'to_be_verified')
            nan.reindexObject()

        newar.reindexObject()
        newar.aq_parent.reindexObject()
        renameAfterCreation(newar)

        if hasattr(ar, 'setChildAnalysisRequest'):
            ar.setChildAnalysisRequest(newar)
        newar.setParentAnalysisRequest(ar)
        return newar
Code Example #26
def after_retract(obj):
    """Function triggered after a 'retract' transition for the analysis passed
    in is performed. Retracting an analysis cause its transition to 'retracted'
    state and the creation of a new copy of the same analysis as a retest.
    Note that retraction only affects to single Analysis and has no other
    effect in the status of the Worksheet to which the Analysis is assigned or
    to the Analysis Request to which belongs (transition is never proomoted)
    This function is called automatically by
    bika.lims.workflow.AfterTransitionEventHandler
    """
    # TODO Workflow Analysis - review this function
    # Rename the analysis to make way for its successor.
    # Support multiple retractions by renaming to *-0, *-1, etc.
    parent = obj.aq_parent
    kw = obj.getKeyword()
    analyses = [
        x for x in parent.objectValues("Analysis")
        if x.getId().startswith(obj.getId())
    ]

    # LIMS-1290 - Analyst must be able to retract, which creates a new
    # Analysis.  So, _verifyObjectPaste permission check must be cancelled:
    parent._verifyObjectPaste = str
    parent.manage_renameObject(kw, "{0}-{1}".format(kw, len(analyses)))
    delattr(parent, '_verifyObjectPaste')

    # Create new analysis from the retracted obj
    analysis = create_analysis(parent, obj)
    changeWorkflowState(analysis, "bika_analysis_workflow", "sample_received")

    # Assign the new analysis to this same worksheet, if any.
    ws = obj.getWorksheet()
    if ws:
        ws.addAnalysis(analysis)
    analysis.reindexObject()

    # retract our dependencies
    dependencies = obj.getDependencies()
    for dependency in dependencies:
        doActionFor(dependency, 'retract')

    # Retract our dependents
    dependents = obj.getDependents()
    for dependent in dependents:
        doActionFor(dependent, 'retract')

    _reindex_request(obj)
Code Example #27
File: analysis.py Project: xispa/bika.lims
def duplicateAnalysis(analysis):
    """
    Duplicate an analysis consist on creating a new analysis with
    the same analysis service for the same sample. It is used in
    order to reduce the error procedure probability because both
    results must be similar.
    :base: the analysis object used as the creation base.
    """
    ar = analysis.aq_parent
    kw = analysis.getKeyword()
    # Rename the analysis to make way for its successor.
    # Support multiple duplicates by renaming to *-0, *-1, etc.
    cnt = [x for x in ar.objectValues("Analysis") if x.getId().startswith(kw)]
    a_id = "{0}-{1}".format(kw, len(cnt))
    dup = create_analysis(ar, analysis, id=a_id, Retested=True)
    changeWorkflowState(dup, "bika_analysis_workflow", "sample_received")
    return dup
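A hypothetical caller, for illustration (assumes ar is an AnalysisRequest that already contains at least one analysis):

analyses = ar.objectValues("Analysis")
if analyses:
    dup = duplicateAnalysis(analyses[0])
    # The duplicate starts over in 'sample_received', awaiting a result;
    # its id is derived from the keyword, e.g. "Ca-1"
    print(dup.getId())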
Code Example #28
File: analysis.py Project: Espurna/senaite.core
def duplicateAnalysis(analysis):
    """
    Duplicate an analysis consist on creating a new analysis with
    the same analysis service for the same sample. It is used in
    order to reduce the error procedure probability because both
    results must be similar.
    :base: the analysis object used as the creation base.
    """
    ar = analysis.aq_parent
    kw = analysis.getKeyword()
    # Rename the analysis to make way for its successor.
    # Support multiple duplicates by renaming to *-0, *-1, etc.
    cnt = [x for x in ar.objectValues("Analysis") if x.getId().startswith(kw)]
    a_id = "{0}-{1}".format(kw, len(cnt))
    dup = create_analysis(ar, analysis, id=a_id, Retested=True)
    changeWorkflowState(dup, "bika_analysis_workflow", "sample_received")
    return dup
Code Example #29
def fix_items_stuck_in_sample_prep_states(portal, ut):
    """Removing sample preparation workflows from the system may have
    left some samples ARs and Analyses in the state 'sample_prep'.  These
    should be transitioned to 'sample_due'  so that they can be receieved
    normally.
    :param portal: portal object
    :return: None
    """
    wftool = api.get_tool('portal_workflow')
    catalog_ids = [
        'bika_catalog', 'bika_analysis_catalog',
        'bika_catalog_analysisrequest_listing'
    ]
    for catalog_id in catalog_ids:
        catalog = api.get_tool(catalog_id)
        brains = catalog(review_state='sample_prep')
        for brain in brains:
            instance = brain.getObject()
            wfid = get_workflows_for(instance)[0]
            wf = wftool[wfid]
            # get event properties for last event that is not sample_prep
            rh = wftool.getInfoFor(instance, 'review_history')
            event = [
                x for x in rh
                if 'prep' not in x['review_state'] and not x['comments']
            ][-1]
            state_id, action_id = event['review_state'], event['action']
            # set state
            changeWorkflowState(instance, wfid, state_id)
            # fire transition handler for the action that originally was fired.
            old_sdef = new_sdef = wf.states[state_id]
            if action_id is not None:
                tdef = wf.transitions[action_id]
                notify(
                    AfterTransitionEvent(instance, wf, old_sdef, new_sdef,
                                         tdef, event, {}))
            # check AR state matches the analyses
            if IAnalysisRequest.providedBy(instance):
                fix_ar_sample_workflow(instance)
        logger.info("Removed sample_prep state from {} items in {}.".format(
            len(brains), catalog_id))
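The upgrade step above depends on review_history entries carrying both the state and the action that produced it. For reference, that history can be inspected with the standard CMF API ('instance' here stands for any workflow-aware object):

wftool = api.get_tool('portal_workflow')
for entry in wftool.getInfoFor(instance, 'review_history'):
    print(entry.get('review_state'), entry.get('action'), entry.get('time'))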
Code Example #30
File: analysis.py Project: Ammy2/Bika-LIMS
def ObjectInitializedEventHandler(instance, event):

    # This handler fires for DuplicateAnalysis because
    # DuplicateAnalysis also provides IAnalysis.
    # DuplicateAnalysis doesn't have analysis_workflow.
    if instance.portal_type == "DuplicateAnalysis":
        return

    if instance.portal_type == 'Analysis':
        alsoProvides(instance, IRoutineAnalysis)

    workflow = getToolByName(instance, 'portal_workflow')

    ar = instance.aq_parent
    ar_state = workflow.getInfoFor(ar, 'review_state')
    ar_ws_state = workflow.getInfoFor(ar, 'worksheetanalysis_review_state')

    # Set the state of the analysis depending on the state of the AR.
    if ar_state in ('sample_registered',
                    'to_be_sampled',
                    'sampled',
                    'to_be_preserved',
                    'sample_due',
                    'sample_received'):
        changeWorkflowState(instance, "bika_analysis_workflow", ar_state)
    elif ar_state == 'to_be_verified':
        # Apply to AR only; we don't want this transition to cascade.
        if 'workflow_skiplist' not in ar.REQUEST:
            ar.REQUEST['workflow_skiplist'] = []
        ar.REQUEST['workflow_skiplist'].append("retract all analyses")
        workflow.doActionFor(ar, 'retract')
        ar.REQUEST['workflow_skiplist'].remove("retract all analyses")

    if ar_ws_state == 'assigned':
        workflow.doActionFor(ar, 'unassign')
        skip(ar, 'unassign', unskip=True)

    instance.updateDueDate()

    return
Code Example #31
File: analysis.py Project: AlcyonSuisse/bika.lims
def ObjectInitializedEventHandler(instance, event):

    # This is the easiest place to assign IRoutineAnalysis,
    # since other analysis types subclass Analysis.
    # (noLongerProvides cannot un-provide interfaces on the class itself)
    if instance.portal_type == 'Analysis':
        alsoProvides(instance, IRoutineAnalysis)

    wf_tool = getToolByName(instance, 'portal_workflow')

    ar = instance.aq_parent
    ar_state = wf_tool.getInfoFor(ar, 'review_state')
    ar_ws_state = wf_tool.getInfoFor(ar, 'worksheetanalysis_review_state')

    # Set the state of the analysis depending on the state of the AR.
    if ar_state in ('sample_registered',
                    'to_be_sampled',
                    'sampled',
                    'to_be_preserved',
                    'sample_due',
                    'sample_received'):
        changeWorkflowState(instance, "bika_analysis_workflow", ar_state)
    elif ar_state == 'to_be_verified':
        # Apply to AR only; we don't want this transition to cascade.
        if 'workflow_skiplist' not in ar.REQUEST:
            ar.REQUEST['workflow_skiplist'] = []
        ar.REQUEST['workflow_skiplist'].append("retract all analyses")
        wf_tool.doActionFor(ar, 'retract')
        ar.REQUEST['workflow_skiplist'].remove("retract all analyses")

    if ar_ws_state == 'assigned':
        wf_tool.doActionFor(ar, 'unassign')
        skip(ar, 'unassign', unskip=True)

    instance.updateDueDate()

    return
Code Example #32
def fix_ar_sample_workflow(brain_or_object):
    """Re-set the state of an AR, Sample and SamplePartition to match the
    least-early state of all contained valid/current analyses. Ignores
    retracted/rejected/cancelled analyses.
    """

    def log_change_state(ar_id, obj_id, src, dst):
        msg = "While fixing {ar_id}: " \
              "state changed for {obj_id}: " \
              "{src} -> {dst}".format(**locals())
        logger.info(msg)

    ar = get_object(brain_or_object)
    if not IAnalysisRequest.providedBy(ar):
        return

    wf = api.get_tool('portal_workflow')
    arwf = wf['bika_ar_workflow']
    anwf = wf['bika_analysis_workflow']
    swf = wf['bika_sample_workflow']
    ignored = ['retracted', 'rejected']

    tmp = filter(lambda x: x[0] not in ignored, arwf.states.items())
    arstates = OrderedDict(tmp)
    tmp = filter(lambda x: x[0] not in ignored, swf.states.items())
    samplestates = OrderedDict(tmp)
    tmp = filter(lambda x: x[0] in arstates, anwf.states.items())
    anstates = OrderedDict(tmp)

    # find least-early analysis state
    # !!! Assumes states in definitions are roughly ordered earliest to latest
    ar_dest_state = arstates.items()[0][0]
    for anstate in anstates:
        if ar.getAnalyses(review_state=anstate):
            ar_dest_state = anstate

    # Force state of AR
    ar_state = get_review_status(ar)
    if ar_state != ar_dest_state:
        changeWorkflowState(ar, arwf.id, ar_dest_state)
        log_change_state(ar.id, ar.id, ar_state, ar_dest_state)

    # Force state of Sample
    sample = ar.getSample()
    sample_state = get_review_status(sample)
    if ar_dest_state in samplestates:
        changeWorkflowState(sample, swf.id, ar_dest_state)
        log_change_state(ar.id, sample.id, sample_state, ar_dest_state)

        # Force states of Partitions
        for part in sample.objectValues():
            part_state = get_review_status(part)
            if get_review_status(part) != ar_dest_state:
                changeWorkflowState(part, swf.id, ar_dest_state)
                log_change_state(ar.id, part.id, part_state, ar_dest_state)
Code Example #33
File: workflow.py Project: zylinx/als.bika.lims
    def workflow_action_retract_ar(self):
        workflow = getToolByName(self.context, 'portal_workflow')
        # AR should be retracted
        # Can't transition inactive ARs
        if not isActive(self.context):
            message = _('Item is inactive.')
            self.context.plone_utils.addPortalMessage(message, 'info')
            self.request.response.redirect(self.context.absolute_url())
            return

        # 1. Copies the AR, linking the original one and vice versa
        ar = self.context
        newar = self.cloneAR(ar)

        # 2. The old AR gets a status of 'invalid'
        workflow.doActionFor(ar, 'retract_ar')

        # 3. The new AR copy opens in status 'to be verified'
        changeWorkflowState(newar, 'bika_ar_workflow', 'to_be_verified')

        # 4. The system immediately alerts the client contacts who ordered
        # the results, per email and SMS, that a possible mistake has been
        # picked up and is under investigation.
        # As much information as possible is provided in the email,
        # including a link to the AR online.
        laboratory = self.context.bika_setup.laboratory
        lab_address = "<br/>".join(laboratory.getPrintAddress())
        mime_msg = MIMEMultipart('related')
        mime_msg['Subject'] = t(_("Erroneus result publication from ${request_id}",
                                mapping={"request_id": ar.getRequestID()}))
        mime_msg['From'] = formataddr(
            (encode_header(laboratory.getName()),
             laboratory.getEmailAddress()))
        to = []
        contact = ar.getContact()
        if contact:
            to.append(formataddr((encode_header(contact.Title()),
                                   contact.getEmailAddress())))
        for cc in ar.getCCContact():
            formatted = formataddr((encode_header(cc.Title()),
                                   cc.getEmailAddress()))
            if formatted not in to:
                to.append(formatted)

        managers = self.context.portal_groups.getGroupMembers('LabManagers')
        for bcc in managers:
            user = self.portal.acl_users.getUser(bcc)
            if user:
                uemail = user.getProperty('email')
                ufull = user.getProperty('fullname')
                formatted = formataddr((encode_header(ufull), uemail))
                if formatted not in to:
                    to.append(formatted)
        mime_msg['To'] = ','.join(to)
        aranchor = "<a href='%s'>%s</a>" % (ar.absolute_url(),
                                            ar.getRequestID())
        naranchor = "<a href='%s'>%s</a>" % (newar.absolute_url(),
                                             newar.getRequestID())
        addremarks = ('addremarks' in self.request
                      and ar.getRemarks()) \
                    and ("<br/><br/>"
                         + _("Additional remarks:")
                         + "<br/>"
                         + ar.getRemarks().split("===")[1].strip()
                         + "<br/><br/>") \
                    or ''
        sub_d = dict(request_link=aranchor,
                     new_request_link=naranchor,
                     remarks=addremarks,
                     lab_address=lab_address)
        body = Template("Some errors have been detected in the results report "
                        "published from the Analysis Request $request_link. The Analysis "
                        "Request $new_request_link has been created automatically and the "
                        "previous has been invalidated.<br/>The possible mistake "
                        "has been picked up and is under investigation.<br/><br/>"
                        "$remarks $lab_address").safe_substitute(sub_d)
        msg_txt = MIMEText(safe_unicode(body).encode('utf-8'),
                           _subtype='html')
        mime_msg.preamble = 'This is a multi-part MIME message.'
        mime_msg.attach(msg_txt)
        try:
            host = getToolByName(self.context, 'MailHost')
            host.send(mime_msg.as_string(), immediate=True)
        except Exception as msg:
            message = _('Unable to send an email to alert lab '
                        'client contacts that the Analysis Request has been '
                        'retracted: ${error}',
                        mapping={'error': safe_unicode(msg)})
            self.context.plone_utils.addPortalMessage(message, 'warning')

        message = _('${items} invalidated.',
                    mapping={'items': ar.getRequestID()})
        self.context.plone_utils.addPortalMessage(message, 'warning')
        self.request.response.redirect(newar.absolute_url())
Code Example #34
File: workflow.py Project: bikalabs/bika.lims
    def workflow_action_retract_ar(self):
        workflow = getToolByName(self.context, "portal_workflow")
        # AR should be retracted
        # Can't transition inactive ARs
        if not isActive(self.context):
            message = _("Item is inactive.")
            self.context.plone_utils.addPortalMessage(message, "info")
            self.request.response.redirect(self.context.absolute_url())
            return

        # 1. Copies the AR, linking the original one and vice versa
        ar = self.context
        newar = self.cloneAR(ar)

        # 2. The old AR gets a status of 'invalid'
        workflow.doActionFor(ar, "retract_ar")

        # 3. The new AR copy opens in status 'to be verified'
        changeWorkflowState(newar, "bika_ar_workflow", "to_be_verified")

        # 4. The system immediately alerts the client contacts who ordered
        # the results, per email and SMS, that a possible mistake has been
        # picked up and is under investigation.
        # As much information as possible is provided in the email,
        # including a link to the AR online.
        laboratory = self.context.bika_setup.laboratory
        lab_address = "<br/>".join(laboratory.getPrintAddress())
        mime_msg = MIMEMultipart("related")
        mime_msg["Subject"] = t(
            _("Erroneus result publication from ${request_id}", mapping={"request_id": ar.getRequestID()})
        )
        mime_msg["From"] = formataddr((encode_header(laboratory.getName()), laboratory.getEmailAddress()))
        to = []
        contact = ar.getContact()
        if contact:
            to.append(formataddr((encode_header(contact.Title()), contact.getEmailAddress())))
        for cc in ar.getCCContact():
            formatted = formataddr((encode_header(cc.Title()), cc.getEmailAddress()))
            if formatted not in to:
                to.append(formatted)

        managers = self.context.portal_groups.getGroupMembers("LabManagers")
        for bcc in managers:
            user = self.portal.acl_users.getUser(bcc)
            if user:
                uemail = user.getProperty("email")
                ufull = user.getProperty("fullname")
                formatted = formataddr((encode_header(ufull), uemail))
                if formatted not in to:
                    to.append(formatted)
        mime_msg["To"] = ",".join(to)
        aranchor = "<a href='%s'>%s</a>" % (ar.absolute_url(), ar.getRequestID())
        naranchor = "<a href='%s'>%s</a>" % (newar.absolute_url(), newar.getRequestID())
        addremarks = (
            ("addremarks" in self.request and ar.getRemarks())
            and (
                "<br/><br/>"
                + _("Additional remarks:")
                + "<br/>"
                + ar.getRemarks().split("===")[1].strip()
                + "<br/><br/>"
            )
            or ""
        )
        sub_d = dict(request_link=aranchor, new_request_link=naranchor, remarks=addremarks, lab_address=lab_address)
        body = Template(
            "Some errors have been detected in the results report "
            "published from the Analysis Request $request_link. The Analysis "
            "Request $new_request_link has been created automatically and the "
            "previous has been invalidated.<br/>The possible mistake "
            "has been picked up and is under investigation.<br/><br/>"
            "$remarks $lab_address"
        ).safe_substitute(sub_d)
        msg_txt = MIMEText(safe_unicode(body).encode("utf-8"), _subtype="html")
        mime_msg.preamble = "This is a multi-part MIME message."
        mime_msg.attach(msg_txt)
        try:
            host = getToolByName(self.context, "MailHost")
            host.send(mime_msg.as_string(), immediate=True)
        except Exception as msg:
            message = _(
                "Unable to send an email to alert lab "
                "client contacts that the Analysis Request has been "
                "retracted: ${error}",
                mapping={"error": safe_unicode(msg)},
            )
            self.context.plone_utils.addPortalMessage(message, "warning")

        message = _("${items} invalidated.", mapping={"items": ar.getRequestID()})
        self.context.plone_utils.addPortalMessage(message, "warning")
        self.request.response.redirect(newar.absolute_url())
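
The notification logic above is mostly recipient bookkeeping plus standard MIME assembly, and it can be exercised outside of Plone. Below is a minimal, self-contained sketch of the same pattern using only the standard library; the lab and contact values are hypothetical stand-ins, not bika.lims API calls:

from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from email.utils import formataddr

def build_alert_message(lab_name, lab_email, recipients, body_html):
    # recipients is a list of (fullname, email) tuples; duplicates are
    # dropped while preserving order, mirroring the "formatted not in to"
    # checks in the handler above.
    mime_msg = MIMEMultipart("related")
    mime_msg["Subject"] = "Erroneous result publication"
    mime_msg["From"] = formataddr((lab_name, lab_email))
    to = []
    for fullname, email in recipients:
        formatted = formataddr((fullname, email))
        if formatted not in to:
            to.append(formatted)
    mime_msg["To"] = ",".join(to)
    mime_msg.preamble = "This is a multi-part MIME message."
    mime_msg.attach(MIMEText(body_html, _subtype="html"))
    return mime_msg

# The duplicate contact is only listed once:
msg = build_alert_message(
    "Acme Lab", "lab@example.com",
    [("Jane Doe", "jane@example.com"), ("Jane Doe", "jane@example.com")],
    "<p>Some errors have been detected...</p>")
print(msg["To"])  # Jane Doe <jane@example.com>
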
Code example #35
0
File: worksheet.py Project: sayan801/bika.lims
    def workflow_script_reject(self):
        """Copy real analyses to RejectAnalysis, with link to real
           create a new worksheet, with the original analyses, and new
           duplicates and references to match the rejected
           worksheet.
        """
        if skip(self, "reject"):
            return
        utils = getToolByName(self, 'plone_utils')
        workflow = self.portal_workflow

        def copy_src_fields_to_dst(src, dst):
            # These will be ignored when copying field values between analyses
            ignore_fields = ['UID',
                             'id',
                             'title',
                             'allowDiscussion',
                             'subject',
                             'description',
                             'location',
                             'contributors',
                             'creators',
                             'effectiveDate',
                             'expirationDate',
                             'language',
                             'rights',
                             'creation_date',
                             'modification_date',
                             'Layout',    # ws
                             'Analyses',  # ws
            ]
            fields = src.Schema().fields()
            for field in fields:
                fieldname = field.getName()
                if fieldname in ignore_fields:
                    continue
                getter = getattr(src, 'get'+fieldname,
                                 src.Schema().getField(fieldname).getAccessor(src))
                setter = getattr(dst, 'set'+fieldname,
                                 dst.Schema().getField(fieldname).getMutator(dst))
                if getter is None or setter is None:
                    # ComputedField
                    continue
                setter(getter())

        analysis_positions = {}
        for item in self.getLayout():
            analysis_positions[item['analysis_uid']] = item['position']
        old_layout = []
        new_layout = []

        # New worksheet
        worksheets = self.aq_parent
        new_ws = _createObjectByType('Worksheet', worksheets, tmpID())
        new_ws.unmarkCreationFlag()
        new_ws_id = renameAfterCreation(new_ws)
        copy_src_fields_to_dst(self, new_ws)
        new_ws.edit(
            Number = new_ws_id,
            Remarks = self.getRemarks()
        )

        # Objects are being created inside other contexts, but we want their
        # workflow handlers to be aware of which worksheet this is occurring in.
        # We save the worksheet in request['context_uid'].
        # We reset it again below. Be very sure that this is set to the
        # UID of the containing worksheet before invoking any transitions on
        # analyses.
        self.REQUEST['context_uid'] = new_ws.UID()

        # loop all analyses
        analyses = self.getAnalyses()
        new_ws_analyses = []
        old_ws_analyses = []
        for analysis in analyses:
            # Skip published or verified analyses
            review_state = workflow.getInfoFor(analysis, 'review_state', '')
            if review_state in ['published', 'verified', 'retracted']:
                # Look up the position before use; it is not otherwise
                # assigned on this branch.
                position = analysis_positions[analysis.UID()]
                old_ws_analyses.append(analysis.UID())
                old_layout.append({'position': position,
                                   'type':'a',
                                   'analysis_uid':analysis.UID(),
                                   'container_uid':analysis.aq_parent.UID()})
                continue
            # Normal analyses:
            # - Create matching RejectAnalysis inside old WS
            # - Link analysis to new WS in same position
            # - Copy all field values
            # - Clear analysis result, and set Retested flag
            if analysis.portal_type == 'Analysis':
                reject = _createObjectByType('RejectAnalysis', self, tmpID())
                reject.unmarkCreationFlag()
                reject_id = renameAfterCreation(reject)
                copy_src_fields_to_dst(analysis, reject)
                reject.setAnalysis(analysis)
                reject.reindexObject()
                analysis.edit(
                    Result = None,
                    Retested = True,
                )
                analysis.reindexObject()
                position = analysis_positions[analysis.UID()]
                old_ws_analyses.append(reject.UID())
                old_layout.append({'position': position,
                                   'type':'r',
                                   'analysis_uid':reject.UID(),
                                   'container_uid':self.UID()})
                new_ws_analyses.append(analysis.UID())
                new_layout.append({'position': position,
                                   'type':'a',
                                   'analysis_uid':analysis.UID(),
                                   'container_uid':analysis.aq_parent.UID()})
            # Reference analyses
            # - Create a new reference analysis in the new worksheet
            # - Transition the original analysis to 'rejected' state
            if analysis.portal_type == 'ReferenceAnalysis':
                service_uid = analysis.getService().UID()
                reference = analysis.aq_parent
                reference_type = analysis.getReferenceType()
                new_analysis_uid = reference.addReferenceAnalysis(service_uid,
                                                                  reference_type)
                position = analysis_positions[analysis.UID()]
                old_ws_analyses.append(analysis.UID())
                old_layout.append({'position': position,
                                   'type':reference_type,
                                   'analysis_uid':analysis.UID(),
                                   'container_uid':reference.UID()})
                new_ws_analyses.append(new_analysis_uid)
                new_layout.append({'position': position,
                                   'type':reference_type,
                                   'analysis_uid':new_analysis_uid,
                                   'container_uid':reference.UID()})
                workflow.doActionFor(analysis, 'reject')
                new_reference = reference.uid_catalog(UID=new_analysis_uid)[0].getObject()
                workflow.doActionFor(new_reference, 'assign')
                analysis.reindexObject()
            # Duplicate analyses
            # - Create a new duplicate inside the new worksheet
            # - Transition the original analysis to 'rejected' state
            if analysis.portal_type == 'DuplicateAnalysis':
                src_analysis = analysis.getAnalysis()
                ar = src_analysis.aq_parent
                service = src_analysis.getService()
                duplicate_id = new_ws.generateUniqueId('DuplicateAnalysis')
                new_duplicate = _createObjectByType('DuplicateAnalysis',
                                                    new_ws, duplicate_id)
                new_duplicate.unmarkCreationFlag()
                copy_src_fields_to_dst(analysis, new_duplicate)
                workflow.doActionFor(new_duplicate, 'assign')
                new_duplicate.reindexObject()
                position = analysis_positions[analysis.UID()]
                old_ws_analyses.append(analysis.UID())
                old_layout.append({'position': position,
                                   'type':'d',
                                   'analysis_uid':analysis.UID(),
                                   'container_uid':self.UID()})
                new_ws_analyses.append(new_duplicate.UID())
                new_layout.append({'position': position,
                                   'type':'d',
                                   'analysis_uid':new_duplicate.UID(),
                                   'container_uid':new_ws.UID()})
                workflow.doActionFor(analysis, 'reject')
                analysis.reindexObject()

        new_ws.setAnalyses(new_ws_analyses)
        new_ws.setLayout(new_layout)
        new_ws.replaces_rejected_worksheet = self.UID()
        for analysis in new_ws.getAnalyses():
            review_state = workflow.getInfoFor(analysis, 'review_state', '')
            if review_state == 'to_be_verified':
                changeWorkflowState(analysis, "bika_analysis_workflow", "sample_received")
        self.REQUEST['context_uid'] = self.UID()
        self.setLayout(old_layout)
        self.setAnalyses(old_ws_analyses)
        self.replaced_by = new_ws.UID()
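
The layout bookkeeping in the reject script is plain dictionary and list work: positions are captured once from the rejected worksheet, then reused so each analysis keeps its slot on both worksheets. A standalone sketch with hypothetical UIDs:

# Positions saved up-front from the rejected worksheet's layout,
# keyed by analysis UID (UIDs here are hypothetical).
analysis_positions = {"uid-a1": 1, "uid-a2": 2}

old_layout, new_layout = [], []

# A kept analysis stays in the same slot on both worksheets:
# a RejectAnalysis copy occupies the slot on the old worksheet ('r'),
# while the real analysis moves to the new worksheet ('a').
position = analysis_positions["uid-a1"]
old_layout.append({"position": position, "type": "r",
                   "analysis_uid": "uid-reject-a1",
                   "container_uid": "uid-old-ws"})
new_layout.append({"position": position, "type": "a",
                   "analysis_uid": "uid-a1",
                   "container_uid": "uid-ar-1"})

assert old_layout[0]["position"] == new_layout[0]["position"]
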
Code example #36
0
File: workflow.py Project: bikalabs/bika.lims
    def cloneAR(self, ar):
        newar = _createObjectByType("AnalysisRequest", ar.aq_parent, tmpID())
        newar.title = ar.title
        newar.description = ar.description
        newar.setContact(ar.getContact())
        newar.setCCContact(ar.getCCContact())
        newar.setCCEmails(ar.getCCEmails())
        newar.setBatch(ar.getBatch())
        newar.setTemplate(ar.getTemplate())
        newar.setProfile(ar.getProfile())
        newar.setSamplingDate(ar.getSamplingDate())
        newar.setSampleType(ar.getSampleType())
        newar.setSamplePoint(ar.getSamplePoint())
        newar.setStorageLocation(ar.getStorageLocation())
        newar.setSamplingDeviation(ar.getSamplingDeviation())
        newar.setPriority(ar.getPriority())
        newar.setSampleCondition(ar.getSampleCondition())
        newar.setSample(ar.getSample())
        newar.setClientOrderNumber(ar.getClientOrderNumber())
        newar.setClientReference(ar.getClientReference())
        newar.setClientSampleID(ar.getClientSampleID())
        newar.setDefaultContainerType(ar.getDefaultContainerType())
        newar.setAdHoc(ar.getAdHoc())
        newar.setComposite(ar.getComposite())
        newar.setReportDryMatter(ar.getReportDryMatter())
        newar.setInvoiceExclude(ar.getInvoiceExclude())
        newar.setAttachment(ar.getAttachment())
        newar.setInvoice(ar.getInvoice())
        newar.setDateReceived(ar.getDateReceived())
        newar.setMemberDiscount(ar.getMemberDiscount())
        # Set the results for each AR analysis
        ans = ar.getAnalyses(full_objects=True)
        # If a whole AR is retracted and contains retracted Analyses, these
        # retracted analyses won't be created/shown in the new AR
        workflow = getToolByName(self, "portal_workflow")
        analyses = [x for x in ans if workflow.getInfoFor(x, "review_state") not in ("retracted",)]
        for an in analyses:
            try:
                nan = _createObjectByType("Analysis", newar, an.getKeyword())
            except Exception as e:
                from bika.lims import logger

                logger.warn("Cannot create analysis %s inside %s (%s)" % an.getService().Title(), newar, e)
                continue
            nan.setService(an.getService())
            nan.setCalculation(an.getCalculation())
            nan.setInterimFields(an.getInterimFields())
            nan.setResult(an.getResult())
            nan.setResultDM(an.getResultDM())
            nan.setRetested(False)
            nan.setMaxTimeAllowed(an.getMaxTimeAllowed())
            nan.setDueDate(an.getDueDate())
            nan.setDuration(an.getDuration())
            nan.setReportDryMatter(an.getReportDryMatter())
            nan.setAnalyst(an.getAnalyst())
            nan.setInstrument(an.getInstrument())
            nan.setSamplePartition(an.getSamplePartition())
            nan.unmarkCreationFlag()
            zope.event.notify(ObjectInitializedEvent(nan))
            changeWorkflowState(nan, "bika_analysis_workflow", "to_be_verified")
            nan.reindexObject()

        newar.reindexObject()
        newar.aq_parent.reindexObject()
        renameAfterCreation(newar)
        newar.setRequestID(newar.getId())

        if hasattr(ar, "setChildAnalysisRequest"):
            ar.setChildAnalysisRequest(newar)
        newar.setParentAnalysisRequest(ar)
        return newar
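
One pitfall the retracted-state filter above guards against: without a trailing comma, ("retracted") is just a parenthesized string, so a membership test against it silently becomes a substring check. A contrived demonstration (the state value is hypothetical):

state = "act"  # hypothetical review_state value

# Parenthesized string: this is a substring test on "retracted",
# and "act" happens to be a substring, so the filter would drop it.
print(state not in ("retracted"))   # False
# One-element tuple: a true membership test, as intended.
print(state not in ("retracted",))  # True
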
Code example #37
0
def batch_publish(instance):
    workflow = getToolByName(instance, 'portal_workflow')
    for ar in instance.getAnalysisRequests():
        for a in ar.getAnalyses(full_objects=True):
            changeWorkflowState(a, 'bika_analysis_workflow', 'published')
        changeWorkflowState(ar, 'bika_ar_workflow', 'published')
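
batch_publish pushes every analysis and AR straight into 'published'. Unlike workflow.doActionFor, changeWorkflowState rewrites the stored state directly, so no guards, transitions, or event scripts run. A rough mock of that idea, purely illustrative and not the real bika.lims implementation:

class MockWorkflowAware(object):
    """Toy object holding per-workflow review states."""
    def __init__(self):
        self.workflow_history = {}

def mock_change_workflow_state(obj, wf_id, state_id):
    # Overwrite the status record for wf_id; no transition, no guard,
    # no event handlers -- which is why callers usually reindex and
    # update role mappings themselves afterwards.
    obj.workflow_history.setdefault(wf_id, []).append(
        {"review_state": state_id, "action": None})

ar = MockWorkflowAware()
mock_change_workflow_state(ar, "bika_ar_workflow", "published")
print(ar.workflow_history["bika_ar_workflow"][-1]["review_state"])  # published
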
Code example #38
0
File: v01_02_000.py Project: xispa/bika.lims
def fix_workflow_transitions(portal):
    """
    Replace target states from some workflow statuses
    """
    logger.info("Fixing workflow transitions...")
    tochange = [
        {'wfid': 'bika_duplicateanalysis_workflow',
         'trid': 'submit',
         'changes': {
             'new_state_id': 'to_be_verified',
             'guard_expr': ''
            },
         'update': {
             'catalog': CATALOG_ANALYSIS_LISTING,
             'portal_type': 'DuplicateAnalysis',
             'status_from': 'attachment_due',
             'status_to': 'to_be_verified'
            }
         }
    ]

    wtool = api.get_tool('portal_workflow')
    for item in tochange:
        wfid = item['wfid']
        trid = item['trid']
        workflow = wtool.getWorkflowById(wfid)
        transitions = workflow.transitions
        transition = transitions[trid]
        changes = item.get('changes', {})

        if 'new_state_id' in changes:
            new_state_id = changes['new_state_id']
            oldstate = transition.new_state_id
            logger.info(
                "Replacing target state '{0}' from '{1}.{2}' to {3}"
                    .format(oldstate, wfid, trid, new_state_id)
            )
            transition.new_state_id = new_state_id

        if 'guard_expr' in changes:
            new_guard = changes['guard_expr']
            if not new_guard:
                transition.guard = None
                logger.info(
                    "Removing guard expression from '{0}.{1}'"
                        .format(wfid, trid))
            else:
                guard = transition.getGuard()
                guard.expr = Expression(new_guard)
                transition.guard = guard
                logger.info(
                    "Replacing guard expression from '{0}.{1}' to {2}"
                        .format(wfid, trid, new_guard))

        update = item.get('update', {})
        if update:
            catalog_id = update['catalog']
            portal_type = update['portal_type']
            catalog = api.get_tool(catalog_id)
            brains = catalog(portal_type=portal_type)
            for brain in brains:
                obj = api.get_object(brain)
                if 'status_from' in update and 'status_to' in update:
                    status_from = update['status_from']
                    status_to = update['status_to']
                    if status_from == brain.review_state:
                        logger.info(
                            "Changing status for {0} from '{1} to {2}"
                                .format(obj.getId(), status_from, status_to))
                        changeWorkflowState(obj, wfid, status_to)

                workflow.updateRoleMappingsFor(obj)
                obj.reindexObject()
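
The upgrade step is data-driven: each tochange entry names a workflow, a transition, the changes to apply, and an optional catalog sweep. That shape is easy to unit-test with stubs; a sketch with hypothetical stand-ins for the DCWorkflow objects, covering only the new_state_id change:

class StubTransition(object):
    def __init__(self, new_state_id):
        self.new_state_id = new_state_id

class StubWorkflow(object):
    def __init__(self, transitions):
        self.transitions = transitions

def apply_transition_changes(workflow, trid, changes):
    """Apply only the 'new_state_id' part of a tochange entry."""
    transition = workflow.transitions[trid]
    if "new_state_id" in changes:
        transition.new_state_id = changes["new_state_id"]

wf = StubWorkflow({"submit": StubTransition("attachment_due")})
apply_transition_changes(wf, "submit", {"new_state_id": "to_be_verified"})
print(wf.transitions["submit"].new_state_id)  # to_be_verified
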
Code example #39
0
File: worksheet.py Project: naralabs/bika.lims
    def workflow_script_reject(self):
        """Copy real analyses to RejectAnalysis, with link to real
           create a new worksheet, with the original analyses, and new
           duplicates and references to match the rejected
           worksheet.
        """
        if skip(self, "reject"):
            return
        utils = getToolByName(self, "plone_utils")
        workflow = self.portal_workflow

        def copy_src_fields_to_dst(src, dst):
            # These will be ignored when copying field values between analyses
            ignore_fields = [
                "UID",
                "id",
                "title",
                "allowDiscussion",
                "subject",
                "description",
                "location",
                "contributors",
                "creators",
                "effectiveDate",
                "expirationDate",
                "language",
                "rights",
                "creation_date",
                "modification_date",
                "Layout",  # ws
                "Analyses",  # ws
            ]
            fields = src.Schema().fields()
            for field in fields:
                fieldname = field.getName()
                if fieldname in ignore_fields:
                    continue
                getter = getattr(src, "get" + fieldname, src.Schema().getField(fieldname).getAccessor(src))
                setter = getattr(dst, "set" + fieldname, dst.Schema().getField(fieldname).getMutator(dst))
                if getter is None or setter is None:
                    # ComputedField
                    continue
                setter(getter())

        analysis_positions = {}
        for item in self.getLayout():
            analysis_positions[item["analysis_uid"]] = item["position"]
        old_layout = []
        new_layout = []

        # New worksheet
        worksheets = self.aq_parent
        new_ws = _createObjectByType("Worksheet", worksheets, tmpID())
        new_ws.unmarkCreationFlag()
        new_ws_id = renameAfterCreation(new_ws)
        copy_src_fields_to_dst(self, new_ws)
        new_ws.edit(Number=new_ws_id, Remarks=self.getRemarks())

        # Objects are being created inside other contexts, but we want their
        # workflow handlers to be aware of which worksheet this is occurring in.
        # We save the worksheet in request['context_uid'].
        # We reset it again below. Be very sure that this is set to the
        # UID of the containing worksheet before invoking any transitions on
        # analyses.
        self.REQUEST["context_uid"] = new_ws.UID()

        # loop all analyses
        analyses = self.getAnalyses()
        new_ws_analyses = []
        old_ws_analyses = []
        for analysis in analyses:
            # Skip published or verified analyses
            review_state = workflow.getInfoFor(analysis, "review_state", "")
            if review_state in ["published", "verified", "retracted"]:
                old_ws_analyses.append(analysis.UID())
                old_layout.append(
                    {
                        "position": position,
                        "type": "a",
                        "analysis_uid": analysis.UID(),
                        "container_uid": analysis.aq_parent.UID(),
                    }
                )
                continue
            # Normal analyses:
            # - Create matching RejectAnalysis inside old WS
            # - Link analysis to new WS in same position
            # - Copy all field values
            # - Clear analysis result, and set Retested flag
            if analysis.portal_type == "Analysis":
                reject = _createObjectByType("RejectAnalysis", self, tmpID())
                reject.unmarkCreationFlag()
                reject_id = renameAfterCreation(reject)
                copy_src_fields_to_dst(analysis, reject)
                reject.setAnalysis(analysis)
                reject.reindexObject()
                analysis.edit(Result=None, Retested=True)
                analysis.reindexObject()
                position = analysis_positions[analysis.UID()]
                old_ws_analyses.append(reject.UID())
                old_layout.append(
                    {"position": position, "type": "r", "analysis_uid": reject.UID(), "container_uid": self.UID()}
                )
                new_ws_analyses.append(analysis.UID())
                new_layout.append(
                    {
                        "position": position,
                        "type": "a",
                        "analysis_uid": analysis.UID(),
                        "container_uid": analysis.aq_parent.UID(),
                    }
                )
            # Reference analyses
            # - Create a new reference analysis in the new worksheet
            # - Transition the original analysis to 'rejected' state
            if analysis.portal_type == "ReferenceAnalysis":
                service_uid = analysis.getService().UID()
                reference = analysis.aq_parent
                reference_type = analysis.getReferenceType()
                new_analysis_uid = reference.addReferenceAnalysis(service_uid, reference_type)
                position = analysis_positions[analysis.UID()]
                old_ws_analyses.append(analysis.UID())
                old_layout.append(
                    {
                        "position": position,
                        "type": reference_type,
                        "analysis_uid": analysis.UID(),
                        "container_uid": reference.UID(),
                    }
                )
                new_ws_analyses.append(new_analysis_uid)
                new_layout.append(
                    {
                        "position": position,
                        "type": reference_type,
                        "analysis_uid": new_analysis_uid,
                        "container_uid": reference.UID(),
                    }
                )
                workflow.doActionFor(analysis, "reject")
                new_reference = reference.uid_catalog(UID=new_analysis_uid)[0].getObject()
                workflow.doActionFor(new_reference, "assign")
                analysis.reindexObject()
            # Duplicate analyses
            # - Create a new duplicate inside the new worksheet
            # - Transition the original analysis to 'rejected' state
            if analysis.portal_type == "DuplicateAnalysis":
                src_analysis = analysis.getAnalysis()
                ar = src_analysis.aq_parent
                service = src_analysis.getService()
                duplicate_id = new_ws.generateUniqueId("DuplicateAnalysis")
                new_duplicate = _createObjectByType("DuplicateAnalysis", new_ws, duplicate_id)
                new_duplicate.unmarkCreationFlag()
                copy_src_fields_to_dst(analysis, new_duplicate)
                workflow.doActionFor(new_duplicate, "assign")
                new_duplicate.reindexObject()
                position = analysis_positions[analysis.UID()]
                old_ws_analyses.append(analysis.UID())
                old_layout.append(
                    {"position": position, "type": "d", "analysis_uid": analysis.UID(), "container_uid": self.UID()}
                )
                new_ws_analyses.append(new_duplicate.UID())
                new_layout.append(
                    {
                        "position": position,
                        "type": "d",
                        "analysis_uid": new_duplicate.UID(),
                        "container_uid": new_ws.UID(),
                    }
                )
                workflow.doActionFor(analysis, "reject")
                analysis.reindexObject()

        new_ws.setAnalyses(new_ws_analyses)
        new_ws.setLayout(new_layout)
        new_ws.replaces_rejected_worksheet = self.UID()
        for analysis in new_ws.getAnalyses():
            review_state = workflow.getInfoFor(analysis, "review_state", "")
            if review_state == "to_be_verified":
                changeWorkflowState(analysis, "bika_analysis_workflow", "sample_received")
        self.REQUEST["context_uid"] = self.UID()
        self.setLayout(old_layout)
        self.setAnalyses(old_ws_analyses)
        self.replaced_by = new_ws.UID()
Code example #40
0
File: workflow.py Project: KaskMartin/Bika-LIMS
    def workflow_action_retract_ar(self):
        workflow = getToolByName(self.context, 'portal_workflow')
        # AR should be retracted
        # Can't transition inactive ARs
        if not isActive(self.context):
            message = _('Item is inactive.')
            self.context.plone_utils.addPortalMessage(message, 'info')
            self.request.response.redirect(self.context.absolute_url())
            return

        # 1. Copies the AR linking the original one and viceversa
        ar = self.context
        newar = self.cloneAR(ar)

        # 2. The old AR gets a status of 'invalid'
        workflow.doActionFor(ar, 'retract_ar')

        # 3. The new AR copy opens in status 'to be verified'
        changeWorkflowState(newar, 'bika_ar_workflow', 'to_be_verified')

        # 4. The system immediately alerts the client contacts who ordered
        # the results, by email and SMS, that a possible mistake has been
        # picked up and is under investigation.
        # As much information as possible is provided in the email,
        # including a link to the AR online.
        laboratory = self.context.bika_setup.laboratory
        lab_address = "<br/>".join(laboratory.getPrintAddress())
        mime_msg = MIMEMultipart('related')
        mime_msg['Subject'] = t(_("Erroneus result publication from ${request_id}",
                                mapping={"request_id": ar.getRequestID()}))
        mime_msg['From'] = formataddr(
            (encode_header(laboratory.getName()),
             laboratory.getEmailAddress()))
        to = []
        contact = ar.getContact()
        if contact:
            to.append(formataddr((encode_header(contact.Title()),
                                   contact.getEmailAddress())))
        for cc in ar.getCCContact():
            formatted = formataddr((encode_header(cc.Title()),
                                   cc.getEmailAddress()))
            if formatted not in to:
                to.append(formatted)

        managers = self.context.portal_groups.getGroupMembers('LabManagers')
        for bcc in managers:
            user = self.portal.acl_users.getUser(bcc)
            if user:
                uemail = user.getProperty('email')
                ufull = user.getProperty('fullname')
                formatted = formataddr((encode_header(ufull), uemail))
                if formatted not in to:
                    to.append(formatted)
        mime_msg['To'] = ','.join(to)
        aranchor = "<a href='%s'>%s</a>" % (ar.absolute_url(),
                                            ar.getRequestID())
        naranchor = "<a href='%s'>%s</a>" % (newar.absolute_url(),
                                             newar.getRequestID())
        addremarks = ('addremarks' in self.request
                      and ar.getRemarks()) \
                    and ("<br/><br/>"
                         + _("Additional remarks:")
                         + "<br/>"
                         + ar.getRemarks().split("===")[1].strip()
                         + "<br/><br/>") \
                    or ''

        body = _("Some errors have been detected in the results report "
                 "published from the Analysis Request ${request_link}. The Analysis "
                 "Request ${new_request_link} has been created automatically and the "
                 "previous has been invalidated.<br/>The possible mistake "
                 "has been picked up and is under investigation.<br/><br/>"
                 "${remarks}${lab_address}",
                 mapping={"request_link":aranchor,
                          "new_request_link":naranchor,
                          "remarks": addremarks,
                          "lab_address": lab_address})
        msg_txt = MIMEText(safe_unicode(body).encode('utf-8'),
                           _subtype='html')
        mime_msg.preamble = 'This is a multi-part MIME message.'
        mime_msg.attach(msg_txt)
        try:
            host = getToolByName(self.context, 'MailHost')
            host.send(mime_msg.as_string(), immediate=True)
        except Exception as msg:
            message = _('Unable to send an email to alert lab '
                        'client contacts that the Analysis Request has been '
                        'retracted: ${error}',
                        mapping={'error': safe_unicode(msg)})
            self.context.plone_utils.addPortalMessage(message, 'warning')

        message = _('${items} invalidated.',
                    mapping={'items': ar.getRequestID()})
        self.context.plone_utils.addPortalMessage(message, 'warning')
        self.request.response.redirect(newar.absolute_url())
Code example #41
0
File: batch.py Project: rockfruit/bika.uw
def batch_publish(instance):
    workflow = getToolByName(instance, 'portal_workflow')
    for ar in instance.getAnalysisRequests():
        for a in ar.getAnalyses(full_objects=True):
            changeWorkflowState(a, 'bika_analysis_workflow', 'published')
        changeWorkflowState(ar, 'bika_ar_workflow', 'published')
Code example #42
0
File: workflow.py Project: bikalabs/bika.lims
    def workflow_action_save_analyses_button(self):
        form = self.request.form
        workflow = getToolByName(self.context, "portal_workflow")
        bsc = self.context.bika_setup_catalog
        action, came_from = WorkflowAction._get_form_workflow_action(self)
        # AR Manage Analyses: save Analyses
        ar = self.context
        sample = ar.getSample()
        objects = WorkflowAction._get_selected_items(self)
        if not objects:
            message = _("No analyses have been selected")
            self.context.plone_utils.addPortalMessage(message, "info")
            self.destination_url = self.context.absolute_url() + "/analyses"
            self.request.response.redirect(self.destination_url)
            return
        Analyses = objects.keys()
        prices = form.get("Price", [None])[0]

        # Hidden analyses?
        # https://jira.bikalabs.com/browse/LIMS-1324
        outs = []
        hiddenans = form.get("Hidden", {})
        for uid in Analyses:
            hidden = hiddenans.get(uid, "")
            hidden = True if hidden == "on" else False
            outs.append({"uid": uid, "hidden": hidden})
        ar.setAnalysisServicesSettings(outs)

        specs = {}
        if form.get("min", None):
            for service_uid in Analyses:
                service = bsc(UID=service_uid)[0].getObject()
                keyword = service.getKeyword()
                specs[service_uid] = {
                    "min": form["min"][0][service_uid],
                    "max": form["max"][0][service_uid],
                    "error": form["error"][0][service_uid],
                    "keyword": keyword,
                    "uid": service_uid,
                }
        else:
            for service_uid in Analyses:
                service = bsc(UID=service_uid)[0].getObject()
                keyword = service.getKeyword()
                specs[service_uid] = {"min": "", "max": "", "error": "", "keyword": keyword, "uid": service_uid}
        new = ar.setAnalyses(Analyses, prices=prices, specs=specs.values())
        # link analyses and partitions
        # If Bika Setup > Analyses > 'Display individual sample
        # partitions' is checked, no Partitions available.
        # https://github.com/bikalabs/Bika-LIMS/issues/1030
        if "Partition" in form:
            for service_uid, service in objects.items():
                part_id = form["Partition"][0][service_uid]
                part = sample[part_id]
                analysis = ar[service.getKeyword()]
                analysis.setSamplePartition(part)
                analysis.reindexObject()

        if new:
            for analysis in new:
                # if the AR has progressed past sample_received, we need to bring it back.
                ar_state = workflow.getInfoFor(ar, "review_state")
                if ar_state in ("attachment_due", "to_be_verified"):
                    # Apply to AR only; we don't want this transition to cascade.
                    ar.REQUEST["workflow_skiplist"].append("retract all analyses")
                    workflow.doActionFor(ar, "retract")
                    ar.REQUEST["workflow_skiplist"].remove("retract all analyses")
                    ar_state = workflow.getInfoFor(ar, "review_state")
                # Then we need to forward new analyses state
                analysis.updateDueDate()
                changeWorkflowState(analysis, "bika_analysis_workflow", ar_state)

        message = PMF("Changes saved.")
        self.context.plone_utils.addPortalMessage(message, "info")
        self.destination_url = self.context.absolute_url()
        self.request.response.redirect(self.destination_url)
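
The Hidden handling above converts raw checkbox values ("on" or missing) into explicit per-service settings before calling setAnalysisServicesSettings. The conversion itself is plain Python; a sketch with hypothetical UIDs:

analyses = ["uid-1", "uid-2", "uid-3"]      # selected service UIDs
hiddenans = {"uid-1": "on", "uid-3": ""}    # raw form checkbox values

outs = []
for uid in analyses:
    hidden = hiddenans.get(uid, "")
    outs.append({"uid": uid, "hidden": hidden == "on"})

print(outs)
# [{'uid': 'uid-1', 'hidden': True}, {'uid': 'uid-2', 'hidden': False},
#  {'uid': 'uid-3', 'hidden': False}]
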
Code example #43
0
def create_partition(analysis_request,
                     request,
                     analyses,
                     sample_type=None,
                     container=None,
                     preservation=None,
                     skip_fields=None,
                     internal_use=True):
    """
    Creates a partition for the analysis_request (primary) passed in
    :param analysis_request: uid/brain/object of IAnalysisRequest type
    :param request: the current request object
    :param analyses: uids/brains/objects of IAnalysis type
    :param sample_type: uid/brain/object of SampleType
    :param container: uid/brain/object of Container
    :param preservation: uid/brain/object of Preservation
    :param skip_fields: names of fields to be skipped on copy from primary
    :param internal_use: whether the partition is flagged for internal use
    :return: the new partition
    """
    partition_skip_fields = [
        "Analyses",
        "Attachment",
        "Client",
        "DetachedFrom",
        "Profile",
        "Profiles",
        "RejectionReasons",
        "Remarks",
        "ResultsInterpretation",
        "ResultsInterpretationDepts",
        "Sample",
        "Template",
        "creation_date",
        "id",
        "modification_date",
        "ParentAnalysisRequest",
        "PrimaryAnalysisRequest",
    ]
    if skip_fields:
        partition_skip_fields.extend(skip_fields)
        partition_skip_fields = list(set(partition_skip_fields))

    # Copy field values from the primary analysis request
    ar = api.get_object(analysis_request)
    record = fields_to_dict(ar, partition_skip_fields)

    # Update with values that are partition-specific
    record.update({
        "InternalUse": internal_use,
        "ParentAnalysisRequest": api.get_uid(ar),
    })
    if sample_type is not None:
        record["SampleType"] = sample_type and api.get_uid(sample_type) or ""
    if container is not None:
        record["Container"] = container and api.get_uid(container) or ""
    if preservation is not None:
        record["Preservation"] = preservation and api.get_uid(
            preservation) or ""

    # Create the Partition
    client = ar.getClient()
    analyses = list(set(map(api.get_object, analyses)))
    services = map(lambda an: an.getAnalysisService(), analyses)

    # Populate the root's ResultsRanges to partitions
    results_ranges = ar.getResultsRange() or []
    partition = create_analysisrequest(client,
                                       request=request,
                                       values=record,
                                       analyses=services,
                                       results_ranges=results_ranges)

    # Reindex Parent Analysis Request
    ar.reindexObject(idxs=["isRootAncestor"])

    # Manually set the Date Received to match that of its parent. This is
    # necessary because create_analysisrequest calls processForm, and
    # DateReceived is not set because the partition has not been received yet
    partition.setDateReceived(ar.getDateReceived())
    partition.reindexObject(idxs="getDateReceived")

    # Force partition to same status as the primary
    status = api.get_workflow_status_of(ar)
    changeWorkflowState(partition, "bika_ar_workflow", status)
    if IReceived.providedBy(ar):
        alsoProvides(partition, IReceived)

    # And initialize the analyses the partition contains. This is required
    # here because the transition "initialize" of analyses rely on a guard,
    # so the initialization can only be performed when the sample has been
    # received (DateReceived is set)
    ActionHandlerPool.get_instance().queue_pool()
    for analysis in partition.getAnalyses(full_objects=True):
        doActionFor(analysis, "initialize")
    ActionHandlerPool.get_instance().resume()
    return partition
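
Merging the default skip list with caller-supplied skip_fields is the only subtle bookkeeping before the field copy. A standalone sketch; note that list(set(...)) discards ordering, which is harmless here because the list is only used for membership tests:

partition_skip_fields = ["Analyses", "Attachment", "Client", "id"]
skip_fields = ["Client", "Remarks"]  # hypothetical caller extras

merged = list(set(partition_skip_fields + skip_fields))

print(sorted(merged))
# ['Analyses', 'Attachment', 'Client', 'Remarks', 'id']
print("Client" in merged, "Remarks" in merged)  # True True
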
Code example #44
0
File: workflow.py Project: zylinx/als.bika.lims
    def cloneAR(self, ar):
        newar = _createObjectByType("AnalysisRequest", ar.aq_parent, tmpID())
        newar.title = ar.title
        newar.description = ar.description
        newar.setContact(ar.getContact())
        newar.setCCContact(ar.getCCContact())
        newar.setCCEmails(ar.getCCEmails())
        newar.setBatch(ar.getBatch())
        newar.setTemplate(ar.getTemplate())
        newar.setProfile(ar.getProfile())
        newar.setSamplingDate(ar.getSamplingDate())
        newar.setSampleType(ar.getSampleType())
        newar.setSamplePoint(ar.getSamplePoint())
        newar.setStorageLocation(ar.getStorageLocation())
        newar.setSamplingDeviation(ar.getSamplingDeviation())
        newar.setPriority(ar.getPriority())
        newar.setSampleCondition(ar.getSampleCondition())
        newar.setSample(ar.getSample())
        newar.setClientOrderNumber(ar.getClientOrderNumber())
        newar.setClientReference(ar.getClientReference())
        newar.setClientSampleID(ar.getClientSampleID())
        newar.setDefaultContainerType(ar.getDefaultContainerType())
        newar.setAdHoc(ar.getAdHoc())
        newar.setComposite(ar.getComposite())
        newar.setReportDryMatter(ar.getReportDryMatter())
        newar.setInvoiceExclude(ar.getInvoiceExclude())
        newar.setAttachment(ar.getAttachment())
        newar.setInvoice(ar.getInvoice())
        newar.setDateReceived(ar.getDateReceived())
        newar.setMemberDiscount(ar.getMemberDiscount())
        # Set the results for each AR analysis
        ans = ar.getAnalyses(full_objects=True)
        # If a whole AR is retracted and contains retracted Analyses, these
        # retracted analyses won't be created/shown in the new AR
        workflow = getToolByName(self, "portal_workflow")
        analyses = [x for x in ans
                    if workflow.getInfoFor(x, "review_state") not in ("retracted",)]
        for an in analyses:
            try:
                nan = _createObjectByType("Analysis", newar, an.getKeyword())
            except Exception as e:
                from bika.lims import logger
                logger.warn('Cannot create analysis %s inside %s (%s)' %
                            (an.getService().Title(), newar, e))
                continue
            nan.setService(an.getService())
            nan.setCalculation(an.getCalculation())
            nan.setInterimFields(an.getInterimFields())
            nan.setResult(an.getResult())
            nan.setResultDM(an.getResultDM())
            nan.setRetested(False)
            nan.setMaxTimeAllowed(an.getMaxTimeAllowed())
            nan.setDueDate(an.getDueDate())
            nan.setDuration(an.getDuration())
            nan.setReportDryMatter(an.getReportDryMatter())
            nan.setAnalyst(an.getAnalyst())
            nan.setInstrument(an.getInstrument())
            nan.setSamplePartition(an.getSamplePartition())
            nan.unmarkCreationFlag()
            zope.event.notify(ObjectInitializedEvent(nan))
            changeWorkflowState(nan, 'bika_analysis_workflow',
                                'to_be_verified')
            nan.reindexObject()

        newar.reindexObject()
        newar.aq_parent.reindexObject()
        renameAfterCreation(newar)
        newar.setRequestID(newar.getId())

        if hasattr(ar, 'setChildAnalysisRequest'):
            ar.setChildAnalysisRequest(newar)
        newar.setParentAnalysisRequest(ar)
        return newar
Code example #45
0
File: fiastar.py Project: udithap/Bika-LIMS
def Import(context, request):
    """ Read FIAStar analysis results
    """

    translate = context.translation_service.translate

    template = "fiastar_import.pt"

    csvfile = request.form["file"]

    pc = getToolByName(context, "portal_catalog")
    uc = getToolByName(context, "uid_catalog")
    bsc = getToolByName(context, "bika_setup_catalog")
    workflow = getToolByName(context, "portal_workflow")

    updateable_states = ["sample_received", "assigned", "not_requested"]
    now = DateTime().strftime("%Y%m%d-%H%M")

    res = {"errors": [], "log": []}

    options = {"dilute_factor": 1, "F SO2": "FSO2", "T SO2": "TSO2"}
    for k, v in options.items():
        if k in request:
            options[k] = request.get(k)
        else:
            options[k] = v

    # kw_map to lookup Fiastar parameter -> service keyword and vice versa
    kw_map = {}
    for param in ["F SO2", "T SO2"]:
        service = bsc(getKeyword=options[param])
        if not service:
            msg = _("Service keyword ${keyword} not found", mapping={"keyword": options[param]})
            res["errors"].append(translate(msg))
            continue
        service = service[0].getObject()
        kw_map[param] = service
        kw_map[service.getKeyword()] = param

    # all errors at this point are fatal ones
    if res["errors"]:
        return json.dumps(res)

    rows = []
    batch_headers = None
    fia1 = False
    fia2 = False
    # place all valid rows into list of dict by CSV row title
    for row in csvfile.readlines():
        if not row:
            continue
        row = row.split(";")
        # a new batch starts
        if row[0] == "List name":
            fia1 = False
            fia2 = False
            if row[13] == "Concentration":
                fia1 = True
            elif row[15] == "Concentration":
                row[13] = "Peak Mean"
                row[14] = "Peak St dev"
                row[16] = "Concentration Mean"
                row[17] = "Concentration St dev"
                fia2 = True
            fields = row
            continue
        row = dict(zip(fields, row))
        if row["Parameter"] == "sample" or not row["Concentration"]:
            continue
        if fia1:
            row["Peak Mean"] = 0
            row["Peak St dev"] = 0
            row["Concentration Mean"] = 0
            row["Concentration St dev"] = 0
        rows.append(row)

    log = []
    for row in rows:
        param = row["Parameter"]
        service = kw_map[param]
        keyword = service.getKeyword()
        calc = service.getCalculation()
        interim_fields = calc and calc.getInterimFields() or []

        p_uid = row["Sample name"]
        parent = uc(UID=p_uid)
        if len(parent) == 0:
            msg = _("Analysis parent UID ${parent_uid} not found", mapping={"parent_uid": row["Sample name"]})
            res["errors"].append(translate(msg))
            continue
        parent = parent[0].getObject()

        c_uid = row["Sample type"]
        container = uc(UID=c_uid)
        if len(container) == 0:
            msg = _("Analysis container UID ${parent_uid} not found", mapping={"container_uid": row["Sample type"]})
            res["errors"].append(translate(msg))
            continue
        container = container[0].getObject()

        # Duplicates.
        if p_uid != c_uid:
            dups = [
                d.getObject()
                for d in pc(
                    portal_type="DuplicateAnalysis", path={"query": "/".join(container.getPhysicalPath()), "level": 0}
                )
            ]
            # The analyses should exist already
            # or no results will be imported.
            analysis = None
            for dup in dups:
                # Compare the parent's UID, not the parent object, to the UID string
                if dup.getAnalysis().aq_parent.UID() == p_uid and dup.getKeyword() in (options["F SO2"], options["T SO2"]):
                    analysis = dup
            if not analysis:
                msg = _("Duplicate analysis for slot ${slot} not found", mapping={"slot": row["Cup"]})
                res["errors"].append(translate(msg))
                continue
            row["analysis"] = analysis
        else:
            analyses = parent.objectIds()
            if keyword in analyses:
                # analysis exists for this parameter.
                analysis = parent.get(keyword)
                row["analysis"] = analysis
            else:
                # analysis does not exist;
                # create new analysis and set 'results_not_requested' state
                parent.invokeFactory(type_name="Analysis", id=keyword)
                analysis = parent[keyword]
                analysis.edit(Service=service, InterimFields=interim_fields, MaxTimeAllowed=service.getMaxTimeAllowed())
                changeWorkflowState(analysis, "not_requested", comments="FOSS FIAStar")

                analysis.unmarkCreationFlag()
                zope.event.notify(ObjectInitializedEvent(analysis))
                row["analysis"] = analysis

        as_state = workflow.getInfoFor(analysis, "review_state", "")
        if as_state not in updateable_states:
            msg = _(
                "Analysis ${service} at slot ${slot} in state ${state} - not updated",
                mapping={"service": service.Title(), "slot": row["Cup"], "state": as_state},
            )
            res["errors"].append(translate(msg))
            continue
        if analysis.getResult():
            msg = _(
                "Analysis ${service} at slot ${slot} has a result - not updated",
                mapping={"service": service.Title(), "slot": row["Cup"]},
            )
            res["errors"].append(translate(msg))
            continue

        analysis.setInterimFields(
            [
                {"keyword": "dilution_factor", "title": "Dilution Factor", "value": row["Dilution"], "unit": ""},
                {"keyword": "injection", "title": "Injection", "value": row["Injection"], "unit": ""},
                {"keyword": "peak", "title": "Peak Height/Area", "value": row["Peak Height/Area"], "unit": ""},
                {"keyword": "peak_mean", "title": "Peak Mean", "value": row.get("Peak Mean", "0"), "unit": ""},
                {"keyword": "peak_st_dev", "title": "Peak St dev", "value": row.get("Peak St dev", "0"), "unit": ""},
                {"keyword": "concentration", "title": "Concentration", "value": row["Concentration"], "unit": ""},
                {
                    "keyword": "concentration_mean",
                    "title": "Concentration Mean",
                    "value": row["Concentration Mean"],
                    "unit": "",
                },
                {
                    "keyword": "concentration_st_dev",
                    "title": "Concentration St dev",
                    "value": row["Concentration St dev"],
                    "unit": "",
                },
                {"keyword": "deviation", "title": "Deviation", "value": row["Deviation"], "unit": ""},
            ]
        )

        msg = _("Analysis ${service} at slot ${slot}: OK", mapping={"service": service.Title(), "slot": row["Cup"]})
        res["log"].append(translate(msg))

    return json.dumps(res)
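
The parser above treats any row whose first cell is "List name" as a header row and zips every following row against it. The core of that loop works on plain strings; a sketch over an inlined sample with an abbreviated, hypothetical column layout:

lines = [
    "List name;Cup;Sample name;Sample type;Parameter;Concentration",
    "batch-1;7;uid-parent;uid-container;F SO2;12.5",
]

fields = None
rows = []
for raw in lines:
    row = raw.split(";")
    if row[0] == "List name":     # a new batch starts: remember headers
        fields = row
        continue
    record = dict(zip(fields, row))
    if record["Parameter"] == "sample" or not record["Concentration"]:
        continue                  # skip carrier rows and empty results
    rows.append(record)

print(rows[0]["Parameter"], rows[0]["Concentration"])  # F SO2 12.5
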
Code example #46
0
File: analysis.py Project: mas009/Bika-LIMS
    def workflow_script_retract(self):
        # DuplicateAnalysis doesn't have analysis_workflow.
        if self.portal_type == "DuplicateAnalysis":
            return
        if skip(self, "retract"):
            return
        ar = self.aq_parent
        workflow = getToolByName(self, "portal_workflow")
        if workflow.getInfoFor(self, 'cancellation_state', 'active') == "cancelled":
            return False
        # We'll assign the new analysis to this same worksheet, if any.
        ws = self.getBackReferences("WorksheetAnalysis")
        if ws:
            ws = ws[0]
        # Rename the analysis to make way for its successor.
        # Support multiple retractions by renaming to *-1, *-2, etc
        parent = self.aq_parent
        analyses = [x for x in parent.objectValues("Analysis")
                    if x.getId().startswith(self.id)]
        kw = self.getKeyword()
        parent.manage_renameObject(kw, "{0}-{1}".format(kw, len(analyses)))
        # Create new analysis and copy values from retracted
        analysis = _createObjectByType("Analysis", parent, kw)
        analysis.edit(
            Service=self.getService(),
            Calculation=self.getCalculation(),
            InterimFields=self.getInterimFields(),
            Result=self.getResult(),
            ResultDM=self.getResultDM(),
            Retested=True,
            MaxTimeAllowed=self.getMaxTimeAllowed(),
            DueDate=self.getDueDate(),
            Duration=self.getDuration(),
            ReportDryMatter=self.getReportDryMatter(),
            Analyst=self.getAnalyst(),
            Instrument=self.getInstrument(),
            SamplePartition=self.getSamplePartition())
        analysis.unmarkCreationFlag()
        # zope.event.notify(ObjectInitializedEvent(analysis))
        changeWorkflowState(analysis,
                            "bika_analysis_workflow", "sample_received")
        if ws:
            ws.addAnalysis(analysis)
        analysis.reindexObject()
        # retract our dependencies
        if "retract all dependencies" not in self.REQUEST["workflow_skiplist"]:
            for dependency in self.getDependencies():
                if not skip(dependency, "retract", peek=True):
                    if workflow.getInfoFor(dependency, "review_state") in ("attachment_due", "to_be_verified",):
                        # (NB: don't retract if it's verified)
                        workflow.doActionFor(dependency, "retract")
        # Retract our dependents
        for dep in self.getDependents():
            if not skip(dep, "retract", peek=True):
                if workflow.getInfoFor(dep, "review_state") not in ("sample_received", "retracted"):
                    self.REQUEST["workflow_skiplist"].append("retract all dependencies")
                    # just return to "received" state, no cascade
                    workflow.doActionFor(dep, 'retract')
                    self.REQUEST["workflow_skiplist"].remove("retract all dependencies")
        # Escalate action to the parent AR
        if not skip(ar, "retract", peek=True):
            if workflow.getInfoFor(ar, "review_state") == "sample_received":
                skip(ar, "retract")
            else:
                if "retract all analyses" not in self.REQUEST["workflow_skiplist"]:
                    self.REQUEST["workflow_skiplist"].append("retract all analyses")
                workflow.doActionFor(ar, "retract")
        # Escalate action to the Worksheet (if it's on one).
        ws = self.getBackReferences("WorksheetAnalysis")
        if ws:
            ws = ws[0]
            if not skip(ws, "retract", peek=True):
                if workflow.getInfoFor(ws, "review_state") == "open":
                    skip(ws, "retract")
                else:
                    if "retract all analyses" not in self.REQUEST["workflow_skiplist"]:
                        self.REQUEST["workflow_skiplist"].append("retract all analyses")
                    workflow.doActionFor(ws, "retract")
            # Add to worksheet Analyses
            analyses = list(ws.getAnalyses())
            analyses += [analysis, ]
            ws.setAnalyses(analyses)
            # Add to worksheet layout
            layout = ws.getLayout()
            pos = [x["position"] for x in layout
                   if x["analysis_uid"] == self.UID()][0]
            slot = {"position": pos,
                    "analysis_uid": analysis.UID(),
                    "container_uid": analysis.aq_parent.UID(),
                    "type": "a"}
            layout.append(slot)
            ws.setLayout(layout)
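
The rename step in the retraction script makes room for the replacement analysis by counting the siblings whose ids share the keyword and using that count as the new suffix. The naming scheme reduces to simple string work; a sketch with hypothetical ids:

keyword = "Ca"
existing_ids = ["Ca", "Ca-1"]   # current analysis plus one prior retraction

# Count objects whose id starts with the keyword, as the script does
# with parent.objectValues("Analysis"), then rename to the next suffix.
matches = [i for i in existing_ids if i.startswith(keyword)]
new_id = "{0}-{1}".format(keyword, len(matches))

print(new_id)  # Ca-2
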
Code example #47
0
def create_analysisrequest(client,
                           request,
                           values,
                           analyses=None,
                           results_ranges=None,
                           prices=None):
    """Creates a new AnalysisRequest (a Sample) object
    :param client: The container where the Sample will be created
    :param request: The current Http Request object
    :param values: A dict, with keys as AnalysisRequest's schema field names
    :param analyses: List of Services or Analyses (brains, objects, UIDs,
        keywords). Extends the list from values["Analyses"]
    :param results_ranges: List of Results Ranges. Extends the results ranges
        from the Specification object defined in values["Specification"]
    :param prices: Mapping of AnalysisService UID -> price. If not set, prices
        are read from the associated analysis service.
    """
    # Don't pollute the dict param passed in
    values = dict(values.items())

    # Resolve the Service uids of analyses to be added in the Sample. Values
    # passed-in might contain Profiles and also values that are not uids. Also,
    # additional analyses can be passed-in through either values or services
    service_uids = to_services_uids(values=values, services=analyses)

    # Remove the Analyses from values. We will add them manually
    values.update({"Analyses": []})

    # Create the Analysis Request and submit the form
    ar = _createObjectByType('AnalysisRequest', client, tmpID())
    ar.processForm(REQUEST=request, values=values)

    # Set the analyses manually
    ar.setAnalyses(service_uids, prices=prices, specs=results_ranges)

    # Handle hidden analyses from template and profiles
    # https://github.com/senaite/senaite.core/issues/1437
    # https://github.com/senaite/senaite.core/issues/1326
    apply_hidden_services(ar)

    # Handle rejection reasons
    rejection_reasons = resolve_rejection_reasons(values)
    ar.setRejectionReasons(rejection_reasons)

    # Handle secondary Analysis Request
    primary = ar.getPrimaryAnalysisRequest()
    if primary:
        # Mark the secondary with the `IAnalysisRequestSecondary` interface
        alsoProvides(ar, IAnalysisRequestSecondary)

        # Rename the secondary according to the ID server setup
        renameAfterCreation(ar)

        # Set dates to match with those from the primary
        ar.setDateSampled(primary.getDateSampled())
        ar.setSamplingDate(primary.getSamplingDate())
        ar.setDateReceived(primary.getDateReceived())

        # Force the transition of the secondary to received and set the
        # description/comment in the transition accordingly.
        if primary.getDateReceived():
            primary_id = primary.getId()
            comment = "Auto-received. Secondary Sample of {}".format(
                primary_id)
            changeWorkflowState(ar,
                                AR_WORKFLOW_ID,
                                "sample_received",
                                action="receive",
                                comments=comment)

            # Mark the secondary as received
            alsoProvides(ar, IReceived)

            # Initialize analyses
            do_action_to_analyses(ar, "initialize")

            # Notify that the AR has been modified
            modified(ar)

            # Reindex the AR
            ar.reindexObject()

            # If rejection reasons have been set, reject automatically
            if rejection_reasons:
                do_rejection(ar)

            # In "received" state already
            return ar

    # Try the no-sampling transition first, since it is the most common config
    success, message = doActionFor(ar, "no_sampling_workflow")
    if not success:
        doActionFor(ar, "to_be_sampled")

    # If rejection reasons have been set, reject the sample automatically
    if rejection_reasons:
        do_rejection(ar)

    return ar
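
The first line of the factory, values = dict(values.items()), is a defensive shallow copy: the function goes on to mutate the mapping (emptying "Analyses", for instance) and must not alter the caller's dict. A minimal illustration of the idiom:

def create(values):
    values = dict(values.items())   # shallow copy; don't pollute caller
    values.update({"Analyses": []})
    return values

caller_values = {"Analyses": ["uid-1"], "ClientSampleID": "X-1"}
result = create(caller_values)

print(result["Analyses"])         # []
print(caller_values["Analyses"])  # ['uid-1'] -- caller's dict untouched
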
Code example #48
0
    def workflow_action_save_analyses_button(self):
        form = self.request.form
        workflow = getToolByName(self.context, 'portal_workflow')
        bsc = self.context.bika_setup_catalog
        action, came_from = WorkflowAction._get_form_workflow_action(self)
        # AR Manage Analyses: save Analyses
        ar = self.context
        sample = ar.getSample()
        objects = WorkflowAction._get_selected_items(self)
        if not objects:
            message = _("No analyses have been selected")
            self.context.plone_utils.addPortalMessage(message, 'info')
            self.destination_url = self.context.absolute_url() + "/analyses"
            self.request.response.redirect(self.destination_url)
            return
        Analyses = objects.keys()
        prices = form.get("Price", [None])[0]

        # Hidden analyses?
        # https://jira.bikalabs.com/browse/LIMS-1324
        outs = []
        hiddenans = form.get('Hidden', {})
        for uid in Analyses:
            hidden = hiddenans.get(uid, '')
            hidden = True if hidden == 'on' else False
            outs.append({'uid': uid, 'hidden': hidden})
        ar.setAnalysisServicesSettings(outs)

        specs = {}
        if form.get("min", None):
            for service_uid in Analyses:
                service = objects[service_uid]
                keyword = service.getKeyword()
                specs[service_uid] = {
                    "min": form["min"][0][service_uid],
                    "max": form["max"][0][service_uid],
                    "warn_min": form["warn_min"][0][service_uid],
                    "warn_max": form["warn_max"][0][service_uid],
                    "keyword": keyword,
                    "uid": service_uid,
                }
        else:
            for service_uid in Analyses:
                service = objects[service_uid]
                keyword = service.getKeyword()
                specs[service_uid] = ResultsRangeDict(keyword=keyword,
                                                      uid=service_uid)
        new = ar.setAnalyses(Analyses, prices=prices, specs=specs.values())
        # link analyses and partitions
        # Partition assignments are only processed when the form contains
        # Partition data (controlled by Bika Setup > Analyses > 'Display
        # individual sample partitions').
        # https://github.com/bikalabs/Bika-LIMS/issues/1030
        if 'Partition' in form:
            for service_uid, service in objects.items():
                part_id = form['Partition'][0][service_uid]
                part = sample[part_id]
                analysis = ar[service.getKeyword()]
                analysis.setSamplePartition(part)
                analysis.reindexObject()
                partans = part.getAnalyses()
                partans.append(analysis)
                part.setAnalyses(partans)
                part.reindexObject()

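        # Newly added analyses must catch up with the AR's workflow state.
        # If the AR has already been submitted ('to_be_verified'), retract it
        # first, without cascading to the existing analyses, so that results
        # can still be entered for the new ones.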
        if new:
            ar_state = getCurrentState(ar)
            if wasTransitionPerformed(ar, 'to_be_verified'):
                # Apply to AR only; we don't want this transition to cascade.
                if 'workflow_skiplist' not in ar.REQUEST:
                    ar.REQUEST['workflow_skiplist'] = []
                ar.REQUEST['workflow_skiplist'].append("retract all analyses")
                workflow.doActionFor(ar, 'retract')
                ar.REQUEST['workflow_skiplist'].remove("retract all analyses")
                ar_state = getCurrentState(ar)
            for analysis in new:
                changeWorkflowState(analysis, 'bika_analysis_workflow',
                                    ar_state)

        message = PMF("Changes saved.")
        self.context.plone_utils.addPortalMessage(message, 'info')
        self.destination_url = self.context.absolute_url()
        self.request.response.redirect(self.destination_url)
Code example #49
def Import(context, request):
    """ Read FIAStar analysis results
    """

    template = "fiastar_import.pt"

    csvfile = request.form['fiastar_file']

    bac = getToolByName(context, 'bika_analysis_catalog')
    uc = getToolByName(context, 'uid_catalog')
    bsc = getToolByName(context, 'bika_setup_catalog')
    workflow = getToolByName(context, 'portal_workflow')

    updateable_states = ['sample_received', 'assigned', 'not_requested']
    now = DateTime().strftime('%Y%m%d-%H%M')

    res = {
        'errors': [],
        'log': [],
    }

    options = {'dilute_factor': 1, 'F SO2': 'FSO2', 'T SO2': 'TSO2'}
    for k, v in options.items():
        if k in request:
            options[k] = request.get(k)
        else:
            options[k] = v

    # kw_map to lookup Fiastar parameter -> service keyword and vice versa
    kw_map = {}
    for param in ['F SO2', 'T SO2']:
        service = bsc(getKeyword=options[param])
        if not service:
            msg = _('Service keyword ${keyword} not found',
                    mapping={
                        'keyword': options[param],
                    })
            res['errors'].append(t(msg))
            continue
        service = service[0].getObject()
        kw_map[param] = service
        kw_map[service.getKeyword()] = param

    # all errors at this point are fatal ones
    if res['errors']:
        return json.dumps(res)

    rows = []
    batch_headers = None
    fia1 = False
    fia2 = False
    # place all valid rows into list of dict by CSV row title
    for row in csvfile.readlines():
        if not row: continue
        row = row.split(';')
        # a new batch starts
        if row[0] == 'List name':
            fia1 = False
            fia2 = False
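            # The column where 'Concentration' appears tells the two FIAStar
            # export layouts apart; the wider (fia2) layout also carries mean
            # and standard-deviation columns, which get labelled here.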
            if row[13] == 'Concentration':
                fia1 = True
            elif row[15] == 'Concentration':
                row[13] = 'Peak Mean'
                row[14] = 'Peak St dev'
                row[16] = 'Concentration Mean'
                row[17] = 'Concentration St dev'
                fia2 = True
            fields = row
            continue
        row = dict(zip(fields, row))
        if row['Parameter'] == 'sample' or not row['Concentration']:
            continue
        if fia1:
            row['Peak Mean'] = 0
            row['Peak St dev'] = 0
            row['Concentration Mean'] = 0
            row['Concentration St dev'] = 0
        rows.append(row)

    log = []
    if len(rows) == 0:
        res['log'].append(t(_("No valid file or format")))

    for row in rows:
        param = row['Parameter']
        service = kw_map[param]
        keyword = service.getKeyword()
        calc = service.getCalculation()
        interim_fields = calc and calc.getInterimFields() or []

        p_uid = row['Sample name']
        parent = uc(UID=p_uid)
        if len(parent) == 0:
            msg = _('Analysis parent UID ${parent_uid} not found',
                    mapping={
                        'parent_uid': row['Sample name'],
                    })
            res['errors'].append(t(msg))
            continue
        parent = parent[0].getObject()

        c_uid = row['Sample type']
        container = uc(UID=c_uid)
        if len(container) == 0:
            msg = _('Analysis container UID ${container_uid} not found',
                    mapping={
                        'container_uid': row['Sample type'],
                    })
            res['errors'].append(t(msg))
            continue
        container = container[0].getObject()

        # Duplicates.
        if p_uid != c_uid:
            dups = [
                d.getObject() for d in bac(
                    portal_type='DuplicateAnalysis',
                    path={
                        'query': "/".join(container.getPhysicalPath()),
                        'level': 0,
                    })
            ]
            # The analyses should exist already
            # or no results will be imported.
            analysis = None
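            # Match the duplicate by its source analysis' parent AR and by
            # one of the two mapped SO2 service keywords.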
            for dup in dups:
                if dup.getAnalysis().aq_parent.UID() == p_uid and \
                   dup.getKeyword() in (options['F SO2'], options['T SO2']):
                    analysis = dup
            if not analysis:
                msg = _('Duplicate analysis for slot ${slot} not found',
                        mapping={
                            'slot': row['Cup'],
                        })
                res['errors'].append(t(msg))
                continue
            row['analysis'] = analysis
        else:
            analyses = parent.objectIds()
            if keyword in analyses:
                # analysis exists for this parameter.
                analysis = parent.get(keyword)
                row['analysis'] = analysis
            else:
                # analysis does not exist;
                # create a new analysis and set the 'not_requested' state
                analysis = _createObjectByType("Analysis", parent, keyword)
                analysis.edit(Service=service,
                              InterimFields=interim_fields,
                              MaxTimeAllowed=service.getMaxTimeAllowed())
                changeWorkflowState(analysis,
                                    'not_requested',
                                    comments="FOSS FIAStar")

                analysis.unmarkCreationFlag()
                zope.event.notify(ObjectInitializedEvent(analysis))
                row['analysis'] = analysis

        as_state = workflow.getInfoFor(analysis, 'review_state', '')
        if as_state not in updateable_states:
            msg = _(
                'Analysis ${service} at slot ${slot} in state ${state} - not updated',
                mapping={
                    'service': service.Title(),
                    'slot': row['Cup'],
                    'state': as_state,
                })
            res['errors'].append(t(msg))
            continue
        if analysis.getResult():
            msg = _(
                'Analysis ${service} at slot ${slot} has a result - not updated',
                mapping={
                    'service': service.Title(),
                    'slot': row['Cup'],
                })
            res['errors'].append(t(msg))
            continue

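        # Store the raw instrument readings as interim field values on the
        # analysis; the analysis' calculation, if any, works from these.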
        analysis.setInterimFields([
            {
                'keyword': 'dilution_factor',
                'title': 'Dilution Factor',
                'value': row['Dilution'],
                'unit': ''
            },
            {
                'keyword': 'injection',
                'title': 'Injection',
                'value': row['Injection'],
                'unit': ''
            },
            {
                'keyword': 'peak',
                'title': 'Peak Height/Area',
                'value': row['Peak Height/Area'],
                'unit': ''
            },
            {
                'keyword': 'peak_mean',
                'title': 'Peak Mean',
                'value': row.get('Peak Mean', '0'),
                'unit': ''
            },
            {
                'keyword': 'peak_st_dev',
                'title': 'Peak St dev',
                'value': row.get('Peak St dev', '0'),
                'unit': ''
            },
            {
                'keyword': 'concentration',
                'title': 'Concentration',
                'value': row['Concentration'],
                'unit': ''
            },
            {
                'keyword': 'concentration_mean',
                'title': 'Concentration Mean',
                'value': row['Concentration Mean'],
                'unit': ''
            },
            {
                'keyword': 'concentration_st_dev',
                'title': 'Concentration St dev',
                'value': row['Concentration St dev'],
                'unit': ''
            },
            {
                'keyword': 'deviation',
                'title': 'Deviation',
                'value': row['Deviation'],
                'unit': ''
            },
        ])

        msg = _('Analysis ${service} at slot ${slot}: OK',
                mapping={
                    'service': service.Title(),
                    'slot': row['Cup'],
                })
        res['log'].append(t(msg))

    return json.dumps(res)
Code example #50
File: fiastar.py Project: mas009/Bika-LIMS
def Import(context,request):
    """ Read FIAStar analysis results
    """

    template = "fiastar_import.pt"

    csvfile = request.form['fiastar_file']

    bac = getToolByName(context, 'bika_analysis_catalog')
    uc = getToolByName(context, 'uid_catalog')
    bsc = getToolByName(context, 'bika_setup_catalog')
    workflow = getToolByName(context, 'portal_workflow')

    updateable_states = ['sample_received', 'assigned', 'not_requested']
    now = DateTime().strftime('%Y%m%d-%H%M')

    res = {'errors': [],
           'log': [],}

    options = {'dilute_factor' : 1,
               'F SO2' : 'FSO2',
               'T SO2' : 'TSO2'}
    for k,v in options.items():
        if k in request:
            options[k] = request.get(k)
        else:
            options[k] = v

    # kw_map to lookup Fiastar parameter -> service keyword and vice versa
    kw_map = {}
    for param in ['F SO2', 'T SO2']:
        service = bsc(getKeyword = options[param])
        if not service:
            msg = _('Service keyword ${keyword} not found',
                    mapping = {'keyword': options[param], })
            res['errors'].append(t(msg))
            continue
        service = service[0].getObject()
        kw_map[param] = service
        kw_map[service.getKeyword()] = param

    # all errors at this point are fatal ones
    if res['errors']:
        return json.dumps(res)

    rows = []
    batch_headers = None
    fia1 = False
    fia2 = False
    # place all valid rows into list of dict by CSV row title
    for row in csvfile.readlines():
        if not row: continue
        row = row.split(';')
        # a new batch starts
        if row[0] == 'List name':
            fia1 = False
            fia2 = False
            if row[13] == 'Concentration':
                fia1 = True
            elif row[15] == 'Concentration':
                row[13] = 'Peak Mean'
                row[14] = 'Peak St dev'
                row[16] = 'Concentration Mean'
                row[17] = 'Concentration St dev'
                fia2 = True
            fields = row
            continue
        row = dict(zip(fields, row))
        if row['Parameter'] == 'sample' or not row['Concentration']:
            continue
        if fia1:
            row['Peak Mean'] = 0
            row['Peak St dev'] = 0
            row['Concentration Mean'] = 0
            row['Concentration St dev'] = 0
        rows.append(row)

    log = []
    if len(rows) == 0:
        res['log'].append(t(_("No valid file or format")))

    for row in rows:
        param = row['Parameter']
        service = kw_map[param]
        keyword = service.getKeyword()
        calc = service.getCalculation()
        interim_fields = calc and calc.getInterimFields() or []

        p_uid = row['Sample name']
        parent = uc(UID = p_uid)
        if len(parent) == 0:
            msg = _('Analysis parent UID ${parent_uid} not found',
                    mapping = {'parent_uid': row['Sample name'], })
            res['errors'].append(t(msg))
            continue
        parent = parent[0].getObject()

        c_uid = row['Sample type']
        container = uc(UID = c_uid)
        if len(container) == 0:
            msg = _('Analysis container UID ${container_uid} not found',
                    mapping = {'container_uid': row['Sample type'], })
            res['errors'].append(t(msg))
            continue
        container = container[0].getObject()

        # Duplicates.
        if p_uid != c_uid:
            dups = [d.getObject() for d in
                    bac(portal_type='DuplicateAnalysis',
                       path={'query': "/".join(container.getPhysicalPath()),
                             'level': 0,})]
            # The analyses should exist already
            # or no results will be imported.
            analysis = None
            for dup in dups:
                if dup.getAnalysis().aq_parent.UID() == p_uid and \
                   dup.getKeyword() in (options['F SO2'], options['T SO2']):
                    analysis = dup
            if not analysis:
                msg = _('Duplicate analysis for slot ${slot} not found',
                        mapping = {'slot': row['Cup'], })
                res['errors'].append(t(msg))
                continue
            row['analysis'] = analysis
        else:
            analyses = parent.objectIds()
            if keyword in analyses:
                # analysis exists for this parameter.
                analysis = parent.get(keyword)
                row['analysis'] = analysis
            else:
                # analysis does not exist;
                # create a new analysis and set the 'not_requested' state
                analysis = _createObjectByType("Analysis", parent, keyword)
                analysis.edit(Service = service,
                              InterimFields = interim_fields,
                              MaxTimeAllowed = service.getMaxTimeAllowed())
                changeWorkflowState(analysis,
                                    'not_requested',
                                    comments="FOSS FIAStar")

                analysis.unmarkCreationFlag()
                zope.event.notify(ObjectInitializedEvent(analysis))
                row['analysis'] = analysis

        as_state = workflow.getInfoFor(analysis, 'review_state', '')
        if as_state not in updateable_states:
            msg = _('Analysis ${service} at slot ${slot} in state ${state} - not updated',
                    mapping = {'service': service.Title(),
                               'slot': row['Cup'],
                               'state': as_state,})
            res['errors'].append(t(msg))
            continue
        if analysis.getResult():
            msg = _('Analysis ${service} at slot ${slot} has a result - not updated',
                    mapping = {'service': service.Title(),
                               'slot': row['Cup'], })
            res['errors'].append(t(msg))
            continue

        analysis.setInterimFields(
            [
            {'keyword':'dilution_factor',
             'title': 'Dilution Factor',
             'value': row['Dilution'],
             'unit':''},
            {'keyword':'injection',
             'title': 'Injection',
             'value': row['Injection'],
             'unit':''},
            {'keyword':'peak',
             'title': 'Peak Height/Area',
             'value': row['Peak Height/Area'],
             'unit':''},
            {'keyword':'peak_mean',
             'title': 'Peak Mean',
             'value': row.get('Peak Mean', '0'),
             'unit':''},
            {'keyword':'peak_st_dev',
             'title': 'Peak St dev',
             'value': row.get('Peak St dev', '0'),
             'unit':''},
            {'keyword':'concentration',
             'title': 'Concentration',
             'value': row['Concentration'],
             'unit':''},
            {'keyword':'concentration_mean',
             'title': 'Concentration Mean',
             'value': row['Concentration Mean'],
             'unit':''},
            {'keyword':'concentration_st_dev',
             'title': 'Concentration St dev',
             'value': row['Concentration St dev'],
             'unit':''},
            {'keyword':'deviation',
             'title': 'Deviation',
             'value': row['Deviation'],
             'unit':''},
            ]
        )

        msg = _('Analysis ${service} at slot ${slot}: OK',
                mapping = {'service': service.Title(),
                           'slot': row['Cup'], })
        res['log'].append(t(msg))

    return json.dumps(res)
Code example #51
File: workflow.py Project: KaskMartin/Bika-LIMS
    def cloneAR(self, ar):
        newar = _createObjectByType("AnalysisRequest", ar.aq_parent, tmpID())
        newar.title = ar.title
        newar.description = ar.description
        newar.setContact(ar.getContact())
        newar.setCCContact(ar.getCCContact())
        newar.setCCEmails(ar.getCCEmails())
        newar.setBatch(ar.getBatch())
        newar.setTemplate(ar.getTemplate())
        newar.setProfile(ar.getProfile())
        newar.setSamplingDate(ar.getSamplingDate())
        newar.setSampleType(ar.getSampleType())
        newar.setSamplePoint(ar.getSamplePoint())
        newar.setStorageLocation(ar.getStorageLocation())
        newar.setSamplingDeviation(ar.getSamplingDeviation())
        newar.setPriority(ar.getPriority())
        newar.setSampleCondition(ar.getSampleCondition())
        newar.setSample(ar.getSample())
        newar.setClientOrderNumber(ar.getClientOrderNumber())
        newar.setClientReference(ar.getClientReference())
        newar.setClientSampleID(ar.getClientSampleID())
        newar.setDefaultContainerType(ar.getDefaultContainerType())
        newar.setAdHoc(ar.getAdHoc())
        newar.setComposite(ar.getComposite())
        newar.setReportDryMatter(ar.getReportDryMatter())
        newar.setInvoiceExclude(ar.getInvoiceExclude())
        newar.setAttachment(ar.getAttachment())
        newar.setInvoice(ar.getInvoice())
        newar.setDateReceived(ar.getDateReceived())
        newar.setMemberDiscount(ar.getMemberDiscount())
        # Set the results for each AR analysis
        ans = ar.getAnalyses(full_objects=True)
        for an in ans:
            nan = _createObjectByType("Analysis", newar, an.getKeyword())
            nan.setService(an.getService())
            nan.setCalculation(an.getCalculation())
            nan.setInterimFields(an.getInterimFields())
            nan.setResult(an.getResult())
            nan.setResultDM(an.getResultDM())
            nan.setRetested(False)
            nan.setMaxTimeAllowed(an.getMaxTimeAllowed())
            nan.setDueDate(an.getDueDate())
            nan.setDuration(an.getDuration())
            nan.setReportDryMatter(an.getReportDryMatter())
            nan.setAnalyst(an.getAnalyst())
            nan.setInstrument(an.getInstrument())
            nan.setSamplePartition(an.getSamplePartition())
            nan.unmarkCreationFlag()
            zope.event.notify(ObjectInitializedEvent(nan))
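            # Place the copied analysis directly in 'to_be_verified' so the
            # cloned AR is immediately ready for verification.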
            changeWorkflowState(nan, 'bika_analysis_workflow',
                                'to_be_verified')
            nan.reindexObject()

        newar.reindexObject()
        newar.aq_parent.reindexObject()
        renameAfterCreation(newar)
        newar.setRequestID(newar.getId())

        if hasattr(ar, 'setChildAnalysisRequest'):
            ar.setChildAnalysisRequest(newar)
        newar.setParentAnalysisRequest(ar)
        return newar
Code example #52
    def workflow_script_retract(self):
        # DuplicateAnalysis doesn't have analysis_workflow.
        if self.portal_type == "DuplicateAnalysis":
            return
        if skip(self, "retract"):
            return
        ar = self.aq_parent
        workflow = getToolByName(self, "portal_workflow")
        if workflow.getInfoFor(self, 'cancellation_state',
                               'active') == "cancelled":
            return False
        # We'll assign the new analysis to this same worksheet, if any.
        ws = self.getBackReferences("WorksheetAnalysis")
        if ws:
            ws = ws[0]
        # Rename the analysis to make way for its successor.
        # Support multiple retractions by renaming to *-0, *-1, etc.
        parent = self.aq_parent
        analyses = [
            x for x in parent.objectValues("Analysis")
            if x.getId().startswith(self.id)
        ]
        kw = self.getKeyword()
        # LIMS-1290 - Analyst must be able to retract, which creates a new Analysis.
        parent._verifyObjectPaste = str  # Neutralize the paste permission check so the rename succeeds.
        parent.manage_renameObject(kw, "{0}-{1}".format(kw, len(analyses)))
        delattr(parent, '_verifyObjectPaste')
        # Create new analysis and copy values from retracted
        analysis = _createObjectByType("Analysis", parent, kw)
        analysis.edit(
            Service=self.getService(),
            Calculation=self.getCalculation(),
            InterimFields=self.getInterimFields(),
            Result=self.getResult(),
            ResultDM=self.getResultDM(),
            Retested=True,
            MaxTimeAllowed=self.getMaxTimeAllowed(),
            DueDate=self.getDueDate(),
            Duration=self.getDuration(),
            ReportDryMatter=self.getReportDryMatter(),
            Analyst=self.getAnalyst(),
            Instrument=self.getInstrument(),
            SamplePartition=self.getSamplePartition())
        analysis.unmarkCreationFlag()

        # We must bring the specification across manually.
        analysis.specification = self.specification

        # zope.event.notify(ObjectInitializedEvent(analysis))
        changeWorkflowState(analysis, "bika_analysis_workflow",
                            "sample_received")
        if ws:
            ws.addAnalysis(analysis)
        analysis.reindexObject()
        # retract our dependencies
        if not "retract all dependencies" in self.REQUEST["workflow_skiplist"]:
            for dependency in self.getDependencies():
                if not skip(dependency, "retract", peek=True):
                    if workflow.getInfoFor(dependency, "review_state") in (
                            "attachment_due",
                            "to_be_verified",
                    ):
                        # (NB: don"t retract if it"s verified)
                        workflow.doActionFor(dependency, "retract")
        # Retract our dependents
        for dep in self.getDependents():
            if not skip(dep, "retract", peek=True):
                if workflow.getInfoFor(
                        dep, "review_state") not in ("sample_received",
                                                     "retracted"):
                    self.REQUEST["workflow_skiplist"].append(
                        "retract all dependencies")
                    # just return to "received" state, no cascade
                    workflow.doActionFor(dep, 'retract')
                    self.REQUEST["workflow_skiplist"].remove(
                        "retract all dependencies")
        # Escalate action to the parent AR
        if not skip(ar, "retract", peek=True):
            if workflow.getInfoFor(ar, "review_state") == "sample_received":
                skip(ar, "retract")
            else:
                if not "retract all analyses" in self.REQUEST[
                        "workflow_skiplist"]:
                    self.REQUEST["workflow_skiplist"].append(
                        "retract all analyses")
                workflow.doActionFor(ar, "retract")
        # Escalate action to the Worksheet (if it's on one).
        ws = self.getBackReferences("WorksheetAnalysis")
        if ws:
            ws = ws[0]
            if not skip(ws, "retract", peek=True):
                if workflow.getInfoFor(ws, "review_state") == "open":
                    skip(ws, "retract")
                else:
                    if not "retract all analyses" in self.REQUEST[
                            'workflow_skiplist']:
                        self.REQUEST["workflow_skiplist"].append(
                            "retract all analyses")
                    try:
                        workflow.doActionFor(ws, "retract")
                    except WorkflowException:
                        pass
            # Add to worksheet Analyses
            analyses = list(ws.getAnalyses())
            analyses.append(analysis)
            ws.setAnalyses(analyses)
            # Add to worksheet layout
            layout = ws.getLayout()
            pos = [
                x["position"] for x in layout
                if x["analysis_uid"] == self.UID()
            ][0]
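            # Reuse the retracted analysis' slot position for its replacement.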
            slot = {
                "position": pos,
                "analysis_uid": analysis.UID(),
                "container_uid": analysis.aq_parent.UID(),
                "type": "a"
            }
            layout.append(slot)
            ws.setLayout(layout)