Example #1
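A regression test for https://jira.bikalabs.com/browse/WINE-44: it builds two AnalysisServices, a Client, a Contact and a SampleType, then checks for every sticker template that the sticker shows the SamplePartition ID only when bikasetup.ShowPartitions is enabled.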
    def test_default_stickers(self):
        """https://jira.bikalabs.com/browse/WINE-44: display SampleID or
        SamplePartition ID depending on bikasetup.ShowPartitions value
        """

        folder = self.portal.bika_setup.bika_analysisservices
        services = [_createObjectByType("AnalysisService", folder, tmpID()),
                    _createObjectByType("AnalysisService", folder, tmpID())]
        services[0].processForm()
        services[1].processForm()
        services[0].edit(title="Detect Dust")
        services[1].edit(title="Detect water")
        service_uids = [s.UID() for s in services]
        folder = self.portal.clients
        client = _createObjectByType("Client", folder, tmpID())
        client.processForm()
        folder = self.portal.clients.objectValues("Client")[0]
        contact = _createObjectByType("Contact", folder, tmpID())
        contact.processForm()
        contact.edit(Firstname="Bob", Surname="Dobbs", email="*****@*****.**")
        folder = self.portal.bika_setup.bika_sampletypes
        sampletype = _createObjectByType("SampleType", folder, tmpID())
        sampletype.processForm()
        sampletype.edit(title="Air", Prefix="AIR")

        values = {'Client': client.UID(),
                  'Contact': contact.UID(),
                  'SamplingDate': '2015-01-01',
                  'SampleType': sampletype.UID()}

        for stemp in getStickerTemplates():

            # create and receive AR
            ar = create_analysisrequest(client, {}, values, service_uids)
            ar.bika_setup.setShowPartitions(False)
            doActionFor(ar, 'receive')
            self.assertEquals(ar.portal_workflow.getInfoFor(ar, 'review_state'), 'sample_received')
            # check sticker text
            ar.REQUEST['items'] = ar.getId()
            ar.REQUEST['template'] = stemp.get('id')
            sticker = Sticker(ar, ar.REQUEST)()
            pid = ar.getSample().objectValues("SamplePartition")[0].getId()
            self.assertNotIn(pid, sticker, "Sticker must not contain partition ID %s" % pid)

            # create and receive AR
            ar = create_analysisrequest(client, {}, values, service_uids)
            ar.bika_setup.setShowPartitions(True)
            doActionFor(ar, 'receive')
            self.assertEquals(ar.portal_workflow.getInfoFor(ar, 'review_state'), 'sample_received')
            # check sticker text
            ar.REQUEST['items'] = ar.getId()
            ar.REQUEST['template'] = stemp.get('id')
            sticker = Sticker(ar, ar.REQUEST)()
            pid = ar.getSample().objectValues("SamplePartition")[0].getId()
            self.assertIn(pid, sticker, "Sticker must contain partition ID %s" % pid)
Example #2
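A browser view that resets a client's analysis specifications: existing client AnalysisSpec objects are deleted and the lab defaults are copied in their place.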
    def __call__(self):
        form = self.request.form
        bsc = getToolByName(self.context, 'bika_setup_catalog')

        # find and remove existing specs
        cs = bsc(portal_type = 'AnalysisSpec',
                  getClientUID = self.context.UID())
        if cs:
            self.context.manage_delObjects([s.id for s in cs])

        # find and duplicate lab specs
        ls = bsc(portal_type = 'AnalysisSpec',
                 getClientUID = self.context.bika_setup.bika_analysisspecs.UID())
        ls = [s.getObject() for s in ls]
        for labspec in ls:
            clientspec = _createObjectByType("AnalysisSpec", self.context, tmpID())
            clientspec.processForm()
            clientspec.edit(
                SampleType = labspec.getSampleType(),
                ResultsRange = labspec.getResultsRange(),
            )
        message = _("Analysis specifications reset to lab defaults.")
        self.context.plone_utils.addPortalMessage(message, 'info')
        self.request.RESPONSE.redirect(self.context.absolute_url() +
                                       "/analysisspecs")
        return
Example #3
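Adds a ReferenceAnalysis for the given service UID and reference type to a reference sample, copying the interim fields of the service's calculation onto the new analysis.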
    def addReferenceAnalysis(self, service_uid, reference_type):
        """ add an analysis to the sample """
        rc = getToolByName(self, REFERENCE_CATALOG)
        service = rc.lookupObject(service_uid)

        analysis = _createObjectByType("ReferenceAnalysis", self, tmpID())
        analysis.unmarkCreationFlag()

        calculation = service.getCalculation()
        interim_fields = calculation and calculation.getInterimFields() or []
        renameAfterCreation(analysis)

        analysis.setReferenceType(reference_type)
        analysis.setService(service_uid)
        analysis.setInterimFields(interim_fields)
        return analysis.UID()
Example #4
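Creates a new Sample (or fetches the existing one for a secondary analysis request), applies the form values, and fires the initial sampling-workflow transition.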
def create_sample(context, request, values):
    # Retrieve the required tools
    uc = getToolByName(context, 'uid_catalog')
    # Determine if the sampling workflow is enabled
    workflow_enabled = context.bika_setup.getSamplingWorkflowEnabled()
    # Create sample or refer to existing for secondary analysis request
    if values.get('Sample_uid', ''):
        sample = uc(UID=values['Sample_uid'])[0].getObject()
    else:
        sample = _createObjectByType('Sample', context, tmpID())
        # Specifically set the sample type
        sample.setSampleType(values['SampleType'])
        # Specifically set the sample point
        if 'SamplePoint' in values:
            sample.setSamplePoint(values['SamplePoint'])
        # Specifically set the storage location
        if 'StorageLocation' in values:
            sample.setStorageLocation(values['StorageLocation'])
        # Update the created sample with indicated values
        sample.processForm(REQUEST=request, values=values)
        # Perform the appropriate workflow action
        workflow_action = 'sampling_workflow' if workflow_enabled \
            else 'no_sampling_workflow'
        context.portal_workflow.doActionFor(sample, workflow_action)
        # Set the SampleID
        sample.edit(SampleID=sample.getId())
    # Return the newly created sample
    return sample
Example #5
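A helper that creates an AnalysisService with the given title and keyword and returns the renamed object.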
    def create_service(self, src_uid, dst_title, dst_keyword):
        folder = self.context.bika_setup.bika_analysisservices
        dst_service = _createObjectByType("AnalysisService", folder, tmpID())
        # manually set keyword and title
        dst_service.setKeyword(dst_keyword)
        dst_service.setTitle(dst_title)
        dst_service.unmarkCreationFlag()
        _id = renameAfterCreation(dst_service)
        dst_service = folder[_id]
        return dst_service
Example #6
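A view that adds a SamplePartition to the current sample and forces the new partition into the parent sample's workflow state before redirecting to the partitions tab.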
    def __call__(self):
        wf = getToolByName(self.context, 'portal_workflow')
        part = _createObjectByType("SamplePartition", self.context, tmpID())
        part.processForm()
        # force the new partition into the same state as the parent sample
        sample_state = wf.getInfoFor(self.context, 'review_state')
        changeWorkflowState(part, "bika_sample_workflow", sample_state)
        self.request.RESPONSE.redirect(self.context.absolute_url() +
                                       "/partitions")
        return
Example #7
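A view that creates a Worksheet, assigns the analyst and optional instrument, and either applies the selected worksheet template or redirects to a blank worksheet.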
    def __call__(self):

        # Validation
        form = self.request.form
        analyst = self.request.get('analyst', '')
        template = self.request.get('template', '')
        instrument = self.request.get('instrument', '')

        if not analyst:
            message = _("Analyst must be specified.")
            self.context.plone_utils.addPortalMessage(message, 'info')
            self.request.RESPONSE.redirect(self.context.absolute_url())
            return

        rc = getToolByName(self.context, REFERENCE_CATALOG)
        wf = getToolByName(self.context, "portal_workflow")
        pm = getToolByName(self.context, "portal_membership")

        ws = _createObjectByType("Worksheet", self.context, tmpID())
        ws.processForm()

        # Set analyst and instrument
        ws.setAnalyst(analyst)
        if instrument:
            ws.setInstrument(instrument)

        # overwrite saved context UID for event subscribers
        self.request['context_uid'] = ws.UID()

        # if no template was specified, redirect to blank worksheet
        if not template:
            self.request.RESPONSE.redirect(ws.absolute_url() + "/add_analyses")
            return

        wst = rc.lookupObject(template)
        ws.setWorksheetTemplate(wst)
        ws.applyWorksheetTemplate(wst)

        if ws.getLayout():
            self.request.RESPONSE.redirect(ws.absolute_url() +
                                           "/manage_results")
        else:
            msg = _("No analyses were added")
            self.context.plone_utils.addPortalMessage(msg)
            self.request.RESPONSE.redirect(ws.absolute_url() + "/add_analyses")
Example #8
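The 'save partitions' workflow action: it adds missing SamplePartitions, removes surplus ones (detaching their analyses first), and updates each partition's container and preservation.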
    def workflow_action_save_partitions_button(self):
        form = self.request.form
        # Sample Partitions or AR Manage Analyses: save Partition Table
        sample = self.context.portal_type == 'Sample' and self.context or \
            self.context.getSample()
        part_prefix = sample.getId() + "-P"
        nr_existing = len(sample.objectIds())
        nr_parts = len(form['PartTitle'][0])
        # add missing parts
        if nr_parts > nr_existing:
            for i in range(nr_parts - nr_existing):
                part = _createObjectByType("SamplePartition", sample, tmpID())
                part.setDateReceived(DateTime())
                part.processForm()
        # remove excess parts
        if nr_existing > nr_parts:
            for i in range(nr_existing - nr_parts):
                part = sample['%s%s' % (part_prefix, nr_existing - i)]
                for a in part.getBackReferences("AnalysisSamplePartition"):
                    a.setSamplePartition(None)
                sample.manage_delObjects([
                    '%s%s' % (part_prefix, nr_existing - i),
                ])
        # modify part container/preservation
        for part_uid, part_id in form['PartTitle'][0].items():
            part = sample["%s%s" %
                          (part_prefix, part_id.split(part_prefix)[1])]
            part.edit(
                Container=form['getContainer'][0][part_uid],
                Preservation=form['getPreservation'][0][part_uid],
            )
            part.reindexObject()
        objects = WorkflowAction._get_selected_items(self)
        if not objects:
            message = _("No items have been selected")
            self.context.plone_utils.addPortalMessage(message, 'info')
            if self.context.portal_type == 'Sample':
                # in samples this table is on the 'Partitions' tab
                self.destination_url = self.context.absolute_url() + \
                    "/partitions"
            else:
                # in ar context this table is on the 'ManageAnalyses' tab
                self.destination_url = self.context.absolute_url() + \
                    "/analyses"
            self.request.response.redirect(self.destination_url)
Example #9
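A test that creates or updates an InstrumentCalibration for every instrument and verifies isCalibrationInProgress() for both current and past calibration dates.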
    def test_instrument_calibration(self):
        # Getting all instruments
        instrument_names = self.portal.bika_setup.bika_instruments.keys()
        # Setting calibration dates
        for instrument_name in instrument_names:
            # Getting each instrument
            instrument = self.portal.bika_setup.bika_instruments[
                instrument_name]
            today = date.today()
            # Getting last valid calibration
            lastcal = instrument.getLatestValidCalibration()
            if not lastcal:
                #  Creating a new calibration
                cal_obj = _createObjectByType("InstrumentCalibration",
                                              instrument, tmpID())
                cal_obj.edit(title='test',
                             DownFrom=today.strftime("%Y/%m/%d"),
                             DownTo=today.strftime("%Y/%m/%d"),
                             Instrument=instrument)
                cal_obj.unmarkCreationFlag()
                renameAfterCreation(cal_obj)
            else:
                #  Updating last calibration
                lastcal.setDownTo(today)
                lastcal.setDownFrom(today)
        #  Testing calibration state
        for instrument_name in instrument_names:
            instrument = self.portal.bika_setup.bika_instruments[
                instrument_name]
            self.assertTrue(instrument.isCalibrationInProgress())

        for instrument_name in instrument_names:
            instrument = self.portal.bika_setup.bika_instruments[
                instrument_name]
            anotherday = '2014/11/27'
            lastcal = instrument.getLatestValidCalibration()
            lastcal.setDownTo(anotherday)
            lastcal.setDownFrom(anotherday)
        for instrument_name in instrument_names:
            instrument = self.portal.bika_setup.bika_instruments[
                instrument_name]
            self.assertFalse(instrument.isCalibrationInProgress())
Example #10
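Assigns the given services to an analysis request, creates the sample partitions, links the new analyses to the first partition, and fires the appropriate sampling and preservation transitions.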
    def _add_services_to_ar(self, ar, analyses):
        # Add the requested services to the AR
        service_uids = [i.split(':')[0] for i in analyses]
        new_analyses = ar.setAnalyses(service_uids)
        ar.setRequestID(ar.getId())
        ar.reindexObject()
        event.notify(ObjectInitializedEvent(ar))
        ar.at_post_create_script()

        SamplingWorkflowEnabled = \
            self.bika_setup.getSamplingWorkflowEnabled()
        wftool = getToolByName(self, 'portal_workflow')

        # Create sample partitions
        parts = [{'services': [],
                  'container': [],
                  'preservation': '',
                  'separate': False}]
        sample = ar.getSample()
        parts_and_services = {}
        for _i in range(len(parts)):
            p = parts[_i]
            part_prefix = sample.getId() + "-P"
            if '%s%s' % (part_prefix, _i + 1) in sample.objectIds():
                parts[_i]['object'] = sample['%s%s' % (part_prefix, _i + 1)]
                parts_and_services['%s%s' % (part_prefix, _i + 1)] = \
                        p['services']
            else:
                part = _createObjectByType("SamplePartition", sample, tmpID())
                parts[_i]['object'] = part
                container = None
                preservation = p['preservation']
                parts[_i]['prepreserved'] = False
                part.unmarkCreationFlag()
                part.edit(
                    Container=container,
                    Preservation=preservation,
                )
                part._renameAfterCreation()
                if SamplingWorkflowEnabled:
                    wftool.doActionFor(part, 'sampling_workflow')
                else:
                    wftool.doActionFor(part, 'no_sampling_workflow')
                parts_and_services[part.id] = p['services']

        if SamplingWorkflowEnabled:
            wftool.doActionFor(ar, 'sampling_workflow')
        else:
            wftool.doActionFor(ar, 'no_sampling_workflow')

        # Add analyses to sample partitions
        # XXX jsonapi create AR: right now, all new analyses are linked to the first samplepartition
        if new_analyses:
            analyses = list(part.getAnalyses())
            analyses.extend(new_analyses)
            part.edit(
                Analyses=analyses,
            )
            for analysis in new_analyses:
                analysis.setSamplePartition(part)

        # If Preservation is required for some partitions,
        # and the SamplingWorkflow is disabled, we need
        # to transition to to_be_preserved manually.
        if not SamplingWorkflowEnabled:
            to_be_preserved = []
            sample_due = []
            lowest_state = 'sample_due'
            for p in sample.objectValues('SamplePartition'):
                if p.getPreservation():
                    lowest_state = 'to_be_preserved'
                    to_be_preserved.append(p)
                else:
                    sample_due.append(p)
            for p in to_be_preserved:
                doActionFor(p, 'to_be_preserved')
            for p in sample_due:
                doActionFor(p, 'sample_due')
            doActionFor(sample, lowest_state)
            for analysis in ar.objectValues('Analysis'):
                doActionFor(analysis, lowest_state)
            doActionFor(ar, lowest_state)
Example #11
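Submits a 'profiles' AR import: for each ARImportItem the analyses are resolved from profile keys, then a Sample and an AnalysisRequest are created and linked while a progress bar is updated.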
    def _submit_arimport_p(self):
        """ load the profiles import layout """

        ars = []
        samples = []
        valid_batch = False
        client = self.aq_parent
        contact_obj = None
        cc_contact_obj = None

        # validate contact
        for contact in client.objectValues('Contact'):
            if contact.getUsername() == self.getContactID():
                contact_obj = contact
            if self.getCCContactID() is None:
                if contact_obj is not None:
                    break
            else:
                if contact.getUsername() == self.getCCContactID():
                    cc_contact_obj = contact
                    if contact_obj is not None:
                        break

        if contact_obj is None:
            valid_batch = False

        # get Keyword to ServiceId Map
        services = {}
        service_uids = {}

        for service in self.bika_setup_catalog(
                portal_type = 'AnalysisService'):
            obj = service.getObject()
            keyword = obj.getKeyword()
            if keyword:
                services[keyword] = '%s:%s' % (obj.UID(), obj.getPrice())
            service_uids[obj.UID()] = '%s:%s' % (obj.UID(), obj.getPrice())

        samplepoints = self.bika_setup_catalog(
            portal_type = 'SamplePoint',
            Title = self.getSamplePoint())
        if not samplepoints:
            valid_batch = False

        profiles = {}
        aritems = self.objectValues('ARImportItem')

        request = self.REQUEST
        title = 'Submitting AR Import'
        bar = ProgressBar(
                self, request, title, description='')
        event.notify(InitialiseProgressBar(bar))

        row_count = 0
        item_count = len(aritems)
        prefix = 'Sample'
        for aritem in aritems:
            # set up analyses
            ar_profile = None
            analyses = []
            row_count += 1

            for profilekey in aritem.getAnalysisProfile():
                this_profile = None
                if profilekey not in profiles:
                    profiles[profilekey] = []
                    # there is no profilekey index
                    l_prox = self._findProfileKey(profilekey)
                    if l_prox:
                        profiles[profilekey] = \
                                [s.UID() for s in l_prox.getService()]
                        this_profile = l_prox
                    else:
                        #TODO This will not find it!!
                        # there is no profilekey index
                        c_prox = self.bika_setup_catalog(
                                    portal_type = 'AnalysisProfile',
                                    getClientUID = client.UID(),
                                    getProfileKey = profilekey)
                        if c_prox:
                            obj = c_prox[0].getObject()
                            profiles[profilekey] = \
                                    [s.UID() for s in obj.getService()]
                            this_profile = obj

                # keep a single matching profile for the AR; clear it if several apply
                if ar_profile is None:
                    ar_profile = this_profile
                else:
                    ar_profile = None
                profile = profiles[profilekey]
                for analysis in profile:
                    if analysis not in service_uids:
                        # resolve the service by UID via the (acquired) reference catalog
                        service = self.reference_catalog.lookupObject(analysis)
                        keyword = service.getKeyword()
                        service_uids[service.UID()] = '%s:%s' % (service.UID(), service.getPrice())
                        if keyword:
                            services[keyword] = '%s:%s' % (service.UID(), service.getPrice())

                    if analysis in service_uids:
                        if service_uids[analysis] not in analyses:
                            analyses.append(service_uids[analysis])
                    else:
                        valid_batch = False

            for analysis in aritem.getAnalyses(full_objects=True):
                if analysis not in services:
                    for service in self.bika_setup_catalog(
                            portal_type = 'AnalysisService',
                            getKeyword = analysis):
                        obj = service.getObject()
                        services[analysis] = '%s:%s' % (obj.UID(), obj.getPrice())
                        service_uids[obj.UID()] = '%s:%s' % (obj.UID(), obj.getPrice())

                if analysis in services:
                    analyses.append(services[analysis])
                else:
                    valid_batch = False

            sampletypes = self.portal_catalog(
                portal_type = 'SampleType',
                sortable_title = aritem.getSampleType().lower(),
                )
            if not sampletypes:
                valid_batch = False
                return
            sampletypeuid = sampletypes[0].getObject().UID()

            if aritem.getSampleDate():
                date_items = aritem.getSampleDate().split('/')
                sample_date = DateTime(
                    int(date_items[2]), int(date_items[0]), int(date_items[1]))
            else:
                sample_date = None

            sample_id = '%s-%s' % (prefix, tmpID())
            sample = _createObjectByType("Sample", client, sample_id)
            sample.unmarkCreationFlag()
            sample.edit(
                SampleID = sample_id,
                ClientReference = aritem.getClientRef(),
                ClientSampleID = aritem.getClientSid(),
                SampleType = aritem.getSampleType(),
                DateSampled = sample_date,
                SamplingDate = sample_date,
                DateReceived = DateTime(),
                Remarks = aritem.getClientRemarks(),
                )
            sample._renameAfterCreation()
            sample.setSamplePoint(self.getSamplePoint())
            sample.setSampleID(sample.getId())
            event.notify(ObjectInitializedEvent(sample))
            sample.at_post_create_script()
            sample_uid = sample.UID()
            samples.append(sample_id)
            aritem.setSample(sample_uid)

            priorities = self.bika_setup_catalog(
                portal_type = 'ARPriority',
                sortable_title = aritem.Priority.lower(),
                )
            if len(priorities) < 1:
                logger.warning(
                    'Invalid Priority: validation should have prevented this')
                priority = ''
            else:
                priority = priorities[0].getObject()

            ar_id = tmpID()
            ar = _createObjectByType("AnalysisRequest", client, ar_id)
            report_dry_matter = False

            ar.unmarkCreationFlag()
            ar.edit(
                RequestID = ar_id,
                Contact = self.getContact(),
                CCContact = self.getCCContact(),
                CCEmails = self.getCCEmailsInvoice(),
                ClientOrderNumber = self.getOrderID(),
                ReportDryMatter = report_dry_matter,
                Profile = ar_profile,
                Analyses = analyses,
                Remarks = aritem.getClientRemarks(),
                Priority = priority,
                )
            ar.setSample(sample_uid)
            sample = ar.getSample()
            ar.setSampleType(sampletypeuid)
            ar_uid = ar.UID()
            aritem.setAnalysisRequest(ar_uid)
            ars.append(ar_id)
            ar._renameAfterCreation()
            progress_index = float(row_count)/float(item_count)*100.0
            progress = ProgressState(request, progress_index)
            event.notify(UpdateProgressEvent(progress))
            self._add_services_to_ar(ar, analyses)

        self.setDateApplied(DateTime())
        self.reindexObject()
Example #12
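Submits a 'classic' AR import: for each ARImportItem the analyses are resolved by service keyword, then a Sample and an AnalysisRequest are created and linked while a progress bar is updated.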
    def _submit_arimport_c(self):
        """ load the classic import layout """

        ars = []
        samples = []
        valid_batch = True
        client = self.aq_parent
        contact_obj = None
        cc_contact_obj = None

        # validate contact
        for contact in client.objectValues('Contact'):
            if contact.getUsername() == self.getContactID():
                contact_obj = contact
            if self.getCCContactID() is None:
                if contact_obj is not None:
                    break
            else:
                if contact.getUsername() == self.getCCContactID():
                    cc_contact_obj = contact
                    if contact_obj is not None:
                        break

        if contact_obj is None:
            valid_batch = False

        # get Keyword to ServiceId Map
        services = {}
        for service in self.bika_setup_catalog(
                portal_type = 'AnalysisService'):
            obj = service.getObject()
            keyword = obj.getKeyword()
            if keyword:
                services[keyword] = '%s:%s' % (obj.UID(), obj.getPrice())

        samplepoints = self.bika_setup_catalog(
            portal_type = 'SamplePoint',
            Title = self.getSamplePoint())
        if not samplepoints:
            valid_batch = False

        aritems = self.objectValues('ARImportItem')
        request = self.REQUEST
        title = 'Submitting AR Import'
        bar = ProgressBar(
                self, request, title, description='')
        event.notify(InitialiseProgressBar(bar))

        SamplingWorkflowEnabled = \
            self.bika_setup.getSamplingWorkflowEnabled()
        row_count = 0
        item_count = len(aritems)
        prefix = 'Sample'
        for aritem in aritems:
            row_count += 1
            # set up analyses
            analyses = []
            for analysis in aritem.getAnalyses(full_objects=True):
                if analysis in services:
                    analyses.append(services[analysis])
                else:
                    valid_batch = False

            sampletypes = self.portal_catalog(
                portal_type = 'SampleType',
                sortable_title = aritem.getSampleType().lower(),
                )
            if not sampletypes:
                valid_batch = False
                return
            sampletypeuid = sampletypes[0].getObject().UID()
            if aritem.getSampleDate():
                date_items = aritem.getSampleDate().split('/')
                sample_date = DateTime(
                    int(date_items[2]), int(date_items[1]), int(date_items[0]))
            else:
                sample_date = None

            sample_id = '%s-%s' % (prefix, tmpID())
            sample = _createObjectByType("Sample", client, sample_id)
            sample.unmarkCreationFlag()
            sample.edit(
                SampleID = sample_id,
                ClientReference = aritem.getClientRef(),
                ClientSampleID = aritem.getClientSid(),
                SampleType = aritem.getSampleType(),
                DateSampled = sample_date,
                SamplingDate = sample_date,
                DateReceived = DateTime(),
                )
            sample._renameAfterCreation()
            sample.setSamplePoint(self.getSamplePoint())
            sample.setSampleID(sample.getId())
            event.notify(ObjectInitializedEvent(sample))
            sample.at_post_create_script()
            sample_uid = sample.UID()
            samples.append(sample_id)
            aritem.setSample(sample_uid)

            priorities = self.bika_setup_catalog(
                portal_type = 'ARPriority',
                sortable_title = aritem.Priority.lower(),
                )
            if len(priorities) < 1:
                logger.warning(
                    'Invalid Priority: validation should have prevented this')
                priority = ''
            else:
                priority = priorities[0].getObject()

            # Create AR
            ar_id = tmpID()
            ar = _createObjectByType("AnalysisRequest", client, ar_id)
            if aritem.getReportDryMatter().lower() == 'y':
                report_dry_matter = True
            else:
                report_dry_matter = False
            ar.unmarkCreationFlag()
            ar.edit(
                RequestID = ar_id,
                Contact = self.getContact(),
                CCContact = self.getCCContact(),
                CCEmails = self.getCCEmailsInvoice(),
                ClientOrderNumber = self.getOrderID(),
                ReportDryMatter = report_dry_matter,
                Analyses = analyses,
                Priority = priority,
                )
            ar.setSample(sample_uid)
            sample = ar.getSample()
            ar.setSampleType(sampletypeuid)
            ar_uid = ar.UID()
            aritem.setAnalysisRequest(ar_uid)
            ars.append(ar_id)
            ar._renameAfterCreation()

            self._add_services_to_ar(ar, analyses)

            progress_index = float(row_count)/float(item_count)*100.0
            progress = ProgressState(request, progress_index)
            event.notify(UpdateProgressEvent(progress))
        self.setDateApplied(DateTime())
        self.reindexObject()
Example #13
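Creates an AnalysisRequest (primary or secondary), sets its analyses and sample partitions, and walks sample, partitions and analyses through the initial workflow transitions.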
def create_analysisrequest(context, request, values):
    """Create an AR.

    :param context the container in which the AR will be created (Client)
    :param request the request object
    :param values a dictionary containing fieldname/value pairs, which
           will be applied.  Some fields will have specific code to handle them,
           and others will be directly written to the schema.
    :return the new AR instance

    Special keys present (or required) in the values dict, which are not present
    in the schema:

        - Partitions: data about partitions to be created, and the
                      analyses that are to be assigned to each.

        - Prices: custom prices set in the HTML form.

        - ResultsRange: Specification values entered in the HTML form.

    """
    # Gather necessary tools
    workflow = getToolByName(context, 'portal_workflow')
    bc = getToolByName(context, 'bika_catalog')

    # Create new sample or locate the existing for secondary AR
    if values.get('Sample'):
        secondary = True
        if ISample.providedBy(values['Sample']):
            sample = values['Sample']
        else:
            sample = bc(UID=values['Sample'])[0].getObject()
        samplingworkflow_enabled = sample.getSamplingWorkflowEnabled()
    else:
        secondary = False
        samplingworkflow_enabled = context.bika_setup.getSamplingWorkflowEnabled()
        sample = create_sample(context, request, values)

    # Create the Analysis Request
    ar = _createObjectByType('AnalysisRequest', context, tmpID())
    ar.setSample(sample)

    # processForm renames the sample; this requires values to contain the Sample.
    values['Sample'] = sample
    ar.processForm(REQUEST=request, values=values)

    # Object has been renamed
    ar.edit(RequestID=ar.getId())

    # Set initial AR state
    workflow_action = 'sampling_workflow' if samplingworkflow_enabled \
        else 'no_sampling_workflow'
    workflow.doActionFor(ar, workflow_action)

    # Set analysis request analyses
    ar.setAnalyses(values['Analyses'],
                   prices=values.get("Prices", []),
                   specs=values.get('ResultsRange', []))
    analyses = ar.getAnalyses(full_objects=True)

    skip_receive = ['to_be_sampled', 'sample_due', 'sampled', 'to_be_preserved']
    if secondary:
        # Only 'sample_due' and 'sample_received' samples can be selected
        # for secondary analyses
        doActionFor(ar, 'sampled')
        doActionFor(ar, 'sample_due')
        sample_state = workflow.getInfoFor(sample, 'review_state')
        if sample_state not in skip_receive:
            doActionFor(ar, 'receive')

    for analysis in analyses:
        doActionFor(analysis, 'sample_due')
        analysis_state = workflow.getInfoFor(analysis, 'review_state')
        if analysis_state not in skip_receive:
            doActionFor(analysis, 'receive')

    if not secondary:
        # Create sample partitions
        partitions = []
        for n, partition in enumerate(values['Partitions']):
            # Calculate partition id
            partition_prefix = sample.getId() + "-P"
            partition_id = '%s%s' % (partition_prefix, n + 1)
            partition['part_id'] = partition_id
            # Point to or create sample partition
            if partition_id in sample.objectIds():
                partition['object'] = sample[partition_id]
            else:
                partition['object'] = create_samplepartition(
                    sample,
                    partition
                )
            # now assign analyses to this partition.
            obj = partition['object']
            for analysis in analyses:
                if analysis.getService().UID() in partition['services']:
                    analysis.setSamplePartition(obj)

            partitions.append(partition)

        # If Preservation is required for some partitions,
        # and the SamplingWorkflow is disabled, we need
        # to transition to to_be_preserved manually.
        if not samplingworkflow_enabled:
            to_be_preserved = []
            sample_due = []
            lowest_state = 'sample_due'
            for p in sample.objectValues('SamplePartition'):
                if p.getPreservation():
                    lowest_state = 'to_be_preserved'
                    to_be_preserved.append(p)
                else:
                    sample_due.append(p)
            for p in to_be_preserved:
                doActionFor(p, 'to_be_preserved')
            for p in sample_due:
                doActionFor(p, 'sample_due')
            doActionFor(sample, lowest_state)
            doActionFor(ar, lowest_state)

        # Transition pre-preserved partitions
        for p in partitions:
            if 'prepreserved' in p and p['prepreserved']:
                part = p['object']
                state = workflow.getInfoFor(part, 'review_state')
                if state == 'to_be_preserved':
                    workflow.doActionFor(part, 'preserve')

    # Return the newly created Analysis Request
    return ar
Example #14
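The worksheet 'reject' workflow script: rejected analyses are copied to RejectAnalysis objects on the old worksheet, and a replacement worksheet is created holding the original analyses plus matching duplicates and reference analyses.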
    def workflow_script_reject(self):
        """Copy real analyses to RejectAnalysis, with link to real
           create a new worksheet, with the original analyses, and new
           duplicates and references to match the rejected
           worksheet.
        """
        if skip(self, "reject"):
            return
        utils = getToolByName(self, 'plone_utils')
        workflow = self.portal_workflow

        def copy_src_fields_to_dst(src, dst):
            # These will be ignored when copying field values between analyses
            ignore_fields = [
                'UID',
                'id',
                'title',
                'allowDiscussion',
                'subject',
                'description',
                'location',
                'contributors',
                'creators',
                'effectiveDate',
                'expirationDate',
                'language',
                'rights',
                'creation_date',
                'modification_date',
                'Layout',  # ws
                'Analyses',  # ws
            ]
            fields = src.Schema().fields()
            for field in fields:
                fieldname = field.getName()
                if fieldname in ignore_fields:
                    continue
                getter = getattr(
                    src, 'get' + fieldname,
                    src.Schema().getField(fieldname).getAccessor(src))
                setter = getattr(
                    dst, 'set' + fieldname,
                    dst.Schema().getField(fieldname).getMutator(dst))
                if getter is None or setter is None:
                    # ComputedField
                    continue
                setter(getter())

        analysis_positions = {}
        for item in self.getLayout():
            analysis_positions[item['analysis_uid']] = item['position']
        old_layout = []
        new_layout = []

        # New worksheet
        worksheets = self.aq_parent
        new_ws = _createObjectByType('Worksheet', worksheets, tmpID())
        new_ws.unmarkCreationFlag()
        new_ws_id = renameAfterCreation(new_ws)
        copy_src_fields_to_dst(self, new_ws)
        new_ws.edit(Number=new_ws_id, Remarks=self.getRemarks())

        # Objects are being created inside other contexts, but we want their
        # workflow handlers to be aware of which worksheet this is occurring in.
        # We save the worksheet in request['context_uid'].
        # We reset it again below....  be very sure that this is set to the
        # UID of the containing worksheet before invoking any transitions on
        # analyses.
        self.REQUEST['context_uid'] = new_ws.UID()

        # loop all analyses
        analyses = self.getAnalyses()
        new_ws_analyses = []
        old_ws_analyses = []
        for analysis in analyses:
            # Skip published or verified analyses
            review_state = workflow.getInfoFor(analysis, 'review_state', '')
            if review_state in ['published', 'verified', 'retracted']:
                position = analysis_positions[analysis.UID()]
                old_ws_analyses.append(analysis.UID())
                old_layout.append({
                    'position': position,
                    'type': 'a',
                    'analysis_uid': analysis.UID(),
                    'container_uid': analysis.aq_parent.UID()
                })
                continue
            # Normal analyses:
            # - Create matching RejectAnalysis inside old WS
            # - Link analysis to new WS in same position
            # - Copy all field values
            # - Clear analysis result, and set Retested flag
            if analysis.portal_type == 'Analysis':
                reject = _createObjectByType('RejectAnalysis', self, tmpID())
                reject.unmarkCreationFlag()
                reject_id = renameAfterCreation(reject)
                copy_src_fields_to_dst(analysis, reject)
                reject.setAnalysis(analysis)
                reject.reindexObject()
                analysis.edit(
                    Result=None,
                    Retested=True,
                )
                analysis.reindexObject()
                position = analysis_positions[analysis.UID()]
                old_ws_analyses.append(reject.UID())
                old_layout.append({
                    'position': position,
                    'type': 'r',
                    'analysis_uid': reject.UID(),
                    'container_uid': self.UID()
                })
                new_ws_analyses.append(analysis.UID())
                new_layout.append({
                    'position': position,
                    'type': 'a',
                    'analysis_uid': analysis.UID(),
                    'container_uid': analysis.aq_parent.UID()
                })
            # Reference analyses
            # - Create a new reference analysis in the new worksheet
            # - Transition the original analysis to 'rejected' state
            if analysis.portal_type == 'ReferenceAnalysis':
                service_uid = analysis.getService().UID()
                reference = analysis.aq_parent
                reference_type = analysis.getReferenceType()
                new_analysis_uid = reference.addReferenceAnalysis(
                    service_uid, reference_type)
                position = analysis_positions[analysis.UID()]
                old_ws_analyses.append(analysis.UID())
                old_layout.append({
                    'position': position,
                    'type': reference_type,
                    'analysis_uid': analysis.UID(),
                    'container_uid': reference.UID()
                })
                new_ws_analyses.append(new_analysis_uid)
                new_layout.append({
                    'position': position,
                    'type': reference_type,
                    'analysis_uid': new_analysis_uid,
                    'container_uid': reference.UID()
                })
                workflow.doActionFor(analysis, 'reject')
                new_reference = reference.uid_catalog(
                    UID=new_analysis_uid)[0].getObject()
                workflow.doActionFor(new_reference, 'assign')
                analysis.reindexObject()
            # Duplicate analyses
            # - Create a new duplicate inside the new worksheet
            # - Transition the original analysis to 'rejected' state
            if analysis.portal_type == 'DuplicateAnalysis':
                src_analysis = analysis.getAnalysis()
                ar = src_analysis.aq_parent
                service = src_analysis.getService()
                duplicate_id = new_ws.generateUniqueId('DuplicateAnalysis')
                new_duplicate = _createObjectByType('DuplicateAnalysis',
                                                    new_ws, duplicate_id)
                new_duplicate.unmarkCreationFlag()
                copy_src_fields_to_dst(analysis, new_duplicate)
                workflow.doActionFor(new_duplicate, 'assign')
                new_duplicate.reindexObject()
                position = analysis_positions[analysis.UID()]
                old_ws_analyses.append(analysis.UID())
                old_layout.append({
                    'position': position,
                    'type': 'd',
                    'analysis_uid': analysis.UID(),
                    'container_uid': self.UID()
                })
                new_ws_analyses.append(new_duplicate.UID())
                new_layout.append({
                    'position': position,
                    'type': 'd',
                    'analysis_uid': new_duplicate.UID(),
                    'container_uid': new_ws.UID()
                })
                workflow.doActionFor(analysis, 'reject')
                analysis.reindexObject()

        new_ws.setAnalyses(new_ws_analyses)
        new_ws.setLayout(new_layout)
        new_ws.replaces_rejected_worksheet = self.UID()
        for analysis in new_ws.getAnalyses():
            review_state = workflow.getInfoFor(analysis, 'review_state', '')
            if review_state == 'to_be_verified':
                changeWorkflowState(analysis, "bika_analysis_workflow",
                                    "sample_received")
        self.REQUEST['context_uid'] = self.UID()
        self.setLayout(old_layout)
        self.setAnalyses(old_ws_analyses)
        self.replaced_by = new_ws.UID()
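
All of the examples rely on the same Archetypes creation idiom: create the object under a temporary id, clear the creation flag, set its fields, then rename it to its final id. Below is a minimal sketch of that idiom, assuming a Plone site with bika.lims installed; the helper name create_object and its arguments are illustrative, not part of the bika.lims API, and the import paths are those used by bika.lims 3.x.

from Products.CMFPlone.utils import _createObjectByType
from bika.lims.utils import tmpID, renameAfterCreation


def create_object(container, portal_type, **field_values):
    """Create and initialise a content object the way the examples above do."""
    # create the object under a temporary id
    obj = _createObjectByType(portal_type, container, tmpID())
    # clear the Archetypes creation flag so the object is considered complete
    obj.unmarkCreationFlag()
    # write the field values through the generated Archetypes mutators
    obj.edit(**field_values)
    # rename the object to its final, type-specific id
    renameAfterCreation(obj)
    return obj

Calling obj.processForm() instead, as several examples do, performs the same rename and additionally fires the standard creation events and reindexing.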
Beispiel #22
0
    def workflow_script_reject(self):
        """Copy real analyses to RejectAnalysis, with link to real
           create a new worksheet, with the original analyses, and new
           duplicates and references to match the rejected
           worksheet.
        """
        if skip(self, "reject"):
            return
        utils = getToolByName(self, 'plone_utils')
        workflow = self.portal_workflow

        def copy_src_fields_to_dst(src, dst):
            # These will be ignored when copying field values between analyses
            ignore_fields = ['UID',
                             'id',
                             'title',
                             'allowDiscussion',
                             'subject',
                             'description',
                             'location',
                             'contributors',
                             'creators',
                             'effectiveDate',
                             'expirationDate',
                             'language',
                             'rights',
                             'creation_date',
                             'modification_date',
                             'Layout',    # ws
                             'Analyses',  # ws
            ]
            fields = src.Schema().fields()
            for field in fields:
                fieldname = field.getName()
                if fieldname in ignore_fields:
                    continue
                getter = getattr(src, 'get'+fieldname,
                                 src.Schema().getField(fieldname).getAccessor(src))
                setter = getattr(dst, 'set'+fieldname,
                                 dst.Schema().getField(fieldname).getMutator(dst))
                if getter is None or setter is None:
                    # ComputedField
                    continue
                setter(getter())

        analysis_positions = {}
        for item in self.getLayout():
            analysis_positions[item['analysis_uid']] = item['position']
        old_layout = []
        new_layout = []

        # New worksheet
        worksheets = self.aq_parent
        new_ws = _createObjectByType('Worksheet', worksheets, tmpID())
        new_ws.unmarkCreationFlag()
        new_ws_id = renameAfterCreation(new_ws)
        copy_src_fields_to_dst(self, new_ws)
        new_ws.edit(
            Number = new_ws_id,
            Remarks = self.getRemarks()
        )

        # Objects are being created inside other contexts, but we want their
        # workflow handlers to be aware of which worksheet this is occurring in.
        # We save the worksheet in request['context_uid'].
        # We reset it again below....  be very sure that this is set to the
        # UID of the containing worksheet before invoking any transitions on
        # analyses.
        self.REQUEST['context_uid'] = new_ws.UID()

        # loop all analyses
        analyses = self.getAnalyses()
        new_ws_analyses = []
        old_ws_analyses = []
        for analysis in analyses:
            # Skip published, verified or retracted analyses; they keep
            # their original slot in the old worksheet.
            review_state = workflow.getInfoFor(analysis, 'review_state', '')
            if review_state in ['published', 'verified', 'retracted']:
                position = analysis_positions[analysis.UID()]
                old_ws_analyses.append(analysis.UID())
                old_layout.append({'position': position,
                                   'type': 'a',
                                   'analysis_uid': analysis.UID(),
                                   'container_uid': analysis.aq_parent.UID()})
                continue
            # Normal analyses:
            # - Create matching RejectAnalysis inside old WS
            # - Link analysis to new WS in same position
            # - Copy all field values
            # - Clear analysis result, and set Retested flag
            if analysis.portal_type == 'Analysis':
                reject = _createObjectByType('RejectAnalysis', self, tmpID())
                reject.unmarkCreationFlag()
                reject_id = renameAfterCreation(reject)
                copy_src_fields_to_dst(analysis, reject)
                reject.setAnalysis(analysis)
                reject.reindexObject()
                analysis.edit(
                    Result = None,
                    Retested = True,
                )
                analysis.reindexObject()
                position = analysis_positions[analysis.UID()]
                old_ws_analyses.append(reject.UID())
                old_layout.append({'position': position,
                                   'type':'r',
                                   'analysis_uid':reject.UID(),
                                   'container_uid':self.UID()})
                new_ws_analyses.append(analysis.UID())
                new_layout.append({'position': position,
                                   'type':'a',
                                   'analysis_uid':analysis.UID(),
                                   'container_uid':analysis.aq_parent.UID()})
            # Reference analyses
            # - Create a new reference analysis in the new worksheet
            # - Transition the original analysis to 'rejected' state
            if analysis.portal_type == 'ReferenceAnalysis':
                service_uid = analysis.getService().UID()
                reference = analysis.aq_parent
                reference_type = analysis.getReferenceType()
                new_analysis_uid = reference.addReferenceAnalysis(service_uid,
                                                                  reference_type)
                position = analysis_positions[analysis.UID()]
                old_ws_analyses.append(analysis.UID())
                old_layout.append({'position': position,
                                   'type':reference_type,
                                   'analysis_uid':analysis.UID(),
                                   'container_uid':reference.UID()})
                new_ws_analyses.append(new_analysis_uid)
                new_layout.append({'position': position,
                                   'type':reference_type,
                                   'analysis_uid':new_analysis_uid,
                                   'container_uid':reference.UID()})
                workflow.doActionFor(analysis, 'reject')
                new_reference = reference.uid_catalog(UID=new_analysis_uid)[0].getObject()
                workflow.doActionFor(new_reference, 'assign')
                analysis.reindexObject()
            # Duplicate analyses
            # - Create a new duplicate inside the new worksheet
            # - Transition the original analysis to 'rejected' state
            if analysis.portal_type == 'DuplicateAnalysis':
                src_analysis = analysis.getAnalysis()
                ar = src_analysis.aq_parent
                service = src_analysis.getService()
                duplicate_id = new_ws.generateUniqueId('DuplicateAnalysis')
                new_duplicate = _createObjectByType('DuplicateAnalysis',
                                                    new_ws, duplicate_id)
                new_duplicate.unmarkCreationFlag()
                copy_src_fields_to_dst(analysis, new_duplicate)
                workflow.doActionFor(new_duplicate, 'assign')
                new_duplicate.reindexObject()
                position = analysis_positions[analysis.UID()]
                old_ws_analyses.append(analysis.UID())
                old_layout.append({'position': position,
                                   'type':'d',
                                   'analysis_uid':analysis.UID(),
                                   'container_uid':self.UID()})
                new_ws_analyses.append(new_duplicate.UID())
                new_layout.append({'position': position,
                                   'type':'d',
                                   'analysis_uid':new_duplicate.UID(),
                                   'container_uid':new_ws.UID()})
                workflow.doActionFor(analysis, 'reject')
                analysis.reindexObject()

        new_ws.setAnalyses(new_ws_analyses)
        new_ws.setLayout(new_layout)
        new_ws.replaces_rejected_worksheet = self.UID()
        for analysis in new_ws.getAnalyses():
            review_state = workflow.getInfoFor(analysis, 'review_state', '')
            if review_state == 'to_be_verified':
                changeWorkflowState(analysis, "bika_analysis_workflow", "sample_received")
        self.REQUEST['context_uid'] = self.UID()
        self.setLayout(old_layout)
        self.setAnalyses(old_ws_analyses)
        self.replaced_by = new_ws.UID()
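
A sketch of how this script is reached, assuming the bika worksheet workflow
wires workflow_script_reject to its 'reject' transition (the wiring lives in
the workflow definition, not in this snippet):

    from Products.CMFCore.utils import getToolByName

    def reject_worksheet(worksheet):
        # Invoking 'reject' runs workflow_script_reject, which builds the
        # replacement worksheet and records links in both directions.
        workflow = getToolByName(worksheet, 'portal_workflow')
        workflow.doActionFor(worksheet, 'reject')
        return worksheet.replaced_by  # UID of the replacement worksheet
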
Beispiel #23
0
    def _create_ar(self, context, request):
        """Creates AnalysisRequest object, with supporting Sample, Partition
        and Analysis objects.  The client is retrieved from the obj_path
        key in the request.

        Required request parameters:

            - Contact: One client contact Fullname.  The contact must exist
              in the specified client.  The first Contact with the specified
              value in its Fullname field will be used.

            - SampleType_<index> - Must be an existing sample type.

        Optional request parameters:

        - CCContacts: A list of contact Fullnames, which will be copied on
          all messages related to this AR and its sample or results.

        - CCEmails: A list of email addresses to include as above.

        - Sample_id: Create a secondary AR with an existing sample.  If
          unspecified, a new sample is created.

        - Specification: a lookup to set Analysis specs default values
          for all analyses

        - Analysis_Specification: specs (or overrides) per analysis, using
          a special lookup format.

            &Analysis_Specification:list=<Keyword>:min:max:error&...


        """

        wftool = getToolByName(context, 'portal_workflow')
        bc = getToolByName(context, 'bika_catalog')
        bsc = getToolByName(context, 'bika_setup_catalog')
        pc = getToolByName(context, 'portal_catalog')
        ret = {
            "url": router.url_for("create", force_external=True),
            "success": True,
            "error": False,
        }
        SamplingWorkflowEnabled = context.bika_setup.getSamplingWorkflowEnabled()
        for field in [
            'Client',
            'SampleType',
            'Contact',
            'SamplingDate',
            'Services']:
            self.require(field)
            self.used(field)

        try:
            client = resolve_request_lookup(context, request, 'Client')[0].getObject()
        except IndexError:
            raise Exception("Client not found")

        # Sample_id
        if 'Sample' in request:
            try:
                sample = resolve_request_lookup(context, request, 'Sample')[0].getObject()
            except IndexError:
                raise Exception("Sample not found")
        else:
            # Primary AR
            sample = _createObjectByType("Sample", client, tmpID())
            sample.unmarkCreationFlag()
            fields = set_fields_from_request(sample, request)
            for field in fields:
                self.used(field)
            sample._renameAfterCreation()
            sample.setSampleID(sample.getId())
            event.notify(ObjectInitializedEvent(sample))
            sample.at_post_create_script()

            if SamplingWorkflowEnabled:
                wftool.doActionFor(sample, 'sampling_workflow')
            else:
                wftool.doActionFor(sample, 'no_sampling_workflow')

        ret['sample_id'] = sample.getId()

        parts = [{'services': [],
                  'container': [],
                  'preservation': '',
                  'separate': False}]

        specs = self.get_specs_from_request()
        ar = _createObjectByType("AnalysisRequest", client, tmpID())
        ar.unmarkCreationFlag()
        fields = set_fields_from_request(ar, request)
        for field in fields:
            self.used(field)
        ar.setSample(sample.UID())
        ar._renameAfterCreation()
        ret['ar_id'] = ar.getId()
        brains = resolve_request_lookup(context, request, 'Services')
        service_uids = [p.UID for p in brains]
        new_analyses = ar.setAnalyses(service_uids, specs=specs)
        ar.setRequestID(ar.getId())
        ar.reindexObject()
        event.notify(ObjectInitializedEvent(ar))
        ar.at_post_create_script()

        # Create sample partitions
        parts_and_services = {}
        for _i, p in enumerate(parts):
            part_prefix = sample.getId() + "-P"
            if '%s%s' % (part_prefix, _i + 1) in sample.objectIds():
                parts[_i]['object'] = sample['%s%s' % (part_prefix, _i + 1)]
                parts_and_services['%s%s' % (part_prefix, _i + 1)] = p['services']
                part = parts[_i]['object']
            else:
                part = _createObjectByType("SamplePartition", sample, tmpID())
                parts[_i]['object'] = part
                container = None
                preservation = p['preservation']
                parts[_i]['prepreserved'] = False
                part.edit(
                    Container=container,
                    Preservation=preservation,
                )
                part.processForm()
                if SamplingWorkflowEnabled:
                    wftool.doActionFor(part, 'sampling_workflow')
                else:
                    wftool.doActionFor(part, 'no_sampling_workflow')
                parts_and_services[part.id] = p['services']

        if SamplingWorkflowEnabled:
            wftool.doActionFor(ar, 'sampling_workflow')
        else:
            wftool.doActionFor(ar, 'no_sampling_workflow')

        # Add analyses to sample partitions
        # XXX jsonapi create AR: right now, all new analyses are linked to the first samplepartition
        if new_analyses:
            analyses = list(part.getAnalyses())
            analyses.extend(new_analyses)
            part.edit(
                Analyses=analyses,
            )
            for analysis in new_analyses:
                analysis.setSamplePartition(part)

        # If Preservation is required for some partitions,
        # and the SamplingWorkflow is disabled, we need
        # to transition to to_be_preserved manually.
        if not SamplingWorkflowEnabled:
            to_be_preserved = []
            sample_due = []
            lowest_state = 'sample_due'
            for p in sample.objectValues('SamplePartition'):
                if p.getPreservation():
                    lowest_state = 'to_be_preserved'
                    to_be_preserved.append(p)
                else:
                    sample_due.append(p)
            for p in to_be_preserved:
                doActionFor(p, 'to_be_preserved')
            for p in sample_due:
                doActionFor(p, 'sample_due')
            doActionFor(sample, lowest_state)
            for analysis in ar.objectValues('Analysis'):
                doActionFor(analysis, lowest_state)
            doActionFor(ar, lowest_state)

        # receive secondary AR
        if request.get('Sample_id', ''):
            doActionFor(ar, 'sampled')
            doActionFor(ar, 'sample_due')
            not_receive = ['to_be_sampled', 'sample_due', 'sampled',
                           'to_be_preserved']
            sample_state = wftool.getInfoFor(sample, 'review_state')
            if sample_state not in not_receive:
                doActionFor(ar, 'receive')
            for analysis in ar.getAnalyses(full_objects=1):
                doActionFor(analysis, 'sampled')
                doActionFor(analysis, 'sample_due')
                if sample_state not in not_receive:
                    doActionFor(analysis, 'receive')

        if self.unused:
            raise BadRequest("The following request fields were not used: %s.  Request aborted." % self.unused)

        return ret
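
The require()/used() calls above enforce a strict bookkeeping contract: every
field in the request must be consumed, or the create aborts with BadRequest.
A hypothetical reimplementation of that contract, for illustration only:

    class FieldTracker:
        def __init__(self, form):
            self.form = form
            self.unused = list(form.keys())

        def require(self, fieldname):
            # Abort early if a mandatory request field is missing.
            if fieldname not in self.form:
                raise Exception("Required field missing: %s" % fieldname)

        def used(self, fieldname):
            # Mark a request field as consumed.
            if fieldname in self.unused:
                self.unused.remove(fieldname)
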
Beispiel #24
0
    def cloneAR(self, ar):
        newar = _createObjectByType("AnalysisRequest", ar.aq_parent, tmpID())
        newar.title = ar.title
        newar.description = ar.description
        newar.setContact(ar.getContact())
        newar.setCCContact(ar.getCCContact())
        newar.setCCEmails(ar.getCCEmails())
        newar.setBatch(ar.getBatch())
        newar.setTemplate(ar.getTemplate())
        newar.setProfile(ar.getProfile())
        newar.setSamplingDate(ar.getSamplingDate())
        newar.setSampleType(ar.getSampleType())
        newar.setSamplePoint(ar.getSamplePoint())
        newar.setStorageLocation(ar.getStorageLocation())
        newar.setSamplingDeviation(ar.getSamplingDeviation())
        newar.setPriority(ar.getPriority())
        newar.setSampleCondition(ar.getSampleCondition())
        newar.setSample(ar.getSample())
        newar.setClientOrderNumber(ar.getClientOrderNumber())
        newar.setClientReference(ar.getClientReference())
        newar.setClientSampleID(ar.getClientSampleID())
        newar.setDefaultContainerType(ar.getDefaultContainerType())
        newar.setAdHoc(ar.getAdHoc())
        newar.setComposite(ar.getComposite())
        newar.setReportDryMatter(ar.getReportDryMatter())
        newar.setInvoiceExclude(ar.getInvoiceExclude())
        newar.setAttachment(ar.getAttachment())
        newar.setInvoice(ar.getInvoice())
        newar.setDateReceived(ar.getDateReceived())
        newar.setMemberDiscount(ar.getMemberDiscount())
        # Set the results for each AR analysis
        ans = ar.getAnalyses(full_objects=True)
        for an in ans:
            nan = _createObjectByType("Analysis", newar, an.getKeyword())
            nan.setService(an.getService())
            nan.setCalculation(an.getCalculation())
            nan.setInterimFields(an.getInterimFields())
            nan.setResult(an.getResult())
            nan.setResultDM(an.getResultDM())
            nan.setRetested(False)
            nan.setMaxTimeAllowed(an.getMaxTimeAllowed())
            nan.setDueDate(an.getDueDate())
            nan.setDuration(an.getDuration())
            nan.setReportDryMatter(an.getReportDryMatter())
            nan.setAnalyst(an.getAnalyst())
            nan.setInstrument(an.getInstrument())
            nan.setSamplePartition(an.getSamplePartition())
            nan.unmarkCreationFlag()
            notify(ObjectInitializedEvent(nan))
            changeWorkflowState(nan, 'bika_analysis_workflow',
                                'to_be_verified')
            nan.reindexObject()

        newar.reindexObject()
        newar.aq_parent.reindexObject()
        renameAfterCreation(newar)
        newar.setRequestID(newar.getId())

        if hasattr(ar, 'setChildAnalysisRequest'):
            ar.setChildAnalysisRequest(newar)
        newar.setParentAnalysisRequest(ar)
        return newar
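
A minimal usage sketch for cloneAR, assuming `ar` is an existing
AnalysisRequest that is being invalidated and retested (the parent/child
links set at the end of the method support exactly such a flow):

    child = self.cloneAR(ar)
    # The clone carries the original results and is already 'to_be_verified'
    assert child.getParentAnalysisRequest() == ar
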
Beispiel #25
0
    def process(self):
        self._parser.parse()
        parsed = self._parser.resume()
        self._errors = self._parser.errors
        self._warns = self._parser.warns
        self._logs = self._parser.logs
        self._priorizedsearchcriteria = ''

        if parsed is False:
            return False

        # Allowed analysis states
        allowed_ar_states_msg = [t(_(s)) for s in self.getAllowedARStates()]
        allowed_an_states_msg = [t(_(s)) for s in self.getAllowedAnalysisStates()]
        self.log("Allowed Analysis Request states: ${allowed_states}",
                 mapping={'allowed_states': ', '.join(allowed_ar_states_msg)})
        self.log("Allowed analysis states: ${allowed_states}",
                 mapping={'allowed_states': ', '.join(allowed_an_states_msg)})

        # Exclude non existing ACODEs
        acodes = []
        ancount = 0
        arprocessed = []
        instprocessed = []
        importedars = {}
        importedinsts = {}
        rawacodes = self._parser.getAnalysisKeywords()
        exclude = self.getKeywordsToBeExcluded()
        for acode in rawacodes:
            if acode in exclude:
                continue
            service = self.bsc(getKeyword=acode)
            if not service:
                self.warn('Service keyword ${analysis_keyword} not found',
                          mapping={"analysis_keyword": acode})
            else:
                acodes.append(acode)
        if len(acodes) == 0:
            self.err("Service keywords: no matches found")

        searchcriteria = self.getIdSearchCriteria()
        #self.log(_("Search criterias: %s") % (', '.join(searchcriteria)))
        for objid, results in self._parser.getRawResults().iteritems():
            # Allowed more than one result for the same sample and analysis.
            # Needed for calibration tests
            for result in results:
                analyses = self._getZODBAnalyses(objid)
                inst = None
                if len(analyses) == 0 and self.instrument_uid:
                    # No registered analyses found, but maybe we need to
                    # create them first if an instrument id has been set
                    # in the import form.
                    insts = self.bsc(portal_type='Instrument', UID=self.instrument_uid)
                    if len(insts) == 0:
                        # No instrument found
                        self.err("No Analysis Request with '${allowed_ar_states}' "
                                 "states found, And no QC analyses found for ${object_id}",
                                 mapping={"allowed_ar_states": ', '.join(allowed_ar_states_msg),
                                          "object_id": objid})
                        self.err("Instrument not found")
                        continue

                    inst = insts[0].getObject()

                    # Create a new ReferenceAnalysis and link it to the Instrument
                    # Here we have an objid (i.e. R01200012) and
                    # a dict with results (the key is the AS keyword).
                    # How can we create a ReferenceAnalysis if we don't know
                    # which ReferenceSample we might use?
                    # Ok. The objid HAS to be the ReferenceSample code.
                    refsample = self.bc(portal_type='ReferenceSample', id=objid)
                    if refsample and len(refsample) == 1:
                        refsample = refsample[0].getObject()

                    elif refsample and len(refsample) > 1:
                        # More than one reference sample found!
                        self.err(
                            "More than one reference sample found for '${object_id}'",
                            mapping={"object_id": objid})
                        continue

                    else:
                        # No reference sample found!
                        self.err("No Reference Sample found for ${object_id}",
                                 mapping={"object_id": objid})
                        continue

                    # For each acode, create a ReferenceAnalysis and attach it
                    # to the Reference Sample
                    reference_type = 'b' if refsample.getBlank() else 'c'
                    services = self.bsc(portal_type='AnalysisService')
                    service_uids = [service.UID for service in services
                                    if service.getObject().getKeyword() in result.keys()]
                    analyses = inst.addReferences(refsample, service_uids)

                elif len(analyses) == 0:
                    # No analyses found
                    self.err("No Analysis Request with '${allowed_ar_states}' "
                             "states neither QC analyses found for ${object_id}",
                             mapping={
                                 "allowed_ar_states":', '.join(allowed_ar_states_msg),
                                 "object_id": objid})
                    continue

                # Look for timestamp
                capturedate = result.get('DateTime',{}).get('DateTime',None)
                if capturedate:
                    del result['DateTime']
                for acode, values in result.iteritems():
                    if acode not in acodes:
                        # Analysis keyword doesn't exist
                        continue

                    ans = [analysis for analysis in analyses \
                           if analysis.getKeyword() == acode]

                    if len(ans) > 1:
                        self.err("More than one analyses found for ${object_id} and ${analysis_keyword)",
                                 mapping={"object_id": objid,
                                          "analysis_keyword": acode})
                        continue

                    elif len(ans) == 0:
                        self.err("No analyses found for ${object_id} and ${analysis_keyword)",
                                 mapping={"object_id": objid,
                                          "analysis_keyword": acode})
                        continue

                    analysis = ans[0]
                    if capturedate:
                        values['DateTime'] = capturedate
                    processed = self._process_analysis(objid, analysis, values)
                    if processed:
                        ancount += 1
                        if inst:
                            # Calibration Test (import to Instrument)
                            instprocessed.append(inst.UID())
                            importedinst = importedinsts.get(inst.title, [])
                            if acode not in importedinst:
                                importedinst.append(acode)
                            importedinsts[inst.title] = importedinst
                        else:
                            ar = analysis.aq_parent if analysis.portal_type == 'Analysis' else None
                            if ar and ar.UID:
                                # Set AR imported info
                                arprocessed.append(ar.UID())
                                importedar = importedars.get(ar.getRequestID(), [])
                                if acode not in importedar:
                                    importedar.append(acode)
                                importedars[ar.getRequestID()] = importedar

                        # Create the AttachmentType for mime type if not exists
                        attuid = None
                        attachmentType = self.bsc(portal_type="AttachmentType",
                                                  title=self._parser.getAttachmentFileType())
                        if len(attachmentType) == 0:
                            try:
                                folder = self.context.bika_setup.bika_attachmenttypes
                                obj = _createObjectByType("AttachmentType", folder, tmpID())
                                obj.edit(title=self._parser.getAttachmentFileType(),
                                         description="Autogenerated file type")
                                obj.unmarkCreationFlag()
                                renameAfterCreation(obj)
                                attuid = obj.UID()
                            except Exception:
                                attuid = None
                                self.err(
                                    "Unable to create the Attachment Type ${mime_type}",
                                    mapping={
                                    "mime_type": self._parser.getFileMimeType()})
                        else:
                            attuid = attachmentType[0].UID

                        if attuid is not None:
                            try:
                                # Attach the file to the Analysis
                                wss = analysis.getBackReferences('WorksheetAnalysis')
                                if wss and len(wss) > 0:
                                    # TODO: See if we can avoid using the WS and use
                                    # the Analysis directly (useful for CalibrationTest)
                                    ws = wss[0]
                                    attachment = _createObjectByType("Attachment", ws, tmpID())
                                    attachment.edit(
                                        AttachmentFile=self._parser.getInputFile(),
                                        AttachmentType=attuid,
                                        AttachmentKeys='Results, Automatic import')
                                    attachment.reindexObject()
                                    others = analysis.getAttachment()
                                    attachments = []
                                    for other in others:
                                        if other.getAttachmentFile().filename != attachment.getAttachmentFile().filename:
                                            attachments.append(other.UID())
                                    attachments.append(attachment.UID())
                                    analysis.setAttachment(attachments)

                            except Exception:
                                # self.err("Unable to attach results file '${file_name}' to AR ${request_id}",
                                #          mapping={"file_name": self._parser.getInputFile().filename,
                                #                   "request_id": ar.getRequestID()})
                                pass

        # Calculate analysis dependencies
        for aruid in arprocessed:
            ar = self.bc(portal_type='AnalysisRequest',
                         UID=aruid)
            ar = ar[0].getObject()
            analyses = ar.getAnalyses()
            for analysis in analyses:
                analysis = analysis.getObject()
                if analysis.calculateResult(True, True):
                    self.log(
                        "${request_id} calculated result for '${analysis_keyword}': '${analysis_result}'",
                        mapping={"request_id": ar.getRequestID(),
                                 "analysis_keyword": analysis.getKeyword(),
                                 "analysis_result": str(analysis.getResult())}
                    )

        # Not sure if there's any reason why ReferenceAnalyses have not
        # defined the method calculateResult...
        # Needs investigation.
        #for instuid in instprocessed:
        #    inst = self.bsc(portal_type='Instrument',UID=instuid)[0].getObject()
        #    analyses = inst.getAnalyses()
        #    for analysis in analyses:
        #        if (analysis.calculateResult(True, True)):
        #            self.log(_("%s calculated result for '%s': '%s'") %
        #                 (inst.title, analysis.getKeyword(), str(analysis.getResult())))

        for arid, acodes in importedars.iteritems():
            acodesmsg = ["Analysis %s" % acod for acod in acodes]
            self.log("${request_id}: ${analysis_keywords} imported sucessfully",
                     mapping={"request_id": arid,
                              "analysis_keywords": acodesmsg})

        for instid, acodes in importedinsts.iteritems():
            acodesmsg = ["Analysis %s" % acod for acod in acodes]
            msg = "%s: %s %s" % (instid, ", ".join(acodesmsg), "imported sucessfully")
            self.log(msg)

        if self.instrument_uid:
            self.log(
                "Import finished successfully: ${nr_updated_ars} ARs, "
                "${nr_updated_instruments} Instruments and ${nr_updated_results} "
                "results updated",
                mapping={"nr_updated_ars": str(len(importedars)),
                         "nr_updated_instruments": str(len(importedinsts)),
                         "nr_updated_results": str(ancount)})
        else:
            self.log(
                "Import finished successfully: ${nr_updated_ars} ARs and "
                "${nr_updated_results} results updated",
                mapping={"nr_updated_ars": str(len(importedars)),
                         "nr_updated_results": str(ancount)})
Beispiel #26
0
    def create(self, context, request):
        """/@@API/create: Create new object.

        Required parameters:

            - obj_type = portal_type of new object.
            - obj_path = path of new object, from plone site root. - Not required for
             obj_type=AnalysisRequest

        Optionally:

            - obj_id = ID of new object.

        All other parameters in the request are matched against the object's
        Schema.  If a matching field is found in the schema, then the value is
        taken from the request and sent to the field's mutator.

        Reference fields may have their target value(s) specified with a
        delimited string query syntax, containing the portal_catalog search:

            <FieldName>=index1:value1|index2:value2

        eg to set the Client of a batch:

            ...@@API/update?obj_path=<path>...
            ...&Client=title:<client_title>&...

        And, to set a multi-valued reference, these both work:

            ...@@API/update?obj_path=<path>...
            ...&InheritedObjects:list=title:AR1...
            ...&InheritedObjects:list=title:AR2...

            ...@@API/update?obj_path=<path>...
            ...&InheritedObjects[]=title:AR1...
            ...&InheritedObjects[]=title:AR2...

        The Analysis_Specification parameter is special: it mimics the
        format of python dictionaries, and only the service Keyword can
        be used to reference services.  Even if the keyword is not
        actively required, it must be supplied:

            <service_keyword>:min:max:error tolerance

        The function returns a dictionary as a json string:

        {
            runtime: Function running time.
            error: true or string(message) if error. false if no error.
            success: true or string(message) if success. false if no success.
        }

        >>> portal = layer['portal']
        >>> portal_url = portal.absolute_url()
        >>> from plone.app.testing import SITE_OWNER_NAME
        >>> from plone.app.testing import SITE_OWNER_PASSWORD

        Simple AR creation; no obj_path parameter is required:

        >>> browser = layer['getBrowser'](portal, loggedIn=True, username=SITE_OWNER_NAME, password=SITE_OWNER_PASSWORD)
        >>> browser.open(portal_url+"/@@API/create", "&".join([
        ... "obj_type=AnalysisRequest",
        ... "Client=portal_type:Client|id:client-1",
        ... "SampleType=portal_type:SampleType|title:Apple Pulp",
        ... "Contact=portal_type:Contact|getFullname:Rita Mohale",
        ... "Services:list=portal_type:AnalysisService|title:Calcium",
        ... "Services:list=portal_type:AnalysisService|title:Copper",
        ... "Services:list=portal_type:AnalysisService|title:Magnesium",
        ... "SamplingDate=2013-09-29",
        ... "Specification=portal_type:AnalysisSpec|title:Apple Pulp",
        ... ]))
        >>> browser.contents
        '{..."success": true...}'

        If any specified parameters cannot be matched to existing fields or
        properties of the created instance, the create should fail:

        >>> browser = layer['getBrowser'](portal, loggedIn=True, username=SITE_OWNER_NAME, password=SITE_OWNER_PASSWORD)
        >>> browser.open(portal_url+"/@@API/create?", "&".join([
        ... "obj_type=Batch",
        ... "obj_path=/batches",
        ... "title=Test",
        ... "Thing=Fish"
        ... ]))
        >>> browser.contents
        '{...The following request fields were not used: ...Thing...}'

        Now we test that the AR create also fails if some fields are spelled wrong:

        >>> browser = layer['getBrowser'](portal, loggedIn=True, username=SITE_OWNER_NAME, password=SITE_OWNER_PASSWORD)
        >>> browser.open(portal_url+"/@@API/create", "&".join([
        ... "obj_type=AnalysisRequest",
        ... "thing=Fish",
        ... "Client=portal_type:Client|id:client-1",
        ... "SampleType=portal_type:SampleType|title:Apple Pulp",
        ... "Contact=portal_type:Contact|getFullname:Rita Mohale",
        ... "Services:list=portal_type:AnalysisService|title:Calcium",
        ... "Services:list=portal_type:AnalysisService|title:Copper",
        ... "Services:list=portal_type:AnalysisService|title:Magnesium",
        ... "SamplingDate=2013-09-29"
        ... ]))
        >>> browser.contents
        '{...The following request fields were not used: ...thing...}'

        """
        savepoint = transaction.savepoint()
        self.context = context
        self.request = request
        self.unused = list(self.request.form.keys())
        self.used("form.submitted")
        self.used("__ac_name")
        self.used("__ac_password")
        # always require obj_type
        self.require("obj_type")
        obj_type = self.request['obj_type']
        self.used("obj_type")
        # AnalysisRequest shortcut: creates Sample, Partition, AR, Analyses.
        if obj_type == "AnalysisRequest":
            try:
                return self._create_ar(context, request)
            except:
                savepoint.rollback()
                raise
        # Other object types require explicit path as their parent
        self.require("obj_path")
        obj_path = self.request['obj_path']
        if not obj_path.startswith("/"):
            obj_path = "/" + obj_path
        self.used("obj_path")
        site_path = request['PATH_INFO'].replace("/@@API/create", "")
        parent = context.restrictedTraverse(str(site_path + obj_path))
        # normal permissions still apply for this user
        if not getSecurityManager().checkPermission(AccessJSONAPI, parent):
            msg = "You don't have the '{0}' permission on {1}".format(
                AccessJSONAPI, parent.absolute_url())
            raise Unauthorized(msg)

        obj_id = request.get("obj_id", "")
        _renameAfterCreation = False
        if not obj_id:
            _renameAfterCreation = True
            obj_id = tmpID()
        self.used("obj_id")

        ret = {
            "url": router.url_for("create", force_external=True),
            "success": True,
            "error": False,
        }

        try:
            obj = _createObjectByType(obj_type, parent, obj_id)
            obj.unmarkCreationFlag()
            if _renameAfterCreation:
                renameAfterCreation(obj)
            ret['obj_id'] = obj.getId()
            used_fields = set_fields_from_request(obj, request)
            for field in used_fields:
                self.used(field)
            obj.reindexObject()
            obj.aq_parent.reindexObject()
            event.notify(ObjectInitializedEvent(obj))
            obj.at_post_create_script()
        except:
            savepoint.rollback()
            raise

        if self.unused:
            raise BadRequest("The following request fields were not used: %s.  Request aborted." % self.unused)

        return ret
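
The delimited reference syntax documented above maps directly onto catalog
query terms. A sketch of how a value such as "portal_type:Client|id:client-1"
could be split (an illustration of the format only, not the actual
resolve_request_lookup implementation):

    def parse_lookup(value):
        query = {}
        for pair in value.split('|'):
            index, term = pair.split(':', 1)
            query[index] = term
        return query

    # -> {'portal_type': 'Client', 'id': 'client-1'}
    print(parse_lookup("portal_type:Client|id:client-1"))
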
Beispiel #28
0
def create_analysisrequest(
    context,
    request,
    values,
    analyses=None,
    partitions=None,
    specifications=None,
    prices=None
):
    # Avoid a shared mutable default argument for the analyses list
    analyses = analyses if analyses is not None else []
    # Gather necessary tools
    workflow = getToolByName(context, 'portal_workflow')
    bc = getToolByName(context, 'bika_catalog')

    # Create new sample or locate the existing for secondary AR
    if values.get('Sample'):
        secondary = True
        if ISample.providedBy(values['Sample']):
            sample = values['Sample']
        else:
            sample = bc(UID=values['Sample'])[0].getObject()
        workflow_enabled = sample.getSamplingWorkflowEnabled()
    else:
        secondary = False
        workflow_enabled = context.bika_setup.getSamplingWorkflowEnabled()
        sample = create_sample(context, request, values)

    # Create the Analysis Request
    ar = _createObjectByType('AnalysisRequest', context, tmpID())
    ar.setSample(sample)

    # processForm renames the sample; this requires values to contain the Sample.
    values['Sample'] = sample
    ar.processForm(REQUEST=request, values=values)

    # Object has been renamed
    ar.edit(RequestID=ar.getId())

    # Set initial AR state
    workflow_action = 'sampling_workflow' if workflow_enabled \
        else 'no_sampling_workflow'
    workflow.doActionFor(ar, workflow_action)

    # Set analysis request analyses
    analyses = ar.setAnalyses(analyses, prices=prices, specs=specifications)

    if secondary:
        # Only 'sample_due' and 'sample_received' samples can be selected
        # for secondary analyses
        doActionFor(ar, 'sample')
        doActionFor(ar, 'sample_due')
        sample_state = workflow.getInfoFor(sample, 'review_state')
        if sample_state == 'sample_received':
            doActionFor(ar, 'receive')

        for analysis in ar.getAnalyses(full_objects=1):
            doActionFor(analysis, 'sample')
            doActionFor(analysis, 'sample_due')
            analysis_transition_ids = [t['id'] for t in workflow.getTransitionsFor(analysis)]
            if 'receive' in analysis_transition_ids and sample_state == 'sample_received':
                doActionFor(analysis, 'receive')

    if not secondary:
        # Create sample partitions
        if not partitions:
            partitions = [{'services': analyses}]
        for n, partition in enumerate(partitions):
            # Calculate partition id
            partition_prefix = sample.getId() + "-P"
            partition_id = '%s%s' % (partition_prefix, n + 1)
            partition['part_id'] = partition_id
            # Point to or create sample partition
            if partition_id in sample.objectIds():
                partition['object'] = sample[partition_id]
            else:
                partition['object'] = create_samplepartition(
                    sample,
                    partition,
                    analyses
                )
        # If Preservation is required for some partitions,
        # and the SamplingWorkflow is disabled, we need
        # to transition to to_be_preserved manually.
        if not workflow_enabled:
            to_be_preserved = []
            sample_due = []
            lowest_state = 'sample_due'
            for p in sample.objectValues('SamplePartition'):
                if p.getPreservation():
                    lowest_state = 'to_be_preserved'
                    to_be_preserved.append(p)
                else:
                    sample_due.append(p)
            for p in to_be_preserved:
                doActionFor(p, 'to_be_preserved')
            for p in sample_due:
                doActionFor(p, 'sample_due')
            doActionFor(sample, lowest_state)
            doActionFor(ar, lowest_state)

        # Transition pre-preserved partitions
        for p in partitions:
            if 'prepreserved' in p and p['prepreserved']:
                part = p['object']
                state = workflow.getInfoFor(part, 'review_state')
                if state == 'to_be_preserved':
                    workflow.doActionFor(part, 'preserve')

    # Return the newly created Analysis Request
    return ar
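
A minimal calling sketch, assuming `client` is a Client object,
`sampletype_uid` is the UID of an existing SampleType and `service_uids` is a
list of AnalysisService UIDs (all names here are hypothetical):

    ar = create_analysisrequest(
        context=client,
        request={},  # no live HTTP request needed for this sketch (assumption)
        values={
            'SampleType': sampletype_uid,
            'SamplingDate': '2015-01-01',
        },
        analyses=service_uids,
    )
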
Beispiel #29
0
    def process(self):
        self._parser.parse()
        parsed = self._parser.resume()
        self._errors = self._parser.errors
        self._warns = self._parser.warns
        self._logs = self._parser.logs
        self._priorizedsearchcriteria = ''

        if parsed is False:
            return False

        # Allowed analysis states
        allowed_ar_states_msg = [t(_(s)) for s in self.getAllowedARStates()]
        allowed_an_states_msg = [
            t(_(s)) for s in self.getAllowedAnalysisStates()
        ]
        self.log("Allowed Analysis Request states: ${allowed_states}",
                 mapping={'allowed_states': ', '.join(allowed_ar_states_msg)})
        self.log("Allowed analysis states: ${allowed_states}",
                 mapping={'allowed_states': ', '.join(allowed_an_states_msg)})

        # Exclude non existing ACODEs
        acodes = []
        ancount = 0
        arprocessed = []
        instprocessed = []
        importedars = {}
        importedinsts = {}
        rawacodes = self._parser.getAnalysisKeywords()
        exclude = self.getKeywordsToBeExcluded()
        for acode in rawacodes:
            if acode in exclude:
                continue
            service = self.bsc(getKeyword=acode)
            if not service:
                self.warn('Service keyword ${analysis_keyword} not found',
                          mapping={"analysis_keyword": acode})
            else:
                acodes.append(acode)
        if len(acodes) == 0:
            self.err("Service keywords: no matches found")

        searchcriteria = self.getIdSearchCriteria()
        #self.log(_("Search criterias: %s") % (', '.join(searchcriteria)))
        for objid, results in self._parser.getRawResults().iteritems():
            # Allowed more than one result for the same sample and analysis.
            # Needed for calibration tests
            for result in results:
                analyses = self._getZODBAnalyses(objid)
                inst = None
                if len(analyses) == 0 and self.instrument_uid:
                    # No registered analyses found, but maybe we need to
                    # create them first if an instrument id has been set
                    # in the import form.
                    insts = self.bsc(portal_type='Instrument',
                                     UID=self.instrument_uid)
                    if len(insts) == 0:
                        # No instrument found
                        self.err(
                            "No Analysis Request with '${allowed_ar_states}' "
                            "states found, and no QC analyses found for ${object_id}",
                            mapping={
                                "allowed_ar_states": ', '.join(allowed_ar_states_msg),
                                "object_id": objid
                            })
                        self.err("Instrument not found")
                        continue

                    inst = insts[0].getObject()

                    # Create a new ReferenceAnalysis and link it to the Instrument
                    # Here we have an objid (i.e. R01200012) and
                    # a dict with results (the key is the AS keyword).
                    # How can we create a ReferenceAnalysis if we don't know
                    # which ReferenceSample we might use?
                    # Ok. The objid HAS to be the ReferenceSample code.
                    refsample = self.bc(portal_type='ReferenceSample',
                                        id=objid)
                    if refsample and len(refsample) == 1:
                        refsample = refsample[0].getObject()

                    elif refsample and len(refsample) > 1:
                        # More than one reference sample found!
                        self.err(
                            "More than one reference sample found for '${object_id}'",
                            mapping={"object_id": objid})
                        continue

                    else:
                        # No reference sample found!
                        self.err("No Reference Sample found for ${object_id}",
                                 mapping={"object_id": objid})
                        continue

                    # For each acode, create a ReferenceAnalysis and attach it
                    # to the Reference Sample
                    reference_type = 'b' if refsample.getBlank() else 'c'
                    services = self.bsc(portal_type='AnalysisService')
                    service_uids = [service.UID for service in services
                                    if service.getObject().getKeyword() in result.keys()]
                    analyses = inst.addReferences(refsample, service_uids)

                elif len(analyses) == 0:
                    # No analyses found
                    self.err(
                        "No Analysis Request with '${allowed_ar_states}' "
                        "states neither QC analyses found for ${object_id}",
                        mapping={
                            "allowed_ar_states":
                            ', '.join(allowed_ar_states_msg),
                            "object_id": objid
                        })
                    continue

                # Look for timestamp
                capturedate = result.get('DateTime', {}).get('DateTime', None)
                if capturedate:
                    del result['DateTime']
                for acode, values in result.iteritems():
                    if acode not in acodes:
                        # Analysis keyword doesn't exist
                        continue

                    ans = [analysis for analysis in analyses \
                           if analysis.getKeyword() == acode]

                    if len(ans) > 1:
                        self.err(
                            "More than one analyses found for ${object_id} and ${analysis_keyword)",
                            mapping={
                                "object_id": objid,
                                "analysis_keyword": acode
                            })
                        continue

                    elif len(ans) == 0:
                        self.err(
                            "No analyses found for ${object_id} and ${analysis_keyword)",
                            mapping={
                                "object_id": objid,
                                "analysis_keyword": acode
                            })
                        continue

                    analysis = ans[0]
                    if capturedate:
                        values['DateTime'] = capturedate
                    processed = self._process_analysis(objid, analysis, values)
                    if processed:
                        ancount += 1
                        if inst:
                            # Calibration Test (import to Instrument)
                            instprocessed.append(inst.UID())
                            importedinst = importedinsts.get(inst.title, [])
                            if acode not in importedinst:
                                importedinst.append(acode)
                            importedinsts[inst.title] = importedinst
                        else:
                            ar = analysis.aq_parent if analysis.portal_type == 'Analysis' else None
                            if ar and ar.UID:
                                # Set AR imported info
                                arprocessed.append(ar.UID())
                                importedar = importedars.get(ar.getRequestID(), [])
                                if acode not in importedar:
                                    importedar.append(acode)
                                importedars[ar.getRequestID()] = importedar

                        # Create the AttachmentType for mime type if not exists
                        attuid = None
                        attachmentType = self.bsc(
                            portal_type="AttachmentType",
                            title=self._parser.getAttachmentFileType())
                        if len(attachmentType) == 0:
                            try:
                                folder = self.context.bika_setup.bika_attachmenttypes
                                obj = _createObjectByType(
                                    "AttachmentType", folder, tmpID())
                                obj.edit(
                                    title=self._parser.getAttachmentFileType(),
                                    description="Autogenerated file type")
                                obj.unmarkCreationFlag()
                                renameAfterCreation(obj)
                                attuid = obj.UID()
                            except Exception:
                                attuid = None
                                self.err(
                                    "Unable to create the Attachment Type ${mime_type}",
                                    mapping={
                                        "mime_type":
                                        self._parser.getFileMimeType()
                                    })
                        else:
                            attuid = attachmentType[0].UID

                        if attuid is not None:
                            try:
                                # Attach the file to the Analysis
                                wss = analysis.getBackReferences(
                                    'WorksheetAnalysis')
                                if wss and len(wss) > 0:
                                    # TODO: See if we can avoid using the WS and use
                                    # the Analysis directly (useful for CalibrationTest)
                                    ws = wss[0]
                                    attachment = _createObjectByType(
                                        "Attachment", ws, tmpID())
                                    attachment.edit(
                                        AttachmentFile=self._parser.getInputFile(),
                                        AttachmentType=attuid,
                                        AttachmentKeys='Results, Automatic import')
                                    attachment.reindexObject()
                                    others = analysis.getAttachment()
                                    attachments = []
                                    for other in others:
                                        if (other.getAttachmentFile().filename !=
                                                attachment.getAttachmentFile().filename):
                                            attachments.append(other.UID())
                                    attachments.append(attachment.UID())
                                    analysis.setAttachment(attachments)

                            except Exception:
                                # self.err("Unable to attach results file '${file_name}' to AR ${request_id}",
                                #          mapping={"file_name": self._parser.getInputFile().filename,
                                #                   "request_id": ar.getRequestID()})
                                pass

        # Calculate analysis dependencies
        for aruid in arprocessed:
            ar = self.bc(portal_type='AnalysisRequest', UID=aruid)
            ar = ar[0].getObject()
            analyses = ar.getAnalyses()
            for analysis in analyses:
                analysis = analysis.getObject()
                if analysis.calculateResult(True, True):
                    self.log(
                        "${request_id} calculated result for '${analysis_keyword}': '${analysis_result}'",
                        mapping={
                            "request_id": ar.getRequestID(),
                            "analysis_keyword": analysis.getKeyword(),
                            "analysis_result": str(analysis.getResult())
                        })

        # Not sure whether there is any reason why ReferenceAnalyses do not
        # define the method calculateResult; needs investigation.
        #for instuid in instprocessed:
        #    inst = self.bsc(portal_type='Instrument',UID=instuid)[0].getObject()
        #    analyses = inst.getAnalyses()
        #    for analysis in analyses:
        #        if (analysis.calculateResult(True, True)):
        #            self.log(_("%s calculated result for '%s': '%s'") %
        #                 (inst.title, analysis.getKeyword(), str(analysis.getResult())))

        for arid, acodes in importedars.iteritems():
            acodesmsg = ["Analysis %s" % acod for acod in acodes]
            self.log(
                "${request_id}: ${analysis_keywords} imported sucessfully",
                mapping={
                    "request_id": arid,
                    "analysis_keywords": acodesmsg
                })

        for instid, acodes in importedinsts.iteritems():
            acodesmsg = ["Analysis %s" % acod for acod in acodes]
            msg = "%s: %s %s" % (instid, ", ".join(acodesmsg),
                                 "imported sucessfully")
            self.log(msg)

        if self.instrument_uid:
            self.log(
                "Import finished successfully: ${nr_updated_ars} ARs, "
                "${nr_updated_instruments} Instruments and ${nr_updated_results} "
                "results updated",
                mapping={
                    "nr_updated_ars": str(len(importedars)),
                    "nr_updated_instruments": str(len(importedinsts)),
                    "nr_updated_results": str(ancount)
                })
        else:
            self.log(
                "Import finished successfully: ${nr_updated_ars} ARs and "
                "${nr_updated_results} results updated",
                mapping={
                    "nr_updated_ars": str(len(importedars)),
                    "nr_updated_results": str(ancount)
                })
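
The self.log calls above pass i18n-style messages containing ${name} placeholders together with a mapping. A minimal sketch of that substitution, assuming a plain regex-based formatter (the actual Bika logger presumably also routes messages through translation, so this is illustrative only):

    import re

    def interpolate(message, mapping):
        # Replace each ${key} token with its mapped value; unknown keys are
        # left untouched so incomplete mappings remain visible in the log.
        return re.sub(r"\$\{(\w+)\}",
                      lambda m: str(mapping.get(m.group(1), m.group(0))),
                      message)

    # interpolate("${request_id}: ${analysis_keywords} imported successfully",
    #             {"request_id": "AIR-0001-R01",
    #              "analysis_keywords": "Analysis Ca"})
    # -> 'AIR-0001-R01: Analysis Ca imported successfully'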
Beispiel #30
0
    def _import_file(self, importoption, csvfile, client_id):
        fullfilename = csvfile.filename
        fullfilename = fullfilename.split('/')[-1]
        filename = fullfilename.split('.')[0]
        log = []
        r = self.portal_catalog(portal_type='Client', id=client_id)
        if len(r) == 0:
            # This is not a user input issue - client_id is added to the template
            log.append('   Could not find Client %s' % client_id)
            return None, '\n'.join(log)

        client = r[0].getObject()
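        # Workflow states in which existing samples could still be updated
        # (defined here for reference; not used further down this method)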
        updateable_states = ['sample_received', 'assigned']
        reader = csv.reader(csvfile.readlines())
        samples = []
        sample_headers = None
        batch_headers = None
        batch_remarks = []
        row_count = 0
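        # Expected layout, per the checks below: row 1 must start with
        # 'Header', row 2 carries the 'Import' marker plus batch metadata,
        # row 3 holds the analysis headers from column K onward, rows 4-6
        # are skipped, and every later row describes one sample.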
        for row in reader:
            row_count = row_count + 1
            if not row:
                continue
            # a new batch starts
            if row_count == 1:
                if row[0] == 'Header':
                    continue
                else:
                    msg = '%s invalid batch header' % row
                    transaction_note(msg)
                    return None, msg
            elif row_count == 2:
                msg = None
                if row[1] != 'Import':
                    msg = 'Invalid batch header - Import required in cell B2'
                    transaction_note(msg)
                    return None, msg
                if not row[2] or filename.lower() != row[2].lower():
                    msg = 'Filename, %s, does not match entered filename, %s' \
                            % (filename, row[2])
                    transaction_note(msg)
                    return None, msg

                batch_headers = row[0:]
                arimport_id = tmpID()
                title = filename
                idx = 1
                while title in [i.Title() for i in client.objectValues()]:
                    title = '%s-%s' % (filename, idx)
                    idx += 1
                arimport = _createObjectByType("ARImport", client, arimport_id,
                                               title=title)
                arimport.unmarkCreationFlag()
                continue
            elif row_count == 3:
                sample_headers = row[10:]
                continue
            elif row_count in [4, 5, 6]:
                continue

            # otherwise add the row to the list of samples
            samples.append(row)
        if not row_count:
            msg = 'Invalid batch header'
            transaction_note(msg)
            return None, msg

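        # 8 KB of whitespace, presumably pushed to the response to defeat
        # buffering so the progress bar below renders while importing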
        pad = 8192*' '
        request = self.request

        title = 'Importing file'
        bar = ProgressBar(
                self.context, self.request, title, description='')
        notify(InitialiseProgressBar(bar))

        sample_count = len(samples)
        row_count = 0
        for sample in samples:
            next_num = tmpID()
            row_count = row_count + 1
            item_remarks = []
            progress_index = float(row_count)/float(sample_count)*100.0
            progress = ProgressState(self.request, progress_index)
            notify(UpdateProgressEvent(progress))
            # TODO Remove for production - just to look pretty
            # time.sleep(1)
            analyses = []
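            # Columns K onward are flags: a '1' selects the analysis at the
            # matching position in sample_headers (row 3 of the file)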
            for i in range(10, len(sample)):
                if sample[i] != '1':
                    continue
                analyses.append(sample_headers[(i-10)])
            if len(analyses) > 0:
                aritem_id = '%s_%s' % ('aritem', str(next_num))
                aritem = _createObjectByType("ARImportItem", arimport, aritem_id)
                aritem.edit(
                    SampleName=sample[0],
                    ClientRef=batch_headers[10],
                    ClientSid=sample[1],
                    SampleDate=sample[2],
                    SampleType=sample[3],
                    PickingSlip=sample[4],
                    ContainerType=sample[5],
                    ReportDryMatter=sample[6],
                    Priority=sample[7],
                    )

                aritem.setRemarks(item_remarks)
                if importoption == 'c':
                    aritem.setAnalyses(analyses)
                elif importoption == 'p':
                    aritem.setAnalysisProfile(analyses)

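        # The CC names/emails arrive as semicolon-separated cells in the
        # batch header row; normalise them to comma-separated strings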
        cc_names_report = ','.join(
                [i.strip() for i in batch_headers[6].split(';')]) \
                if batch_headers and len(batch_headers) > 7 else ""
        cc_emails_report = ','.join(
                [i.strip() for i in batch_headers[7].split(';')]) \
                if batch_headers and len(batch_headers) > 8 else ""
        cc_emails_invoice = ','.join(
                [i.strip() for i in batch_headers[8].split(';')]) \
                if batch_headers and len(batch_headers) > 9 else ""

        try:
            numOfSamples = int(batch_headers[12])
        except (TypeError, ValueError, IndexError):
            numOfSamples = 0
        arimport.edit(
            ImportOption=importoption,
            FileName=batch_headers[2],
            OriginalFile=csvfile,
            ClientTitle=batch_headers[3],
            ClientID=batch_headers[4],
            ContactID=batch_headers[5],
            CCNamesReport=cc_names_report,
            CCEmailsReport=cc_emails_report,
            CCEmailsInvoice=cc_emails_invoice,
            OrderID=batch_headers[9],
            QuoteID=batch_headers[10],
            SamplePoint=batch_headers[11],
            NumberSamples=numOfSamples,
            Remarks=batch_remarks,
            Analyses=sample_headers,
            DateImported=DateTime(),
            )
        arimport._renameAfterCreation()

        # The return value of validateIt() is not used here; it is
        # presumably called for its side effects on the ARImport.
        arimport.validateIt()
        return arimport, msg
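
For orientation, a minimal sketch of a batch file that would pass the row checks in _import_file above. The column meanings mirror the aritem.edit call; every concrete value below is an illustrative assumption, not taken from the source:

    import csv

    # Inferred layout: row 1 starts with 'Header'; row 2 needs 'Import' in
    # column B and the file name (without extension) in column C; row 3
    # lists analysis keywords from column K onward; rows 4-6 are skipped;
    # each later row is one sample whose '1' flags from column K select
    # the analyses to request.
    rows = [
        ['Header'] + [''] * 12,
        ['', 'Import', 'batch1'] + [''] * 10,
        [''] * 10 + ['Ca', 'Mg', 'Zn'],
        [''] * 13,
        [''] * 13,
        [''] * 13,
        # SampleName, ClientSid, SampleDate, SampleType, PickingSlip,
        # ContainerType, ReportDryMatter, Priority, two unused cells, flags
        ['Sample-1', 'SID-1', '2015-01-01', 'Air', '', '', '0', '3', '', '',
         '1', '0', '1'],
    ]
    with open('batch1.csv', 'wb') as f:  # 'wb': Python 2 csv module
        csv.writer(f).writerows(rows)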