Exemplo n.º 1
0
 def test_import_ppm(self):
     """Import an xlsx results file with 'ppm' as the default unit and
     verify the imported analysis results for the ag107/al27 keywords.
     """
     # Create an AR with all configured services and receive it so that
     # results may be imported against it
     ar = self.add_analysisrequest(
         self.client,
         dict(Client=self.client.UID(),
              Contact=self.contact.UID(),
              DateSampled=datetime.now().date().isoformat(),
              SampleType=self.sampletype.UID()),
         [srv.UID() for srv in self.services])
     api.do_transition_for(ar, 'receive')
     # Use a context manager so the file handle is closed deterministically
     # instead of being leaked until garbage collection
     with open(fn2, 'rb') as f:
         data = f.read()
     import_file = FileUpload(TestFile(cStringIO.StringIO(data), fn2))
     request = TestRequest(form=dict(
         instrument_results_file_format='xlsx',
         submitted=True,
         artoapply='received_tobeverified',
         results_override='override',
         instrument_results_file=import_file,
         default_unit='ppm',
         instrument=''))
     results = importer.Import(self.portal, request)
     ag = ar.getAnalyses(full_objects=True, getKeyword='ag107')[0]
     al = ar.getAnalyses(full_objects=True, getKeyword='al27')[0]
     # NOTE(review): eval() is only tolerable here because the importer
     # output is trusted test data; never eval() untrusted input
     test_results = eval(results)  # noqa
     self.assertEqual(ag.getResult(), '1118000.0')
     self.assertEqual(al.getResult(), '2228000.0')
Exemplo n.º 2
0
    def workflow_action_retract_ar(self):
        """Invalidate the current Analysis Request (AR).

        Clones the AR, retracts the original, promotes the clone to
        'to_be_verified', optionally notifies the client contacts, and
        finally redirects to the new AR.  The step order matters: the
        clone must exist before the original is retracted.
        """
        # Can't transition inactive ARs; bail out with an informative
        # portal message and redirect back to the current context
        if not api.is_active(self.context):
            message = _('Item is inactive.')
            self.context.plone_utils.addPortalMessage(message, 'info')
            self.request.response.redirect(self.context.absolute_url())
            return

        # 1. Copy the AR, linking the original one and vice versa
        ar = self.context
        newar = self.cloneAR(ar)

        # 2. The old AR gets a status of 'invalid'
        api.do_transition_for(ar, 'retract_ar')

        # 3. The new AR copy opens in status 'to be verified'.  The state is
        # forced directly rather than transitioned through the workflow.
        changeWorkflowState(newar, 'bika_ar_workflow', 'to_be_verified')

        # 4. Alert the client contacts who ordered the results that a
        # possible mistake has been picked up and is under investigation,
        # linking to the new AR online — but only if the setup option
        # NotifyOnARRetract is enabled
        bika_setup = api.get_bika_setup()
        if bika_setup.getNotifyOnARRetract():
            self.notify_ar_retract(ar, newar)

        message = _('${items} invalidated.',
                    mapping={'items': ar.getRequestID()})
        self.context.plone_utils.addPortalMessage(message, 'warning')
        self.request.response.redirect(newar.absolute_url())
Exemplo n.º 3
0
    def workflow_action_retract_ar(self):
        """Invalidate the current Analysis Request (AR).

        Clones the AR, retracts the original, promotes the clone to
        'to_be_verified', optionally notifies the client contacts, and
        redirects to the new AR.  The clone must be created before the
        original is retracted.
        """
        # Can't transition inactive ARs; inform the user and redirect back
        if not api.is_active(self.context):
            message = _('Item is inactive.')
            self.context.plone_utils.addPortalMessage(message, 'info')
            self.request.response.redirect(self.context.absolute_url())
            return

        # 1. Copy the AR, linking the original one and vice versa
        ar = self.context
        newar = self.cloneAR(ar)

        # 2. The old AR gets a status of 'invalid'
        api.do_transition_for(ar, 'retract_ar')

        # 3. The new AR copy opens in status 'to be verified'; the state is
        # forced directly instead of going through a workflow transition
        changeWorkflowState(newar, 'bika_ar_workflow', 'to_be_verified')

        # 4. Alert the client contacts who ordered the results that a
        # possible mistake has been picked up and is under investigation,
        # linking to the new AR online — only when NotifyOnARRetract is
        # enabled in the setup
        bika_setup = api.get_bika_setup()
        if bika_setup.getNotifyOnARRetract():
            self.notify_ar_retract(ar, newar)

        message = _('${items} invalidated.', mapping={'items': ar.getId()})
        self.context.plone_utils.addPortalMessage(message, 'warning')
        self.request.response.redirect(newar.absolute_url())
Exemplo n.º 4
0
    def workflow_action_invalidate(self):
        """Invalidate the current Analysis Request (AR).

        Performs the 'invalidate' transition, fetches the retest created
        by that transition, optionally notifies the client contacts, and
        redirects to the retest.
        """
        # Can't transition inactive ARs; inform the user and redirect back
        if not api.is_active(self.context):
            message = _('Item is inactive.')
            self.context.plone_utils.addPortalMessage(message, 'info')
            self.request.response.redirect(self.context.absolute_url())
            return

        # Invalidate the AR and get the retest.
        # NOTE(review): presumably the 'invalidate' transition creates the
        # retest as a side effect, since getRetest() is read right after —
        # confirm against the workflow definition.
        api.do_transition_for(self.context, 'invalidate')
        retest = self.context.getRetest()

        # Alert the client contacts who ordered the results that a possible
        # mistake has been picked up and is under investigation, linking to
        # the AR online — only when NotifyOnARRetract is enabled
        bika_setup = api.get_bika_setup()
        if bika_setup.getNotifyOnARRetract():
            self.notify_ar_retract(self.context, retest)

        message = _('${items} invalidated.',
                    mapping={'items': self.context.getId()})
        self.context.plone_utils.addPortalMessage(message, 'warning')
        self.request.response.redirect(retest.absolute_url())
Exemplo n.º 5
0
def doActionFor(instance, action_id):
    """Attempt the workflow transition ``action_id`` on ``instance``.

    The transition is not attempted at all when ``skip()`` says the
    instance/action pair should be skipped.

    :param instance: content object to transition
    :param action_id: id of the workflow transition (e.g. 'receive')
    :returns: tuple (actionperformed, message) where ``actionperformed``
        is True only if the transition succeeded, and ``message`` holds
        the error text when it failed
    """
    actionperformed = False
    message = ''
    # peek=True only checks the skip flag without consuming it
    if not skip(instance, action_id, peek=True):
        try:
            api.do_transition_for(instance, action_id)
            actionperformed = True
        except api.BikaLIMSError as e:
            message = str(e)
            # logger.warn is a deprecated alias of logger.warning
            logger.warning(message)
    return actionperformed, message
Exemplo n.º 6
0
def doActionFor(instance, action_id):
    """Attempt the workflow transition ``action_id`` on ``instance``.

    The transition is not attempted at all when ``skip()`` says the
    instance/action pair should be skipped.

    :param instance: content object to transition
    :param action_id: id of the workflow transition (e.g. 'receive')
    :returns: tuple (actionperformed, message) where ``actionperformed``
        is True only if the transition succeeded, and ``message`` holds
        the error text when it failed
    """
    actionperformed = False
    message = ''
    # peek=True only checks the skip flag without consuming it
    if not skip(instance, action_id, peek=True):
        try:
            api.do_transition_for(instance, action_id)
            actionperformed = True
        except api.BikaLIMSError as e:
            message = str(e)
            # logger.warn is a deprecated alias of logger.warning
            logger.warning(message)
    return actionperformed, message
Exemplo n.º 7
0
def doActionFor(instance, action_id):
    """Attempt the workflow transition ``action_id`` on ``instance``.

    The transition is not attempted at all when ``skip()`` says the
    instance/action pair should be skipped.

    :param instance: content object to transition
    :param action_id: id of the workflow transition (e.g. 'receive')
    :returns: tuple (actionperformed, message) where ``actionperformed``
        is True only if the transition succeeded, and ``message`` holds
        the error text when it failed
    """
    actionperformed = False
    message = ''
    # peek=True only checks the skip flag without consuming it
    if not skip(instance, action_id, peek=True):
        try:
            api.do_transition_for(instance, action_id)
            actionperformed = True
        except ploneapi.exc.InvalidParameterError as e:
            message = str(e)
            # logger.warn is a deprecated alias of logger.warning
            logger.warning("Failed to perform transition {} on {}: {}".format(
                action_id, instance, message))
    return actionperformed, message
    def test_LIMS_2371_SignificantFigures(self):
        """Verify result formatting when the analysis service display
        rounding is set to SIGNIFICANT_FIGURES.

        For each raw result value, the inner dict maps a number of
        significant figures to the expected formatted result string.
        """
        RESULT_VALUES = {
            '-22770264':    {1: '-2e07', 2: '-2.3e07', 3: '-2.28e07', 4: '-2.277e07', 5: '-2.277e07', 6: '-2.27703e07', 7: '-2.277026e07'},
            '-2277.3':      {1: '-2000', 2: '-2300', 3: '-2280', 4: '-2277', 5: '-2277.3', 6: '-2277.30', 7: '-2277.300'},
            '-40277':       {1: '-40000', 2: '-40000', 3: '-40300', 4: '-40280', 5: '-40277', 6: '-40277.0', 7: '-40277.00'},
            '-40277.036':   {1: '-40000', 2: '-40000', 3: '-40300', 4: '-40280', 5: '-40277', 6: '-40277.0', 7: '-40277.04'},
            '47000.01':     {1: '50000', 2: '47000', 3: '47000', 4: '47000', 5: '47000', 6: '47000.0', 7: '47000.01', 8: '47000.010', 9: '47000.0100'},
            '132':          {1: '100', 2: '130', 3: '132', 4: '132.0', 5: '132.00', 6: '132.000'},
            '14700.04':     {1: '10000', 2: '15000', 3: '14700', 4: '14700', 5: '14700', 6: '14700.0', 7: '14700.04', 8: '14700.040', 9: '14700.0400'},
            '1407.0':       {1: '1000', 2: '1400', 3: '1410', 4: '1407', 5: '1407.0', 6: '1407.00', 7: '1407.000'},
            '0.147':        {1: '0.1', 2: '0.15', 3: '0.147', 4: '0.1470', 5: '0.14700'},
            '4308':         {1: '4000', 2: '4300', 3: '4310', 4: '4308', 5: '4308.0', 6: '4308.00', 7: '4308.000'},
            '470000':       {1: '500000', 2: '470000', 3: '470000', 4: '470000', 5: '470000', 6: '470000', 7: '470000.0', 8: '470000.00', 9: '470000.000'},
            '0.154':        {1: '0.2', 2: '0.15', 3: '0.154', 4: '0.1540', 5: '0.15400', 6: '0.154000'},
            '0.166':        {1: '0.2', 2: '0.17', 3: '0.166', 4: '0.1660', 5: '0.16600', 6: '0.166000'},
            '0.156':        {1: '0.2', 2: '0.16', 3: '0.156', 4: '0.1560', 5: '0.15600', 6: '0.156000'},
            '47841242':     {1: '5e07', 2: '4.8e07', 3: '4.78e07', 4: '4.784e07', 5: '4.7841e07', 6: '4.78412e07', 7: '4.784124e07', 8: '4.7841242e07', 9: '4.7841242e07', 10: '4.7841242e07'},
            '2.2e-06':      {1: '0.000002', 2: '0.0000022', 3: '0.00000220', 4: '0.000002200'},
            '19019.19019':  {1: '20000', 2: '19000', 3: '19000', 4: '19020', 5: '19019', 6: '19019.2', 7: '19019.19', 8: '19019.190', 9: '19019.1902', 10: '19019.19019'}
        }

        service = self.service
        service.setExponentialFormatPrecision(7)  # just a high value
        service.setDisplayRounding("SIGNIFICANT_FIGURES")
        service.setLowerDetectionLimit('-999999999')  # Test results below 0 too
        for value, tests in RESULT_VALUES.items():
            # Create a fresh AR for each significant-figures setting, since
            # the service must be reconfigured before the analysis is created
            for sig_figures, expected in tests.items():
                service.setSignificantFigures(sig_figures)
                ar = create_analysisrequest(
                    self.client,
                    {},
                    {'Client': self.client.UID(),
                     'Contact': self.client.getContacts()[0].UID(),
                     'SamplingDate': '2015-01-01',
                     'SampleType': self.sampletype.UID()},
                    [service.UID()])
                do_transition_for(ar, 'receive')
                an = ar.getAnalyses()[0].getObject()
                an.setResult(value)
                self.assertEqual(an.getFormattedResult(), expected)
    def test_retract_an_analysis_request(self):
        """Walk an AR through receive/submit/retract and verify that each
        workflow transition lands in the expected review state, including
        the final retract of the whole AR back to 'sample_received'.
        """
        ar = self.create_ar()

        # Check "receive" transition -> sample_received
        api.do_transition_for(ar, "receive")
        # assertEquals is a deprecated alias of assertEqual
        self.assertEqual(
            api.get_workflow_status_of(ar, "review_state"),
            "sample_received"
        )

        for analysis in ar.getAnalyses(full_objects=True):
            analysis.setResult("12")

            # Check "submit" transition -> to_be_verified
            api.do_transition_for(analysis, "submit")
            self.assertEqual(
                api.get_workflow_status_of(analysis, "review_state"),
                "to_be_verified"
            )

            # Check "retract" transition -> retracted
            api.do_transition_for(analysis, "retract")
            self.assertEqual(api.get_workflow_status_of(
                analysis, "review_state"), "retracted")

        # Retracting an analysis creates a duplicate; submit the ones that
        # are not retracted so the AR can be retracted as a whole below
        for analysis in ar.getAnalyses(full_objects=True):
            if api.get_workflow_status_of(
                    analysis, "review_state") == "retracted":
                continue

            # Check "submit" transition -> to_be_verified
            analysis.setResult(12)
            api.do_transition_for(analysis, "submit")
            self.assertEqual(
                api.get_workflow_status_of(analysis, "review_state"),
                "to_be_verified")

        # Check "retract" transition -> "sample_received"
        api.do_transition_for(ar, "retract")
        self.assertEqual(
            api.get_workflow_status_of(ar, "review_state"),
            "sample_received")
Exemplo n.º 10
0
 def test_import_csv_without_filename_suffix(self):
     """Import a results file whose name carries no suffix and verify the
     imported analysis results for the Ag107/Al27 keywords.
     """
     # Create an AR with all configured services and receive it so that
     # results may be imported against it
     ar = self.add_analysisrequest(
         self.client,
         dict(Client=self.client.UID(),
              Contact=self.contact.UID(),
              DateSampled=datetime.now().date().isoformat(),
              SampleType=self.sampletype.UID()),
         [srv.UID() for srv in self.services])
     api.do_transition_for(ar, 'receive')
     # Use a context manager so the file handle is closed deterministically
     # instead of being leaked until garbage collection
     with open(fn, 'r') as f:
         data = f.read()
     import_file = FileUpload(TestFile(cStringIO.StringIO(data), fn))
     request = TestRequest(
         form=dict(submitted=True,
                   artoapply='received_tobeverified',
                   results_override='override',
                   instrument_results_file=import_file,
                   instrument=api.get_uid(self.instrument)))
     results = importer.Import(self.portal, request)
     ag = ar.getAnalyses(full_objects=True, getKeyword='Ag107')[0]
     al = ar.getAnalyses(full_objects=True, getKeyword='Al27')[0]
     # NOTE(review): eval() is only tolerable here because the importer
     # output is trusted test data; never eval() untrusted input
     test_results = eval(results)  # noqa
     self.assertEqual(ag.getResult(), '0.111')
     self.assertEqual(al.getResult(), '0.222')
Exemplo n.º 11
0
    def test_import_xlsx(self):
        """Import an xlsx results file covering two ARs and verify the
        imported analysis results of both.
        """
        # Two ARs with the same services; the worksheet carries results
        # for both of them
        ar1 = self.add_analysisrequest(
            self.client,
            dict(Client=self.client.UID(),
                 Contact=self.contact.UID(),
                 DateSampled=datetime.now().date().isoformat(),
                 SampleType=self.sampletype.UID()),
            [srv.UID() for srv in self.services])
        ar2 = self.add_analysisrequest(
            self.client,
            dict(Client=self.client.UID(),
                 Contact=self.contact.UID(),
                 DateSampled=datetime.now().date().isoformat(),
                 SampleType=self.sampletype.UID()),
            [srv.UID() for srv in self.services])
        api.do_transition_for(ar1, 'receive')
        api.do_transition_for(ar2, 'receive')

        # Use a context manager so the file handle is closed
        # deterministically instead of being leaked until GC
        with open(fn, 'rb') as f:
            data = f.read()
        import_file = FileUpload(TestFile(cStringIO.StringIO(data), fn))
        request = TestRequest(
            form=dict(submitted=True,
                      artoapply='received_tobeverified',
                      results_override='override',
                      instrument_results_file=import_file,
                      worksheet='Concentrations',
                      instrument=api.get_uid(self.instrument)))
        results = importer.Import(self.portal, request)
        # NOTE(review): eval() is only tolerable here because the importer
        # output is trusted test data; never eval() untrusted input
        test_results = eval(results)  # noqa
        ag1 = ar1.getAnalyses(full_objects=True, getKeyword='Ag107')[0]
        al1 = ar1.getAnalyses(full_objects=True, getKeyword='Al27')[0]
        ag2 = ar2.getAnalyses(full_objects=True, getKeyword='Ag107')[0]
        al2 = ar2.getAnalyses(full_objects=True, getKeyword='Al27')[0]
        self.assertEqual(ag1.getResult(), '0.111')
        self.assertEqual(al1.getResult(), '0.555')
        self.assertEqual(ag2.getResult(), '0.222')
        self.assertEqual(al2.getResult(), '0.666')
Exemplo n.º 12
0
    def calculateTotalResults(self, objid, analysis):
        """ If an AR(objid) has an analysis that has a calculation
        then check if param analysis is used on the calculations formula.
        Here we are dealing with two types of analysis.
        1. Calculated Analysis - Results are calculated.
        2. Analysis - Results are captured and not calculated
        :param objid: AR ID or Worksheet's Reference Sample IDs
        :param analysis: Analysis Object
        """
        analyses = self._getZODBAnalyses(objid)
        # Keep only the analyses that have a calculation attached
        analyses_with_calculation = filter(lambda an: an.getCalculation(),
                                           analyses)
        for analysis_with_calc in analyses_with_calculation:
            # Get the calculation to get the formula so that we can check
            # if param analysis keyword is used on the calculation formula
            # (fixed misspelled local name 'calcultion')
            calculation = analysis_with_calc.getCalculation()
            formula = calculation.getMinifiedFormula()
            # The analysis that we are currently on
            analysis_keyword = analysis.getKeyword()
            if analysis_keyword not in formula:
                continue

            # If the analysis_keyword is in the formula, it means that this
            # analysis is a dependent on that calculated analysis
            calc_passed = analysis_with_calc.calculateResult(
                override=self._override[1])
            if calc_passed:
                # Only submit when the calculation actually produced a result
                api.do_transition_for(analysis_with_calc, "submit")
                self.log(
                    "${request_id}: calculated result for "
                    "'${analysis_keyword}': '${analysis_result}'",
                    mapping={
                        "request_id": objid,
                        "analysis_keyword": analysis_with_calc.getKeyword(),
                        "analysis_result": str(analysis_with_calc.getResult())
                    })
Exemplo n.º 13
0
    def calculateTotalResults(self, objid, analysis):
        """ If an AR(objid) has an analysis that has a calculation
        then check if param analysis is used on the calculations formula.
        Here we are dealing with two types of analysis.
        1. Calculated Analysis - Results are calculated.
        2. Analysis - Results are captured and not calculated
        :param objid: AR ID or Worksheet's Reference Sample IDs
        :param analysis: Analysis Object
        """
        analyses = self._getZODBAnalyses(objid)
        # Keep only the analyses that have a calculation attached
        analyses_with_calculation = filter(
                                        lambda an: an.getCalculation(),
                                        analyses)
        for analysis_with_calc in analyses_with_calculation:
            # Get the calculation to get the formula so that we can check
            # if param analysis keyword is used on the calculation formula
            # (fixed misspelled local name 'calcultion')
            calculation = analysis_with_calc.getCalculation()
            formula = calculation.getMinifiedFormula()
            # The analysis that we are currently on
            analysis_keyword = analysis.getKeyword()
            if analysis_keyword not in formula:
                continue

            # If the analysis_keyword is in the formula, it means that this
            # analysis is a dependent on that calculated analysis
            calc_passed = analysis_with_calc.calculateResult(override=self._override[1])
            if calc_passed:
                # Only submit when the calculation actually produced a result
                api.do_transition_for(analysis_with_calc, "submit")
                self.log(
                    "${request_id}: calculated result for "
                    "'${analysis_keyword}': '${analysis_result}'",
                    mapping={"request_id": objid,
                             "analysis_keyword": analysis_with_calc.getKeyword(),
                             "analysis_result": str(analysis_with_calc.getResult())}
                )
Exemplo n.º 14
0
    def test_LIMS_2221_DecimalMarkWithSciNotation(self):
        """Verify formatted results combine the decimal mark setting with
        each of the five scientific-notation styles.
        """
        # Notations
        # '1' => aE+b / aE-b
        # '2' => ax10^b / ax10^-b
        # '3' => ax10^b / ax10^-b (with superscript)
        # '4' => a·10^b / a·10^-b
        # '5' => a·10^b / a·10^-b (with superscript)
        matrix = [
            # as_prec  as_exp not mark  result          formatted result
            # -------  ------ --- ----  ------          ----------------
            [0,        0,     1,  ',',  '0',            '0'],
            [0,        0,     2,  ',',  '0',            '0'],
            [0,        0,     3,  ',',  '0',            '0'],
            [0,        0,     4,  ',',  '0',            '0'],
            [0,        0,     5,  ',',  '0',            '0'],
            [2,        5,     1,  ',',  '0.01',         '0,01'],
            [2,        5,     2,  ',',  '0.01',         '0,01'],
            [2,        5,     3,  ',',  '0.01',         '0,01'],
            [2,        5,     4,  ',',  '0.01',         '0,01'],
            [2,        5,     5,  ',',  '0.01',         '0,01'],
            [2,        1,     1,  ',',  '0.123',        '1,2e-01'],
            [2,        1,     2,  ',',  '0.123',        '1,2x10^-1'],
            [2,        1,     3,  ',',  '0.123',        '1,2x10<sup>-1</sup>'],
            [2,        1,     4,  ',',  '0.123',        '1,2·10^-1'],
            [2,        1,     5,  ',',  '0.123',        '1,2·10<sup>-1</sup>'],
            [2,        1,     1,  ',',  '1.234',        '1,23'],
            [2,        1,     2,  ',',  '1.234',        '1,23'],
            [2,        1,     3,  ',',  '1.234',        '1,23'],
            [2,        1,     4,  ',',  '1.234',        '1,23'],
            [2,        1,     5,  ',',  '1.234',        '1,23'],
            [2,        1,     1,  ',',  '12.345',       '1,235e01'],
            [2,        1,     2,  ',',  '12.345',       '1,235x10^1'],
            [2,        1,     3,  ',',  '12.345',       '1,235x10<sup>1</sup>'],
            [2,        1,     4,  ',',  '12.345',       '1,235·10^1'],
            [2,        1,     5,  ',',  '12.345',       '1,235·10<sup>1</sup>'],
            [4,        3,     1,  ',',  '-123.45678',   '-123,4568'],
            [4,        3,     2,  ',',  '-123.45678',   '-123,4568'],
            [4,        3,     3,  ',',  '-123.45678',   '-123,4568'],
            [4,        3,     4,  ',',  '-123.45678',   '-123,4568'],
            [4,        3,     5,  ',',  '-123.45678',   '-123,4568'],
            [4,        3,     1,  ',',  '-1234.5678',   '-1,2345678e03'],
            [4,        3,     2,  ',',  '-1234.5678',   '-1,2345678x10^3'],
            [4,        3,     3,  ',',  '-1234.5678',   '-1,2345678x10<sup>3</sup>'],
            [4,        3,     4,  ',',  '-1234.5678',   '-1,2345678·10^3'],
            [4,        3,     5,  ',',  '-1234.5678',   '-1,2345678·10<sup>3</sup>'],
        ]
        serv = self.service
        serv.setLowerDetectionLimit('-99999')  # test results below 0 too
        prevm = []
        an = None
        bs = get_bika_setup()
        for m in matrix:
            as_prec = m[0]
            as_exp = m[1]
            notation = m[2]
            _dm = m[3]
            _result = m[4]
            _expected = m[5]
            bs.setResultsDecimalMark(_dm)
            # Create the AR and set the values to the AS, but only if the
            # precision/exponential settings changed since the previous row
            # (reuses the same analysis otherwise to keep the test fast)
            if not an or prevm[0] != as_prec or prevm[1] != as_exp:
                serv.setPrecision(as_prec)
                serv.setExponentialFormatPrecision(as_exp)
                self.assertEqual(serv.getPrecision(), as_prec)
                self.assertEqual(
                    serv.Schema().getField('Precision').get(serv), as_prec)
                self.assertEqual(serv.getExponentialFormatPrecision(), as_exp)
                self.assertEqual(
                    serv.Schema().getField(
                        'ExponentialFormatPrecision').get(serv), as_exp)
                values = {'Client': self.client.UID(),
                          'Contact': self.client.getContacts()[0].UID(),
                          'SamplingDate': '2015-01-01',
                          'SampleType': self.sampletype.UID()}
                ar = create_analysisrequest(self.client, {}, values,
                                            [serv.UID()])
                do_transition_for(ar, 'receive')
                an = ar.getAnalyses()[0].getObject()
                prevm = m
            an.setResult(_result)

            self.assertEqual(an.getResult(), _result)
            self.assertEqual(an.Schema().getField('Result').get(an), _result)
            decimalmark = bs.getResultsDecimalMark()
            fr = an.getFormattedResult(sciformat=notation,
                                       decimalmark=decimalmark)
            self.assertEqual(fr, _expected)
Exemplo n.º 15
0
def create_analysisrequest(context,
                           request,
                           values,
                           analyses=None,
                           partitions=None,
                           specifications=None,
                           prices=None):
    """This is meant for general use and should do everything necessary to
    create and initialise an AR and any other required auxiliary objects
    (Sample, SamplePartition, Analysis...)

    :param context:
        The container in which the ARs will be created.
    :param request:
        The current Request object.
    :param values:
        a dict, where keys are AR|Sample schema field names.
    :param analyses:
        Analysis services list.  If specified, augments the values in
        values['Analyses']. May consist of service objects, UIDs, or Keywords.
    :param partitions:
        A list of dictionaries, if specific partitions are required.  If not
        specified, AR's sample is created with a single partition.
    :param specifications:
        These values augment those found in values['Specifications']
    :param prices:
        Allow different prices to be set for analyses.  If not set, prices
        are read from the associated analysis service.
    :returns: the newly created AnalysisRequest object.
    """

    # Gather necessary tools
    workflow = getToolByName(context, 'portal_workflow')
    bc = getToolByName(context, 'bika_catalog')
    # Analyses are analyses services
    analyses_services = analyses
    analyses = []
    # It's necessary to modify these and we don't want to pollute the
    # parent's data
    values = values.copy()
    # Merge explicit services with any listed in values['Analyses']
    analyses_services = analyses_services if analyses_services else []
    anv = values['Analyses'] if values.get('Analyses', None) else []
    analyses_services = anv + analyses_services

    if not analyses_services:
        raise RuntimeError(
            "create_analysisrequest: no analyses services provided")

    # Create new sample or locate the existing for secondary AR
    if not values.get('Sample', False):
        secondary = False
        workflow_enabled = context.bika_setup.getSamplingWorkflowEnabled()
        sample = create_sample(context, request, values)
    else:
        secondary = True
        sample = get_sample_from_values(context, values)
        workflow_enabled = sample.getSamplingWorkflowEnabled()

    # Create the Analysis Request
    ar = _createObjectByType('AnalysisRequest', context, tmpID())

    # Set some required fields manually before processForm is called
    ar.setSample(sample)
    values['Sample'] = sample

    if values.get('DateSampled', False):
        # Inject the timezone into a selection by datewidget which is
        # timezone naive, i.e. DateSampled is '2017-05-15 01:05'
        # but should be '2017/05/15 01:05:00 GMT+2'.
        # Otherwise processForm => reindexObject() sets it to GMT+0 which
        # results in an incorrect date record.
        tz = DateTime().timezone()
        datesampled = DateTime(values['DateSampled'] + ' ' + tz)
        values['DateSampled'] = datesampled

    ar.processForm(REQUEST=request, values=values)
    # Object has been renamed during processForm; sync the RequestID field
    ar.edit(RequestID=ar.getId())

    # Set initial AR state
    action = '{0}sampling_workflow'.format('' if workflow_enabled else 'no_')
    workflow.doActionFor(ar, action)

    # Set analysis request analyses
    service_uids = _resolve_items_to_service_uids(analyses_services)
    # processForm already has created the analyses, but here we create the
    # analyses with specs and prices. This function, even it is called 'set',
    # deletes the old analyses, so eventually we obtain the desired analyses.
    ar.setAnalyses(service_uids, prices=prices, specs=specifications)
    # Getting the ar objects
    analyses = ar.getAnalyses(full_objects=True)
    # Continue to set the state of the AR; states in this list must not be
    # advanced to 'received'
    skip_receive = [
        'to_be_sampled', 'sample_due', 'sampled', 'to_be_preserved'
    ]
    if secondary:
        # Only 'sample_due' and 'sample_received' samples can be selected
        # for secondary analyses
        doActionFor(ar, 'sampled')
        doActionFor(ar, 'sample_due')
        sample_state = workflow.getInfoFor(sample, 'review_state')
        if sample_state not in skip_receive:
            doActionFor(ar, 'receive')

    # Set the state of analyses we created.
    for analysis in analyses:
        revers = analysis.getService().getNumberOfRequiredVerifications()
        analysis.setNumberOfRequiredVerifications(revers)
        doActionFor(analysis, 'sample_due')
        analysis_state = workflow.getInfoFor(analysis, 'review_state')
        if analysis_state not in skip_receive:
            doActionFor(analysis, 'receive')

    if not secondary:
        # Create sample partitions
        if not partitions:
            partitions = values.get('Partitions', [{'services': service_uids}])
        for n, partition in enumerate(partitions):
            # Calculate partition id
            partition['object'] = create_samplepartition(
                sample, partition, analyses)
        # If Preservation is required for some partitions,
        # and the SamplingWorkflow is disabled, we need
        # to transition to to_be_preserved manually.
        if not workflow_enabled:
            to_be_preserved = []
            sample_due = []
            lowest_state = 'sample_due'
            for p in sample.objectValues('SamplePartition'):
                if p.getPreservation():
                    lowest_state = 'to_be_preserved'
                    to_be_preserved.append(p)
                else:
                    sample_due.append(p)
            for p in to_be_preserved:
                doActionFor(p, 'to_be_preserved')
            for p in sample_due:
                doActionFor(p, 'sample_due')
            # Sample and AR follow the least-advanced partition state
            doActionFor(sample, lowest_state)
            doActionFor(ar, lowest_state)

        # Transition pre-preserved partitions
        for p in partitions:
            if 'prepreserved' in p and p['prepreserved']:
                part = p['object']
                state = workflow.getInfoFor(part, 'review_state')
                if state == 'to_be_preserved':
                    workflow.doActionFor(part, 'preserve')

    # Once the ar is fully created, check if there are rejection reasons
    reject_field = values.get('RejectionReasons', '')
    if reject_field and reject_field.get('checkbox', False):
        doActionFor(ar, 'reject')

    # If the Sampling Workflow field values are valid,
    # and the SamplingWorkflow is enabled, we will
    # automatically kick off the "sample" transition now
    tids = [t['id'] for t in get_transitions_for(ar)]
    if 'sample' in tids and ar.getSampler() and ar.getDateSampled():
        do_transition_for(ar, 'sample')

    # Return the newly created Analysis Request
    return ar
Exemplo n.º 16
0
    def action_add(self):
        """Form action to add a new attachment

        Code taken from bika.lims.content.addARAttachment.

        Creates the Attachment object from the submitted form, appends it
        to either the analysis (when an Analysis UID is in the form) or
        the current context, and redirects back to the referring view.
        """

        form = self.request.form
        parent = api.get_parent(self.context)
        attachment_file = form.get('AttachmentFile_file', None)
        AttachmentType = form.get('AttachmentType', '')
        AttachmentKeys = form.get('AttachmentKeys', '')
        ReportOption = form.get('ReportOption', 'a')

        # nothing to do if the attachment file is missing
        if attachment_file is None:
            logger.warn("AttachmentView.action_add_attachment: Attachment file is missing")
            return

        # create attachment
        attachment = self.create_attachment(
            parent,
            attachment_file,
            AttachmentType=AttachmentType,
            AttachmentKeys=AttachmentKeys,
            ReportOption=ReportOption)

        # append the new UID to the end of the current order
        self.set_attachments_order(api.get_uid(attachment))

        # handle analysis attachment
        analysis_uid = form.get("Analysis", None)
        if analysis_uid:
            rc = api.get_tool("reference_catalog")
            analysis = rc.lookupObject(analysis_uid)
            # NOTE(review): assumes getAttachment() returns an iterable
            # (never None) — confirm against the Attachment field type
            others = analysis.getAttachment()
            attachments = []
            for other in others:
                attachments.append(other.UID())
            attachments.append(attachment.UID())
            analysis.setAttachment(attachments)
            # The metadata for getAttachmentUIDs need to get updated,
            # otherwise the attachments are not displayed
            # https://github.com/senaite/bika.lims/issues/521
            analysis.reindexObject()

            # attaching a file can complete an 'attachment_due' analysis
            if api.get_workflow_status_of(analysis) == 'attachment_due':
                api.do_transition_for(analysis, 'attach')
        else:
            others = self.context.getAttachment()
            attachments = []
            for other in others:
                attachments.append(other.UID())
            attachments.append(attachment.UID())

            self.context.setAttachment(attachments)

        # redirect back to the manage_results view when we came from it
        if self.request['HTTP_REFERER'].endswith('manage_results'):
            self.request.response.redirect('{}/manage_results'.format(
                self.context.absolute_url()))
        else:
            self.request.response.redirect(self.context.absolute_url())
Exemplo n.º 17
0
def do_transition_for(brain_or_object, transition):
    """Delegate the workflow transition to bika.lims.api.do_transition_for.

    :param brain_or_object: catalog brain or content object to transition
    :param transition: id of the workflow transition to perform
    :returns: whatever bika.lims.api.do_transition_for returns
    """
    result = api.do_transition_for(brain_or_object, transition)
    return result
Exemplo n.º 18
0
def do_transition_for(brain_or_object, transition):
    """Delegate the workflow transition to senaite.api.do_transition_for.

    :param brain_or_object: catalog brain or content object to transition
    :param transition: id of the workflow transition to perform
    :returns: whatever senaite.api.do_transition_for returns
    """
    delegate = api.do_transition_for
    return delegate(brain_or_object, transition)
Exemplo n.º 19
0
def do_transition_for(brain_or_object, transition):
    """Forward the call to bika.lims.api.do_transition_for.

    :param brain_or_object: catalog brain or content object to transition
    :param transition: id of the workflow transition to perform
    :returns: whatever bika.lims.api.do_transition_for returns
    """
    args = (brain_or_object, transition)
    return api.do_transition_for(*args)
Exemplo n.º 20
0
    def action_add(self):
        """Form action to add a new attachment

        Creates an Attachment object in the parent container from the
        uploaded form file and links it either to the selected analysis
        (form field "Analysis") or to the current context.

        Code taken from bika.lims.content.addARAttachment.
        """
        form = self.request.form
        parent = api.get_parent(self.context)
        this_file = form.get('AttachmentFile_file', None)

        # nothing to do if the attachment file is missing
        if this_file is None:
            logger.warn(
                "AttachmentView.action_add_attachment: Attachment file is missing"
            )
            return

        # create attachment
        attachmentid = self.context.generateUniqueId('Attachment')
        attachment = api.create(parent, "Attachment", id=attachmentid)

        # update the attachment with the values from the form
        attachment.edit(
            AttachmentFile=this_file,
            AttachmentType=form.get('AttachmentType', ''),
            AttachmentKeys=form.get('AttachmentKeys', ''),
            ReportOption=form.get('ReportOption', 'a'),
        )

        # process and reindex
        attachment.processForm()
        attachment.reindexObject()

        # append the new UID to the end of the current order
        self.set_attachments_order(api.get_uid(attachment))

        # handle analysis attachment
        analysis_uid = form.get("Analysis", None)
        if analysis_uid:
            rc = api.get_tool("reference_catalog")
            analysis = rc.lookupObject(analysis_uid)
            # keep the existing attachments and append the new one
            attachments = [other.UID() for other in analysis.getAttachment()]
            attachments.append(attachment.UID())
            analysis.setAttachment(attachments)
            # The metadata for getAttachmentUIDs need to get updated,
            # otherwise the attachments are not displayed
            # https://github.com/senaite/bika.lims/issues/521
            analysis.reindexObject()

            if api.get_workflow_status_of(analysis) == 'attachment_due':
                api.do_transition_for(analysis, 'attach')
        else:
            attachments = [other.UID() for other in self.context.getAttachment()]
            attachments.append(attachment.UID())
            self.context.setAttachment(attachments)

        # redirect back to the results view when we came from there; use
        # .get to avoid a KeyError when no referer header was sent
        if self.request.get('HTTP_REFERER', '').endswith('manage_results'):
            self.request.response.redirect('{}/manage_results'.format(
                self.context.absolute_url()))
        else:
            self.request.response.redirect(self.context.absolute_url())
Exemplo n.º 21
0
    def workflow_script_import(self):
        """Create objects from valid ARImport

        For each row of the 'SampleData' grid field, creates a Sample
        (with a single SamplePartition) and an AnalysisRequest in the
        parent client, fires the sampling_workflow/no_sampling_workflow
        transition on each, and notifies a progress-bar update per row.
        Finally redirects the browser to the parent client's URL.
        """
        def convert_date_string(datestr):
            # Grid dates use '-' separators; the AR/Sample fields are fed
            # the '/'-separated form instead — TODO confirm expected format
            return datestr.replace('-', '/')

        def lookup_sampler_uid(import_user):
            #Lookup sampler's uid
            # Matches import_user against both the user ids and the full
            # names of users holding the LabManager or Sampler role.
            # Returns '' when no match is found or when several users
            # share the same full name (ambiguous).
            found = False
            userid = None
            user_ids = []
            users = getUsers(self, ['LabManager', 'Sampler']).items()
            for (samplerid, samplername) in users:
                if import_user == samplerid:
                    found = True
                    userid = samplerid
                    break
                if import_user == samplername:
                    user_ids.append(samplerid)
            if found:
                return userid
            if len(user_ids) == 1:
                return user_ids[0]
            if len(user_ids) > 1:
                #raise ValueError('Sampler %s is ambiguous' % import_user)
                return ''
            #Otherwise
            #raise ValueError('Sampler %s not found' % import_user)
            return ''

        bsc = getToolByName(self, 'bika_setup_catalog')
        workflow = getToolByName(self, 'portal_workflow')
        # the ARImport is contained in a Client; objects are created there
        client = self.aq_parent

        title = _('Submitting AR Import')
        description = _('Creating and initialising objects')
        bar = ProgressBar(self, self.REQUEST, title, description)
        notify(InitialiseProgressBar(bar))

        # fetch all AnalysisProfile objects once, to resolve the per-row
        # profile titles/keys/UIDs below
        profiles = [x.getObject() for x in bsc(portal_type='AnalysisProfile')]

        gridrows = self.schema['SampleData'].get(self)
        row_cnt = 0
        for therow in gridrows:
            # work on a copy so the stored grid data is not mutated
            row = therow.copy()
            row_cnt += 1
            # Create Sample
            sample = _createObjectByType('Sample', client, tmpID())
            sample.unmarkCreationFlag()
            # First convert all row values into something the field can take
            sample.edit(**row)
            sample._renameAfterCreation()
            event.notify(ObjectInitializedEvent(sample))
            sample.at_post_create_script()
            swe = self.bika_setup.getSamplingWorkflowEnabled()
            if swe:
                workflow.doActionFor(sample, 'sampling_workflow')
            else:
                workflow.doActionFor(sample, 'no_sampling_workflow')
            # every Sample gets exactly one partition here
            part = _createObjectByType('SamplePartition', sample, 'part-1')
            part.unmarkCreationFlag()
            renameAfterCreation(part)
            if swe:
                workflow.doActionFor(part, 'sampling_workflow')
            else:
                workflow.doActionFor(part, 'no_sampling_workflow')
            container = self.get_row_container(row)
            if container:
                part.edit(Container=container)

            # Profiles are titles, profile keys, or UIDS: convert them to UIDs.
            # NOTE(review): this loop shadows the outer `title` variable
            newprofiles = []
            for title in row['Profiles']:
                objects = [
                    x for x in profiles
                    if title in (x.getProfileKey(), x.UID(), x.Title())
                ]
                for obj in objects:
                    newprofiles.append(obj.UID())
            row['Profiles'] = newprofiles

            # BBB in bika.lims < 3.1.9, only one profile is permitted
            # on an AR.  The services are all added, but only first selected
            # profile name is stored.
            row['Profile'] = newprofiles[0] if newprofiles else None

            # Same for analyses
            newanalyses = set(
                self.get_row_services(row) +
                self.get_row_profile_services(row))
            # analyses are set explicitly via setAnalyses() further down
            row['Analyses'] = []
            # get batch
            batch = self.schema['Batch'].get(self)
            if batch:
                row['Batch'] = batch
            # Add AR fields from schema into this row's data
            row['ClientReference'] = self.getClientReference()
            row['ClientOrderNumber'] = self.getClientOrderNumber()
            row['Contact'] = self.getContact()
            row['DateSampled'] = convert_date_string(row['DateSampled'])
            if row['Sampler']:
                row['Sampler'] = lookup_sampler_uid(row['Sampler'])

            # Create AR
            ar = _createObjectByType("AnalysisRequest", client, tmpID())
            ar.setSample(sample)
            ar.unmarkCreationFlag()
            ar.edit(**row)
            ar._renameAfterCreation()
            ar.setAnalyses(list(newanalyses))
            # every analysis belongs to the single partition created above
            for analysis in ar.getAnalyses(full_objects=True):
                analysis.setSamplePartition(part)
            ar.at_post_create_script()
            if swe:
                workflow.doActionFor(ar, 'sampling_workflow')
            else:
                workflow.doActionFor(ar, 'no_sampling_workflow')

            # If the Sampling Workflow field values are valid,
            # and the SamplingWorkflow is enabled, we will
            # automatically kick off the "sample" transition now
            tids = [t['id'] for t in get_transitions_for(ar)]
            if 'sample' in tids and ar.getSampler() and ar.getDateSampled():
                do_transition_for(ar, 'sample')

            # advance the progress bar proportionally to the rows done
            progress_index = float(row_cnt) / len(gridrows) * 100
            progress = ProgressState(self.REQUEST, progress_index)
            notify(UpdateProgressEvent(progress))
        # document has been written to, and redirect() fails here
        self.REQUEST.response.write(
            '<script>document.location.href="%s"</script>' %
            (self.aq_parent.absolute_url()))