def test_default_stickers(self):
    """https://jira.bikalabs.com/browse/WINE-44: display SampleID or
    SamplePartition ID depending on bikasetup.ShowPartitions value
    """
    folder = self.portal.bika_setup.bika_analysisservices
    services = [_createObjectByType("AnalysisService", folder, tmpID()),
                _createObjectByType("AnalysisService", folder, tmpID())]
    services[0].processForm()
    services[1].processForm()
    services[0].edit(title="Detect Dust")
    services[1].edit(title="Detect water")
    # BUGFIX: was `s.UID` (a bound method object) instead of `s.UID()`,
    # so the list never contained actual UID strings.
    service_uids = [s.UID() for s in services]
    folder = self.portal.clients
    client = _createObjectByType("Client", folder, tmpID())
    client.processForm()
    folder = self.portal.clients.objectValues("Client")[0]
    contact = _createObjectByType("Contact", folder, tmpID())
    contact.processForm()
    contact.edit(Firstname="Bob", Surname="Dobbs", email="*****@*****.**")
    folder = self.portal.bika_setup.bika_sampletypes
    sampletype = _createObjectByType("SampleType", folder, tmpID())
    sampletype.processForm()
    sampletype.edit(title="Air", Prefix="AIR")
    values = {'Client': client.UID(),
              'Contact': contact.UID(),
              'SamplingDate': '2015-01-01',
              'SampleType': sampletype.UID()}
    for size in ["large", "small"]:
        # The two halves of the original test were identical except for
        # the ShowPartitions flag and the expected assertion; fold them
        # into a single loop.
        for show_partitions in (False, True):
            # create and receive AR
            ar = create_analysisrequest(client, {}, values, service_uids)
            ar.bika_setup.setShowPartitions(show_partitions)
            doActionFor(ar, 'receive')
            self.assertEquals(
                ar.portal_workflow.getInfoFor(ar, 'review_state'),
                'sample_received')
            # check sticker text
            ar.REQUEST['items'] = ar.getId()
            ar.REQUEST['template'] = "bika.lims:sticker_%s.pt" % size
            sticker = Sticker(ar, ar.REQUEST)()
            pid = ar.getSample().objectValues("SamplePartition")[0].getId()
            if show_partitions:
                self.assertIn(
                    pid, sticker,
                    "Sticker must contain partition ID %s" % pid)
            else:
                self.assertNotIn(
                    pid, sticker,
                    "Sticker must not contain partition ID %s" % pid)
def workflow_script_import(self):
    """Create AnalysisRequest objects from the rows of a valid
    SampleImport, then redirect to the owning client's view.
    """
    bsc = getToolByName(self, 'bika_setup_catalog')
    client = self.aq_parent
    profiles = [x.getObject() for x in bsc(portal_type='AnalysisProfile')]
    gridrows = self.schema['SampleData'].get(self)
    # NOTE: the original kept a `row_cnt` counter that was never read;
    # it has been removed.
    for therow in gridrows:
        # deep-copy so the stored grid data is left untouched
        row = deepcopy(therow)
        # Profiles are titles, profile keys, or UIDS: convert them to UIDs.
        newprofiles = []
        for title in row['Profiles']:
            objects = [x for x in profiles
                       if title in (x.getProfileKey(), x.UID(), x.Title())]
            for obj in objects:
                newprofiles.append(obj.UID())
        row['Profiles'] = newprofiles
        # Same for analyses: union of the row's own services and the
        # services implied by its profiles
        newanalyses = set(self.get_row_services(row) +
                          self.get_row_profile_services(row))
        # get batch
        batch = self.schema['Batch'].get(self)
        if batch:
            row['Batch'] = batch.UID()
        # Add AR fields from schema into this row's data
        if not row.get('ClientReference'):
            row['ClientReference'] = self.getClientReference()
        row['ClientOrderNumber'] = self.getClientOrderNumber()
        contact_uid = \
            self.getContact().UID() if self.getContact() else None
        row['Contact'] = contact_uid
        # Creating analysis request from gathered data (creation is the
        # desired side effect; the return value is not needed)
        create_analysisrequest(
            client,
            self.REQUEST,
            row,
            analyses=list(newanalyses),
        )
    self.REQUEST.response.redirect(client.absolute_url())
def create_ar(self, path, analyses=None, **kwargs):
    """Create an AnalysisRequest inside the container found at `path`.

    :param path: absolute traversal path of the container
    :param analyses: list of analysis service UIDs (defaults to empty)
    :param kwargs: AR field values forwarded to create_analysisrequest
    :returns: the UID of the created AnalysisRequest
    """
    # `analyses=[]` was a shared mutable default argument; use None
    # as sentinel instead (backward-compatible).
    if analyses is None:
        analyses = []
    portal = api.portal.get()
    container = portal.restrictedTraverse(path.strip('/').split('/'))
    # create object
    obj = create_analysisrequest(container, container.REQUEST, kwargs, analyses)
    return obj.UID()
def create_sample(**kwargs):
    """Create and return a new sample (AnalysisRequest).

    Missing Client/Contact/SampleType values are filled with the first
    matching setup object; DateSampled defaults to today.  Pass a
    `services` iterable to restrict the analyses, otherwise every
    AnalysisService is used.
    """
    # `kwargs` is always a dict here, so the old
    # `kwargs and kwargs or {}` idiom was redundant; copy it so the
    # caller's dict is not mutated.
    values = dict(kwargs)
    request = _api.get_request()
    date_sampled = DateTime().strftime("%Y-%m-%d")
    values.update({
        "DateSampled": values.get("DateSampled") or date_sampled,
    })
    # fall back to the first object of each required reference type
    to_update = ["Client", "Contact", "SampleType"]
    for portal_type in to_update:
        field_value = values.get(portal_type)
        if not field_value:
            field_value = _api.get_uid(get_object(portal_type))
        values[portal_type] = field_value
    services = None
    if "services" in values:
        services = values.pop("services")
    if not services:
        # default to all registered analysis services
        services = map(_api.get_uid, get_objects("AnalysisService"))
    client = _api.get_object_by_uid(values.get("Client"))
    sample = create_analysisrequest(client, request, values, services)
    return sample
def workflow_script_import(self):
    """Create AnalysisRequest objects from the rows of a valid ARImport,
    updating a progress bar as each row is processed.
    """
    bsc = getToolByName(self, 'bika_setup_catalog')
    client = self.aq_parent
    title = _('Submitting Sample Import')
    description = _('Creating and initialising objects')
    bar = ProgressBar(self, self.REQUEST, title, description)
    notify(InitialiseProgressBar(bar))
    profiles = [x.getObject() for x in bsc(portal_type='AnalysisProfile')]
    gridrows = self.schema['SampleData'].get(self)
    row_cnt = 0
    for therow in gridrows:
        row = deepcopy(therow)
        row_cnt += 1
        # Profiles are titles, profile keys, or UIDS: convert them to UIDs.
        # (loop variable renamed from `title` to avoid shadowing the
        # progress-bar title above)
        newprofiles = []
        for profile_title in row['Profiles']:
            objects = [x for x in profiles
                       if profile_title in (x.getProfileKey(), x.UID(),
                                            x.Title())]
            for obj in objects:
                newprofiles.append(obj.UID())
        row['Profiles'] = newprofiles
        # Same for analyses
        newanalyses = set(self.get_row_services(row) +
                          self.get_row_profile_services(row))
        # get batch
        batch = self.schema['Batch'].get(self)
        if batch:
            row['Batch'] = batch.UID()
        # Add AR fields from schema into this row's data
        row['ClientReference'] = self.getClientReference()
        row['ClientOrderNumber'] = self.getClientOrderNumber()
        contact_uid = \
            self.getContact().UID() if self.getContact() else None
        row['Contact'] = contact_uid
        # Creating analysis request from gathered data; creation itself
        # is the desired side effect, so the return value is dropped
        create_analysisrequest(
            client,
            self.REQUEST,
            row,
            analyses=list(newanalyses),)
        # progress marker update
        progress_index = float(row_cnt) / len(gridrows) * 100
        progress = ProgressState(self.REQUEST, progress_index)
        notify(UpdateProgressEvent(progress))
    # document has been written to, and redirect() fails here
    self.REQUEST.response.write(
        '<script>document.location.href="%s"</script>' % (
            self.absolute_url()))
def test_InstrumentInterfaceGeneXpert(self): # Checking if genexpert has already been added to Interface list. exims = [] for exim_id in instruments.__all__: exims.append((exim_id)) self.assertTrue('genexpert.genexpert' in exims) # Creating/ Getting some necessary objects. catalog = getToolByName(self.portal, 'portal_catalog') # Getting the first client client = self.portal.clients['client-1'] sampletype = self.portal.bika_setup.bika_sampletypes['sampletype-1'] values = { 'Client': client.UID(), 'Contact': client.getContacts()[0].UID(), 'SamplingDate': '2015-01-01', 'SampleType': sampletype.UID(), 'Profiles': '' } # Getting some services services = catalog(portal_type='AnalysisService', inactive_state='active')[:1] services[0].getObject().edit(Keyword="EbolaRUO") service_uids = [service.getObject().UID() for service in services] request = {} ar = create_analysisrequest(client, request, values, service_uids) wf = getToolByName(ar, 'portal_workflow') wf.doActionFor(ar, 'receive') # bsc = getToolByName(self.portal, 'bika_setup_catalog') # ins = bsc(portal_type='Instrument', inactive_state='active') transaction.commit() # Importing test file. import os dir_path = os.path.dirname(os.path.realpath(__file__)) temp_file = codecs.open(dir_path + "/files/GeneXpert.csv", encoding='utf-16-le') test_file = ConvertToUploadFile(temp_file) genex_parser = GeneXpertParser(test_file) importer = GeneXpertImporter( parser=genex_parser, context=self.portal, idsearchcriteria=['getId', 'getSampleID', 'getClientSampleID'], allowed_ar_states=[ 'sample_received', 'attachment_due', 'to_be_verified' ], allowed_analysis_states=None, override=[True, True]) tbex = '' try: importer.process() except: tbex = traceback.format_exc() errors = importer.errors logs = importer.logs print logs print errors
def createAR(self, path, analyses=None, **kwargs):
    """Create an AnalysisRequest at `path` as test_labmanager.

    Temporarily swaps the security manager to the lab manager, creates
    the AR, restores the caller's security manager, and returns the new
    object's UID.
    """
    # `analyses=[]` was a shared mutable default argument; use None
    # as sentinel instead (backward-compatible).
    if analyses is None:
        analyses = []
    portal = api.portal.get()
    container = portal.restrictedTraverse(path.strip("/").split("/"))
    # login again
    saved = self.swapSecurityManager("test_labmanager")
    # create object
    obj = create_analysisrequest(container, container.REQUEST, kwargs, analyses)
    # go back to original security manager
    setSecurityManager(saved)
    transaction.savepoint()
    return obj.UID()
def test_retract_an_analysis_request_without_profile_price(self):
    """Retract one analysis of an AR that was created through an
    analysis profile and check the invoice excludes the retracted
    analysis (regression test for LIMS-1989).
    """
    profs = self.portal.bika_setup.bika_analysisprofiles
    # analysisprofile-1: Trace Metals
    analysisprofile = profs['analysisprofile-1']
    catalog = getToolByName(self.portal, 'portal_catalog')
    # Getting the first client
    client = self.portal.clients['client-1']
    sampletype = self.portal.bika_setup.bika_sampletypes['sampletype-1']
    values = {'Client': client.UID(),
              'Contact': client.getContacts()[0].UID(),
              'SamplingDate': '2015-01-01',
              'SampleType': sampletype.UID()}
    # Getting some services; the profile is assigned via the AR values
    values['Profiles'] = analysisprofile.UID()
    services = catalog(portal_type = 'AnalysisService',
                       inactive_state = 'active')[:3]
    service_uids = [service.getObject().UID() for service in services]
    request = {}
    ar = create_analysisrequest(client, request, values, service_uids)
    transaction.commit()
    # the AR must actually carry the profile, otherwise the scenario
    # under test never happens
    all_analyses, all_profiles, analyses_from_profiles = ar.getServicesAndProfiles()
    if len(all_profiles) == 0:
        self.fail('Profiles not being used on the AR')
    wf = getToolByName(ar, 'portal_workflow')
    wf.doActionFor(ar, 'receive')
    # Checking if everything is going OK
    self.assertEquals(ar.portal_workflow.getInfoFor(ar, 'review_state'),
                      'sample_received')
    count = 0
    for analysis in ar.getAnalyses(full_objects=True):
        analysis.setResult('12')
        wf.doActionFor(analysis, 'submit')
        self.assertEquals(analysis.portal_workflow.getInfoFor(
            analysis, 'review_state'), 'to_be_verified')
        # Only retract the first one
        if count == 0:
            wf.doActionFor(analysis, 'retract')
            self.assertEquals(analysis.portal_workflow.getInfoFor(
                analysis, 'review_state'), 'retracted')
            transaction.commit()
        count += 1
    browser = self.getBrowser()
    invoice_url = '%s/invoice' % ar.absolute_url()
    browser.open(invoice_url)
    # 30.00 is presumably the retracted service's price and 20.00 the
    # remaining subtotal -- TODO confirm against the test fixture prices
    if '30.00' in browser.contents:
        self.fail('Retracted Analyses Services found on the invoice')
    if '20.00' not in browser.contents:
        self.fail('SubTotal incorrect')
def create_ar(self, analysisservice):
    """Create an AR for client-1 containing the given analysis service,
    receive it, and return the AnalysisRequest object.
    """
    # Create the AR
    client = self.portal.clients['client-1']
    sampletype = self.portal.bika_setup.bika_sampletypes['sampletype-1']
    contact_uid = client.getContacts()[0].UID()
    field_values = {
        'Client': client.UID(),
        'Contact': contact_uid,
        'SamplingDate': '2016-01-01',
        'SampleType': sampletype.UID(),
    }
    ar = create_analysisrequest(client, {}, field_values, [analysisservice])
    workflow_tool = getToolByName(ar, 'portal_workflow')
    workflow_tool.doActionFor(ar, 'receive')
    return ar
def test_retract_an_analysis_request(self):
    """Submit results for every analysis of an AR, retract them all,
    resubmit, then retract the whole AR and check it returns to the
    `sample_received` state (regression test for LIMS-1989).
    """
    catalog = getToolByName(self.portal, 'portal_catalog')
    # Getting the first client
    client = self.portal.clients['client-1']
    sampletype = self.portal.bika_setup.bika_sampletypes['sampletype-1']
    values = {
        'Client': client.UID(),
        'Contact': client.getContacts()[0].UID(),
        'SamplingDate': '2015-01-01',
        'SampleType': sampletype.UID()
    }
    # Getting some services
    services = catalog(portal_type='AnalysisService',
                       inactive_state='active')[:3]
    service_uids = [service.getObject().UID() for service in services]
    request = {}
    ar = create_analysisrequest(client, request, values, service_uids)
    wf = getToolByName(ar, 'portal_workflow')
    try:
        wf.doActionFor(ar, 'receive')
    except WorkflowException:
        # presumably the AR can already be in a received state depending
        # on setup -- the assertion below verifies the state either way
        pass
    # Checking if everything is going OK
    self.assertEquals(ar.portal_workflow.getInfoFor(ar, 'review_state'),
                      'sample_received')
    for analysis in ar.getAnalyses(full_objects=True):
        analysis.setResult('12')
        wf.doActionFor(analysis, 'submit')
        self.assertEquals(
            analysis.portal_workflow.getInfoFor(analysis, 'review_state'),
            'to_be_verified')
        # retracting results
        wf.doActionFor(analysis, 'retract')
        self.assertEquals(
            analysis.portal_workflow.getInfoFor(analysis, 'review_state'),
            'retracted')
    # resubmit every analysis that is not in the retracted state
    for analysis in ar.getAnalyses(full_objects=True):
        if analysis.portal_workflow.getInfoFor(
                analysis, 'review_state') == 'retracted':
            continue
        wf.doActionFor(analysis, 'submit')
        self.assertEquals(
            analysis.portal_workflow.getInfoFor(analysis, 'review_state'),
            'to_be_verified')
    # retract the whole AR: it must fall back to sample_received
    wf.doActionFor(ar, 'retract')
    self.assertEquals(ar.portal_workflow.getInfoFor(ar, 'review_state'),
                      'sample_received')
def test_sample_workflow_action_schedule_sampling(self):
    """ This test validates the function
    bika/lims//browser/analysisrequest/workflow.py/workflow_action_schedule_sampling
    """
    from bika.lims.utils.workflow import schedulesampling
    workflow = getToolByName(self.portal, 'portal_workflow')
    # NOTE(review): pc, mtool and coordinator are assigned but never
    # used afterwards
    pc = getToolByName(self.portal, 'portal_catalog')
    sampler = api.user.get(username='******')
    coordinator = self.createUser('SamplingCoordinator', 'cord1')
    # checking if the user belongs to the coordinators group
    mtool = getToolByName(self.portal, 'portal_membership')
    groups_tool = getToolByName(self.portal, 'portal_groups')
    usr_groups = groups_tool.getGroupsByUserId('cord1')
    self.assertIn(
        'SamplingCoordinators', [group.id for group in usr_groups])
    # Getting the client
    client = self.portal.clients['client-1']
    # Getting a sample type
    sampletype = self.portal.bika_setup.bika_sampletypes['sampletype-1']
    # Creating an AR
    values = {'Client': client.UID(),
              'Contact': client.getContacts()[0].UID(),
              'SamplingDate': '2015-01-01',
              'SampleType': sampletype.UID()}
    request = {}
    services = [s.UID() for s in self.services]
    # creating the analysisrequest
    ar = create_analysisrequest(client, request, values, services)
    self.assertEqual(
        workflow.getInfoFor(ar, 'review_state'), 'to_be_sampled')
    # Changing user to coordinator
    logout()
    login(self.portal, 'cord1')
    # If ScheduledSamplingSampler is empty and ScheduleSamplingEnabled,
    # no workflow_action_schedule_sampling can be done: the states
    # below must remain unchanged
    schedulesampling.doTransition(ar.getSample())
    self.assertEqual(
        workflow.getInfoFor(ar, 'review_state'), 'to_be_sampled')
    self.assertEqual(
        workflow.getInfoFor(ar.getSample(), 'review_state'),
        'to_be_sampled')
    # set a value in ScheduledSamplingSampler; now the transition must
    # move both the AR and its sample to scheduled_sampling
    ar.setScheduledSamplingSampler(sampler)
    schedulesampling.doTransition(ar.getSample())
    self.assertEqual(
        workflow.getInfoFor(ar, 'review_state'), 'scheduled_sampling')
    self.assertEqual(
        workflow.getInfoFor(ar.getSample(), 'review_state'),
        'scheduled_sampling')
def test_sample_workflow_action_schedule_sampling(self):
    """ This test validates the function
    bika/lims//browser/analysisrequest/workflow.py/workflow_action_schedule_sampling
    """
    from bika.lims.utils.workflow import schedulesampling
    workflow = getToolByName(self.portal, 'portal_workflow')
    # NOTE(review): pc, mtool and coordinator are assigned but never
    # read again afterwards
    pc = getToolByName(self.portal, 'portal_catalog')
    sampler = api.user.get(username='******')
    coordinator = self.createUser('SamplingCoordinator', 'cord1')
    # checking if the user belongs to the coordinators group
    mtool = getToolByName(self.portal, 'portal_membership')
    groups_tool = getToolByName(self.portal, 'portal_groups')
    usr_groups = groups_tool.getGroupsByUserId('cord1')
    self.assertIn('SamplingCoordinators',
                  [group.id for group in usr_groups])
    # Getting the client
    client = self.portal.clients['client-1']
    # Getting a sample type
    sampletype = self.portal.bika_setup.bika_sampletypes['sampletype-1']
    # Creating an AR
    values = {
        'Client': client.UID(),
        'Contact': client.getContacts()[0].UID(),
        'SamplingDate': '2015-01-01',
        'SampleType': sampletype.UID()
    }
    request = {}
    services = [s.UID() for s in self.services]
    # creating the analysisrequest
    ar = create_analysisrequest(client, request, values, services)
    self.assertEqual(workflow.getInfoFor(ar, 'review_state'),
                     'to_be_sampled')
    # Changing user to coordinator
    logout()
    login(self.portal, 'cord1')
    # If ScheduledSamplingSampler is empty and ScheduleSamplingEnabled,
    # no workflow_action_schedule_sampling can be done: states must not
    # change yet
    schedulesampling.doTransition(ar.getSample())
    self.assertEqual(workflow.getInfoFor(ar, 'review_state'),
                     'to_be_sampled')
    self.assertEqual(workflow.getInfoFor(ar.getSample(), 'review_state'),
                     'to_be_sampled')
    # set a value in ScheduledSamplingSampler; the transition must now
    # move both the AR and its sample forward
    ar.setScheduledSamplingSampler(sampler)
    schedulesampling.doTransition(ar.getSample())
    self.assertEqual(workflow.getInfoFor(ar, 'review_state'),
                     'scheduled_sampling')
    self.assertEqual(workflow.getInfoFor(ar.getSample(), 'review_state'),
                     'scheduled_sampling')
def create_sample(services, client, contact, sample_type, receive=True):
    """Create a sample (AnalysisRequest) for the given client and contact
    covering the given services; receive it unless `receive` is False.
    """
    sampled_date = DateTime().strftime("%Y-%m-%d")
    field_values = {
        'Client': client.UID(),
        'Contact': contact.UID(),
        'DateSampled': sampled_date,
        'SampleType': sample_type.UID()
    }
    # resolve each service to its UID
    uids = [_api.get_uid(service) for service in services]
    sample = create_analysisrequest(
        client, _api.get_request(), field_values, uids)
    if receive:
        do_action_for(sample, "receive")
    transaction.commit()
    return sample
def test_retract_an_analysis_request(self):
    """Submit and retract every analysis of an AR, resubmit them, then
    retract the AR itself and confirm it returns to `sample_received`
    (regression test for LIMS-1989).
    """
    catalog = getToolByName(self.portal, 'portal_catalog')
    # Getting the first client
    client = self.portal.clients['client-1']
    sampletype = self.portal.bika_setup.bika_sampletypes['sampletype-1']
    values = {'Client': client.UID(),
              'Contact': client.getContacts()[0].UID(),
              'SamplingDate': '2015-01-01',
              'SampleType': sampletype.UID()}
    # Getting some services
    services = catalog(portal_type = 'AnalysisService',
                       inactive_state = 'active')[:3]
    service_uids = [service.getObject().UID() for service in services]
    request = {}
    ar = create_analysisrequest(client, request, values, service_uids)
    wf = getToolByName(ar, 'portal_workflow')
    try:
        wf.doActionFor(ar, 'receive')
    except WorkflowException:
        # presumably the AR may already be received depending on setup;
        # the assertion below checks the state either way
        pass
    # Checking if everything is going OK
    self.assertEquals(ar.portal_workflow.getInfoFor(ar, 'review_state'),
                      'sample_received')
    for analysis in ar.getAnalyses(full_objects=True):
        analysis.setResult('12')
        wf.doActionFor(analysis, 'submit')
        self.assertEquals(analysis.portal_workflow.getInfoFor(
            analysis, 'review_state'), 'to_be_verified')
        # retracting results
        wf.doActionFor(analysis, 'retract')
        self.assertEquals(analysis.portal_workflow.getInfoFor(
            analysis, 'review_state'), 'retracted')
    # resubmit everything that is not in the retracted state
    for analysis in ar.getAnalyses(full_objects=True):
        if analysis.portal_workflow.getInfoFor(
                analysis, 'review_state') == 'retracted':
            continue
        wf.doActionFor(analysis, 'submit')
        self.assertEquals(analysis.portal_workflow.getInfoFor(
            analysis, 'review_state'), 'to_be_verified')
    # retract the whole AR: it must fall back to sample_received
    wf.doActionFor(ar, 'retract')
    self.assertEquals(ar.portal_workflow.getInfoFor(ar, 'review_state'),
                      'sample_received')
def async_create_analysisrequest(self):
    """Create AnalysisRequests from the JSON `records` in the request
    form, attach any uploaded attachments by record index, and email a
    summary of the outcome to the analyst.
    """
    msgs = []
    form = self.request.form
    records = json.loads(form.get('records', '[]'))
    # BUGFIX: default was '[]', which parses to a list; `.get(str(n))`
    # below requires a dict, so an absent 'attachments' key crashed.
    attachments = json.loads(form.get('attachments', '{}'))
    ARs = []
    logger.info('Async create %s records' % len(records))
    for n, record in enumerate(records):
        client_uid = record.get("Client")
        client = api.get_object_by_uid(client_uid)
        if not client:
            # BUGFIX: message previously read "Client {} found"
            msgs.append("Error: Client {} not found".format(client_uid))
            continue
        # get the specifications and pass them directly to the AR
        # create function.
        specifications = record.pop("Specifications", {})
        # Create the Analysis Request
        ar = create_analysisrequest(
            client,
            self.request,
            values=record,
            specifications=specifications)
        ARs.append(ar.getId())
        # attachments are keyed by the record's index in the payload
        _attachments = []
        for att_uid in attachments.get(str(n), []):
            attachment = api.get_object_by_uid(att_uid)
            _attachments.append(attachment)
        if _attachments:
            ar.setAttachment(_attachments)
    if len(ARs) == 1:
        msgs.append('Created AR {}'.format(ARs[0]))
    elif len(ARs) > 1:
        msgs.append('Created ARs {}'.format(', '.join(ARs)))
    else:
        msgs.append('No ARs created')
    message = '; '.join(msgs)
    logger.info('AR Creation complete: {}'.format(message))
    self._email_analyst(message)
    return
def test_LIMS_2371_SignificantFigures(self):
    """Check getFormattedResult() against a table of expected
    significant-figure renderings (LIMS-2371).
    """
    # result value -> {significant figures: expected formatted result}
    RESULT_VALUES = {
        '-22770264': {1: '-2e07', 2: '-2.3e07', 3: '-2.28e07',
                      4: '-2.277e07', 5: '-2.277e07', 6: '-2.27703e07',
                      7: '-2.277026e07'},
        '-2277.3': {1: '-2000', 2: '-2300', 3: '-2280', 4: '-2277',
                    5: '-2277.3', 6: '-2277.30', 7: '-2277.300'},
        '-40277': {1: '-40000', 2: '-40000', 3: '-40300', 4: '-40280',
                   5: '-40277', 6: '-40277.0', 7: '-40277.00'},
        '-40277.036': {1: '-40000', 2: '-40000', 3: '-40300', 4: '-40280',
                       5: '-40277', 6: '-40277.0', 7: '-40277.04'},
        '47000.01': {1: '50000', 2: '47000', 3: '47000', 4: '47000',
                     5: '47000', 6: '47000.0', 7: '47000.01',
                     8: '47000.010', 9: '47000.0100'},
        '132': {1: '100', 2: '130', 3: '132', 4: '132.0', 5: '132.00',
                6: '132.000'},
        '14700.04': {1: '10000', 2: '15000', 3: '14700', 4: '14700',
                     5: '14700', 6: '14700.0', 7: '14700.04',
                     8: '14700.040', 9: '14700.0400'},
        '1407.0': {1: '1000', 2: '1400', 3: '1410', 4: '1407',
                   5: '1407.0', 6: '1407.00', 7: '1407.000'},
        '0.147': {1: '0.1', 2: '0.15', 3: '0.147', 4: '0.1470',
                  5: '0.14700'},
        '4308': {1: '4000', 2: '4300', 3: '4310', 4: '4308', 5: '4308.0',
                 6: '4308.00', 7: '4308.000'},
        '470000': {1: '500000', 2: '470000', 3: '470000', 4: '470000',
                   5: '470000', 6: '470000', 7: '470000.0',
                   8: '470000.00', 9: '470000.000'},
        '0.154': {1: '0.2', 2: '0.15', 3: '0.154', 4: '0.1540',
                  5: '0.15400', 6: '0.154000'},
        '0.166': {1: '0.2', 2: '0.17', 3: '0.166', 4: '0.1660',
                  5: '0.16600', 6: '0.166000'},
        '0.156': {1: '0.2', 2: '0.16', 3: '0.156', 4: '0.1560',
                  5: '0.15600', 6: '0.156000'},
        '47841242': {1: '5e07', 2: '4.8e07', 3: '4.78e07', 4: '4.784e07',
                     5: '4.7841e07', 6: '4.78412e07', 7: '4.784124e07',
                     8: '4.7841242e07', 9: '4.7841242e07',
                     10: '4.7841242e07'},
        '2.2e-06': {1: '0.000002', 2: '0.0000022', 3: '0.00000220',
                    4: '0.000002200'},
        '19019.19019': {1: '20000', 2: '19000', 3: '19000', 4: '19020',
                        5: '19019', 6: '19019.2', 7: '19019.19',
                        8: '19019.190', 9: '19019.1902',
                        10: '19019.19019'}
    }
    service = self.service
    service.setExponentialFormatPrecision(7)  # just a high value
    service.setDisplayRounding("SIGNIFICANT_FIGURES")
    service.setLowerDetectionLimit('-999999999')  # Test results below 0 too
    for value, tests in RESULT_VALUES.items():
        # Create the AR with modified analysis service
        for sig_figures, expected in tests.items():
            service.setSignificantFigures(sig_figures)
            ar = create_analysisrequest(
                self.client, {},
                {'Client': self.client.UID(),
                 'Contact': self.client.getContacts()[0].UID(),
                 'SamplingDate': '2015-01-01',
                 'SampleType': self.sampletype.UID()},
                [service.UID()])
            do_transition_for(ar, 'receive')
            an = ar.getAnalyses()[0].getObject()
            an.setResult(value)
            # the formatted result must match the table entry exactly
            self.assertEqual(an.getFormattedResult(), expected)
def test_retract_an_analysis_request(self):
    """Retract a single analysis of an AR and verify the invoice
    excludes the retracted service.
    """
    catalog = getToolByName(self.portal, 'portal_catalog')
    client = self.portal.clients['client-1']
    sampletype = self.portal.bika_setup.bika_sampletypes['sampletype-1']
    values = {'Client': client.UID(),
              'Contact': client.getContacts()[0].UID(),
              'SamplingDate': '2015-01-01',
              'SampleType': sampletype.UID()}
    # take the first three active analysis services
    services = catalog(portal_type = 'AnalysisService',
                       inactive_state = 'active')[:3]
    service_uids = [service.getObject().UID() for service in services]
    request = {}
    ar = create_analysisrequest(client, request, values, service_uids)
    transaction.commit()
    wf = getToolByName(ar, 'portal_workflow')
    wf.doActionFor(ar, 'receive')
    self.assertEquals(ar.portal_workflow.getInfoFor(ar, 'review_state'),
                      'sample_received')
    count = 0
    for analysis in ar.getAnalyses(full_objects=True):
        analysis.setResult('12')
        wf.doActionFor(analysis, 'submit')
        self.assertEquals(analysis.portal_workflow.getInfoFor(
            analysis, 'review_state'), 'to_be_verified')
        # Only retract the first one
        if count == 0:
            wf.doActionFor(analysis, 'retract')
            self.assertEquals(analysis.portal_workflow.getInfoFor(
                analysis, 'review_state'), 'retracted')
            transaction.commit()
        count += 1
    browser = self.getBrowser()
    invoice_url = '%s/invoice' % ar.absolute_url()
    browser.open(invoice_url)
    # 30.00 is presumably the retracted service's price and 20.00 the
    # remaining subtotal -- TODO confirm against the fixture prices
    if '30.00' in browser.contents:
        self.fail('Retracted Analyses Services found on the invoice')
    if '20.00' not in browser.contents:
        self.fail('SubTotal incorrect')
def test_LIMS2001(self):
    """Worksheet duplicate analyses must keep being created even after
    source analyses are submitted or retracted (LIMS-2001).
    """
    # ARs creation
    # Client: Happy Hills
    # SampleType: Apple Pulp
    # Contact: Rita Mohale
    client = self.portal.clients['client-1']
    sampletype = self.portal.bika_setup.bika_sampletypes['sampletype-1']
    values = {
        'Client': client.UID(),
        'Contact': client.getContacts()[0].UID(),
        'SamplingDate': '2015-01-01',
        'SampleType': sampletype.UID()
    }
    # analysis-service-3: Calcium (Ca)
    # analysis-service-6: Copper (Cu)
    # analysis-service-7: Iron (Fe)
    servs = self.portal.bika_setup.bika_analysisservices
    aservs = [
        servs['analysisservice-3'], servs['analysisservice-6'],
        servs['analysisservice-7']
    ]
    services = [s.UID() for s in aservs]
    request = {}
    ar = create_analysisrequest(client, request, values, services)
    wf = getToolByName(ar, 'portal_workflow')
    wf.doActionFor(ar, 'receive')
    # Worksheet creation
    wsfolder = self.portal.worksheets
    ws = _createObjectByType("Worksheet", wsfolder, tmpID())
    ws.processForm()
    bsc = getToolByName(self.portal, 'bika_setup_catalog')
    # assign analyst1 as the worksheet's analyst
    lab_contacts = [o.getObject() for o in bsc(portal_type="LabContact")]
    lab_contact = [
        o for o in lab_contacts if o.getUsername() == 'analyst1'
    ]
    self.assertEquals(len(lab_contact), 1)
    lab_contact = lab_contact[0]
    ws.setAnalyst(lab_contact.getUsername())
    ws.setResultsLayout(self.portal.bika_setup.getWorksheetLayout())
    # Add analyses into the worksheet
    self.request['context_uid'] = ws.UID()
    for analysis in ar.getAnalyses():
        an = analysis.getObject()
        ws.addAnalysis(an)
    self.assertEquals(len(ws.getAnalyses()), 3)
    # Add a duplicate for slot 1 (there's only one slot)
    ws.addDuplicateAnalyses('1', None)
    ans = ws.getAnalyses()
    reg = [an for an in ans if an.portal_type == 'Analysis']
    dup = [an for an in ans if an.portal_type == 'DuplicateAnalysis']
    regkeys = [an.getKeyword() for an in reg]
    dupkeys = [an.getKeyword() for an in dup]
    regkeys.sort()
    dupkeys.sort()
    expregs = ['Ca', 'Cu', 'Fe']
    expdups = ['Ca', 'Cu', 'Fe']
    self.assertEquals(regkeys, expregs)
    self.assertEquals(dupkeys, expdups)
    # Add a result, submit and add another duplicate
    an1 = [an for an in reg if an.getKeyword() == 'Cu'][0]
    an1.setResult('13')
    wf.doActionFor(an1, 'submit')
    ws.addDuplicateAnalyses('1', None)
    ans = ws.getAnalyses()
    reg = [an for an in ans if an.portal_type == 'Analysis']
    dup = [an for an in ans if an.portal_type == 'DuplicateAnalysis']
    regkeys = [an.getKeyword() for an in reg]
    dupkeys = [an.getKeyword() for an in dup]
    regkeys.sort()
    dupkeys.sort()
    expregs = ['Ca', 'Cu', 'Fe']
    expdups = ['Ca', 'Ca', 'Cu', 'Cu', 'Fe', 'Fe']
    self.assertEquals(regkeys, expregs)
    self.assertEquals(dupkeys, expdups)
    # Retract the previous analysis and add another duplicate
    wf.doActionFor(an1, 'retract')
    ws.addDuplicateAnalyses('1', None)
    ans = ws.getAnalyses()
    reg = [an for an in ans if an.portal_type == 'Analysis']
    dup = [an for an in ans if an.portal_type == 'DuplicateAnalysis']
    regkeys = [an.getKeyword() for an in reg]
    dupkeys = [an.getKeyword() for an in dup]
    regkeys.sort()
    dupkeys.sort()
    # NOTE: expregs now lists 'Cu' twice -- the retract presumably
    # creates a replacement analysis; confirm against the workflow code
    expregs = ['Ca', 'Cu', 'Cu', 'Fe']
    expdups = ['Ca', 'Ca', 'Ca', 'Cu', 'Cu', 'Cu', 'Fe', 'Fe', 'Fe']
    self.assertEquals(regkeys, expregs)
    self.assertEquals(dupkeys, expdups)
    # Do the same process, but with two ARs
    ar = create_analysisrequest(client, request, values, services)
    wf.doActionFor(ar, 'receive')
    # Add analyses into the worksheet
    for analysis in ar.getAnalyses():
        an = analysis.getObject()
        ws.addAnalysis(an)
    ans = ws.getAnalyses()
    reg = [an for an in ans if an.portal_type == 'Analysis']
    regkeys = [an.getKeyword() for an in reg]
    regkeys.sort()
    expregs = ['Ca', 'Ca', 'Cu', 'Cu', 'Cu', 'Fe', 'Fe']
    self.assertEquals(regkeys, expregs)
    # Add a duplicate for the second AR
    # slot 1: previous AR
    # slot 2: Duplicate 1 (analysis without result)
    # slot 3: Duplicate 2 (analysis with submitted result)
    # slot 4: Duplicate 3 (analysis retracted)
    # slot 5: this new AR
    ws.addDuplicateAnalyses('5', None)
    ans = ws.getAnalyses()
    reg = [an for an in ans if an.portal_type == 'Analysis']
    dup = [an for an in ans if an.portal_type == 'DuplicateAnalysis']
    regkeys = [an.getKeyword() for an in reg]
    dupkeys = [an.getKeyword() for an in dup]
    regkeys.sort()
    dupkeys.sort()
    expregs = ['Ca', 'Ca', 'Cu', 'Cu', 'Cu', 'Fe', 'Fe']
    expdups = [
        'Ca', 'Ca', 'Ca', 'Ca', 'Cu', 'Cu', 'Cu', 'Cu', 'Fe', 'Fe',
        'Fe', 'Fe'
    ]
    self.assertEquals(regkeys, expregs)
    self.assertEquals(dupkeys, expdups)
def test_DecimalSciNotation(self):
    """Check getFormattedResult() for every combination of decimal
    precision, exponential-format precision and sci-notation style.

    Each matrix row is:
        [as_prec, as_exp_prec, notation, result, expected_formatted]
    where 'notation' selects the rendering style:
        1 => aE+b / aE-b
        2 => ax10^b / ax10^-b
        3 => ax10^b / ax10^-b (with superscript)
        4 => a·10^b / a·10^-b
        5 => a·10^b / a·10^-b (with superscript)
    """
    matrix = [
        # as_prec as_exp not result       formatted result
        # ------- ------ --- ------------ ----------------
        [0, 0, 1, '0', '0'],
        [0, 0, 2, '0', '0'],
        [0, 0, 3, '0', '0'],
        [0, 0, 4, '0', '0'],
        [0, 0, 5, '0', '0'],
        # Crazy results: decimal precision << exponential precision and
        # result << 1.  E.g. precision=2, exp precision=5, result=0.000012:
        # we give priority to the decimal precision and omit the
        # exponential precision, so the formatted result is '0.00' rather
        # than '1e-05'.  ("Calculate precision from uncertainties" is the
        # antidote to these abnormal setups, but the system must still
        # cope with incoherent values.)
        # Rule: if the result is >0 and <1 and the number of significant
        # digits is below the precision, ALWAYS use the decimal precision
        # and ignore the exponential precision.
        [0, 5, 1, '0.00001', '0'],
        [0, 5, 2, '0.00001', '0'],
        [0, 5, 3, '0.00001', '0'],
        [0, 5, 4, '0.00001', '0'],
        [0, 5, 5, '0.00001', '0'],
        [0, 5, 1, '-0.00001', '0'],
        [0, 5, 2, '-0.00001', '0'],
        [0, 5, 3, '-0.00001', '0'],
        [0, 5, 4, '-0.00001', '0'],
        [0, 5, 5, '-0.00001', '0'],
        [2, 5, 1, '0.00012', '0.00'],
        [2, 5, 2, '0.00012', '0.00'],
        [2, 5, 3, '0.00012', '0.00'],
        [2, 5, 4, '0.00012', '0.00'],
        [2, 5, 5, '0.00012', '0.00'],
        [2, 5, 1, '0.00001', '0.00'],
        [2, 5, 2, '0.00001', '0.00'],
        [2, 5, 3, '0.00001', '0.00'],
        [2, 5, 4, '0.00001', '0.00'],
        [2, 5, 5, '0.00001', '0.00'],
        [2, 5, 1, '0.0000123', '0.00'],
        [2, 5, 2, '0.0000123', '0.00'],
        [2, 5, 3, '0.0000123', '0.00'],
        [2, 5, 4, '0.0000123', '0.00'],
        [2, 5, 5, '0.0000123', '0.00'],
        [2, 5, 1, '0.01', '0.01'],
        [2, 5, 2, '0.01', '0.01'],
        [2, 5, 3, '0.01', '0.01'],
        [2, 5, 4, '0.01', '0.01'],
        [2, 5, 5, '0.01', '0.01'],
        # More crazy results: exp_precision = 0 has no sense!
        # As above, the decimal precision gets priority.
        [2, 0, 1, '0', '0.00'],
        [2, 0, 2, '0', '0.00'],
        [2, 0, 3, '0', '0.00'],
        [2, 0, 4, '0', '0.00'],
        [2, 0, 5, '0', '0.00'],
        [2, 0, 1, '0.012', '0.01'],
        [2, 0, 2, '0.012', '0.01'],
        [2, 0, 3, '0.012', '0.01'],
        [2, 0, 4, '0.012', '0.01'],
        [2, 0, 5, '0.012', '0.01'],
        [2, 1, 1, '0', '0.00'],
        [2, 1, 2, '0', '0.00'],
        [2, 1, 3, '0', '0.00'],
        [2, 1, 4, '0', '0.00'],
        [2, 1, 5, '0', '0.00'],
        # Apply the sci notation here, but 'cut' the extra decimals first
        [2, 1, 1, '0.012', '1e-02'],
        [2, 1, 2, '0.012', '1x10^-2'],
        [2, 1, 3, '0.012', '1x10<sup>-2</sup>'],
        [2, 1, 4, '0.012', '1·10^-2'],
        [2, 1, 5, '0.012', '1·10<sup>-2</sup>'],
        [2, 1, 1, '0.123', '1.2e-01'],
        [2, 1, 2, '0.123', '1.2x10^-1'],
        [2, 1, 3, '0.123', '1.2x10<sup>-1</sup>'],
        [2, 1, 4, '0.123', '1.2·10^-1'],
        [2, 1, 5, '0.123', '1.2·10<sup>-1</sup>'],
        [2, 1, 1, '1.234', '1.23'],
        [2, 1, 2, '1.234', '1.23'],
        [2, 1, 3, '1.234', '1.23'],
        [2, 1, 4, '1.234', '1.23'],
        [2, 1, 5, '1.234', '1.23'],
        [2, 1, 1, '12.345', '1.235e01'],
        [2, 1, 2, '12.345', '1.235x10^1'],
        [2, 1, 3, '12.345', '1.235x10<sup>1</sup>'],
        [2, 1, 4, '12.345', '1.235·10^1'],
        [2, 1, 5, '12.345', '1.235·10<sup>1</sup>'],
        [4, 3, 1, '0.0000123', '0.0000'],
        [4, 3, 2, '0.0000123', '0.0000'],
        [4, 3, 3, '0.0000123', '0.0000'],
        [4, 3, 4, '0.0000123', '0.0000'],
        [4, 3, 5, '0.0000123', '0.0000'],
        [4, 3, 1, '0.0001234', '1e-04'],
        [4, 3, 2, '0.0001234', '1x10^-4'],
        [4, 3, 3, '0.0001234', '1x10<sup>-4</sup>'],
        [4, 3, 4, '0.0001234', '1·10^-4'],
        [4, 3, 5, '0.0001234', '1·10<sup>-4</sup>'],
        [4, 3, 1, '0.0012345', '1.2e-03'],
        [4, 3, 2, '0.0012345', '1.2x10^-3'],
        [4, 3, 3, '0.0012345', '1.2x10<sup>-3</sup>'],
        [4, 3, 4, '0.0012345', '1.2·10^-3'],
        [4, 3, 5, '0.0012345', '1.2·10<sup>-3</sup>'],
        [4, 3, 1, '0.0123456', '0.0123'],
        # NOTE(review): the row above is duplicated in the original data
        [4, 3, 1, '0.0123456', '0.0123'],
        [4, 3, 2, '0.0123456', '0.0123'],
        [4, 3, 3, '0.0123456', '0.0123'],
        [4, 3, 4, '0.0123456', '0.0123'],
        [4, 3, 5, '0.0123456', '0.0123'],
        [4, 3, 1, '0.1234567', '0.1235'],
        [4, 3, 2, '0.1234567', '0.1235'],
        [4, 3, 3, '0.1234567', '0.1235'],
        [4, 3, 4, '0.1234567', '0.1235'],
        [4, 3, 5, '0.1234567', '0.1235'],
        [4, 3, 1, '1.2345678', '1.2346'],
        [4, 3, 2, '1.2345678', '1.2346'],
        [4, 3, 3, '1.2345678', '1.2346'],
        [4, 3, 4, '1.2345678', '1.2346'],
        [4, 3, 5, '1.2345678', '1.2346'],
        [4, 3, 1, '12.345678', '12.3457'],
        [4, 3, 2, '12.345678', '12.3457'],
        [4, 3, 3, '12.345678', '12.3457'],
        [4, 3, 4, '12.345678', '12.3457'],
        [4, 3, 5, '12.345678', '12.3457'],
        [4, 3, 1, '123.45678', '123.4568'],
        [4, 3, 2, '123.45678', '123.4568'],
        [4, 3, 3, '123.45678', '123.4568'],
        [4, 3, 4, '123.45678', '123.4568'],
        [4, 3, 5, '123.45678', '123.4568'],
        [4, 3, 1, '1234.5678', '1.2345678e03'],
        [4, 3, 2, '1234.5678', '1.2345678x10^3'],
        [4, 3, 3, '1234.5678', '1.2345678x10<sup>3</sup>'],
        [4, 3, 4, '1234.5678', '1.2345678·10^3'],
        [4, 3, 5, '1234.5678', '1.2345678·10<sup>3</sup>'],
        [4, 3, 1, '-0.0000123', '0.0000'],
        [4, 3, 2, '-0.0000123', '0.0000'],
        [4, 3, 3, '-0.0000123', '0.0000'],
        [4, 3, 4, '-0.0000123', '0.0000'],
        [4, 3, 5, '-0.0000123', '0.0000'],
        [4, 3, 1, '-0.0001234', '-1e-04'],
        [4, 3, 2, '-0.0001234', '-1x10^-4'],
        [4, 3, 3, '-0.0001234', '-1x10<sup>-4</sup>'],
        [4, 3, 4, '-0.0001234', '-1·10^-4'],
        [4, 3, 5, '-0.0001234', '-1·10<sup>-4</sup>'],
        [4, 3, 1, '-0.0012345', '-1.2e-03'],
        [4, 3, 2, '-0.0012345', '-1.2x10^-3'],
        [4, 3, 3, '-0.0012345', '-1.2x10<sup>-3</sup>'],
        [4, 3, 4, '-0.0012345', '-1.2·10^-3'],
        [4, 3, 5, '-0.0012345', '-1.2·10<sup>-3</sup>'],
        [4, 3, 1, '-0.0123456', '-0.0123'],
        # NOTE(review): the row above is duplicated in the original data
        [4, 3, 1, '-0.0123456', '-0.0123'],
        [4, 3, 2, '-0.0123456', '-0.0123'],
        [4, 3, 3, '-0.0123456', '-0.0123'],
        [4, 3, 4, '-0.0123456', '-0.0123'],
        [4, 3, 5, '-0.0123456', '-0.0123'],
        [4, 3, 1, '-0.1234567', '-0.1235'],
        [4, 3, 2, '-0.1234567', '-0.1235'],
        [4, 3, 3, '-0.1234567', '-0.1235'],
        [4, 3, 4, '-0.1234567', '-0.1235'],
        [4, 3, 5, '-0.1234567', '-0.1235'],
        [4, 3, 1, '-1.2345678', '-1.2346'],
        [4, 3, 2, '-1.2345678', '-1.2346'],
        [4, 3, 3, '-1.2345678', '-1.2346'],
        [4, 3, 4, '-1.2345678', '-1.2346'],
        [4, 3, 5, '-1.2345678', '-1.2346'],
        [4, 3, 1, '-12.345678', '-12.3457'],
        [4, 3, 2, '-12.345678', '-12.3457'],
        [4, 3, 3, '-12.345678', '-12.3457'],
        [4, 3, 4, '-12.345678', '-12.3457'],
        [4, 3, 5, '-12.345678', '-12.3457'],
        [4, 3, 1, '-123.45678', '-123.4568'],
        [4, 3, 2, '-123.45678', '-123.4568'],
        [4, 3, 3, '-123.45678', '-123.4568'],
        [4, 3, 4, '-123.45678', '-123.4568'],
        [4, 3, 5, '-123.45678', '-123.4568'],
        [4, 3, 1, '-1234.5678', '-1.2345678e03'],
        [4, 3, 2, '-1234.5678', '-1.2345678x10^3'],
        [4, 3, 3, '-1234.5678', '-1.2345678x10<sup>3</sup>'],
        [4, 3, 4, '-1234.5678', '-1.2345678·10^3'],
        [4, 3, 5, '-1234.5678', '-1.2345678·10<sup>3</sup>'],
        [4, 3, 1, '1200000', '1.2e06'],
        [4, 3, 2, '1200000', '1.2x10^6'],
        [4, 3, 3, '1200000', '1.2x10<sup>6</sup>'],
        [4, 3, 4, '1200000', '1.2·10^6'],
        [4, 3, 5, '1200000', '1.2·10<sup>6</sup>'],
        # Weird!!! negative values for exp precision
        [2, -6, 1, '12340', '12340.00'],
        [2, -4, 1, '12340', '1.234e04'],
        [2, -4, 2, '12340', '1.234x10^4'],
        [2, -4, 3, '12340', '1.234x10<sup>4</sup>'],
        [2, -4, 4, '12340', '1.234·10^4'],
        [2, -4, 5, '12340', '1.234·10<sup>4</sup>'],
        [2, -4, 1, '12340.01', '1.234001e04'],
        [2, -4, 2, '12340.01', '1.234001x10^4'],
        [2, -4, 3, '12340.01', '1.234001x10<sup>4</sup>'],
        [2, -4, 4, '12340.01', '1.234001·10^4'],
        [2, -4, 5, '12340.01', '1.234001·10<sup>4</sup>'],
        [2, -6, 1, '-12340', '-12340.00'],
        [2, -4, 1, '-12340', '-1.234e04'],
        [2, -4, 2, '-12340', '-1.234x10^4'],
        [2, -4, 3, '-12340', '-1.234x10<sup>4</sup>'],
        [2, -4, 4, '-12340', '-1.234·10^4'],
        [2, -4, 5, '-12340', '-1.234·10<sup>4</sup>'],
        [2, -4, 1, '-12340.01', '-1.234001e04'],
        [2, -4, 2, '-12340.01', '-1.234001x10^4'],
        [2, -4, 3, '-12340.01', '-1.234001x10<sup>4</sup>'],
        [2, -4, 4, '-12340.01', '-1.234001·10^4'],
        [2, -4, 5, '-12340.01', '-1.234001·10<sup>4</sup>'],
        [2, 6, 1, '12340', '12340.00'],
        [2, 6, 2, '12340', '12340.00'],
        [2, 6, 3, '12340', '12340.00'],
        [2, 6, 4, '12340', '12340.00'],
        [2, 6, 5, '12340', '12340.00'],
        [2, 4, 1, '12340', '1.234e04'],
        [2, 4, 2, '12340', '1.234x10^4'],
        [2, 4, 3, '12340', '1.234x10<sup>4</sup>'],
        [2, 4, 4, '12340', '1.234·10^4'],
        [2, 4, 5, '12340', '1.234·10<sup>4</sup>'],
        [2, 4, 1, '12340.0123', '1.234001e04'],
        [2, 4, 2, '12340.0123', '1.234001x10^4'],
        [2, 4, 3, '12340.0123', '1.234001x10<sup>4</sup>'],
        [2, 4, 4, '12340.0123', '1.234001·10^4'],
        [2, 4, 5, '12340.0123', '1.234001·10<sup>4</sup>'],
        [2, 4, 1, '-12340', '-1.234e04'],
        [2, 4, 2, '-12340', '-1.234x10^4'],
        [2, 4, 3, '-12340', '-1.234x10<sup>4</sup>'],
        [2, 4, 4, '-12340', '-1.234·10^4'],
        [2, 4, 5, '-12340', '-1.234·10<sup>4</sup>'],
        [2, 4, 1, '-12340.0123', '-1.234001e04'],
        [2, 4, 2, '-12340.0123', '-1.234001x10^4'],
        [2, 4, 3, '-12340.0123', '-1.234001x10<sup>4</sup>'],
        [2, 4, 4, '-12340.0123', '-1.234001·10^4'],
        [2, 4, 5, '-12340.0123', '-1.234001·10<sup>4</sup>'],
    ]
    s = self.service
    s.setLowerDetectionLimit('-99999')  # We want to test results below 0 too
    prevm = []
    an = None
    bs = self.portal.bika_setup;
    for m in matrix:
        # Create the AR and set the values to the AS, but only if necessary
        # (a new AR is only needed when precision or exp precision changed)
        if not an or prevm[0] != m[0] or prevm[1] != m[1]:
            s.setPrecision(m[0])
            s.setExponentialFormatPrecision(m[1])
            self.assertEqual(s.getPrecision(), m[0])
            self.assertEqual(s.Schema().getField('Precision').get(s), m[0])
            self.assertEqual(s.getExponentialFormatPrecision(), m[1])
            self.assertEqual(
                s.Schema().getField('ExponentialFormatPrecision').get(s),
                m[1])
            client = self.portal.clients['client-1']
            sampletype = bs.bika_sampletypes['sampletype-1']
            values = {'Client': client.UID(),
                      'Contact': client.getContacts()[0].UID(),
                      'SamplingDate': '2015-01-01',
                      'SampleType': sampletype.UID()}
            ar = create_analysisrequest(client, {}, values, [s.UID()])
            wf = getToolByName(ar, 'portal_workflow')
            wf.doActionFor(ar, 'receive')
            an = ar.getAnalyses()[0].getObject()
            prevm = m;
        an.setResult(m[3])
        self.assertEqual(an.getResult(), m[3])
        self.assertEqual(an.Schema().getField('Result').get(an), m[3])
        fr = an.getFormattedResult(sciformat=m[2])
        #print '%s %s %s %s => \'%s\' ?= \'%s\'' % (m[0],m[1],m[2],m[3],m[4],fr)
        self.assertEqual(fr, m[4])
def test_ar_manage_results_detectionlimit_selector_manual(self):
    """Check detection-limit handling for every combination of the
    DetectionLimitSelector / AllowManualDetectionLimit service flags.

    Each case drives one fresh AR with LDL=10 and UDL=20, sets the raw
    result input, and asserts the stored result, the below/above-DL
    flags, the operand flags, and the formatted output.
    """
    def case(displaydl, manual, input, expresult, fmt,
             below, above, isldl, isudl):
        # All cases share LDL='10' / UDL='20'; only the flags, the raw
        # input and the expectations vary.
        return {'min': '10', 'max': '20', 'displaydl': displaydl,
                'manual': manual, 'input': input, 'expresult': expresult,
                'expformattedresult': fmt, 'isbelowldl': below,
                'isaboveudl': above, 'isldl': isldl, 'isudl': isudl}

    cases = [
        # ROUND 1: no DL selector, no manual DL -------------------------
        case(False, False, '5', 5.0, '< 10', True, False, False, False),
        case(False, False, '15', 15.0, '15.00', False, False, False, False),
        case(False, False, '25', 25.0, '> 20', False, True, False, False),
        # '<' assignment not allowed
        case(False, False, '<5', 5.0, '< 10', True, False, False, False),
        # '<' assignment not allowed
        case(False, False, '<15', 15.0, '15.00', False, False, False, False),
        # '>' assignment not allowed
        case(False, False, '>15', 15.0, '15.00', False, False, False, False),
        # '>' assignment not allowed
        case(False, False, '25', 25.0, '> 20', False, True, False, False),
        # ROUND 2: DL selector, no manual DL ----------------------------
        case(True, False, '5', 5.0, '< 10', True, False, False, False),
        case(True, False, '15', 15.0, '15.00', False, False, False, False),
        case(True, False, '25', 25.0, '> 20', False, True, False, False),
        # '<' assignment allowed, but not custom
        case(True, False, '<5', 10.0, '< 10', True, False, True, False),
        # '<' assignment allowed, but not custom
        case(True, False, '<15', 10.0, '< 10', True, False, True, False),
        # '>' assignment allowed, but not custom
        case(True, False, '>15', 20.0, '> 20', False, True, False, True),
        # '>' assignment allowed, but not custom
        case(True, False, '>25', 20.0, '> 20', False, True, False, True),
        # ROUND 3: DL selector and manual DL ----------------------------
        case(True, True, '5', 5.0, '< 10', True, False, False, False),
        case(True, True, '15', 15.0, '15.00', False, False, False, False),
        case(True, True, '25', 25.0, '> 20', False, True, False, False),
        # '<' assignment allowed
        case(True, True, '<5', 5.0, '< 5', True, False, True, False),
        # '<' assignment allowed
        case(True, True, '<15', 15.0, '< 15', True, False, True, False),
        # '>' assignment allowed
        case(True, True, '>15', 15.0, '> 15', False, True, False, True),
        # '>' assignment allowed
        case(True, True, '>25', 25.0, '> 25', False, True, False, True),
    ]
    for c in cases:
        s = self.services[0]
        s.setDetectionLimitSelector(c['displaydl'])
        s.setAllowManualDetectionLimit(c['manual'])
        s.setLowerDetectionLimit(c['min'])
        s.setUpperDetectionLimit(c['max'])
        # Input results
        # Client: Happy Hills
        # SampleType: Apple Pulp
        # Contact: Rita Mohale
        client = self.portal.clients['client-1']
        sampletype = self.portal.bika_setup.bika_sampletypes['sampletype-1']
        values = {'Client': client.UID(),
                  'Contact': client.getContacts()[0].UID(),
                  'SamplingDate': '2015-01-01',
                  'SampleType': sampletype.UID()}
        request = {}
        ar = create_analysisrequest(client, request, values, [s.UID()])
        wf = getToolByName(ar, 'portal_workflow')
        wf.doActionFor(ar, 'receive')
        an = ar.getAnalyses()[0].getObject()
        an.setResult(c['input'])
        self.assertEqual(an.isBelowLowerDetectionLimit(), c['isbelowldl'])
        self.assertEqual(an.isAboveUpperDetectionLimit(), c['isaboveudl'])
        self.assertEqual(an.isLowerDetectionLimit(), c['isldl'])
        self.assertEqual(an.isUpperDetectionLimit(), c['isudl'])
        self.assertEqual(float(an.getResult()), c['expresult'])
        self.assertEqual(an.getFormattedResult(html=False),
                         c['expformattedresult'])
        # BUGFIX: the replacements below were no-ops ('< ' -> '< ');
        # html=True output escapes the comparison operators, so the
        # expected string must carry the HTML entities.
        expres = c['expformattedresult']
        if an.isBelowLowerDetectionLimit():
            expres = expres.replace('< ', '&lt; ')
        if an.isAboveUpperDetectionLimit():
            expres = expres.replace('> ', '&gt; ')
        self.assertEqual(an.getFormattedResult(html=True), expres)
        # default rendering is the html flavour
        self.assertEqual(an.getFormattedResult(), expres)
def test_ar_manageresults_limitdetections(self):
    """Detection-limit behaviour of an AR's analyses: numeric results
    inside/below/above the LDL/UDL range, results entered with a '<' or
    '>' operand, and operands set explicitly via
    setDetectionLimitOperand().  Expected values come from self.lds,
    indexed per analysis service.
    """
    # Input results
    # Client: Happy Hills
    # SampleType: Apple Pulp
    # Contact: Rita Mohale
    # Analyses: [Calcium, Copper]
    client = self.portal.clients['client-1']
    sampletype = self.portal.bika_setup.bika_sampletypes['sampletype-1']
    values = {'Client': client.UID(),
              'Contact': client.getContacts()[0].UID(),
              'SamplingDate': '2015-01-01',
              'SampleType': sampletype.UID()}
    request = {}
    services = [s.UID() for s in self.services]
    ar = create_analysisrequest(client, request, values, services)
    # Basic detection limits
    # Maps each service id to its index within self.lds
    asidxs = {'analysisservice-3': 0,
              'analysisservice-6': 1,
              'analysisservice-7': 2}
    for a in ar.getAnalyses():
        an = a.getObject()
        idx = asidxs[an.getService().id]
        self.assertEqual(an.getLowerDetectionLimit(),
                         float(self.lds[idx]['min']))
        self.assertEqual(an.getUpperDetectionLimit(),
                         float(self.lds[idx]['max']))
        self.assertEqual(an.getService().getAllowManualDetectionLimit(),
                         self.lds[idx]['manual'])
        # Empty result: no operand, no DL flags
        self.assertFalse(an.getDetectionLimitOperand())
        self.assertFalse(an.isBelowLowerDetectionLimit())
        self.assertFalse(an.isAboveUpperDetectionLimit())
        # Set an in-range result
        an.setResult('15')
        self.assertEqual(float(an.getResult()), 15)
        self.assertFalse(an.isBelowLowerDetectionLimit())
        self.assertFalse(an.isAboveUpperDetectionLimit())
        self.assertFalse(an.getDetectionLimitOperand())
        self.assertEqual(an.getFormattedResult(), '15.00')
        self.assertEqual(an.getFormattedResult(html=True), '15.00')
        self.assertEqual(an.getFormattedResult(html=False), '15.00')
        # Result below the lower detection limit
        # NOTE(review): the html=True expectations below look like they
        # originally carried HTML-escaped operators ('&lt;'/'&gt;') that
        # were lost in transit -- verify against the rendered output.
        an.setResult('-1')
        self.assertEqual(float(an.getResult()), -1)
        self.assertTrue(an.isBelowLowerDetectionLimit())
        self.assertFalse(an.isAboveUpperDetectionLimit())
        self.assertFalse(an.getDetectionLimitOperand())
        self.assertEqual(an.getFormattedResult(html=False),
                         '< %s' % (self.lds[idx]['min']))
        self.assertEqual(an.getFormattedResult(html=True),
                         '< %s' % (self.lds[idx]['min']))
        self.assertEqual(an.getFormattedResult(),
                         '< %s' % (self.lds[idx]['min']))
        # Result above the upper detection limit
        an.setResult('2000')
        self.assertEqual(float(an.getResult()), 2000)
        self.assertFalse(an.isBelowLowerDetectionLimit())
        self.assertTrue(an.isAboveUpperDetectionLimit())
        self.assertFalse(an.getDetectionLimitOperand())
        self.assertEqual(an.getFormattedResult(html=False),
                         '> %s' % (self.lds[idx]['max']))
        self.assertEqual(an.getFormattedResult(html=True),
                         '> %s' % (self.lds[idx]['max']))
        self.assertEqual(an.getFormattedResult(),
                         '> %s' % (self.lds[idx]['max']))
        # Set a DL result ('<' embedded in the result string);
        # only honoured when manual detection limits are allowed
        an.setResult('<15')
        self.assertEqual(float(an.getResult()), 15)
        if self.lds[idx]['manual']:
            self.assertTrue(an.isBelowLowerDetectionLimit())
            self.assertFalse(an.isAboveUpperDetectionLimit())
            self.assertEqual(an.getDetectionLimitOperand(), '<')
            self.assertEqual(an.getFormattedResult(html=False), '< 15')
            self.assertEqual(an.getFormattedResult(html=True), '< 15')
            self.assertEqual(an.getFormattedResult(), '< 15')
        else:
            self.assertFalse(an.isBelowLowerDetectionLimit())
            self.assertFalse(an.isAboveUpperDetectionLimit())
            self.assertFalse(an.getDetectionLimitOperand())
            self.assertEqual(an.getFormattedResult(html=False), '15.00')
            self.assertEqual(an.getFormattedResult(html=True), '15.00')
            self.assertEqual(an.getFormattedResult(), '15.00')
        an.setResult('>15')
        self.assertEqual(float(an.getResult()), 15)
        if self.lds[idx]['manual']:
            self.assertFalse(an.isBelowLowerDetectionLimit())
            self.assertTrue(an.isAboveUpperDetectionLimit())
            self.assertEqual(an.getDetectionLimitOperand(), '>')
            self.assertEqual(an.getFormattedResult(html=False), '> 15')
            self.assertEqual(an.getFormattedResult(html=True), '> 15')
            self.assertEqual(an.getFormattedResult(), '> 15')
        else:
            self.assertFalse(an.isBelowLowerDetectionLimit())
            self.assertFalse(an.isAboveUpperDetectionLimit())
            self.assertFalse(an.getDetectionLimitOperand())
            self.assertEqual(an.getFormattedResult(html=False), '15.00')
            self.assertEqual(an.getFormattedResult(html=True), '15.00')
            self.assertEqual(an.getFormattedResult(), '15.00')
        # Set a DL result explicitely via the operand setter
        an.setDetectionLimitOperand('<')
        an.setResult('15')
        self.assertEqual(float(an.getResult()), 15)
        if self.lds[idx]['manual']:
            self.assertTrue(an.isBelowLowerDetectionLimit())
            self.assertFalse(an.isAboveUpperDetectionLimit())
            self.assertEqual(an.getDetectionLimitOperand(), '<')
            self.assertEqual(an.getFormattedResult(html=False), '< 15')
            self.assertEqual(an.getFormattedResult(html=True), '< 15')
            self.assertEqual(an.getFormattedResult(), '< 15')
        else:
            self.assertFalse(an.isBelowLowerDetectionLimit())
            self.assertFalse(an.isAboveUpperDetectionLimit())
            self.assertFalse(an.getDetectionLimitOperand())
            self.assertEqual(an.getFormattedResult(html=False), '15.00')
            self.assertEqual(an.getFormattedResult(html=True), '15.00')
            self.assertEqual(an.getFormattedResult(), '15.00')
        an.setDetectionLimitOperand('>')
        an.setResult('15')
        self.assertEqual(float(an.getResult()), 15)
        if self.lds[idx]['manual']:
            self.assertFalse(an.isBelowLowerDetectionLimit())
            self.assertTrue(an.isAboveUpperDetectionLimit())
            self.assertEqual(an.getDetectionLimitOperand(), '>')
            self.assertEqual(an.getFormattedResult(html=False), '> 15')
            self.assertEqual(an.getFormattedResult(html=True), '> 15')
            self.assertEqual(an.getFormattedResult(), '> 15')
        else:
            self.assertFalse(an.isBelowLowerDetectionLimit())
            self.assertFalse(an.isAboveUpperDetectionLimit())
            self.assertFalse(an.getDetectionLimitOperand())
            self.assertEqual(an.getFormattedResult(html=False), '15.00')
            self.assertEqual(an.getFormattedResult(html=True), '15.00')
            self.assertEqual(an.getFormattedResult(), '15.00')
def __call__(self):
    """Handle the AR-Add form submission (column-per-AR layout).

    Validates the non-empty form columns, resolves reference-widget
    values to UIDs, gathers per-service min/max/error specifications
    and partition data, then creates one Analysis Request per column.

    Returns a JSON string: {'errors': {...}} on validation failure,
    otherwise {'success': message} plus label-printing data when
    automatic label printing is enabled.
    """
    form = self.request.form
    plone.protect.CheckAuthenticator(self.request.form)
    plone.protect.PostOnly(self.request.form)
    came_from = form.get('came_from') or 'add'
    uc = getToolByName(self.context, 'uid_catalog')
    bsc = getToolByName(self.context, 'bika_setup_catalog')
    errors = {}
    form_parts = json.loads(self.request.form['parts'])

    # First make a list of non-empty columns
    columns = []
    for column in range(int(form['col_count'])):
        ar = form.get('ar.%s' % column, None)
        if ar and 'Analyses' in ar.keys():
            columns.append(column)

    if len(columns) == 0:
        ajax_form_error(errors,
                        message=t(_("No analyses have been selected")))
        return json.dumps({'errors': errors})

    # Now some basic validation
    required_fields = [field.getName() for field
                       in AnalysisRequestSchema.fields()
                       if field.required]

    for column in columns:
        formkey = "ar.%s" % column
        ar = form[formkey]
        # check that required fields have values
        for field in required_fields:
            # This one is still special.
            if field in ['RequestID']:
                continue
            # And these are not required if this is a secondary AR
            if ar.get('Sample', '') != '' \
               and field in ['SamplingDate', 'SampleType']:
                continue
            if not ar.get(field, ''):
                ajax_form_error(errors, field, column)

    # Return errors if there are any
    if errors:
        return json.dumps({'errors': errors})

    # Get the prices from the form data
    prices = form.get('Prices', None)
    # Initialize the Analysis Request collection
    ARs = []

    # The actual submission
    for column in columns:
        # Get partitions from the form data
        partitions = form_parts[str(column)] if form_parts else []

        # Get the form data using the appropriate form key
        values = form["ar.%s" % column].copy()

        # resolved values is formatted as acceptable by archetypes
        # widget machines
        resolved_values = {}
        for k, v in values.items():
            # Analyses, we handle that specially.
            if k == 'Analyses':
                continue
            if "%s_uid" % k in values:
                v = values["%s_uid" % k]
                # multiValued reference widgets post comma-joined UIDs
                if v and "," in v:
                    v = v.split(",")
                # BUGFIX: keep the (possibly split) value; the original
                # re-read the raw string here and discarded the split.
                resolved_values[k] = v
            else:
                resolved_values[k] = values[k]

        # Get the analyses from the form data
        analyses = values["Analyses"]

        # Gather the specifications from the form data
        # no defaults are applied here - the defaults should already be
        # present in the form data
        # (single pass; the original nested two identical loops here)
        specifications = {}
        for service_uid in analyses:
            min_element_name = "ar.%s.min.%s" % (column, service_uid)
            max_element_name = "ar.%s.max.%s" % (column, service_uid)
            error_element_name = "ar.%s.error.%s" % (column, service_uid)
            if min_element_name in form:
                specifications[service_uid] = {
                    "min": form[min_element_name],
                    "max": form[max_element_name],
                    "error": form[error_element_name]
                }

        # Selecting a template sets the hidden 'parts' field to template
        # values. Selecting a profile will allow ar_add.js to fill in the
        # parts field. The result is the same once we are here.
        if not partitions:
            partitions = [{'services': [],
                           'container': None,
                           'preservation': '',
                           'separate': False}]

        # Apply DefaultContainerType to partitions without a container
        default_container_type = resolved_values.get(
            'DefaultContainerType', None)
        if default_container_type:
            container_type = bsc(UID=default_container_type)[0].getObject()
            containers = container_type.getContainers()
            for partition in partitions:
                if not partition.get("container", None):
                    partition['container'] = containers

        # Retrieve the catalogue reference to the client
        client = uc(UID=resolved_values['Client'])[0].getObject()

        # Create the Analysis Request
        ar = create_analysisrequest(
            client,
            self.request,
            resolved_values,
            analyses,
            partitions,
            specifications,
            prices
        )
        # Add the created analysis request to the list
        ARs.append(ar.getId())

    # Display the appropriate message after creation
    if len(ARs) > 1:
        message = _("Analysis requests ${ARs} were successfully created.",
                    mapping={'ARs': safe_unicode(', '.join(ARs))})
    else:
        message = _("Analysis request ${AR} was successfully created.",
                    mapping={'AR': safe_unicode(ARs[0])})
    self.context.plone_utils.addPortalMessage(message, 'info')

    # Automatic label printing
    # Won't print labels for Register on Secondary ARs
    new_ars = None
    if came_from == 'add':
        new_ars = [ar for ar in ARs if ar[-2:] == '01']
    if 'register' in self.context.bika_setup.getAutoPrintLabels() \
       and new_ars:
        return json.dumps({
            'success': message,
            'labels': new_ars,
            'labelsize': self.context.bika_setup.getAutoLabelSize()
        })
    return json.dumps({'success': message})
def __call__(self):
    """Handle the AR-Add form submission (request.state JSON layout).

    Parses the 'state' JSON blob from the form, drops completely empty
    AR entries, validates required fields per AR, expands multiValued
    '_uid' values, and creates one Analysis Request per valid entry.

    Returns a JSON string: {'errors': {...}} on failure, otherwise
    {'success': message} plus sticker-printing data when automatic
    sticker printing is enabled.
    """
    form = self.request.form
    plone.protect.CheckAuthenticator(self.request.form)
    plone.protect.PostOnly(self.request.form)
    portal_catalog = getToolByName(self.context, 'portal_catalog')

    # Load the form data from request.state. If anything goes wrong here,
    # put a bullet through the whole process.
    try:
        states = json.loads(form['state'])
    except Exception as e:
        # str(e) instead of the Python-2-only e.message attribute
        message = t(_('Badly formed state: ${errmsg}',
                      mapping={'errmsg': str(e)}))
        ajax_form_error(self.errors, message=message)
        return json.dumps({'errors': self.errors})

    # Validate incoming form data
    required = [field.getName() for field
                in AnalysisRequestSchema.fields()
                if field.required] + ["Analyses"]

    # First remove all states which are completely empty; if all
    # required fields are not present, we assume that the current
    # AR had no data entered, and can be ignored
    nonblank_states = {}
    for arnum, state in states.items():
        for key, val in state.items():
            if val \
                    and "%s_hidden" % key not in state \
                    and not key.endswith('hidden'):
                nonblank_states[arnum] = state
                break

    # in valid_states, all ars that pass validation will be stored
    valid_states = {}
    for arnum, state in nonblank_states.items():
        # BUGFIX: work on a per-AR copy of the required list.  The
        # original mutated the shared 'required' list, so exemptions
        # granted to one AR leaked into the validation of later ARs.
        required_fields = list(required)

        # Secondary ARs are a special case, these fields are not required
        if state.get('Sample', ''):
            for fieldname in ('SamplingDate', 'SampleType'):
                if fieldname in required_fields:
                    required_fields.remove(fieldname)

        # fields flagged as 'hidden' are not considered required because
        # they will already have default values inserted in them
        # BUGFIX: rebuilt as a comprehension; the original removed items
        # from the list while iterating it, which skips elements.
        required_fields = [f for f in required_fields
                           if f + '_hidden' not in state]

        missing = [f for f in required_fields if not state.get(f, '')]

        # If there are required fields missing, flag an error
        if missing:
            msg = t(_('Required fields have no values: '
                      '${field_names}',
                      mapping={'field_names': ', '.join(missing)}))
            ajax_form_error(self.errors, arnum=arnum, message=msg)
            continue
        # This ar is valid!
        valid_states[arnum] = state

    # - Expand lists of UIDs returned by multiValued reference widgets
    # - Transfer _uid values into their respective fields
    for arnum in valid_states.keys():
        for field, value in valid_states[arnum].items():
            if field.endswith('_uid') and ',' in value:
                valid_states[arnum][field] = value.split(',')
            elif field.endswith('_uid'):
                valid_states[arnum][field] = value

    if self.errors:
        return json.dumps({'errors': self.errors})

    # Now, we will create the specified ARs.
    ARs = []
    for arnum, state in valid_states.items():
        # Create the Analysis Request
        ar = create_analysisrequest(
            portal_catalog(UID=state['Client'])[0].getObject(),
            self.request,
            state
        )
        ARs.append(ar.Title())

    # Display the appropriate message after creation
    if len(ARs) > 1:
        message = _('Analysis requests ${ARs} were successfully created.',
                    mapping={'ARs': safe_unicode(', '.join(ARs))})
    else:
        message = _('Analysis request ${AR} was successfully created.',
                    mapping={'AR': safe_unicode(ARs[0])})
    self.context.plone_utils.addPortalMessage(message, 'info')

    # Automatic label printing won't print "register" labels for
    # Secondary ARs
    new_ars = [ar for ar in ARs if ar[-2:] == '01']
    if 'register' in self.context.bika_setup.getAutoPrintStickers() \
            and new_ars:
        return json.dumps({
            'success': message,
            'stickers': new_ars,
            'stickertemplate':
                self.context.bika_setup.getAutoStickerTemplate()
        })
    return json.dumps({'success': message})
def test_service_hidden_analysisrequest(self):
    """Verify how the 'hidden' flag of an Analysis Service propagates
    into a created Analysis Request, both directly and when the AR is
    created from an Analysis Profile or an AR Template, and how
    per-AR / per-profile / per-template settings override it.

    Fixture assumptions (set up outside this block -- confirm in
    setUp): self.services holds three services where [0] and [1] have
    getHidden() == False and [2] has getHidden() == True.
    """
    # Input results
    # Client: Happy Hills
    # SampleType: Apple Pulp
    # Contact: Rita Mohale
    # Analyses: [Calcium, Copper, Iron]
    client = self.portal.clients['client-1']
    sampletype = self.portal.bika_setup.bika_sampletypes['sampletype-1']
    request = {}
    services = [s.UID() for s in self.services]
    values = {'Client': client.UID(),
              'Contact': client.getContacts()[0].UID(),
              'SamplingDate': '2015-01-01',
              'SampleType': sampletype.UID()}
    # Plain AR: no profile, no template, no per-AR settings.
    ar = create_analysisrequest(client, request, values, services)
    self.assertFalse('hidden' in ar.getAnalysisServiceSettings(services[0]))
    self.assertFalse(ar.isAnalysisServiceHidden(services[0]))
    self.assertFalse(ar.getAnalysisServiceSettings(services[1]).get('hidden'))
    self.assertFalse(ar.isAnalysisServiceHidden(services[1]))
    self.assertFalse(ar.getAnalysisServiceSettings(services[2]).get('hidden'))
    # services[2] is hidden at service level, so the AR reports hidden.
    self.assertTrue(ar.isAnalysisServiceHidden(services[2]))
    # For Calcium (unset)
    uid = self.services[0].UID()
    self.assertFalse(self.services[0].getHidden())
    self.assertFalse(self.analysisprofile.isAnalysisServiceHidden(uid))
    self.assertFalse('hidden' in self.artemplate.getAnalysisServiceSettings(uid))
    # For Copper (False)
    uid = self.services[1].UID()
    self.assertFalse(self.services[1].getHidden())
    self.assertFalse(ar.isAnalysisServiceHidden(uid))
    self.assertFalse('hidden' in ar.getAnalysisServiceSettings(uid))
    # For Iron (True)
    uid = self.services[2].UID()
    self.assertTrue(self.services[2].getHidden())
    self.assertTrue(ar.isAnalysisServiceHidden(uid))
    self.assertFalse('hidden' in ar.getAnalysisServiceSettings(uid))
    # Modify visibility for Calcium in AR
    uid = self.services[0].UID();
    # A settings entry without an explicit 'hidden' key stores nothing.
    sets = [{'uid': uid}]
    ar.setAnalysisServicesSettings(sets)
    self.assertFalse(ar.isAnalysisServiceHidden(uid))
    self.assertFalse('hidden' in ar.getAnalysisServiceSettings(uid))
    # Explicit hidden=False is stored (key present) but not hiding.
    sets = [{'uid': uid, 'hidden': False}]
    ar.setAnalysisServicesSettings(sets)
    self.assertFalse(ar.isAnalysisServiceHidden(uid))
    self.assertTrue('hidden' in ar.getAnalysisServiceSettings(uid))
    # Explicit hidden=True is stored and hides the service.
    sets = [{'uid': uid, 'hidden': True}]
    ar.setAnalysisServicesSettings(sets)
    self.assertTrue(ar.isAnalysisServiceHidden(uid))
    self.assertTrue('hidden' in ar.getAnalysisServiceSettings(uid))
    ar.setAnalysisServicesSettings([])
    # AR with profile with no changes
    values['Profile'] = self.analysisprofile.UID()
    ar = create_analysisrequest(client, request, values, services)
    self.assertFalse('hidden' in ar.getAnalysisServiceSettings(services[0]))
    self.assertFalse(ar.getAnalysisServiceSettings(services[1]).get('hidden'))
    self.assertFalse(ar.getAnalysisServiceSettings(services[2]).get('hidden'))
    uid = self.services[0].UID()
    self.assertFalse(self.services[0].getHidden())
    self.assertFalse(ar.isAnalysisServiceHidden(uid))
    self.assertFalse('hidden' in ar.getAnalysisServiceSettings(uid))
    uid = self.services[1].UID()
    self.assertFalse(self.services[1].getHidden())
    self.assertFalse(ar.isAnalysisServiceHidden(uid))
    self.assertFalse('hidden' in ar.getAnalysisServiceSettings(uid))
    uid = self.services[2].UID()
    self.assertTrue(self.services[2].getHidden())
    self.assertTrue(ar.isAnalysisServiceHidden(uid))
    self.assertFalse('hidden' in ar.getAnalysisServiceSettings(uid))
    # AR with template with no changes
    # NOTE(review): the template is passed as an object here, while the
    # profile above is passed as a UID -- presumably both are accepted
    # by create_analysisrequest; confirm.
    values['Template'] = self.artemplate
    del values['Profile']
    ar = create_analysisrequest(client, request, values, services)
    self.assertFalse('hidden' in ar.getAnalysisServiceSettings(services[0]))
    self.assertFalse(ar.getAnalysisServiceSettings(services[1]).get('hidden'))
    self.assertFalse(ar.getAnalysisServiceSettings(services[2]).get('hidden'))
    uid = self.services[0].UID()
    self.assertFalse(self.services[0].getHidden())
    self.assertFalse(ar.isAnalysisServiceHidden(uid))
    self.assertFalse('hidden' in ar.getAnalysisServiceSettings(uid))
    uid = self.services[1].UID()
    self.assertFalse(self.services[1].getHidden())
    self.assertFalse(ar.isAnalysisServiceHidden(uid))
    self.assertFalse('hidden' in ar.getAnalysisServiceSettings(uid))
    uid = self.services[2].UID()
    self.assertTrue(self.services[2].getHidden())
    self.assertTrue(ar.isAnalysisServiceHidden(uid))
    self.assertFalse('hidden' in ar.getAnalysisServiceSettings(uid))
    # AR with profile, with changes
    values['Profile'] = self.analysisprofile.UID()
    del values['Template']
    # matrix[i][j]: expected outcome for service i when the profile
    # setting j is (0) hidden=False, (1) hidden=True, (2) key absent.
    # Encoding: sign => whether the AR stores a 'hidden' key
    # (negative = key absent); |value| => 1 = hidden, 2 = visible.
    matrix = [[2, 1, -2],  # AS = Not set
              [2, 1, -2],  # AS = False
              [2, 1, -1]]
    for i in range(len(matrix)):
        sets = {'uid': services[i]}
        opts = [0, 1, 2]
        for j in opts:
            if j == 0:
                sets['hidden'] = False
            elif j == 1:
                sets['hidden'] = True
            else:
                del sets['hidden']
            self.analysisprofile.setAnalysisServicesSettings(sets)
            ar = create_analysisrequest(client, request, values, services)
            res = matrix[i][j]
            if res < 0:
                self.assertFalse('hidden' in ar.getAnalysisServiceSettings(services[i]))
            else:
                self.assertTrue('hidden' in ar.getAnalysisServiceSettings(services[i]))
            if abs(res) == 1:
                self.assertTrue(ar.isAnalysisServiceHidden(services[i]))
            elif abs(res) == 2:
                self.assertFalse(ar.isAnalysisServiceHidden(services[i]))
    # Restore
    self.analysisprofile.setAnalysisServicesSettings([])
    # AR with template, with changes
    values['Template'] = self.artemplate.UID()
    del values['Profile']
    # Same matrix semantics as above, driven by template settings.
    matrix = [[2, 1, -2],  # AS = Not set
              [2, 1, -2],  # AS = False
              [2, 1, -1]]
    for i in range(len(matrix)):
        sets = {'uid': services[i]}
        opts = [0, 1, 2]
        for j in opts:
            if j == 0:
                sets['hidden'] = False
            elif j == 1:
                sets['hidden'] = True
            else:
                del sets['hidden']
            self.artemplate.setAnalysisServicesSettings(sets)
            ar = create_analysisrequest(client, request, values, services)
            res = matrix[i][j]
            if res < 0:
                self.assertFalse('hidden' in ar.getAnalysisServiceSettings(services[i]))
            else:
                self.assertTrue('hidden' in ar.getAnalysisServiceSettings(services[i]))
            if abs(res) == 1:
                self.assertTrue(ar.isAnalysisServiceHidden(services[i]))
            elif abs(res) == 2:
                self.assertFalse(ar.isAnalysisServiceHidden(services[i]))
    # Restore
    self.artemplate.setAnalysisServicesSettings([])
def test_analysis_method_calculation(self):
    """For each formula fixture in ``self.formulas``: install the
    formula and its interim fields on ``self.calculation``, create and
    receive an AR containing the dependent (calculated) service, enter
    the fixture's analysis results and interim values, then run the
    calculation and compare against the fixture's expected result.
    """
    # Input results
    # Client: Happy Hills
    # SampleType: Apple Pulp
    # Contact: Rita Mohale
    # Analyses: [Calcium, Mg, Total Hardness]
    from bika.lims.utils.analysisrequest import create_analysisrequest
    for f in self.formulas:
        # Set custom calculation
        self.calculation.setFormula(f['formula'])
        self.assertEqual(self.calculation.getFormula(), f['formula'])
        interims = []
        for k, v in f['interims'].items():
            interims.append({'keyword': k, 'title': k, 'value': v,
                             'hidden': False, 'type': 'int', 'unit': ''})
        self.calculation.setInterimFields(interims)
        self.assertEqual(self.calculation.getInterimFields(), interims)
        # Create the AR
        client = self.portal.clients['client-1']
        sampletype = self.portal.bika_setup.bika_sampletypes['sampletype-1']
        values = {'Client': client.UID(),
                  'Contact': client.getContacts()[0].UID(),
                  'SamplingDate': '2015-01-01',
                  'SampleType': sampletype.UID()}
        request = {}
        services = [s.UID() for s in self.services] + \
            [self.calcservice.UID()]
        ar = create_analysisrequest(client, request, values, services)
        doActionFor(ar, 'receive')
        # Set results and interims
        calcanalysis = None
        for an in ar.getAnalyses():
            an = an.getObject()
            key = an.getKeyword()
            if key in f['analyses']:
                an.setResult(f['analyses'][key])
                if an.isLowerDetectionLimit() \
                        or an.isUpperDetectionLimit():
                    # DL results are stored without their operator.
                    operator = an.getDetectionLimitOperand()
                    strres = f['analyses'][key].replace(operator, '')
                    self.assertEqual(an.getResult(), strres)
                else:
                    self.assertEqual(an.getResult(), f['analyses'][key])
            elif key == self.calcservice.getKeyword():
                calcanalysis = an

            # Set interims: copy the analysis' interim fields, injecting
            # the fixture value where the keyword matches.
            interims = an.getInterimFields()
            intermap = []
            for i in interims:
                if i['keyword'] in f['interims']:
                    ival = float(f['interims'][i['keyword']])
                    intermap.append({'keyword': i['keyword'],
                                     'value': ival,
                                     'title': i['title'],
                                     'hidden': i['hidden'],
                                     'type': i['type'],
                                     'unit': i['unit']})
                else:
                    intermap.append(i)
            an.setInterimFields(intermap)
            self.assertEqual(an.getInterimFields(), intermap)

        # Let's go.. calculate and check result
        success = calcanalysis.calculateResult(True, True)
        # BUGFIX: the original read ``assertTrue(success, True)``,
        # which passes True as the *msg* argument and adds no extra
        # check; assert the boolean with a meaningful message instead.
        self.assertTrue(success, 'calculateResult returned a falsy value')
        self.assertNotEqual(calcanalysis.getResult(), '',
                            'getResult returns an empty string')
        self.assertEqual(float(calcanalysis.getResult()),
                         float(f['exresult']))
def test_calculation_uncertainties_precision(self):
    """For each fixture in ``self.formulas_precision``: install the
    formula and interim fields on the calculation, then, for every
    uncertainty case, configure the services to derive precision from
    uncertainties, create and receive an AR, enter results/interims,
    run the calculation and compare the *formatted* result with the
    case's expected string.
    """
    # Input results
    # Client: Happy Hills
    # SampleType: Apple Pulp
    # Contact: Rita Mohale
    # Analyses: [Calcium, Mg, Total Hardness]
    for f in self.formulas_precision:
        self.calculation.setFormula(f['formula'])
        self.assertEqual(self.calculation.getFormula(), f['formula'])
        interims = []
        for k, v in f['interims'].items():
            interims.append({'keyword': k, 'title': k, 'value': v,
                             'hidden': False, 'type': 'int', 'unit': ''});
        self.calculation.setInterimFields(interims)
        self.assertEqual(self.calculation.getInterimFields(), interims)
        for case in f['test_uncertainties_precision']:
            # Define precision: every involved service (including the
            # calculated one) derives precision from the case's
            # uncertainty ranges.
            services_obj = [s for s in self.services] + [self.calcservice]
            for service in services_obj:
                service.setPrecisionFromUncertainty(True)
                service.setUncertainties(case['uncertainties'])
            # Create the AR
            client = self.portal.clients['client-1']
            sampletype = self.portal.bika_setup.bika_sampletypes['sampletype-1']
            values = {'Client': client.UID(),
                      'Contact': client.getContacts()[0].UID(),
                      'SamplingDate': '2015-01-01',
                      'SampleType': sampletype.UID()}
            request = {}
            services = [s.UID() for s in self.services] + \
                [self.calcservice.UID()]
            ar = create_analysisrequest(client, request, values, services)
            wf = getToolByName(ar, 'portal_workflow')
            wf.doActionFor(ar, 'receive')
            # Set results and interims
            calcanalysis = None
            for an in ar.getAnalyses():
                an = an.getObject()
                key = an.getKeyword()
                if key in f['analyses']:
                    an.setResult(f['analyses'][key])
                    if an.isLowerDetectionLimit() \
                            or an.isUpperDetectionLimit():
                        # DL results are stored without their operator.
                        operator = an.getDetectionLimitOperand()
                        strres = f['analyses'][key].replace(operator, '')
                        self.assertEqual(an.getResult(), str(float(strres)))
                    else:
                        # The analysis' results have to be always strings
                        self.assertEqual(an.getResult(),
                                         str(f['analyses'][key]))
                elif key == self.calcservice.getKeyword():
                    calcanalysis = an

                # Set interims, injecting the fixture value where the
                # keyword matches.
                interims = an.getInterimFields()
                intermap = []
                for i in interims:
                    if i['keyword'] in f['interims']:
                        ival = float(f['interims'][i['keyword']])
                        intermap.append({'keyword': i['keyword'],
                                         'value': ival,
                                         'title': i['title'],
                                         'hidden': i['hidden'],
                                         'type': i['type'],
                                         'unit': i['unit']})
                    else:
                        intermap.append(i)
                an.setInterimFields(intermap)
                self.assertEqual(an.getInterimFields(), intermap)

            # Let's go.. calculate and check result
            calcanalysis.calculateResult(override=True, cascade=True)
            self.assertEqual(calcanalysis.getFormattedResult(),
                             case['expected_result'])
def test_DecimalSciNotation(self):
    """Exhaustively check Analysis.getFormattedResult() against a
    matrix of (service precision, exponential-format precision,
    sci-notation style, raw result, expected formatted string).
    """
    # Notations
    # '1' => aE+b / aE-b
    # '2' => ax10^b / ax10^-b
    # '3' => ax10^b / ax10^-b (with superscript)
    # '4' => a·10^b / a·10^-b
    # '5' => a·10^b / a·10^-b (with superscript)
    # Matrix columns: precision, exp. precision, notation, raw result,
    # expected formatted result.
    matrix = [
        # as_prec  as_exp  not  result    formatted result
        # -------  ------  ---  --------  ------------------------------
        [0, 0, 1, '0', '0'],
        [0, 0, 2, '0', '0'],
        [0, 0, 3, '0', '0'],
        [0, 0, 4, '0', '0'],
        [0, 0, 5, '0', '0'],
        # Crazy results!...
        # decimal precision << exponential precision, result << 1
        # For example:
        #   Precision=2, Exp precision=5 and result=0.000012
        # Which is the less confusing result?
        # a) 0.00
        #    Because the precision is 2 and the number of significant
        #    decimals is below precision.
        # b) 1e-05
        #    Because the exponential precision is 5 and the number of
        #    significant decimals is equal or above the exp precision
        #
        # The best choice is (a): give priority to decimal precision
        # and omit the exponential precision.
        #
        # "Calculate precision from uncertainties" is the best antidote
        # to avoid these abnormal results, precisely... maybe the
        # labmanager setup the uncertainties ranges but missed to select
        # the checkbox "Calculate precision from uncertainties", so the
        # system has to deal with these incoherent values. From this
        # point of view, again the best choice is to give priority to
        # decimal precision.
        #
        # We follow this rule:
        # if the result is >0 and <1 and the number of significant
        # digits is below the precision, ALWAYS use the decimal
        # precision and don't take exp precision into account
        [0, 5, 1, '0.00001', '0'],
        [0, 5, 2, '0.00001', '0'],
        [0, 5, 3, '0.00001', '0'],
        [0, 5, 4, '0.00001', '0'],
        [0, 5, 5, '0.00001', '0'],
        [0, 5, 1, '-0.00001', '0'],
        [0, 5, 2, '-0.00001', '0'],
        [0, 5, 3, '-0.00001', '0'],
        [0, 5, 4, '-0.00001', '0'],
        [0, 5, 5, '-0.00001', '0'],
        [2, 5, 1, '0.00012', '0.00'],
        [2, 5, 2, '0.00012', '0.00'],
        [2, 5, 3, '0.00012', '0.00'],
        [2, 5, 4, '0.00012', '0.00'],
        [2, 5, 5, '0.00012', '0.00'],
        [2, 5, 1, '0.00001', '0.00'],
        [2, 5, 2, '0.00001', '0.00'],
        [2, 5, 3, '0.00001', '0.00'],
        [2, 5, 4, '0.00001', '0.00'],
        [2, 5, 5, '0.00001', '0.00'],
        [2, 5, 1, '0.0000123', '0.00'],
        [2, 5, 2, '0.0000123', '0.00'],
        [2, 5, 3, '0.0000123', '0.00'],
        [2, 5, 4, '0.0000123', '0.00'],
        [2, 5, 5, '0.0000123', '0.00'],
        [2, 5, 1, '0.01', '0.01'],
        [2, 5, 2, '0.01', '0.01'],
        [2, 5, 3, '0.01', '0.01'],
        [2, 5, 4, '0.01', '0.01'],
        [2, 5, 5, '0.01', '0.01'],
        # More crazy results... exp_precision = 0 has no sense!
        # As above, the decimal precision gets priority
        [2, 0, 1, '0', '0.00'],
        [2, 0, 2, '0', '0.00'],
        [2, 0, 3, '0', '0.00'],
        [2, 0, 4, '0', '0.00'],
        [2, 0, 5, '0', '0.00'],
        [2, 0, 1, '0.012', '0.01'],
        [2, 0, 2, '0.012', '0.01'],
        [2, 0, 3, '0.012', '0.01'],
        [2, 0, 4, '0.012', '0.01'],
        [2, 0, 5, '0.012', '0.01'],
        [2, 1, 1, '0', '0.00'],
        [2, 1, 2, '0', '0.00'],
        [2, 1, 3, '0', '0.00'],
        [2, 1, 4, '0', '0.00'],
        [2, 1, 5, '0', '0.00'],
        # Apply the sci notation here, but 'cut' the extra decimals first
        [2, 1, 1, '0.012', '1e-02'],
        [2, 1, 2, '0.012', '1x10^-2'],
        [2, 1, 3, '0.012', '1x10<sup>-2</sup>'],
        [2, 1, 4, '0.012', '1·10^-2'],
        [2, 1, 5, '0.012', '1·10<sup>-2</sup>'],
        [2, 1, 1, '0.123', '1.2e-01'],
        [2, 1, 2, '0.123', '1.2x10^-1'],
        [2, 1, 3, '0.123', '1.2x10<sup>-1</sup>'],
        [2, 1, 4, '0.123', '1.2·10^-1'],
        [2, 1, 5, '0.123', '1.2·10<sup>-1</sup>'],
        [2, 1, 1, '1.234', '1.23'],
        [2, 1, 2, '1.234', '1.23'],
        [2, 1, 3, '1.234', '1.23'],
        [2, 1, 4, '1.234', '1.23'],
        [2, 1, 5, '1.234', '1.23'],
        [2, 1, 1, '12.345', '1.235e01'],
        [2, 1, 2, '12.345', '1.235x10^1'],
        [2, 1, 3, '12.345', '1.235x10<sup>1</sup>'],
        [2, 1, 4, '12.345', '1.235·10^1'],
        [2, 1, 5, '12.345', '1.235·10<sup>1</sup>'],
        [4, 3, 1, '0.0000123', '0.0000'],
        [4, 3, 2, '0.0000123', '0.0000'],
        [4, 3, 3, '0.0000123', '0.0000'],
        [4, 3, 4, '0.0000123', '0.0000'],
        [4, 3, 5, '0.0000123', '0.0000'],
        [4, 3, 1, '0.0001234', '1e-04'],
        [4, 3, 2, '0.0001234', '1x10^-4'],
        [4, 3, 3, '0.0001234', '1x10<sup>-4</sup>'],
        [4, 3, 4, '0.0001234', '1·10^-4'],
        [4, 3, 5, '0.0001234', '1·10<sup>-4</sup>'],
        [4, 3, 1, '0.0012345', '1.2e-03'],
        [4, 3, 2, '0.0012345', '1.2x10^-3'],
        [4, 3, 3, '0.0012345', '1.2x10<sup>-3</sup>'],
        [4, 3, 4, '0.0012345', '1.2·10^-3'],
        [4, 3, 5, '0.0012345', '1.2·10<sup>-3</sup>'],
        # NOTE(review): duplicated row below kept verbatim from the
        # original matrix (harmless; re-runs the same case).
        [4, 3, 1, '0.0123456', '0.0123'],
        [4, 3, 1, '0.0123456', '0.0123'],
        [4, 3, 2, '0.0123456', '0.0123'],
        [4, 3, 3, '0.0123456', '0.0123'],
        [4, 3, 4, '0.0123456', '0.0123'],
        [4, 3, 5, '0.0123456', '0.0123'],
        [4, 3, 1, '0.1234567', '0.1235'],
        [4, 3, 2, '0.1234567', '0.1235'],
        [4, 3, 3, '0.1234567', '0.1235'],
        [4, 3, 4, '0.1234567', '0.1235'],
        [4, 3, 5, '0.1234567', '0.1235'],
        [4, 3, 1, '1.2345678', '1.2346'],
        [4, 3, 2, '1.2345678', '1.2346'],
        [4, 3, 3, '1.2345678', '1.2346'],
        [4, 3, 4, '1.2345678', '1.2346'],
        [4, 3, 5, '1.2345678', '1.2346'],
        [4, 3, 1, '12.345678', '12.3457'],
        [4, 3, 2, '12.345678', '12.3457'],
        [4, 3, 3, '12.345678', '12.3457'],
        [4, 3, 4, '12.345678', '12.3457'],
        [4, 3, 5, '12.345678', '12.3457'],
        [4, 3, 1, '123.45678', '123.4568'],
        [4, 3, 2, '123.45678', '123.4568'],
        [4, 3, 3, '123.45678', '123.4568'],
        [4, 3, 4, '123.45678', '123.4568'],
        [4, 3, 5, '123.45678', '123.4568'],
        [4, 3, 1, '1234.5678', '1.2345678e03'],
        [4, 3, 2, '1234.5678', '1.2345678x10^3'],
        [4, 3, 3, '1234.5678', '1.2345678x10<sup>3</sup>'],
        [4, 3, 4, '1234.5678', '1.2345678·10^3'],
        [4, 3, 5, '1234.5678', '1.2345678·10<sup>3</sup>'],
        [4, 3, 1, '-0.0000123', '0.0000'],
        [4, 3, 2, '-0.0000123', '0.0000'],
        [4, 3, 3, '-0.0000123', '0.0000'],
        [4, 3, 4, '-0.0000123', '0.0000'],
        [4, 3, 5, '-0.0000123', '0.0000'],
        [4, 3, 1, '-0.0001234', '-1e-04'],
        [4, 3, 2, '-0.0001234', '-1x10^-4'],
        [4, 3, 3, '-0.0001234', '-1x10<sup>-4</sup>'],
        [4, 3, 4, '-0.0001234', '-1·10^-4'],
        [4, 3, 5, '-0.0001234', '-1·10<sup>-4</sup>'],
        [4, 3, 1, '-0.0012345', '-1.2e-03'],
        [4, 3, 2, '-0.0012345', '-1.2x10^-3'],
        [4, 3, 3, '-0.0012345', '-1.2x10<sup>-3</sup>'],
        [4, 3, 4, '-0.0012345', '-1.2·10^-3'],
        [4, 3, 5, '-0.0012345', '-1.2·10<sup>-3</sup>'],
        # NOTE(review): duplicated row below kept verbatim from the
        # original matrix (harmless; re-runs the same case).
        [4, 3, 1, '-0.0123456', '-0.0123'],
        [4, 3, 1, '-0.0123456', '-0.0123'],
        [4, 3, 2, '-0.0123456', '-0.0123'],
        [4, 3, 3, '-0.0123456', '-0.0123'],
        [4, 3, 4, '-0.0123456', '-0.0123'],
        [4, 3, 5, '-0.0123456', '-0.0123'],
        [4, 3, 1, '-0.1234567', '-0.1235'],
        [4, 3, 2, '-0.1234567', '-0.1235'],
        [4, 3, 3, '-0.1234567', '-0.1235'],
        [4, 3, 4, '-0.1234567', '-0.1235'],
        [4, 3, 5, '-0.1234567', '-0.1235'],
        [4, 3, 1, '-1.2345678', '-1.2346'],
        [4, 3, 2, '-1.2345678', '-1.2346'],
        [4, 3, 3, '-1.2345678', '-1.2346'],
        [4, 3, 4, '-1.2345678', '-1.2346'],
        [4, 3, 5, '-1.2345678', '-1.2346'],
        [4, 3, 1, '-12.345678', '-12.3457'],
        [4, 3, 2, '-12.345678', '-12.3457'],
        [4, 3, 3, '-12.345678', '-12.3457'],
        [4, 3, 4, '-12.345678', '-12.3457'],
        [4, 3, 5, '-12.345678', '-12.3457'],
        [4, 3, 1, '-123.45678', '-123.4568'],
        [4, 3, 2, '-123.45678', '-123.4568'],
        [4, 3, 3, '-123.45678', '-123.4568'],
        [4, 3, 4, '-123.45678', '-123.4568'],
        [4, 3, 5, '-123.45678', '-123.4568'],
        [4, 3, 1, '-1234.5678', '-1.2345678e03'],
        [4, 3, 2, '-1234.5678', '-1.2345678x10^3'],
        [4, 3, 3, '-1234.5678', '-1.2345678x10<sup>3</sup>'],
        [4, 3, 4, '-1234.5678', '-1.2345678·10^3'],
        [4, 3, 5, '-1234.5678', '-1.2345678·10<sup>3</sup>'],
        [4, 3, 1, '1200000', '1.2e06'],
        [4, 3, 2, '1200000', '1.2x10^6'],
        [4, 3, 3, '1200000', '1.2x10<sup>6</sup>'],
        [4, 3, 4, '1200000', '1.2·10^6'],
        [4, 3, 5, '1200000', '1.2·10<sup>6</sup>'],
        # Weird!!! negative values for exp precision
        [2, -6, 1, '12340', '12340.00'],
        [2, -4, 1, '12340', '1.234e04'],
        [2, -4, 2, '12340', '1.234x10^4'],
        [2, -4, 3, '12340', '1.234x10<sup>4</sup>'],
        [2, -4, 4, '12340', '1.234·10^4'],
        [2, -4, 5, '12340', '1.234·10<sup>4</sup>'],
        [2, -4, 1, '12340.01', '1.234001e04'],
        [2, -4, 2, '12340.01', '1.234001x10^4'],
        [2, -4, 3, '12340.01', '1.234001x10<sup>4</sup>'],
        [2, -4, 4, '12340.01', '1.234001·10^4'],
        [2, -4, 5, '12340.01', '1.234001·10<sup>4</sup>'],
        [2, -6, 1, '-12340', '-12340.00'],
        [2, -4, 1, '-12340', '-1.234e04'],
        [2, -4, 2, '-12340', '-1.234x10^4'],
        [2, -4, 3, '-12340', '-1.234x10<sup>4</sup>'],
        [2, -4, 4, '-12340', '-1.234·10^4'],
        [2, -4, 5, '-12340', '-1.234·10<sup>4</sup>'],
        [2, -4, 1, '-12340.01', '-1.234001e04'],
        [2, -4, 2, '-12340.01', '-1.234001x10^4'],
        [2, -4, 3, '-12340.01', '-1.234001x10<sup>4</sup>'],
        [2, -4, 4, '-12340.01', '-1.234001·10^4'],
        [2, -4, 5, '-12340.01', '-1.234001·10<sup>4</sup>'],
        [2, 6, 1, '12340', '12340.00'],
        [2, 6, 2, '12340', '12340.00'],
        [2, 6, 3, '12340', '12340.00'],
        [2, 6, 4, '12340', '12340.00'],
        [2, 6, 5, '12340', '12340.00'],
        [2, 4, 1, '12340', '1.234e04'],
        [2, 4, 2, '12340', '1.234x10^4'],
        [2, 4, 3, '12340', '1.234x10<sup>4</sup>'],
        [2, 4, 4, '12340', '1.234·10^4'],
        [2, 4, 5, '12340', '1.234·10<sup>4</sup>'],
        [2, 4, 1, '12340.0123', '1.234001e04'],
        [2, 4, 2, '12340.0123', '1.234001x10^4'],
        [2, 4, 3, '12340.0123', '1.234001x10<sup>4</sup>'],
        [2, 4, 4, '12340.0123', '1.234001·10^4'],
        [2, 4, 5, '12340.0123', '1.234001·10<sup>4</sup>'],
        [2, 4, 1, '-12340', '-1.234e04'],
        [2, 4, 2, '-12340', '-1.234x10^4'],
        [2, 4, 3, '-12340', '-1.234x10<sup>4</sup>'],
        [2, 4, 4, '-12340', '-1.234·10^4'],
        [2, 4, 5, '-12340', '-1.234·10<sup>4</sup>'],
        [2, 4, 1, '-12340.0123', '-1.234001e04'],
        [2, 4, 2, '-12340.0123', '-1.234001x10^4'],
        [2, 4, 3, '-12340.0123', '-1.234001x10<sup>4</sup>'],
        [2, 4, 4, '-12340.0123', '-1.234001·10^4'],
        [2, 4, 5, '-12340.0123', '-1.234001·10<sup>4</sup>'],
    ]
    s = self.service
    s.setLowerDetectionLimit('-99999')  # We want to test results below 0 too
    prevm = []
    an = None
    bs = self.portal.bika_setup
    for m in matrix:
        # Create the AR and set the values to the AS, but only if necessary
        # (i.e. only when precision or exp-precision changed since the
        # previous matrix row).
        if not an or prevm[0] != m[0] or prevm[1] != m[1]:
            s.setPrecision(m[0])
            s.setExponentialFormatPrecision(m[1])
            self.assertEqual(s.getPrecision(), m[0])
            self.assertEqual(s.Schema().getField('Precision').get(s), m[0])
            self.assertEqual(s.getExponentialFormatPrecision(), m[1])
            self.assertEqual(
                s.Schema().getField('ExponentialFormatPrecision').get(s),
                m[1])
            client = self.portal.clients['client-1']
            sampletype = bs.bika_sampletypes['sampletype-1']
            values = {
                'Client': client.UID(),
                'Contact': client.getContacts()[0].UID(),
                'SamplingDate': '2015-01-01',
                'SampleType': sampletype.UID()
            }
            ar = create_analysisrequest(client, {}, values, [s.UID()])
            wf = getToolByName(ar, 'portal_workflow')
            wf.doActionFor(ar, 'receive')
            an = ar.getAnalyses()[0].getObject()
            prevm = m
        an.setResult(m[3])
        self.assertEqual(an.getResult(), m[3])
        self.assertEqual(an.Schema().getField('Result').get(an), m[3])
        fr = an.getFormattedResult(sciformat=m[2])
        # print '%s %s %s %s => \'%s\' ?= \'%s\'' % \
        #     (m[0], m[1], m[2], m[3], m[4], fr)
        self.assertEqual(fr, m[4])
def import_items(self):
    """Create one Analysis Request per row of this ARImport's ItemData,
    attaching them to an existing Batch when one matches the import's
    BatchID/BatchTitle, or creating a new Batch otherwise.

    Progress is reported through the view's progress bar helpers.
    """
    context = self.context
    request = context.REQUEST
    bika_catalog = getToolByName(context, 'bika_catalog')
    client = context.aq_parent
    # Resolve the client contact by full name; a missing contact raises
    # IndexError here (unchanged behaviour).
    contact = [c for c in client.objectValues('Contact')
               if c.getFullname() == self.context.getContactName()][0]

    self.progressbar_init('Submitting AR Import')

    # Find existing batch or create new batch if required.
    batch = None
    batch_id = context.getBatchID()
    batch_title = context.getBatchTitle()
    # First try to find existing batch
    brains = bika_catalog(portal_type='Batch', id=batch_id)
    if not brains:
        brains = bika_catalog(portal_type='Batch', title=batch_title)
    if brains:
        # NOTE(review): a found batch is a catalog brain, not the
        # object itself -- confirm downstream consumers accept that.
        batch = brains[0]
    if not batch:
        # Create batch if it does not exist
        _bid = batch_id if batch_id else tmpID()
        batch = _createObjectByType("Batch", client, _bid)
        batch.unmarkCreationFlag()
        batch.edit(title=batch_title,
                   description=context.getBatchDescription(),
                   ClientBatchID=context.getClientBatchID(),
                   Remarks=context.getLabBatchComment(),
                   ClientBatchComment=context.getClientBatchComment())
        if not batch_id:
            batch._renameAfterCreation()
        event.notify(ObjectInitializedEvent(batch))
        batch.at_post_create_script()

    itemdata = context.Schema()['ItemData'].get(context)
    for i, item in enumerate(itemdata):
        # Resolve the row's analyses to service UIDs.
        service_uids = []
        for a in item['Analyses']:
            for service in self.resolve_analyses(a):
                if isinstance(service, AnalysisService):
                    service_uids.append(service.UID())
        # Create AR
        ar_values = {
            'Contact': contact,
            'ClientOrderNumber': context.getClientOrderNumber(),
            'Batch': batch if batch else None,
            'ClientReference': context.getClientReference(),
            'ClientSampleID': item['ClientSampleID'],
            'SampleType': context.getSampleType(),
            # SampleSite field: extenders/arimport,sample,analysisrequest.py
            'SampleSite': context.getField('SampleSite').get(context),
            'DateSampled': DateTime(item['DateSampled']),
            'SamplingDate': DateTime(item['DateSampled']),
            # BUGFIX: 'Remarks' appeared twice in the original dict
            # literal with the same value; the later key silently
            # overrode the earlier one, so only one entry is kept.
            'Remarks': item['Remarks'],
        }
        create_analysisrequest(client, request, ar_values,
                               analyses=service_uids,
                               partitions=[{}])
        self.progressbar_progress(i + 1, len(itemdata))
def __call__(self):
    """Handle submission of the multi-column Analysis Request add form.

    Validates each non-empty 'ar.N' column against the required schema
    fields, resolves *_uid reference values, gathers per-column specs
    and partitions, creates one AR per column, and returns a JSON
    response with errors or a success message (plus sticker-printing
    info when auto-printing of "register" stickers is enabled).
    """
    form = self.request.form
    plone.protect.CheckAuthenticator(self.request.form)
    plone.protect.PostOnly(self.request.form)
    came_from = "came_from" in form and form["came_from"] or "add"
    wftool = getToolByName(self.context, "portal_workflow")
    uc = getToolByName(self.context, "uid_catalog")
    bsc = getToolByName(self.context, "bika_setup_catalog")
    errors = {}
    form_parts = json.loads(self.request.form["parts"])

    # First make a list of non-empty columns
    columns = []
    for column in range(int(form["col_count"])):
        name = "ar.%s" % column
        ar = form.get(name, None)
        if ar and "Analyses" in ar.keys():
            columns.append(column)

    if len(columns) == 0:
        ajax_form_error(errors,
                        message=t(_("No analyses have been selected")))
        return json.dumps({"errors": errors})

    # Now some basic validation
    required_fields = [field.getName()
                       for field in AnalysisRequestSchema.fields()
                       if field.required]

    for column in columns:
        formkey = "ar.%s" % column
        ar = form[formkey]
        # check that required fields have values
        for field in required_fields:
            # This one is still special.
            if field in ["RequestID"]:
                continue
            # And these are not required if this is a secondary AR
            if ar.get("Sample", "") != "" \
                    and field in ["SamplingDate", "SampleType"]:
                continue
            if not ar.get(field, ""):
                ajax_form_error(errors, field, column)
    # Return errors if there are any
    if errors:
        return json.dumps({"errors": errors})

    # Get the prices from the form data
    prices = form.get("Prices", None)
    # Initialize the Anlysis Request collection
    ARs = []
    # if a new profile is created automatically,
    # this flag triggers the status message
    new_profile = None

    # The actual submission
    for column in columns:
        # Get partitions from the form data
        if form_parts:
            partitions = form_parts[str(column)]
        else:
            partitions = []
        # Get the form data using the appropriate form key
        formkey = "ar.%s" % column
        values = form[formkey].copy()
        # resolved values is formatted as acceptable by archetypes
        # widget machines
        resolved_values = {}
        for k, v in values.items():
            # Analyses, we handle that specially.
            if k == "Analyses":
                continue
            # Insert the reference *_uid values instead of titles.
            if "_uid" in k:
                v = values[k]
                # multiValued widgets post comma-joined UID strings
                v = v.split(",") if v and "," in v else v
                fname = k.replace("_uid", "")
                resolved_values[fname] = v
                continue
            # we want to write the UIDs and ignore the title values
            if k + "_uid" in values:
                continue
            resolved_values[k] = values[k]
        # Get the analyses from the form data
        analyses = values["Analyses"]

        # Gather the specifications from the form
        specs = json.loads(form["copy_to_new_specs"]).get(str(column), {})
        if not specs:
            specs = json.loads(form["specs"]).get(str(column), {})
        if specs:
            specs = dicts_to_dict(specs, "keyword")
        # Modify the spec with all manually entered values
        for service_uid in analyses:
            min_element_name = "ar.%s.min.%s" % (column, service_uid)
            max_element_name = "ar.%s.max.%s" % (column, service_uid)
            error_element_name = "ar.%s.error.%s" % (column, service_uid)
            # NOTE(review): no call parens -- presumably getKeyword is
            # brain *metadata* on this catalog, not a method; confirm.
            service_keyword = bsc(UID=service_uid)[0].getKeyword
            if min_element_name in form:
                if service_keyword not in specs:
                    specs[service_keyword] = {}
                specs[service_keyword]["keyword"] = service_keyword
                specs[service_keyword]["min"] = form[min_element_name]
                specs[service_keyword]["max"] = form[max_element_name]
                specs[service_keyword]["error"] = form[error_element_name]

        # Selecting a template sets the hidden 'parts' field to template
        # values.  Selecting a profile will allow ar_add.js to fill in
        # the parts field.  The result is the same once we are here.
        if not partitions:
            partitions = [{"services": [],
                           "container": None,
                           "preservation": "",
                           "separate": False}]

        # Apply DefaultContainerType to partitions without a container
        default_container_type = resolved_values.get(
            "DefaultContainerType", None)
        if default_container_type:
            container_type = bsc(UID=default_container_type)[0].getObject()
            containers = container_type.getContainers()
            for partition in partitions:
                if not partition.get("container", None):
                    partition["container"] = containers

        # Retrieve the catalogue reference to the client
        client = uc(UID=resolved_values["Client"])[0].getObject()
        # Create the Analysis Request
        ar = create_analysisrequest(
            client,
            self.request,
            resolved_values,
            analyses=analyses,
            partitions=partitions,
            specifications=specs.values(),
            prices=prices,
        )
        # Add the created analysis request to the list
        ARs.append(ar.getId())

    # Display the appropriate message after creation
    if len(ARs) > 1:
        message = _(
            "Analysis requests ${ARs} were successfully created.",
            mapping={"ARs": safe_unicode(", ".join(ARs))}
        )
    else:
        message = _("Analysis request ${AR} was successfully created.",
                    mapping={"AR": safe_unicode(ARs[0])})
    self.context.plone_utils.addPortalMessage(message, "info")

    # Automatic label printing
    # Won't print labels for Register on Secondary ARs
    new_ars = None
    if came_from == "add":
        # NOTE(review): assumes new (primary) AR ids end in '01' --
        # confirm against the AR ID generation scheme.
        new_ars = [ar for ar in ARs if ar[-2:] == "01"]
    if "register" in self.context.bika_setup.getAutoPrintStickers() \
            and new_ars:
        return json.dumps(
            {
                "success": message,
                "stickers": new_ars,
                "stickertemplate":
                    self.context.bika_setup.getAutoStickerTemplate(),
            }
        )
    else:
        return json.dumps({"success": message})
def test_ar_manageresults_limitdetections(self):
    """Exercise lower/upper detection-limit (DL) handling per analysis:
    plain results inside/outside the limits, results entered with an
    explicit '<'/'>' operator, and the setDetectionLimitOperand API --
    each checked with and without AllowManualDetectionLimit.

    Fixture assumptions (set up outside this block -- confirm in
    setUp): self.lds[idx] holds 'min', 'max' and 'manual' per service,
    indexed via the asidxs map below.
    """
    # Input results
    # Client: Happy Hills
    # SampleType: Apple Pulp
    # Contact: Rita Mohale
    # Analyses: [Calcium, Copper]
    client = self.portal.clients['client-1']
    sampletype = self.portal.bika_setup.bika_sampletypes['sampletype-1']
    values = {'Client': client.UID(),
              'Contact': client.getContacts()[0].UID(),
              'SamplingDate': '2015-01-01',
              'SampleType': sampletype.UID()}
    request = {}
    services = [s.UID() for s in self.services]
    ar = create_analysisrequest(client, request, values, services)

    # Basic detection limits
    # Maps service id -> index into the self.lds fixture.
    asidxs = {'analysisservice-3': 0,
              'analysisservice-6': 1,
              'analysisservice-7': 2}
    for a in ar.getAnalyses():
        an = a.getObject()
        idx = asidxs[an.getService().id]
        self.assertEqual(an.getLowerDetectionLimit(),
                         float(self.lds[idx]['min']))
        self.assertEqual(an.getUpperDetectionLimit(),
                         float(self.lds[idx]['max']))
        self.assertEqual(an.getService().getAllowManualDetectionLimit(),
                         self.lds[idx]['manual'])
        # Empty result
        self.assertFalse(an.getDetectionLimitOperand())
        self.assertFalse(an.isBelowLowerDetectionLimit())
        self.assertFalse(an.isAboveUpperDetectionLimit())

        # Set a result (inside the limits)
        an.setResult('15')
        self.assertEqual(float(an.getResult()), 15)
        self.assertFalse(an.isBelowLowerDetectionLimit())
        self.assertFalse(an.isAboveUpperDetectionLimit())
        self.assertFalse(an.getDetectionLimitOperand())
        self.assertEqual(an.getFormattedResult(), '15.00')
        self.assertEqual(an.getFormattedResult(html=True), '15.00')
        self.assertEqual(an.getFormattedResult(html=False), '15.00')

        # Below the lower DL: formatted as '< min'
        an.setResult('-1')
        self.assertEqual(float(an.getResult()), -1)
        self.assertTrue(an.isBelowLowerDetectionLimit())
        self.assertFalse(an.isAboveUpperDetectionLimit())
        self.assertFalse(an.getDetectionLimitOperand())
        self.assertEqual(an.getFormattedResult(html=False),
                         '< %s' % (self.lds[idx]['min']))
        self.assertEqual(an.getFormattedResult(html=True),
                         '< %s' % (self.lds[idx]['min']))
        self.assertEqual(an.getFormattedResult(),
                         '< %s' % (self.lds[idx]['min']))

        # Above the upper DL: formatted as '> max'
        an.setResult('2000')
        self.assertEqual(float(an.getResult()), 2000)
        self.assertFalse(an.isBelowLowerDetectionLimit())
        self.assertTrue(an.isAboveUpperDetectionLimit())
        self.assertFalse(an.getDetectionLimitOperand())
        self.assertEqual(an.getFormattedResult(html=False),
                         '> %s' % (self.lds[idx]['max']))
        self.assertEqual(an.getFormattedResult(html=True),
                         '> %s' % (self.lds[idx]['max']))
        self.assertEqual(an.getFormattedResult(),
                         '> %s' % (self.lds[idx]['max']))

        # Set a DL result: the '<'/'>' operator is only honoured when
        # manual detection limits are allowed for the service.
        an.setResult('<15')
        self.assertEqual(float(an.getResult()), 15)
        if self.lds[idx]['manual']:
            self.assertTrue(an.isBelowLowerDetectionLimit())
            self.assertFalse(an.isAboveUpperDetectionLimit())
            self.assertEqual(an.getDetectionLimitOperand(), '<')
            self.assertEqual(an.getFormattedResult(html=False), '< 15')
            self.assertEqual(an.getFormattedResult(html=True), '< 15')
            self.assertEqual(an.getFormattedResult(), '< 15')
        else:
            self.assertFalse(an.isBelowLowerDetectionLimit())
            self.assertFalse(an.isAboveUpperDetectionLimit())
            self.assertFalse(an.getDetectionLimitOperand())
            self.assertEqual(an.getFormattedResult(html=False), '15.00')
            self.assertEqual(an.getFormattedResult(html=True), '15.00')
            self.assertEqual(an.getFormattedResult(), '15.00')

        an.setResult('>15')
        self.assertEqual(float(an.getResult()), 15)
        if self.lds[idx]['manual']:
            self.assertFalse(an.isBelowLowerDetectionLimit())
            self.assertTrue(an.isAboveUpperDetectionLimit())
            self.assertEqual(an.getDetectionLimitOperand(), '>')
            self.assertEqual(an.getFormattedResult(html=False), '> 15')
            self.assertEqual(an.getFormattedResult(html=True), '> 15')
            self.assertEqual(an.getFormattedResult(), '> 15')
        else:
            self.assertFalse(an.isBelowLowerDetectionLimit())
            self.assertFalse(an.isAboveUpperDetectionLimit())
            self.assertFalse(an.getDetectionLimitOperand())
            self.assertEqual(an.getFormattedResult(html=False), '15.00')
            self.assertEqual(an.getFormattedResult(html=True), '15.00')
            self.assertEqual(an.getFormattedResult(), '15.00')

        # Set a DL result explicitely
        an.setDetectionLimitOperand('<')
        an.setResult('15')
        self.assertEqual(float(an.getResult()), 15)
        if self.lds[idx]['manual']:
            self.assertTrue(an.isBelowLowerDetectionLimit())
            self.assertFalse(an.isAboveUpperDetectionLimit())
            self.assertEqual(an.getDetectionLimitOperand(), '<')
            self.assertEqual(an.getFormattedResult(html=False), '< 15')
            self.assertEqual(an.getFormattedResult(html=True), '< 15')
            self.assertEqual(an.getFormattedResult(), '< 15')
        else:
            self.assertFalse(an.isBelowLowerDetectionLimit())
            self.assertFalse(an.isAboveUpperDetectionLimit())
            self.assertFalse(an.getDetectionLimitOperand())
            self.assertEqual(an.getFormattedResult(html=False), '15.00')
            self.assertEqual(an.getFormattedResult(html=True), '15.00')
            self.assertEqual(an.getFormattedResult(), '15.00')

        an.setDetectionLimitOperand('>')
        an.setResult('15')
        self.assertEqual(float(an.getResult()), 15)
        if self.lds[idx]['manual']:
            self.assertFalse(an.isBelowLowerDetectionLimit())
            self.assertTrue(an.isAboveUpperDetectionLimit())
            self.assertEqual(an.getDetectionLimitOperand(), '>')
            self.assertEqual(an.getFormattedResult(html=False), '> 15')
            self.assertEqual(an.getFormattedResult(html=True), '> 15')
            self.assertEqual(an.getFormattedResult(), '> 15')
        else:
            self.assertFalse(an.isBelowLowerDetectionLimit())
            self.assertFalse(an.isAboveUpperDetectionLimit())
            self.assertFalse(an.getDetectionLimitOperand())
            self.assertEqual(an.getFormattedResult(html=False), '15.00')
            self.assertEqual(an.getFormattedResult(html=True), '15.00')
            self.assertEqual(an.getFormattedResult(), '15.00')
def test_reflex_rule_set_get(self):
    """ Testing the simple set/get data from the field and the content
    type functions.

    Builds the minimum setup chain (department -> category -> method ->
    analysis service), stores one reflex rule on a new ReflexRule object
    and then checks:
      - the stored rule round-trips through getReflexRules(), and
      - getActionReflexRules() returns the rule's actions only when the
        analysis result is inside the configured range (5..10) for the
        'submit' trigger.
    """
    # Creating a department
    department_data = [
        {
            'title': 'dep1',
        }
    ]
    deps = self.create_departments(department_data)
    # Creating a category bound to that department
    category_data = [{
        'title': 'cat1',
        'Department': deps[0]
    }, ]
    cats = self.create_category(category_data)
    # Creating a method
    methods_data = [
        {
            'title': 'Method 1',
            'description': 'A description',
            'Instructions': 'An instruction',
            'MethodID': 'm1',
            'Accredited': 'True'
        },
    ]
    meths = self.create_methods(methods_data)
    # Creating an analysis service that uses the method above
    as_data = [{
        'title': 'analysis service1',
        'ShortTitle': 'as1',
        'Keyword': 'as1',
        'PointOfCapture': 'Lab',
        'Category': cats[0],
        'Methods': meths,
    }, ]
    ans_list = self.create_analysisservices(as_data)
    # Creating a simple rule: on 'submit', if the mother service's result
    # falls inside [5, 10], repeat the analysis on the current worksheet
    rules = [{
        'actions': [{
            'act_row_idx': 0,
            'action': 'repeat',
            'an_result_id': 'rep-1',
            'analyst': '',
            'otherWS': 'current',
            'setresultdiscrete': '',
            'setresulton': 'original',
            'setresultvalue': '',
            'worksheettemplate': ''}],
        'conditions': [{
            'analysisservice': ans_list[0].UID(),
            'and_or': 'no',
            'cond_row_idx': 0,
            'discreteresult': '',
            'range0': '5',
            'range1': '10'}],
        'mother_service_uid': ans_list[0].UID(),
        'rulenumber': '0',
        'trigger': 'submit'}, ]
    title = 'Rule MS'
    method = meths[0]
    rule = self.create_reflex_rule(title, method, rules)
    # The stored rule must reference the service both in the condition
    # and as the mother service
    self.assertTrue(
        ans_list[0].UID() == rule.getReflexRules()[0]
        .get('conditions', {})[0].get('analysisservice', '')
    )
    self.assertTrue(
        ans_list[0].UID() == rule.getReflexRules()[0]
        .get('mother_service_uid', '')
    )
    # Create an analysis Request
    client = self.portal.clients['client-1']
    sampletype = self.portal.bika_setup.bika_sampletypes['sampletype-1']
    values = {'Client': client.UID(),
              'Contact': client.getContacts()[0].UID(),
              'SamplingDate': '2015-01-01',
              'SampleType': sampletype.UID()}
    request = {}
    ar = create_analysisrequest(client, request, values, [ans_list[-1]])
    doActionFor(ar, 'receive')
    # Getting the analysis
    analysis = ar.getAnalyses(full_objects=True)[0]
    # Testing reflexrule content type public functions
    result = rule.getActionReflexRules(analysis, 'submit')
    # No result yet: no actions expected
    self.assertEqual(result, [])
    # A result outside the range: still no actions
    analysis.setResult('11.3')
    result = rule.getActionReflexRules(analysis, 'submit')
    self.assertEqual(result, [])
    # A result inside the range: the rule's actions must be returned
    analysis.setResult('6.7')
    result = rule.getActionReflexRules(analysis, 'submit')
    # Removing rulename and rulenumber from result, this function returns
    # both and we don't want them to compare
    del result[0]['rulenumber']
    del result[0]['rulename']
    self.assertEqual(result, rules[0]['actions'])
def test_ar_manage_results_detectionlimit_selector_manual(self):
    """Exercise detection-limit handling on analysis results for every
    combination of the service flags DetectionLimitSelector (displaydl)
    and AllowManualDetectionLimit (manual).

    Each case sets LDL=10 / UDL=20 on the first service, creates and
    receives a fresh AR, enters ``input`` as the result and checks the
    stored result, the below/above-DL predicates, the is-LDL/is-UDL
    flags and the formatted result in both plain and HTML modes.
    """
    cases = [
        # ROUND 1: selector off, manual off -> '<'/'>' prefixes ignored
        {'min': '10', 'max': '20', 'displaydl': False, 'manual': False,
         'input': '5', 'expresult': 5.0, 'expformattedresult': '< 10',
         'isbelowldl': True, 'isaboveudl': False,
         'isldl': False, 'isudl': False},
        {'min': '10', 'max': '20', 'displaydl': False, 'manual': False,
         'input': '15', 'expresult': 15.0, 'expformattedresult': '15.00',
         'isbelowldl': False, 'isaboveudl': False,
         'isldl': False, 'isudl': False},
        {'min': '10', 'max': '20', 'displaydl': False, 'manual': False,
         'input': '25', 'expresult': 25.0, 'expformattedresult': '> 20',
         'isbelowldl': False, 'isaboveudl': True,
         'isldl': False, 'isudl': False},
        {'min': '10', 'max': '20', 'displaydl': False, 'manual': False,
         'input': '<5',
         'expresult': 5.0,  # '<' assignment not allowed
         'expformattedresult': '< 10',
         'isbelowldl': True, 'isaboveudl': False,
         'isldl': False, 'isudl': False},
        {'min': '10', 'max': '20', 'displaydl': False, 'manual': False,
         'input': '<15',
         'expresult': 15.0,  # '<' assignment not allowed
         'expformattedresult': '15.00',
         'isbelowldl': False, 'isaboveudl': False,
         'isldl': False, 'isudl': False},
        {'min': '10', 'max': '20', 'displaydl': False, 'manual': False,
         'input': '>15',
         'expresult': 15.0,  # '>' assignment not allowed
         'expformattedresult': '15.00',
         'isbelowldl': False, 'isaboveudl': False,
         'isldl': False, 'isudl': False},
        {'min': '10', 'max': '20', 'displaydl': False, 'manual': False,
         'input': '25',
         'expresult': 25.0,  # '>' assignment not allowed
         'expformattedresult': '> 20',
         'isbelowldl': False, 'isaboveudl': True,
         'isldl': False, 'isudl': False},

        # ROUND 2: selector on, manual off -> '<'/'>' allowed, but the
        # result snaps to the service's own LDL/UDL (no custom limits)
        {'min': '10', 'max': '20', 'displaydl': True, 'manual': False,
         'input': '5', 'expresult': 5.0, 'expformattedresult': '< 10',
         'isbelowldl': True, 'isaboveudl': False,
         'isldl': False, 'isudl': False},
        {'min': '10', 'max': '20', 'displaydl': True, 'manual': False,
         'input': '15', 'expresult': 15.0, 'expformattedresult': '15.00',
         'isbelowldl': False, 'isaboveudl': False,
         'isldl': False, 'isudl': False},
        {'min': '10', 'max': '20', 'displaydl': True, 'manual': False,
         'input': '25', 'expresult': 25.0, 'expformattedresult': '> 20',
         'isbelowldl': False, 'isaboveudl': True,
         'isldl': False, 'isudl': False},
        {'min': '10', 'max': '20', 'displaydl': True, 'manual': False,
         'input': '<5',
         'expresult': 10.0,  # '<' assignment allowed, but not custom
         'expformattedresult': '< 10',
         'isbelowldl': True, 'isaboveudl': False,
         'isldl': True, 'isudl': False},
        {'min': '10', 'max': '20', 'displaydl': True, 'manual': False,
         'input': '<15',
         'expresult': 10.0,  # '<' assignment allowed, but not custom
         'expformattedresult': '< 10',
         'isbelowldl': True, 'isaboveudl': False,
         'isldl': True, 'isudl': False},
        {'min': '10', 'max': '20', 'displaydl': True, 'manual': False,
         'input': '>15',
         'expresult': 20.0,  # '>' assignment allowed, but not custom
         'expformattedresult': '> 20',
         'isbelowldl': False, 'isaboveudl': True,
         'isldl': False, 'isudl': True},
        {'min': '10', 'max': '20', 'displaydl': True, 'manual': False,
         'input': '>25',
         'expresult': 20.0,  # '>' assignment allowed, but not custom
         'expformattedresult': '> 20',
         'isbelowldl': False, 'isaboveudl': True,
         'isldl': False, 'isudl': True},

        # ROUND 3: selector on, manual on -> custom '<'/'>' limits kept
        {'min': '10', 'max': '20', 'displaydl': True, 'manual': True,
         'input': '5', 'expresult': 5.0, 'expformattedresult': '< 10',
         'isbelowldl': True, 'isaboveudl': False,
         'isldl': False, 'isudl': False},
        {'min': '10', 'max': '20', 'displaydl': True, 'manual': True,
         'input': '15', 'expresult': 15.0, 'expformattedresult': '15.00',
         'isbelowldl': False, 'isaboveudl': False,
         'isldl': False, 'isudl': False},
        {'min': '10', 'max': '20', 'displaydl': True, 'manual': True,
         'input': '25', 'expresult': 25.0, 'expformattedresult': '> 20',
         'isbelowldl': False, 'isaboveudl': True,
         'isldl': False, 'isudl': False},
        {'min': '10', 'max': '20', 'displaydl': True, 'manual': True,
         'input': '<5',
         'expresult': 5.0,  # '<' assignment allowed
         'expformattedresult': '< 5',
         'isbelowldl': True, 'isaboveudl': False,
         'isldl': True, 'isudl': False},
        {'min': '10', 'max': '20', 'displaydl': True, 'manual': True,
         'input': '<15',
         'expresult': 15.0,  # '<' assignment allowed
         'expformattedresult': '< 15',
         'isbelowldl': True, 'isaboveudl': False,
         'isldl': True, 'isudl': False},
        {'min': '10', 'max': '20', 'displaydl': True, 'manual': True,
         'input': '>15',
         'expresult': 15.0,  # '>' assignment allowed
         'expformattedresult': '> 15',
         'isbelowldl': False, 'isaboveudl': True,
         'isldl': False, 'isudl': True},
        {'min': '10', 'max': '20', 'displaydl': True, 'manual': True,
         'input': '>25',
         'expresult': 25.0,  # '>' assignment allowed
         'expformattedresult': '> 25',
         'isbelowldl': False, 'isaboveudl': True,
         'isldl': False, 'isudl': True},
    ]
    for case in cases:
        s = self.services[0]
        s.setDetectionLimitSelector(case['displaydl'])
        s.setAllowManualDetectionLimit(case['manual'])
        s.setLowerDetectionLimit(case['min'])
        s.setUpperDetectionLimit(case['max'])

        # Input results
        # Client: Happy Hills
        # SampleType: Apple Pulp
        # Contact: Rita Mohale
        # Analyses: [Calcium, Copper]
        client = self.portal.clients['client-1']
        sampletype = self.portal.bika_setup.bika_sampletypes['sampletype-1']
        values = {'Client': client.UID(),
                  'Contact': client.getContacts()[0].UID(),
                  'SamplingDate': '2015-01-01',
                  'SampleType': sampletype.UID()}
        request = {}
        ar = create_analysisrequest(client, request, values, [s.UID()])
        wf = getToolByName(ar, 'portal_workflow')
        wf.doActionFor(ar, 'receive')
        an = ar.getAnalyses()[0].getObject()
        an.setResult(case['input'])
        self.assertEqual(an.isBelowLowerDetectionLimit(), case['isbelowldl'])
        self.assertEqual(an.isAboveUpperDetectionLimit(), case['isaboveudl'])
        self.assertEqual(an.isLowerDetectionLimit(), case['isldl'])
        self.assertEqual(an.isUpperDetectionLimit(), case['isudl'])
        self.assertEqual(float(an.getResult()), case['expresult'])
        self.assertEqual(an.getFormattedResult(html=False),
                         case['expformattedresult'])
        # BUGFIX: the original replaced '< ' with itself (a no-op, almost
        # certainly an HTML-entity-decoding artifact).  In HTML mode the
        # detection-limit operator must be escaped, so build the expected
        # string with '&lt;'/'&gt;' entities instead.
        expres = case['expformattedresult']
        if an.isBelowLowerDetectionLimit():
            expres = expres.replace('< ', '&lt; ')
        if an.isAboveUpperDetectionLimit():
            expres = expres.replace('> ', '&gt; ')
        self.assertEqual(an.getFormattedResult(html=True), expres)
        self.assertEqual(an.getFormattedResult(), expres)
def test_service_hidden_analysisrequest(self):
    """Check how the per-service 'hidden' setting propagates to an
    Analysis Request from the service itself, from an Analysis Profile
    and from an AR Template.

    Fixture expectations visible in the assertions below:
      - services[0] (Calcium): hidden unset on the service
      - services[1] (Copper):  hidden False on the service
      - services[2] (Iron):    hidden True on the service

    The 'matrix' loops encode expected outcomes per (service index,
    profile/template override) pair:
      value  2 -> setting present, not hidden
      value  1 -> setting present, hidden
      value -2 -> setting absent,  not hidden
      value -1 -> setting absent,  hidden
    (sign = whether 'hidden' appears in getAnalysisServiceSettings,
     magnitude = the isAnalysisServiceHidden outcome)
    """
    # Input results
    # Client: Happy Hills
    # SampleType: Apple Pulp
    # Contact: Rita Mohale
    # Analyses: [Calcium, Copper, Iron]
    client = self.portal.clients['client-1']
    sampletype = self.portal.bika_setup.bika_sampletypes['sampletype-1']
    request = {}
    services = [s.UID() for s in self.services]
    values = {
        'Client': client.UID(),
        'Contact': client.getContacts()[0].UID(),
        'SamplingDate': '2015-01-01',
        'SampleType': sampletype.UID()
    }
    # Plain AR: no profile, no template -> visibility comes from services
    ar = create_analysisrequest(client, request, values, services)
    self.assertFalse(
        'hidden' in ar.getAnalysisServiceSettings(services[0]))
    self.assertFalse(ar.isAnalysisServiceHidden(services[0]))
    self.assertFalse(
        ar.getAnalysisServiceSettings(services[1]).get('hidden'))
    self.assertFalse(ar.isAnalysisServiceHidden(services[1]))
    self.assertFalse(
        ar.getAnalysisServiceSettings(services[2]).get('hidden'))
    self.assertTrue(ar.isAnalysisServiceHidden(services[2]))
    # For Calcium (unset)
    uid = self.services[0].UID()
    self.assertFalse(self.services[0].getHidden())
    self.assertFalse(self.analysisprofile.isAnalysisServiceHidden(uid))
    self.assertFalse(
        'hidden' in self.artemplate.getAnalysisServiceSettings(uid))
    # For Copper (False)
    uid = self.services[1].UID()
    self.assertFalse(self.services[1].getHidden())
    self.assertFalse(ar.isAnalysisServiceHidden(uid))
    self.assertFalse('hidden' in ar.getAnalysisServiceSettings(uid))
    # For Iron (True)
    uid = self.services[2].UID()
    self.assertTrue(self.services[2].getHidden())
    self.assertTrue(ar.isAnalysisServiceHidden(uid))
    self.assertFalse('hidden' in ar.getAnalysisServiceSettings(uid))
    # Modify visibility for Calcium in AR: a setting without a 'hidden'
    # key must not create one; explicit False/True must persist
    uid = self.services[0].UID()
    sets = [{'uid': uid}]
    ar.setAnalysisServicesSettings(sets)
    self.assertFalse(ar.isAnalysisServiceHidden(uid))
    self.assertFalse('hidden' in ar.getAnalysisServiceSettings(uid))
    sets = [{'uid': uid, 'hidden': False}]
    ar.setAnalysisServicesSettings(sets)
    self.assertFalse(ar.isAnalysisServiceHidden(uid))
    self.assertTrue('hidden' in ar.getAnalysisServiceSettings(uid))
    sets = [{'uid': uid, 'hidden': True}]
    ar.setAnalysisServicesSettings(sets)
    self.assertTrue(ar.isAnalysisServiceHidden(uid))
    self.assertTrue('hidden' in ar.getAnalysisServiceSettings(uid))
    ar.setAnalysisServicesSettings([])
    # AR with profile with no changes
    values['Profiles'] = self.analysisprofile.UID()
    ar = create_analysisrequest(client, request, values, services)
    self.assertFalse(
        'hidden' in ar.getAnalysisServiceSettings(services[0]))
    self.assertFalse(
        ar.getAnalysisServiceSettings(services[1]).get('hidden'))
    self.assertFalse(
        ar.getAnalysisServiceSettings(services[2]).get('hidden'))
    uid = self.services[0].UID()
    self.assertFalse(self.services[0].getHidden())
    self.assertFalse(ar.isAnalysisServiceHidden(uid))
    self.assertFalse('hidden' in ar.getAnalysisServiceSettings(uid))
    uid = self.services[1].UID()
    self.assertFalse(self.services[1].getHidden())
    self.assertFalse(ar.isAnalysisServiceHidden(uid))
    self.assertFalse('hidden' in ar.getAnalysisServiceSettings(uid))
    uid = self.services[2].UID()
    self.assertTrue(self.services[2].getHidden())
    self.assertTrue(ar.isAnalysisServiceHidden(uid))
    self.assertFalse('hidden' in ar.getAnalysisServiceSettings(uid))
    # AR with template with no changes
    # NOTE(review): the template is passed as an object here but as a
    # UID further below -- presumably both are accepted; confirm.
    values['Template'] = self.artemplate
    del values['Profiles']
    ar = create_analysisrequest(client, request, values, services)
    self.assertFalse(
        'hidden' in ar.getAnalysisServiceSettings(services[0]))
    self.assertFalse(
        ar.getAnalysisServiceSettings(services[1]).get('hidden'))
    self.assertFalse(
        ar.getAnalysisServiceSettings(services[2]).get('hidden'))
    uid = self.services[0].UID()
    self.assertFalse(self.services[0].getHidden())
    self.assertFalse(ar.isAnalysisServiceHidden(uid))
    self.assertFalse('hidden' in ar.getAnalysisServiceSettings(uid))
    uid = self.services[1].UID()
    self.assertFalse(self.services[1].getHidden())
    self.assertFalse(ar.isAnalysisServiceHidden(uid))
    self.assertFalse('hidden' in ar.getAnalysisServiceSettings(uid))
    uid = self.services[2].UID()
    self.assertTrue(self.services[2].getHidden())
    self.assertTrue(ar.isAnalysisServiceHidden(uid))
    self.assertFalse('hidden' in ar.getAnalysisServiceSettings(uid))
    # AR with profile, with changes
    values['Profiles'] = self.analysisprofile.UID()
    del values['Template']
    matrix = [
        [2, 1, -2],  # AS = Not set
        [2, 1, -2],  # AS = False
        [2, 1, -1]   # AS = True
    ]
    for i in range(len(matrix)):
        sets = {'uid': services[i]}
        opts = [0, 1, 2]  # 0: hidden=False, 1: hidden=True, 2: key absent
        for j in opts:
            if j == 0:
                sets['hidden'] = False
            elif j == 1:
                sets['hidden'] = True
            else:
                del sets['hidden']
            self.analysisprofile.setAnalysisServicesSettings(sets)
            ar = create_analysisrequest(client, request, values, services)
            res = matrix[i][j]
            if res < 0:
                self.assertFalse(
                    'hidden' in ar.getAnalysisServiceSettings(services[i]))
            else:
                self.assertTrue(
                    'hidden' in ar.getAnalysisServiceSettings(services[i]))
            if abs(res) == 1:
                self.assertTrue(ar.isAnalysisServiceHidden(services[i]))
            elif abs(res) == 2:
                self.assertFalse(ar.isAnalysisServiceHidden(services[i]))
    # Restore
    self.analysisprofile.setAnalysisServicesSettings([])
    # AR with template, with changes (same matrix semantics as above)
    values['Template'] = self.artemplate.UID()
    del values['Profiles']
    matrix = [
        [2, 1, -2],  # AS = Not set
        [2, 1, -2],  # AS = False
        [2, 1, -1]   # AS = True
    ]
    for i in range(len(matrix)):
        sets = {'uid': services[i]}
        opts = [0, 1, 2]
        for j in opts:
            if j == 0:
                sets['hidden'] = False
            elif j == 1:
                sets['hidden'] = True
            else:
                del sets['hidden']
            self.artemplate.setAnalysisServicesSettings(sets)
            ar = create_analysisrequest(client, request, values, services)
            res = matrix[i][j]
            if res < 0:
                self.assertFalse(
                    'hidden' in ar.getAnalysisServiceSettings(services[i]))
            else:
                self.assertTrue(
                    'hidden' in ar.getAnalysisServiceSettings(services[i]))
            if abs(res) == 1:
                self.assertTrue(ar.isAnalysisServiceHidden(services[i]))
            elif abs(res) == 2:
                self.assertFalse(ar.isAnalysisServiceHidden(services[i]))
    # Restore
    self.artemplate.setAnalysisServicesSettings([])
def __call__(self):
    """AJAX endpoint that creates one Analysis Request per non-empty
    form column and returns a JSON response.

    Flow: CSRF/POST checks -> collect columns that carry 'Analyses' ->
    validate required schema fields -> resolve '*_uid' form values ->
    merge manually entered min/max/error specs -> build partitions ->
    create each AR -> return either {'errors': ...} or {'success': ...}
    (plus sticker data when auto-printing on registration applies).
    """
    form = self.request.form
    # CSRF token check and POST-only guard (plone.protect)
    plone.protect.CheckAuthenticator(self.request.form)
    plone.protect.PostOnly(self.request.form)
    came_from = 'came_from' in form and form['came_from'] or 'add'
    wftool = getToolByName(self.context, 'portal_workflow')
    uc = getToolByName(self.context, 'uid_catalog')
    bsc = getToolByName(self.context, 'bika_setup_catalog')
    errors = {}
    form_parts = json.loads(self.request.form['parts'])
    # First make a list of non-empty columns
    columns = []
    for column in range(int(form['col_count'])):
        name = 'ar.%s' % column
        ar = form.get(name, None)
        if ar and 'Analyses' in ar.keys():
            columns.append(column)
    if len(columns) == 0:
        ajax_form_error(errors,
                        message=t(_("No analyses have been selected")))
        return json.dumps({'errors': errors})
    # Now some basic validation
    required_fields = [
        field.getName() for field in AnalysisRequestSchema.fields()
        if field.required
    ]
    for column in columns:
        formkey = "ar.%s" % column
        ar = form[formkey]
        # check that required fields have values
        for field in required_fields:
            # This one is still special.
            if field in ['RequestID']:
                continue
            # And these are not required if this is a secondary AR
            if ar.get('Sample', '') != '' and field in [
                'SamplingDate', 'SampleType'
            ]:
                continue
            if not ar.get(field, ''):
                ajax_form_error(errors, field, column)
    # Return errors if there are any
    if errors:
        return json.dumps({'errors': errors})
    # Get the prices from the form data
    prices = form.get('Prices', None)
    # Initialize the Analysis Request collection
    ARs = []
    # if a new profile is created automatically,
    # this flag triggers the status message
    new_profile = None
    # The actual submission
    for column in columns:
        # Get partitions from the form data
        if form_parts:
            partitions = form_parts[str(column)]
        else:
            partitions = []
        # Get the form data using the appropriate form key
        formkey = "ar.%s" % column
        values = form[formkey].copy()
        # resolved values is formatted as acceptable by archetypes
        # widget machines
        resolved_values = {}
        for k, v in values.items():
            # Analyses, we handle that specially.
            if k == 'Analyses':
                continue
            # Insert the reference *_uid values instead of titles.
            if "_uid" in k:
                v = values[k]
                # Comma-separated UID strings become lists
                v = v.split(",") if v and "," in v else v
                fname = k.replace("_uid", "")
                resolved_values[fname] = v
                continue
            # we want to write the UIDs and ignore the title values
            if k + "_uid" in values:
                continue
            resolved_values[k] = values[k]
        # Get the analyses from the form data
        analyses = values["Analyses"]
        # Gather the specifications from the form
        specs = json.loads(form['copy_to_new_specs']).get(str(column), {})
        if not specs:
            specs = json.loads(form['specs']).get(str(column), {})
        if specs:
            specs = dicts_to_dict(specs, 'keyword')
        # Modify the spec with all manually entered values
        for service_uid in analyses:
            min_element_name = "ar.%s.min.%s" % (column, service_uid)
            max_element_name = "ar.%s.max.%s" % (column, service_uid)
            error_element_name = "ar.%s.error.%s" % (column, service_uid)
            # NOTE(review): getKeyword is read without calling it --
            # presumably a catalog-brain metadata attribute; confirm.
            service_keyword = bsc(UID=service_uid)[0].getKeyword
            if min_element_name in form:
                if service_keyword not in specs:
                    specs[service_keyword] = {}
                specs[service_keyword]["keyword"] = service_keyword
                specs[service_keyword]["min"] = form[min_element_name]
                specs[service_keyword]["max"] = form[max_element_name]
                specs[service_keyword]["error"] = form[error_element_name]
        # Selecting a template sets the hidden 'parts' field to template
        # values.  Selecting a profile will allow ar_add.js to fill in the
        # parts field.  The result is the same once we are here.
        if not partitions:
            partitions = [{
                'services': [],
                'container': None,
                'preservation': '',
                'separate': False
            }]
        # Apply DefaultContainerType to partitions without a container
        default_container_type = resolved_values.get(
            'DefaultContainerType', None)
        if default_container_type:
            container_type = bsc(UID=default_container_type)[0].getObject()
            containers = container_type.getContainers()
            for partition in partitions:
                if not partition.get("container", None):
                    partition['container'] = containers
        # Retrieve the catalogue reference to the client
        client = uc(UID=resolved_values['Client'])[0].getObject()
        # Create the Analysis Request
        ar = create_analysisrequest(client,
                                    self.request,
                                    resolved_values,
                                    analyses=analyses,
                                    partitions=partitions,
                                    specifications=specs.values(),
                                    prices=prices)
        # Add the created analysis request to the list
        ARs.append(ar.getId())
    # Display the appropriate message after creation
    if len(ARs) > 1:
        message = _("Analysis requests ${ARs} were successfully created.",
                    mapping={'ARs': safe_unicode(', '.join(ARs))})
    else:
        message = _("Analysis request ${AR} was successfully created.",
                    mapping={'AR': safe_unicode(ARs[0])})
    self.context.plone_utils.addPortalMessage(message, 'info')
    # Automatic label printing
    # Won't print labels for Register on Secondary ARs
    new_ars = None
    if came_from == 'add':
        # IDs ending in '01' identify first (non-secondary) ARs
        new_ars = [ar for ar in ARs if ar[-2:] == '01']
    if 'register' in self.context.bika_setup.getAutoPrintStickers(
    ) and new_ars:
        return json.dumps({
            'success': message,
            'stickers': new_ars,
            'stickertemplate':
                self.context.bika_setup.getAutoStickerTemplate()
        })
    else:
        return json.dumps({'success': message})
def test_DecimalMarkWithSciNotation(self):
    """Check getFormattedResult() against every scientific-notation
    style when the setup-wide decimal mark is a comma.

    For each matrix row: set the service precision and exponential
    format precision, enter the raw result, and compare the formatted
    result (in the requested notation) with the expected string.
    """
    # Notations
    # '1' => aE+b / aE-b
    # '2' => ax10^b / ax10^-b
    # '3' => ax10^b / ax10^-b (with superscript)
    # '4' => a·10^b / a·10^-b
    # '5' => a·10^b / a·10^-b (with superscript)
    matrix = [
        # as_prec as_exp not decimalmark result   formatted result
        # ------- ------ --- ----------- -------- --------------------
        [0, 0, 1, '0', '0'],
        [0, 0, 2, '0', '0'],
        [0, 0, 3, '0', '0'],
        [0, 0, 4, '0', '0'],
        [0, 0, 5, '0', '0'],
        [2, 5, 1, '0.01', '0,01'],
        [2, 5, 2, '0.01', '0,01'],
        [2, 5, 3, '0.01', '0,01'],
        [2, 5, 4, '0.01', '0,01'],
        [2, 5, 5, '0.01', '0,01'],
        [2, 1, 1, '0.123', '1,2e-01'],
        [2, 1, 2, '0.123', '1,2x10^-1'],
        [2, 1, 3, '0.123', '1,2x10<sup>-1</sup>'],
        [2, 1, 4, '0.123', '1,2·10^-1'],
        [2, 1, 5, '0.123', '1,2·10<sup>-1</sup>'],
        [2, 1, 1, '1.234', '1,23'],
        [2, 1, 2, '1.234', '1,23'],
        [2, 1, 3, '1.234', '1,23'],
        [2, 1, 4, '1.234', '1,23'],
        [2, 1, 5, '1.234', '1,23'],
        [2, 1, 1, '12.345', '1,235e01'],
        [2, 1, 2, '12.345', '1,235x10^1'],
        [2, 1, 3, '12.345', '1,235x10<sup>1</sup>'],
        [2, 1, 4, '12.345', '1,235·10^1'],
        [2, 1, 5, '12.345', '1,235·10<sup>1</sup>'],
        [4, 3, 1, '-123.45678', '-123,4568'],
        [4, 3, 2, '-123.45678', '-123,4568'],
        [4, 3, 3, '-123.45678', '-123,4568'],
        [4, 3, 4, '-123.45678', '-123,4568'],
        [4, 3, 5, '-123.45678', '-123,4568'],
        [4, 3, 1, '-1234.5678', '-1,2345678e03'],
        [4, 3, 2, '-1234.5678', '-1,2345678x10^3'],
        [4, 3, 3, '-1234.5678', '-1,2345678x10<sup>3</sup>'],
        [4, 3, 4, '-1234.5678', '-1,2345678·10^3'],
        [4, 3, 5, '-1234.5678', '-1,2345678·10<sup>3</sup>'],
    ]
    s = self.service
    s.setLowerDetectionLimit(
        '-99999')  # We want to test results below 0 too
    prevm = []
    an = None
    bs = self.portal.bika_setup
    bs.setResultsDecimalMark(',')
    for m in matrix:
        # Create the AR and set the values to the AS, but only if necessary
        if not an or prevm[0] != m[0] or prevm[1] != m[1]:
            s.setPrecision(m[0])
            s.setExponentialFormatPrecision(m[1])
            self.assertEqual(s.getPrecision(), m[0])
            self.assertEqual(s.Schema().getField('Precision').get(s),
                             m[0])
            self.assertEqual(s.getExponentialFormatPrecision(), m[1])
            self.assertEqual(
                s.Schema().getField('ExponentialFormatPrecision').get(s),
                m[1])
            client = self.portal.clients['client-1']
            sampletype = bs.bika_sampletypes['sampletype-1']
            values = {
                'Client': client.UID(),
                'Contact': client.getContacts()[0].UID(),
                'SamplingDate': '2015-01-01',
                'SampleType': sampletype.UID()
            }
            ar = create_analysisrequest(client, {}, values, [s.UID()])
            wf = getToolByName(ar, 'portal_workflow')
            wf.doActionFor(ar, 'receive')
            an = ar.getAnalyses()[0].getObject()
            prevm = m
        an.setResult(m[3])
        self.assertEqual(an.getResult(), m[3])
        self.assertEqual(an.Schema().getField('Result').get(an), m[3])
        fr = an.getFormattedResult(sciformat=m[2],
                                   decimalmark=bs.getResultsDecimalMark())
        self.assertEqual(fr, m[4])
def test_analysis_method_calculation(self):
    """For each formula fixture in self.formulas: install the formula and
    its interim fields on self.calculation, create and receive an AR with
    the regular services plus the calculated service, enter the dependent
    results and interim values, then trigger calculateResult() and check
    the computed value against the fixture's 'exresult'.
    """
    # Input results
    # Client: Happy Hills
    # SampleType: Apple Pulp
    # Contact: Rita Mohale
    # Analyses: [Calcium, Mg, Total Hardness]
    from bika.lims.utils.analysisrequest import create_analysisrequest
    for f in self.formulas:
        # Set custom calculation
        self.calculation.setFormula(f['formula'])
        self.assertEqual(self.calculation.getFormula(), f['formula'])
        interims = []
        for k, v in f['interims'].items():
            interims.append({
                'keyword': k,
                'title': k,
                'value': v,
                'hidden': False,
                'type': 'int',
                'unit': ''
            })
        self.calculation.setInterimFields(interims)
        self.assertEqual(self.calculation.getInterimFields(), interims)
        # Create the AR
        client = self.portal.clients['client-1']
        sampletype = self.portal.bika_setup.bika_sampletypes[
            'sampletype-1']
        values = {
            'Client': client.UID(),
            'Contact': client.getContacts()[0].UID(),
            'SamplingDate': '2015-01-01',
            'SampleType': sampletype.UID()
        }
        request = {}
        services = [s.UID() for s in self.services] + \
            [self.calcservice.UID()]
        ar = create_analysisrequest(client, request, values, services)
        doActionFor(ar, 'receive')
        # Set results and interims
        calcanalysis = None
        for an in ar.getAnalyses():
            an = an.getObject()
            key = an.getKeyword()
            if key in f['analyses']:
                an.setResult(f['analyses'][key])
                if an.isLowerDetectionLimit() \
                        or an.isUpperDetectionLimit():
                    # DL results are stored without the '<'/'>' operator
                    operator = an.getDetectionLimitOperand()
                    strres = f['analyses'][key].replace(operator, '')
                    self.assertEqual(an.getResult(), strres)
                else:
                    self.assertEqual(an.getResult(), f['analyses'][key])
            elif key == self.calcservice.getKeyword():
                calcanalysis = an
            # Set interims: replace values for keywords present in the
            # fixture, keep the others untouched
            interims = an.getInterimFields()
            intermap = []
            for i in interims:
                if i['keyword'] in f['interims']:
                    ival = float(f['interims'][i['keyword']])
                    intermap.append({
                        'keyword': i['keyword'],
                        'value': ival,
                        'title': i['title'],
                        'hidden': i['hidden'],
                        'type': i['type'],
                        'unit': i['unit']
                    })
                else:
                    intermap.append(i)
            an.setInterimFields(intermap)
            self.assertEqual(an.getInterimFields(), intermap)
        # Let's go.. calculate and check result
        success = calcanalysis.calculateResult(True, True)
        self.assertTrue(success, True)
        self.assertNotEqual(calcanalysis.getResult(), '',
                            'getResult returns an empty string')
        self.assertEqual(float(calcanalysis.getResult()),
                         float(f['exresult']))
def add_analysisrequest(self, client, kwargs, services):
    """Create and return a new Analysis Request for ``client``.

    Thin wrapper around ``create_analysisrequest`` that supplies this
    view's request; ``kwargs`` carries the AR field values and
    ``services`` the analyses to attach.
    """
    new_ar = create_analysisrequest(client, self.request, kwargs, services)
    return new_ar
def test_ar_manageresults_manualuncertainty(self):
    """Check manual vs range-derived uncertainties on analyses.

    - Any analysis accepts a manually set uncertainty; clearing it
      (setUncertainty(None)) falls back to the value derived from the
      service's uncertainty ranges for the current result, if any.
    - Copper's fixture ranges: 5-10 -> 0.02, 10-20 -> 0.4, >20 -> None.
    - Iron additionally couples uncertainty with result precision, so
      getFormattedResult() rounding changes with the uncertainty.
    """
    # Input results
    # Client: Happy Hills
    # SampleType: Apple Pulp
    # Contact: Rita Mohale
    # Analyses: [Calcium, Copper]
    client = self.portal.clients['client-1']
    sampletype = self.portal.bika_setup.bika_sampletypes['sampletype-1']
    values = {'Client': client.UID(),
              'Contact': client.getContacts()[0].UID(),
              'SamplingDate': '2015-01-01',
              'SampleType': sampletype.UID()}
    request = {}
    services = [s.UID() for s in self.services]
    ar = create_analysisrequest(client, request, values, services)
    # Basic uncertainty input
    for a in ar.getAnalyses():
        an = a.getObject()
        self.assertFalse(an.getUncertainty())
        an.setUncertainty('0.2')
        self.assertEqual(an.getUncertainty(), 0.2)
        an.setUncertainty('0.4')
        self.assertEqual(an.getUncertainty(), 0.4)
        an.setUncertainty(None)
        self.assertFalse(an.getUncertainty())
    # Copper (advanced uncertainty)
    cu = [a.getObject() for a in ar.getAnalyses()
          if a.getObject().getServiceUID() == self.services[1].UID()][0]
    self.assertFalse(cu.getUncertainty())
    # Uncertainty range 5 - 10 (0.2)
    cu.setResult('5.5')
    self.assertEqual(cu.getResult(), '5.5')
    self.assertEqual(cu.getUncertainty(), 0.02)
    cu.setUncertainty('0.8')
    self.assertEqual(cu.getUncertainty(), 0.8)
    cu.setUncertainty(None)
    self.assertEqual(cu.getUncertainty(), 0.02)
    # Uncertainty range 10 - 20 (0.4)
    cu.setResult('15.5')
    self.assertEqual(cu.getResult(), '15.5')
    self.assertEqual(cu.getUncertainty(), 0.4)
    cu.setUncertainty('0.7')
    self.assertEqual(cu.getUncertainty(), 0.7)
    cu.setUncertainty(None)
    self.assertEqual(cu.getUncertainty(), 0.4)
    # Uncertainty range >20 (None)
    cu.setResult('25.5')
    self.assertEqual(cu.getResult(), '25.5')
    self.assertFalse(cu.getUncertainty())
    cu.setUncertainty('0.9')
    self.assertEqual(cu.getUncertainty(), 0.9)
    cu.setUncertainty(None)
    self.assertFalse(cu.getUncertainty())
    # Iron (advanced uncertainty with precision)
    fe = [a.getObject() for a in ar.getAnalyses()
          if a.getObject().getServiceUID() == self.services[2].UID()][0]
    self.assertFalse(cu.getUncertainty())
    # Uncertainty range 0 - 5 (0.0015)
    fe.setResult('2.3452')
    self.assertEqual(fe.getUncertainty(), 0.0015)
    self.assertEqual(fe.getResult(), '2.3452')
    self.assertEqual(fe.getFormattedResult(), '2.345')
    fe.setUncertainty('0.06')
    self.assertEqual(fe.getUncertainty(), 0.06)
    self.assertEqual(fe.getResult(), '2.3452')
    self.assertEqual(fe.getFormattedResult(), '2.35')
    fe.setUncertainty('0.7')
    self.assertEqual(fe.getUncertainty(), 0.7)
    self.assertEqual(fe.getResult(), '2.3452')
    self.assertEqual(fe.getFormattedResult(), '2.3')
    fe.setUncertainty(None)
    self.assertEqual(fe.getUncertainty(), 0.0015)
    self.assertEqual(fe.getResult(), '2.3452')
    self.assertEqual(fe.getFormattedResult(), '2.345')
    # Uncertainty range 5 - 10 (0.02)
    fe.setResult('8.23462')
    self.assertEqual(fe.getUncertainty(), 0.02)
    self.assertEqual(fe.getResult(), '8.23462')
    self.assertEqual(fe.getFormattedResult(), '8.23')
    fe.setUncertainty('0.6')
    self.assertEqual(fe.getUncertainty(), 0.6)
    self.assertEqual(fe.getResult(), '8.23462')
    self.assertEqual(fe.getFormattedResult(), '8.2')
    fe.setUncertainty('0.07')
    self.assertEqual(fe.getUncertainty(), 0.07)
    self.assertEqual(fe.getResult(), '8.23462')
    self.assertEqual(fe.getFormattedResult(), '8.23')
    fe.setUncertainty(None)
    self.assertEqual(fe.getUncertainty(), 0.02)
    self.assertEqual(fe.getResult(), '8.23462')
    self.assertEqual(fe.getFormattedResult(), '8.23')
    # Uncertainty range >20 (None): a manual uncertainty still shrinks
    # the analysis precision (service precision stays at 2)
    fe.setResult('25.523345')
    self.assertFalse(fe.getUncertainty())
    self.assertEqual(fe.getResult(), '25.523345')
    self.assertEqual(fe.getPrecision(), 2)
    self.assertEqual(fe.getService().getPrecision(), 2)
    self.assertEqual(fe.getFormattedResult(), '25.52')
    fe.setUncertainty('0.9')
    self.assertEqual(fe.getUncertainty(), 0.9)
    self.assertEqual(fe.getResult(), '25.523345')
    self.assertEqual(fe.getPrecision(), 1)
    self.assertEqual(fe.getService().getPrecision(), 2)
    self.assertEqual(fe.getFormattedResult(), '25.5')
    fe.setUncertainty(None)
    self.assertFalse(fe.getUncertainty())
    self.assertEqual(fe.getResult(), '25.523345')
    self.assertEqual(fe.getPrecision(), 2)
    self.assertEqual(fe.getService().getPrecision(), 2)
    self.assertEqual(fe.getFormattedResult(), '25.52')
def import_items(self):
    """Import every row of this ARImport's ItemData into a new Analysis
    Request for the import's client.

    Looks up an existing Batch by ID (or, failing that, by title) or
    creates a new one, resolves each row's analyses into service UIDs,
    then creates one AR per row while advancing the progress bar.
    """
    context = self.context
    request = context.REQUEST
    bika_catalog = getToolByName(context, 'bika_catalog')
    client = context.aq_parent
    # First client contact whose full name matches the import's contact
    # name.  NOTE(review): raises IndexError if no contact matches --
    # presumably guaranteed by earlier validation; confirm.
    contact = [c for c in client.objectValues('Contact')
               if c.getFullname() == self.context.getContactName()][0]
    self.progressbar_init('Submitting AR Import')
    # Find existing batch or create new batch if required.
    batch = None
    batch_id = context.getBatchID()
    batch_title = context.getBatchTitle()
    # First try to find an existing batch, by id then by title
    brains = bika_catalog(portal_type='Batch', id=batch_id)
    if not brains:
        brains = bika_catalog(portal_type='Batch', title=batch_title)
    if brains:
        batch = brains[0]
    if not batch:
        # Create batch if it does not exist
        _bid = batch_id if batch_id else tmpID()
        batch = _createObjectByType("Batch", client, _bid)
        batch.unmarkCreationFlag()
        batch.edit(
            title=batch_title,
            description=context.getBatchDescription(),
            ClientBatchID=context.getClientBatchID(),
            Remarks=context.getLabBatchComment(),
            ClientBatchComment=context.getClientBatchComment()
        )
        if not batch_id:
            # No explicit id: let AT pick one from the tmpID placeholder
            batch._renameAfterCreation()
        event.notify(ObjectInitializedEvent(batch))
        batch.at_post_create_script()
    itemdata = context.Schema()['ItemData'].get(context)
    for i, item in enumerate(itemdata):
        # Resolve the row's analyses into AnalysisService UIDs
        service_uids = []
        for a in item['Analyses']:
            for service in self.resolve_analyses(a):
                if isinstance(service, AnalysisService):
                    service_uids.append(service.UID())
        # Create AR.  BUGFIX: the original dict literal listed the
        # 'Remarks' key twice; the duplicate has been removed.
        ar_values = {
            'Contact': contact,
            'ClientOrderNumber': context.getClientOrderNumber(),
            'Remarks': item['Remarks'],
            'Batch': batch if batch else None,
            'ClientReference': context.getClientReference(),
            'ClientSampleID': item['ClientSampleID'],
            'SampleType': context.getSampleType(),
            # SampleSite field: extenders/arimport,sample,analysisrequest.py
            'SampleSite': context.getField('SampleSite').get(context),
            'DateSampled': DateTime(item['DateSampled']),
            'SamplingDate': DateTime(item['DateSampled']),
        }
        create_analysisrequest(
            client,
            request,
            ar_values,
            analyses=service_uids,
            partitions=[{}]
        )
        self.progressbar_progress(i + 1, len(itemdata))
def test_LIMS_2221_DecimalMarkWithSciNotation(self):
    """LIMS-2221: formatted results must apply the configured decimal
    mark in every supported scientific-notation style.

    Notation codes used in the matrix below:
      '1' => aE+b / aE-b
      '2' => ax10^b / ax10^-b
      '3' => ax10^b / ax10^-b (with superscript)
      '4' => a.10^b / a.10^-b (middle dot)
      '5' => a.10^b / a.10^-b (middle dot, with superscript)
    """
    matrix = [
        # as_prec as_exp not mark result        formatted result
        # ------- ------ --- ---- ------        ----------------
        [0, 0, 1, ',', '0', '0'],
        [0, 0, 2, ',', '0', '0'],
        [0, 0, 3, ',', '0', '0'],
        [0, 0, 4, ',', '0', '0'],
        [0, 0, 5, ',', '0', '0'],
        [2, 5, 1, ',', '0.01', '0,01'],
        [2, 5, 2, ',', '0.01', '0,01'],
        [2, 5, 3, ',', '0.01', '0,01'],
        [2, 5, 4, ',', '0.01', '0,01'],
        [2, 5, 5, ',', '0.01', '0,01'],
        [2, 1, 1, ',', '0.123', '1,2e-01'],
        [2, 1, 2, ',', '0.123', '1,2x10^-1'],
        [2, 1, 3, ',', '0.123', '1,2x10<sup>-1</sup>'],
        [2, 1, 4, ',', '0.123', '1,2·10^-1'],
        [2, 1, 5, ',', '0.123', '1,2·10<sup>-1</sup>'],
        [2, 1, 1, ',', '1.234', '1,23'],
        [2, 1, 2, ',', '1.234', '1,23'],
        [2, 1, 3, ',', '1.234', '1,23'],
        [2, 1, 4, ',', '1.234', '1,23'],
        [2, 1, 5, ',', '1.234', '1,23'],
        [2, 1, 1, ',', '12.345', '1,235e01'],
        [2, 1, 2, ',', '12.345', '1,235x10^1'],
        [2, 1, 3, ',', '12.345', '1,235x10<sup>1</sup>'],
        [2, 1, 4, ',', '12.345', '1,235·10^1'],
        [2, 1, 5, ',', '12.345', '1,235·10<sup>1</sup>'],
        [4, 3, 1, ',', '-123.45678', '-123,4568'],
        [4, 3, 2, ',', '-123.45678', '-123,4568'],
        [4, 3, 3, ',', '-123.45678', '-123,4568'],
        [4, 3, 4, ',', '-123.45678', '-123,4568'],
        [4, 3, 5, ',', '-123.45678', '-123,4568'],
        [4, 3, 1, ',', '-1234.5678', '-1,2345678e03'],
        [4, 3, 2, ',', '-1234.5678', '-1,2345678x10^3'],
        [4, 3, 3, ',', '-1234.5678', '-1,2345678x10<sup>3</sup>'],
        [4, 3, 4, ',', '-1234.5678', '-1,2345678·10^3'],
        [4, 3, 5, ',', '-1234.5678', '-1,2345678·10<sup>3</sup>'],
    ]
    serv = self.service
    serv.setLowerDetectionLimit('-99999')  # test results below 0 too
    prevm = []
    an = None
    bs = get_bika_setup()
    for m in matrix:
        as_prec = m[0]
        as_exp = m[1]
        notation = m[2]
        _dm = m[3]
        _result = m[4]
        _expected = m[5]
        bs.setResultsDecimalMark(_dm)
        # Create the AR and set the values to the AS, but only if
        # necessary: reuse the previous analysis while the service
        # precision settings are unchanged.
        if not an or prevm[0] != as_prec or prevm[1] != as_exp:
            serv.setPrecision(as_prec)
            serv.setExponentialFormatPrecision(as_exp)
            self.assertEqual(serv.getPrecision(), as_prec)
            self.assertEqual(
                serv.Schema().getField('Precision').get(serv), as_prec)
            self.assertEqual(serv.getExponentialFormatPrecision(), as_exp)
            self.assertEqual(
                serv.Schema().getField(
                    'ExponentialFormatPrecision').get(serv), as_exp)
            values = {'Client': self.client.UID(),
                      'Contact': self.client.getContacts()[0].UID(),
                      'SamplingDate': '2015-01-01',
                      'SampleType': self.sampletype.UID()}
            ar = create_analysisrequest(self.client, {}, values,
                                        [serv.UID()])
            do_transition_for(ar, 'receive')
            an = ar.getAnalyses()[0].getObject()
            prevm = m
        an.setResult(_result)
        # Results are stored and compared as strings.
        self.assertEqual(an.getResult(), _result)
        self.assertEqual(an.Schema().getField('Result').get(an), _result)
        decimalmark = bs.getResultsDecimalMark()
        fr = an.getFormattedResult(sciformat=notation,
                                   decimalmark=decimalmark)
        self.assertEqual(fr, _expected)
def test_calculation_uncertainties_precision(self):
    """For each configured formula, check that a calculated analysis is
    formatted with the precision derived from the service uncertainty
    ranges (PrecisionFromUncertainty).
    """
    # Input results
    # Client: Happy Hills
    # SampleType: Apple Pulp
    # Contact: Rita Mohale
    # Analyses: [Calcium, Mg, Total Hardness]
    for f in self.formulas_precision:
        self.calculation.setFormula(f['formula'])
        self.assertEqual(self.calculation.getFormula(), f['formula'])
        interims = []
        for k, v in f['interims'].items():
            interims.append({
                'keyword': k,
                'title': k,
                'value': v,
                'hidden': False,
                'type': 'int',
                'unit': ''
            })
        self.calculation.setInterimFields(interims)
        self.assertEqual(self.calculation.getInterimFields(), interims)
        for case in f['test_uncertainties_precision']:
            # Define precision: every service (plus the calculated one)
            # derives its precision from the case's uncertainty ranges.
            services_obj = [s for s in self.services] + [self.calcservice]
            for service in services_obj:
                service.setPrecisionFromUncertainty(True)
                service.setUncertainties(case['uncertainties'])
            # Create the AR
            client = self.portal.clients['client-1']
            sampletype = self.portal.bika_setup.bika_sampletypes[
                'sampletype-1']
            values = {
                'Client': client.UID(),
                'Contact': client.getContacts()[0].UID(),
                'SamplingDate': '2015-01-01',
                'SampleType': sampletype.UID()
            }
            request = {}
            services = [s.UID() for s in self.services] + \
                [self.calcservice.UID()]
            ar = create_analysisrequest(client, request, values, services)
            wf = getToolByName(ar, 'portal_workflow')
            wf.doActionFor(ar, 'receive')
            # Set results and interims
            calcanalysis = None
            for an in ar.getAnalyses():
                an = an.getObject()
                key = an.getKeyword()
                if key in f['analyses']:
                    an.setResult(f['analyses'][key])
                    if an.isLowerDetectionLimit() \
                            or an.isUpperDetectionLimit():
                        # Detection-limit results are stored without the
                        # operator and normalised through float().
                        operator = an.getDetectionLimitOperand()
                        strres = f['analyses'][key].replace(operator, '')
                        self.assertEqual(an.getResult(), str(float(strres)))
                    else:
                        # The analysis' results have to be always strings
                        self.assertEqual(an.getResult(),
                                         str(f['analyses'][key]))
                elif key == self.calcservice.getKeyword():
                    calcanalysis = an
                # Set interims: replace values for keywords defined by
                # the formula, keep the rest untouched.
                interims = an.getInterimFields()
                intermap = []
                for i in interims:
                    if i['keyword'] in f['interims']:
                        ival = float(f['interims'][i['keyword']])
                        intermap.append({
                            'keyword': i['keyword'],
                            'value': ival,
                            'title': i['title'],
                            'hidden': i['hidden'],
                            'type': i['type'],
                            'unit': i['unit']
                        })
                    else:
                        intermap.append(i)
                an.setInterimFields(intermap)
                self.assertEqual(an.getInterimFields(), intermap)
            # Let's go.. calculate and check result
            calcanalysis.calculateResult(True, True)
            self.assertEqual(calcanalysis.getFormattedResult(),
                             case['expected_result'])
def test_RegularAnalyses(self):
    """See docs/imm_results_entry_behaviour.png for further details.

    First runs a basic simulation (case B1), then iterates over every
    reproducible rule string from the behaviour matrix, configuring the
    service / methods / instruments accordingly and checking the
    constraints returned by get_method_instrument_constraints().
    """
    # Basic simulation
    # B1
    rule = 'YYYYYYYY'
    output = [1, 1, 1, 1, 1]
    self.service.setInstrumentEntryOfResults(True)
    self.service.setManualEntryOfResults(True)
    self.service.setMethods([self.method])
    self.instrument1.setMethod(self.method)
    self.instrument2.setMethod(self.method)
    self.instrument1.setDisposeUntilNextCalibrationTest(False)
    self.instrument2.setDisposeUntilNextCalibrationTest(False)
    self.service.setInstruments([self.instrument1, self.instrument2])
    self.service.setInstrument(self.instrument1)
    self.assertTrue(self.service.getManualEntryOfResults())
    self.assertTrue(self.service.getInstrumentEntryOfResults())
    # BUGFIX: the four UID checks below previously used
    # assertTrue(a, b), which treats the second argument as the failure
    # *message* and never compares the values. assertEqual actually
    # performs the comparison.
    self.assertEqual(self.instrument1.getMethod().UID(),
                     self.method.UID())
    self.assertEqual(self.instrument2.getMethod().UID(),
                     self.method.UID())
    self.assertFalse(self.instrument1.getDisposeUntilNextCalibrationTest())
    self.assertFalse(self.instrument2.getDisposeUntilNextCalibrationTest())
    self.assertTrue(len(self.service.getAvailableInstruments()) == 2)
    self.assertTrue(
        self.instrument1 in self.service.getAvailableInstruments())
    self.assertTrue(
        self.instrument2 in self.service.getAvailableInstruments())
    self.assertEqual(self.service.getInstrument().UID(),
                     self.instrument1.UID())
    self.assertTrue(len(self.service.getMethods()) == 1)
    self.assertEqual(self.service.getMethods()[0].UID(), self.method.UID())
    ar = self.create_ar(self.service)
    auids = [ar.getAnalyses()[0].UID]
    constraint = get_method_instrument_constraints(ar, auids)[auids[0]]
    cons = constraint.get(self.method.UID(), None)
    self.assertFalse(cons is None)
    trimmed = cons[10][1:len(rule) + 1]
    self.assertEqual(trimmed, rule)
    self.assertEqual(cons[0], output[0])  # Method list visible?
    self.assertEqual(cons[1], output[1])  # None in methods list?
    self.assertEqual(cons[2], output[2])  # Instrument list visible?
    self.assertEqual(cons[3], output[3])  # None in instruments list?
    self.assertEqual(cons[5], output[4])  # Results field editable

    # Automatic simulation
    conds = {
        'YYYYYYYY': [1, 1, 1, 1, 1, 0],   # B1
        'YYYYYYNYY': [1, 1, 1, 1, 1, 1],  # B3
        'YYYYYYNYN': [1, 1, 1, 1, 1, 1],  # B4
        'YYYYYN': [1, 1, 1, 1, 1, 1],     # B6
        'YYYYN': [1, 1, 1, 1, 1, 0],      # B7
        'YYYNYYYY': [1, 1, 1, 0, 1, 0],   # B8
        'YYYNYYNYY': [1, 1, 1, 0, 1, 1],  # B10
        'YYYNYYNYN': [1, 1, 1, 1, 0, 1],  # B11
        'YYYNYN': [1, 1, 1, 1, 0, 1],     # B13
        'YYNYYYYY': [1, 1, 1, 1, 1, 0],   # B15
        'YYNYYYNYY': [1, 1, 1, 1, 1, 1],  # B17
        'YYNYYYNYN': [1, 1, 1, 1, 1, 1],  # B18
        'YYNYYN': [1, 1, 1, 1, 1, 1],     # B20
        'YYNYN': [1, 1, 1, 1, 1, 0],      # B21
        'YNY': [2, 0, 0, 0, 1, 0],        # B22
        'YNN': [0, 0, 0, 0, 1, 0],        # B23
        'NYYYYYYY': [3, 2, 1, 1, 1, 0],   # B24
        'NYYYYYNYY': [3, 2, 1, 1, 1, 1],  # B26
        'NYYYYYNYN': [3, 2, 1, 1, 1, 1],  # B27
        'NYYYYN': [3, 2, 1, 1, 1, 1],     # B29
        'NYYNYYYY': [3, 2, 1, 0, 1, 0],   # B31
        'NYYNYYNYY': [3, 2, 1, 0, 1, 1],  # B33
        'NYYNYYNYN': [3, 2, 1, 1, 0, 1],  # B34
        'NYYNYN': [3, 2, 1, 1, 0, 1],     # B36
        'NYNYYYYY': [3, 1, 1, 0, 1, 0],   # B38
        'NYNYYYNYY': [3, 1, 1, 0, 1, 1],  # B40
        'NYNYYYNYN': [3, 1, 1, 1, 0, 1],  # B41
        'NYNYYN': [3, 1, 1, 0, 0, 1],     # B43
        'NYNYN': [3, 1, 1, 0, 0, 1],      # B44

        # Situations that cannot be simulated
        # 'YYYYYYYN': [1, 1, 1, 1, 1, 0],  # B2 -- IMPOSSIBLE
        # 'YYYYYYNN': [1, 1, 1, 1, 1, 1],  # B5 -- IMPOSSIBLE
        # 'YYYNYYYN': [1, 1, 1, 0, 1, 0],  # B9 -- IMPOSSIBLE
        # 'YYYNYYNN': [1, 1, 1, 0, 1, 1],  # B12 -- IMPOSSIBLE
        # 'YYYNN': [1, 1, 1, 1, 0, 1],     # B14 -- CANNOT REPRODUCE
        # 'YYNYYYYN': [1, 1, 1, 1, 1, 0],  # B16 -- IMPOSSIBLE
        # 'YYNYYYNN': [1, 1, 1, 1, 1, 1],  # B19 -- IMPOSSIBLE
        # 'NYYYYYYN': [3, 2, 1, 1, 1, 0],  # B25 -- IMPOSSIBLE
        # 'NYYYYYNN': [3, 2, 1, 1, 1, 1],  # B28 -- IMPOSSIBLE
        # 'NYYYN': [3, 2, 1, 1, 0, 1],     # B30 -- CANNOT REPRODUCE
        # 'NYYNYYYN': [3, 2, 1, 0, 1, 0],  # B32 -- IMPOSSIBLE
        # 'NYYNYYNN': [3, 2, 1, 0, 1, 1],  # B35 -- IMPOSSIBLE
        # 'NYYNN': [3, 2, 1, 1, 0, 1],     # B37 -- CANNOT REPRODUCE
        # 'NYNYYYYN': [3, 1, 1, 0, 1, 0],  # B39 -- IMPOSSIBLE
        # 'NYNYYYNN': [3, 1, 1, 0, 1, 1],  # B42 -- IMPOSSIBLE
    }
    for k, v in conds.items():
        # Analysis allows instrument entry?
        a_instruentry = len(k) > 1 and k[1] == 'Y'
        # Analysis allows manual entry?
        a_manualentry = k[0] == 'Y' or not a_instruentry
        # Method is not None?
        m_isnotnone = len(k) > 2 and k[2] == 'Y'
        # Method allows manual entry?
        m_manualentry = (len(k) > 3 and k[3] == 'Y')
        # At least one instrument available?
        m_instravilab = len(k) > 4 and k[4] == 'Y'
        # All instruments valid?
        m_allinstrval = len(k) > 6 and k[6] == 'Y'
        # Valid instruments available?
        m_validinstru = (len(k) > 5 and k[5] == 'Y') or (m_allinstrval)
        # Method allows the ASs default instr?
        m_allowsdefin = len(k) > 7 and k[7] == 'Y'
        # Default instrument is valid?
        i_definstrval = (len(k) > 8 and k[8] == 'Y') or \
            (m_allowsdefin and m_allinstrval)

        if a_manualentry:
            self.service.setManualEntryOfResults(True)
            self.service.setMethods([self.method, self.method2])
            self.service.setInstrumentEntryOfResults(a_instruentry)
            self.assertTrue(self.service.getManualEntryOfResults())
            self.assertEqual(self.service.getInstrumentEntryOfResults(),
                             a_instruentry)
        else:
            self.service.setInstrumentEntryOfResults(True)
            self.service.setManualEntryOfResults(False)
            self.service.setMethods([])
            self.assertTrue(self.service.getInstrumentEntryOfResults())
            self.assertFalse(self.service.getManualEntryOfResults())

        self.method.setManualEntryOfResults(m_manualentry)
        self.assertEqual(self.method.getManualEntryOfResults(),
                         m_manualentry)

        if m_instravilab:
            if m_isnotnone:
                self.instrument1.setMethod(self.method)
                self.instrument2.setMethod(self.method)
                self.instrument3.setMethod(self.method2)
                # BUGFIX: assertTrue -> assertEqual (second argument was
                # being treated as the failure message, not compared).
                self.assertEqual(self.instrument1.getMethod().UID(),
                                 self.method.UID())
                self.assertEqual(self.instrument2.getMethod().UID(),
                                 self.method.UID())
            else:
                self.instrument1.setMethod(None)
                self.instrument2.setMethod(None)
                self.instrument3.setMethod(None)
            self.service.setInstrument(self.instrument1)

            if m_validinstru:
                if m_allinstrval:
                    self.instrument1.setDisposeUntilNextCalibrationTest(
                        False)
                    self.instrument2.setDisposeUntilNextCalibrationTest(
                        False)
                    self.assertFalse(
                        self.instrument1.
                        getDisposeUntilNextCalibrationTest())
                    self.assertFalse(
                        self.instrument2.
                        getDisposeUntilNextCalibrationTest())
                else:
                    self.instrument1.setDisposeUntilNextCalibrationTest(
                        False)
                    self.instrument2.setDisposeUntilNextCalibrationTest(
                        True)
                    self.assertFalse(
                        self.instrument1.
                        getDisposeUntilNextCalibrationTest())
                    self.assertTrue(
                        self.instrument2.
                        getDisposeUntilNextCalibrationTest())
                if m_allowsdefin:
                    self.service.setInstruments([self.instrument2,
                                                 self.instrument1,
                                                 self.instrument3])
                    if not i_definstrval:
                        self.instrument2.setDisposeUntilNextCalibrationTest(
                            True)
                        self.service.setInstruments([self.instrument2,
                                                     self.instrument1,
                                                     self.instrument3])
                    self.service.setInstrument(self.instrument2)
                else:
                    self.service.setInstruments([self.instrument3])
            else:
                self.instrument1.setDisposeUntilNextCalibrationTest(True)
                self.assertTrue(
                    self.instrument1.getDisposeUntilNextCalibrationTest())
                self.instrument2.setDisposeUntilNextCalibrationTest(True)
                self.assertTrue(
                    self.instrument2.getDisposeUntilNextCalibrationTest())
                self.service.setInstruments([self.instrument1])
        else:
            self.instrument1.setMethod(None)
            self.instrument2.setMethod(None)
            self.instrument1.setDisposeUntilNextCalibrationTest(False)
            # BUGFIX: this second reset previously targeted instrument1
            # again (copy-paste), leaving instrument2's flag carried
            # over from the previous iteration. The service has no
            # instruments here, so the constraint outcome is unchanged.
            self.instrument2.setDisposeUntilNextCalibrationTest(False)
            self.service.setInstruments([])
            self.service.setInstrument(None)

        # Create the AR
        client = self.portal.clients['client-1']
        sampletype = self.portal.bika_setup.bika_sampletypes[
            'sampletype-1']
        values = {'Client': client.UID(),
                  'Contact': client.getContacts()[0].UID(),
                  'SamplingDate': '2016-01-01',
                  'SampleType': sampletype.UID()}
        request = {}
        services = [self.service, ]
        ar = create_analysisrequest(client, request, values, services)
        wf = getToolByName(ar, 'portal_workflow')
        wf.doActionFor(ar, 'receive')

        # Get the constraints
        auids = [ar.getAnalyses()[0].UID]
        constraint = get_method_instrument_constraints(ar, auids)[auids[0]]
        muid = self.method.UID() if m_isnotnone else ''
        cons = constraint.get(muid, None)
        self.assertFalse(cons is None)
        trimmed = cons[10][1:len(k) + 1]
        self.assertTrue(trimmed.startswith(k))
        self.assertEqual(cons[0], v[0])  # Method list visible?
        self.assertEqual(cons[1], v[1])  # None in methods list?
        self.assertEqual(cons[2], v[2])  # Instrument list visible?
        self.assertEqual(cons[3], v[3])  # None in instruments list?
        self.assertEqual(cons[5], v[4])  # Results field editable
        self.assertEqual(cons[6] == '', v[5] == 0)  # Error message?
    # NOTE: a quoted-out (''' ... ''') duplicate of this entire test
    # used to follow this method; it was dead code and has been removed.
def test_LIMS2001(self):
    """LIMS-2001: adding duplicate analyses to a worksheet must create
    one duplicate per regular analysis in the referenced slot, and
    repeated calls must keep accumulating duplicates regardless of the
    submit/retract state of the source analyses.
    """
    # ARs creation
    # Client: Happy Hills
    # SampleType: Apple Pulp
    # Contact: Rita Mohale
    client = self.portal.clients['client-1']
    sampletype = self.portal.bika_setup.bika_sampletypes['sampletype-1']
    values = {'Client': client.UID(),
              'Contact': client.getContacts()[0].UID(),
              'SamplingDate': '2015-01-01',
              'SampleType': sampletype.UID()}
    # analysis-service-3: Calcium (Ca)
    # analysis-service-6: Cooper (Cu)
    # analysis-service-7: Iron (Fe)
    servs = self.portal.bika_setup.bika_analysisservices
    aservs = [servs['analysisservice-3'],
              servs['analysisservice-6'],
              servs['analysisservice-7']]
    services = [s.UID() for s in aservs]
    request = {}
    ar = create_analysisrequest(client, request, values, services)
    sp = _createObjectByType('SamplePartition', ar.getSample(), tmpID())
    wf = getToolByName(ar, 'portal_workflow')
    wf.doActionFor(ar, 'receive')

    # Worksheet creation
    wsfolder = self.portal.worksheets
    ws = _createObjectByType("Worksheet", wsfolder, tmpID())
    ws.processForm()
    bsc = getToolByName(self.portal, 'bika_setup_catalog')
    lab_contacts = [o.getObject() for o in bsc(portal_type="LabContact")]
    lab_contact = [o for o in lab_contacts
                   if o.getUsername() == 'analyst1']
    self.assertEquals(len(lab_contact), 1)
    lab_contact = lab_contact[0]
    ws.setAnalyst(lab_contact.getUsername())
    ws.setResultsLayout(self.portal.bika_setup.getWorksheetLayout())

    # Add analyses into the worksheet
    self.request['context_uid'] = ws.UID()
    for analysis in ar.getAnalyses():
        an = analysis.getObject()
        an.setSamplePartition(sp)
        ws.addAnalysis(an)
    self.assertEquals(len(ws.getAnalyses()), 3)

    # Add a duplicate for slot 1 (there's only one slot)
    ws.addDuplicateAnalyses('1', None)
    ans = ws.getAnalyses()
    reg = [an for an in ans if an.portal_type == 'Analysis']
    dup = [an for an in ans if an.portal_type == 'DuplicateAnalysis']
    regkeys = [an.getKeyword() for an in reg]
    dupkeys = [an.getKeyword() for an in dup]
    regkeys.sort()
    dupkeys.sort()
    expregs = ['Ca', 'Cu', 'Fe']
    expdups = ['Ca', 'Cu', 'Fe']
    self.assertEquals(regkeys, expregs)
    self.assertEquals(dupkeys, expdups)

    # Add a result, submit and add another duplicate
    an1 = [an for an in reg if an.getKeyword() == 'Cu'][0]
    an1.setResult('13')
    wf.doActionFor(an1, 'submit')
    ws.addDuplicateAnalyses('1', None)
    ans = ws.getAnalyses()
    reg = [an for an in ans if an.portal_type == 'Analysis']
    dup = [an for an in ans if an.portal_type == 'DuplicateAnalysis']
    regkeys = [an.getKeyword() for an in reg]
    dupkeys = [an.getKeyword() for an in dup]
    regkeys.sort()
    dupkeys.sort()
    expregs = ['Ca', 'Cu', 'Fe']
    expdups = ['Ca', 'Ca', 'Cu', 'Cu', 'Fe', 'Fe']
    self.assertEquals(regkeys, expregs)
    self.assertEquals(dupkeys, expdups)

    # Retract the previous analysis and add another duplicate.
    # Retraction creates a new 'Cu' analysis, hence the extra 'Cu' in
    # the expected regular keywords below.
    wf.doActionFor(an1, 'retract')
    ws.addDuplicateAnalyses('1', None)
    ans = ws.getAnalyses()
    reg = [an for an in ans if an.portal_type == 'Analysis']
    dup = [an for an in ans if an.portal_type == 'DuplicateAnalysis']
    regkeys = [an.getKeyword() for an in reg]
    dupkeys = [an.getKeyword() for an in dup]
    regkeys.sort()
    dupkeys.sort()
    expregs = ['Ca', 'Cu', 'Cu', 'Fe']
    expdups = ['Ca', 'Ca', 'Ca', 'Cu', 'Cu', 'Cu', 'Fe', 'Fe', 'Fe']
    self.assertEquals(regkeys, expregs)
    self.assertEquals(dupkeys, expdups)

    # Do the same process, but with two ARs
    ar = create_analysisrequest(client, request, values, services)
    sp = _createObjectByType('SamplePartition', ar.getSample(), tmpID())
    wf.doActionFor(ar, 'receive')
    # Add analyses into the worksheet
    for analysis in ar.getAnalyses():
        an = analysis.getObject()
        an.setSamplePartition(sp)
        ws.addAnalysis(an)
    ans = ws.getAnalyses()
    reg = [an for an in ans if an.portal_type == 'Analysis']
    regkeys = [an.getKeyword() for an in reg]
    regkeys.sort()
    expregs = ['Ca', 'Ca', 'Cu', 'Cu', 'Cu', 'Fe', 'Fe']
    self.assertEquals(regkeys, expregs)

    # Add a duplicte for the second AR
    # slot 1: previous AR
    # slot 2: Duplicate 1 (analysis without result)
    # slot 3: Duplicate 2 (analysis with submitted result)
    # slot 4: Duplicate 3 (analysis retracted)
    # slot 5: this new AR
    ws.addDuplicateAnalyses('5', None)
    ans = ws.getAnalyses()
    reg = [an for an in ans if an.portal_type == 'Analysis']
    dup = [an for an in ans if an.portal_type == 'DuplicateAnalysis']
    regkeys = [an.getKeyword() for an in reg]
    dupkeys = [an.getKeyword() for an in dup]
    regkeys.sort()
    dupkeys.sort()
    expregs = ['Ca', 'Ca', 'Cu', 'Cu', 'Cu', 'Fe', 'Fe']
    expdups = ['Ca', 'Ca', 'Ca', 'Ca', 'Cu', 'Cu', 'Cu', 'Cu',
               'Fe', 'Fe', 'Fe', 'Fe']
    self.assertEquals(regkeys, expregs)
    self.assertEquals(dupkeys, expdups)
def test_DecimalMarkWithSciNotation(self):
    """Verify getFormattedResult() applies the comma decimal mark in
    every supported scientific-notation style.

    Notation codes:
      '1' => aE+b / aE-b
      '2' => ax10^b / ax10^-b
      '3' => ax10^b / ax10^-b (with superscript)
      '4' => a.10^b / a.10^-b (middle dot)
      '5' => a.10^b / a.10^-b (middle dot, with superscript)
    """
    # Rows: (service precision, exponential precision, notation code,
    #        raw result, expected formatted result)
    cases = [
        (0, 0, 1, '0', '0'),
        (0, 0, 2, '0', '0'),
        (0, 0, 3, '0', '0'),
        (0, 0, 4, '0', '0'),
        (0, 0, 5, '0', '0'),
        (2, 5, 1, '0.01', '0,01'),
        (2, 5, 2, '0.01', '0,01'),
        (2, 5, 3, '0.01', '0,01'),
        (2, 5, 4, '0.01', '0,01'),
        (2, 5, 5, '0.01', '0,01'),
        (2, 1, 1, '0.123', '1,2e-01'),
        (2, 1, 2, '0.123', '1,2x10^-1'),
        (2, 1, 3, '0.123', '1,2x10<sup>-1</sup>'),
        (2, 1, 4, '0.123', '1,2·10^-1'),
        (2, 1, 5, '0.123', '1,2·10<sup>-1</sup>'),
        (2, 1, 1, '1.234', '1,23'),
        (2, 1, 2, '1.234', '1,23'),
        (2, 1, 3, '1.234', '1,23'),
        (2, 1, 4, '1.234', '1,23'),
        (2, 1, 5, '1.234', '1,23'),
        (2, 1, 1, '12.345', '1,235e01'),
        (2, 1, 2, '12.345', '1,235x10^1'),
        (2, 1, 3, '12.345', '1,235x10<sup>1</sup>'),
        (2, 1, 4, '12.345', '1,235·10^1'),
        (2, 1, 5, '12.345', '1,235·10<sup>1</sup>'),
        (4, 3, 1, '-123.45678', '-123,4568'),
        (4, 3, 2, '-123.45678', '-123,4568'),
        (4, 3, 3, '-123.45678', '-123,4568'),
        (4, 3, 4, '-123.45678', '-123,4568'),
        (4, 3, 5, '-123.45678', '-123,4568'),
        (4, 3, 1, '-1234.5678', '-1,2345678e03'),
        (4, 3, 2, '-1234.5678', '-1,2345678x10^3'),
        (4, 3, 3, '-1234.5678', '-1,2345678x10<sup>3</sup>'),
        (4, 3, 4, '-1234.5678', '-1,2345678·10^3'),
        (4, 3, 5, '-1234.5678', '-1,2345678·10<sup>3</sup>'),
    ]
    service = self.service
    # Allow negative results so the rows below zero can be exercised.
    service.setLowerDetectionLimit('-99999')
    setup = self.portal.bika_setup
    setup.setResultsDecimalMark(',')
    analysis = None
    last_settings = None
    for precision, exp_precision, notation, result, expected in cases:
        # Only build a fresh AR when the service precision settings
        # change; otherwise reuse the analysis from the previous row.
        if analysis is None or last_settings != (precision, exp_precision):
            service.setPrecision(precision)
            service.setExponentialFormatPrecision(exp_precision)
            self.assertEqual(service.getPrecision(), precision)
            self.assertEqual(
                service.Schema().getField('Precision').get(service),
                precision)
            self.assertEqual(service.getExponentialFormatPrecision(),
                             exp_precision)
            self.assertEqual(
                service.Schema().getField(
                    'ExponentialFormatPrecision').get(service),
                exp_precision)
            client = self.portal.clients['client-1']
            sampletype = setup.bika_sampletypes['sampletype-1']
            values = {'Client': client.UID(),
                      'Contact': client.getContacts()[0].UID(),
                      'SamplingDate': '2015-01-01',
                      'SampleType': sampletype.UID()}
            ar = create_analysisrequest(client, {}, values,
                                        [service.UID()])
            workflow = getToolByName(ar, 'portal_workflow')
            workflow.doActionFor(ar, 'receive')
            analysis = ar.getAnalyses()[0].getObject()
            last_settings = (precision, exp_precision)
        analysis.setResult(result)
        # Results are stored and compared as strings.
        self.assertEqual(analysis.getResult(), result)
        self.assertEqual(
            analysis.Schema().getField('Result').get(analysis), result)
        formatted = analysis.getFormattedResult(
            sciformat=notation,
            decimalmark=setup.getResultsDecimalMark())
        self.assertEqual(formatted, expected)