def workflow_script_sample_due(self):
    """Cascade the 'sample_due' transition to every AnalysisRequest
    linked to this object, unless the transition is on the skiplist.
    """
    if skip(self, "sample_due"):
        return
    # All associated AnalysisRequests follow the same transition;
    # reindex each one so catalog state stays in sync.
    for request in self.getAnalysisRequests():
        doActionFor(request, "sample_due")
        request.reindexObject()
def test_default_stickers(self):
    """https://jira.bikalabs.com/browse/WINE-44: display SampleID or
    SamplePartition ID depending on bikasetup.ShowPartitions value
    """
    # Build two AnalysisServices to attach to the ARs created below.
    folder = self.portal.bika_setup.bika_analysisservices
    services = [_createObjectByType("AnalysisService", folder, tmpID()),
                _createObjectByType("AnalysisService", folder, tmpID())]
    services[0].processForm()
    services[1].processForm()
    services[0].edit(title="Detect Dust")
    services[1].edit(title="Detect water")
    # NOTE(review): `s.UID` is not called here, while client/contact/sampletype
    # below all use `.UID()` — confirm whether this should be `s.UID()`.
    service_uids = [s.UID for s in services]
    # Client and contact fixtures.
    folder = self.portal.clients
    client = _createObjectByType("Client", folder, tmpID())
    client.processForm()
    folder = self.portal.clients.objectValues("Client")[0]
    contact = _createObjectByType("Contact", folder, tmpID())
    contact.processForm()
    contact.edit(Firstname="Bob", Surname="Dobbs", email="*****@*****.**")
    # Sample type fixture.
    folder = self.portal.bika_setup.bika_sampletypes
    sampletype = _createObjectByType("SampleType", folder, tmpID())
    sampletype.processForm()
    sampletype.edit(title="Air", Prefix="AIR")
    values = {'Client': client.UID(),
              'Contact': contact.UID(),
              'SamplingDate': '2015-01-01',
              'SampleType': sampletype.UID()}
    # Exercise every registered sticker template in both modes of
    # bika_setup.ShowPartitions.
    for stemp in getStickerTemplates():
        # create and receive AR with ShowPartitions disabled: the rendered
        # sticker must NOT show the partition ID.
        ar = create_analysisrequest(client, {}, values, service_uids)
        ar.bika_setup.setShowPartitions(False)
        doActionFor(ar, 'receive')
        self.assertEquals(ar.portal_workflow.getInfoFor(ar, 'review_state'),
                          'sample_received')
        # check sticker text
        ar.REQUEST['items'] = ar.getId()
        ar.REQUEST['template'] = stemp.get('id')
        sticker = Sticker(ar, ar.REQUEST)()
        pid = ar.getSample().objectValues("SamplePartition")[0].getId()
        self.assertNotIn(pid, sticker,
                         "Sticker must not contain partition ID %s" % pid)
        # create and receive a second AR with ShowPartitions enabled: the
        # rendered sticker MUST show the partition ID.
        ar = create_analysisrequest(client, {}, values, service_uids)
        ar.bika_setup.setShowPartitions(True)
        doActionFor(ar, 'receive')
        self.assertEquals(ar.portal_workflow.getInfoFor(ar, 'review_state'),
                          'sample_received')
        # check sticker text
        ar.REQUEST['items'] = ar.getId()
        ar.REQUEST['template'] = stemp.get('id')
        sticker = Sticker(ar, ar.REQUEST)()
        pid = ar.getSample().objectValues("SamplePartition")[0].getId()
        self.assertIn(pid, sticker,
                      "Sticker must contain partition ID %s" % pid)
def workflow_script_verify(self):
    """Cascade 'verify' to every contained analysis that is currently
    in the 'to_be_verified' state, unless skiplisted.
    """
    if skip(self, "verify"):
        return
    wf_tool = getToolByName(self, 'portal_workflow')
    self.reindexObject(idxs=["review_state", ])
    if "verify all analyses" in self.REQUEST['workflow_skiplist']:
        return
    # Verify each analysis awaiting verification.
    for analysis in self.getAnalyses():
        review_state = wf_tool.getInfoFor(analysis, 'review_state', '')
        if review_state == 'to_be_verified':
            doActionFor(analysis, "verify")
def workflow_script_preserve(self):
    """This action can happen in the Sample UI, so transition every
    partition of this sample that is still 'to_be_preserved', then
    cascade 'preserve' to the associated AnalysisRequests.
    """
    wf_tool = getToolByName(self, "portal_workflow")
    # Collect pending partitions first, then act on them.
    pending = [part for part in self.objectValues("SamplePartition")
               if wf_tool.getInfoFor(part, "review_state") == "to_be_preserved"]
    for part in pending:
        doActionFor(part, "preserve")
    # All associated AnalysisRequests are also transitioned.
    for request in self.getAnalysisRequests():
        doActionFor(request, "preserve")
        request.reindexObject()
def workflow_script_receive(self):
    """Receive this sample: stamp the received date, receive every
    partition still 'sample_due', and cascade 'receive' to its ARs.
    """
    wf_tool = getToolByName(self, "portal_workflow")
    self.setDateReceived(DateTime())
    self.reindexObject(idxs=["review_state", "getDateReceived"])
    # Receive all partitions of this sample that are still 'sample_due'.
    due_parts = [part for part in self.objectValues("SamplePartition")
                 if wf_tool.getInfoFor(part, "review_state") == "sample_due"]
    for part in due_parts:
        wf_tool.doActionFor(part, "receive")
    # When a sample is received, its AnalysisRequests follow suit.
    for request in self.getAnalysisRequests():
        doActionFor(request, "receive")
def workflow_script_to_be_preserved(self):
    """Cascade 'to_be_preserved' to the child partitions flagged with
    that state, then to the associated AnalysisRequests.
    """
    if skip(self, "to_be_preserved"):
        return
    wf_tool = getToolByName(self, "portal_workflow")
    # Transition our children: collect first, then act.
    flagged = [part for part in self.objectValues("SamplePartition")
               if wf_tool.getInfoFor(part, "review_state") == "to_be_preserved"]
    for part in flagged:
        doActionFor(part, "to_be_preserved")
    # All associated AnalysisRequests are also transitioned.
    for request in self.getAnalysisRequests():
        doActionFor(request, "to_be_preserved")
        request.reindexObject()
def workflow_script_retract(self):
    """Cascade 'retract' to contained analyses that are not yet verified
    (only 'attachment_due' / 'to_be_verified' ones are retracted).
    """
    if skip(self, "retract"):
        return
    wf_tool = getToolByName(self, 'portal_workflow')
    self.reindexObject(idxs=["review_state", ])
    if "retract all analyses" in self.REQUEST['workflow_skiplist']:
        return
    # Retract each eligible analysis (NB: never retract verified ones).
    for analysis in self.getAnalyses():
        review_state = wf_tool.getInfoFor(analysis, 'review_state', '')
        if review_state in ('attachment_due', 'to_be_verified'):
            doActionFor(analysis, 'retract')
def workflow_script_to_be_preserved(self):
    """Propagate 'to_be_preserved' downward to flagged partitions and
    outward to the associated AnalysisRequests.
    """
    if skip(self, "to_be_preserved"):
        return
    portal_workflow = getToolByName(self, 'portal_workflow')
    partitions = self.objectValues('SamplePartition')
    # Snapshot the partitions that still need preserving, then transition.
    needs_preserving = [
        sp for sp in partitions
        if portal_workflow.getInfoFor(sp, 'review_state') == 'to_be_preserved'
    ]
    for sp in needs_preserving:
        doActionFor(sp, "to_be_preserved")
    # Associated AnalysisRequests follow, each reindexed afterwards.
    for analysis_request in self.getAnalysisRequests():
        doActionFor(analysis_request, "to_be_preserved")
        analysis_request.reindexObject()
def workflow_script_receive(self):
    """Receive the sample: record the reception date, receive any
    partitions still 'sample_due', then cascade to the ARs.
    """
    portal_workflow = getToolByName(self, 'portal_workflow')
    self.setDateReceived(DateTime())
    self.reindexObject(idxs=["review_state", "getDateReceived"])
    # Snapshot partitions still awaiting reception, then receive them.
    partitions = self.objectValues('SamplePartition')
    awaiting = [
        sp for sp in partitions
        if portal_workflow.getInfoFor(sp, 'review_state') == 'sample_due'
    ]
    for sp in awaiting:
        portal_workflow.doActionFor(sp, 'receive')
    # Receiving a sample also receives its AnalysisRequests.
    for analysis_request in self.getAnalysisRequests():
        doActionFor(analysis_request, "receive")
def __call__(self):
    """Render the sample view: apply a requested workflow transition,
    then build the header table, the partitions table and one analyses
    table per point of capture, and finally render the page template.
    """
    # Apply a workflow transition if one was submitted with the request.
    if 'transition' in self.request.form:
        doActionFor(self.context, self.request.form['transition'])
    ## render header table
    self.header_table = HeaderTableView(self.context, self.request)
    ## Create Sample Partitions table
    # NOTE(review): the partitions/analyses tables are only built when
    # allow_edit is False — confirm the edit mode renders them elsewhere.
    parts_table = None
    if not self.allow_edit:
        p = SamplePartitionsView(self.context, self.request)
        p.allow_edit = self.allow_edit
        p.show_select_column = self.allow_edit
        p.show_workflow_action_buttons = self.allow_edit
        p.show_column_toggles = False
        p.show_select_all_checkbox = False
        # 'empty' placeholder transition: effectively no buttons.
        p.review_states[0]['transitions'] = [{'id': 'empty'}, ]  # none
        parts_table = p.contents_table()
    self.parts = parts_table
    ## Create Field and Lab Analyses tables
    self.tables = {}
    if not self.allow_edit:
        for poc in POINTS_OF_CAPTURE:
            # Skip points of capture with no analyses on this sample.
            if not self.context.getAnalyses({'getPointOfCapture': poc}):
                continue
            t = SampleAnalysesView(self.context,
                                   self.request,
                                   getPointOfCapture=poc,
                                   sort_on='getServiceTitle')
            t.form_id = "sample_%s_analyses" % poc
            # Field analyses have no due date column.
            if poc == 'field':
                t.review_states[0]['columns'].remove('DueDate')
            t.show_column_toggles = False
            t.review_states[0]['transitions'] = [{'id': 'submit'},
                                                 {'id': 'retract'},
                                                 {'id': 'verify'}]
            self.tables[POINTS_OF_CAPTURE.getValue(poc)] = t.contents_table()
    return self.template()
def workflow_script_preserve(self):
    """Triggered from the Sample UI: preserve every partition still in
    'to_be_preserved', then cascade 'preserve' to the linked ARs.
    """
    portal_workflow = getToolByName(self, 'portal_workflow')
    partitions = self.objectValues("SamplePartition")
    # Snapshot first so transitions cannot disturb the iteration.
    awaiting = [
        sp for sp in partitions
        if portal_workflow.getInfoFor(sp, 'review_state') == 'to_be_preserved'
    ]
    for sp in awaiting:
        doActionFor(sp, "preserve")
    # Associated AnalysisRequests follow, reindexed as we go.
    for analysis_request in self.getAnalysisRequests():
        doActionFor(analysis_request, "preserve")
        analysis_request.reindexObject()
def workflow_script_verify(self):
    """Verify each contained analysis sitting in 'to_be_verified',
    unless the skiplist says otherwise.
    """
    if skip(self, "verify"):
        return
    portal_workflow = getToolByName(self, 'portal_workflow')
    self.reindexObject(idxs=["review_state", ])
    if "verify all analyses" in self.REQUEST['workflow_skiplist']:
        return
    # Cascade the verification to every eligible analysis.
    for item in self.getAnalyses():
        if portal_workflow.getInfoFor(item, 'review_state', '') == 'to_be_verified':
            doActionFor(item, "verify")
def workflow_script_sample(self):
    """This action can happen in the Sample UI: transition every
    partition still 'to_be_sampled', then cascade 'sample' to the ARs.
    """
    if skip(self, "sample"):
        return
    wf_tool = getToolByName(self, 'portal_workflow')
    # Snapshot the partitions awaiting sampling, then act on them.
    unsampled = [part for part in self.objectValues('SamplePartition')
                 if wf_tool.getInfoFor(part, 'review_state') == 'to_be_sampled']
    for part in unsampled:
        doActionFor(part, "sample")
    # All associated AnalysisRequests are also transitioned.
    for request in self.getAnalysisRequests():
        doActionFor(request, "sample")
        request.reindexObject()
def workflow_script_submit(self):
    """Attempt the 'attach' transition once every contained analysis is
    past the blocking states.

    Don't cascade: worksheets shouldn't be submitted directly for now,
    except edge cases where all analyses are already submitted but this
    object was held back until an analyst was assigned.
    """
    wf_tool = getToolByName(self, 'portal_workflow')
    self.reindexObject(idxs=["review_state", ])
    # States that block attaching.  Note: referenceanalyses and
    # duplicateanalyses can still have review_state 'assigned'.
    blocking_states = ('to_be_sampled', 'to_be_preserved', 'sample_due',
                       'sample_received', 'attachment_due', 'assigned',)
    can_attach = all(
        wf_tool.getInfoFor(analysis, 'review_state') not in blocking_states
        for analysis in self.getAnalyses())
    if can_attach:
        doActionFor(self, 'attach')
def workflow_script_receive(self):
    """Receive this partition: stamp the date, receive its analyses,
    and promote the parent sample once no sibling is left 'sample_due'.
    """
    if skip(self, "receive"):
        return
    sample = self.aq_parent
    wf_tool = getToolByName(self, 'portal_workflow')
    # Capture the sample state before our own transition side effects.
    sample_state = wf_tool.getInfoFor(sample, 'review_state')
    self.setDateReceived(DateTime())
    self.reindexObject(idxs=["getDateReceived", ])
    # Transition the analyses attached to this partition.
    for analysis in self.getBackReferences('AnalysisSamplePartition'):
        doActionFor(analysis, "receive")
    # If all sibling partitions are received, promote the sample too.
    if not skip(sample, "receive", peek=True):
        still_due = [part for part in sample.objectValues("SamplePartition")
                     if wf_tool.getInfoFor(part, 'review_state') == 'sample_due']
        if sample_state == 'sample_due' and not still_due:
            doActionFor(sample, 'receive')
def workflow_script_submit(self):
    """Try to attach once no contained analysis remains in a pre-attach
    state.

    No cascading here — submitting worksheets directly is not supported
    for now, except the edge case where every analysis was already
    submitted but this object waited for an analyst assignment.
    """
    portal_workflow = getToolByName(self, 'portal_workflow')
    self.reindexObject(idxs=["review_state", ])
    # Any analysis still in one of these states blocks attachment.
    # (referenceanalyses/duplicateanalyses may still be 'assigned'.)
    attachable = True
    for analysis in self.getAnalyses():
        state = portal_workflow.getInfoFor(analysis, 'review_state')
        if state in ('to_be_sampled', 'to_be_preserved', 'sample_due',
                     'sample_received', 'attachment_due', 'assigned',):
            attachable = False
            break
    if attachable:
        doActionFor(self, 'attach')
def workflow_script_retract(self):
    """Retract the contained analyses that have not been verified yet
    ('attachment_due' or 'to_be_verified' only), unless skiplisted.
    """
    if skip(self, "retract"):
        return
    portal_workflow = getToolByName(self, 'portal_workflow')
    self.reindexObject(idxs=["review_state", ])
    if "retract all analyses" in self.REQUEST['workflow_skiplist']:
        return
    # NB: verified analyses are deliberately left untouched.
    retractable = ('attachment_due', 'to_be_verified',)
    for item in self.getAnalyses():
        if portal_workflow.getInfoFor(item, 'review_state', '') in retractable:
            doActionFor(item, 'retract')
def __call__(self):
    """Render the sample view: apply any requested workflow transition,
    build the header table, the partitions table and per-point-of-capture
    analyses tables, then render the template.
    """
    # Apply a workflow transition if one arrived with the request form.
    if 'transition' in self.request.form:
        doActionFor(self.context, self.request.form['transition'])
    ## render header table
    self.header_table = HeaderTableView(self.context, self.request)
    ## Create Sample Partitions table
    # NOTE(review): tables are only built when allow_edit is False —
    # confirm the edit mode renders them through another path.
    parts_table = None
    if not self.allow_edit:
        p = SamplePartitionsView(self.context, self.request)
        p.allow_edit = self.allow_edit
        p.show_select_column = self.allow_edit
        p.show_workflow_action_buttons = self.allow_edit
        p.show_column_toggles = False
        p.show_select_all_checkbox = False
        # 'empty' placeholder transition: effectively no action buttons.
        p.review_states[0]['transitions'] = [{'id': 'empty'}, ]  # none
        parts_table = p.contents_table()
    self.parts = parts_table
    ## Create Field and Lab Analyses tables
    self.tables = {}
    if not self.allow_edit:
        for poc in POINTS_OF_CAPTURE:
            # Skip points of capture with no analyses on this sample.
            if not self.context.getAnalyses({'getPointOfCapture': poc}):
                continue
            t = SampleAnalysesView(self.context,
                                   self.request,
                                   getPointOfCapture=poc,
                                   sort_on='getServiceTitle')
            t.form_id = "sample_%s_analyses" % poc
            # Field analyses carry no due date column.
            if poc == 'field':
                t.review_states[0]['columns'].remove('DueDate')
            t.show_column_toggles = False
            t.review_states[0]['transitions'] = [{'id': 'submit'},
                                                 {'id': 'retract'},
                                                 {'id': 'verify'}]
            self.tables[POINTS_OF_CAPTURE.getValue(poc)] = t.contents_table()
    return self.template()
def workflow_script_receive(self):
    """Receive this partition and its analyses; when the last sibling
    partition leaves 'sample_due', promote the parent sample as well.
    """
    if skip(self, "receive"):
        return
    sample = self.aq_parent
    portal_workflow = getToolByName(self, 'portal_workflow')
    # Remember the sample's state before our side effects kick in.
    sample_state = portal_workflow.getInfoFor(sample, 'review_state')
    self.setDateReceived(DateTime())
    self.reindexObject(idxs=["getDateReceived", ])
    # Transition the analyses that reference this partition.
    for linked_analysis in self.getBackReferences('AnalysisSamplePartition'):
        doActionFor(linked_analysis, "receive")
    # Promote the sample once every sibling partition is received.
    if not skip(sample, "receive", peek=True):
        remaining = [
            sp for sp in sample.objectValues("SamplePartition")
            if portal_workflow.getInfoFor(sp, 'review_state') == 'sample_due'
        ]
        if sample_state == 'sample_due' and not remaining:
            doActionFor(sample, 'receive')
def workflow_script_preserve(self):
    """Preserve this partition's analyses; once every sibling partition
    is past the preservation stage, promote the sample and its ARs.
    """
    wf_tool = getToolByName(self, 'portal_workflow')
    sample = self.aq_parent
    # Transition the analyses linked to this partition.
    analyses = self.getBackReferences('AnalysisSamplePartition')
    if analyses:
        for analysis in analyses:
            doActionFor(analysis, "preserve")
    # If all our siblings are now up to date, promote sample and ARs.
    siblings = sample.objectValues("SamplePartition")
    if siblings:
        lower_states = ['to_be_sampled', 'to_be_preserved', ]
        escalate = True
        for sibling in siblings:
            if wf_tool.getInfoFor(sibling, 'review_state') in lower_states:
                escalate = False
        if escalate:
            doActionFor(sample, "preserve")
            for request in sample.getAnalysisRequests():
                doActionFor(request, "preserve")
def workflow_script_preserve(self):
    """Cascade 'preserve' to this partition's analyses, and escalate to
    the parent sample (and its ARs) when no sibling partition remains
    in a pre-preservation state.
    """
    portal_workflow = getToolByName(self, 'portal_workflow')
    sample = self.aq_parent
    # First transition the analyses attached to this partition.
    linked = self.getBackReferences('AnalysisSamplePartition')
    if linked:
        for item in linked:
            doActionFor(item, "preserve")
    # Escalate only when every sibling has left the lower states.
    partitions = sample.objectValues("SamplePartition")
    if partitions:
        pending_states = ['to_be_sampled', 'to_be_preserved', ]
        all_done = not any(
            portal_workflow.getInfoFor(sp, 'review_state') in pending_states
            for sp in partitions)
        if all_done:
            doActionFor(sample, "preserve")
            for analysis_request in sample.getAnalysisRequests():
                doActionFor(analysis_request, "preserve")
def _add_services_to_ar(self, ar, analyses):
    """Assign analysis services to an AR, create/locate its sample
    partition, bootstrap the (no_)sampling_workflow on both, and bring
    partitions, sample, analyses and AR to their initial workflow state.

    :param ar: the AnalysisRequest to receive the services
    :param analyses: list of strings; the service UID is taken from the
        part before the first ':' of each entry.
    """
    #Add Services
    service_uids = [i.split(':')[0] for i in analyses]
    new_analyses = ar.setAnalyses(service_uids)
    ar.setRequestID(ar.getId())
    ar.reindexObject()
    event.notify(ObjectInitializedEvent(ar))
    ar.at_post_create_script()
    SamplingWorkflowEnabled = \
        self.bika_setup.getSamplingWorkflowEnabled()
    wftool = getToolByName(self, 'portal_workflow')
    # Create sample partitions.  Only a single default partition spec is
    # built here (no services/container/preservation).
    parts = [{'services': [],
              'container': [],
              'preservation': '',
              'separate': False}]
    sample = ar.getSample()
    parts_and_services = {}
    for _i in range(len(parts)):
        p = parts[_i]
        part_prefix = sample.getId() + "-P"
        # Reuse an existing partition object if its id already exists,
        # otherwise create and initialise a new one.
        if '%s%s' % (part_prefix, _i + 1) in sample.objectIds():
            parts[_i]['object'] = sample['%s%s' % (part_prefix, _i + 1)]
            parts_and_services['%s%s' % (part_prefix, _i + 1)] = \
                p['services']
        else:
            part = _createObjectByType("SamplePartition", sample, tmpID())
            parts[_i]['object'] = part
            container = None
            preservation = p['preservation']
            parts[_i]['prepreserved'] = False
            part.unmarkCreationFlag()
            part.edit(Container=container,
                      Preservation=preservation, )
            part._renameAfterCreation()
            # Choose the workflow entry transition for the partition.
            if SamplingWorkflowEnabled:
                wftool.doActionFor(part, 'sampling_workflow')
            else:
                wftool.doActionFor(part, 'no_sampling_workflow')
            parts_and_services[part.id] = p['services']
    # Same entry transition for the AR itself.
    if SamplingWorkflowEnabled:
        wftool.doActionFor(ar, 'sampling_workflow')
    else:
        wftool.doActionFor(ar, 'no_sampling_workflow')
    # Add analyses to sample partitions
    # XXX jsonapi create AR: right now, all new analyses are linked to the first samplepartition
    # NOTE(review): `part` here is the loop variable left over from the
    # partition loop above — this only works because `parts` has exactly
    # one entry, and fails with NameError if the single partition already
    # existed (the `else` branch never ran). Verify.
    if new_analyses:
        analyses = list(part.getAnalyses())
        analyses.extend(new_analyses)
        part.edit(Analyses=analyses, )
        for analysis in new_analyses:
            analysis.setSamplePartition(part)
    # If Preservation is required for some partitions,
    # and the SamplingWorkflow is disabled, we need
    # to transition to to_be_preserved manually.
    if not SamplingWorkflowEnabled:
        to_be_preserved = []
        sample_due = []
        lowest_state = 'sample_due'
        for p in sample.objectValues('SamplePartition'):
            if p.getPreservation():
                # Any partition needing preservation drags the sample's
                # target state down to 'to_be_preserved'.
                lowest_state = 'to_be_preserved'
                to_be_preserved.append(p)
            else:
                sample_due.append(p)
        for p in to_be_preserved:
            doActionFor(p, 'to_be_preserved')
        for p in sample_due:
            doActionFor(p, 'sample_due')
        doActionFor(sample, lowest_state)
        for analysis in ar.objectValues('Analysis'):
            doActionFor(analysis, lowest_state)
        doActionFor(ar, lowest_state)
def create_analysisrequest(context, request, values):
    """Create an AR.

    :param context: the container in which the AR will be created (Client)
    :param request: the request object
    :param values: a dictionary containing fieldname/value pairs, which
        will be applied.  Some fields will have specific code to handle
        them, and others will be directly written to the schema.
    :return: the new AR instance

    Special keys present (or required) in the values dict, which are not
    present in the schema:

        - Partitions: data about partitions to be created, and the
          analyses that are to be assigned to each.

        - Prices: custom prices set in the HTML form.

        - ResultsRange: Specification values entered in the HTML form.
    """
    # Gather neccesary tools
    workflow = getToolByName(context, 'portal_workflow')
    bc = getToolByName(context, 'bika_catalog')
    # Create new sample or locate the existing for secondary AR
    if values['Sample']:
        secondary = True
        # 'Sample' may be a Sample object or a UID to resolve via catalog.
        if ISample.providedBy(values['Sample']):
            sample = values['Sample']
        else:
            sample = bc(UID=values['Sample'])[0].getObject()
        samplingworkflow_enabled = sample.getSamplingWorkflowEnabled()
    else:
        secondary = False
        samplingworkflow_enabled = context.bika_setup.getSamplingWorkflowEnabled()
        sample = create_sample(context, request, values)
    # Create the Analysis Request
    ar = _createObjectByType('AnalysisRequest', context, tmpID())
    ar.setSample(sample)
    # processform renames the sample, this requires values to contain the Sample.
    values['Sample'] = sample
    ar.processForm(REQUEST=request, values=values)
    # Object has been renamed
    ar.edit(RequestID=ar.getId())
    # Set initial AR state
    workflow_action = 'sampling_workflow' if samplingworkflow_enabled \
        else 'no_sampling_workflow'
    workflow.doActionFor(ar, workflow_action)
    # Set analysis request analyses
    ar.setAnalyses(values['Analyses'],
                   prices=values.get("Prices", []),
                   specs=values.get('ResultsRange', []))
    analyses = ar.getAnalyses(full_objects=True)
    # States in which 'receive' must not be attempted.
    skip_receive = ['to_be_sampled', 'sample_due', 'sampled',
                    'to_be_preserved']
    if secondary:
        # Only 'sample_due' and 'sample_recieved' samples can be selected
        # for secondary analyses
        doActionFor(ar, 'sampled')
        doActionFor(ar, 'sample_due')
        sample_state = workflow.getInfoFor(sample, 'review_state')
        if sample_state not in skip_receive:
            doActionFor(ar, 'receive')
        # Fast-forward the new analyses to match the sample's state.
        for analysis in analyses:
            doActionFor(analysis, 'sample_due')
            analysis_state = workflow.getInfoFor(analysis, 'review_state')
            if analysis_state not in skip_receive:
                doActionFor(analysis, 'receive')
    if not secondary:
        # Create sample partitions
        partitions = []
        for n, partition in enumerate(values['Partitions']):
            # Calculate partition id
            partition_prefix = sample.getId() + "-P"
            partition_id = '%s%s' % (partition_prefix, n + 1)
            partition['part_id'] = partition_id
            # Point to or create sample partition
            if partition_id in sample.objectIds():
                partition['object'] = sample[partition_id]
            else:
                partition['object'] = create_samplepartition(
                    sample,
                    partition
                )
            # now assign analyses to this partition.
            obj = partition['object']
            for analysis in analyses:
                if analysis.getService().UID() in partition['services']:
                    analysis.setSamplePartition(obj)
            partitions.append(partition)
        # If Preservation is required for some partitions,
        # and the SamplingWorkflow is disabled, we need
        # to transition to to_be_preserved manually.
        if not samplingworkflow_enabled:
            to_be_preserved = []
            sample_due = []
            lowest_state = 'sample_due'
            for p in sample.objectValues('SamplePartition'):
                if p.getPreservation():
                    # Preservation demand drags the sample's target state
                    # down to 'to_be_preserved'.
                    lowest_state = 'to_be_preserved'
                    to_be_preserved.append(p)
                else:
                    sample_due.append(p)
            for p in to_be_preserved:
                doActionFor(p, 'to_be_preserved')
            for p in sample_due:
                doActionFor(p, 'sample_due')
            doActionFor(sample, lowest_state)
            doActionFor(ar, lowest_state)
        # Transition pre-preserved partitions
        for p in partitions:
            if 'prepreserved' in p and p['prepreserved']:
                part = p['object']
                state = workflow.getInfoFor(part, 'review_state')
                if state == 'to_be_preserved':
                    workflow.doActionFor(part, 'preserve')
    # Return the newly created Analysis Request
    return ar
def create_analysisrequest(context, request, values):
    """Create an AR.

    :param context: the container in which the AR will be created (Client)
    :param request: the request object
    :param values: a dictionary containing fieldname/value pairs, which
        will be applied.  Some fields will have specific code to handle
        them, and others will be directly written to the schema.
    :return: the new AR instance

    Special keys present (or required) in the values dict, which are not
    present in the schema:

        - Partitions: data about partitions to be created, and the
          analyses that are to be assigned to each.

        - Prices: custom prices set in the HTML form.

        - ResultsRange: Specification values entered in the HTML form.
    """
    # Gather neccesary tools
    workflow = getToolByName(context, 'portal_workflow')
    bc = getToolByName(context, 'bika_catalog')
    # Create new sample or locate the existing for secondary AR
    if values['Sample']:
        secondary = True
        # 'Sample' may be the object itself or a UID to resolve.
        if ISample.providedBy(values['Sample']):
            sample = values['Sample']
        else:
            sample = bc(UID=values['Sample'])[0].getObject()
        samplingworkflow_enabled = sample.getSamplingWorkflowEnabled()
    else:
        secondary = False
        samplingworkflow_enabled = context.bika_setup.getSamplingWorkflowEnabled(
        )
        sample = create_sample(context, request, values)
    # Create the Analysis Request
    ar = _createObjectByType('AnalysisRequest', context, tmpID())
    ar.setSample(sample)
    # processform renames the sample, this requires values to contain the Sample.
    values['Sample'] = sample
    ar.processForm(REQUEST=request, values=values)
    # Object has been renamed
    ar.edit(RequestID=ar.getId())
    # Set initial AR state
    workflow_action = 'sampling_workflow' if samplingworkflow_enabled \
        else 'no_sampling_workflow'
    workflow.doActionFor(ar, workflow_action)
    # Set analysis request analyses
    ar.setAnalyses(values['Analyses'],
                   prices=values.get("Prices", []),
                   specs=values.get('ResultsRange', []))
    analyses = ar.getAnalyses(full_objects=True)
    # States in which 'receive' must not be attempted.
    skip_receive = [
        'to_be_sampled', 'sample_due', 'sampled', 'to_be_preserved'
    ]
    if secondary:
        # Only 'sample_due' and 'sample_recieved' samples can be selected
        # for secondary analyses
        doActionFor(ar, 'sampled')
        doActionFor(ar, 'sample_due')
        sample_state = workflow.getInfoFor(sample, 'review_state')
        if sample_state not in skip_receive:
            doActionFor(ar, 'receive')
        # Fast-forward the new analyses to match the sample's state.
        for analysis in analyses:
            doActionFor(analysis, 'sample_due')
            analysis_state = workflow.getInfoFor(analysis, 'review_state')
            if analysis_state not in skip_receive:
                doActionFor(analysis, 'receive')
    if not secondary:
        # Create sample partitions
        partitions = []
        for n, partition in enumerate(values['Partitions']):
            # Calculate partition id
            partition_prefix = sample.getId() + "-P"
            partition_id = '%s%s' % (partition_prefix, n + 1)
            partition['part_id'] = partition_id
            # Point to or create sample partition
            if partition_id in sample.objectIds():
                partition['object'] = sample[partition_id]
            else:
                partition['object'] = create_samplepartition(sample, partition)
            # now assign analyses to this partition.
            obj = partition['object']
            for analysis in analyses:
                if analysis.getService().UID() in partition['services']:
                    analysis.setSamplePartition(obj)
            partitions.append(partition)
        # If Preservation is required for some partitions,
        # and the SamplingWorkflow is disabled, we need
        # to transition to to_be_preserved manually.
        if not samplingworkflow_enabled:
            to_be_preserved = []
            sample_due = []
            lowest_state = 'sample_due'
            for p in sample.objectValues('SamplePartition'):
                if p.getPreservation():
                    # Preservation demand drags the target state down.
                    lowest_state = 'to_be_preserved'
                    to_be_preserved.append(p)
                else:
                    sample_due.append(p)
            for p in to_be_preserved:
                doActionFor(p, 'to_be_preserved')
            for p in sample_due:
                doActionFor(p, 'sample_due')
            doActionFor(sample, lowest_state)
            doActionFor(ar, lowest_state)
        # Transition pre-preserved partitions
        for p in partitions:
            if 'prepreserved' in p and p['prepreserved']:
                part = p['object']
                state = workflow.getInfoFor(part, 'review_state')
                if state == 'to_be_preserved':
                    workflow.doActionFor(part, 'preserve')
    # Return the newly created Analysis Request
    return ar
def __call__(self):
    """Render the AnalysisRequest view: apply a requested transition,
    assemble contact/CC data, the header/partitions/analyses/QC tables,
    add status messages for retracted/invalid ARs, then render.
    """
    ar = self.context
    workflow = getToolByName(self.context, 'portal_workflow')
    # Apply a workflow transition if one was posted with the request.
    if 'transition' in self.request.form:
        doActionFor(self.context, self.request.form['transition'])
    # Contacts get expanded for view
    contact = self.context.getContact()
    contacts = []
    for cc in self.context.getCCContact():
        contacts.append(cc)
    # The primary contact is not repeated in the CC list.
    if contact in contacts:
        contacts.remove(contact)
    ccemails = []
    for cc in contacts:
        ccemails.append(
            "%s <<a href='mailto:%s'>%s</a>>" %
            (cc.Title(), cc.getEmailAddress(), cc.getEmailAddress()))
    # CC Emails become mailto links
    emails = self.context.getCCEmails()
    if isinstance(emails, str):
        # Normalise a single address (or empty string) to a list.
        emails = emails and [emails, ] or []
    cc_emails = []
    cc_hrefs = []
    for cc in emails:
        cc_emails.append(cc)
        cc_hrefs.append("<a href='mailto:%s'>%s</a>" % (cc, cc))
    # render header table
    self.header_table = HeaderTableView(self.context, self.request)()
    # Create Partitions View for this ARs sample
    p = SamplePartitionsView(self.context.getSample(), self.request)
    p.show_column_toggles = False
    self.parts = p.contents_table()
    # Create Field and Lab Analyses tables
    self.tables = {}
    for poc in POINTS_OF_CAPTURE:
        if self.context.getAnalyses(getPointOfCapture=poc):
            t = self.createAnalysesView(
                ar,
                self.request,
                getPointOfCapture=poc,
                show_categories=self.context.bika_setup.
                getCategoriseAnalysisServices())
            t.allow_edit = True
            t.form_id = "%s_analyses" % poc
            t.review_states[0]['transitions'] = [{'id': 'submit'},
                                                 {'id': 'retract'},
                                                 {'id': 'verify'}]
            t.show_workflow_action_buttons = True
            t.show_select_column = True
            # Field results lose the DueDate column when editable.
            if getSecurityManager().checkPermission(EditFieldResults,
                                                    self.context) \
               and poc == 'field':
                t.review_states[0]['columns'].remove('DueDate')
            self.tables[POINTS_OF_CAPTURE.getValue(poc)] = t.contents_table()
    # Un-captured field analyses may cause confusion
    if ar.getAnalyses(getPointOfCapture='field',
                      review_state=['sampled', 'sample_due']):
        message = _("There are field analyses without submitted results.")
        self.addMessage(message, 'info')
    # Create QC Analyses View for this AR
    show_cats = self.context.bika_setup.getCategoriseAnalysisServices()
    qcview = self.createQCAnalyesView(ar,
                                      self.request,
                                      show_categories=show_cats)
    qcview.allow_edit = False
    qcview.show_select_column = False
    qcview.show_workflow_action_buttons = False
    # NOTE(review): literal "%s_qcanalyses" — no value is interpolated;
    # confirm whether this should be e.g. "%s_qcanalyses" % poc.
    qcview.form_id = "%s_qcanalyses"
    qcview.review_states[0]['transitions'] = [{'id': 'submit'},
                                              {'id': 'retract'},
                                              {'id': 'verify'}]
    self.qctable = qcview.contents_table()
    # Create the ResultsInterpretation by department view
    from dependencies.dependency import ARResultsInterpretationView
    self.riview = ARResultsInterpretationView(ar, self.request)
    # If a general retracted is done, rise a waring
    if workflow.getInfoFor(ar, 'review_state') == 'sample_received':
        # Warn only when every analysis is retracted/to_be_verified/verified.
        allstatus = list()
        for analysis in ar.getAnalyses():
            status = workflow.getInfoFor(analysis.getObject(), 'review_state')
            if status not in ['retracted', 'to_be_verified', 'verified']:
                allstatus = []
                break
            else:
                allstatus.append(status)
        if len(allstatus) > 0:
            self.addMessage("General Retract Done", 'warning')
    # If is a retracted AR, show the link to child AR and show a warn msg
    if workflow.getInfoFor(ar, 'review_state') == 'invalid':
        childar = hasattr(ar, 'getChildAnalysisRequest') \
            and ar.getChildAnalysisRequest() or None
        message = _('These results have been withdrawn and are '
                    'listed here for trace-ability purposes. Please follow '
                    'the link to the retest')
        if childar:
            message = (message + " %s.") % childar.getRequestID()
        else:
            message = message + "."
        self.addMessage(message, 'warning')
    # If is an AR automatically generated due to a Retraction, show it's
    # parent AR information
    if hasattr(ar, 'getParentAnalysisRequest') \
       and ar.getParentAnalysisRequest():
        par = ar.getParentAnalysisRequest()
        message = _('This Analysis Request has been '
                    'generated automatically due to '
                    'the retraction of the Analysis '
                    'Request ${retracted_request_id}.',
                    mapping={'retracted_request_id': par.getRequestID()})
        self.addMessage(message, 'info')
    self.renderMessages()
    return self.template()
def __call__(self):
    """Render the AnalysisRequest view: apply any posted transition,
    build contact/CC data and the header, partitions, analyses and QC
    tables, add retraction-related status messages, then render.
    """
    ar = self.context
    workflow = getToolByName(self.context, 'portal_workflow')
    # Apply a workflow transition if one arrived with the request form.
    if 'transition' in self.request.form:
        doActionFor(self.context, self.request.form['transition'])
    # Contacts get expanded for view
    contact = self.context.getContact()
    contacts = []
    for cc in self.context.getCCContact():
        contacts.append(cc)
    # Do not repeat the primary contact among the CCs.
    if contact in contacts:
        contacts.remove(contact)
    ccemails = []
    for cc in contacts:
        ccemails.append("%s <<a href='mailto:%s'>%s</a>>"
                        % (cc.Title(), cc.getEmailAddress(),
                           cc.getEmailAddress()))
    # CC Emails become mailto links
    emails = self.context.getCCEmails()
    if isinstance(emails, str):
        # Normalise a single (possibly empty) address to a list.
        emails = emails and [emails, ] or []
    cc_emails = []
    cc_hrefs = []
    for cc in emails:
        cc_emails.append(cc)
        cc_hrefs.append("<a href='mailto:%s'>%s</a>" % (cc, cc))
    # render header table
    self.header_table = HeaderTableView(self.context, self.request)()
    # Create Partitions View for this ARs sample
    p = SamplePartitionsView(self.context.getSample(), self.request)
    p.show_column_toggles = False
    self.parts = p.contents_table()
    # Create Field and Lab Analyses tables
    self.tables = {}
    for poc in POINTS_OF_CAPTURE:
        if self.context.getAnalyses(getPointOfCapture=poc):
            t = self.createAnalysesView(ar,
                                        self.request,
                                        getPointOfCapture=poc,
                                        show_categories=self.context.bika_setup.getCategoriseAnalysisServices())
            t.allow_edit = True
            t.form_id = "%s_analyses" % poc
            t.review_states[0]['transitions'] = [{'id': 'submit'},
                                                 {'id': 'retract'},
                                                 {'id': 'verify'}]
            t.show_workflow_action_buttons = True
            t.show_select_column = True
            # Editable field results drop the DueDate column.
            if getSecurityManager().checkPermission(EditFieldResults, self.context) \
               and poc == 'field':
                t.review_states[0]['columns'].remove('DueDate')
            self.tables[POINTS_OF_CAPTURE.getValue(poc)] = t.contents_table()
    # Un-captured field analyses may cause confusion
    if ar.getAnalyses(getPointOfCapture='field',
                      review_state=['sampled', 'sample_due']):
        message = _("There are field analyses without submitted results.")
        self.addMessage(message, 'info')
    # Create QC Analyses View for this AR
    show_cats = self.context.bika_setup.getCategoriseAnalysisServices()
    qcview = self.createQCAnalyesView(ar,
                                      self.request,
                                      show_categories=show_cats)
    qcview.allow_edit = False
    qcview.show_select_column = False
    qcview.show_workflow_action_buttons = False
    # NOTE(review): literal "%s_qcanalyses" is never interpolated —
    # confirm whether a "% ..." was intended here.
    qcview.form_id = "%s_qcanalyses"
    qcview.review_states[0]['transitions'] = [{'id': 'submit'},
                                              {'id': 'retract'},
                                              {'id': 'verify'}]
    self.qctable = qcview.contents_table()
    # Create the ResultsInterpretation by department view
    from dependencies.dependency import ARResultsInterpretationView
    self.riview = ARResultsInterpretationView(ar, self.request)
    # If a general retracted is done, rise a waring
    if workflow.getInfoFor(ar, 'review_state') == 'sample_received':
        # Warn only when every analysis is retracted/to_be_verified/verified.
        allstatus = list()
        for analysis in ar.getAnalyses():
            status = workflow.getInfoFor(analysis.getObject(), 'review_state')
            if status not in ['retracted', 'to_be_verified', 'verified']:
                allstatus = []
                break
            else:
                allstatus.append(status)
        if len(allstatus) > 0:
            self.addMessage("General Retract Done", 'warning')
    # If is a retracted AR, show the link to child AR and show a warn msg
    if workflow.getInfoFor(ar, 'review_state') == 'invalid':
        childar = hasattr(ar, 'getChildAnalysisRequest') \
            and ar.getChildAnalysisRequest() or None
        message = _('These results have been withdrawn and are '
                    'listed here for trace-ability purposes. Please follow '
                    'the link to the retest')
        if childar:
            message = (message + " %s.") % childar.getRequestID()
        else:
            message = message + "."
        self.addMessage(message, 'warning')
    # If is an AR automatically generated due to a Retraction, show it's
    # parent AR information
    if hasattr(ar, 'getParentAnalysisRequest') \
       and ar.getParentAnalysisRequest():
        par = ar.getParentAnalysisRequest()
        message = _('This Analysis Request has been '
                    'generated automatically due to '
                    'the retraction of the Analysis '
                    'Request ${retracted_request_id}.',
                    mapping={'retracted_request_id': par.getRequestID()})
        self.addMessage(message, 'info')
    self.renderMessages()
    return self.template()
def workflow_action_submit(self):
    """Handle the 'submit' workflow action for the selected analyses.

    Reads results, interim fields, remarks, methods, instruments,
    analysts, uncertainties and detection limits from the request form,
    persists any values that differ from what is already stored, then
    fires the 'submit' transition on every selected, active analysis
    that has a non-empty result.  Finally redirects back to the context
    (or its /manage_results view when the user may edit results).

    Fixes vs. previous revision (behavior unchanged):
    - ``== True`` comparisons replaced with plain truth tests (PEP 8);
    - stray trailing semicolon removed from ``setInstrument(None)``;
    - verbose if/else collapsed into a boolean expression for
      ``hasInterims``.
    """
    form = self.request.form
    rc = getToolByName(self.context, REFERENCE_CATALOG)
    action, came_from = WorkflowAction._get_form_workflow_action(self)
    checkPermission = self.context.portal_membership.checkPermission
    # Bail out early: nothing may be submitted on an inactive context.
    if not isActive(self.context):
        message = _('Item is inactive.')
        self.context.plone_utils.addPortalMessage(message, 'info')
        self.request.response.redirect(self.context.absolute_url())
        return
    # calcs.js has kept item_data and form input interim values synced,
    # so the json strings from item_data will be the same as the form values
    item_data = {}
    if 'item_data' in form:
        if isinstance(form['item_data'], list):
            for i_d in form['item_data']:
                for i, d in json.loads(i_d).items():
                    item_data[i] = d
        else:
            item_data = json.loads(form['item_data'])
    selected_analyses = WorkflowAction._get_selected_items(self)
    results = {}
    hasInterims = {}
    # check that the form values match the database
    # save them if not.
    for uid, result in self.request.form.get('Result', [{}])[0].items():
        # if the AR has ReportDryMatter set, get dry_result from form.
        dry_result = ''
        if hasattr(self.context, 'getReportDryMatter') \
           and self.context.getReportDryMatter():
            for k, v in self.request.form['ResultDM'][0].items():
                if uid == k:
                    dry_result = v
                    break
        if uid in selected_analyses:
            analysis = selected_analyses[uid]
        else:
            analysis = rc.lookupObject(uid)
        if not analysis:
            # ignore result if analysis object no longer exists
            continue
        results[uid] = result
        interimFields = item_data[uid]
        # was: a four-line if/else assigning True/False
        hasInterims[uid] = len(interimFields) > 0
        retested = 'retested' in form and uid in form['retested']
        remarks = form.get('Remarks', [{}, ])[0].get(uid, '')
        # Don't save uneccessary things
        # https://github.com/bikalabs/Bika-LIMS/issues/766:
        # Somehow, using analysis.edit() fails silently when
        # logged in as Analyst.
        if analysis.getInterimFields() != interimFields or \
           analysis.getRetested() != retested or \
           analysis.getRemarks() != remarks:
            analysis.setInterimFields(interimFields)
            analysis.setRetested(retested)
            analysis.setRemarks(remarks)
        # save results separately, otherwise capture date is rewritten
        if analysis.getResult() != result or \
           analysis.getResultDM() != dry_result:
            analysis.setResultDM(dry_result)
            analysis.setResult(result)
    methods = self.request.form.get('Method', [{}])[0]
    instruments = self.request.form.get('Instrument', [{}])[0]
    analysts = self.request.form.get('Analyst', [{}])[0]
    uncertainties = self.request.form.get('Uncertainty', [{}])[0]
    dlimits = self.request.form.get('DetectionLimit', [{}])[0]
    # discover which items may be submitted
    submissable = []
    for uid, analysis in selected_analyses.items():
        analysis_active = isActive(analysis)
        # Need to save the instrument?
        if uid in instruments and analysis_active:
            # TODO: Add SetAnalysisInstrument permission
            # allow_setinstrument = sm.checkPermission(SetAnalysisInstrument)
            allow_setinstrument = True
            # ---8<-----
            if allow_setinstrument:
                # The current analysis allows the instrument regards
                # to its analysis service and method?
                if instruments[uid] == '':
                    # Empty selection: detach any previously set instrument.
                    previnstr = analysis.getInstrument()
                    if previnstr:
                        previnstr.removeAnalysis(analysis)
                    analysis.setInstrument(None)
                elif analysis.isInstrumentAllowed(instruments[uid]):
                    previnstr = analysis.getInstrument()
                    if previnstr:
                        previnstr.removeAnalysis(analysis)
                    analysis.setInstrument(instruments[uid])
                    instrument = analysis.getInstrument()
                    instrument.addAnalysis(analysis)
        # Need to save the method?
        if uid in methods and analysis_active:
            # TODO: Add SetAnalysisMethod permission
            # allow_setmethod = sm.checkPermission(SetAnalysisMethod)
            allow_setmethod = True
            # ---8<-----
            if allow_setmethod and analysis.isMethodAllowed(methods[uid]):
                analysis.setMethod(methods[uid])
        # Need to save the analyst?
        if uid in analysts and analysis_active:
            analysis.setAnalyst(analysts[uid])
        # Need to save the uncertainty?
        if uid in uncertainties and analysis_active:
            analysis.setUncertainty(uncertainties[uid])
        # Need to save the detection limit?
        if analysis_active:
            analysis.setDetectionLimitOperand(dlimits.get(uid, None))
        # Only analyses with a non-empty submitted result are candidates.
        if uid not in results or not results[uid]:
            continue
        can_submit = True
        # guard_submit does a lot of the same stuff, too.
        # the code there has also been commented.
        # we must find a better way to allow dependencies to control
        # this process.
        # for dependency in analysis.getDependencies():
        #     dep_state = workflow.getInfoFor(dependency, 'review_state')
        #     if hasInterims[uid]:
        #         if dep_state in ('to_be_sampled', 'to_be_preserved',
        #                          'sample_due', 'sample_received',
        #                          'attachment_due', 'to_be_verified',):
        #             can_submit = False
        #             break
        #     else:
        #         if dep_state in ('to_be_sampled', 'to_be_preserved',
        #                          'sample_due', 'sample_received',):
        #             can_submit = False
        #             break
        if can_submit and analysis not in submissable:
            submissable.append(analysis)
    # and then submit them.
    for analysis in submissable:
        doActionFor(analysis, 'submit')
    message = PMF("Changes saved.")
    self.context.plone_utils.addPortalMessage(message, 'info')
    if checkPermission(EditResults, self.context):
        self.destination_url = self.context.absolute_url() + "/manage_results"
    else:
        self.destination_url = self.context.absolute_url()
    self.request.response.redirect(self.destination_url)
def _create_ar(self, context, request):
    """Creates AnalysisRequest object, with supporting Sample, Partition
    and Analysis objects.  The client is retrieved from the obj_path key
    in the request.

    Required request parameters:
        - Contact: One client contact Fullname.  The contact must exist
          in the specified client.  The first Contact with the specified
          value in its Fullname field will be used.
        - SampleType_<index> - Must be an existing sample type.

    Optional request parameters:
    - CCContacts: A list of contact Fullnames, which will be copied on
      all messages related to this AR and its sample or results.
    - CCEmails: A list of email addresses to include as above.
    - Sample_id: Create a secondary AR with an existing sample.  If
      unspecified, a new sample is created.
    - Specification: a lookup to set Analysis specs default values
      for all analyses
    - Analysis_Specification: specs (or overrides) per analysis, using
      a special lookup format.
      &Analysis_Specification:list=<Keyword>:min:max:error&...

    Returns a dict with 'success'/'error' flags plus the created
    'sample_id' and 'ar_id'.  Raises on missing Client/Sample lookups,
    and BadRequest when unused request fields remain.
    """
    wftool = getToolByName(context, 'portal_workflow')
    bc = getToolByName(context, 'bika_catalog')
    bsc = getToolByName(context, 'bika_setup_catalog')
    pc = getToolByName(context, 'portal_catalog')
    ret = {
        "url": router.url_for("create", force_external=True),
        "success": True,
        "error": False,
    }
    SamplingWorkflowEnabled = context.bika_setup.getSamplingWorkflowEnabled()
    # Mark mandatory fields as required AND consumed, so that the final
    # "unused fields" check below does not trip over them.
    for field in [
            'Client',
            'SampleType',
            'Contact',
            'SamplingDate',
            'Services']:
        self.require(field)
        self.used(field)
    try:
        client = resolve_request_lookup(context, request, 'Client')[0].getObject()
    except IndexError:
        raise Exception("Client not found")
    # Sample_id
    if 'Sample' in request:
        # Secondary AR: re-use an existing sample resolved from the request.
        try:
            sample = resolve_request_lookup(context, request, 'Sample')[0].getObject()
        except IndexError:
            raise Exception("Sample not found")
    else:
        # Primary AR: create and initialise a brand new Sample.
        sample = _createObjectByType("Sample", client, tmpID())
        sample.unmarkCreationFlag()
        fields = set_fields_from_request(sample, request)
        for field in fields:
            self.used(field)
        sample._renameAfterCreation()
        sample.setSampleID(sample.getId())
        event.notify(ObjectInitializedEvent(sample))
        sample.at_post_create_script()
        # Put the new sample on the correct initial workflow track.
        if SamplingWorkflowEnabled:
            wftool.doActionFor(sample, 'sampling_workflow')
        else:
            wftool.doActionFor(sample, 'no_sampling_workflow')
    ret['sample_id'] = sample.getId()
    # NOTE(review): only a single default partition is ever built here;
    # 'container'/'preservation' stay empty — confirm this is intended.
    parts = [{'services': [],
              'container': [],
              'preservation': '',
              'separate': False}]
    specs = self.get_specs_from_request()
    ar = _createObjectByType("AnalysisRequest", client, tmpID())
    ar.unmarkCreationFlag()
    fields = set_fields_from_request(ar, request)
    for field in fields:
        self.used(field)
    ar.setSample(sample.UID())
    ar._renameAfterCreation()
    ret['ar_id'] = ar.getId()
    brains = resolve_request_lookup(context, request, 'Services')
    service_uids = [p.UID for p in brains]
    new_analyses = ar.setAnalyses(service_uids, specs=specs)
    ar.setRequestID(ar.getId())
    ar.reindexObject()
    event.notify(ObjectInitializedEvent(ar))
    ar.at_post_create_script()
    # Create sample partitions
    parts_and_services = {}
    for _i in range(len(parts)):
        p = parts[_i]
        part_prefix = sample.getId() + "-P"
        if '%s%s' % (part_prefix, _i + 1) in sample.objectIds():
            # Partition already exists on the sample (secondary AR case).
            parts[_i]['object'] = sample['%s%s' % (part_prefix, _i + 1)]
            parts_and_services['%s%s' % (part_prefix, _i + 1)] = p['services']
            part = parts[_i]['object']
        else:
            part = _createObjectByType("SamplePartition", sample, tmpID())
            parts[_i]['object'] = part
            container = None
            preservation = p['preservation']
            parts[_i]['prepreserved'] = False
            part.edit(
                Container=container,
                Preservation=preservation,
            )
            part.processForm()
            if SamplingWorkflowEnabled:
                wftool.doActionFor(part, 'sampling_workflow')
            else:
                wftool.doActionFor(part, 'no_sampling_workflow')
            parts_and_services[part.id] = p['services']
    if SamplingWorkflowEnabled:
        wftool.doActionFor(ar, 'sampling_workflow')
    else:
        wftool.doActionFor(ar, 'no_sampling_workflow')
    # Add analyses to sample partitions
    # XXX jsonapi create AR: right now, all new analyses are linked to the first samplepartition
    # (`part` holds whatever partition the loop above ended on)
    if new_analyses:
        analyses = list(part.getAnalyses())
        analyses.extend(new_analyses)
        part.edit(
            Analyses=analyses,
        )
        for analysis in new_analyses:
            analysis.setSamplePartition(part)
    # If Preservation is required for some partitions,
    # and the SamplingWorkflow is disabled, we need
    # to transition to to_be_preserved manually.
    if not SamplingWorkflowEnabled:
        to_be_preserved = []
        sample_due = []
        lowest_state = 'sample_due'
        for p in sample.objectValues('SamplePartition'):
            if p.getPreservation():
                lowest_state = 'to_be_preserved'
                to_be_preserved.append(p)
            else:
                sample_due.append(p)
        for p in to_be_preserved:
            doActionFor(p, 'to_be_preserved')
        for p in sample_due:
            doActionFor(p, 'sample_due')
        doActionFor(sample, lowest_state)
        for analysis in ar.objectValues('Analysis'):
            doActionFor(analysis, lowest_state)
        doActionFor(ar, lowest_state)
    # receive secondary AR
    if request.get('Sample_id', ''):
        doActionFor(ar, 'sampled')
        doActionFor(ar, 'sample_due')
        # States in which the sample is not yet ready to be received.
        not_receive = ['to_be_sampled', 'sample_due', 'sampled',
                       'to_be_preserved']
        sample_state = wftool.getInfoFor(sample, 'review_state')
        if sample_state not in not_receive:
            doActionFor(ar, 'receive')
        for analysis in ar.getAnalyses(full_objects=1):
            doActionFor(analysis, 'sampled')
            doActionFor(analysis, 'sample_due')
            if sample_state not in not_receive:
                doActionFor(analysis, 'receive')
    # Reject the request outright if it carried fields we never consumed.
    if self.unused:
        raise BadRequest("The following request fields were not used: %s. Request aborted." % self.unused)
    return ret
def create_analysisrequest(
        context,
        request,
        values,
        analyses=None,
        partitions=None,
        specifications=None,
        prices=None
):
    """Create and initialise an AnalysisRequest (AR).

    For a primary AR a new Sample is created; for a secondary AR the
    existing sample is resolved from ``values['Sample']`` (either an
    ISample object or a UID).  Analyses are attached, sample partitions
    are created/located for primary ARs, and the appropriate workflow
    transitions are fired.

    :param context: container (typically the Client) for the new AR
    :param request: the HTTP request, passed through to processForm
    :param values: AR field values; 'Sample' triggers the secondary path
    :param analyses: list of analysis services to attach (default: none)
    :param partitions: optional list of partition dicts; one default
        partition holding all analyses is created when omitted
    :param specifications: optional analysis specs, forwarded to setAnalyses
    :param prices: optional prices, forwarded to setAnalyses
    :returns: the newly created AnalysisRequest object

    Fix: the default for ``analyses`` used to be a mutable ``[]``, which
    is evaluated once and shared across calls; it is now a ``None``
    sentinel with identical observable behavior.
    """
    if analyses is None:
        analyses = []
    # Gather necessary tools
    workflow = getToolByName(context, 'portal_workflow')
    bc = getToolByName(context, 'bika_catalog')
    # Create new sample or locate the existing for secondary AR
    if values.get('Sample'):
        secondary = True
        if ISample.providedBy(values['Sample']):
            sample = values['Sample']
        else:
            sample = bc(UID=values['Sample'])[0].getObject()
        workflow_enabled = sample.getSamplingWorkflowEnabled()
    else:
        secondary = False
        workflow_enabled = context.bika_setup.getSamplingWorkflowEnabled()
        sample = create_sample(context, request, values)
    # Create the Analysis Request
    ar = _createObjectByType('AnalysisRequest', context, tmpID())
    ar.setSample(sample)
    # processform renames the sample, this requires values to contain the Sample.
    values['Sample'] = sample
    ar.processForm(REQUEST=request, values=values)
    # Object has been renamed
    ar.edit(RequestID=ar.getId())
    # Set initial AR state
    workflow_action = 'sampling_workflow' if workflow_enabled \
        else 'no_sampling_workflow'
    workflow.doActionFor(ar, workflow_action)
    # Set analysis request analyses
    analyses = ar.setAnalyses(analyses, prices=prices, specs=specifications)
    if secondary:
        # Only 'sample_due' and 'sample_received' samples can be selected
        # for secondary analyses
        doActionFor(ar, 'sample')
        doActionFor(ar, 'sample_due')
        sample_state = workflow.getInfoFor(sample, 'review_state')
        if sample_state == 'sample_received':
            doActionFor(ar, 'receive')
        for analysis in ar.getAnalyses(full_objects=1):
            doActionFor(analysis, 'sample')
            doActionFor(analysis, 'sample_due')
            analysis_transition_ids = [t['id'] for t in workflow.getTransitionsFor(analysis)]
            if 'receive' in analysis_transition_ids and sample_state == 'sample_received':
                doActionFor(analysis, 'receive')
    if not secondary:
        # Create sample partitions
        if not partitions:
            partitions = [{'services': analyses}]
        for n, partition in enumerate(partitions):
            # Calculate partition id
            partition_prefix = sample.getId() + "-P"
            partition_id = '%s%s' % (partition_prefix, n + 1)
            partition['part_id'] = partition_id
            # Point to or create sample partition
            if partition_id in sample.objectIds():
                partition['object'] = sample[partition_id]
            else:
                partition['object'] = create_samplepartition(
                    sample,
                    partition,
                    analyses
                )
        # If Preservation is required for some partitions,
        # and the SamplingWorkflow is disabled, we need
        # to transition to to_be_preserved manually.
        if not workflow_enabled:
            to_be_preserved = []
            sample_due = []
            lowest_state = 'sample_due'
            for p in sample.objectValues('SamplePartition'):
                if p.getPreservation():
                    lowest_state = 'to_be_preserved'
                    to_be_preserved.append(p)
                else:
                    sample_due.append(p)
            for p in to_be_preserved:
                doActionFor(p, 'to_be_preserved')
            for p in sample_due:
                doActionFor(p, 'sample_due')
            doActionFor(sample, lowest_state)
            doActionFor(ar, lowest_state)
        # Transition pre-preserved partitions
        for p in partitions:
            if 'prepreserved' in p and p['prepreserved']:
                part = p['object']
                state = workflow.getInfoFor(part, 'review_state')
                if state == 'to_be_preserved':
                    workflow.doActionFor(part, 'preserve')
    # Return the newly created Analysis Request
    return ar
def workflow_action_submit(self):
    """Handle the 'submit' workflow action for the selected analyses.

    Persists results, interim fields, remarks, methods, instruments,
    analysts, uncertainties and detection limits taken from the request
    form (only when they differ from stored values), then fires the
    'submit' transition on each selected, active analysis that has a
    non-empty result, and redirects back to the context (or to its
    /manage_results view when the user may edit results).

    NOTE(review): this is a near-duplicate of another
    workflow_action_submit in this file — consider consolidating.
    """
    form = self.request.form
    rc = getToolByName(self.context, REFERENCE_CATALOG)
    action, came_from = WorkflowAction._get_form_workflow_action(self)
    checkPermission = self.context.portal_membership.checkPermission
    # Nothing may be submitted on an inactive context: bail out early.
    if not isActive(self.context):
        message = _('Item is inactive.')
        self.context.plone_utils.addPortalMessage(message, 'info')
        self.request.response.redirect(self.context.absolute_url())
        return
    # calcs.js has kept item_data and form input interim values synced,
    # so the json strings from item_data will be the same as the form values
    item_data = {}
    if 'item_data' in form:
        if isinstance(form['item_data'], list):
            for i_d in form['item_data']:
                for i, d in json.loads(i_d).items():
                    item_data[i] = d
        else:
            item_data = json.loads(form['item_data'])
    selected_analyses = WorkflowAction._get_selected_items(self)
    results = {}
    hasInterims = {}
    # check that the form values match the database
    # save them if not.
    for uid, result in self.request.form.get('Result', [{}])[0].items():
        # if the AR has ReportDryMatter set, get dry_result from form.
        dry_result = ''
        if hasattr(self.context, 'getReportDryMatter') \
           and self.context.getReportDryMatter():
            for k, v in self.request.form['ResultDM'][0].items():
                if uid == k:
                    dry_result = v
                    break
        if uid in selected_analyses:
            analysis = selected_analyses[uid]
        else:
            analysis = rc.lookupObject(uid)
        if not analysis:
            # ignore result if analysis object no longer exists
            continue
        results[uid] = result
        interimFields = item_data[uid]
        if len(interimFields) > 0:
            hasInterims[uid] = True
        else:
            hasInterims[uid] = False
        retested = 'retested' in form and uid in form['retested']
        remarks = form.get('Remarks', [{}, ])[0].get(uid, '')
        # Don't save uneccessary things
        # https://github.com/bikalabs/Bika-LIMS/issues/766:
        # Somehow, using analysis.edit() fails silently when
        # logged in as Analyst.
        if analysis.getInterimFields() != interimFields or \
           analysis.getRetested() != retested or \
           analysis.getRemarks() != remarks:
            analysis.setInterimFields(interimFields)
            analysis.setRetested(retested)
            analysis.setRemarks(remarks)
        # save results separately, otherwise capture date is rewritten
        if analysis.getResult() != result or \
           analysis.getResultDM() != dry_result:
            analysis.setResultDM(dry_result)
            analysis.setResult(result)
    methods = self.request.form.get('Method', [{}])[0]
    instruments = self.request.form.get('Instrument', [{}])[0]
    analysts = self.request.form.get('Analyst', [{}])[0]
    uncertainties = self.request.form.get('Uncertainty', [{}])[0]
    dlimits = self.request.form.get('DetectionLimit', [{}])[0]
    # discover which items may be submitted
    submissable = []
    for uid, analysis in selected_analyses.items():
        analysis_active = isActive(analysis)
        # Need to save the instrument?
        if uid in instruments and analysis_active:
            # TODO: Add SetAnalysisInstrument permission
            # allow_setinstrument = sm.checkPermission(SetAnalysisInstrument)
            allow_setinstrument = True
            # ---8<-----
            if allow_setinstrument == True:
                # The current analysis allows the instrument regards
                # to its analysis service and method?
                if (instruments[uid] == ''):
                    # Empty selection: detach any previously set instrument.
                    previnstr = analysis.getInstrument()
                    if previnstr:
                        previnstr.removeAnalysis(analysis)
                    analysis.setInstrument(None)
                elif analysis.isInstrumentAllowed(instruments[uid]):
                    previnstr = analysis.getInstrument()
                    if previnstr:
                        previnstr.removeAnalysis(analysis)
                    analysis.setInstrument(instruments[uid])
                    instrument = analysis.getInstrument()
                    instrument.addAnalysis(analysis)
        # Need to save the method?
        if uid in methods and analysis_active:
            # TODO: Add SetAnalysisMethod permission
            # allow_setmethod = sm.checkPermission(SetAnalysisMethod)
            allow_setmethod = True
            # ---8<-----
            if allow_setmethod == True and analysis.isMethodAllowed(
                    methods[uid]):
                analysis.setMethod(methods[uid])
        # Need to save the analyst?
        if uid in analysts and analysis_active:
            analysis.setAnalyst(analysts[uid])
        # Need to save the uncertainty?
        if uid in uncertainties and analysis_active:
            analysis.setUncertainty(uncertainties[uid])
        # Need to save the detection limit?
        if analysis_active:
            analysis.setDetectionLimitOperand(dlimits.get(uid, None))
        # Only analyses with a non-empty submitted result are candidates.
        if uid not in results or not results[uid]:
            continue
        can_submit = True
        # guard_submit does a lot of the same stuff, too.
        # the code there has also been commented.
        # we must find a better way to allow dependencies to control
        # this process.
        # for dependency in analysis.getDependencies():
        #     dep_state = workflow.getInfoFor(dependency, 'review_state')
        #     if hasInterims[uid]:
        #         if dep_state in ('to_be_sampled', 'to_be_preserved',
        #                          'sample_due', 'sample_received',
        #                          'attachment_due', 'to_be_verified',):
        #             can_submit = False
        #             break
        #     else:
        #         if dep_state in ('to_be_sampled', 'to_be_preserved',
        #                          'sample_due', 'sample_received',):
        #             can_submit = False
        #             break
        if can_submit and analysis not in submissable:
            submissable.append(analysis)
    # and then submit them.
    for analysis in submissable:
        doActionFor(analysis, 'submit')
    message = PMF("Changes saved.")
    self.context.plone_utils.addPortalMessage(message, 'info')
    if checkPermission(EditResults, self.context):
        self.destination_url = self.context.absolute_url(
        ) + "/manage_results"
    else:
        self.destination_url = self.context.absolute_url()
    self.request.response.redirect(self.destination_url)