def get_transitions_for(self, obj):
    """Get the allowed transitions for the given object
    """
    return api.get_transitions_for(obj)
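# Usage sketch (hedged): each transition returned above is expected to be a
# dict carrying at least an 'id' key, which is how the callers below use it:
#
#   tids = [t['id'] for t in get_transitions_for(ar)]
#   if 'sample' in tids:
#       do_transition_for(ar, 'sample')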
def create_analysisrequest(context, request, values, analyses=None,
                           partitions=None, specifications=None, prices=None):
    """This is meant for general use and should do everything necessary to
    create and initialise an AR and any other required auxiliary objects
    (Sample, SamplePartition, Analysis...)

    :param context: The container in which the ARs will be created.
    :param request: The current Request object.
    :param values: a dict, where keys are AR|Sample schema field names.
    :param analyses: Analysis services list. If specified, augments the
        values in values['Analyses']. May consist of service objects, UIDs,
        or Keywords.
    :param partitions: A list of dictionaries, if specific partitions are
        required. If not specified, the AR's sample is created with a single
        partition.
    :param specifications: These values augment those found in
        values['Specifications'].
    :param prices: Allow different prices to be set for analyses. If not
        set, prices are read from the associated analysis service.
    """
    # Gather necessary tools
    workflow = getToolByName(context, 'portal_workflow')
    bc = getToolByName(context, 'bika_catalog')

    # The analyses passed in are analysis services
    analyses_services = analyses
    analyses = []
    # It's necessary to modify these and we don't want to pollute the
    # parent's data
    values = values.copy()
    analyses_services = analyses_services if analyses_services else []
    anv = values['Analyses'] if values.get('Analyses', None) else []
    analyses_services = anv + analyses_services

    if not analyses_services:
        raise RuntimeError(
            "create_analysisrequest: no analyses services provided")

    # Create a new sample, or locate the existing one for a secondary AR
    if not values.get('Sample', False):
        secondary = False
        workflow_enabled = context.bika_setup.getSamplingWorkflowEnabled()
        sample = create_sample(context, request, values)
    else:
        secondary = True
        sample = get_sample_from_values(context, values)
        workflow_enabled = sample.getSamplingWorkflowEnabled()

    # Create the Analysis Request
    ar = _createObjectByType('AnalysisRequest', context, tmpID())

    # Set some required fields manually before processForm is called
    ar.setSample(sample)
    values['Sample'] = sample
    if values.get('DateSampled', False):
        # Inject the timezone into a selection made by a datewidget, which
        # is timezone naive, i.e. DateSampled is '2017-05-15 01:05' but
        # should be '2017/05/15 01:05:00 GMT+2'. Otherwise processForm =>
        # reindexObject() sets it to GMT+0, which results in an incorrect
        # date record.
        tz = DateTime().timezone()
        datesampled = DateTime(values['DateSampled'] + ' ' + tz)
        values['DateSampled'] = datesampled
    ar.processForm(REQUEST=request, values=values)
    # Object has been renamed
    ar.edit(RequestID=ar.getId())

    # Set the initial AR state
    action = '{0}sampling_workflow'.format('' if workflow_enabled else 'no_')
    workflow.doActionFor(ar, action)

    # Set the analysis request analyses
    service_uids = _resolve_items_to_service_uids(analyses_services)
    # processForm has already created the analyses, but here we create the
    # analyses with specs and prices. This function, even though it is
    # called 'set', deletes the old analyses, so eventually we obtain the
    # desired analyses.
    ar.setAnalyses(service_uids, prices=prices, specs=specifications)
    # Get the analysis objects
    analyses = ar.getAnalyses(full_objects=True)

    # Continue to set the state of the AR
    skip_receive = [
        'to_be_sampled', 'sample_due', 'sampled', 'to_be_preserved'
    ]
    if secondary:
        # Only 'sample_due' and 'sample_received' samples can be selected
        # for secondary analyses
        doActionFor(ar, 'sampled')
        doActionFor(ar, 'sample_due')
        sample_state = workflow.getInfoFor(sample, 'review_state')
        if sample_state not in skip_receive:
            doActionFor(ar, 'receive')

    # Set the state of the analyses we created
    for analysis in analyses:
        revers = analysis.getService().getNumberOfRequiredVerifications()
        analysis.setNumberOfRequiredVerifications(revers)
        doActionFor(analysis, 'sample_due')
        analysis_state = workflow.getInfoFor(analysis, 'review_state')
        if analysis_state not in skip_receive:
            doActionFor(analysis, 'receive')

    if not secondary:
        # Create sample partitions
        if not partitions:
            partitions = values.get('Partitions',
                                    [{'services': service_uids}])
        for n, partition in enumerate(partitions):
            # Calculate the partition id
            partition['object'] = create_samplepartition(
                sample, partition, analyses)

        # If Preservation is required for some partitions, and the
        # SamplingWorkflow is disabled, we need to transition to
        # to_be_preserved manually.
        if not workflow_enabled:
            to_be_preserved = []
            sample_due = []
            lowest_state = 'sample_due'
            for p in sample.objectValues('SamplePartition'):
                if p.getPreservation():
                    lowest_state = 'to_be_preserved'
                    to_be_preserved.append(p)
                else:
                    sample_due.append(p)
            for p in to_be_preserved:
                doActionFor(p, 'to_be_preserved')
            for p in sample_due:
                doActionFor(p, 'sample_due')
            doActionFor(sample, lowest_state)
            doActionFor(ar, lowest_state)

        # Transition pre-preserved partitions
        for p in partitions:
            if 'prepreserved' in p and p['prepreserved']:
                part = p['object']
                state = workflow.getInfoFor(part, 'review_state')
                if state == 'to_be_preserved':
                    workflow.doActionFor(part, 'preserve')

    # Once the AR is fully created, check if there are rejection reasons
    reject_field = values.get('RejectionReasons', '')
    if reject_field and reject_field.get('checkbox', False):
        doActionFor(ar, 'reject')

    # If the Sampling Workflow field values are valid, and the
    # SamplingWorkflow is enabled, we will automatically kick off the
    # "sample" transition now
    tids = [t['id'] for t in get_transitions_for(ar)]
    if 'sample' in tids and ar.getSampler() and ar.getDateSampled():
        do_transition_for(ar, 'sample')

    # Return the newly created Analysis Request
    return ar
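# A minimal call sketch for create_analysisrequest. Per the docstring,
# `values` keys must be AR|Sample schema field names and `analyses` may hold
# service objects, UIDs or keywords; the 'Client', 'Contact' and
# 'SampleType' keys and all *_uid variables below are illustrative
# assumptions, not guaranteed field names.
#
#   values = {
#       'Client': client_uid,          # assumed field name / placeholder
#       'Contact': contact_uid,        # assumed field name / placeholder
#       'SampleType': sampletype_uid,  # assumed field name / placeholder
#       'DateSampled': '2017-05-15 01:05',
#   }
#   ar = create_analysisrequest(client_folder, request, values,
#                               analyses=[service_uid])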
def workflow_script_import(self):
    """Create objects from a valid ARImport
    """

    def convert_date_string(datestr):
        # Normalise the date separators from '-' to '/'
        return datestr.replace('-', '/')

    def lookup_sampler_uid(import_user):
        # Look up the sampler's userid: first by exact user id, then by
        # full name
        found = False
        userid = None
        user_ids = []
        users = getUsers(self, ['LabManager', 'Sampler']).items()
        for (samplerid, samplername) in users:
            if import_user == samplerid:
                found = True
                userid = samplerid
                break
            if import_user == samplername:
                user_ids.append(samplerid)
        if found:
            return userid
        if len(user_ids) == 1:
            return user_ids[0]
        if len(user_ids) > 1:
            # raise ValueError('Sampler %s is ambiguous' % import_user)
            return ''
        # Otherwise
        # raise ValueError('Sampler %s not found' % import_user)
        return ''

    bsc = getToolByName(self, 'bika_setup_catalog')
    workflow = getToolByName(self, 'portal_workflow')
    client = self.aq_parent

    title = _('Submitting AR Import')
    description = _('Creating and initialising objects')
    bar = ProgressBar(self, self.REQUEST, title, description)
    notify(InitialiseProgressBar(bar))

    profiles = [x.getObject() for x in bsc(portal_type='AnalysisProfile')]

    gridrows = self.schema['SampleData'].get(self)
    row_cnt = 0
    for therow in gridrows:
        row = therow.copy()
        row_cnt += 1

        # Create the Sample
        sample = _createObjectByType('Sample', client, tmpID())
        sample.unmarkCreationFlag()
        # First convert all row values into something the field can take
        sample.edit(**row)
        sample._renameAfterCreation()
        event.notify(ObjectInitializedEvent(sample))
        sample.at_post_create_script()
        swe = self.bika_setup.getSamplingWorkflowEnabled()
        if swe:
            workflow.doActionFor(sample, 'sampling_workflow')
        else:
            workflow.doActionFor(sample, 'no_sampling_workflow')

        part = _createObjectByType('SamplePartition', sample, 'part-1')
        part.unmarkCreationFlag()
        renameAfterCreation(part)
        if swe:
            workflow.doActionFor(part, 'sampling_workflow')
        else:
            workflow.doActionFor(part, 'no_sampling_workflow')

        container = self.get_row_container(row)
        if container:
            part.edit(Container=container)

        # Profiles are titles, profile keys, or UIDs: convert them to UIDs.
        newprofiles = []
        for profile_title in row['Profiles']:
            objects = [x for x in profiles
                       if profile_title in (x.getProfileKey(), x.UID(),
                                            x.Title())]
            for obj in objects:
                newprofiles.append(obj.UID())
        row['Profiles'] = newprofiles

        # BBB: in bika.lims < 3.1.9, only one profile is permitted on an AR.
        # The services are all added, but only the first selected profile
        # name is stored.
        row['Profile'] = newprofiles[0] if newprofiles else None

        # Same for analyses
        newanalyses = set(self.get_row_services(row) +
                          self.get_row_profile_services(row))
        row['Analyses'] = []

        # Get the batch
        batch = self.schema['Batch'].get(self)
        if batch:
            row['Batch'] = batch

        # Add AR fields from the schema into this row's data
        row['ClientReference'] = self.getClientReference()
        row['ClientOrderNumber'] = self.getClientOrderNumber()
        row['Contact'] = self.getContact()
        row['DateSampled'] = convert_date_string(row['DateSampled'])
        if row['Sampler']:
            row['Sampler'] = lookup_sampler_uid(row['Sampler'])

        # Create the AR
        ar = _createObjectByType("AnalysisRequest", client, tmpID())
        ar.setSample(sample)
        ar.unmarkCreationFlag()
        ar.edit(**row)
        ar._renameAfterCreation()
        ar.setAnalyses(list(newanalyses))
        for analysis in ar.getAnalyses(full_objects=True):
            analysis.setSamplePartition(part)
        ar.at_post_create_script()
        if swe:
            workflow.doActionFor(ar, 'sampling_workflow')
        else:
            workflow.doActionFor(ar, 'no_sampling_workflow')

        # If the Sampling Workflow field values are valid, and the
        # SamplingWorkflow is enabled, we will automatically kick off the
        # "sample" transition now
        tids = [t['id'] for t in get_transitions_for(ar)]
        if 'sample' in tids and ar.getSampler() and ar.getDateSampled():
            do_transition_for(ar, 'sample')

        progress_index = float(row_cnt) / len(gridrows) * 100
        progress = ProgressState(self.REQUEST, progress_index)
        notify(UpdateProgressEvent(progress))

    # The response has already been written to, so redirect() fails here
    self.REQUEST.response.write(
        '<script>document.location.href="%s"</script>' %
        (self.aq_parent.absolute_url()))
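# Doctest-style sketch of the row helpers above (illustrative input only):
#
#   >>> convert_date_string('2017-05-15 01:05')
#   '2017/05/15 01:05'
#
# lookup_sampler_uid() matches the imported value against both the user ids
# and the full names of 'LabManager'/'Sampler' users, and returns '' rather
# than raising when the value is ambiguous or unknown.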