def test_MultiVerificationType(self):
    # Testing when the same user can verify multiple times
    self.portal.bika_setup.setNumberOfRequiredVerifications(4)
    self.portal.bika_setup.setTypeOfmultiVerification('self_multi_enabled')
    client = self.portal.clients['client-1']
    sampletype = self.portal.bika_setup.bika_sampletypes['sampletype-1']
    values = {'Client': client.UID(),
              'Contact': client.getContacts()[0].UID(),
              'SamplingDate': '2016-12-12',
              'SampleType': sampletype.UID()}
    ar = _createObjectByType("AnalysisRequest", client, tmpID())
    servs = self.portal.bika_setup.bika_analysisservices
    service = servs['analysisservice-3']
    service.setSelfVerification(True)
    an = create_analysis(ar, service)
    member = self.portal.portal_membership.getMemberById('admin')
    an.setVerificators(member.getUserName())
    an.setNumberOfRequiredVerifications(4)
    self.assertEquals(an.isUserAllowedToVerify(member), True)

    # Testing when the same user can verify multiple times, but not
    # consecutively
    self.portal.bika_setup.setTypeOfmultiVerification('self_multi_not_cons')
    self.assertEquals(an.isUserAllowedToVerify(member), False)

    # Testing when the same user can not verify more than once
    self.portal.bika_setup.setTypeOfmultiVerification('self_multi_disabled')
    self.assertEquals(an.isUserAllowedToVerify(member), False)

    an.addVerificator(TEST_USER_NAME)
    self.portal.bika_setup.setTypeOfmultiVerification('self_multi_not_cons')
    self.assertEquals(an.isUserAllowedToVerify(member), True)

    self.portal.bika_setup.setTypeOfmultiVerification('self_multi_disabled')
    self.assertEquals(an.isUserAllowedToVerify(member), False)

def test_MultiVerificationType(self):
    bika_setup = self.portal.bika_setup

    # Testing when the same user can verify multiple times
    bika_setup.setNumberOfRequiredVerifications(4)
    bika_setup.setTypeOfmultiVerification('self_multi_enabled')
    client = self.portal.clients['client-1']
    ar = _createObjectByType("AnalysisRequest", client, tmpID())
    servs = bika_setup.bika_analysisservices
    service = servs['analysisservice-3']
    service.setSelfVerification(True)
    an = create_analysis(ar, service)
    member = self.portal.portal_membership.getMemberById('admin')
    an.setVerificators(member.getUserName())
    an.setNumberOfRequiredVerifications(4)
    self.assertEquals(an.isUserAllowedToVerify(member), True)

    # Testing when the same user can verify multiple times, but not
    # consecutively
    bika_setup.setTypeOfmultiVerification('self_multi_not_cons')
    self.assertEquals(an.isUserAllowedToVerify(member), False)

    # Testing when the same user can not verify more than once
    bika_setup.setTypeOfmultiVerification('self_multi_disabled')
    self.assertEquals(an.isUserAllowedToVerify(member), False)

    an.addVerificator(TEST_USER_NAME)
    bika_setup.setTypeOfmultiVerification('self_multi_not_cons')
    self.assertEquals(an.isUserAllowedToVerify(member), True)

    bika_setup.setTypeOfmultiVerification('self_multi_disabled')
    self.assertEquals(an.isUserAllowedToVerify(member), False)

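A short note summarizing the three TypeOfmultiVerification modes that the two tests above exercise; this only restates what the assertions demonstrate.

# Modes exercised by the assertions above:
#   'self_multi_enabled'  - the same user may verify repeatedly
#   'self_multi_not_cons' - the same user may verify again, but not twice
#                           in a row (another verifier must act in between)
#   'self_multi_disabled' - the same user may verify only once
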
def after_retract(obj):
    """Function triggered after a 'retract' transition for the analysis passed
    in is performed. Retracting an analysis causes its transition to the
    'retracted' state and the creation of a new copy of the same analysis as a
    retest. Note that retraction only affects the single Analysis and has no
    other effect on the status of the Worksheet to which the Analysis is
    assigned or of the Analysis Request to which it belongs (the transition is
    never promoted).
    This function is called automatically by
    bika.lims.workflow.AfterTransitionEventHandler
    """
    # TODO Workflow Analysis - review this function

    # Rename the analysis to make way for its successor.
    # Support multiple retractions by renaming to *-0, *-1, etc
    parent = obj.aq_parent
    kw = obj.getKeyword()
    analyses = [
        x for x in parent.objectValues("Analysis")
        if x.getId().startswith(obj.getId())
    ]

    # LIMS-1290 - Analyst must be able to retract, which creates a new
    # Analysis. So, the _verifyObjectPaste permission check must be cancelled:
    parent._verifyObjectPaste = str
    # This is needed for tests:
    # https://docs.plone.org/develop/plone/content/rename.html
    # Testing warning: the rename mechanism relies on a persistent attribute
    # called _p_jar being present on the content object. By default, this is
    # not the case in unit tests. You need to call transaction.savepoint() to
    # make _p_jar appear on persistent objects.
    # If you don't do this, you'll receive a "CopyError" when calling
    # manage_renameObjects saying that the operation is not supported.
    transaction.savepoint()
    parent.manage_renameObject(kw, "{0}-{1}".format(kw, len(analyses)))
    delattr(parent, '_verifyObjectPaste')

    # Create new analysis from the retracted obj
    analysis = create_analysis(parent, obj)
    changeWorkflowState(analysis, "bika_analysis_workflow", "sample_received")

    # Assign the new analysis to this same worksheet, if any.
    ws = obj.getWorksheet()
    if ws:
        ws.addAnalysis(analysis)
    analysis.reindexObject()

    # Retract our dependencies
    dependencies = obj.getDependencies()
    for dependency in dependencies:
        doActionFor(dependency, 'retract')

    # Retract our dependents
    dependents = obj.getDependents()
    for dependent in dependents:
        doActionFor(dependent, 'retract')
    _reindex_request(obj)

def after_retract(obj):
    """Function triggered after a 'retract' transition for the analysis passed
    in is performed. Retracting an analysis causes its transition to the
    'retracted' state and the creation of a new copy of the same analysis as a
    retest. Note that retraction only affects the single Analysis and has no
    other effect on the status of the Worksheet to which the Analysis is
    assigned or of the Analysis Request to which it belongs (the transition is
    never promoted).
    This function is called automatically by
    bika.lims.workflow.AfterTransitionEventHandler
    """
    # TODO Workflow Analysis - review this function

    # Rename the analysis to make way for its successor.
    # Support multiple retractions by renaming to *-0, *-1, etc
    parent = obj.aq_parent
    kw = obj.getKeyword()
    analyses = [x for x in parent.objectValues("Analysis")
                if x.getId().startswith(obj.getId())]

    # LIMS-1290 - Analyst must be able to retract, which creates a new
    # Analysis. So, the _verifyObjectPaste permission check must be cancelled:
    parent._verifyObjectPaste = str
    # This is needed for tests:
    # https://docs.plone.org/develop/plone/content/rename.html
    # Testing warning: the rename mechanism relies on a persistent attribute
    # called _p_jar being present on the content object. By default, this is
    # not the case in unit tests. You need to call transaction.savepoint() to
    # make _p_jar appear on persistent objects.
    # If you don't do this, you'll receive a "CopyError" when calling
    # manage_renameObjects saying that the operation is not supported.
    transaction.savepoint()
    parent.manage_renameObject(kw, "{0}-{1}".format(kw, len(analyses)))
    delattr(parent, '_verifyObjectPaste')

    # Create new analysis from the retracted obj
    analysis = create_analysis(parent, obj)
    changeWorkflowState(
        analysis, "bika_analysis_workflow", "sample_received")

    # Assign the new analysis to this same worksheet, if any.
    ws = obj.getWorksheet()
    if ws:
        ws.addAnalysis(analysis)
    analysis.reindexObject()

    # Retract our dependencies
    dependencies = obj.getDependencies()
    for dependency in dependencies:
        doActionFor(dependency, 'retract')

    # Retract our dependents
    dependents = obj.getDependents()
    for dependent in dependents:
        doActionFor(dependent, 'retract')
    _reindex_request(obj)

def after_retract(obj):
    """Function triggered after a 'retract' transition for the analysis passed
    in is performed. Retracting an analysis causes its transition to the
    'retracted' state and the creation of a new copy of the same analysis as a
    retest. Note that retraction only affects the single Analysis and has no
    other effect on the status of the Worksheet to which the Analysis is
    assigned or of the Analysis Request to which it belongs (the transition is
    never promoted).
    This function is called automatically by
    bika.lims.workflow.AfterTransitionEventHandler
    """
    # TODO Workflow Analysis - review this function

    # Rename the analysis to make way for its successor.
    # Support multiple retractions by renaming to *-0, *-1, etc
    parent = obj.aq_parent
    kw = obj.getKeyword()
    analyses = [
        x for x in parent.objectValues("Analysis")
        if x.getId().startswith(obj.getId())
    ]

    # LIMS-1290 - Analyst must be able to retract, which creates a new
    # Analysis. So, the _verifyObjectPaste permission check must be cancelled:
    parent._verifyObjectPaste = str
    parent.manage_renameObject(kw, "{0}-{1}".format(kw, len(analyses)))
    delattr(parent, '_verifyObjectPaste')

    # Create new analysis from the retracted obj
    analysis = create_analysis(parent, obj)
    changeWorkflowState(analysis, "bika_analysis_workflow", "sample_received")

    # Assign the new analysis to this same worksheet, if any.
    ws = obj.getWorksheet()
    if ws:
        ws.addAnalysis(analysis)
    analysis.reindexObject()

    # Retract our dependencies
    dependencies = obj.getDependencies()
    for dependency in dependencies:
        doActionFor(dependency, 'retract')

    # Retract our dependents
    dependents = obj.getDependents()
    for dependent in dependents:
        doActionFor(dependent, 'retract')
    _reindex_request(obj)

def after_retract(analysis):
    """Function triggered after a 'retract' transition for the analysis passed
    in is performed. The analysis transitions to the "retracted" state and a
    new copy of the analysis is created. The copy's initial state is
    "unassigned", unless the retracted analysis was assigned to a worksheet.
    In such a case, the copy is transitioned to the 'assigned' state too
    """
    # Retract our dependents (analyses that depend on this analysis)
    cascade_to_dependents(analysis, "retract")

    # Retract our dependencies (analyses this analysis depends on)
    promote_to_dependencies(analysis, "retract")

    # Rename the analysis to make way for its successor.
    # Support multiple retractions by renaming to *-0, *-1, etc
    parent = analysis.aq_parent
    keyword = analysis.getKeyword()

    # Get only those that are analyses and with the same keyword as the
    # original
    analyses = parent.getAnalyses(full_objects=True)
    analyses = filter(lambda an: an.getKeyword() == keyword, analyses)

    # TODO This needs to get managed by the ID server in the near future!
    new_id = '{}-{}'.format(keyword, len(analyses))

    # Create a copy of the retracted analysis
    an_uid = api.get_uid(analysis)
    new_analysis = create_analysis(parent, analysis, id=new_id,
                                   RetestOf=an_uid)
    new_analysis.setResult("")
    new_analysis.setResultCaptureDate(None)
    new_analysis.reindexObject()
    logger.info("Retest for {} ({}) created: {}".format(
        keyword, api.get_id(analysis), api.get_id(new_analysis)))

    # Assign the new analysis to this same worksheet, if any.
    worksheet = analysis.getWorksheet()
    if worksheet:
        worksheet.addAnalysis(new_analysis)

    # Try to rollback the Analysis Request
    if IRequestAnalysis.providedBy(analysis):
        doActionFor(analysis.getRequest(), "rollback_to_receive")
        reindex_request(analysis)

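For illustration, a sketch of the id scheme this version of after_retract produces: the retest copy gets a suffix derived from the count of same-keyword siblings at retraction time. The keyword "Cu" is hypothetical, not taken from the snippet above.

# Hypothetical sequence, assuming an analysis with keyword "Cu":
#   len(analyses) == 1  ->  new_id == "Cu-1"   (first retest)
#   len(analyses) == 2  ->  new_id == "Cu-2"   (second retest)
# i.e. '{}-{}'.format(keyword, len(analyses)), where analyses are the
# sibling analyses sharing the same keyword, including the retracted one.
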
def add_analysis(self, instance, service, **kwargs):
    service_uid = api.get_uid(service)

    # Ensure we have suitable parameters
    specs = kwargs.get("specs") or {}

    # Get the hidden status for the service
    hidden = kwargs.get("hidden") or []
    hidden = filter(lambda d: d.get("uid") == service_uid, hidden)
    hidden = hidden and hidden[0].get("hidden") or service.getHidden()

    # Get the price for the service
    prices = kwargs.get("prices") or {}
    price = prices.get(service_uid) or service.getPrice()

    # Gets the analysis or creates the analysis for this service
    # Note this returns a list, because it is possible to have multiple
    # partitions with the same analysis
    analyses = self.resolve_analyses(instance, service)
    if not analyses:
        # Create the analysis
        keyword = service.getKeyword()
        logger.info("Creating new analysis '{}'".format(keyword))
        analysis = create_analysis(instance, service)
        analyses.append(analysis)

    skip = ["cancelled", "retracted", "rejected"]
    for analysis in analyses:
        # Skip analyses that are better left unmodified
        if api.get_review_status(analysis) in skip:
            continue

        # Set the hidden status
        analysis.setHidden(hidden)

        # Set the price of the Analysis
        analysis.setPrice(price)

        # Set the internal use status
        parent_sample = analysis.getRequest()
        analysis.setInternalUse(parent_sample.getInternalUse())

        # Set the result range to the analysis
        analysis_rr = specs.get(service_uid) or analysis.getResultsRange()
        analysis.setResultsRange(analysis_rr)
        analysis.reindexObject()

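A minimal sketch of how the keyword arguments above appear to be shaped, inferred only from how add_analysis reads them: "hidden" is a list of {uid, hidden} mappings, "prices" maps service UID to price, and "specs" maps service UID to a result range. The `field`, `my_sample`, and `my_service` names and the UID value are hypothetical.

# Hypothetical call; argument shapes mirror the kwargs lookups above.
field.add_analysis(
    my_sample,                                      # the AnalysisRequest
    my_service,                                     # the AnalysisService
    hidden=[{"uid": "abc123", "hidden": True}],     # hypothetical UID
    prices={"abc123": "15.00"},
    specs={"abc123": {"min": "5", "max": "10"}},
)
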
def after_retract(analysis):
    """Function triggered after a 'retract' transition for the analysis passed
    in is performed. The analysis transitions to the "retracted" state and a
    new copy of the analysis is created. The copy's initial state is
    "unassigned", unless the retracted analysis was assigned to a worksheet.
    In such a case, the copy is transitioned to the 'assigned' state too
    """
    # Rename the analysis to make way for its successor.
    # Support multiple retractions by renaming to *-0, *-1, etc
    parent = analysis.aq_parent
    keyword = analysis.getKeyword()
    analyses = filter(lambda an: an.getKeyword() == keyword,
                      parent.objectValues("Analysis"))

    # Rename the retracted analysis
    # https://docs.plone.org/develop/plone/content/rename.html
    # The _verifyObjectPaste permission check must be cancelled
    parent._verifyObjectPaste = str
    retracted_id = '{}-{}'.format(keyword, len(analyses))
    # Make sure all persistent objects have a _p_jar attribute
    transaction.savepoint(optimistic=True)
    parent.manage_renameObject(analysis.getId(), retracted_id)
    delattr(parent, '_verifyObjectPaste')

    # Create a copy of the retracted analysis
    analysis_uid = api.get_uid(analysis)
    new_analysis = create_analysis(parent, analysis, RetestOf=analysis_uid)

    # Assign the new analysis to this same worksheet, if any.
    worksheet = analysis.getWorksheet()
    if worksheet:
        worksheet.addAnalysis(new_analysis)

    # Retract our dependents (analyses that depend on this analysis)
    cascade_to_dependents(analysis, "retract")

    # Try to rollback the Analysis Request
    if IRequestAnalysis.providedBy(analysis):
        doActionFor(analysis.getRequest(), "rollback_to_receive")
        reindex_request(analysis)

def create_ast_analysis(sample, keyword, microorganism, antibiotics):
    """Creates a new AST analysis
    """
    # Convert antibiotics to interim fields
    interim_fields = map(lambda ab: to_interim(keyword, ab), antibiotics)

    # Create a new ID to prevent clashes
    new_id = new_analysis_id(sample, keyword)

    # Create the analysis
    service = get_service(keyword)
    analysis = create_analysis(sample, service, id=new_id)

    # Assign the name of the microorganism as the title
    title = get_analysis_title(keyword, microorganism)
    short_title = api.get_title(microorganism)
    analysis.setTitle(title)
    analysis.setShortTitle(short_title)

    # Assign the antibiotics as interim fields
    analysis.setInterimFields(interim_fields)

    # Compute all combinations of interim/antibiotic and possible result and
    # generate the result options for this analysis (the "Result" field is
    # never displayed and is only used for reporting)
    result_options = get_result_options(analysis)
    analysis.setResultOptions(result_options)

    # Apply the IASTAnalysis and IInternalUse marker interfaces
    alsoProvides(analysis, IASTAnalysis)
    alsoProvides(analysis, IInternalUse)

    # Initialize the analysis and reindex
    doActionFor(analysis, "initialize")
    analysis.reindexObject()
    return analysis

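A minimal usage sketch under stated assumptions: `my_sample`, `my_microorganism`, and `my_antibiotics` are hypothetical objects of the kinds the function body implies (a sample, a microorganism content object carrying a title, and a list of antibiotics accepted by to_interim); the "senaite_ast" keyword is likewise an assumption.

# Hypothetical call; none of these names come from the snippet above.
analysis = create_ast_analysis(
    my_sample,          # the sample the AST analysis is attached to
    "senaite_ast",      # keyword resolving to a service via get_service
    my_microorganism,   # provides the analysis title and short title
    my_antibiotics,     # mapped to interim fields via to_interim
)
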
def set(self, instance, items, prices=None, specs=None, hidden=None, **kw):
    """Set/Assign Analyses to this AR

    :param items: List of Analysis objects/brains, AnalysisService
                  objects/brains and/or Analysis Service uids
    :type items: list
    :param prices: Mapping of AnalysisService UID -> price
    :type prices: dict
    :param specs: List of AnalysisService UID -> Result Range mappings
    :type specs: list
    :param hidden: List of AnalysisService UID -> Hidden mappings
    :type hidden: list
    :returns: list of new assigned Analyses
    """
    # This setter returns a list of new set Analyses
    new_analyses = []

    # Current assigned analyses
    analyses = instance.objectValues("Analysis")

    # Analyses which are in a non-open state must be retained, except those
    # that are in a registered state (the sample has not been received)
    non_open_analyses = filter(lambda an: not an.isOpen(), analyses)
    non_open_analyses = filter(
        lambda an: api.get_workflow_status_of(an) != "registered",
        non_open_analyses)

    # Prevent removing all analyses
    #
    # N.B.: Non-open analyses are rendered disabled in the HTML form.
    #       Therefore, their UIDs are not included in the submitted UIDs.
    if not items and not non_open_analyses:
        logger.warn("Not allowed to remove all Analyses from AR.")
        return new_analyses

    # Bail out if items is not a list type
    if not isinstance(items, (list, tuple)):
        raise TypeError(
            "Items parameter must be a tuple or list, got '{}'".format(
                type(items)))

    # Bail out if the AR is inactive
    if not api.is_active(instance):
        raise Unauthorized("Inactive ARs can not be modified")

    # Bail out if the user does not have the required permission
    if not check_permission(AddAnalysis, instance):
        raise Unauthorized(
            "You do not have the '{}' permission".format(AddAnalysis))

    # Convert the items to a valid list of AnalysisServices
    services = filter(None, map(self._to_service, items))

    # Calculate dependencies
    # FIXME Infinite recursion error possible here, if the formula includes
    #       the Keyword of the Service that includes the Calculation
    dependencies = map(lambda s: s.getServiceDependencies(), services)
    dependencies = list(itertools.chain.from_iterable(dependencies))

    # Merge dependencies and services
    services = set(services + dependencies)

    # Modify existing AR specs with new form values of selected analyses.
    self._update_specs(instance, specs)

    # Create a mapping of Service UID -> Hidden status
    if hidden is None:
        hidden = []
    hidden = dict(map(lambda d: (d.get("uid"), d.get("hidden")), hidden))

    # Ensure we have a prices dictionary
    if prices is None:
        prices = dict()

    # CREATE/MODIFY ANALYSES

    for service in services:
        service_uid = api.get_uid(service)
        keyword = service.getKeyword()

        # Create the Analysis if it doesn't exist
        if shasattr(instance, keyword):
            analysis = instance._getOb(keyword)
        else:
            analysis = create_analysis(instance, service)
            new_analyses.append(analysis)

        # Set the hidden status
        analysis.setHidden(hidden.get(service_uid, False))

        # Set the price of the Analysis
        analysis.setPrice(prices.get(service_uid, service.getPrice()))

    # DELETE ANALYSES

    # Service UIDs
    service_uids = map(api.get_uid, services)

    # Analyses IDs to delete
    delete_ids = []

    # Assigned Attachments
    assigned_attachments = []

    for analysis in analyses:
        service_uid = analysis.getServiceUID()

        # Skip if the Service is selected
        if service_uid in service_uids:
            continue

        # Skip non-open Analyses
        if analysis in non_open_analyses:
            continue

        # Remember assigned attachments
        # https://github.com/senaite/senaite.core/issues/1025
        assigned_attachments.extend(analysis.getAttachment())
        analysis.setAttachment([])

        # If it is assigned to a worksheet, unassign it before deletion.
        worksheet = analysis.getWorksheet()
        if worksheet:
            worksheet.removeAnalysis(analysis)

        # Unset the partition reference
        # TODO Remove in >v1.3.0 - This is kept for backwards-compatibility
        part = analysis.getSamplePartition()
        if part:
            # From this partition, remove the reference to the current
            # analysis that is going to be removed, to prevent inconsistent
            # states (Sample Partitions referencing Analyses that do not
            # exist anymore)
            an_uid = api.get_uid(analysis)
            part_ans = part.getAnalyses() or []
            part_ans = filter(lambda an: api.get_uid(an) != an_uid, part_ans)
            part.setAnalyses(part_ans)

        # Unset the Analysis-to-Partition reference
        analysis.setSamplePartition(None)
        delete_ids.append(analysis.getId())

    if delete_ids:
        # Note: subscriber might promote the AR
        instance.manage_delObjects(ids=delete_ids)

    # Remove orphaned attachments
    for attachment in assigned_attachments:
        # only delete attachments which are no further linked
        if not attachment.getLinkedAnalyses():
            logger.info("Deleting attachment: {}".format(attachment.getId()))
            attachment_id = api.get_id(attachment)
            api.get_parent(attachment).manage_delObjects(attachment_id)

    return new_analyses

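A small standalone sketch of the `hidden` conversion performed by the setter above: the list-of-mappings form accepted by the field is flattened into a UID-keyed lookup dict. The UID strings are hypothetical.

# Hypothetical input and the resulting lookup dict:
hidden = [{"uid": "uid-1", "hidden": True},
          {"uid": "uid-2", "hidden": False}]
hidden = dict(map(lambda d: (d.get("uid"), d.get("hidden")), hidden))
# -> {"uid-1": True, "uid-2": False}
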
def set(self, instance, items, prices=None, specs=None, **kwargs):
    """Set/Assign Analyses to this AR

    :param items: List of Analysis objects/brains, AnalysisService
                  objects/brains and/or Analysis Service uids
    :type items: list
    :param prices: Mapping of AnalysisService UID -> price
    :type prices: dict
    :param specs: List of AnalysisService UID -> Result Range Record mappings
    :type specs: list
    :returns: list of new assigned Analyses
    """
    # This setter returns a list of new set Analyses
    new_analyses = []

    # Prevent removing all Analyses
    if not items:
        logger.warn("Not allowed to remove all Analyses from AR.")
        return new_analyses

    # Bail out if items is not a list type
    if not isinstance(items, (list, tuple)):
        raise TypeError(
            "Items parameter must be a tuple or list, got '{}'".format(
                type(items)))

    # Bail out if the AR is in a frozen state
    if self._is_frozen(instance):
        raise ValueError(
            "Analyses can not be modified for inactive/verified ARs")

    # Convert the items to a valid list of AnalysisServices
    services = filter(None, map(self._to_service, items))

    # Calculate dependencies
    # FIXME Infinite recursion error possible here, if the formula includes
    #       the Keyword of the Service that includes the Calculation
    dependencies = map(lambda s: s.getServiceDependencies(), services)
    dependencies = list(itertools.chain.from_iterable(dependencies))

    # Merge dependencies and services
    services = set(services + dependencies)

    # Service UIDs
    service_uids = map(api.get_uid, services)

    # Modify existing AR specs with new form values of selected analyses.
    self._update_specs(instance, specs)

    for service in services:
        keyword = service.getKeyword()

        # Create the Analysis if it doesn't exist
        if shasattr(instance, keyword):
            analysis = instance._getOb(keyword)
        else:
            # TODO Entry point for interims assignment and Calculation
            #      decoupling from Analysis. See comments PR#593
            analysis = create_analysis(instance, service)
            # TODO Remove when the `create_analysis` function supports this
            # Set the interim fields only for newly created Analyses
            self._update_interims(analysis, service)
            new_analyses.append(analysis)

        # Set the price of the Analysis
        self._update_price(analysis, service, prices)

    # delete analyses
    delete_ids = []
    for analysis in instance.objectValues('Analysis'):
        service_uid = analysis.getServiceUID()

        # Skip assigned Analyses
        if service_uid in service_uids:
            continue

        # Skip Analyses in frozen states
        if self._is_frozen(analysis, "retract"):
            logger.warn("Inactive/verified/retracted Analyses can not be "
                        "removed.")
            continue

        # If it is assigned to a worksheet, unassign it before deletion.
        if self._is_assigned_to_worksheet(analysis):
            backrefs = self._get_assigned_worksheets(analysis)
            ws = backrefs[0]
            ws.removeAnalysis(analysis)

        # Unset the partition reference
        part = analysis.getSamplePartition()
        if part:
            # From this partition, remove the reference to the current
            # analysis that is going to be removed, to prevent inconsistent
            # states (Sample Partitions referencing Analyses that do not
            # exist anymore)
            an_uid = api.get_uid(analysis)
            part_ans = part.getAnalyses() or []
            part_ans = filter(lambda an: api.get_uid(an) != an_uid, part_ans)
            part.setAnalyses(part_ans)

        # Unset the Analysis-to-Partition reference
        analysis.setSamplePartition(None)
        delete_ids.append(analysis.getId())

    if delete_ids:
        # Note: subscriber might promote the AR
        instance.manage_delObjects(ids=delete_ids)

    return new_analyses

def set(self, instance, service_uids, prices=None, specs={}, **kwargs):
    """service_uids are the services selected on the AR Add/Edit form.
    prices is a service_uid keyed dictionary containing the prices entered
    on the form.
    specs is a uid keyed dict with values of {min:, max:, error:}
    """
    if not service_uids:
        return

    assert type(service_uids) in (list, tuple)

    workflow = instance.portal_workflow

    # one can only edit Analyses up to a certain state.
    ar_state = workflow.getInfoFor(instance, 'review_state', '')
    assert ar_state in ('sample_registered', 'sampled',
                        'to_be_sampled', 'to_be_preserved',
                        'sample_due', 'sample_received',
                        'attachment_due', 'to_be_verified')

    bsc = getToolByName(instance, 'bika_setup_catalog')
    services = bsc(UID=service_uids)
    new_analyses = []

    for service in services:
        service_uid = service.UID
        service = service.getObject()
        keyword = service.getKeyword()
        price = prices[service_uid] if prices and service_uid in prices \
            else service.getPrice()
        vat = Decimal(service.getVAT())

        # analysis->InterimFields
        calc = service.getCalculation()
        interim_fields = calc and list(calc.getInterimFields()) or []

        # override defaults from service->InterimFields
        service_interims = service.getInterimFields()
        sif = dict([[x['keyword'], x.get('value', '')]
                    for x in service_interims])
        for i, i_f in enumerate(interim_fields):
            if i_f['keyword'] in sif:
                interim_fields[i]['value'] = sif[i_f['keyword']]
                service_interims = [x for x in service_interims
                                    if x['keyword'] != i_f['keyword']]
        # Add remaining service interims to the analysis
        for v in service_interims:
            interim_fields.append(v)

        # create the analysis if it doesn't exist
        if hasattr(instance, keyword):
            analysis = instance._getOb(keyword)
        else:
            analysis = create_analysis(
                instance, service, keyword, interim_fields
            )
            new_analyses.append(analysis)
            # Note: subscriber might retract and/or unassign the AR

        spec = specs[service_uid] if specs and service_uid in specs \
            else None
        # If no specification came to us from the form, then we will
        # see if there is a spec for this SampleType & AnalysisService
        if not spec:
            spec = analysis.get_default_specification()
        analysis.specification = spec

        # XXX Price?
        # analysis.setPrice(price)

    # delete analyses
    delete_ids = []
    for analysis in instance.objectValues('Analysis'):
        service_uid = analysis.Schema()['Service'].getRaw(analysis)
        if service_uid not in service_uids:
            # If it is verified or published, don't delete it.
            if workflow.getInfoFor(analysis, 'review_state') \
                    in ('verified', 'published'):
                continue  # log it
            # If it is assigned to a worksheet, unassign it before deletion.
            elif workflow.getInfoFor(analysis,
                                     'worksheetanalysis_review_state') \
                    == 'assigned':
                ws = analysis.getBackReferences("WorksheetAnalysis")[0]
                ws.removeAnalysis(analysis)
            # Unset the partition reference
            analysis.edit(SamplePartition=None)
            delete_ids.append(analysis.getId())

    if delete_ids:
        # Note: subscriber might promote the AR
        instance.manage_delObjects(ids=delete_ids)

    return new_analyses

def set(self, instance, service_uids, prices=None, specs=None, **kwargs):
    """Set the 'Analyses' field value, by creating and removing Analysis
    objects from the AR.

    service_uids is a list: The UIDs of all services which should exist in
    the AR. If a service is not included here, the corresponding Analysis
    will be removed.

    prices is a dictionary:
        key = AnalysisService UID
        value = price

    specs is a dictionary:
        key = AnalysisService UID
        value = dictionary: defined in ResultsRange field definition
    """
    if not service_uids:
        return

    assert type(service_uids) in (list, tuple)

    bsc = getToolByName(instance, 'bika_setup_catalog')
    workflow = getToolByName(instance, 'portal_workflow')

    # one can only edit Analyses up to a certain state.
    ar_state = workflow.getInfoFor(instance, 'review_state', '')
    assert ar_state in ('sample_registered', 'sampled',
                        'to_be_sampled', 'to_be_preserved',
                        'sample_due', 'sample_received',
                        'attachment_due', 'to_be_verified')

    # - Modify existing AR specs with new form values for selected analyses.
    # - New analysis requests are also using this function, so ResultsRange
    #   may be undefined. In this case, specs= will contain the entire
    #   AR spec.
    rr = instance.getResultsRange()
    specs = specs if specs else []
    for s in specs:
        s_in_rr = False
        for i, r in enumerate(rr):
            if s['keyword'] == r['keyword']:
                rr[i].update(s)
                s_in_rr = True
        if not s_in_rr:
            rr.append(s)
    instance.setResultsRange(rr)

    new_analyses = []
    proxies = bsc(UID=service_uids)
    for proxy in proxies:
        service = proxy.getObject()
        service_uid = service.UID()
        keyword = service.getKeyword()
        price = prices[service_uid] if prices and service_uid in prices \
            else service.getPrice()
        vat = Decimal(service.getVAT())

        # analysis->InterimFields
        calc = service.getCalculation()
        interim_fields = calc and list(calc.getInterimFields()) or []

        # override defaults from service->InterimFields
        service_interims = service.getInterimFields()
        sif = dict([(x['keyword'], x.get('value', ''))
                    for x in service_interims])
        for i, i_f in enumerate(interim_fields):
            if i_f['keyword'] in sif:
                interim_fields[i]['value'] = sif[i_f['keyword']]
                service_interims = [
                    x for x in service_interims
                    if x['keyword'] != i_f['keyword']
                ]
        # Add remaining service interims to the analysis
        for v in service_interims:
            interim_fields.append(v)

        # create the analysis if it doesn't exist
        if shasattr(instance, keyword):
            analysis = instance._getOb(keyword)
        else:
            analysis = create_analysis(instance, service, keyword,
                                       interim_fields)
            new_analyses.append(analysis)
        for i, r in enumerate(rr):
            if r['keyword'] == analysis.getService().getKeyword():
                r['uid'] = analysis.UID()

        # XXX Price?
        # analysis.setPrice(price)

    # We add rr to the AR after we create all the analyses
    instance.setResultsRange(rr)

    # delete analyses
    delete_ids = []
    for analysis in instance.objectValues('Analysis'):
        service_uid = analysis.Schema()['Service'].getRaw(analysis)
        if service_uid not in service_uids:
            # If it is verified or published, don't delete it.
            if workflow.getInfoFor(analysis, 'review_state') \
                    in ('verified', 'published'):
                continue  # log it
            # If it is assigned to a worksheet, unassign it before deletion.
            elif workflow.getInfoFor(
                    analysis, 'worksheetanalysis_review_state') == 'assigned':
                ws = analysis.getBackReferences("WorksheetAnalysis")[0]
                ws.removeAnalysis(analysis)
            # Unset the partition reference
            analysis.edit(SamplePartition=None)
            delete_ids.append(analysis.getId())

    if delete_ids:
        # Note: subscriber might promote the AR
        instance.manage_delObjects(ids=delete_ids)

    return new_analyses

def set(self, instance, service_uids, prices=None, specs=None, **kwargs):
    """Set the 'Analyses' field value, by creating and removing Analysis
    objects from the AR.

    service_uids is a list: The UIDs of all services which should exist in
    the AR. If a service is not included here, the corresponding Analysis
    will be removed.

    prices is a dictionary:
        key = AnalysisService UID
        value = price

    specs is a dictionary:
        key = AnalysisService UID
        value = dictionary: defined in ResultsRange field definition
    """
    if not service_uids:
        return

    assert type(service_uids) in (list, tuple)

    bsc = getToolByName(instance, 'bika_setup_catalog')
    workflow = getToolByName(instance, 'portal_workflow')

    # one can only edit Analyses up to a certain state.
    ar_state = workflow.getInfoFor(instance, 'review_state', '')
    assert ar_state in ('sample_registered', 'sampled',
                        'to_be_sampled', 'to_be_preserved',
                        'sample_due', 'sample_received',
                        'attachment_due', 'to_be_verified')

    # - Modify existing AR specs with new form values for selected analyses.
    # - New analysis requests are also using this function, so ResultsRange
    #   may be undefined. In this case, specs= will contain the entire
    #   AR spec.
    rr = instance.getResultsRange()
    specs = specs if specs else []
    for s in specs:
        s_in_rr = False
        for i, r in enumerate(rr):
            if s['keyword'] == r['keyword']:
                rr[i].update(s)
                s_in_rr = True
        if not s_in_rr:
            rr.append(s)
    instance.setResultsRange(rr)

    new_analyses = []
    proxies = bsc(UID=service_uids)
    for proxy in proxies:
        service = proxy.getObject()
        service_uid = service.UID()
        keyword = service.getKeyword()
        price = prices[service_uid] if prices and service_uid in prices \
            else service.getPrice()
        vat = Decimal(service.getVAT())

        # analysis->InterimFields
        calc = service.getCalculation()
        interim_fields = calc and list(calc.getInterimFields()) or []

        # override defaults from service->InterimFields
        service_interims = service.getInterimFields()
        sif = dict([(x['keyword'], x.get('value', ''))
                    for x in service_interims])
        for i, i_f in enumerate(interim_fields):
            if i_f['keyword'] in sif:
                interim_fields[i]['value'] = sif[i_f['keyword']]
                service_interims = [x for x in service_interims
                                    if x['keyword'] != i_f['keyword']]
        # Add remaining service interims to the analysis
        for v in service_interims:
            interim_fields.append(v)

        # create the analysis if it doesn't exist
        if shasattr(instance, keyword):
            analysis = instance._getOb(keyword)
        else:
            analysis = create_analysis(
                instance, service, keyword, interim_fields
            )
            new_analyses.append(analysis)
        for i, r in enumerate(rr):
            if r['keyword'] == analysis.getService().getKeyword():
                r['uid'] = analysis.UID()

        # XXX Price?
        # analysis.setPrice(price)

    # We add rr to the AR after we create all the analyses
    instance.setResultsRange(rr)

    # delete analyses
    delete_ids = []
    for analysis in instance.objectValues('Analysis'):
        service_uid = analysis.Schema()['Service'].getRaw(analysis)
        if service_uid not in service_uids:
            # If it is verified or published, don't delete it.
            if workflow.getInfoFor(analysis, 'review_state') \
                    in ('verified', 'published'):
                continue  # log it
            # If it is assigned to a worksheet, unassign it before deletion.
            elif workflow.getInfoFor(analysis,
                                     'worksheetanalysis_review_state') \
                    == 'assigned':
                ws = analysis.getBackReferences("WorksheetAnalysis")[0]
                ws.removeAnalysis(analysis)
            # Unset the partition reference
            analysis.edit(SamplePartition=None)
            delete_ids.append(analysis.getId())

    if delete_ids:
        # Note: subscriber might promote the AR
        instance.manage_delObjects(ids=delete_ids)

    return new_analyses

def set(self, instance, items, prices=None, specs=None, **kwargs):
    """Set/Assign Analyses to this AR

    :param items: List of Analysis objects/brains, AnalysisService
                  objects/brains and/or Analysis Service uids
    :type items: list
    :param prices: Mapping of AnalysisService UID -> price
    :type prices: dict
    :param specs: List of AnalysisService UID -> Result Range Record mappings
    :type specs: list
    :returns: list of new assigned Analyses
    """
    # This setter returns a list of new set Analyses
    new_analyses = []

    # Prevent removing all Analyses
    if not items:
        logger.warn("Not allowed to remove all Analyses from AR.")
        return new_analyses

    # Bail out if items is not a list type
    if not isinstance(items, (list, tuple)):
        raise TypeError(
            "Items parameter must be a tuple or list, got '{}'".format(
                type(items)))

    # Bail out if the AR is in a frozen state
    if self._is_frozen(instance):
        raise ValueError(
            "Analyses can not be modified for inactive/verified ARs")

    # Convert the items to a valid list of AnalysisServices
    services = filter(None, map(self._to_service, items))

    # Calculate dependencies
    # FIXME Infinite recursion error possible here, if the formula includes
    #       the Keyword of the Service that includes the Calculation
    dependencies = map(lambda s: s.getServiceDependencies(), services)
    dependencies = list(itertools.chain.from_iterable(dependencies))

    # Merge dependencies and services
    services = set(services + dependencies)

    # Service UIDs
    service_uids = map(api.get_uid, services)

    # Modify existing AR specs with new form values of selected analyses.
    self._update_specs(instance, specs)

    for service in services:
        keyword = service.getKeyword()

        # Create the Analysis if it doesn't exist
        if shasattr(instance, keyword):
            analysis = instance._getOb(keyword)
        else:
            # TODO Entry point for interims assignment and Calculation
            #      decoupling from Analysis. See comments PR#593
            analysis = create_analysis(instance, service)
            # TODO Remove when the `create_analysis` function supports this
            # Set the interim fields only for newly created Analyses
            self._update_interims(analysis, service)
            new_analyses.append(analysis)

        # Set the price of the Analysis
        self._update_price(analysis, service, prices)

    # delete analyses
    delete_ids = []
    for analysis in instance.objectValues('Analysis'):
        service_uid = analysis.getServiceUID()

        # Skip assigned Analyses
        if service_uid in service_uids:
            continue

        # Skip Analyses in frozen states
        if self._is_frozen(analysis):
            logger.warn("Inactive/verified Analyses can not be removed.")
            continue

        # If it is assigned to a worksheet, unassign it before deletion.
        if self._is_assigned_to_worksheet(analysis):
            backrefs = self._get_assigned_worksheets(analysis)
            ws = backrefs[0]
            ws.removeAnalysis(analysis)

        # Unset the partition reference
        analysis.edit(SamplePartition=None)
        delete_ids.append(analysis.getId())

    if delete_ids:
        # Note: subscriber might promote the AR
        instance.manage_delObjects(ids=delete_ids)

    return new_analyses

def set(self, instance, items, prices=None, specs=None, **kwargs):
    """Set the 'Analyses' field value, by creating and removing Analysis
    objects from the AR.

    items is a list that contains the items to be set: The list can
    contain Analysis objects/brains, AnalysisService objects/brains and/or
    Analysis Service uids.

    prices is a dictionary:
        key = AnalysisService UID
        value = price

    specs is a dictionary:
        key = AnalysisService UID
        value = dictionary: defined in ResultsRange field definition
    """
    if not items:
        return

    assert isinstance(items, (list, tuple)), \
        "items must be a list or a tuple"

    # Convert the items list to a list of service uids and remove empties
    service_uids = map(self._get_service_uid, items)
    service_uids = filter(None, service_uids)

    bsc = getToolByName(instance, 'bika_setup_catalog')
    workflow = getToolByName(instance, 'portal_workflow')

    # one can only edit Analyses up to a certain state.
    ar_state = workflow.getInfoFor(instance, 'review_state', '')
    assert ar_state in ('sample_registered', 'sampled',
                        'to_be_sampled', 'to_be_preserved',
                        'sample_due', 'sample_received',
                        'attachment_due', 'to_be_verified')

    # - Modify existing AR specs with new form values for selected analyses.
    # - New analysis requests are also using this function, so ResultsRange
    #   may be undefined. In this case, specs= will contain the entire
    #   AR spec.
    rr = instance.getResultsRange()
    specs = specs if specs else []
    for s in specs:
        s_in_rr = False
        for i, r in enumerate(rr):
            if s['keyword'] == r['keyword']:
                rr[i].update(s)
                s_in_rr = True
        if not s_in_rr:
            rr.append(s)
    instance.setResultsRange(rr)

    new_analyses = []
    proxies = bsc(UID=service_uids)
    for proxy in proxies:
        service = proxy.getObject()
        keyword = service.getKeyword()

        # analysis->InterimFields
        calc = service.getCalculation()
        interim_fields = calc and list(calc.getInterimFields()) or []

        # override defaults from service->InterimFields
        service_interims = service.getInterimFields()
        sif = dict([(x['keyword'], x.get('value', ''))
                    for x in service_interims])
        for i, i_f in enumerate(interim_fields):
            if i_f['keyword'] in sif:
                interim_fields[i]['value'] = sif[i_f['keyword']]
                service_interims = [
                    x for x in service_interims
                    if x['keyword'] != i_f['keyword']
                ]
        # Add remaining service interims to the analysis
        for v in service_interims:
            interim_fields.append(v)

        # create the analysis if it doesn't exist
        if shasattr(instance, keyword):
            analysis = instance._getOb(keyword)
        else:
            analysis = create_analysis(instance, service)
            new_analyses.append(analysis)
        for i, r in enumerate(rr):
            if r['keyword'] == analysis.getKeyword():
                r['uid'] = analysis.UID()

    # delete analyses
    delete_ids = []
    for analysis in instance.objectValues('Analysis'):
        service_uid = analysis.getServiceUID()
        if service_uid not in service_uids:
            # If it is verified or published, don't delete it.
            state = workflow.getInfoFor(analysis, 'review_state')
            if state in ('verified', 'published'):
                continue
            # If it is assigned to a worksheet, unassign it before deletion.
            state = workflow.getInfoFor(analysis,
                                        'worksheetanalysis_review_state')
            if state == 'assigned':
                ws = analysis.getBackReferences("WorksheetAnalysis")[0]
                ws.removeAnalysis(analysis)
            # Unset the partition reference
            analysis.edit(SamplePartition=None)
            delete_ids.append(analysis.getId())

    if delete_ids:
        # Note: subscriber might promote the AR
        instance.manage_delObjects(ids=delete_ids)

    return new_analyses

def set(self, instance, service_uids, prices=None, specs={}, **kwargs):
    """service_uids are the services selected on the AR Add/Edit form.
    prices is a service_uid keyed dictionary containing the prices entered
    on the form.
    specs is a uid keyed dict with values of {min:, max:, error:}
    """
    if not service_uids:
        return

    assert type(service_uids) in (list, tuple)

    workflow = instance.portal_workflow

    # one can only edit Analyses up to a certain state.
    ar_state = workflow.getInfoFor(instance, 'review_state', '')
    assert ar_state in ('sample_registered', 'sampled',
                        'to_be_sampled', 'to_be_preserved',
                        'sample_due', 'sample_received',
                        'attachment_due', 'to_be_verified')

    bsc = getToolByName(instance, 'bika_setup_catalog')
    services = bsc(UID=service_uids)
    new_analyses = []

    for service in services:
        service_uid = service.UID
        service = service.getObject()
        keyword = service.getKeyword()
        price = prices[service_uid] if prices and service_uid in prices \
            else service.getPrice()
        vat = Decimal(service.getVAT())

        # analysis->InterimFields
        calc = service.getCalculation()
        interim_fields = calc and list(calc.getInterimFields()) or []

        # override defaults from service->InterimFields
        service_interims = service.getInterimFields()
        sif = dict([[x['keyword'], x.get('value', '')]
                    for x in service_interims])
        for i, i_f in enumerate(interim_fields):
            if i_f['keyword'] in sif:
                interim_fields[i]['value'] = sif[i_f['keyword']]
                service_interims = [
                    x for x in service_interims
                    if x['keyword'] != i_f['keyword']
                ]
        # Add remaining service interims to the analysis
        for v in service_interims:
            interim_fields.append(v)

        # create the analysis if it doesn't exist
        if hasattr(instance, keyword):
            analysis = instance._getOb(keyword)
        else:
            analysis = create_analysis(instance, service, keyword,
                                       interim_fields)
            new_analyses.append(analysis)
            # Note: subscriber might retract and/or unassign the AR

        spec = specs[service_uid] if specs and service_uid in specs \
            else None
        # If no specification came to us from the form, then we will
        # see if there is a spec for this SampleType & AnalysisService
        if not spec:
            spec = analysis.get_default_specification()
        analysis.specification = spec

        # XXX Price?
        # analysis.setPrice(price)

    # delete analyses
    delete_ids = []
    for analysis in instance.objectValues('Analysis'):
        service_uid = analysis.Schema()['Service'].getRaw(analysis)
        if service_uid not in service_uids:
            # If it is verified or published, don't delete it.
            if workflow.getInfoFor(analysis, 'review_state') \
                    in ('verified', 'published'):
                continue  # log it
            # If it is assigned to a worksheet, unassign it before deletion.
            elif workflow.getInfoFor(
                    analysis, 'worksheetanalysis_review_state') == 'assigned':
                ws = analysis.getBackReferences("WorksheetAnalysis")[0]
                ws.removeAnalysis(analysis)
            # Unset the partition reference
            analysis.edit(SamplePartition=None)
            delete_ids.append(analysis.getId())

    if delete_ids:
        # Note: subscriber might promote the AR
        instance.manage_delObjects(ids=delete_ids)

    return new_analyses

def doActionToAnalysis(source_analysis, action):
    """This function executes the action against the analysis.
    :source_analysis: a full analysis object. The new analyses will be
        cloned from it.
    :action: a dictionary representing an action row.
        [{'action': 'duplicate', ...}, {,}, ...]
    :returns: the new analysis
    """
    if not IRequestAnalysis.providedBy(source_analysis):
        # Only routine analyses (assigned to a Request) are supported
        logger.warn("Only IRequestAnalysis are supported in reflex testing")
        return None

    state = api.get_review_status(source_analysis)
    action_id = action.get('action', '')
    if action_id == "new_analysis":
        # Create a new analysis (different from the original)
        service_uid = action.get("new_analysis", "")
        if not api.is_uid(service_uid):
            logger.error("Not a valid UID: {}".format(service_uid))
            return None
        service = api.get_object_by_uid(service_uid, None)
        if not service or not IAnalysisService.providedBy(service):
            logger.error("No valid service for UID {}".format(service_uid))
            return None

        analysis = create_analysis(source_analysis.aq_parent, service)
        analysis.setSamplePartition(source_analysis.getSamplePartition())
        changeWorkflowState(analysis, "bika_analysis_workflow",
                            "sample_received")

    elif action_id == 'setvisibility':
        target_id = action.get('setvisibilityof', '')
        if target_id == "original":
            analysis = source_analysis
        else:
            analysis = _fetch_analysis_for_local_id(source_analysis,
                                                    target_id)

    elif action_id == 'repeat' and state != 'retracted':
        # Repeating an analysis consists of retracting it and then creating
        # a new analysis with the same analysis service used for the
        # retracted one (always working with the same sample). It'll do a
        # retract action
        doActionFor(source_analysis, 'retract')
        analysis_request = source_analysis.getRequest()
        analysis = analysis_request.getAnalyses(sort_on="created")[-1]
        analysis = api.get_object(analysis)
        analysis.setResult('')

    elif action_id == 'duplicate' or state == 'retracted':
        analysis = duplicateAnalysis(source_analysis)
        analysis.setResult('')

    elif action_id == 'setresult':
        target = action.get('setresulton', '')
        result_value = action.get('setresultdiscrete', '') or \
            action['setresultvalue']

        if target == 'original':
            analysis = source_analysis.getOriginalReflexedAnalysis()
            analysis.setResult(result_value)

        elif target == 'new':
            # Create a new analysis
            analysis = duplicateAnalysis(source_analysis)
            analysis.setResult(result_value)
            doActionFor(analysis, 'submit')

        else:
            logger.error("Unknown 'setresulton' directive: {}".format(target))
            return None

    else:
        logger.error("Unknown Reflex Rule action: {}".format(action_id))
        return None

    analysis.setReflexRuleAction(action_id)
    analysis.setIsReflexAnalysis(True)
    analysis.setReflexAnalysisOf(source_analysis)
    analysis.setReflexRuleActionsTriggered(
        source_analysis.getReflexRuleActionsTriggered())

    if action.get('showinreport', '') == "invisible":
        analysis.setHidden(True)
    elif action.get('showinreport', '') == "visible":
        analysis.setHidden(False)

    # Setting the original reflexed analysis
    if source_analysis.getOriginalReflexedAnalysis():
        analysis.setOriginalReflexedAnalysis(
            source_analysis.getOriginalReflexedAnalysis())
    else:
        analysis.setOriginalReflexedAnalysis(source_analysis)
    analysis.setReflexRuleLocalID(action.get('an_result_id', ''))

    # Setting the remarks to base analysis
    # remarks = get_remarks(action, analysis)
    # analysis.setRemarks(remarks)

    return analysis

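For illustration, hedged examples of the action rows this function reads, using only the keys it looks up above; the UID and local-id values are hypothetical.

# Hypothetical action rows; key names mirror the action.get() lookups
# in doActionToAnalysis above.
repeat_action = {
    'action': 'repeat',
    'showinreport': 'visible',
    'an_result_id': 'rep-1',             # hypothetical local id
}
new_analysis_action = {
    'action': 'new_analysis',
    'new_analysis': '3fa85f6457174562',  # hypothetical service UID
    'showinreport': 'invisible',
}
setresult_action = {
    'action': 'setresult',
    'setresulton': 'new',
    'setresultdiscrete': '1',
    'setresultvalue': '',
    'an_result_id': 'res-1',
}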