def guard_submit(analysis_request):
    """Return whether the transition "submit" can be performed or not.

    Returns True if there is at least one analysis in a non-detached state
    and all analyses in a non-detached state have been submitted.

    :param analysis_request: the Analysis Request to evaluate
    :returns: True if the "submit" transition can be performed
    """
    # Discard detached analyses. Materialize the result as a list: the
    # original filter/map chain is only safe on Python 2 (where both return
    # lists); on Python 3 the lazy filter would be exhausted by the all()
    # call below, leaving nothing for the for-loop
    analyses = [an for an in analysis_request.getAnalyses(full_objects=True)
                if api.get_workflow_status_of(an) not in
                ANALYSIS_DETACHED_STATES]

    # If not all analyses are for internal use, rely on "regular" analyses
    omit_internals = not all(map(IInternalUse.providedBy, analyses))

    analyses_ready = False
    for analysis in analyses:
        # Omit analyses for internal use
        if omit_internals and IInternalUse.providedBy(analysis):
            continue
        analysis_status = api.get_workflow_status_of(analysis)
        # A single not-yet-submitted analysis blocks the whole request
        if analysis_status in ['assigned', 'unassigned', 'registered']:
            return False
        analyses_ready = True
    return analyses_ready
def cancellation_state(instance):
    """Acts as a mask for cancellation_workflow for those content types that
    are not bound to this workflow. Returns 'active' or 'cancelled'

    :param instance: the content object to inspect
    :returns: 'cancelled' if the object's workflow state is cancelled,
        'active' otherwise
    """
    state = api.get_workflow_status_of(instance)
    return "cancelled" if state == "cancelled" else "active"
def guard_verify(obj):
    """Returns True if 'verify' transition can be applied to the Worksheet
    passed in. This is, returns true if all the analyses assigned
    have already been verified. Those analyses that are in an inactive state
    (cancelled, inactive) are dismissed, but at least one analysis must be in
    an active state (and verified), otherwise always return False.
    Note this guard depends entirely on the current status of the children
    :returns: true or false
    """
    analyses = obj.getAnalyses()
    if not analyses:
        # An empty worksheet cannot be verified
        return False

    can_verify = False
    # Fix: reuse the list fetched above instead of querying the worksheet a
    # second time with another obj.getAnalyses() call
    for analysis in analyses:
        # Dismiss analyses that are not active
        if not api.is_active(analysis):
            continue
        # Dismiss analyses that have been rejected or retracted
        if api.get_workflow_status_of(analysis) in ["rejected", "retracted"]:
            continue
        # Worksheet cannot be verified if there is one analysis not verified
        can_verify = IVerified.providedBy(analysis)
        if not can_verify:
            # No need to look further
            return False

    # This prevents the verification of the worksheet if all its analyses are
    # in a detached status (rejected, retracted or cancelled)
    return can_verify
def guard_submit(obj):
    """Returns if 'submit' transition can be applied to the worksheet passed
    in. By default, the target state for the 'submit' transition for a
    worksheet is 'to_be_verified', so this guard returns true if all the
    analyses assigned to the worksheet have already been submitted. Those
    analyses that are in a non-valid state (cancelled, inactive) are dismissed
    in the evaluation, but at least one analysis must be in an active state
    (and submitted) for this guard to return True. Otherwise, always returns
    False.
    Note this guard depends entirely on the current status of the children.
    """
    analyses = obj.getAnalyses()
    if not analyses:
        # An empty worksheet cannot be submitted
        return False

    can_submit = False
    # Fix: reuse the list fetched above instead of querying the worksheet a
    # second time with another obj.getAnalyses() call
    for analysis in analyses:
        # Dismiss analyses that are not active
        if not api.is_active(analysis):
            continue
        # Dismiss analyses that have been rejected or retracted
        if api.get_workflow_status_of(analysis) in ["rejected", "retracted"]:
            continue
        # Worksheet cannot be submitted if there is one analysis not submitted
        can_submit = ISubmitted.providedBy(analysis)
        if not can_submit:
            # No need to look further
            return False

    # This prevents the submission of the worksheet if all its analyses are
    # in a detached status (rejected, retracted or cancelled)
    return can_submit
def guard_verify(analysis_request):
    """Returns whether the transition "verify" can be performed or not.

    Returns True if there is at least one analysis in a non-detached state
    and all analyses in a non-detached state are in "verified" state.

    :param analysis_request: the Analysis Request to evaluate
    :returns: True if the "verify" transition can be performed
    """
    # Discard detached analyses. Materialize the result as a list: the
    # original filter/map chain is only safe on Python 2 (where both return
    # lists); on Python 3 the lazy filter would be exhausted by the all()
    # call below, leaving nothing for the for-loop
    analyses = [an for an in analysis_request.getAnalyses(full_objects=True)
                if api.get_workflow_status_of(an) not in
                ANALYSIS_DETACHED_STATES]

    # If not all analyses are for internal use, rely on "regular" analyses
    omit_internals = not all(map(IInternalUse.providedBy, analyses))

    analyses_ready = False
    for analysis in analyses:
        # Omit analyses for internal use
        if omit_internals and IInternalUse.providedBy(analysis):
            continue
        # All analyses must be in verified (or further) status
        if not IVerified.providedBy(analysis):
            return False
        analyses_ready = True
    return analyses_ready
def getSiblings(self, retracted=False):
    """Returns the list of analyses of the Analysis Request to which this
    analysis belongs to, but with the current analysis excluded.

    :param retracted: If false, retracted/rejected siblings are dismissed
    :type retracted: bool
    :return: list of siblings for this analysis
    :rtype: list of IAnalysis
    """
    request = self.getRequest()
    if not request:
        # Orphan analysis: no request, hence no siblings
        return []

    skip_states = [STATE_RETRACTED, STATE_REJECTED]
    my_uid = self.UID()
    siblings = []
    for analysis in request.getAnalyses(full_objects=True):
        # Exclude me from the list
        if api.get_uid(analysis) == my_uid:
            continue
        # Exclude retracted/rejected analyses unless explicitly requested
        if not retracted:
            if api.get_workflow_status_of(analysis) in skip_states:
                continue
        siblings.append(analysis)
    return siblings
def create_retest(ar):
    """Creates a retest (Analysis Request) from an invalidated Analysis Request
    :param ar: The invalidated Analysis Request
    :type ar: IAnalysisRequest
    :rtype: IAnalysisRequest
    :raises ValueError: if ar is falsy, not an IAnalysisRequest, already has
        a retest assigned, or is not in 'invalid' state
    """
    if not ar:
        raise ValueError("Source Analysis Request cannot be None")

    if not IAnalysisRequest.providedBy(ar):
        raise ValueError("Type not supported: {}".format(repr(type(ar))))

    if ar.getRetest():
        # Do not allow the creation of another retest!
        raise ValueError("Retest already set")

    if not ar.isInvalid():
        # Analysis Request must be in 'invalid' state
        # Fix: the previous message described the opposite condition and its
        # format() call had no placeholder, so repr(ar) was silently dropped
        raise ValueError("Cannot do a retest: Analysis Request {} is not "
                         "invalid".format(repr(ar)))

    # 0. Open the actions pool
    actions_pool = ActionHandlerPool.get_instance()
    actions_pool.queue_pool()

    # 1. Create the Retest (Analysis Request)
    ignore = ['Analyses', 'DatePublished', 'Invalidated', 'Sample']
    retest = _createObjectByType("AnalysisRequest", ar.aq_parent, tmpID())
    retest.setSample(ar.getSample())
    copy_field_values(ar, retest, ignore_fieldnames=ignore)
    renameAfterCreation(retest)

    # 2. Copy the analyses from the source
    intermediate_states = ['retracted', 'reflexed']
    for an in ar.getAnalyses(full_objects=True):
        if (api.get_workflow_status_of(an) in intermediate_states):
            # Exclude intermediate analyses
            continue
        nan = _createObjectByType("Analysis", retest, an.getKeyword())
        # Make a copy
        ignore_fieldnames = ['DataAnalysisPublished']
        copy_field_values(an, nan, ignore_fieldnames=ignore_fieldnames)
        nan.unmarkCreationFlag()
        push_reindex_to_actions_pool(nan)

    # 3. Assign the source to retest
    retest.setInvalidated(ar)

    # 4. Transition the retest to "sample_received"!
    changeWorkflowState(retest, 'bika_ar_workflow', 'sample_received')

    # 5. Reindex and other stuff
    push_reindex_to_actions_pool(retest)
    push_reindex_to_actions_pool(retest.aq_parent)

    # 6. Resume the actions pool
    actions_pool.resume()
    return retest
def cancellation_state(instance):
    """Acts as a mask for cancellation_workflow that is not bound to Analysis
    content type. Returns 'active' or 'cancelled'

    :param instance: the content object to inspect
    :returns: 'cancelled' when the workflow status is cancelled, else 'active'
    """
    if api.get_workflow_status_of(instance) != "cancelled":
        return "active"
    return "cancelled"
def is_assignment_allowed(self):
    """Check if analyst assignment is allowed

    Assignment requires manage permission and the context to be in one of
    the editable workflow states.
    """
    if not self.is_manage_allowed():
        return False
    editable = ["open", "attachment_due", "to_be_verified"]
    return api.get_workflow_status_of(self.context) in editable
def is_analysis_attachment_allowed(self, analysis):
    """Return whether an attachment may be added to the given analysis.

    Attachments are only allowed when the analysis' attachment option is
    "p" or "r" and the analysis has not been retracted.
    """
    if analysis.getAttachmentOption() not in ["p", "r"]:
        return False
    return api.get_workflow_status_of(analysis) not in ["retracted"]
def publish_sample(self, sample):
    """Set status to prepublished/published/republished

    Chooses the transition from the sample's current workflow status:
    verified -> publish, published -> republish, anything else -> prepublish.
    :returns: True if the transition succeeded
    """
    status = api.get_workflow_status_of(sample)
    if status == "verified":
        transition = "publish"
    elif status == "published":
        transition = "republish"
    else:
        transition = "prepublish"
    succeed, _ = doActionFor(sample, transition)
    return succeed
def _analysis_data(self, analysis):
    """ Returns a dict that represents the analysis

    Collects identification, result, specification and formatting info of
    the given analysis for report rendering. NOTE(review): assumes
    `analysis.aq_parent.aq_parent` is the client (DecimalMark source) —
    confirm against the acquisition chain.
    """
    # Decimal mark comes from two acquisition levels up (presumably client)
    decimalmark = analysis.aq_parent.aq_parent.getDecimalMark()
    keyword = analysis.getKeyword()
    andict = {
        'obj': analysis,
        'id': analysis.id,
        'title': analysis.Title(),
        'keyword': keyword,
        'scientific_name': analysis.getScientificName(),
        'accredited': analysis.getAccredited(),
        'point_of_capture': to_utf8(POINTS_OF_CAPTURE.getValue(analysis.getPointOfCapture())),
        'category': to_utf8(analysis.getCategoryTitle()),
        'result': analysis.getResult(),
        'unit': to_utf8(analysis.getUnit()),
        'formatted_unit': format_supsub(to_utf8(analysis.getUnit())),
        'capture_date': analysis.getResultCaptureDate(),
        'request_id': analysis.aq_parent.getId(),
        'formatted_result': '',
        'uncertainty': analysis.getUncertainty(),
        'formatted_uncertainty': '',
        'retested': analysis.isRetest(),
        'remarks': to_utf8(analysis.getRemarks()),
        'outofrange': False,
        'type': analysis.portal_type,
        # Only reference analyses provide getReferenceType
        'reftype': analysis.getReferenceType() if hasattr(
            analysis, 'getReferenceType') else None,
        'worksheet': None,
        'specs': {},
        'formatted_specs': '',
        'review_state': api.get_workflow_status_of(analysis),
    }
    # Reference analyses are keyed by their reference sample id; regular
    # analyses by their request id
    andict['refsample'] = analysis.getSample().id \
        if IReferenceAnalysis.providedBy(analysis) \
        else analysis.getRequestID()
    # Result range (specs) selection is delegated to the analysis itself
    specs = analysis.getResultsRange()
    andict['specs'] = specs
    scinot = self.context.bika_setup.getScientificNotationReport()
    andict['formatted_result'] = analysis.getFormattedResult(specs=specs, sciformat=int(scinot), decimalmark=decimalmark)
    # Build a human-readable range string from min/max (either may be unset)
    fs = ''
    if specs.get('min', None) and specs.get('max', None):
        fs = '%s - %s' % (specs['min'], specs['max'])
    elif specs.get('min', None):
        fs = '> %s' % specs['min']
    elif specs.get('max', None):
        fs = '< %s' % specs['max']
    andict['formatted_specs'] = formatDecimalMark(fs, decimalmark)
    andict['formatted_uncertainty'] = format_uncertainty(analysis, analysis.getResult(), decimalmark=decimalmark, sciformat=int(scinot))
    # Out of range?
    # is_out_of_range returns a tuple; the first element is the flag
    andict['outofrange'] = is_out_of_range(analysis)[0]
    return andict
def guard_deactivate(analysis_service):
    """Returns whether the transition deactivate can be performed for the
    analysis service passed in

    Deactivation is only possible when no dependent service is active.
    """
    dependants = analysis_service.getServiceDependants()
    statuses = map(api.get_workflow_status_of, dependants)
    return "active" not in statuses
def _is_assigned_to_worksheet(self, analysis):
    """Check if the Analysis is assigned to a worksheet

    :param analysis: Analysis Brain/Object
    :returns: True if the Analysis is assigned to a WS
    """
    obj = api.get_object(analysis)
    status = api.get_workflow_status_of(
        obj, state_var='worksheetanalysis_review_state')
    return status == "assigned"
def guard_reinstate(analysis_request):
    """Returns whether 'reinstate" transition can be performed or not. Returns
    True only if this is not a partition or the parent analysis request can be
    reinstated or is not in a cancelled state
    """
    parent = analysis_request.getParentAnalysisRequest()
    # Not a partition, or parent is not cancelled: always allowed
    if not parent or api.get_workflow_status_of(parent) != "cancelled":
        return True
    # Parent is cancelled: allowed only if the parent can be reinstated too
    return isTransitionAllowed(parent, "reinstate")
def guard_prepublish(analysis_request):
    """Returns whether 'prepublish' transition can be perform or not. Returns
    True if the analysis request has at least one analysis in 'verified' or in
    'to_be_verified' status. Otherwise, return False
    """
    valid_states = ['verified', 'to_be_verified']
    return any(
        api.get_workflow_status_of(api.get_object(an)) in valid_states
        for an in analysis_request.getAnalyses())
def set_printed_time(self, sample):
    """Updates the printed time of the last results report from the sample

    :param sample: the sample whose latest report gets its printed date set
    :returns: True if the sample is published, False otherwise
    """
    if api.get_workflow_status_of(sample) != "published":
        return False
    reports = sample.objectValues("ARReport")
    # Fix: a published sample without reports previously raised IndexError
    # on reports[-1]
    if not reports:
        return False
    # Pick the most recently published report
    reports = sorted(reports, key=lambda report: report.getDatePublished())
    last_report = reports[-1]
    if not last_report.getDatePrinted():
        last_report.setDatePrinted(DateTime())
        sample.reindexObject(idxs=["getPrinted"])
    return True
def getObjectWorkflowStates(self):
    """This method is used to populate catalog values
    Returns a dictionary with the workflow id as key and workflow state as
    value.
    :return: {'review_state':'active',...}
    """
    wf_tool = getToolByName(self, 'portal_workflow')
    # One entry per workflow bound to this object, keyed by its state var
    return dict(
        (wf.state_var, api.get_workflow_status_of(self, wf.state_var))
        for wf in wf_tool.getWorkflowsFor(self))
def get_item_info(self, brain_or_object):
    """Return the data of this brain or object

    :returns: dict with the object itself plus its basic catalog metadata
    """
    getters = (
        ("uid", api.get_uid),
        ("url", api.get_url),
        ("id", api.get_id),
        ("title", api.get_title),
        ("portal_type", api.get_portal_type),
        ("review_state", api.get_workflow_status_of),
    )
    info = dict((key, get(brain_or_object)) for key, get in getters)
    info["obj"] = brain_or_object
    return info
def get_prev_status_from_history(instance, status=None):
    """Returns the previous status of the object. If status is set, returns
    the previous status before the object reached the status passed in. If
    instance has reached the status passed in more than once, only the last
    one is considered.

    :param instance: the object whose review history is inspected
    :param status: target status; defaults to the object's current status
    :returns: the previous status, or None if there is none
    """
    target = status or api.get_workflow_status_of(instance)
    history = getReviewHistory(instance, reverse=True)
    # Fix: materialize as a list — on Python 3, map() returns an iterator
    # that supports neither .index() nor len()
    history = [event["review_state"] for event in history]
    if target not in history or history.index(target) == len(history) - 1:
        return None
    return history[history.index(target) + 1]
def get_wide_interims(self):
    """Returns a dictionary with the analyses services from the current
    worksheet which have at least one interim with 'Wide' attribute set to
    true and that have not been yet submitted

    The structure of the returned dictionary is the following:
    <Analysis_keyword>: {
        'analysis': <Analysis_name>,
        'keyword': <Analysis_keyword>,
        'interims': {
            <Interim_keyword>: {
                'value': <Interim_default_value>,
                'keyword': <Interim_key>,
                'title': <Interim_title>
            }
        }
    }
    """
    wide = {}
    allowed_states = ['assigned', 'unassigned']
    for analysis in self._getAnalyses():
        # TODO Workflow - Analysis Use a query instead of this
        if api.get_workflow_status_of(analysis) not in allowed_states:
            continue
        keyword = analysis.getKeyword()
        # Only the first analysis per keyword is considered
        if keyword in wide.keys():
            continue
        calculation = analysis.getCalculation()
        if not calculation:
            continue

        # Analysis Service interim defaults
        interims = {}
        for field in analysis.getInterimFields():
            if field.get("wide", False):
                interims[field["keyword"]] = field

        # Interims from calculation (service-level ones take precedence)
        for field in calculation.getInterimFields():
            if field["keyword"] not in interims.keys() \
                    and field.get("wide", False):
                interims[field["keyword"]] = field

        if interims:
            wide[keyword] = {
                "analysis": analysis.Title(),
                "keyword": keyword,
                "interims": interims,
            }
    return wide
def get_samples_from_uids(self, uids):
    """ Filter a list of sample uids by workflow status and return the list
    of sample objects with a valid workflow status for printing.

    :param uids: list of sample uids selected by the user in the samples
        listing
    :return: list of sample objects with a valid workflow status for printing
    """
    allowed = ["to_be_sampled", "to_be_scheduled"]
    samples = []
    for uid in uids:
        sample = api.get_object_by_uid(uid)
        if api.get_workflow_status_of(sample) in allowed:
            samples.append(sample)
    return samples
def guard_submit(analysis_request):
    """Return whether the transition "submit" can be performed or not.

    Returns True if there is at least one analysis in a non-detached state
    and all analyses in a non-detached state have been submitted.
    """
    found_valid = False
    for brain in analysis_request.getAnalyses():
        status = api.get_workflow_status_of(api.get_object(brain))
        # Detached analyses do not count for the evaluation
        if status in ANALYSIS_DETTACHED_STATES:
            continue
        # One not-yet-submitted analysis blocks the whole request
        if status in ['assigned', 'unassigned']:
            return False
        found_valid = True
    return found_valid
def guard_verify(analysis_request):
    """Returns whether the transition "verify" can be performed or not.

    Returns True if there is at least one analysis in a non-detached state
    and all analyses in a non-detached state are in "verified" state.
    """
    found_valid = False
    for brain in analysis_request.getAnalyses():
        status = api.get_workflow_status_of(api.get_object(brain))
        # Detached analyses do not count for the evaluation
        if status in ANALYSIS_DETTACHED_STATES:
            continue
        # All remaining analyses must be verified
        if status != 'verified':
            return False
        found_valid = True
    return found_valid
def get_base_info(self, obj):
    """Extract the base info from the given object

    :returns: dict with the object's id, uid, title, path, url and its
        workflow state (both raw and as a display title)
    """
    state = api.get_workflow_status_of(obj)
    return {
        "obj": obj,
        "id": api.get_id(obj),
        "uid": api.get_uid(obj),
        "title": api.get_title(obj),
        "path": api.get_path(obj),
        "url": api.get_url(obj),
        "review_state": state,
        # e.g. "to_be_verified" -> "To be verified"
        "state_title": state.capitalize().replace("_", " "),
    }
def _is_frozen(self, brain_or_object):
    """Check if the passed in object is frozen: the object is cancelled,
    inactive or has been verified at some point
    :param brain_or_object: Analysis or AR Brain/Object
    :returns: True if the object is frozen
    """
    if not api.is_active(brain_or_object):
        return True
    if api.get_workflow_status_of(brain_or_object) in FROZEN_STATES:
        return True
    # Check the review history if one of the frozen transitions was done
    # Fix: renamed local `object` -> `obj` to stop shadowing the builtin
    obj = api.get_object(brain_or_object)
    performed_transitions = set(getReviewHistoryActionsList(obj))
    if set(FROZEN_TRANSITIONS).intersection(performed_transitions):
        return True
    return False
def guard_cancel(analysis_request):
    """Returns whether 'cancel' transition can be performed or not. Returns
    True only if all analyses are in "unassigned" status
    """
    # Ask to partitions (short-circuits on the first disallowed one)
    partitions = analysis_request.getDescendants(all_descendants=False)
    if not all(isTransitionAllowed(part, "cancel") for part in partitions):
        return False
    # Look through analyses
    for brain in analysis_request.getAnalyses():
        obj = api.get_object(brain)
        if api.get_workflow_status_of(obj) != "unassigned":
            return False
    return True
def assigned_state(instance):
    """Returns `assigned` or `unassigned` depending on the state of the
    analyses the analysisrequest contains. Return `unassigned` if the Analysis
    Request does not contain any analysis or if has at least one in
    `unassigned` state. Otherwise, returns `assigned`"""
    analyses = instance.getAnalyses()
    if not analyses:
        return 'unassigned'
    state_var = 'worksheetanalysis_review_state'
    # all() short-circuits on the first non-assigned analysis
    all_assigned = all(
        api.get_workflow_status_of(an, state_var) == 'assigned'
        for an in analyses)
    return 'assigned' if all_assigned else 'unassigned'
def send_to_lab(self, ar, courier):
    """Set the courier and send the AR to the lab

    :param ar: the Analysis Request to ship
    :param courier: the courier to store on the AR
    :returns: True if the AR was transitioned, False if skipped
    """
    # Only proceed if the AR is in an allowed state
    status = api.get_workflow_status_of(ar)
    if status not in ALLOWED_STATES:
        logger.info("Skipping already shipped AR {}".format(ar.getId()))
        return False
    # 1. Set the courier to the extended field
    ar.getField("Courier").set(ar, courier)
    # 2. Transition the AR to shipped
    workflow = api.get_tool("portal_workflow")
    workflow.doActionFor(ar, "send_to_lab", wf_id="bika_ar_workflow")
    return True
def guard_retract(worksheet):
    """Return whether the transition retract can be performed or not to the
    worksheet passed in. Since the retract transition from worksheet is a
    shortcut to retract transitions from all analyses the worksheet contains,
    this guard only returns True if retract transition is allowed for all
    analyses the worksheet contains
    """
    analyses = worksheet.getAnalyses()
    detached = ['rejected', 'retracted']
    num_detached = 0
    for analysis in analyses:
        if api.get_workflow_status_of(analysis) in detached:
            num_detached += 1
        elif not isTransitionAllowed(analysis, "retract"):
            return False
    # Fix: replaced the `X and Y or False` anti-pattern with an explicit
    # boolean. True only when the worksheet has analyses and at least one
    # of them is not already detached
    return bool(analyses) and num_detached < len(analyses)
def _analysis_data(self, analysis):
    """ Returns a dict that represents the analysis

    Collects identification, result, specification and formatting info of
    the given analysis for report rendering. NOTE(review): assumes
    `analysis.aq_parent.aq_parent` is the client (DecimalMark source) —
    confirm against the acquisition chain.
    """
    # Decimal mark comes from two acquisition levels up (presumably client)
    decimalmark = analysis.aq_parent.aq_parent.getDecimalMark()
    keyword = analysis.getKeyword()
    andict = {
        'obj': analysis,
        'id': analysis.id,
        'title': analysis.Title(),
        'keyword': keyword,
        'scientific_name': analysis.getScientificName(),
        'accredited': analysis.getAccredited(),
        'point_of_capture': to_utf8(POINTS_OF_CAPTURE.getValue(analysis.getPointOfCapture())),
        'category': to_utf8(analysis.getCategoryTitle()),
        'result': analysis.getResult(),
        'unit': to_utf8(analysis.getUnit()),
        'formatted_unit': format_supsub(to_utf8(analysis.getUnit())),
        'capture_date': analysis.getResultCaptureDate(),
        'request_id': analysis.aq_parent.getId(),
        'formatted_result': '',
        'uncertainty': analysis.getUncertainty(),
        'formatted_uncertainty': '',
        'retested': analysis.getRetested(),
        'remarks': to_utf8(analysis.getRemarks()),
        'resultdm': to_utf8(analysis.getResultDM()),
        'outofrange': False,
        'type': analysis.portal_type,
        # Only reference analyses provide getReferenceType
        'reftype': analysis.getReferenceType() if hasattr(
            analysis, 'getReferenceType') else None,
        'worksheet': None,
        'specs': {},
        'formatted_specs': '',
        'review_state': api.get_workflow_status_of(analysis),
    }
    # Regular analyses are labelled by their sample id; reference analyses
    # by their parent's id and title
    andict['refsample'] = analysis.getSample().id \
        if analysis.portal_type == 'Analysis' \
        else '%s - %s' % (analysis.aq_parent.id, analysis.aq_parent.Title())

    # Which analysis specs must be used?
    # Try first with those defined at AR Publish Specs level
    if analysis.portal_type == 'ReferenceAnalysis':
        # The analysis is a Control or Blank. We might use the
        # reference results instead other specs
        uid = analysis.getServiceUID()
        specs = analysis.aq_parent.getResultsRangeDict().get(uid, {})
    else:
        # Get the specs directly from the analysis. The getResultsRange
        # function already takes care about which are the specs to be used:
        # AR, client or lab.
        specs = analysis.getResultsRange()
    andict['specs'] = specs
    scinot = self.context.bika_setup.getScientificNotationReport()
    andict['formatted_result'] = analysis.getFormattedResult(specs=specs, sciformat=int(scinot), decimalmark=decimalmark)
    # Build a human-readable range string from min/max (either may be unset)
    fs = ''
    if specs.get('min', None) and specs.get('max', None):
        fs = '%s - %s' % (specs['min'], specs['max'])
    elif specs.get('min', None):
        fs = '> %s' % specs['min']
    elif specs.get('max', None):
        fs = '< %s' % specs['max']
    andict['formatted_specs'] = formatDecimalMark(fs, decimalmark)
    andict['formatted_uncertainty'] = format_uncertainty(analysis, analysis.getResult(), decimalmark=decimalmark, sciformat=int(scinot))
    # Out of range?
    # Ask every registered IResultOutOfRange adapter; the first positive
    # verdict flags the analysis
    if specs:
        adapters = getAdapters((analysis, ), IResultOutOfRange)
        for name, adapter in adapters:
            ret = adapter(specification=specs)
            if ret and ret['out_of_range']:
                andict['outofrange'] = True
                break
    return andict