def fix_analysis_requests_assay_date(portal):
    """Backfills the AssayDate field for already-processed Analysis Requests.

    Walks through ARs in a terminal-ish state and, when AssayDate is not
    set, assigns the latest result capture date among the analyses in
    to_be_verified/published/verified state.
    """
    logger.info("Updating Assay Date for old Analysis Requests ...")
    query = dict(
        portal_type="AnalysisRequest",
        review_state=["published", "to_be_verified", "verified", "invalid"])
    brains = api.search(query, CATALOG_ANALYSIS_REQUEST_LISTING)
    total = len(brains)
    for num, brain in enumerate(brains):
        if num % 100 == 0:
            logger.info(
                "Updating Assay Date for old Analysis Requests: {}/{}".format(
                    num, total))
        if num % TRANSACTION_THERESHOLD == 0:
            # Commit in batches to keep the transaction size bounded
            commit_transaction(portal)
        request = api.get_object(brain)
        if api.get_field_value(request, "AssayDate", None):
            # Already has an Assay Date, nothing to do
            continue
        states = ["to_be_verified", "published", "verified"]
        analyses = request.getAnalyses(review_state=states)
        capture_dates = sorted([an.getResultCaptureDate for an in analyses])
        if capture_dates:
            # Latest capture date becomes the Assay Date
            api.set_field_value(request, "AssayDate", capture_dates[-1])
            request.reindexObject()
    commit_transaction(portal)
    logger.info("Updating Assay Date for old Analysis Requests [DONE]")
def after_no_sampling_workflow(analysis_request):
    """ Event fired for no_sampling_workflow that makes the status of the
    Analysis request or Sample to become sample_ordered
    """
    if not analysis_request.isPartition():
        # Generate the delivery pdf
        generate_requisition_pdf(analysis_request)

        # Assign the default specification bound to the sample type, if any
        sample_type = analysis_request.getSampleType()
        specs = api.get_field_value(
            sample_type, "DefaultAnalysisSpecifications", None)
        if specs:
            analysis_request.setSpecification(api.get_object(specs))
        else:
            # Fall back to a specification named after the Sample Type
            specs_title = "{} - calculated".format(sample_type.Title())
            query = dict(portal_type="AnalysisSpec", title=specs_title)
            matches = api.search(query, 'bika_setup_catalog')
            if matches:
                analysis_request.setSpecification(api.get_object(matches[0]))

    if analysis_request.isPartition():
        # Change workflow state to "at_reception"
        wf.changeWorkflowState(analysis_request, wf_id="bika_ar_workflow",
                               state_id="sample_at_reception")
def update_internal_use(portal):
    """Walks through all Samples and assigns its value to False if no value set
    """
    logger.info("*** Updating InternalUse field on Samples/ARs ***")
    brains = api.search(dict(portal_type="Sample"), "bika_catalog")
    for brain in brains:
        obj = api.get_object(brain)
        # Only touch samples that never got an explicit value
        if _api.get_field_value(obj, "InternalUse", None) is None:
            _api.set_field_value(obj, "InternalUse", False)
def isItemAllowed(self, obj):
    """Only display the Analysis Requests that are not for internal use
    """
    allowed = ClientView.isItemAllowed(self, obj)
    if not allowed:
        return False
    # TODO Performance - This function wakes up the whole object
    # Non-client users see everything that passed the base check
    if not self.client_contact:
        return True
    # Client contacts must not see internal-use ARs
    internal = _api.get_field_value(obj, "InternalUse", False)
    return not internal
def folder_listing_item(listing, obj, item, index):
    """Fills the "primary" column of a listing item with a link to the
    primary sample, when the sample rendered has one.

    :param listing: the listing view the item belongs to
    :param obj: the sample (object or brain) being rendered
    :param item: the listing row (dict) to update
    :param index: position of the item within the listing
    :return: the updated item
    """
    sample = api.get_object(obj)
    primary = _api.get_field_value(sample, 'PrimarySample', None)
    item["primary"] = ""
    if not primary:
        # Not a partition/copy: leave the column empty
        return item
    primary = api.get_object(primary)
    primary_id = primary.getSampleID()
    primary_url = primary.absolute_url()
    # Reuse the id fetched above instead of calling getSampleID() again
    item["primary"] = primary_id
    item["replace"]["primary"] = get_link(primary_url, value=primary_id)
    return item
def to_dict(brain_or_object, skip_fields=None):
    """Returns a mapping of schema field name -> field value for the given
    object or catalog brain, omitting the names listed in skip_fields.
    """
    skip = set(skip_fields or [])
    obj = api.get_object(brain_or_object)
    out = {}
    for field in obj.Schema().fields():
        name = field.getName()
        if name in skip:
            continue
        out[name] = _api.get_field_value(obj, name)
    return out
def render(self):
    """Renders the panic alert for the current Analysis Request.

    Returns an empty string when no analysis is in panic; renders
    nothing at all when the context is not an Analysis Request.
    """
    if not IAnalysisRequest.providedBy(self.context):
        return None
    self.in_panic = self.context.has_analyses_in_panic()
    if not self.in_panic:
        return ""
    self.panic_email_sent = bapi.get_field_value(
        instance=self.context,
        field_name='PanicEmailAlertSent',
        default=False)
    self.ar_uid = api.get_uid(self.context)
    return self.template()
def get_specification_for(spec, default=_marker):
    """Returns a plain dictionary with specification values (min, max, etc.)

    It looks through an excel file provided as-is to find the record that
    better fits with the gender and age from the analysis request and for
    the analysis passed in

    :param spec: mapping holding the uid of the analysis ('analysis_uid')
    :param default: value to return when the lookup cannot be resolved;
        when not set, an api.fail is raised instead
    """
    # Fixed spec.get(('analysis_uid')): the extra parens were redundant
    analysis = api.get_object_by_uid(spec.get('analysis_uid'))
    if not analysis or not IRequestAnalysis.providedBy(analysis):
        if default is not _marker:
            return default
        api.fail("Type {} not supported: ".format(repr(analysis)))

    request = analysis.getRequest()
    gender = api.get_field_value(request, "Gender")
    # Membership test on the dict directly instead of .keys()
    if not gender or gender.lower() not in GENDERS:
        # If no gender is specified or not a valid value, assume any
        gender = 'a'

    dob = api.get_field_value(request, "DateOfBirth")
    sampled = request.getDateSampled()
    if not dob or not sampled:
        # No DateSampled / DateOfBirth set. Ignore if 1.3 upgrade
        return {}

    specification = request.getSpecification()
    if not specification:
        # This should never happen, Since this function has been triggered, we
        # assume an specification has been set to the AR
        if default is not _marker:
            return default
        api.fail("Specification not set for request {}".format(request.id))

    years, months, days = api.get_age(dob, sampled)
    return get_analysisspec(analysis_keyword=analysis.getKeyword(),
                            gender=gender, years=years, months=months,
                            days=days)
def get_age(self, instance):
    """Returns a short textual age ('Ny Nm Nd') computed between the
    date of birth and the date the sample was taken. Empty string when
    either date is missing; zero-valued components render as ''.
    """
    dob = get_field_value(instance, "DateOfBirth")
    sampled = instance.getDateSampled()
    if not dob or not sampled:
        return ''
    years, months, days = api.get_age(dob, sampled)
    parts = []
    for value, suffix in ((years, 'y'), (months, 'm'), (days, 'd')):
        # Falsy (zero) components become empty strings, as before
        parts.append('{}{}'.format(value, suffix) if value else '')
    return ' '.join(parts)
def get_referral_labs_data(self, sample):
    """Returns a dictionary with the data to be displayed if one or more
    analyses were tested by a referral lab

    Keys are referral lab titles; values are lists of analysis titles.
    """
    data = {}
    for analysis in sample.getAnalyses(full_objects=True):
        ref_lab = api.get_field_value(analysis, "ReferralLab", None)
        if not ref_lab:
            continue
        lab_title = api.get_title(ref_lab)
        # Group analysis titles under their referral lab
        data.setdefault(lab_title, []).append(api.get_title(analysis))
    return data
def folder_referral_lab(self, obj, item, index):
    """Adds the column Referral Lab to the item
    """
    editable = self.listing.is_analysis_edition_allowed(obj)
    obj = api.get_object(obj)
    ref_lab = api.get_field_value(obj, "ReferralLab", None)

    if not editable:
        # Read-only: display the referral lab title (or nothing)
        item["ReferralLab"] = api.get_title(ref_lab) if ref_lab else ""
        return item

    # Referral Laboratory is editable: render a selection widget
    item["ReferralLab"] = api.get_uid(ref_lab) if ref_lab else ""
    item["choices"]["ReferralLab"] = self.get_referral_labs()
    item['allow_edit'].append('ReferralLab')
    return item
def set_xlsx_specs(senaite_spec):
    """Applies the ranges read from the xlsx specifications file to the
    given analysis specification, binding each service keyword to the
    "Ranges calculation".
    """
    logger.info("Applying specs to {}".format(senaite_spec.Title()))
    query = dict(portal_type="Calculation", title="Ranges calculation")
    calc = api.search(query, "bika_setup_catalog")
    # Exactly one "Ranges calculation" is expected
    if len(calc) != 1:
        logger.info("No calculation found [SKIP]")
        return
    calc_uid = api.get_uid(calc[0])
    keywords = list()
    raw_specifications = get_xls_specifications()
    for spec in raw_specifications:
        keyword = spec.get("keyword")
        if keyword in keywords:
            # Keyword already processed
            continue
        query = dict(portal_type="AnalysisService", getKeyword=keyword)
        brains = api.search(query, "bika_setup_catalog")
        # Exactly one service must match the keyword
        if len(brains) != 1:
            logger.info(
                "No service found for {} [SKIP]".format(keyword))
            continue
        keywords.append(keyword)
        specs_dict = {
            'keyword': keyword,
            'min_operator': 'geq',
            'min': '0',
            'max_operator': 'lt',
            'max': '0',
            'minpanic': '',
            'maxpanic': '',
            'warn_min': '',
            'warn_max': '',
            'hidemin': '',
            'hidemax': '',
            'rangecomments': '',
            'calculation': calc_uid,
        }
        grades_dict = {grade: "" for grade in GRADES_KEYS}
        specs_dict.update(grades_dict)
        ranges = api.get_field_value(senaite_spec, 'ResultsRange', [{}])
        # List comprehension instead of filter(): under Python 3 filter()
        # returns an iterator, which has no append() method
        ranges = [val for val in ranges if val.get('keyword') != keyword]
        ranges.append(specs_dict)
        senaite_spec.setResultsRange(ranges)
def after_no_sampling_workflow(obj):
    """ Event fired for no_sampling_workflow that makes the status of the
    Analysis request or Sample to become sample_ordered
    """
    logger.info("*** Custom after_no_sampling_workflow (order) transition ***")

    if IAnalysisRequest.providedBy(obj):
        # Cascade the transition to the active analyses
        analyses = obj.getAnalyses(full_objects=True,
                                   cancellation_state='active')
        for analysis in analyses:
            doActionFor(analysis, 'no_sampling_workflow')

        # Promote to sample
        sample = obj.getSample()
        if sample:
            doActionFor(sample, 'no_sampling_workflow')

        # Generate the delivery pdf
        generate_requisition_pdf(obj)

        # Set specifications by default
        sample_type = obj.getSampleType()
        specs = _api.get_field_value(
            sample_type, "DefaultAnalysisSpecifications", None)
        if specs:
            obj.setSpecification(api.get_object(specs))
        else:
            # Find out suitable specs by Sample Type name
            specs_title = "{} - calculated".format(sample_type.Title())
            query = dict(portal_type="AnalysisSpec", title=specs_title)
            matches = api.search(query, 'bika_setup_catalog')
            if matches:
                obj.setSpecification(api.get_object(matches[0]))

    elif ISample.providedBy(obj):
        sample_events._cascade_transition(obj, 'no_sampling_workflow')
def workflow_action_send_to_lab(self):
    """Redirects the user to the requisition form automatically generated
    due to the send_to_lab transition
    """
    logger.info("SampleARWorkflowAction.workflow_action_send_to_lab")
    action, came_from = self._get_form_workflow_action()

    # Courier must be set before the sample can be sent to the lab
    if not get_field_value(self.context, "Courier"):
        message = "Cannot send to the lab. Courier is not set"
        self.context.plone_utils.addPortalMessage(message, "error")
        self.request.response.redirect(self.context.absolute_url())
        return

    trans, dest = self.submitTransition(action, came_from, [self.context])
    if not trans:
        return

    message = PMF('Changes saved.')
    self.context.plone_utils.addPortalMessage(message, 'info')
    # TODO Page does not get refreshed when displaying pdf
    # self.destination_url = '{}/workflow_action?workflow_action=download_requisition'\
    #     .format(self.context.absolute_url())
    self.destination_url = self.context.absolute_url()
    self.request.response.redirect(self.destination_url)
def getVisit(instance):
    """Returns the visit number of the current Analysis Request
    (empty string when the field is not set)
    """
    visit = api.get_field_value(instance, "Visit", default="")
    return visit
def getParticipantID(instance):
    """Returns the patient ID of the current Analysis Request
    (empty string when the field is not set)
    """
    participant_id = api.get_field_value(instance, "ParticipantID",
                                         default="")
    return participant_id
def get(self, instance, field_name, default=_marker):
    """Thin delegate: fetches the value of the given field from the
    instance through the api helper.
    """
    value = api.get_field_value(instance, field_name, default)
    return value
def getPrimaryAnalysisRequest(self):
    """Returns the value of the PrimaryAnalysisRequest field, or None
    when not set.
    """
    primary = api.get_field_value(self, "PrimaryAnalysisRequest", None)
    return primary
def getPrimarySample(self):
    """Returns the value of the PrimarySample field, or None when not set.
    """
    primary = api.get_field_value(self, "PrimarySample", None)
    return primary
def import_specifications_for_sample_type(portal, sample_type):
    """Imports the xlsx specifications for the given sample type: ensures
    the "Ranges calculation" and the per-sample-type AnalysisSpec exist,
    then binds each service keyword to the calculation via ResultsRange.
    """
    logger.info("Importing specs for {}".format(sample_type.Title()))

    def get_bs_object(xlsx_row, xlsx_keyword, portal_type, criteria):
        """Returns the single bika_setup object matching the criteria
        against the value found in the row, or None (logging why)
        """
        text_value = xlsx_row.get(xlsx_keyword, None)
        if not text_value:
            logger.warn("Value not set for keyword {}".format(xlsx_keyword))
            return None
        query = {"portal_type": portal_type, criteria: text_value}
        brain = api.search(query, 'bika_setup_catalog')
        if not brain:
            logger.warn("No objects found for type {} and {} '{}'".format(
                portal_type, criteria, text_value))
            return None
        if len(brain) > 1:
            logger.warn(
                "More than one object found for type {} and {} '{}'".format(
                    portal_type, criteria, text_value))
            return None
        return api.get_object(brain[0])

    raw_specifications = get_xls_specifications()
    for spec in raw_specifications:
        # Valid Analysis Service?
        service = get_bs_object(spec, "keyword", "AnalysisService",
                                "getKeyword")
        if not service:
            continue

        # The calculation exists?
        calc_title = "Ranges calculation"
        query = dict(calculation=calc_title)
        calc = get_bs_object(query, "calculation", "Calculation", "title")
        if not calc:
            # Create a new one
            folder = portal.bika_setup.bika_calculations
            _id = folder.invokeFactory("Calculation", id=tmpID())
            calc = folder[_id]
            calc.edit(title=calc_title,
                      PythonImports=[{
                          "module": "bhp.lims.specscalculations",
                          "function": "get_specification_for"
                      }],
                      Formula="get_specification_for($spec)")
            calc.unmarkCreationFlag()
            renameAfterCreation(calc)

        # Existing AnalysisSpec?
        specs_title = "{} - calculated".format(sample_type.Title())
        query = dict(portal_type='AnalysisSpec', title=specs_title)
        aspec = api.search(query, 'bika_setup_catalog')
        if not aspec:
            # Create the new AnalysisSpecs object!
            folder = portal.bika_setup.bika_analysisspecs
            _id = folder.invokeFactory('AnalysisSpec', id=tmpID())
            aspec = folder[_id]
            aspec.edit(title=specs_title)
            aspec.unmarkCreationFlag()
            renameAfterCreation(aspec)
        elif len(aspec) > 1:
            logger.warn(
                "More than one Analysis Specification found for {}".format(
                    specs_title))
            continue
        else:
            aspec = api.get_object(aspec[0])
        aspec.setSampleType(sample_type)

        # Set the analysis keyword and bind it to the calculation to use
        keyword = service.getKeyword()
        specs_dict = {
            'keyword': keyword,
            'min_operator': 'geq',
            'min': '0',
            'max_operator': 'lt',
            'max': '0',
            'minpanic': '',
            'maxpanic': '',
            'warn_min': '',
            'warn_max': '',
            'hidemin': '',
            'hidemax': '',
            'rangecomments': '',
            'calculation': api.get_uid(calc),
        }
        grades_dict = {grade: "" for grade in GRADES_KEYS}
        specs_dict.update(grades_dict)
        ranges = api.get_field_value(aspec, 'ResultsRange', [{}])
        # List comprehension instead of filter(): under Python 3 filter()
        # returns an iterator, which has no append() method
        ranges = [val for val in ranges if val.get('keyword') != keyword]
        ranges.append(specs_dict)
        aspec.setResultsRange(ranges)