def update_role_mappings(portal):
    """Re-apply the workflow role mappings to the objects returned by each
    query defined in ROLE_MAPPINGS.

    Each ROLE_MAPPINGS entry is a 3-tuple: (workflow id, catalog query,
    catalog name). Objects already processed for a given workflow are
    skipped, as are objects whose current state is listed under the
    query's 'not_review_state' key.
    """
    logger.info("Updating role mappings ...")
    # UIDs already processed, per workflow id. Sets give O(1) membership
    # tests; the previous list-based lookup made this walk quadratic on
    # large catalogs
    processed = dict()
    # portal_workflow is loop-invariant: fetch it once
    wf_tool = api.get_tool("portal_workflow")
    for rm_query in ROLE_MAPPINGS:
        wf_id = rm_query[0]
        workflow = wf_tool.getWorkflowById(wf_id)
        query = rm_query[1].copy()

        # States to be explicitly excluded from the update
        exclude_states = []
        if 'not_review_state' in query:
            exclude_states = query.get('not_review_state', [])
            del query['not_review_state']

        brains = api.search(query, rm_query[2])
        total = len(brains)
        done = processed.setdefault(wf_id, set())
        for num, brain in enumerate(brains):
            if num % 100 == 0:
                logger.info("Updating role mappings '{0}': {1}/{2}".format(
                    wf_id, num, total))

            uid = api.get_uid(brain)
            if uid in done:
                # Already processed, skip
                continue

            if api.get_workflow_status_of(brain) in exclude_states:
                # We explicitely want to exclude objs in these states
                continue

            workflow.updateRoleMappingsFor(api.get_object(brain))
            done.add(uid)

    logger.info("Updating role mappings [DONE]")
def get_analyses_data(self):
    """Returns a list of rows, one per analysis of each AR returned by
    self.search(), with the cell values produced by the ANALYSES_ROWS
    (title, key, converter) definitions
    """
    rows = []
    ars = self.search()
    total = len(ars)
    logger.info("Exporting data of {} ARs".format(total))
    for num, ar in enumerate(ars):
        sample = SuperModel(api.get_uid(ar))
        for analysis in self.get_analyses(sample):
            analysis = SuperModel(api.get_uid(analysis))
            data = []
            for title, key, converter in ANALYSES_ROWS:
                if key.startswith("Analysis"):
                    # "Analysis"-prefixed keys resolve against the analysis
                    # object, with the prefix stripped off
                    model = analysis
                    key = ".".join(key.split(".")[1:])
                else:
                    model = sample
                value = self.get(model, key)
                data.append(converter(model, key, value))
            rows.append(data)
        if num % 100 == 0:
            logger.info("Exported {}/{}".format(num, total))
    return rows
def fix_analyses_storage_instrument(portal):
    """Walks through all Analyses not yet verified and if they belong to the
    Storage requisition category, remove the instrument assignment
    """
    logger.info("Sanitizing 'Storage instrument' from analyses")

    # Resolve the 'Storage requisition' analysis category
    categories = api.search(
        dict(portal_type="AnalysisCategory", title="Storage requisition"),
        "bika_setup_catalog")
    if not categories:
        logger.warn("Category 'Storage requisition' not found [SKIP]")
        return
    category_uid = api.get_uid(categories[0])

    # Cleanup analysis services first
    service_brains = api.search(
        dict(portal_type="AnalysisService", getCategoryUID=category_uid),
        "bika_setup_catalog")
    for service_brain in service_brains:
        service = api.get_object(service_brain)
        if not service.getInstrument():
            continue
        service.setInstrument(None)
        service.reindexObject()

    # Cleanup analyses, skipping those in final states
    skip_states = ['published', 'rejected', 'invalid']
    analysis_brains = api.search(
        dict(getCategoryUID=category_uid, ), CATALOG_ANALYSIS_LISTING)
    for analysis_brain in analysis_brains:
        if analysis_brain.review_state in skip_states:
            continue
        if not analysis_brain.getInstrumentUID:
            # No instrument assigned, nothing to do
            continue
        analysis = api.get_object(analysis_brain)
        analysis.setInstrument(None)
        analysis.reindexObject()

    logger.info("Sanitizing 'Storage instrument' from analyses [DONE]")
def search(self):
    """Search all ARs of the system
    """
    query = {
        "portal_type": "AnalysisRequest",
        "sort_on": "created",
        "sort_order": "descending",
    }

    # Restrict to the current client, if any
    client = self.get_current_client()
    if client:
        query["getClientUID"] = api.get_uid(client)

    # Only apply known review states
    review_state = self.request.get("review_state")
    if review_state in REVIEW_STATE_MAP:
        query["review_state"] = review_state

    # Apply the requested limit when it is a positive integer
    limit = self.request.get("limit", "30")
    if limit.isdigit() and int(limit) > 0:
        query["sort_limit"] = int(limit)

    catalog = api.get_tool("bika_catalog_analysisrequest_listing")
    return catalog(query)
def sanitize_ranges_calculation_from_analyses(portal):
    """Walks through all Analyses not yet verified and remove the calculation
    if is Ranges Calculation set
    """
    logger.info("Sanitizing 'Ranges Calculation' from analyses")
    calcs = api.search(
        dict(portal_type="Calculation", title="Ranges calculation"),
        "bika_setup_catalog")
    if not calcs:
        logger.warn("Calculation 'Ranges calculation' not found! [SKIP]")
        return
    ranges_calc = api.get_object(calcs[0])
    ranges_calc_uid = api.get_uid(ranges_calc)

    # Cleanup analysis services first
    service_brains = api.search(
        dict(portal_type="AnalysisService", getCalculationUID=ranges_calc_uid),
        "bika_setup_catalog")
    for brain in service_brains:
        service = api.get_object(brain)
        service.setCalculation(None)
        service.reindexObject()

    # Cleanup analyses
    # NOTE(review): the catalog query is empty, so this walks every analysis
    # in the system and filters by calculation uid in python
    for brain in api.search(dict(), CATALOG_ANALYSIS_LISTING):
        if brain.getCalculationUID != ranges_calc_uid:
            continue
        analysis = api.get_object(brain)
        analysis.setCalculation(None)
        analysis.reindexObject()

    logger.info("Sanitizing 'Ranges Calculation' from analyses [DONE]")
def handle_email_panic(view):
    """If the email for panic levels has been submitted, send the email and
    redirect to the publish view for the current Analysis Request
    """
    if "email_popup_submit" not in view.request:
        return
    if not send_panic_email(view):
        return
    ar_uid = api.get_uid(view.context)
    url = "{}/analysisrequests/publish?items={}".format(view.portal_url,
                                                        ar_uid)
    return view.request.response.redirect(url)
def get_calculations_choices(self):
    """Build a list of listing specific calculation choices
    """
    def to_choice(brain):
        # One selection entry per calculation: uid as value, title as text
        return {
            "ResultValue": api.get_uid(brain),
            "ResultText": api.get_title(brain),
        }
    # NOTE: map() is kept on purpose (py2: returns a list)
    return map(to_choice, self.get_calculations())
def get_recipient(self, contact):
    """Returns a dict with the uid, name and email of the given contact,
    or None when no contact is given or it has no email address set
    """
    if not contact:
        return None
    obj = api.get_object(contact)
    address = obj.getEmailAddress()
    if not address:
        return None
    return dict(uid=api.get_uid(obj),
                name=obj.Title(),
                email=address)
def get_recipients(self, ar):
    """Returns the valid recipients for the given AR: the contact, the CC
    contact and the client (when it has an email address set). Entries
    without an email address are filtered out
    """
    contacts = [ar.getContact(), ar.getCCContact()]
    # get_recipient returns None for missing contacts/emails
    recipients = map(lambda con: self.get_recipient(con), contacts)

    client = ar.getClient()
    client_email = client.getEmailAddress()
    if client_email:
        recipients.append(dict(uid=api.get_uid(client),
                               name=client.Title(),
                               email=client_email))

    # Drop the None entries (py2: filter returns a list)
    return filter(None, recipients)
def render(self):
    """Renders the panic alert section.

    For Analysis Requests, returns an empty string unless at least one
    analysis is in panic range; otherwise stores the state the template
    needs (in_panic, panic_email_sent, ar_uid) before rendering.
    """
    if IAnalysisRequest.providedBy(self.context):
        self.in_panic = self.context.has_analyses_in_panic()
        if not self.in_panic:
            # Nothing to alert about
            return ""
        # Whether the panic alert email was already sent for this AR
        self.panic_email_sent = api.get_field_value(
            instance=self.context,
            field_name='PanicEmailAlertSent',
            default=False)
        self.ar_uid = api.get_uid(self.context)
    # NOTE(review): the template is also rendered for non-AR contexts,
    # without the attributes set above — confirm this is intended
    return self.template()
def get_base_info(self, obj):
    """Extract the base info from the given object
    """
    state = api.get_workflow_status_of(obj)
    return {
        "obj": obj,
        "id": api.get_id(obj),
        "uid": api.get_uid(obj),
        "title": api.get_title(obj),
        "path": api.get_path(obj),
        "url": api.get_url(obj),
        "review_state": state,
        # Human readable state, e.g. "to_be_verified" -> "To be verified"
        "state_title": state.capitalize().replace("_", " "),
    }
def folder_referral_lab(self, obj, item, index):
    """Adds the column Referral Lab to the item
    """
    editable = self.listing.is_analysis_edition_allowed(obj)
    obj = api.get_object(obj)
    ref_lab = api.get_field_value(obj, "ReferralLab", None)

    if editable:
        # Referral Laboratory is editable: render a selection list
        item["ReferralLab"] = api.get_uid(ref_lab) if ref_lab else ""
        item["choices"]["ReferralLab"] = self.get_referral_labs()
        item['allow_edit'].append('ReferralLab')
        return item

    # Read-only: display the title of the assigned referral lab, if any
    item["ReferralLab"] = api.get_title(ref_lab) if ref_lab else ""
    return item
def get_referral_labs(self):
    """Returns a list for selection with the available and active Referral
    Lab objects, sorted by title ascending
    """
    # First entry is empty, so the field can be left unset
    choices = [{"ResultValue": "", "ResultText": ""}]
    query = dict(portal_type="ReferralLab", is_active=True,
                 sort_on="sortable_title")
    for brain in api.search(query, "bika_setup_catalog"):
        choices.append({
            "ResultValue": api.get_uid(brain),
            "ResultText": api.get_title(brain),
        })
    return choices
def set_xlsx_specs(senaite_spec):
    """Applies the ranges from the XLSX specifications to the given
    AnalysisSpec object, binding each range entry to the single
    'Ranges calculation' Calculation object.
    """
    logger.info("Applying specs to {}".format(senaite_spec.Title()))
    query = dict(portal_type="Calculation", title="Ranges calculation")
    calc = api.search(query, "bika_setup_catalog")
    # Exactly one 'Ranges calculation' must exist
    if len(calc) == 0 or len(calc) > 1:
        logger.info("No calculation found [SKIP]")
        return
    calc_uid = api.get_uid(calc[0])
    keywords = list()
    raw_specifications = get_xls_specifications()
    for spec in raw_specifications:
        keyword = spec.get("keyword")
        if keyword not in keywords:
            # Process each keyword only once, and only when it resolves to
            # exactly one Analysis Service
            query = dict(portal_type="AnalysisService", getKeyword=keyword)
            brains = api.search(query, "bika_setup_catalog")
            if len(brains) == 0 or len(brains) > 1:
                logger.info(
                    "No service found for {} [SKIP]".format(keyword))
                continue
            keywords.append(keyword)
            # Default range entry for this keyword, bound to the calculation
            specs_dict = {
                'keyword': keyword,
                'min_operator': 'geq',
                'min': '0',
                'max_operator': 'lt',
                'max': '0',
                'minpanic': '',
                'maxpanic': '',
                'warn_min': '',
                'warn_max': '',
                'hidemin': '',
                'hidemax': '',
                'rangecomments': '',
                'calculation': calc_uid,
            }
            grades_dict = {grade: "" for grade in GRADES_KEYS}
            specs_dict.update(grades_dict)
            # Replace any previous entry for this keyword with the new one
            # NOTE: relies on py2 filter() returning a list (append below)
            ranges = api.get_field_value(senaite_spec, 'ResultsRange', [{}])
            ranges = filter(lambda val: val.get('keyword') != keyword, ranges)
            ranges.append(specs_dict)
            senaite_spec.setResultsRange(ranges)
def to_super_model(obj):
    """Wraps the given portal object into a SuperModel. If a specific
    ISuperModel adapter is registered for its portal type, that adapter
    is returned instead of a plain SuperModel

    :raises TypeError: when the value passed in is not a portal object
    """
    # avoid circular imports
    from senaite.core.supermodel import SuperModel

    # Object is already a SuperModel
    if isinstance(obj, SuperModel):
        return obj

    # Only portal objects are supported
    if not api.is_object(obj):
        raise TypeError("Expected a portal object, got '{}'".format(
            type(obj)))

    # Wrap the object into a specific Publication Object Adapter
    uid = api.get_uid(obj)
    adapter = queryAdapter(uid, ISuperModel, name=api.get_portal_type(obj))
    if adapter is not None:
        return adapter
    return SuperModel(uid)
def get_samples_data(self):
    """Returns a list of rows, one per AR returned by self.search(), with
    the cell values produced by the SAMPLES_ROWS (title, key, converter)
    definitions. Analysis-specific columns are skipped here
    """
    rows = []
    ars = self.search()
    total = len(ars)
    logger.info("Exporting data of {} ARs".format(total))
    for num, ar in enumerate(ars):
        sample = SuperModel(api.get_uid(ar))
        data = []
        for title, key, converter in SAMPLES_ROWS:
            # Analysis columns are handled by the analyses export
            if key.startswith("Analysis"):
                continue
            value = self.get(sample, key)
            data.append(converter(sample, key, value))
        rows.append(data)
        if num % 100 == 0:
            logger.info("Exported {}/{}".format(num, total))
    return rows
def import_specifications_for_sample_type(portal, sample_type):
    """Imports the XLSX specifications for the given sample type: ensures
    the 'Ranges calculation' Calculation exists, creates or reuses the
    '<sample type> - calculated' AnalysisSpec and rebuilds its ResultsRange
    entries, one per valid analysis service keyword.
    """
    logger.info("Importing specs for {}".format(sample_type.Title()))

    def get_bs_object(xlsx_row, xlsx_keyword, portal_type, criteria):
        # Resolves exactly one setup object from the value stored under
        # xlsx_keyword; returns None when the value is missing, no object
        # matches, or the match is ambiguous
        text_value = xlsx_row.get(xlsx_keyword, None)
        if not text_value:
            logger.warn("Value not set for keyword {}".format(xlsx_keyword))
            return None
        query = {"portal_type": portal_type, criteria: text_value}
        brain = api.search(query, 'bika_setup_catalog')
        if not brain:
            logger.warn("No objects found for type {} and {} '{}'".format(
                portal_type, criteria, text_value))
            return None
        if len(brain) > 1:
            logger.warn(
                "More than one object found for type {} and {} '{}'".format(
                    portal_type, criteria, text_value))
            return None
        return api.get_object(brain[0])

    raw_specifications = get_xls_specifications()
    for spec in raw_specifications:
        # Valid Analysis Service?
        service = get_bs_object(spec, "keyword", "AnalysisService",
                                "getKeyword")
        if not service:
            continue

        # The calculation exists?
        calc_title = "Ranges calculation"
        query = dict(calculation=calc_title)
        calc = get_bs_object(query, "calculation", "Calculation", "title")
        if not calc:
            # Create a new one
            folder = portal.bika_setup.bika_calculations
            _id = folder.invokeFactory("Calculation", id=tmpID())
            calc = folder[_id]
            calc.edit(title=calc_title,
                      PythonImports=[{
                          "module": "bhp.lims.specscalculations",
                          "function": "get_specification_for"
                      }],
                      Formula="get_specification_for($spec)")
            calc.unmarkCreationFlag()
            renameAfterCreation(calc)

        # Existing AnalysisSpec?
        specs_title = "{} - calculated".format(sample_type.Title())
        query = dict(portal_type='AnalysisSpec', title=specs_title)
        aspec = api.search(query, 'bika_setup_catalog')
        if not aspec:
            # Create the new AnalysisSpecs object!
            folder = portal.bika_setup.bika_analysisspecs
            _id = folder.invokeFactory('AnalysisSpec', id=tmpID())
            aspec = folder[_id]
            aspec.edit(title=specs_title)
            aspec.unmarkCreationFlag()
            renameAfterCreation(aspec)
        elif len(aspec) > 1:
            # Ambiguous: do not guess which specification to update
            logger.warn(
                "More than one Analysis Specification found for {}".format(
                    specs_title))
            continue
        else:
            aspec = api.get_object(aspec[0])
        aspec.setSampleType(sample_type)

        # Set the analysis keyword and bind it to the calculation to use
        keyword = service.getKeyword()
        specs_dict = {
            'keyword': keyword,
            'min_operator': 'geq',
            'min': '0',
            'max_operator': 'lt',
            'max': '0',
            'minpanic': '',
            'maxpanic': '',
            'warn_min': '',
            'warn_max': '',
            'hidemin': '',
            'hidemax': '',
            'rangecomments': '',
            'calculation': api.get_uid(calc),
        }
        grades_dict = {grade: "" for grade in GRADES_KEYS}
        specs_dict.update(grades_dict)
        # Replace any previous entry for this keyword with the new one
        # NOTE: relies on py2 filter() returning a list (append below)
        ranges = api.get_field_value(aspec, 'ResultsRange', [{}])
        ranges = filter(lambda val: val.get('keyword') != keyword, ranges)
        ranges.append(specs_dict)
        aspec.setResultsRange(ranges)