def _to_service(self, thing):
    """Convert to Analysis Service

    :param thing: UID/Catalog Brain/Object/Something
    :returns: Analysis Service object or None
    """
    # Convert UIDs to objects
    if api.is_uid(thing):
        thing = api.get_object_by_uid(thing, None)

    # Bail out if the thing is not a valid object
    if not api.is_object(thing):
        logger.warn("'{}' is not a valid object!".format(repr(thing)))
        return None

    # Ensure we have an object here and not a brain
    obj = api.get_object(thing)

    if IAnalysisService.providedBy(obj):
        return obj

    if IAnalysis.providedBy(obj):
        return obj.getAnalysisService()

    # An object, but neither an Analysis nor an AnalysisService?
    # This should never happen.
    msg = "ARAnalysesField doesn't accept objects of type {}. " \
          "The object will be dismissed.".format(api.get_portal_type(obj))
    logger.warn(msg)
    return None
def folder_item(self, obj, item, index):
    batch = api.get_object(obj)

    # Doctor
    doctor = get_field_value(batch, "Doctor", None)
    item["Doctor"] = doctor and doctor.Title() or ""
    item["replace"]["Doctor"] = doctor and get_link(api.get_url(doctor), doctor.Title())

    # Onset Date
    onset = get_field_value(batch, "OnsetDate", None)
    item["OnsetDate"] = onset and self.listing.ulocalized_time(onset) or ""

    # Patient
    item["Patient"] = ""
    item["getPatientID"] = ""
    item["getClientPatientID"] = ""
    patient = get_field_value(batch, "Patient", None)
    if patient:
        url = api.get_url(patient)
        item["Patient"] = patient.Title()
        item["replace"]["Patient"] = get_link(url, patient.Title())
        item["getPatientID"] = patient.id
        item["replace"]["getPatientID"] = get_link(url, patient.id)
        pid = patient.getClientPatientID()
        pid_link = pid and get_link(url, pid) or ""
        item["getClientPatientID"] = pid or ""
        item["replace"]["getClientPatientID"] = pid_link

    return item
def get_workflow_actions(self):
    """Compile a list of possible workflow transitions for items
    in this Table.
    """
    # cbb return empty list if we are unable to select items
    if not self.bika_listing.show_select_column:
        return []

    workflow = getToolByName(self.context, 'portal_workflow')

    # get all transitions for all items.
    transitions = {}
    actions = []
    for obj in [i.get('obj', '') for i in self.items]:
        obj = get_object(obj)
        for it in workflow.getTransitionsFor(obj):
            transitions[it['id']] = it

    # the list is restricted to and ordered by these transitions.
    if 'transitions' in self.bika_listing.review_state:
        for tdict in self.bika_listing.review_state['transitions']:
            if tdict['id'] in transitions:
                actions.append(transitions[tdict['id']])
    else:
        actions = transitions.values()

    new_actions = []
    # remove any invalid items with a warning
    for a, action in enumerate(actions):
        if isinstance(action, dict) and 'id' in action:
            new_actions.append(action)
        else:
            logger.warning("bad action in review_state['transitions']: %s. "
                           "(complete list: %s)." % (action, actions))
    actions = new_actions

    # and these are removed
    if 'hide_transitions' in self.bika_listing.review_state:
        hidden_transitions = self.bika_listing.review_state['hide_transitions']
        actions = [a for a in actions if a['id'] not in hidden_transitions]

    # cheat: until workflow_action is abolished, all URLs defined in
    # GS workflow setup will be ignored, and the default will apply.
    # (that means, WorkflowAction-bound URL is called).
    for i, action in enumerate(actions):
        actions[i]['url'] = ''

    # if there is a self.review_state['some_state']['custom_transitions']
    # attribute on the BikaListingView, add these actions to the list.
    if 'custom_transitions' in self.bika_listing.review_state:
        for action in self.bika_listing.review_state['custom_transitions']:
            if isinstance(action, dict) and 'id' in action:
                actions.append(action)

    for a, action in enumerate(actions):
        actions[a]['title'] = t(PMF(actions[a]['title']))

    return actions
def _get_assigned_worksheets(self, analysis):
    """Return the assigned worksheets of this Analysis

    :param analysis: Analysis Brain/Object
    :returns: Worksheet Backreferences
    """
    analysis = api.get_object(analysis)
    return analysis.getBackReferences("WorksheetAnalysis")
def isItemAllowed(self, obj):
    # TODO: Performance tip. We need the full object to filter by Insurance
    uid = api.get_uid(self.context)
    full_obj = api.get_object(obj)
    insurance_company = full_obj.getInsuranceCompany()
    if not insurance_company:
        return False
    return api.get_uid(insurance_company) == uid
def get_objects_in_sequence(brain_or_object, ctype, cref):
    """Return the list of items related to the given object, either by back
    reference or by containment, depending on the counter type (ctype)
    """
    obj = api.get_object(brain_or_object)
    if ctype == "backreference":
        return get_backreferences(obj, cref)
    if ctype == "contained":
        return get_contained_items(obj, cref)
    raise ValueError("Reference value is mandatory for sequence type counter")
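# Illustrative only: a minimal usage sketch for get_objects_in_sequence(),
# assuming a hypothetical `batch` object. The relationship names below
# ("AnalysisRequestBatch", "AnalysisRequest") are examples, not confirmed
# counter definitions; only the "backreference" and "contained" ctypes are
# handled by the function above.
related = get_objects_in_sequence(batch, "backreference", "AnalysisRequestBatch")
contained = get_objects_in_sequence(batch, "contained", "AnalysisRequest")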
def get_field_value(instance, field_name, default=_marker):
    """Returns the value of a Schema field from the instance passed in
    """
    instance = api.get_object(instance)
    field = instance.Schema() and instance.Schema().getField(field_name) or None
    if not field:
        if default is not _marker:
            return default
        api.fail("No field {} found for {}".format(field_name, repr(instance)))
    return field.get(instance)
def getBatches(self, full_objects=False):
    """Returns the Batches this Doctor is assigned to
    """
    query = dict(portal_type='Batch', getDoctorUID=self.UID())
    brains = api.search(query, 'bika_catalog')
    if full_objects:
        return map(lambda brain: api.get_object(brain), brains)
    return brains
def getAnalysisRequests(self, full_objects=False):
    """Returns the Analysis Requests this Doctor is assigned to
    """
    query = dict(portal_type='AnalysisRequest', getDoctorUID=self.UID())
    brains = api.search(query, CATALOG_ANALYSIS_REQUEST_LISTING)
    if full_objects:
        return map(lambda brain: api.get_object(brain), brains)
    return brains
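# Illustrative only: both accessors return catalog brains by default and wake
# up the full objects only when full_objects=True (this is Python 2 code, so
# map() returns a list). `doctor` is a hypothetical Doctor instance.
batch_brains = doctor.getBatches()                    # lightweight brains
ars = doctor.getAnalysisRequests(full_objects=True)   # full AR objects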
def _is_frozen(self, brain_or_object):
    """Check if the passed in object is frozen

    :param brain_or_object: Analysis or AR Brain/Object
    :returns: True if the object is frozen
    """
    obj = api.get_object(brain_or_object)
    active = api.is_active(obj)
    verified = wasTransitionPerformed(obj, 'verify')
    return not active or verified
def _is_assigned_to_worksheet(self, analysis):
    """Check if the Analysis is assigned to a worksheet

    :param analysis: Analysis Brain/Object
    :returns: True if the Analysis is assigned to a WS
    """
    analysis = api.get_object(analysis)
    state = api.get_workflow_status_of(
        analysis, state_var='worksheetanalysis_review_state')
    return state == "assigned"
def fix_service_status_inconsistences():
    catalog = api.get_tool('bika_setup_catalog')
    brains = catalog(portal_type='AnalysisService')
    for brain in brains:
        obj = api.get_object(brain)
        if not isActive(obj):
            continue

        # If this service is active, then all the services this service
        # depends on must be active too, as well as the calculation
        calculation = obj.getCalculation()
        if not calculation:
            continue

        dependencies = calculation.getDependentServices()
        for dependency in dependencies:
            dependency = api.get_object(dependency)
            if not isActive(dependency):
                _change_inactive_state(dependency, 'active')
def fix_service_profile_template_inconsistences():
    catalog = api.get_tool('bika_setup_catalog')
    brains = catalog(portal_type='AnalysisService')
    for brain in brains:
        obj = api.get_object(brain)
        if isActive(obj):
            continue

        # If this service is inactive, make sure it is not used in Profiles
        # or in AR Templates
        obj.after_deactivate_transition_event()
def set_field_value(instance, field_name, value):
    """Sets the value of a Schema field
    """
    if field_name == "id":
        logger.warn("Assignment of id is not allowed")
        return
    logger.info("Field {} = {}".format(field_name, repr(value)))
    instance = api.get_object(instance)
    field = instance.Schema() and instance.Schema().getField(field_name) or None
    if not field:
        api.fail("No field {} found for {}".format(field_name, repr(instance)))
    field.set(instance, value)
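# Illustrative only: a usage sketch combining get_field_value() and
# set_field_value(). `batch` is a hypothetical Archetypes object whose schema
# contains a "ClientBatchID" string field; note that assignments to "id" are
# rejected by set_field_value() above.
current = get_field_value(batch, "ClientBatchID", default="")
if not current:
    set_field_value(batch, "ClientBatchID", "B-0001")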
def getAnalysis(self):
    """Return the analysis to which this attachment is linked.

    It may not be linked to any analysis.
    """
    analysis = get_backreferences(self, 'AnalysisAttachment', as_brains=True)
    if not analysis:
        return None
    if len(analysis) > 1:
        logger.warn("Single attachment assigned to more than one Analysis")
    analysis = api.get_object(analysis[0])
    return analysis
def folderitem(self, obj, item, index):
    """Applies new properties to the item to be rendered
    """
    item = super(DoctorsView, self).folderitem(obj, item, index)
    url = item.get("url")
    doctor_id = item.get("getDoctorID")
    item['replace']['getDoctorID'] = get_link(url, value=doctor_id)
    item['getPrimaryReferrer'] = ""
    doctor = api.get_object(obj)
    pri = doctor.getPrimaryReferrer()
    if pri:
        pri_url = pri.absolute_url()
        pri = pri.Title()
        item['replace']['getPrimaryReferrer'] = get_link(pri_url, value=pri)
    return item
def setup_patients_ownership(portal):
    """Set the role "Owner" to all the client contacts that belong to the
    same client as the patient, if any
    """
    logger.info("Applying Patients ownership ...")
    brains = api.search(dict(portal_type="Patient"), CATALOG_PATIENTS)
    total = len(brains)
    for num, brain in enumerate(brains):
        if num % 100 == 0:
            logger.info("Applying Patients Ownership {}/{}".format(num, total))
        purge_owners_for(api.get_object(brain))
        if num % 1000 == 0:
            commit_transaction()
    commit_transaction()
def getTemplateInstruments(self):
    """Dictionary of instruments per template

    Used in bika_listing.pt
    """
    items = dict()
    templates = self._get_worksheet_templates_brains()
    for template in templates:
        template_obj = api.get_object(template)
        uid_template = api.get_uid(template_obj)
        instrument = template_obj.getInstrument()
        uid_instrument = ''
        if instrument:
            uid_instrument = api.get_uid(instrument)
        items[uid_template] = uid_instrument

    return json.dumps(items)
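# Illustrative only: the method returns a JSON string that maps each worksheet
# template UID to its default instrument UID (empty string when no instrument
# is assigned), so the consuming template/JS code can decode it directly.
# `view` is a hypothetical instance of the listing view defining the method.
mapping = json.loads(view.getTemplateInstruments())
for template_uid, instrument_uid in mapping.items():
    logger.info("{} -> {}".format(template_uid, instrument_uid or "no instrument"))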
def get_info(brain_or_object, endpoint=None, complete=False):
    """Extract the data from the catalog brain or object

    :param brain_or_object: A single catalog brain or content object
    :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
    :param endpoint: The named URL endpoint for the root of the items
    :type endpoint: str/unicode
    :param complete: Flag to wake up the object and fetch all data
    :type complete: bool
    :returns: Data mapping for the object/catalog brain
    :rtype: dict
    """
    # extract the data from the initial object with the proper adapter
    info = IInfo(brain_or_object).to_dict()

    # update with url info (always included)
    url_info = get_url_info(brain_or_object, endpoint)
    info.update(url_info)

    # include the parent url info
    parent = get_parent_info(brain_or_object)
    info.update(parent)

    # add the complete data of the object if requested
    # -> requires to wake up the object if it is a catalog brain
    if complete:
        # ensure we have a full content object
        obj = api.get_object(brain_or_object)
        # get the compatible adapter
        adapter = IInfo(obj)
        # update the data set with the complete information
        info.update(adapter.to_dict())

        # update the data set with the workflow information
        # -> only possible if `?complete=yes&workflow=yes`
        if req.get_workflow(False):
            info.update(get_workflow_info(obj))

        # # add sharing data if the user requested it
        # # -> only possible if `?complete=yes`
        # if req.get_sharing(False):
        #     sharing = get_sharing_info(obj)
        #     info.update({"sharing": sharing})

    return info
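# Illustrative only: a sketch of how get_info() is typically called when
# building a JSON API response from a catalog search. `brain` is a
# hypothetical catalog brain; passing complete=True wakes up the object and
# merges the full field data into the mapping.
record = get_info(brain)                      # brain metadata + URL info only
full_record = get_info(brain, complete=True)  # wakes up the object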
def get_service_by_keyword(self, keyword, default=None):
    """Get a service by keyword
    """
    logger.info("Get service by keyword={}".format(keyword))
    bsc = api.get_tool("bika_setup_catalog")
    results = bsc(portal_type='AnalysisService', getKeyword=keyword)
    if not results:
        logger.exception("No Analysis Service found for Keyword '{}'. "
                         "Related: LIMS-1614".format(keyword))
        return default
    elif len(results) > 1:
        logger.exception("More than one Analysis Service found for Keyword '{}'. "
                         .format(keyword))
        return default
    else:
        return api.get_object(results[0])
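# Illustrative only: the lookup falls back to `default` both when no service
# and when more than one service matches the keyword. `importer` is a
# hypothetical instance of the class defining the method and "Ca" a
# hypothetical analysis keyword.
service = importer.get_service_by_keyword("Ca", default=None)
if service is None:
    logger.warn("No unique Analysis Service found for keyword 'Ca'")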
def purge_owners_for(batch):
    """Removes the role "Owner" from all client contacts that do not belong to
    the same Client the batch is assigned to, and assigns the role "Owner" to
    the client contacts assigned to the batch
    """
    # Add role "Owner" for this batch to all contacts from this Client
    assign_owners_for(batch)

    # Unassign role "Owner" from contacts that belong to another Client
    batch_client = batch.getClient()
    batch_client_uid = batch_client and api.get_uid(batch_client) or None
    for client in api.search(dict(portal_type="Client"), "portal_catalog"):
        if api.get_uid(client) == batch_client_uid:
            continue
        client = api.get_object(client)
        contacts = client.objectValues("Contact")
        users = map(lambda contact: contact.getUser(), contacts)
        users = filter(None, users)
        for user in users:
            security.revoke_local_roles_for(batch, ["Owner"], user=user)
    batch.reindexObjectSecurity()
def folderitem(self, obj, item, index):
    """Applies new properties to the item (Batch) that is currently being
    rendered as a row in the list

    :param obj: batch to be rendered as a row in the list
    :param item: dict representation of the batch, suitable for the list
    :param index: current position of the item within the list
    :type obj: ATContentType/DexterityContentType
    :type item: dict
    :type index: int
    :return: the dict representation of the item
    :rtype: dict
    """
    # TODO This can be done entirely by using brains
    full_obj = api.get_object(obj)
    bid = full_obj.getId()
    item['BatchID'] = bid
    item['replace']['BatchID'] = "<a href='%s/%s'>%s</a>" % (
        item['url'], 'analysisrequests', bid)

    title = full_obj.Title()
    item['Title'] = title
    item['replace']['Title'] = "<a href='%s/%s'>%s</a>" % (
        item['url'], 'analysisrequests', title)

    item['Client'] = ''
    client = full_obj.getClient()
    if client:
        item['Client'] = client.Title()
        item['replace']['Client'] = "<a href='%s'>%s</a>" % (
            client.absolute_url(), client.Title())

    # TODO Is this workaround necessary?
    date = full_obj.Schema().getField('BatchDate').get(obj)
    if callable(date):
        date = date()
    item['BatchDate'] = date
    item['replace']['BatchDate'] = self.ulocalized_time(date)

    return item
def apply_doctor_permissions_for_clients(portal, ut):
    # Add doctor action for client portal_type
    add_doctor_action_for_client(portal)

    # Allow client contacts to list/add/edit Doctors
    workflow_tool = api.get_tool("portal_workflow")
    workflow = workflow_tool.getWorkflowById('bika_doctor_workflow')
    catalog = api.get_tool('portal_catalog')

    # Adding new index and columns in portal_catalog for doctors
    ut.addIndexAndColumn('portal_catalog', 'allowedRolesAndUsers', 'FieldIndex')
    ut.addIndex('portal_catalog', 'getPrimaryReferrerUID', 'FieldIndex')

    brains = catalog(portal_type='Doctor')
    counter = 0
    total = len(brains)
    logger.info(
        "Changing permissions for doctor objects: {0}".format(total))
    for brain in brains:
        allowed = brain.allowedRolesAndUsers or []
        if 'Client' not in allowed:
            obj = api.get_object(brain)
            workflow.updateRoleMappingsFor(obj)
            obj.reindexObject()
        counter += 1
        if counter % 100 == 0:
            logger.info(
                "Changing permissions for doctor objects: " +
                "{0}/{1}".format(counter, total))
    logger.info(
        "Changed permissions for doctor objects: " +
        "{0}/{1}".format(counter, total))

    # Allowing client to view the doctors folder
    add_permission_for_role(portal.doctors, permissions.View, 'Client')
    add_permission_for_role(portal.doctors, AddDoctor, 'Client')
def is_out_of_range(brain_or_object, result=_marker):
    """Checks if the result for the analysis passed in is out of range and/or
    out of shoulders range.

             min                                                  max
             warn            min                   max            warn
    ·········|---------------|=====================|---------------|·········
    ----- out-of-range -----><----- in-range ------><----- out-of-range -----
             <-- shoulder --><----- in-range ------><-- shoulder -->

    :param brain_or_object: A single catalog brain or content object
    :param result: Tentative result. If None, use the analysis result
    :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
    :returns: Tuple of two elements. The first value is `True` if the result
    is out of range and `False` if it is in range. The second value is `True`
    if the result is out of shoulder range and `False` if it is in shoulder
    range
    :rtype: (bool, bool)
    """
    analysis = api.get_object(brain_or_object)
    if not IAnalysis.providedBy(analysis) and \
            not IReferenceAnalysis.providedBy(analysis):
        api.fail("{} is not supported. Needs to be IAnalysis or "
                 "IReferenceAnalysis".format(repr(analysis)))

    if result is _marker:
        result = api.safe_getattr(analysis, "getResult", None)

    if not api.is_floatable(result):
        # Result is empty/None or not a valid number
        return False, False

    result = api.to_float(result)

    # Note that routine analyses, duplicates and reference analyses all of
    # them implement the function getResultsRange:
    # - For routine analyses, the function returns the valid range based on
    #   the specs assigned during the creation process.
    # - For duplicates, the valid range is the result of the analysis the
    #   duplicate was generated from +/- the duplicate variation.
    # - For reference analyses, getResultsRange returns the valid range as
    #   indicated in the Reference Sample from which the analysis was created.
    result_range = api.safe_getattr(analysis, "getResultsRange", None)
    if not result_range:
        # No result range defined or the passed in object does not suit
        return False, False

    # Maybe there is a custom adapter
    adapters = getAdapters((analysis, ), IResultOutOfRange)
    for name, adapter in adapters:
        ret = adapter(result=result, specification=result_range)
        if not ret or not ret.get('out_of_range', False):
            continue
        if not ret.get('acceptable', True):
            # Out of range + out of shoulders
            return True, True
        # Out of range, but in shoulders
        return True, False

    # The assignment of result as default fallback for min and max guarantees
    # the result will be in range also if no min/max values are defined
    specs_min = api.to_float(result_range.get('min', result), result)
    specs_max = api.to_float(result_range.get('max', result), result)
    if specs_min <= result <= specs_max:
        # In range, no need to check shoulders
        return False, False

    # Out of range, check shoulders. If no explicit warn_min or warn_max have
    # been defined, no shoulders must be considered for this analysis. Thus,
    # use specs' min and max as default fallback values
    warn_min = api.to_float(result_range.get('warn_min', specs_min), specs_min)
    warn_max = api.to_float(result_range.get('warn_max', specs_max), specs_max)
    in_shoulder = warn_min <= result <= warn_max
    return True, not in_shoulder
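# Illustrative only: how the two booleans returned by is_out_of_range() are
# commonly interpreted. `analysis` is a hypothetical routine analysis with a
# captured result and a valid results range assigned.
out_of_range, out_of_shoulders = is_out_of_range(analysis)
if not out_of_range:
    status = "in range"
elif not out_of_shoulders:
    status = "in shoulder range"  # out of range, but within warn_min/warn_max
else:
    status = "out of range"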
def assign_retracted_to_retests(portal): logger.info("Reassigning retracted to retests ...") # Note this is confusing, getRetested index tells us if the analysis is a # retest, not the other way round! (the analysis has been retested) catalog = api.get_tool(CATALOG_ANALYSIS_LISTING) if "getRetested" not in catalog.indexes(): return processed = list() query = dict(getRetested="True") brains = api.search(query, CATALOG_ANALYSIS_LISTING) total = len(brains) for num, brain in enumerate(brains): retest = api.get_object(brain) retest_uid = api.get_uid(retest) if retest.getRetestOf(): # We've been resolved this inconsistency already total -= 1 continue # Look for the retest if IDuplicateAnalysis.providedBy(retest): worksheet = retest.getWorksheet() if not worksheet: total -= 1 continue for dup in worksheet.get_duplicates_for(retest.getAnalysis()): if api.get_uid(dup) != retest_uid \ and api.get_workflow_status_of(dup) == "retracted": retest.setRetestOf(dup) processed.append(retest) break elif IReferenceAnalysis.providedBy(retest): worksheet = retest.getWorksheet() if not worksheet: total -= 1 continue ref_type = retest.getReferenceType() slot = worksheet.get_slot_position(retest.getSample(), ref_type) for ref in worksheet.get_analyses_at(slot): if api.get_uid(ref) != retest_uid \ and api.get_workflow_status_of(ref) == "retracted": retest.setRetestOf(ref) processed.append(retest) break else: request = retest.getRequest() keyword = retest.getKeyword() analyses = request.getAnalyses(review_state="retracted", getKeyword=keyword) if not analyses: total -= 1 continue retest.setRetestOf(analyses[-1]) processed.append(retest) if num % 100 == 0: logger.info("Reassigning retracted analysis: {}/{}".format( num, total)) del_metadata(portal, catalog_id=CATALOG_ANALYSIS_LISTING, column="getRetested") add_metadata(portal, catalog_id=CATALOG_ANALYSIS_LISTING, column="getRetestOfUID") del_index(portal, catalog_id=CATALOG_ANALYSIS_LISTING, index_name="getRetested") add_index(portal, catalog_id=CATALOG_ANALYSIS_LISTING, index_name="isRetest", index_attribute="isRetest", index_metatype="BooleanIndex") total = len(processed) for num, analysis in enumerate(processed): if num % 100 == 0: logger.info("Reindexing retests: {}/{}".format(num, total)) analysis.reindexObject(idxs="isRetest")
def fix_workflow_transitions(portal): """ Replace target states from some workflow statuses """ logger.info("Fixing workflow transitions...") tochange = [{ 'wfid': 'bika_duplicateanalysis_workflow', 'trid': 'submit', 'changes': { 'new_state_id': 'to_be_verified', 'guard_expr': '' }, 'update': { 'catalog': CATALOG_ANALYSIS_LISTING, 'portal_type': 'DuplicateAnalysis', 'status_from': 'attachment_due', 'status_to': 'to_be_verified' } }] wtool = api.get_tool('portal_workflow') for item in tochange: wfid = item['wfid'] trid = item['trid'] workflow = wtool.getWorkflowById(wfid) transitions = workflow.transitions transition = transitions[trid] changes = item.get('changes', {}) if 'new_state_id' in changes: new_state_id = changes['new_state_id'] oldstate = transition.new_state_id logger.info( "Replacing target state '{0}' from '{1}.{2}' to {3}".format( oldstate, wfid, trid, new_state_id)) transition.new_state_id = new_state_id if 'guard_expr' in changes: new_guard = changes['guard_expr'] if not new_guard: transition.guard = None logger.info("Removing guard expression from '{0}.{1}'".format( wfid, trid)) else: guard = transition.getGuard() guard.expr = Expression(new_guard) transition.guard = guard logger.info( "Replacing guard expression from '{0}.{1}' to {2}".format( wfid, trid, new_guard)) update = item.get('update', {}) if update: catalog_id = update['catalog'] portal_type = update['portal_type'] catalog = api.get_tool(catalog_id) brains = catalog(portal_type=portal_type) for brain in brains: obj = api.get_object(brain) if 'status_from' in update and 'status_to' in update: status_from = update['status_from'] status_to = update['status_to'] if status_from == brain.review_state: logger.info( "Changing status for {0} from '{1} to {2}".format( obj.getId(), status_from, status_to)) changeWorkflowState(obj, wfid, status_to) workflow.updateRoleMappingsFor(obj) obj.reindexObject()
def workflow_action_submit(self): uids = self.get_selected_uids() if not uids: message = _('No items selected.') self.context.plone_utils.addPortalMessage(message, 'info') self.request.response.redirect(self.context.absolute_url()) return if not is_active(self.context): message = _('Item is inactive.') self.context.plone_utils.addPortalMessage(message, 'info') self.request.response.redirect(self.context.absolute_url()) return form = self.request.form remarks = form.get('Remarks', [{}])[0] results = form.get('Result', [{}])[0] retested = form.get('retested', {}) methods = form.get('Method', [{}])[0] instruments = form.get('Instrument', [{}])[0] analysts = self.request.form.get('Analyst', [{}])[0] uncertainties = self.request.form.get('Uncertainty', [{}])[0] dlimits = self.request.form.get('DetectionLimit', [{}])[0] # XXX combine data from multiple bika listing tables. # TODO: Is this necessary? item_data = {} if 'item_data' in form: if type(form['item_data']) == list: for i_d in form['item_data']: for i, d in json.loads(i_d).items(): item_data[i] = d else: item_data = json.loads(form['item_data']) # Store affected Analysis Requests affected_ars = set() # Store affected Worksheets affected_ws = set() # Store invalid instruments-ref.analyses invalid_instrument_refs = dict() # We manually query by all analyses uids at once here instead of using # _get_selected_items from the base class, cause that function fetches # the objects by uid, but sequentially one by one query = dict(UID=uids) for brain in api.search(query, CATALOG_ANALYSIS_LISTING): uid = api.get_uid(brain) analysis = api.get_object(brain) # If not active, do nothing if not is_active(brain): continue # Need to save remarks? if uid in remarks: analysis.setRemarks(remarks[uid]) # Retested? if uid in retested: analysis.setRetested(retested[uid]) # Need to save the instrument? if uid in instruments: instrument = instruments[uid] or None analysis.setInstrument(instrument) if instrument and IReferenceAnalysis.providedBy(analysis): if is_out_of_range(analysis): # This reference analysis is out of range, so we have # to retract all analyses assigned to this same # instrument that are awaiting for verification if uid not in invalid_instrument_refs: invalid_instrument_refs[uid] = set() invalid_instrument_refs[uid].add(analysis) else: # The reference result is valid, so make the instrument # available again for further analyses instrument.setDisposeUntilNextCalibrationTest(False) # Need to save the method? if uid in methods: method = methods[uid] or None analysis.setMethod(method) # Need to save the analyst? if uid in analysts: analysis.setAnalyst(analysts[uid]) # Need to save the uncertainty? if uid in uncertainties: analysis.setUncertainty(uncertainties[uid]) # Need to save the detection limit? if uid in dlimits and dlimits[uid]: analysis.setDetectionLimitOperand(dlimits[uid]) # Need to save results? submitted = False if uid in results and results[uid]: interims = item_data.get(uid, []) analysis.setInterimFields(interims) analysis.setResult(results[uid]) # Can the analysis be submitted? # An analysis can only be submitted if all its dependencies # are valid and have been submitted already can_submit = True invalid_states = [ 'to_be_sampled', 'to_be_preserved', 'sample_due', 'sample_received' ] for dependency in analysis.getDependencies(): if in_state(dependency, invalid_states): can_submit = False break if can_submit: # doActionFor transitions the analysis to verif pending, # so must only be done when results are submitted. 
doActionFor(analysis, 'submit') submitted = True if IRequestAnalysis.providedBy(analysis): # Store the AR uids to be reindexed later. affected_ars.add(brain.getParentUID) if brain.worksheetanalysis_review_state == 'assigned': worksheet_uid = analysis.getWorksheetUID() if worksheet_uid: affected_ws.add(worksheet_uid) if not submitted: # Analysis has not been submitted, so we need to reindex the # object manually, to update catalog's metadata. analysis.reindexObject() # If a reference analysis with an out-of-range result and instrument # assigned has been submitted, retract then routine analyses that are # awaiting for verification and with same instrument associated retracted = list() for invalid_instrument_uid in invalid_instrument_refs.keys(): query = dict( getInstrumentUID=invalid_instrument_uid, portal_type=['Analysis', 'DuplicateAnalysis'], review_state='to_be_verified', cancellation_state='active', ) brains = api.search(query, CATALOG_ANALYSIS_LISTING) for brain in brains: analysis = api.get_object(brain) failed_msg = '{0}: {1}'.format( ulocalized_time(DateTime(), long_format=1), _("Instrument failed reference test")) an_remarks = analysis.getRemarks() analysis.setRemarks('. '.join([an_remarks, failed_msg])) doActionFor(analysis, 'retract') retracted.append(analysis) # If some analyses have been retracted because instrument failed a # reference test, then generate a pdf report if retracted: # Create the Retracted Analyses List report = AnalysesRetractedListReport(self.context, self.request, self.portal_url, 'Retracted analyses', retracted) # Attach the pdf to all ReferenceAnalysis that failed (accessible # from Instrument's Internal Calibration Tests list pdf = report.toPdf() for ref in invalid_instrument_refs.values(): ref.setRetractedAnalysesPdfReport(pdf) # Send the email try: report.sendEmail() except: pass # Finally, when we are done processing all applicable analyses, we must # attempt to initiate the submit transition on the ARs and Worksheets # the processed analyses belong to. # We stick only to affected_ars, and affected_ws # Reindex the Analysis Requests for which at least one Analysis has # been submitted. We do this here because one AR can contain multiple # Analyses, so better to just reindex the AR once instead of each time. # AR Catalog contains some metadata that that rely on the Analyses an # Analysis Request contains. if affected_ars: query = dict(UID=list(affected_ars), portal_type="AnalysisRequest") for ar_brain in api.search(query, CATALOG_ANALYSIS_REQUEST_LISTING): if ar_brain.review_state == 'to_be_verified': continue ar = api.get_object(ar_brain) if isTransitionAllowed(ar, "submit"): doActionFor(ar, "submit") else: ar.reindexObject() if affected_ws: query = dict(UID=list(affected_ws), portal_type="Worksheet") for ws_brain in api.search(query, CATALOG_WORKSHEET_LISTING): if ws_brain.review_state == 'to_be_verified': continue ws = api.get_object(ws_brain) if isTransitionAllowed(ws, "submit"): doActionFor(ws, "submit") message = PMF("Changes saved.") self.context.plone_utils.addPortalMessage(message, 'info') self.destination_url = self.request.get_header( "referer", self.context.absolute_url()) self.request.response.redirect(self.destination_url)
def folderitem(self, obj, item, index):
    obj = api.get_object(obj)
    item["Description"] = obj.Description()
    item["Gender"] = GENDERS_APPLY.getValue(obj.getGender())
    item["replace"]["Title"] = get_link(item["url"], item["Title"])
    return item
def __call__(self): parms = [] query = dict(portal_type="Analysis", sort_on="getDateReceived", sort_order="ascending") # Filter by Service UID self.add_filter_by_service(query=query, out_params=parms) # Filter by Analyst self.add_filter_by_analyst(query=query, out_params=parms) # Filter by date range self.add_filter_by_date_range(query=query, out_params=parms) # Period period = self.request.form.get("Period", "Day") parms.append({"title": _("Period"), "value": period, "type": "text"}) # Fetch the data data_lines = [] prev_date_key = None count = 0 duration = 0 total_count = 0 total_duration = 0 analyses = api.search(query, CATALOG_ANALYSIS_LISTING) for analysis in analyses: analysis = api.get_object(analysis) date_key = self.get_period_key(analysis.getDateReceived(), self.date_format_short) if date_key and date_key != prev_date_key: if prev_date_key: # Calculate averages data_lines.append([{ "value": prev_date_key, 'class': '' }, { "value": api.to_dhm_format(minutes=(duration // count)), "class": "number" }]) count = 0 duration = 0 analysis_duration = analysis.getDuration() count += 1 total_count += 1 duration += analysis_duration total_duration += analysis_duration prev_date_key = date_key if prev_date_key: # Calculate averages data_lines.append([{ "value": prev_date_key, 'class': '' }, { "value": api.to_dhm_format(minutes=(duration // count)), "class": "number" }]) # Totals total_duration = total_count and total_duration / total_count or 0 total_duration = api.to_dhm_format(minutes=total_duration) if self.request.get("output_format", "") == "CSV": return self.generate_csv(data_lines) self.report_content = { 'headings': self.headings, 'parms': parms, 'formats': self.formats, 'datalines': data_lines, 'footings': [[{ 'value': _('Total data points'), 'class': 'total' }, { 'value': total_count, 'class': 'total number' }], [{ 'value': _('Average TAT'), 'class': 'total' }, { 'value': total_duration, 'class': 'total number' }]] } return { 'report_title': t(self.headings['header']), 'report_data': self.template() }
def folderitems(self): bsc = getToolByName(self.context, 'bika_setup_catalog') analysis_categories = bsc(portal_type="AnalysisCategory", sort_on="sortable_title") analysis_categories_order = dict([(b.Title, "{:04}".format(a)) for a, b in enumerate(analysis_categories)]) workflow = getToolByName(self.context, 'portal_workflow') mtool = getToolByName(self.context, 'portal_membership') checkPermission = mtool.checkPermission if not self.allow_edit: can_edit_analyses = False else: if self.contentFilter.get('getPointOfCapture', '') == 'field': can_edit_analyses = checkPermission(EditFieldResults, self.context) else: can_edit_analyses = checkPermission(EditResults, self.context) self.allow_edit = can_edit_analyses self.show_select_column = self.allow_edit context_active = isActive(self.context) self.categories = [] items = super(AnalysesView, self).folderitems(full_objects=True) member = mtool.getAuthenticatedMember() self.interim_fields = {} self.interim_columns = {} self.specs = {} show_methodinstr_columns = False dmk = self.context.bika_setup.getResultsDecimalMark() for item in items: if 'obj' not in item: logger.warn("Missing 'obj' key in Analysis item '{}'".format(item)) continue # self.contentsMethod may return brains or objects. obj = api.get_object(item["obj"]) if workflow.getInfoFor(obj, 'review_state') == 'retracted' \ and not checkPermission(ViewRetractedAnalyses, self.context): logger.info("Skipping retracted analysis {}".format(obj.getId())) continue result = obj.getResult() service = obj.getService() calculation = service.getCalculation() unit = service.getUnit() keyword = service.getKeyword() if self.show_categories: cat = obj.getService().getCategoryTitle() cat_order = analysis_categories_order.get(cat) item['category'] = cat if (cat, cat_order) not in self.categories: self.categories.append((cat, cat_order)) # Check for InterimFields attribute on our object, interim_fields = hasattr(obj, 'getInterimFields') \ and obj.getInterimFields() or [] # kick some pretty display values in. 
for x in range(len(interim_fields)): interim_fields[x]['formatted_value'] = \ formatDecimalMark(interim_fields[x]['value'], dmk) self.interim_fields[obj.UID()] = interim_fields item['service_uid'] = service.UID() item['Service'] = service.Title() item['Keyword'] = keyword item['Unit'] = format_supsub(unit) if unit else '' item['Result'] = '' item['formatted_result'] = '' item['interim_fields'] = interim_fields item['Remarks'] = obj.getRemarks() item['Uncertainty'] = '' item['DetectionLimit'] = '' item['retested'] = obj.getRetested() item['class']['retested'] = 'center' item['result_captured'] = self.ulocalized_time( obj.getResultCaptureDate(), long_format=0) item['calculation'] = calculation and True or False try: item['Partition'] = obj.getSamplePartition().getId() except AttributeError: item['Partition'] = '' if obj.portal_type == "ReferenceAnalysis": item['DueDate'] = self.ulocalized_time(obj.aq_parent.getExpiryDate(), long_format=0) else: item['DueDate'] = self.ulocalized_time(obj.getDueDate(), long_format=1) cd = obj.getResultCaptureDate() item['CaptureDate'] = cd and self.ulocalized_time(cd, long_format=1) or '' item['Attachments'] = '' item['allow_edit'] = [] tblrowclass = item.get('table_row_class') if obj.portal_type == 'ReferenceAnalysis': item['st_uid'] = obj.aq_parent.UID() item['table_row_class'] = ' '.join([tblrowclass, 'qc-analysis']) elif obj.portal_type == 'DuplicateAnalysis' and \ obj.getAnalysis().portal_type == 'ReferenceAnalysis': item['st_uid'] = obj.aq_parent.UID() item['table_row_class'] = ' '.join([tblrowclass, 'qc-analysis']) else: sample = None if self.context.portal_type == 'AnalysisRequest': sample = self.context.getSample() elif self.context.portal_type == 'Worksheet': if obj.portal_type in ('DuplicateAnalysis', 'RejectAnalysis'): sample = obj.getAnalysis().getSample() else: sample = obj.aq_parent.getSample() elif self.context.portal_type == 'Sample': sample = self.context st_uid = sample.getSampleType().UID() if sample else '' item['st_uid'] = st_uid if checkPermission(ManageBika, self.context): # service_uid = service.UID() # latest = rc.lookupObject(service_uid).version_id item['Service'] = service.Title() item['class']['Service'] = "service_title" # Show version number of out-of-date objects # No: This should be done in another column, if at all. # The (vX) value confuses some more fragile forms. # if hasattr(obj, 'reference_versions') and \ # service_uid in obj.reference_versions and \ # latest != obj.reference_versions[service_uid]: # items[i]['after']['Service'] = "(v%s)" % \ # (obj.reference_versions[service_uid]) # choices defined on Service apply to result fields. choices = service.getResultOptions() if choices: item['choices']['Result'] = choices # permission to view this item's results can_view_result = \ getSecurityManager().checkPermission(ViewResults, obj) # permission to edit this item's results # Editing Field Results is possible while in Sample Due. 
poc = self.contentFilter.get("getPointOfCapture", 'lab') can_edit_analysis = self.allow_edit and context_active and \ ((poc == 'field' and getSecurityManager().checkPermission(EditFieldResults, obj)) or (poc != 'field' and getSecurityManager().checkPermission(EditResults, obj))) allowed_method_states = [ 'to_be_sampled', 'to_be_preserved', 'sample_received', 'sample_registered', 'sampled', 'assigned', ] # Prevent from being edited if the instrument assigned # is not valid (out-of-date or uncalibrated), except if # the analysis is a QC with assigned status can_edit_analysis = can_edit_analysis \ and (obj.isInstrumentValid() or (obj.portal_type == 'ReferenceAnalysis' and item['review_state'] in allowed_method_states)) if can_edit_analysis: item['allow_edit'].extend(['Analyst', 'Result', 'Remarks']) # if the Result field is editable, our interim fields are too for f in self.interim_fields[obj.UID()]: item['allow_edit'].append(f['keyword']) # if there isn't a calculation then result must be re-testable, # and if there are interim fields, they too must be re-testable. if not item['calculation'] or \ (item['calculation'] and self.interim_fields[obj.UID()]): item['allow_edit'].append('retested') # TODO: Only the labmanager must be able to change the method # can_set_method = getSecurityManager().checkPermission(SetAnalysisMethod, obj) can_set_method = can_edit_analysis \ and item['review_state'] in allowed_method_states method = obj.getMethod() \ if hasattr(obj, 'getMethod') and obj.getMethod() else service.getMethod() # Display the methods selector if the AS has at least one # method assigned item['Method'] = '' item['replace']['Method'] = '' if can_set_method: voc = self.get_methods_vocabulary(obj) if voc: # The service has at least one method available item['Method'] = method.UID() if method else '' item['choices']['Method'] = voc item['allow_edit'].append('Method') show_methodinstr_columns = True elif method: # This should never happen # The analysis has set a method, but its parent # service hasn't any method available O_o item['Method'] = method.Title() item['replace']['Method'] = "<a href='%s'>%s</a>" % \ (method.absolute_url(), method.Title()) show_methodinstr_columns = True elif method: # Edition not allowed, but method set item['Method'] = method.Title() item['replace']['Method'] = "<a href='%s'>%s</a>" % \ (method.absolute_url(), method.Title()) show_methodinstr_columns = True # TODO: Instrument selector dynamic behavior in worksheet Results # Only the labmanager must be able to change the instrument to be used. 
Also, # the instrument selection should be done in accordance with the method selected # can_set_instrument = service.getInstrumentEntryOfResults() and getSecurityManager().checkPermission(SetAnalysisInstrument, obj) can_set_instrument = service.getInstrumentEntryOfResults() \ and can_edit_analysis \ and item['review_state'] in allowed_method_states item['Instrument'] = '' item['replace']['Instrument'] = '' if service.getInstrumentEntryOfResults(): instrument = None # If the analysis has an instrument already assigned, use it if service.getInstrumentEntryOfResults() \ and hasattr(obj, 'getInstrument') \ and obj.getInstrument(): instrument = obj.getInstrument() # Otherwise, use the Service's default instrument elif service.getInstrumentEntryOfResults(): instrument = service.getInstrument() if can_set_instrument: # Edition allowed voc = self.get_instruments_vocabulary(obj) if voc: # The service has at least one instrument available item['Instrument'] = instrument.UID() if instrument else '' item['choices']['Instrument'] = voc item['allow_edit'].append('Instrument') show_methodinstr_columns = True elif instrument: # This should never happen # The analysis has an instrument set, but the # service hasn't any available instrument item['Instrument'] = instrument.Title() item['replace']['Instrument'] = "<a href='%s'>%s</a>" % \ (instrument.absolute_url(), instrument.Title()) show_methodinstr_columns = True elif instrument: # Edition not allowed, but instrument set item['Instrument'] = instrument.Title() item['replace']['Instrument'] = "<a href='%s'>%s</a>" % \ (instrument.absolute_url(), instrument.Title()) show_methodinstr_columns = True else: # Manual entry of results, instrument not allowed item['Instrument'] = _('Manual') msgtitle = t(_( "Instrument entry of results not allowed for ${service}", mapping={"service": safe_unicode(service.Title())}, )) item['replace']['Instrument'] = \ '<a href="#" title="%s">%s</a>' % (msgtitle, t(_('Manual'))) # Sets the analyst assigned to this analysis if can_edit_analysis: analyst = obj.getAnalyst() # widget default: current user if not analyst: analyst = mtool.getAuthenticatedMember().getUserName() item['Analyst'] = analyst item['choices']['Analyst'] = self.getAnalysts() else: item['Analyst'] = obj.getAnalystName() # If the user can attach files to analyses, show the attachment col can_add_attachment = \ getSecurityManager().checkPermission(AddAttachment, obj) if can_add_attachment or can_view_result: attachments = "" if hasattr(obj, 'getAttachment'): for attachment in obj.getAttachment(): af = attachment.getAttachmentFile() icon = af.icon # handle blob icons if callable(icon): icon = icon() attachments += "<span class='attachment' attachment_uid='%s'>" % (attachment.UID()) if icon: attachments += "<img src='%s/%s'/>" % (self.portal_url, icon) attachments += '<a href="%s/at_download/AttachmentFile"/>%s</a>' % (attachment.absolute_url(), af.filename) if can_edit_analysis: attachments += "<img class='deleteAttachmentButton' attachment_uid='%s' src='%s'/>" % (attachment.UID(), "++resource++bika.lims.images/delete.png") attachments += "</br></span>" item['replace']['Attachments'] = attachments[:-12] + "</span>" # Only display data bearing fields if we have ViewResults # permission, otherwise just put an icon in Result column. 
if can_view_result: item['Result'] = result scinot = self.context.bika_setup.getScientificNotationResults() item['formatted_result'] = obj.getFormattedResult(sciformat=int(scinot), decimalmark=dmk) # LIMS-1379 Allow manual uncertainty value input # https://jira.bikalabs.com/browse/LIMS-1379 fu = format_uncertainty(obj, result, decimalmark=dmk, sciformat=int(scinot)) fu = fu if fu else '' if can_edit_analysis and service.getAllowManualUncertainty() is True: unc = obj.getUncertainty(result) item['allow_edit'].append('Uncertainty') item['Uncertainty'] = unc if unc else '' item['before']['Uncertainty'] = '± ' item['after']['Uncertainty'] = '<em class="discreet" style="white-space:nowrap;"> %s</em>' % item['Unit'] item['structure'] = False elif fu: item['Uncertainty'] = fu item['before']['Uncertainty'] = '± ' item['after']['Uncertainty'] = '<em class="discreet" style="white-space:nowrap;"> %s</em>' % item['Unit'] item['structure'] = True # LIMS-1700. Allow manual input of Detection Limits # LIMS-1775. Allow to select LDL or UDL defaults in results with readonly mode # https://jira.bikalabs.com/browse/LIMS-1700 # https://jira.bikalabs.com/browse/LIMS-1775 if can_edit_analysis and \ hasattr(obj, 'getDetectionLimitOperand') and \ hasattr(service, 'getDetectionLimitSelector') and \ service.getDetectionLimitSelector() is True: isldl = obj.isBelowLowerDetectionLimit() isudl = obj.isAboveUpperDetectionLimit() dlval = '' if isldl or isudl: dlval = '<' if isldl else '>' item['allow_edit'].append('DetectionLimit') item['DetectionLimit'] = dlval choices = [{'ResultValue': '<', 'ResultText': '<'}, {'ResultValue': '>', 'ResultText': '>'}] item['choices']['DetectionLimit'] = choices self.columns['DetectionLimit']['toggle'] = True srv = obj.getService() defdls = {'min': srv.getLowerDetectionLimit(), 'max': srv.getUpperDetectionLimit(), 'manual': srv.getAllowManualDetectionLimit()} defin = '<input type="hidden" id="DefaultDLS.%s" value=\'%s\'/>' defin = defin % (obj.UID(), json.dumps(defdls)) item['after']['DetectionLimit'] = defin # LIMS-1769. Allow to use LDL and UDL in calculations. # https://jira.bikalabs.com/browse/LIMS-1769 # Since LDL, UDL, etc. are wildcards that can be used # in calculations, these fields must be loaded always # for 'live' calculations. 
if can_edit_analysis: dls = {'default_ldl': 'none', 'default_udl': 'none', 'below_ldl': False, 'above_udl': False, 'is_ldl': False, 'is_udl': False, 'manual_allowed': False, 'dlselect_allowed': False} if hasattr(obj, 'getDetectionLimits'): dls['below_ldl'] = obj.isBelowLowerDetectionLimit() dls['above_udl'] = obj.isBelowLowerDetectionLimit() dls['is_ldl'] = obj.isLowerDetectionLimit() dls['is_udl'] = obj.isUpperDetectionLimit() dls['default_ldl'] = service.getLowerDetectionLimit() dls['default_udl'] = service.getUpperDetectionLimit() dls['manual_allowed'] = service.getAllowManualDetectionLimit() dls['dlselect_allowed'] = service.getDetectionLimitSelector() dlsin = '<input type="hidden" id="AnalysisDLS.%s" value=\'%s\'/>' dlsin = dlsin % (obj.UID(), json.dumps(dls)) item['after']['Result'] = dlsin else: item['Specification'] = "" if 'Result' in item['allow_edit']: item['allow_edit'].remove('Result') item['before']['Result'] = \ '<img width="16" height="16" ' + \ 'src="%s/++resource++bika.lims.images/to_follow.png"/>' % \ (self.portal_url) # Everyone can see valid-ranges spec = self.get_analysis_spec(obj) if spec: min_val = spec.get('min', '') min_str = ">{0}".format(min_val) if min_val else '' max_val = spec.get('max', '') max_str = "<{0}".format(max_val) if max_val else '' error_val = spec.get('error', '') error_str = "{0}%".format(error_val) if error_val else '' rngstr = ",".join([x for x in [min_str, max_str, error_str] if x]) else: rngstr = "" item['Specification'] = rngstr # Add this analysis' interim fields to the interim_columns list for f in self.interim_fields[obj.UID()]: if f['keyword'] not in self.interim_columns and not f.get('hidden', False): self.interim_columns[f['keyword']] = f['title'] # and to the item itself item[f['keyword']] = f item['class'][f['keyword']] = 'interim' # check if this analysis is late/overdue resultdate = obj.aq_parent.getDateSampled() \ if obj.portal_type == 'ReferenceAnalysis' \ else obj.getResultCaptureDate() duedate = obj.aq_parent.getExpiryDate() \ if obj.portal_type == 'ReferenceAnalysis' \ else obj.getDueDate() item['replace']['DueDate'] = \ self.ulocalized_time(duedate, long_format=1) if item['review_state'] not in ['to_be_sampled', 'to_be_preserved', 'sample_due', 'published']: if (resultdate and resultdate > duedate) \ or (not resultdate and DateTime() > duedate): item['replace']['DueDate'] = '%s <img width="16" height="16" src="%s/++resource++bika.lims.images/late.png" title="%s"/>' % \ (self.ulocalized_time(duedate, long_format=1), self.portal_url, t(_("Late Analysis"))) after_icons = [] # Submitting user may not verify results unless the user is labman # or manager and the AS has isSelfVerificationEnabled set to True if item['review_state'] == 'to_be_verified': # If multi-verification required, place an informative icon numverifications = obj.getNumberOfRequiredVerifications() if numverifications > 1: # More than one verification required, place an icon # Get the number of verifications already done: done = obj.getNumberOfVerifications() pending = numverifications - done ratio = float(done) / float(numverifications) \ if done > 0 else 0 scale = '' if ratio < 0.25 else '25' \ if ratio < 0.50 else '50' \ if ratio < 0.75 else '75' anchor = "<a href='#' title='%s %s %s' " \ "class='multi-verification scale-%s'>%s/%s</a>" anchor = anchor % (t(_("Multi-verification required")), str(pending), t(_("verification(s) pending")), scale, str(done), str(numverifications)) after_icons.append(anchor) username = member.getUserName() allowed = 
ploneapi.user.has_permission(VerifyPermission, username=username) if allowed and not obj.isUserAllowedToVerify(member): after_icons.append( "<img src='++resource++bika.lims.images/submitted-by-current-user.png' title='%s'/>" % (t(_("Cannot verify, submitted or verified by current user before")))) elif allowed: if obj.getSubmittedBy() == member.getUser().getId(): after_icons.append( "<img src='++resource++bika.lims.images/warning.png' title='%s'/>" % (t(_("Can verify, but submitted by current user")))) # If analysis Submitted and Verified by the same person, then warning icon will appear. submitter = obj.getSubmittedBy() if submitter and obj.wasVerifiedByUser(submitter): after_icons.append( "<img src='++resource++bika.lims.images/warning.png' title='%s'/>" % (t(_("Submited and verified by the same user- " + submitter)))) # add icon for assigned analyses in AR views if self.context.portal_type == 'AnalysisRequest': obj = item['obj'] if obj.portal_type in ['ReferenceAnalysis', 'DuplicateAnalysis'] or \ workflow.getInfoFor(obj, 'worksheetanalysis_review_state') == 'assigned': br = obj.getBackReferences('WorksheetAnalysis') if len(br) > 0: ws = br[0] after_icons.append("<a href='%s'><img src='++resource++bika.lims.images/worksheet.png' title='%s'/></a>" % (ws.absolute_url(), t(_("Assigned to: ${worksheet_id}", mapping={'worksheet_id': safe_unicode(ws.id)})))) item['after']['state_title'] = ' '.join(after_icons) # the TAL requires values for all interim fields on all # items, so we set blank values in unused cells for item in items: for field in self.interim_columns: if field not in item: item[field] = '' # XXX order the list of interim columns interim_keys = self.interim_columns.keys() interim_keys.reverse() # add InterimFields keys to columns for col_id in interim_keys: if col_id not in self.columns: self.columns[col_id] = { 'title': self.interim_columns[col_id], 'input_width': '6', 'input_class': 'ajax_calculate numeric', 'sortable': False } if can_edit_analyses: new_states = [] for state in self.review_states: # InterimFields are displayed in review_state # They are anyway available through View.columns though. # In case of hidden fields, the calcs.py should check calcs/services # for additional InterimFields!! pos = 'Result' in state['columns'] and \ state['columns'].index('Result') or len(state['columns']) for col_id in interim_keys: if col_id not in state['columns']: state['columns'].insert(pos, col_id) # retested column is added after Result. pos = 'Result' in state['columns'] and \ state['columns'].index('Uncertainty') + 1 or len(state['columns']) state['columns'].insert(pos, 'retested') new_states.append(state) self.review_states = new_states # Allow selecting individual analyses self.show_select_column = True # Dry Matter. # The Dry Matter column is never enabled for reference sample contexts # and refers to getReportDryMatter in ARs. if items and \ (hasattr(self.context, 'getReportDryMatter') and self.context.getReportDryMatter()): # look through all items # if the item's Service supports ReportDryMatter, add getResultDM(). 
for item in items: if item['obj'].getService().getReportDryMatter(): item['ResultDM'] = item['obj'].getResultDM() else: item['ResultDM'] = '' if item['ResultDM']: item['after']['ResultDM'] = "<em class='discreet'>%</em>" # modify the review_states list to include the ResultDM column new_states = [] for state in self.review_states: pos = 'Result' in state['columns'] and \ state['columns'].index('Uncertainty') + 1 or len(state['columns']) state['columns'].insert(pos, 'ResultDM') new_states.append(state) self.review_states = new_states if self.show_categories: self.categories = map(lambda x: x[0], sorted(self.categories, key=lambda x: x[1])) else: self.categories.sort() # self.json_specs = json.dumps(self.specs) self.json_interim_fields = json.dumps(self.interim_fields) self.items = items # Method and Instrument columns must be shown or hidden at the # same time, because the value assigned to one causes # a value reassignment to the other (one method can be performed # by different instruments) self.columns['Method']['toggle'] = show_methodinstr_columns self.columns['Instrument']['toggle'] = show_methodinstr_columns return items
def folderitem(self, obj, item, index):
    """Augment folder listing item
    """
    obj = api.get_object(obj)
    ar = obj.getAnalysisRequest()
    uid = api.get_uid(obj)
    review_state = api.get_workflow_status_of(ar)
    status_title = review_state.capitalize().replace("_", " ")

    # Report Info Popup
    # see: bika.lims.site.coffee for the attached event handler
    item["Info"] = get_link(
        "analysisreport_info?report_uid={}".format(uid),
        value="<span class='glyphicon glyphicon-info-sign'></span>",
        css_class="service_info")

    item["replace"]["AnalysisRequest"] = get_link(ar.absolute_url(),
                                                  value=ar.Title())

    pdf = self.get_pdf(obj)
    filesize = self.get_filesize(pdf)
    if filesize > 0:
        url = "{}/download_pdf".format(obj.absolute_url())
        item["replace"]["PDF"] = get_link(url, value="PDF", target="_blank")

    item["State"] = _BMF(status_title)
    item["state_class"] = "state-{}".format(review_state)
    item["FileSize"] = "{:.2f} Kb".format(filesize)
    fmt_date = self.localize_date(obj.created())
    item["Date"] = fmt_date
    item["PublishedBy"] = self.user_fullname(obj.Creator())

    # N.B. There is a bug in the current publication machinery, so that
    # only the primary contact gets stored in the Attachment as recipient.
    #
    # However, we're interested to show here the full list of recipients,
    # so we use the recipients of the containing AR instead.
    recipients = []

    for recipient in self.get_recipients(ar):
        email = safe_unicode(recipient["EmailAddress"])
        fullname = safe_unicode(recipient["Fullname"])
        if email:
            value = u"<a href='mailto:{}'>{}</a>".format(email, fullname)
            recipients.append(value)
        else:
            message = _("No email address set for this contact")
            value = u"<span title='{}' class='text text-danger'>" \
                    u"⚠ {}</span>".format(message, fullname)
            recipients.append(value)

    item["replace"]["Recipients"] = ", ".join(recipients)

    # No recipient with an email address set was found in the AR, so we also
    # flush the Recipients data from the Attachment
    if not recipients:
        item["Recipients"] = ""

    return item
def folderitem(self, obj, item, index):
    """Service triggered each time an item is iterated in folderitems.

    The use of this service prevents the extra-loops in child objects.

    :obj: the instance of the class to be foldered
    :item: dict containing the properties of the object to be used by the
        template
    :index: current index of the item
    """
    service = api.get_object(obj)

    if service.getKeyword() in self.specsresults:
        specresults = self.specsresults[service.getKeyword()]
    else:
        specresults = {
            "keyword": service.getKeyword(),
            "min_operator": "",
            "min": "",
            "max_operator": "",
            "max": "",
            "warn_min": "",
            "warn_max": "",
            "hidemin": "",
            "hidemax": "",
            "rangecomment": "",
        }

    # Icons
    after_icons = ""
    if service.getAccredited():
        after_icons += get_image("accredited.png", title=_("Accredited"))
    if service.getAttachmentOption() == "r":
        after_icons += get_image("attach_reqd.png",
                                 title=_("Attachment required"))
    if service.getAttachmentOption() == "n":
        after_icons += get_image("attach_no.png",
                                 title=_("Attachment not permitted"))

    state = api.get_workflow_status_of(service, state_var="inactive_state")
    unit = service.getUnit()

    item = {
        "obj": service,
        "id": service.getId(),
        "uid": service.UID(),
        "keyword": service.getKeyword(),
        "title": service.Title(),
        "unit": unit,
        "category": service.getCategoryTitle(),
        "selected": service.getKeyword() in self.specsresults.keys(),
        "type_class": "contenttype-ReferenceResult",
        "url": service.absolute_url(),
        "relative_url": service.absolute_url(),
        "view_url": service.absolute_url(),
        "service": service.Title(),
        "min_operator": specresults.get("min_operator", "geq"),
        "min": specresults.get("min", ""),
        "max_operator": specresults.get("max_operator", "leq"),
        "max": specresults.get("max", ""),
        "warn_min": specresults.get("warn_min", ""),
        "warn_max": specresults.get("warn_max", ""),
        "hidemin": specresults.get("hidemin", ""),
        "hidemax": specresults.get("hidemax", ""),
        "rangecomment": specresults.get("rangecomment", ""),
        "replace": {},
        "before": {},
        "after": {
            "service": after_icons,
        },
        "choices": {
            "min_operator": to_choices(MIN_OPERATORS),
            "max_operator": to_choices(MAX_OPERATORS),
        },
        "class": "state-%s" % (state),
        "state_class": "state-%s" % (state),
        "allow_edit": [
            "min", "max", "warn_min", "warn_max", "hidemin", "hidemax",
            "rangecomment", "min_operator", "max_operator"
        ],
        "table_row_class": "even",
        "required": ["min_operator", "max_operator"],
    }

    # Add methods
    methods = service.getMethods()
    if methods:
        links = map(
            lambda m: get_link(
                m.absolute_url(), value=m.Title(), css_class="link"),
            methods)
        item["replace"]["methods"] = ", ".join(links)
    else:
        item["methods"] = ""

    return item
def folderitem(self, obj, item, index): obj = api.get_object(obj) item["Description"] = obj.Description() item["replace"]["Title"] = get_link(item["url"], item["Title"]) return item
Nitrate;\ Nitrite;\ Mold;\ Yeast;\ Enterobactereacea;\ Aerobic/Total Plate Count;\ Geomen;\ STV\n") # Get all ARs (Sample) ARs = api.search({'portal_type': 'AnalysisRequest'}) # Get MBG ARs mbgARs = {} for i in ARs: AR = api.get_object(i) id = AR.getId() if id in mbg: mbgARs[i] = AR ##### Get Sample Data ##### for mbgAR_brain in mbgARs: # Get AR object, Sample ID, Client, and Batch mbgAR = api.get_object(mbgAR_brain) id = mbgAR.getId() client = mbgAR.getClient() client_name = client.getName() batch = mbgAR.getBatch() analyses = mbgAR.getAnalyses() # Grower # no_grower = 0
def get_object(brain_or_object): """Proxy to bika.lims.api.get_object """ return api.get_object(brain_or_object)
def __call__(self): parms = [] titles = [] self.contentFilter = dict(portal_type="AnalysisRequest", is_active=True, sort_on="getDateReceived") val = self.selection_macros.parse_daterange(self.request, 'getDateReceived', _('Date Received')) if val: self.contentFilter[val['contentFilter'] [0]] = val['contentFilter'][1] parms.append(val['parms']) titles.append(val['titles']) # Query the catalog and store results in a dictionary ars = api.search(self.contentFilter, CATALOG_ANALYSIS_REQUEST_LISTING) if not ars: message = _("No samples matched your query") self.context.plone_utils.addPortalMessage(message, "error") return self.default_template() datalines = {} footlines = {} total_received_count = 0 total_published_count = 0 for ar in ars: published = api.get_workflow_status_of(ar) == "published" ar = api.get_object(ar) datereceived = ar.getDateReceived() monthyear = datereceived.strftime( "%B") + " " + datereceived.strftime("%Y") received = 1 publishedcnt = published and 1 or 0 if (monthyear in datalines): received = datalines[monthyear]['ReceivedCount'] + 1 publishedcnt = published and datalines[monthyear][ 'PublishedCount'] + 1 or \ datalines[monthyear]['PublishedCount'] ratio = float(publishedcnt) / received dataline = { 'MonthYear': monthyear, 'ReceivedCount': received, 'PublishedCount': publishedcnt, 'UnpublishedCount': received - publishedcnt, 'Ratio': ratio, 'RatioPercentage': '%02d' % (100 * (float(publishedcnt) / float(received))) + '%' } datalines[monthyear] = dataline total_received_count += 1 total_published_count = published and total_published_count + 1 or total_published_count # Footer total data ratio = float(total_published_count) / total_received_count footline = { 'ReceivedCount': total_received_count, 'PublishedCount': total_published_count, 'UnpublishedCount': total_received_count - total_published_count, 'Ratio': ratio, 'RatioPercentage': '%02d' % (100 * (float(total_published_count) / float(total_received_count))) + '%' } footlines['Total'] = footline self.report_data = { 'parameters': parms, 'datalines': datalines, 'footlines': footlines } if self.request.get('output_format', '') == 'CSV': import csv import StringIO import datetime fieldnames = [ 'MonthYear', 'ReceivedCount', 'PublishedCount', 'RatioPercentage', ] output = StringIO.StringIO() dw = csv.DictWriter(output, extrasaction='ignore', fieldnames=fieldnames) dw.writerow(dict((fn, fn) for fn in fieldnames)) for row in datalines.values(): dw.writerow(row) report_data = output.getvalue() output.close() date = datetime.datetime.now().strftime("%Y%m%d%H%M") setheader = self.request.RESPONSE.setHeader setheader('Content-Type', 'text/csv') setheader( "Content-Disposition", "attachment;filename=\"receivedvspublished_%s.csv\"" % date) self.request.RESPONSE.write(report_data) else: return { 'report_title': _('Samples received vs. reported'), 'report_data': self.template() }
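The monthly aggregation above boils down to a running received/published counter per "Month Year" label plus a percentage string. A minimal, self-contained sketch of that computation (add_sample and the sample data are hypothetical; true division is forced so the ratio is not truncated on Python 2):

from __future__ import division  # avoid integer division on Python 2

def add_sample(datalines, monthyear, published):
    """Accumulate received/published counters for a month label."""
    line = datalines.get(monthyear, {"ReceivedCount": 0, "PublishedCount": 0})
    line["ReceivedCount"] += 1
    line["PublishedCount"] += 1 if published else 0
    line["UnpublishedCount"] = line["ReceivedCount"] - line["PublishedCount"]
    line["Ratio"] = line["PublishedCount"] / line["ReceivedCount"]
    line["RatioPercentage"] = "%02d%%" % (100 * line["Ratio"])
    datalines[monthyear] = line
    return datalines

datalines = {}
add_sample(datalines, "June 2019", published=True)
add_sample(datalines, "June 2019", published=False)
print(datalines["June 2019"]["RatioPercentage"])  # -> "50%"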
def __call__(self): form = self.request.form # Form submit toggle form_submitted = form.get("submitted", False) # Buttons form_preview = form.get("button_preview", False) form_create = form.get("button_create", False) form_cancel = form.get("button_cancel", False) objs = self.get_objects() # No ARs selected if not objs: return self.redirect(message=_("No items selected"), level="warning") # Handle preview if form_submitted and form_preview: logger.info("*** PREVIEW ***") # Handle create if form_submitted and form_create: logger.info("*** CREATE PARTITIONS ***") partitions = [] # create the partitions for partition in form.get("partitions", []): primary_uid = partition.get("primary_uid") sampletype_uid = partition.get("sampletype_uid") container_uid = partition.get("container_uid") preservation_uid = partition.get("preservation_uid") internal_use = partition.get("internal_use") if not primary_uid: continue # The creation of partitions w/o analyses is allowed. Maybe the # user wants to add the analyses later manually or wants to keep # this partition stored in a freezer for some time analyses_uids = partition.get("analyses", []) partition = create_partition( request=self.request, analysis_request=primary_uid, sample_type=sampletype_uid, container=container_uid, preservation=preservation_uid, analyses=analyses_uids, internal_use=internal_use, ) partitions.append(partition) # Remove analyses from primary once all partitions are created primary = api.get_object(primary_uid) self.push_primary_analyses_for_removal(primary, analyses_uids) logger.info("Successfully created partition: {}".format( api.get_path(partition))) if not partitions: # If no partitions were created, show a warning message return self.redirect(message=_("No partitions were created")) message = _("Created {} partitions: {}".format( len(partitions), ", ".join(map(api.get_title, partitions)))) return self.redirect(message=message) # Handle cancel if form_submitted and form_cancel: logger.info("*** CANCEL ***") return self.redirect(message=_("Partitioning canceled")) return self.template()
def __call__(self, action, objects): # Store invalid instruments-ref.analyses invalid_instrument_refs = defaultdict(set) # Get interims data interims_data = self.get_interims_data() for analysis in objects: uid = api.get_uid(analysis) # Need to save remarks? remarks = self.get_form_value("Remarks", uid, default="") analysis.setRemarks(remarks) # Need to save the instrument? instrument = self.get_form_value("Instrument", uid, None) if instrument is not None: # Could be an empty string instrument = instrument or None analysis.setInstrument(instrument) if instrument and IReferenceAnalysis.providedBy(analysis): if is_out_of_range(analysis): # This reference analysis is out of range, so we have # to retract all analyses assigned to this same # instrument that are awaiting verification invalid_instrument_refs[uid].add(analysis) else: # The reference result is valid, so make the instrument # available again for further analyses instrument.setDisposeUntilNextCalibrationTest(False) # Need to save the method? method = self.get_form_value("Method", uid, default=None) if method is not None: method = method or None analysis.setMethod(method) # Need to save analyst? analyst = self.get_form_value("Analyst", uid, default=None) if analyst is not None: analysis.setAnalyst(analyst) # Save uncertainty uncertainty = self.get_form_value("Uncertainty", uid, "") analysis.setUncertainty(uncertainty) # Save detection limit dlimit = self.get_form_value("DetectionLimit", uid, "") analysis.setDetectionLimitOperand(dlimit) # Interim fields interims = interims_data.get(uid, analysis.getInterimFields()) analysis.setInterimFields(interims) # Save Hidden hidden = self.get_form_value("Hidden", uid, "") analysis.setHidden(hidden == "on") # Result result = self.get_form_value("Result", uid, default=analysis.getResult()) analysis.setResult(result) # Submit all analyses transitioned = self.do_action(action, objects) if not transitioned: return self.redirect(message=_("No changes made"), level="warning") # If a reference analysis with an out-of-range result and instrument # assigned has been submitted, retract the routine analyses that are # awaiting verification and have the same instrument assigned retracted = list() for invalid_instrument_uid in invalid_instrument_refs.keys(): query = dict( getInstrumentUID=invalid_instrument_uid, portal_type=['Analysis', 'DuplicateAnalysis'], review_state='to_be_verified', ) brains = api.search(query, CATALOG_ANALYSIS_LISTING) for brain in brains: analysis = api.get_object(brain) failed_msg = '{0}: {1}'.format( ulocalized_time(DateTime(), long_format=1), _("Instrument failed reference test")) an_remarks = analysis.getRemarks() analysis.setRemarks('. '.join([an_remarks, failed_msg])) retracted.append(analysis) # If some analyses have been retracted because instrument failed a # reference test, then generate a pdf report if self.do_action("retract", retracted): # Create the Retracted Analyses List portal_url = api.get_url(api.get_portal()) report = AnalysesRetractedListReport(self.context, self.request, portal_url, 'Retracted analyses', retracted) # Attach the pdf to all ReferenceAnalysis that failed (accessible # from Instrument's Internal Calibration Tests list) pdf = report.toPdf() for refs in invalid_instrument_refs.values(): for ref in refs: ref.setRetractedAnalysesPdfReport(pdf) # Send the email try: report.sendEmail() except Exception as err_msg: message = "Unable to send email: {}".format(err_msg) logger.warn(message) # Redirect to success view return self.success(transitioned)
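Out-of-range reference analyses are grouped per instrument UID in a defaultdict(set); because each value is a set of analyses, the report-attachment step has to flatten the nested values. A minimal, self-contained sketch of that grouping pattern (the UIDs and names below are placeholders):

from collections import defaultdict

# Hypothetical stand-ins for reference analyses keyed by instrument UID
invalid_instrument_refs = defaultdict(set)
invalid_instrument_refs["instrument-uid-1"].add("ref-analysis-a")
invalid_instrument_refs["instrument-uid-1"].add("ref-analysis-b")
invalid_instrument_refs["instrument-uid-2"].add("ref-analysis-c")

# Values are *sets*, so they must be flattened before acting on each analysis
for refs in invalid_instrument_refs.values():
    for ref in refs:
        print("attach report to", ref)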
def is_out_of_range(brain_or_object, result=_marker): """Checks if the result for the analysis passed in is out of range and/or out of shoulders range. min max warn min max warn ·········|---------------|=====================|---------------|········· ----- out-of-range -----><----- in-range ------><----- out-of-range ----- <-- shoulder --><----- in-range ------><-- shoulder --> :param brain_or_object: A single catalog brain or content object :param result: Tentative result. If None, use the analysis result :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain :returns: Tuple of two elements. The first value is `True` if the result is out of range and `False` if it is in range. The second value is `True` if the result is out of shoulder range and `False` if it is in shoulder range :rtype: (bool, bool) """ analysis = api.get_object(brain_or_object) if not IAnalysis.providedBy(analysis) and \ not IReferenceAnalysis.providedBy(analysis): api.fail("{} is not supported. Needs to be IAnalysis or " "IReferenceAnalysis".format(repr(analysis))) if result is _marker: result = api.safe_getattr(analysis, "getResult", None) if result in [None, '']: # Empty result return False, False if IDuplicateAnalysis.providedBy(analysis): # Result range for duplicate analyses is calculated from the original # result, applying a variation % in shoulders. If the analysis has # result options enabled or string results enabled, the system returns an # empty result range for the duplicate: result must match 100% with the # original result original = analysis.getAnalysis() original_result = original.getResult() # Does original analysis have a valid result? if original_result in [None, '']: return False, False # Does the original result type match the duplicate result type? if api.is_floatable(result) != api.is_floatable(original_result): return True, True # Does the analysis have result options enabled or a non-floatable result? if analysis.getResultOptions() or not api.is_floatable(original_result): # Let's always assume the result is 'out of shoulders', because we # consider the shoulders are precisely the duplicate variation % out_of_range = original_result != result return out_of_range, out_of_range elif not api.is_floatable(result): # A non-duplicate with non-floatable result. There is no chance to know # if the result is out-of-range return False, False # Convert result to a float result = api.to_float(result) # Note that routine analyses, duplicates and reference analyses all # implement the function getResultsRange: # - For routine analyses, the function returns the valid range based on the # specs assigned during the creation process. # - For duplicates, the valid range is the result of the analysis the # duplicate was generated from +/- the duplicate variation. # - For reference analyses, getResultsRange returns the valid range as # indicated in the Reference Sample from which the analysis was created. result_range = api.safe_getattr(analysis, "getResultsRange", None) if not result_range: # No result range defined or the passed-in object is not supported return False, False # Maybe there is a custom adapter adapters = getAdapters((analysis,), IResultOutOfRange) for name, adapter in adapters: ret = adapter(result=result, specification=result_range) if not ret or not ret.get('out_of_range', False): continue if not ret.get('acceptable', True): # Out of range + out of shoulders return True, True # Out of range, but in shoulders return True, False result_range = ResultsRangeDict(result_range) # The assignment of result as default fallback for min and max guarantees # the result will also be in range if no min/max values are defined specs_min = api.to_float(result_range.min, result) specs_max = api.to_float(result_range.max, result) in_range = False min_operator = result_range.min_operator if min_operator == "geq": in_range = result >= specs_min else: in_range = result > specs_min max_operator = result_range.max_operator if in_range: if max_operator == "leq": in_range = result <= specs_max else: in_range = result < specs_max # If in range, no need to check shoulders if in_range: return False, False # Out of range, check shoulders. If no explicit warn_min or warn_max have # been defined, no shoulders are considered for this analysis. Thus, use # the specs' min and max as default fallback values warn_min = api.to_float(result_range.warn_min, specs_min) warn_max = api.to_float(result_range.warn_max, specs_max) in_shoulder = warn_min <= result <= warn_max return True, not in_shoulder
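The decision logic above reduces to: apply the min/max operators to get the in-range flag, and only fall back to the warn_min/warn_max shoulders when the result is out of range. A minimal, self-contained sketch with a plain spec dict (check_range and the values are hypothetical, not part of the API):

def check_range(result, spec):
    """Return (out_of_range, out_of_shoulders) for a numeric result.

    `spec` is a plain dict with the same keys the field uses: min, max,
    optional warn_min/warn_max and min_operator/max_operator.
    """
    smin = float(spec.get("min", result))
    smax = float(spec.get("max", result))
    in_min = result >= smin if spec.get("min_operator", "geq") == "geq" else result > smin
    in_max = result <= smax if spec.get("max_operator", "leq") == "leq" else result < smax
    if in_min and in_max:
        return False, False
    warn_min = float(spec.get("warn_min", smin))
    warn_max = float(spec.get("warn_max", smax))
    in_shoulder = warn_min <= result <= warn_max
    return True, not in_shoulder

spec = {"min": 5, "max": 10, "warn_min": 4, "warn_max": 11}
print(check_range(7.0, spec))   # (False, False) -> in range
print(check_range(4.5, spec))   # (True, False)  -> out of range, within shoulder
print(check_range(12.0, spec))  # (True, True)   -> out of range and shoulders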
def folderitem(self, obj, item, index): """Service triggered each time an item is iterated in folderitems. The use of this service prevents the extra-loops in child objects. :obj: the instance of the class to be foldered :item: dict containing the properties of the object to be used by the template :index: current index of the item """ obj = api.get_object(obj) # We are using the existing logic from the auditview logview = api.get_view("auditlog", context=obj, request=self.request) # get the last snapshot snapshot = get_last_snapshot(obj) # get the metadata of the last snapshot metadata = get_snapshot_metadata(snapshot) title = obj.Title() url = obj.absolute_url() auditlog_url = "{}/@@auditlog".format(url) # Title item["title"] = title # Link the title to the auditlog of the object item["replace"]["title"] = get_link(auditlog_url, value=title) # Version version = get_snapshot_version(obj, snapshot) item["version"] = version # Modification Date m_date = metadata.get("modified") item["modified"] = logview.to_localized_time(m_date) # Actor actor = metadata.get("actor") item["actor"] = actor # Fullname properties = api.get_user_properties(actor) item["fullname"] = properties.get("fullname", actor) # Roles roles = metadata.get("roles", []) item["roles"] = ", ".join(roles) # Remote Address remote_address = metadata.get("remote_address") item["remote_address"] = remote_address # Action action = metadata.get("action") item["action"] = logview.translate_state(action) # Review State review_state = metadata.get("review_state") item["review_state"] = logview.translate_state(review_state) # get the previous snapshot prev_snapshot = get_snapshot_by_version(obj, version - 1) if prev_snapshot: prev_metadata = get_snapshot_metadata(prev_snapshot) prev_review_state = prev_metadata.get("review_state") if prev_review_state != review_state: item["replace"]["review_state"] = "{} → {}".format( logview.translate_state(prev_review_state), logview.translate_state(review_state)) # Rendered Diff diff = compare_snapshots(snapshot, prev_snapshot) item["diff"] = logview.render_diff(diff) return item
def folderitem(self, obj, item, index): """Service triggered each time an item is iterated in folderitems. The use of this service prevents the extra-loops in child objects. :obj: the instance of the class to be foldered :item: dict containing the properties of the object to be used by the template :index: current index of the item """ # ensure we have an object and not a brain obj = api.get_object(obj) uid = api.get_uid(obj) # settings for this analysis service_settings = self.context.getAnalysisServiceSettings(uid) hidden = service_settings.get("hidden", obj.getHidden()) # get the category category = obj.getCategoryTitle() item["category"] = category if category not in self.categories: self.categories.append(category) price = obj.getPrice() keyword = obj.getKeyword() if uid in self.analyses: analysis = self.analyses[uid] # Might differ from the service keyword keyword = analysis.getKeyword() # Mark the row as disabled if the analysis has been submitted item["disabled"] = ISubmitted.providedBy(analysis) # get the hidden status of the analysis hidden = analysis.getHidden() # get the price of the analysis price = analysis.getPrice() # get the specification of this object rr = self.get_results_range() spec = rr.get(keyword, ResultsRangeDict()) item["Title"] = obj.Title() item["Unit"] = obj.getUnit() item["Price"] = price item["before"]["Price"] = self.get_currency_symbol() item["allow_edit"] = self.get_editable_columns(obj) item["selected"] = uid in self.selected item["min"] = str(spec.get("min", "")) item["max"] = str(spec.get("max", "")) item["warn_min"] = str(spec.get("warn_min", "")) item["warn_max"] = str(spec.get("warn_max", "")) item["Hidden"] = hidden # Append info link before the service # see: bika.lims.site.coffee for the attached event handler item["before"]["Title"] = get_link( "analysisservice_info?service_uid={}".format(uid), value="<span class='glyphicon glyphicon-info-sign'></span>", css_class="service_info") # Icons after_icons = "" if obj.getAccredited(): after_icons += get_image( "accredited.png", title=t(_("Accredited"))) if obj.getAttachmentOption() == "r": after_icons += get_image( "attach_reqd.png", title=t(_("Attachment required"))) if obj.getAttachmentOption() == "n": after_icons += get_image( "attach_no.png", title=t(_('Attachment not permitted'))) if after_icons: item["after"]["Title"] = after_icons return item
def create_partition(analysis_request, request, analyses, sample_type=None, container=None, preservation=None, skip_fields=None, remove_primary_analyses=True): """ Creates a partition for the analysis_request (primary) passed in :param analysis_request: uid/brain/object of IAnalysisRequest type :param request: the current request object :param analyses: uids/brains/objects of IAnalysis type :param sample_type: uid/brain/object of SampleType :param container: uid/brain/object of Container :param preservation: uid/brain/object of Preservation :param skip_fields: names of fields to be skipped on copy from primary :param remove_primary_analyses: removes the analyses from the parent :return: the new partition """ partition_skip_fields = [ "Analyses", "Attachment", "Client", "Profile", "Profiles", "RejectionReasons", "Remarks", "ResultsInterpretation", "ResultsInterpretationDepts", "Sample", "Template", "creation_date", "id", "modification_date", "ParentAnalysisRequest", ] if skip_fields: partition_skip_fields.extend(skip_fields) partition_skip_fields = list(set(partition_skip_fields)) # Copy field values from the primary analysis request ar = api.get_object(analysis_request) record = fields_to_dict(ar, partition_skip_fields) # Update with values that are partition-specific record.update({ "InternalUse": True, "ParentAnalysisRequest": api.get_uid(ar), }) if sample_type is not None: record["SampleType"] = sample_type and api.get_uid(sample_type) or "" if container is not None: record["Container"] = container and api.get_uid(container) or "" if preservation is not None: record["Preservation"] = preservation and api.get_uid( preservation) or "" # Create the Partition client = ar.getClient() analyses = list(set(map(api.get_object, analyses))) services = map(lambda an: an.getAnalysisService(), analyses) specs = ar.getSpecification() specs = specs and specs.getResultsRange() or [] partition = create_analysisrequest(client, request=request, values=record, analyses=services, specifications=specs) # Remove analyses from the primary if remove_primary_analyses: analyses_ids = map(api.get_id, analyses) ar.manage_delObjects(analyses_ids) # Reindex Parent Analysis Request ar.reindexObject(idxs=["isRootAncestor"]) # Manually set the Date Received to match with its parent. This is # necessary because create_analysisrequest calls processForm, so DateReceived # is not set because the partition has not been received yet partition.setDateReceived(ar.getDateReceived()) partition.reindexObject(idxs="getDateReceived") # Force partition to same status as the primary status = api.get_workflow_status_of(ar) changeWorkflowState(partition, "bika_ar_workflow", status) # And initialize the analyses the partition contains. This is required # here because the transition "initialize" of analyses relies on a guard, # so the initialization can only be performed when the sample has been # received (DateReceived is set) ActionHandlerPool.get_instance().queue_pool() for analysis in partition.getAnalyses(full_objects=True): doActionFor(analysis, "initialize") ActionHandlerPool.get_instance().resume() return partition
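The partition record is essentially a copy of the primary's field values minus the skip list, plus a few partition-specific overrides. A minimal, self-contained sketch of that copy pattern using plain dicts (copy_record and the sample data are hypothetical stand-ins for fields_to_dict):

PARTITION_SKIP_FIELDS = ["Analyses", "Client", "Sample", "id", "creation_date"]

def copy_record(primary_record, skip_fields=None, **overrides):
    """Copy field values from a primary record, dropping skipped fields
    and applying partition-specific overrides (plain-dict sketch)."""
    skipped = set(PARTITION_SKIP_FIELDS) | set(skip_fields or [])
    record = {k: v for k, v in primary_record.items() if k not in skipped}
    record.update(overrides)
    return record

primary = {"id": "WS-001", "Client": "client-uid", "SampleType": "water-uid",
           "Contact": "contact-uid", "Analyses": ["an-1", "an-2"]}
print(copy_record(primary, ParentAnalysisRequest="primary-uid", InternalUse=True))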
def folderitem(self, obj, item, index): """Service triggered each time an item is iterated in folderitems. The use of this service prevents the extra-loops in child objects. :obj: the instance of the class to be foldered :item: dict containing the properties of the object to be used by the template :index: current index of the item """ # ensure we have an object and not a brain obj = api.get_object(obj) url = api.get_url(obj) title = api.get_title(obj) keyword = obj.getKeyword() # get the category if self.show_categories_enabled(): category = obj.getCategoryTitle() if category not in self.categories: self.categories.append(category) item["category"] = category item["Title"] = title item["replace"]["Title"] = get_link(url, value=title) item["choices"]["min_operator"] = self.min_operator_choices item["choices"]["max_operator"] = self.max_operator_choices item["allow_edit"] = self.get_editable_columns() item["required"] = self.get_required_columns() spec = self.specification.get(keyword, {}) item["selected"] = spec and True or False item["min_operator"] = spec.get("min_operator", "geq") item["min"] = spec.get("min", "") item["max_operator"] = spec.get("max_operator", "leq") item["max"] = spec.get("max", "") item["warn_min"] = spec.get("warn_min", "") item["warn_max"] = spec.get("warn_max", "") item["hidemin"] = spec.get("hidemin", "") item["hidemax"] = spec.get("hidemax", "") item["rangecomment"] = spec.get("rangecomment", "") # Add methods methods = obj.getMethods() if methods: links = map( lambda m: get_link( m.absolute_url(), value=m.Title(), css_class="link"), methods) item["replace"]["Methods"] = ", ".join(links) else: item["methods"] = "" # Icons after_icons = "" if obj.getAccredited(): after_icons += get_image("accredited.png", title=_("Accredited")) if obj.getAttachmentOption() == "r": after_icons += get_image("attach_reqd.png", title=_("Attachment required")) if obj.getAttachmentOption() == "n": after_icons += get_image("attach_no.png", title=_("Attachment not permitted")) if after_icons: item["after"]["Title"] = after_icons return item
def setup_catalogs(portal): """Setup Plone catalogs """ logger.info("Setup Catalogs ...") # Setup catalogs by type for type_name, catalogs in CATALOGS_BY_TYPE: at = api.get_tool("archetype_tool") # get the current registered catalogs current_catalogs = at.getCatalogsByType(type_name) # get the desired catalogs this type should be in desired_catalogs = map(api.get_tool, catalogs) # check if the catalogs changed for this portal_type if set(desired_catalogs).difference(current_catalogs): # fetch the brains to reindex brains = api.search({"portal_type": type_name}) # update the catalogs at.setCatalogsByType(type_name, catalogs) logger.info("Assign '%s' type to Catalogs %s" % (type_name, catalogs)) for brain in brains: obj = api.get_object(brain) logger.info("Reindexing '%s'" % repr(obj)) obj.reindexObject() # Setup catalog indexes to_index = [] for catalog, name, meta_type in INDEXES: c = api.get_tool(catalog) indexes = c.indexes() if name in indexes: logger.info("Index '%s' already in Catalog [SKIP]" % name) continue logger.info("Adding Index '%s' for field '%s' to catalog '%s'" % (meta_type, name, catalog)) if meta_type == "ZCTextIndex": addZCTextIndex(c, name) else: c.addIndex(name, meta_type) to_index.append((c, name)) logger.info("Added Index '%s' for field '%s' to catalog [DONE]" % (meta_type, name)) for catalog, name in to_index: logger.info("Indexing new index '%s' ..." % name) catalog.manage_reindexIndex(name) logger.info("Indexing new index '%s' [DONE]" % name) # Setup catalog metadata columns for catalog, name in COLUMNS: c = api.get_tool(catalog) if name not in c.schema(): logger.info("Adding Column '%s' to catalog '%s' ..." % (name, catalog)) c.addColumn(name) logger.info("Added Column '%s' to catalog '%s' [DONE]" % (name, catalog)) else: logger.info("Column '%s' already in catalog '%s' [SKIP]" % (name, catalog)) continue
def __call__(self): parms = [] titles = [] self.contentFilter = dict(portal_type="AnalysisRequest", is_active=True, sort_on="getDateReceived") val = self.selection_macros.parse_daterange(self.request, 'getDateReceived', _('Date Received')) if val: self.contentFilter[val['contentFilter'] [0]] = val['contentFilter'][1] parms.append(val['parms']) titles.append(val['titles']) # Query the catalog and store results in a dictionary ars = api.search(self.contentFilter, CATALOG_ANALYSIS_REQUEST_LISTING) if not ars: message = _("No Samples matched your query") self.context.plone_utils.addPortalMessage(message, "error") return self.default_template() datalines = [] analyses_count = 0 for ar in ars: ar = api.get_object(ar) # For each sample, retrieve the analyses and generate # a data line for each one for analysis in ar.getAnalyses(): analysis = analysis.getObject() ds = ar.getDateSampled() sd = ar.getSamplingDate() dataline = { 'AnalysisKeyword': analysis.getKeyword(), 'AnalysisTitle': analysis.Title(), 'SampleID': ar.getId(), 'SampleType': ar.getSampleType().Title(), 'DateReceived': self.ulocalized_time(ar.getDateReceived(), long_format=1), 'DateSampled': self.ulocalized_time(ds, long_format=1), } if self.context.bika_setup.getSamplingWorkflowEnabled(): dataline['SamplingDate'] = self.ulocalized_time( sd, long_format=1) datalines.append(dataline) analyses_count += 1 # Footer total data footlines = [] footline = {'TotalCount': analyses_count} footlines.append(footline) self.report_data = { 'parameters': parms, 'datalines': datalines, 'footlines': footlines } if self.request.get('output_format', '') == 'CSV': import csv import StringIO import datetime fieldnames = [ 'SampleID', 'SampleType', 'DateSampled', 'DateReceived', 'AnalysisTitle', 'AnalysisKeyword', ] if self.context.bika_setup.getSamplingWorkflowEnabled(): fieldnames.append('SamplingDate') output = StringIO.StringIO() dw = csv.DictWriter(output, fieldnames=fieldnames) dw.writerow(dict((fn, fn) for fn in fieldnames)) for row in datalines: dw.writerow(row) report_data = output.getvalue() output.close() date = datetime.datetime.now().strftime("%Y%m%d%H%M") setheader = self.request.RESPONSE.setHeader setheader('Content-Type', 'text/csv') setheader( "Content-Disposition", "attachment;filename=\"dailysamplesreceived_%s.csv\"" % date) self.request.RESPONSE.write(report_data) else: return { 'report_title': _('Daily samples received'), 'report_data': self.template() }
def __call__(self): plone.protect.CheckAuthenticator(self.request) bsc = getToolByName(self.context, 'bika_setup_catalog') uc = getToolByName(self.context, 'uid_catalog') service_title = self.request.get('service_title', '').strip() if not service_title: return '' self.an_service_title = "" self.an_service_version = 0 self.an_service_url = "" analysis = uc(UID=self.request.get('analysis_uid', None)) if analysis: analysis = analysis[0].getObject() service = analysis.getService() self.an_service_title = service.Title() self.an_service_version = api.get_version(service) self.an_service_url = service.absolute_url() self.request['ajax_load'] = 1 tmp = LogView(analysis, self.request) self.log = tmp.folderitems() self.log.reverse() else: self.log = [] brains = bsc(portal_type="AnalysisService", title=to_unicode(service_title)) if not brains: return '' self.service = api.get_object(brains[0]) self.calc = self.service.getCalculation() self.partsetup = self.service.getPartitionSetup() # convert uids to comma-separated list of display titles for i, ps in enumerate(self.partsetup): self.partsetup[i]['separate'] = 'separate' in ps and _('Yes') or _('No') if type(ps['sampletype']) == str: ps['sampletype'] = [ps['sampletype'], ] sampletypes = [] for st in ps['sampletype']: res = bsc(UID=st) sampletypes.append(res and res[0].Title or st) self.partsetup[i]['sampletype'] = ", ".join(sampletypes) if 'container' in ps: if type(ps['container']) == str: self.partsetup[i]['container'] = [ps['container'], ] try: containers = [bsc(UID=c)[0].Title for c in ps['container']] except IndexError: containers = [c for c in ps['container']] self.partsetup[i]['container'] = ", ".join(containers) else: self.partsetup[i]['container'] = '' if 'preservation' in ps: if type(ps['preservation']) == str: ps['preservation'] = [ps['preservation'], ] try: preservations = [bsc(UID=c)[0].Title for c in ps['preservation']] except IndexError: preservations = [c for c in ps['preservation']] self.partsetup[i]['preservation'] = ", ".join(preservations) else: self.partsetup[i]['preservation'] = '' return self.template()
def folderitem(self, obj, item, index): """Service triggered each time an item is iterated in folderitems. The use of this service prevents the extra-loops in child objects. :obj: the instance of the class to be foldered :item: dict containing the properties of the object to be used by the template :index: current index of the item """ # ensure we have an object and not a brain obj = api.get_object(obj) uid = api.get_uid(obj) # settings for this analysis service_settings = self.context.getAnalysisServiceSettings(uid) hidden = service_settings.get("hidden", obj.getHidden()) # get the category category = obj.getCategoryTitle() item["category"] = category if category not in self.categories: self.categories.append(category) parts = filter(api.is_active, self.get_partitions()) partitions = map( lambda part: { "ResultValue": part.Title(), "ResultText": part.getId() }, parts) keyword = obj.getKeyword() partition = None if uid in self.analyses: analysis = self.analyses[uid] # Might differ from the service keyword keyword = analysis.getKeyword() # Mark the row as disabled if the analysis is not in an open state item["disabled"] = not analysis.isOpen() # get the hidden status of the analysis hidden = analysis.getHidden() # get the partition of the analysis partition = self.get_partition(analysis) else: partition = self.get_partitions()[0] # get the specification of this object rr = self.get_results_range() spec = rr.get(keyword, ResultsRangeDict()) item["Title"] = obj.Title() item["Unit"] = obj.getUnit() item["Price"] = obj.getPrice() item["before"]["Price"] = self.get_currency_symbol() item["allow_edit"] = self.get_editable_columns(obj) item["selected"] = uid in self.selected item["min"] = str(spec.get("min", "")) item["max"] = str(spec.get("max", "")) item["warn_min"] = str(spec.get("warn_min", "")) item["warn_max"] = str(spec.get("warn_max", "")) item["Hidden"] = hidden item["Partition"] = partition.getId() item["choices"]["Partition"] = partitions # Icons after_icons = "" if obj.getAccredited(): after_icons += get_image("accredited.png", title=t(_("Accredited"))) if obj.getAttachmentOption() == "r": after_icons += get_image("attach_reqd.png", title=t(_("Attachment required"))) if obj.getAttachmentOption() == "n": after_icons += get_image("attach_no.png", title=t(_('Attachment not permitted'))) if after_icons: item["after"]["Title"] = after_icons return item
def get_project_contact(self): batch = api.get_object(self.getBatch()) project_contact = batch.getReferences( relationship="SDGProjectContact")[0] project_contact_name = project_contact.Firstname + " " + project_contact.Surname return project_contact_name
def remove_cascaded_analyses_of_root_samples(portal): """Removes Analyses from Root Samples that belong to Partitions https://github.com/senaite/senaite.core/issues/1504 """ logger.info("Removing cascaded analyses from Root Samples...") # Query all root Samples query = { "isRootAncestor": True, "sort_on": "created", "sort_order": "ascending", } root_samples = api.search(query, "bika_catalog_analysisrequest_listing") total = len(root_samples) logger.info("{} Samples to check... ".format(total)) to_clean = [] for num, brain in enumerate(root_samples): logger.debug("Checking Root Sample {}/{}".format(num + 1, total)) # No Partitions, continue... if not brain.getDescendantsUIDs: continue # get the root sample root_sample = api.get_object(brain) # get the contained analyses of the root sample root_analyses = root_sample.objectIds(spec=["Analysis"]) # Mapping of cascaded Analysis -> Partition analysis_mapping = {} # check if a root analysis is located as well in one of the partitions for partition in root_sample.getDescendants(): # get the contained analyses of the partition part_analyses = partition.objectIds(spec=["Analysis"]) # filter analyses that cascade root analyses cascaded = filter(lambda an: an in root_analyses, part_analyses) # keep a mapping of analysis -> partition for analysis in cascaded: analysis_mapping[analysis] = partition if analysis_mapping: to_clean.append((root_sample, analysis_mapping)) # count the cases for each condition case_counter = defaultdict(int) # cleanup cascaded analyses # mapping maps the analysis id -> partition for sample, mapping in to_clean: # go through the cascaded analyses and decide if the cascaded analysis # should be removed from (a) the root sample or (b) the partition. for analysis_id, partition in mapping.items(): # analysis from the root sample root_an = sample[analysis_id] # WF state from the root sample analysis root_an_state = api.get_workflow_status_of(root_an) # analysis from the partition sample part_an = partition[analysis_id] # WF state from the partition sample analysis part_an_state = api.get_workflow_status_of(part_an) case_counter["{}_{}".format(root_an_state, part_an_state)] += 1 # both analyses have the same WF state if root_an_state == part_an_state: # -> remove the analysis from the root sample sample._delObject(analysis_id) logger.info( "Remove analysis '{}' in state '{}' from sample {}: {}". format(analysis_id, root_an_state, api.get_id(sample), api.get_url(sample))) # both are in verified/published state elif IVerified.providedBy(root_an) and IVerified.providedBy( part_an): root_an_result = root_an.getResult() part_an_result = part_an.getResult() if root_an_result == part_an_result: # remove the root analysis sample._delObject(analysis_id) logger.info( "Remove analysis '{}' in state '{}' from sample {}: {}" .format(analysis_id, root_an_state, api.get_id(sample), api.get_url(sample))) else: # -> unsolvable edge case # display an error message logger.error("Analysis '{}' of root sample in state '{}' " "and Analysis of partition in state {}. " "Please fix manually: {}".format( analysis_id, root_an_state, part_an_state, api.get_url(sample))) # root analysis is in invalid state elif root_an_state in ["rejected", "retracted"]: # -> probably the retest was automatically created in the # parent instead of the partition pass # partition analysis is in invalid state elif part_an_state in ["rejected", "retracted"]: # -> probably the retest was automatically created in the # parent instead of the partition pass # root analysis was submitted, but not the partition analysis elif ISubmitted.providedBy( root_an) and not ISubmitted.providedBy(part_an): # -> remove the analysis from the partition partition._delObject(analysis_id) logger.info( "Remove analysis '{}' in state '{}' from partition {}: {}". format(analysis_id, part_an_state, api.get_id(partition), api.get_url(partition))) # partition analysis was submitted, but not the root analysis elif ISubmitted.providedBy( part_an) and not ISubmitted.providedBy(root_an): # -> remove the analysis from the root sample sample._delObject(analysis_id) logger.info( "Remove analysis '{}' in state '{}' from sample {}: {}". format(analysis_id, root_an_state, api.get_id(sample), api.get_url(sample))) # inconsistent state else: logger.warning( "Cannot handle analysis '{}' located in '{}' (state {}) and '{}' (state {})" .format(analysis_id, repr(sample), root_an_state, repr(partition), part_an_state)) logger.info("Removing cascaded analyses from Root Samples... [DONE]") logger.info("State Combinations (root_an_state, part_an_state): {}".format( sorted(case_counter.items(), key=itemgetter(1), reverse=True)))
def fix_workflow_transitions(portal): """ Replace target states of some workflow transitions """ logger.info("Fixing workflow transitions...") tochange = [ {'wfid': 'bika_duplicateanalysis_workflow', 'trid': 'submit', 'changes': { 'new_state_id': 'to_be_verified', 'guard_expr': '' }, 'update': { 'catalog': CATALOG_ANALYSIS_LISTING, 'portal_type': 'DuplicateAnalysis', 'status_from': 'attachment_due', 'status_to': 'to_be_verified' } } ] wtool = api.get_tool('portal_workflow') for item in tochange: wfid = item['wfid'] trid = item['trid'] workflow = wtool.getWorkflowById(wfid) transitions = workflow.transitions transition = transitions[trid] changes = item.get('changes', {}) if 'new_state_id' in changes: new_state_id = changes['new_state_id'] oldstate = transition.new_state_id logger.info( "Replacing target state '{0}' from '{1}.{2}' to '{3}'" .format(oldstate, wfid, trid, new_state_id) ) transition.new_state_id = new_state_id if 'guard_expr' in changes: new_guard = changes['guard_expr'] if not new_guard: transition.guard = None logger.info( "Removing guard expression from '{0}.{1}'" .format(wfid, trid)) else: guard = transition.getGuard() guard.expr = Expression(new_guard) transition.guard = guard logger.info( "Replacing guard expression from '{0}.{1}' to '{2}'" .format(wfid, trid, new_guard)) update = item.get('update', {}) if update: catalog_id = update['catalog'] portal_type = update['portal_type'] catalog = api.get_tool(catalog_id) brains = catalog(portal_type=portal_type) for brain in brains: obj = api.get_object(brain) if 'status_from' in update and 'status_to' in update: status_from = update['status_from'] status_to = update['status_to'] if status_from == brain.review_state: logger.info( "Changing status for {0} from '{1}' to '{2}'" .format(obj.getId(), status_from, status_to)) changeWorkflowState(obj, wfid, status_to) workflow.updateRoleMappingsFor(obj) obj.reindexObject()
from AccessControl.User import UnrestrictedUser from AccessControl.SecurityManagement import getSecurityManager, newSecurityManager from bika.lims import api from datetime import datetime portal = api.get_portal() me = UnrestrictedUser(getSecurityManager().getUser().getUserName(), '', ['LabManager'], '') me = me.__of__(portal.acl_users) newSecurityManager(None, me) batches = api.search({'portal_type': 'Batch'}) # Print all Batch info for batch in batches: x = api.get_object(batch) id = x.getId() title = x.getClientBatchID() client = x.getClient().getName() recvX = x.BatchDate if recvX is not None: recv = "" + recvX.Date() + " " + recvX.Time() else: recv = "" testX = x.DateTimeIn if testX is not None: test = "" + testX.Date() + " " + testX.Time() else: test = "" print( "ID: {}\nTitle: {}\nClient: {}\nReceived Date/Time: {}\nTest Date/Time: {}\n" .format(id, title, client, recv, test))
def get_slot_header(self, item): """ Generates a slot header (the first cell of the row) for the item :param item: the item for which the slot header is requested :return: the html contents to be displayed in the first cell of a slot """ obj = item['obj'] obj = api.get_object(obj) # TODO All contents below have to be refactored/cleaned-up! # fill the rowspan with a little table # parent is either an AR, a Worksheet, or a # ReferenceSample (analysis parent). parent = api.get_parent(obj) if parent.aq_parent.portal_type == "WorksheetFolder": # we're a duplicate; get original object's client client = obj.getAnalysis().aq_parent.aq_parent elif parent.aq_parent.portal_type == "Supplier": # we're a reference sample; get reference definition client = obj.getReferenceDefinition() else: client = parent.aq_parent pos_text = "<table class='worksheet-position' width='100%' cellpadding='0' cellspacing='0' style='padding-bottom:5px;'><tr>" + \ "<td class='pos' rowspan='3'>%s</td>" % item['Pos'] if obj.portal_type == 'ReferenceAnalysis': pos_text += "<td class='pos_top'>%s</td>" % obj.getReferenceAnalysesGroupID() elif obj.portal_type == 'DuplicateAnalysis' and \ obj.getAnalysis().portal_type == 'ReferenceAnalysis': pos_text += "<td class='pos_top'><a href='%s'>%s</a></td>" % \ (obj.aq_parent.absolute_url(), obj.aq_parent.id) elif client: pos_text += "<td class='pos_top'><a href='%s'>%s</a></td>" % \ (client.absolute_url(), client.Title()) else: pos_text += "<td class='pos_top'> </td>" pos_text += "<td class='pos_top_icons' rowspan='3'>" if obj.portal_type == 'DuplicateAnalysis': pos_text += "<img title='%s' src='%s/++resource++bika.lims.images/duplicate.png'/>" % ( _("Duplicate").encode('utf-8'), self.context.absolute_url()) pos_text += "<br/>" elif obj.portal_type == 'ReferenceAnalysis' and obj.ReferenceType == 'b': pos_text += "<a href='%s'><img title='%s' src='++resource++bika.lims.images/blank.png'></a>" % ( parent.absolute_url(), parent.Title()) pos_text += "<br/>" elif obj.portal_type == 'ReferenceAnalysis' and obj.ReferenceType == 'c': pos_text += "<a href='%s'><img title='%s' src='++resource++bika.lims.images/control.png'></a>" % ( parent.absolute_url(), parent.Title()) pos_text += "<br/>" if parent.portal_type == 'AnalysisRequest': sample = parent.getSample() pos_text += "<a href='%s'><img title='%s' src='++resource++bika.lims.images/sample.png'></a>" % ( sample.absolute_url(), sample.Title()) pos_text += "</td></tr>" pos_text += "<tr><td>" if parent.portal_type == 'AnalysisRequest': pos_text += "<a href='%s'>%s</a>" % ( parent.absolute_url(), parent.Title()) elif parent.portal_type == 'ReferenceSample': pos_text += "<a href='%s'>%s</a>" % ( parent.absolute_url(), parent.Title()) elif obj.portal_type == 'DuplicateAnalysis': pos_text += "<a style='white-space:nowrap' href='%s'>%s</a>" % ( obj.getAnalysis().aq_parent.absolute_url(), obj.getReferenceAnalysesGroupID()) elif parent.portal_type == 'Worksheet': parent = obj.getAnalysis().aq_parent pos_text += "<a href='%s'>(%s)</a>" % ( parent.absolute_url(), parent.Title()) pos_text += "</td></tr>" # sampletype pos_text += "<tr><td>" if obj.portal_type == 'Analysis': pos_text += obj.aq_parent.getSample().getSampleType().Title() elif obj.portal_type == 'ReferenceAnalysis' or \ (obj.portal_type == 'DuplicateAnalysis' and \ obj.getAnalysis().portal_type == 'ReferenceAnalysis'): pos_text += "" # obj.aq_parent.getReferenceDefinition().Title() elif obj.portal_type == 'DuplicateAnalysis': pos_text += obj.getAnalysis().aq_parent.getSample().getSampleType().Title() pos_text += "</td></tr>" # samplingdeviation if obj.portal_type == 'Analysis': deviation = obj.aq_parent.getSample().getSamplingDeviation() if deviation: pos_text += "<tr><td> </td>" pos_text += "<td colspan='2'>" pos_text += deviation.Title() pos_text += "</td></tr>" ## # barcode ## barcode = parent.id.replace("-", "") ## if obj.portal_type == 'DuplicateAnalysis': ## barcode += "D" ## pos_text += "<tr><td class='barcode' colspan='3'><div id='barcode_%s'></div>" % barcode + \ ## "<script type='text/javascript'>$('#barcode_%s').barcode('%s', 'code128', {'barHeight':15, addQuietZone:false, showHRI: false })</script>" % (barcode, barcode) + \ ## "</td></tr>" pos_text += "</table>" return pos_text
def folderitem(self, obj, item, index): """Service triggered each time an item is iterated in folderitems. The use of this service prevents the extra-loops in child objects. :obj: the instance of the class to be foldered :item: dict containing the properties of the object to be used by the template :index: current index of the item """ # ensure we have an object and not a brain obj = api.get_object(obj) uid = api.get_uid(obj) url = api.get_url(obj) title = api.get_title(obj) # get the category if self.show_categories_enabled(): category = obj.getCategoryTitle() if category not in self.categories: self.categories.append(category) item["category"] = category config = self.configuration.get(uid, {}) hidden = config.get("hidden", False) item["replace"]["Title"] = get_link(url, value=title) item["Price"] = self.format_price(obj.Price) item["allow_edit"] = self.get_editable_columns() item["selected"] = False item["Hidden"] = hidden item["selected"] = uid in self.configuration # Add methods methods = obj.getMethods() if methods: links = map( lambda m: get_link( m.absolute_url(), value=m.Title(), css_class="link"), methods) item["replace"]["Methods"] = ", ".join(links) else: item["methods"] = "" # Unit unit = obj.getUnit() item["Unit"] = unit and format_supsub(unit) or "" # Icons after_icons = "" if obj.getAccredited(): after_icons += get_image("accredited.png", title=_("Accredited")) if obj.getAttachmentOption() == "r": after_icons += get_image("attach_reqd.png", title=_("Attachment required")) if obj.getAttachmentOption() == "n": after_icons += get_image("attach_no.png", title=_("Attachment not permitted")) if after_icons: item["after"]["Title"] = after_icons return item
def fix_worksheet_qc_number_analyses_inconsistences(portal, ut): query = {'portal_type': 'Worksheet', 'review_state': 'open'} brains = api.search(query, CATALOG_WORKSHEET_LISTING) for brain in brains: worksheet = api.get_object(brain) worksheet.reindexObject()
def setup_catalogs(catalogs_by_type, indexes_by_catalog, columns_by_catalog): """Setup Plone catalogs """ logger.info("Setup Catalogs ...") # Setup catalogs by type for type_name, catalogs in catalogs_by_type: at = api.get_tool("archetype_tool") # get the current registered catalogs current_catalogs = at.getCatalogsByType(type_name) # get the desired catalogs this type should be in desired_catalogs = map(api.get_tool, catalogs) # check if the catalogs changed for this portal_type if set(desired_catalogs).difference(current_catalogs): # fetch the brains to reindex brains = api.search({"portal_type": type_name}) # update the catalogs at.setCatalogsByType(type_name, catalogs) logger.info("Assign '%s' type to Catalogs %s" % (type_name, catalogs)) for brain in brains: obj = api.get_object(brain) logger.info("Reindexing '%s'" % repr(obj)) obj.reindexObject() # Setup catalog indexes to_index = [] for catalog, name, meta_type in indexes_by_catalog: c = api.get_tool(catalog) indexes = c.indexes() if name in indexes: logger.info("Index '%s' already in Catalog [SKIP]" % name) continue logger.info("Adding Index '%s' for field '%s' to catalog '%s'" % (meta_type, name, catalog)) if meta_type == "ZCTextIndex": addZCTextIndex(c, name) else: c.addIndex(name, meta_type) to_index.append((c, name)) logger.info("Added Index '%s' for field '%s' to catalog [DONE]" % (meta_type, name)) for catalog, name in to_index: logger.info("Indexing new index '%s' ..." % name) catalog.manage_reindexIndex(name) logger.info("Indexing new index '%s' [DONE]" % name) # Setup catalog metadata columns for catalog, name in columns_by_catalog: c = api.get_tool(catalog) if name not in c.schema(): logger.info("Adding Column '%s' to catalog '%s' ..." % (name, catalog)) c.addColumn(name) logger.info("Added Column '%s' to catalog '%s' [DONE]" % (name, catalog)) else: logger.info("Column '%s' already in catalog '%s' [SKIP]" % (name, catalog)) continue
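For reference, the three arguments are expected to be sequences of tuples. A hedged sketch of their shapes with hypothetical type, catalog, index and column names (the final call is commented out because it needs a running portal):

# Shapes of the three arguments, with hypothetical names
CATALOGS_BY_TYPE = [
    # (portal_type, [catalog ids the type should be indexed in])
    ("SampleMatrix", ["bika_setup_catalog", "portal_catalog"]),
]

INDEXES = [
    # (catalog id, index name, index meta_type)
    ("bika_setup_catalog", "getKeyword", "FieldIndex"),
    ("bika_setup_catalog", "Title", "ZCTextIndex"),
]

COLUMNS = [
    # (catalog id, metadata column name)
    ("bika_setup_catalog", "getKeyword"),
]

# setup_catalogs(CATALOGS_BY_TYPE, INDEXES, COLUMNS)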
def is_accredited(brain): obj = api.get_object(brain) return obj.getAccredited()