Code example #1
File: worksheet.py Project: Espurna/senaite.core
 def getDuplicateAnalyses(self):
     """Return the duplicate analyses assigned to the current worksheet
     :return: Duplicate analyses assigned to this worksheet
     :rtype: list of IDuplicateAnalysis objects"""
     ans = self.getAnalyses()
     duplicates = [an for an in ans if IDuplicateAnalysis.providedBy(an)]
     return duplicates
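
A minimal usage sketch for the method above; `ws` is assumed to be a Worksheet object obtained elsewhere (e.g. from a catalog search), and the printed message is illustrative only:

# Hedged usage sketch: only getDuplicateAnalyses() comes from the example
# above; `ws` and the output format are assumptions.
duplicates = ws.getDuplicateAnalyses()
print("{} duplicate analyses assigned to {}".format(
    len(duplicates), ws.getId()))
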
Code example #2
    def folderitem(self, obj, item, index):
        item = super(QCAnalysesView, self).folderitem(obj, item, index)

        obj = self.get_object(obj)

        # Get the worksheet the analysis is assigned to
        worksheet = obj.getWorksheet()
        if not worksheet:
            return item

        # Fill the Worksheet cell
        ws_id = api.get_id(worksheet)
        ws_url = api.get_url(worksheet)
        item["replace"]["Worksheet"] = get_link(ws_url, value=ws_id)

        if IDuplicateAnalysis.providedBy(obj):
            an_type = "d"
            img_name = "duplicate.png"
            parent = obj.getRequest()
        else:
            an_type = obj.getReferenceType()
            img_name = an_type == "c" and "control.png" or "blank.png"
            parent = obj.aq_parent

        # Render the image
        an_type = QCANALYSIS_TYPES.getValue(an_type)
        item['before']['Service'] = get_image(img_name, title=an_type)

        # Fill the Parent cell
        parent_url = api.get_url(parent)
        parent_id = api.get_id(parent)
        item["replace"]["Parent"] = get_link(parent_url, value=parent_id)

        return item
Code example #3
File: analysis.py Project: nassimcha/sencua
def is_result_range_compliant(analysis):
    """Returns whether the result range from the analysis matches with the
    result range for the service counterpart defined in the Sample
    """
    if not IRequestAnalysis.providedBy(analysis):
        return True

    if IDuplicateAnalysis.providedBy(analysis):
        # It does not make sense to apply compliance to a duplicate, because
        # its valid range depends on the result of the original analysis
        return True

    rr = analysis.getResultsRange()
    service_uid = rr.get("uid", None)
    if not api.is_uid(service_uid):
        return True

    # Compare with Sample
    sample = analysis.getRequest()

    # If no Specification is set, assume it is compliant
    specification = sample.getRawSpecification()
    if not specification:
        return True

    # Compare with the Specification that was initially set to the Sample
    sample_rr = sample.getResultsRange(search_by=service_uid)
    if not sample_rr:
        # This service is not defined in the Sample's ResultsRange, so we
        # assume this *does not* break the compliance
        return True

    return rr == sample_rr
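
A hedged usage sketch for the helper above; `analyses` is assumed to be a list of analysis objects collected elsewhere, and the message text is illustrative only:

# Hedged usage sketch: report analyses whose result range no longer matches
# the Sample specification. getKeyword() is a standard accessor on analyses.
for analysis in analyses:
    if not is_result_range_compliant(analysis):
        print("Result range differs from the Sample spec: {}".format(
            analysis.getKeyword()))
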
Code example #4
 def remove_duplicates(self, ws):
     """When this analysis is unassigned from a worksheet, this function
     is responsible for deleting DuplicateAnalysis objects from the ws.
     """
     for analysis in ws.objectValues():
         if IDuplicateAnalysis.providedBy(analysis) \
                 and analysis.getAnalysis().UID() == self.UID():
             ws.removeAnalysis(analysis)
Code example #5
File: analysis.py Project: KaskMartin/Bika-LIMS
 def getSample(self):
     # ReferenceSample cannot provide a 'getSample'
     if IReferenceAnalysis.providedBy(self):
         return None
     if IDuplicateAnalysis.providedBy(self) \
             or self.portal_type == 'RejectAnalysis':
         return self.getAnalysis().aq_parent.getSample()
     return self.aq_parent.getSample()
Code example #6
File: abstractanalysis.py Project: xispa/bika.lims
 def remove_duplicates(self, ws):
     """When this analysis is unassigned from a worksheet, this function
     is responsible for deleting DuplicateAnalysis objects from the ws.
     """
     for analysis in ws.objectValues():
         if IDuplicateAnalysis.providedBy(analysis) \
                 and analysis.getAnalysis().UID() == self.UID():
             ws.removeAnalysis(analysis)
Code example #7
File: analysis.py Project: traceanalytics/Bika-LIMS
 def getSample(self):
     # ReferenceSample cannot provide a 'getSample'
     if IReferenceAnalysis.providedBy(self):
         return None
     if IDuplicateAnalysis.providedBy(self) \
             or self.portal_type == 'RejectAnalysis':
         return self.getAnalysis().aq_parent.getSample()
     return self.aq_parent.getSample()
Code example #8
File: events.py Project: tdiphale/bhp.lims-1
def after_verify(obj):
    """Event fired after receive (Process) transition is triggered
    """
    logger.info("*** Custom after_verify transition ***")
    if IAnalysis.providedBy(obj) or IDuplicateAnalysis.providedBy(obj):
        analysis_events.after_verify(obj)

    if IAnalysisRequest.providedBy(obj):
        _promote_transition(obj, "verify")
Code example #9
File: events.py Project: tdiphale/bhp.lims-1
def after_submit(obj):
    """Event fired after submit transition is triggered
    """
    logger.info("*** Custom after_submit transition ***")
    if IAnalysis.providedBy(obj) or IDuplicateAnalysis.providedBy(obj):
        analysis_events.after_submit(obj)

    if IAnalysisRequest.providedBy(obj):
        _promote_transition(obj, "submit")
Code example #10
def reindex_request(analysis, idxs=None):
    """Reindex the Analysis Request the analysis belongs to, as well as the
    ancestors recursively
    """
    if not IRequestAnalysis.providedBy(analysis) or \
            IDuplicateAnalysis.providedBy(analysis):
        # Analysis not directly bound to an Analysis Request. Do nothing
        return

    n_idxs = ['assigned_state', 'getDueDate']
    n_idxs = idxs and list(set(idxs + n_idxs)) or n_idxs
    request = analysis.getRequest()
    ancestors = [request] + request.getAncestors(all_ancestors=True)
    for ancestor in ancestors:
        push_reindex_to_actions_pool(ancestor, n_idxs)
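
A hedged usage sketch; `analysis` is assumed to be a routine analysis object and the extra index name is illustrative. Note the function only queues the reindex in the actions pool, so the caller is assumed to flush that pool afterwards:

# Hedged usage sketch: ask for a reindex of the Sample (and its ancestor
# partitions) with one extra index on top of the defaults.
reindex_request(analysis, idxs=["review_state"])
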
Code example #11
File: analyses.py Project: siyamalan/senaite.core
    def get_slot_header_data(self, obj):
        """Prepare the data for the slot header template
        """

        item_obj = None
        item_title = ""
        item_url = ""
        item_img = ""
        item_img_url = ""
        item_img_text = ""
        additional_item_icons = []

        parent_obj = None
        parent_title = ""
        parent_url = ""
        parent_img = ""
        parent_img_text = ""
        additional_parent_icons = []

        sample_type_obj = None
        sample_type_title = ""
        sample_type_url = ""
        sample_type_img = ""
        sample_type_img_text = ""

        if IDuplicateAnalysis.providedBy(obj):
            # item
            request = obj.getRequest()
            item_obj = request
            item_title = api.get_id(request)
            item_url = api.get_url(request)
            item_img = "duplicate.png"
            item_img_url = api.get_url(request)
            item_img_text = t(_("Duplicate"))
            # additional item icons
            additional_item_icons.append(self.render_remarks_tag(request))
            # parent
            client = request.getClient()
            parent_obj = client
            parent_title = api.get_title(client)
            parent_url = api.get_url(client)
            parent_img = "client.png"
            parent_img_text = t(_("Client"))
            # sample type
            sample_type = request.getSampleType()
            sample_type_title = request.getSampleTypeTitle()
            sample_type_url = api.get_url(sample_type)
            sample_type_img = "sampletype.png"
            sample_type_img_text = t(_("Sample Type"))

        elif IReferenceAnalysis.providedBy(obj):
            # item
            sample = obj.getSample()
            item_obj = sample
            item_title = api.get_id(sample)
            item_url = api.get_url(sample)
            item_img_url = api.get_url(sample)
            item_img = "control.png"
            item_img_text = t(_("Control"))
            if obj.getReferenceType() == "b":
                item_img = "blank.png"
                item_img_text = t(_("Blank"))
            # parent
            supplier = obj.getSupplier()
            parent_obj = supplier
            parent_title = api.get_title(supplier)
            parent_url = api.get_url(supplier)
            parent_img = "supplier.png"
            parent_img_text = t(_("Supplier"))
        elif IRoutineAnalysis.providedBy(obj):
            # item
            request = obj.getRequest()
            item_obj = request
            item_title = api.get_id(request)
            item_url = api.get_url(request)
            item_img = "sample.png"
            item_img_url = api.get_url(request)
            item_img_text = t(_("Sample"))
            # additional item icons
            additional_item_icons.append(self.render_remarks_tag(request))

            # parent
            client = obj.getClient()
            parent_obj = client
            parent_title = api.get_title(client)
            parent_url = api.get_url(client)
            parent_img = "client.png"
            parent_img_text = t(_("Client"))
            # sample type
            client_ref = request.getClientReference()
            client_sid = request.getClientSampleID()
            tokens = filter(None, [client_ref, client_sid])
            sample_type_title = ' / '.join(tokens)
            sample_type = obj.getSampleType()
            sample_type_img = "sampletype.png"
            sample_type_img_text = t(_("Tank / Blend ID"))

        return {
            # item
            "item_obj": item_obj,
            "item_title": item_title,
            "item_url": item_url,
            "item_img": get_image(item_img, title=item_img_text),
            "item_img_url": item_img_url,
            "additional_item_icons": additional_item_icons,
            # parent
            "parent_obj": parent_obj,
            "parent_title": parent_title,
            "parent_url": parent_url,
            "parent_img": get_image(parent_img, title=parent_img_text),
            "additional_parent_icons": additional_parent_icons,
            # sample type
            "sample_type_obj": sample_type_obj,
            "sample_type_title": sample_type_title,
            "sample_type_url": sample_type_url,
            "sample_type_img": get_image(
                sample_type_img, title=sample_type_img_text),
        }
Code example #12
File: analysis.py Project: nassimcha/sencua
def is_out_of_range(brain_or_object, result=_marker):
    """Checks if the result for the analysis passed in is out of range and/or
    out of shoulders range.

            min                                                   max
            warn            min                   max             warn
    ·········|---------------|=====================|---------------|·········
    ----- out-of-range -----><----- in-range ------><----- out-of-range -----
             <-- shoulder --><----- in-range ------><-- shoulder -->

    :param brain_or_object: A single catalog brain or content object
    :param result: Tentative result. If not provided, the result of the
    analysis is used
    :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
    :returns: Tuple of two elements. The first value is `True` if the result is
    out of range and `False` if it is in range. The second value is `True` if
    the result is out of shoulder range and `False` if it is in shoulder range
    :rtype: (bool, bool)
    """
    analysis = api.get_object(brain_or_object)
    if not IAnalysis.providedBy(analysis) and \
            not IReferenceAnalysis.providedBy(analysis):
        api.fail("{} is not supported. Needs to be IAnalysis or "
                 "IReferenceAnalysis".format(repr(analysis)))

    if result is _marker:
        result = api.safe_getattr(analysis, "getResult", None)

    if result in [None, '']:
        # Empty result
        return False, False

    if IDuplicateAnalysis.providedBy(analysis):
        # Result range for duplicate analyses is calculated from the original
        # result, applying a variation % in shoulders. If the analysis has
        # result options enabled or string results enabled, the system returns
        # an empty result range for the duplicate: the result must match the
        # original result 100%
        original = analysis.getAnalysis()
        original_result = original.getResult()

        # Does original analysis have a valid result?
        if original_result in [None, '']:
            return False, False

        # Does the original result type match the duplicate result type?
        if api.is_floatable(result) != api.is_floatable(original_result):
            return True, True

        # Does the analysis have result options enabled or a non-floatable
        # result?
        if analysis.getResultOptions() or not api.is_floatable(original_result):
            # Let's always assume the result is 'out from shoulders', cause we
            # consider the shoulders are precisely the duplicate variation %
            out_of_range = original_result != result
            return out_of_range, out_of_range

    elif not api.is_floatable(result):
        # A non-duplicate with a non-floatable result. There is no way to know
        # whether the result is out-of-range
        return False, False

    # Convert result to a float
    result = api.to_float(result)

    # Note that routine analyses, duplicates and reference analyses all
    # implement the function getResultsRange:
    # - For routine analyses, the function returns the valid range based on the
    #   specs assigned during the creation process.
    # - For duplicates, the valid range is the result of the analysis the
    #   duplicate was generated from +/- the duplicate variation.
    # - For reference analyses, getResultsRange returns the valid range as
    #   indicated in the Reference Sample from which the analysis was created.
    result_range = api.safe_getattr(analysis, "getResultsRange", None)
    if not result_range:
        # No result range defined or the passed in object does not suit
        return False, False

    # Maybe there is a custom adapter
    adapters = getAdapters((analysis,), IResultOutOfRange)
    for name, adapter in adapters:
        ret = adapter(result=result, specification=result_range)
        if not ret or not ret.get('out_of_range', False):
            continue
        if not ret.get('acceptable', True):
            # Out of range + out of shoulders
            return True, True
        # Out of range, but in shoulders
        return True, False

    result_range = ResultsRangeDict(result_range)

    # The assignment of result as default fallback for min and max guarantees
    # the result will be in range also if no min/max values are defined
    specs_min = api.to_float(result_range.min, result)
    specs_max = api.to_float(result_range.max, result)

    in_range = False
    min_operator = result_range.min_operator
    if min_operator == "geq":
        in_range = result >= specs_min
    else:
        in_range = result > specs_min

    max_operator = result_range.max_operator
    if in_range:
        if max_operator == "leq":
            in_range = result <= specs_max
        else:
            in_range = result < specs_max

    # If in range, no need to check shoulders
    if in_range:
        return False, False

    # Out of range, check shoulders. If no explicit warn_min or warn_max has
    # been defined, no shoulders should be considered for this analysis. Thus,
    # use the specs' min and max as default fallback values
    warn_min = api.to_float(result_range.warn_min, specs_min)
    warn_max = api.to_float(result_range.warn_max, specs_max)
    in_shoulder = warn_min <= result <= warn_max
    return True, not in_shoulder
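
The two booleans returned above are easy to misread, so here is a hedged sketch of how a caller might map them to an alert level; `analysis` is assumed to exist and the labels are illustrative, not part of the API:

# Hedged usage sketch: interpret the (out_of_range, out_of_shoulders) tuple.
out_of_range, out_of_shoulders = is_out_of_range(analysis)
if not out_of_range:
    alert = "ok"        # inside min/max
elif not out_of_shoulders:
    alert = "warning"   # outside min/max, but still within warn_min/warn_max
else:
    alert = "error"     # outside the shoulders as well
print(alert)
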
Code example #13
def assign_retracted_to_retests(portal):
    logger.info("Reassigning retracted to retests ...")
    # Note this is confusing: the getRetested index tells us whether the
    # analysis is a retest, not whether the analysis has been retested!
    catalog = api.get_tool(CATALOG_ANALYSIS_LISTING)
    if "getRetested" not in catalog.indexes():
        return

    processed = list()
    query = dict(getRetested="True")
    brains = api.search(query, CATALOG_ANALYSIS_LISTING)
    total = len(brains)
    for num, brain in enumerate(brains):
        retest = api.get_object(brain)
        retest_uid = api.get_uid(retest)
        if retest.getRetestOf():
            # We've already resolved this inconsistency
            total -= 1
            continue
        # Look for the retracted analysis this retest was created from
        if IDuplicateAnalysis.providedBy(retest):
            worksheet = retest.getWorksheet()
            if not worksheet:
                total -= 1
                continue
            for dup in worksheet.get_duplicates_for(retest.getAnalysis()):
                if api.get_uid(dup) != retest_uid \
                        and api.get_workflow_status_of(dup) == "retracted":
                    retest.setRetestOf(dup)
                    processed.append(retest)
                    break
        elif IReferenceAnalysis.providedBy(retest):
            worksheet = retest.getWorksheet()
            if not worksheet:
                total -= 1
                continue
            ref_type = retest.getReferenceType()
            slot = worksheet.get_slot_position(retest.getSample(), ref_type)
            for ref in worksheet.get_analyses_at(slot):
                if api.get_uid(ref) != retest_uid \
                        and api.get_workflow_status_of(ref) == "retracted":
                    retest.setRetestOf(ref)
                    processed.append(retest)
                    break
        else:
            request = retest.getRequest()
            keyword = retest.getKeyword()
            analyses = request.getAnalyses(review_state="retracted",
                                           getKeyword=keyword)
            if not analyses:
                total -= 1
                continue
            retest.setRetestOf(analyses[-1])
            processed.append(retest)

        if num % 100 == 0:
            logger.info("Reassigning retracted analysis: {}/{}".format(
                num, total))

    del_metadata(portal,
                 catalog_id=CATALOG_ANALYSIS_LISTING,
                 column="getRetested")

    add_metadata(portal,
                 catalog_id=CATALOG_ANALYSIS_LISTING,
                 column="getRetestOfUID")

    del_index(portal,
              catalog_id=CATALOG_ANALYSIS_LISTING,
              index_name="getRetested")

    add_index(portal,
              catalog_id=CATALOG_ANALYSIS_LISTING,
              index_name="isRetest",
              index_attribute="isRetest",
              index_metatype="BooleanIndex")

    total = len(processed)
    for num, analysis in enumerate(processed):
        if num % 100 == 0:
            logger.info("Reindexing retests: {}/{}".format(num, total))
        analysis.reindexObject(idxs=["isRetest"])