Example #1
 def is_analysis(self, obj):
     """Check if the object is an analysis
     """
     if IRoutineAnalysis.providedBy(obj):
         return True
     if IReferenceAnalysis.providedBy(obj):
         return True
     return False
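All of these examples hinge on zope.interface marker checks: providedBy returns True when the object declares the given interface. Below is a minimal, self-contained sketch of that mechanism; the stand-in interface and class are defined here only for illustration (in SENAITE/Bika LIMS the real IReferenceAnalysis comes from the product's interfaces module).

from zope.interface import Interface, implementer

class IReferenceAnalysis(Interface):
    """Stand-in marker interface, for illustration only"""

@implementer(IReferenceAnalysis)
class ReferenceAnalysis(object):
    """Stand-in content class that declares the marker interface"""

print(IReferenceAnalysis.providedBy(ReferenceAnalysis()))  # True
print(IReferenceAnalysis.providedBy(object()))             # False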
Example #2
 def getSample(self):
     # ReferenceSample cannot provide a 'getSample'
     if IReferenceAnalysis.providedBy(self):
         return None
     if IDuplicateAnalysis.providedBy(self) \
             or self.portal_type == 'RejectAnalysis':
         return self.getAnalysis().aq_parent.getSample()
     return self.aq_parent.getSample()
Example #3
 def getReferenceAnalyses(self):
     """Return the reference analyses (controls) assigned to the current
     worksheet
     :return: List of reference analyses
     :rtype: List of IReferenceAnalysis objects"""
     ans = self.getAnalyses()
     references = [an for an in ans if IReferenceAnalysis.providedBy(an)]
     return references
Example #4
    def _analysis_data(self, analysis):
        """ Returns a dict that represents the analysis
        """
        decimalmark = analysis.aq_parent.aq_parent.getDecimalMark()
        keyword = analysis.getKeyword()
        andict = {
            'obj': analysis,
            'id': analysis.id,
            'title': analysis.Title(),
            'keyword': keyword,
            'scientific_name': analysis.getScientificName(),
            'accredited': analysis.getAccredited(),
            'point_of_capture': to_utf8(POINTS_OF_CAPTURE.getValue(analysis.getPointOfCapture())),
            'category': to_utf8(analysis.getCategoryTitle()),
            'result': analysis.getResult(),
            'unit': to_utf8(analysis.getUnit()),
            'formatted_unit': format_supsub(to_utf8(analysis.getUnit())),
            'capture_date': analysis.getResultCaptureDate(),
            'request_id': analysis.aq_parent.getId(),
            'formatted_result': '',
            'uncertainty': analysis.getUncertainty(),
            'formatted_uncertainty': '',
            'retested': analysis.isRetest(),
            'remarks': to_utf8(analysis.getRemarks()),
            'outofrange': False,
            'type': analysis.portal_type,
            'reftype': analysis.getReferenceType() if hasattr(
                analysis, 'getReferenceType') else None,
            'worksheet': None,
            'specs': {},
            'formatted_specs': '',
            'review_state': api.get_workflow_status_of(analysis),
        }

        andict['refsample'] = analysis.getSample().id \
            if IReferenceAnalysis.providedBy(analysis) \
            else analysis.getRequestID()

        specs = analysis.getResultsRange()
        andict['specs'] = specs
        scinot = self.context.bika_setup.getScientificNotationReport()
        andict['formatted_result'] = analysis.getFormattedResult(specs=specs, sciformat=int(scinot), decimalmark=decimalmark)

        fs = ''
        if specs.get('min', None) and specs.get('max', None):
            fs = '%s - %s' % (specs['min'], specs['max'])
        elif specs.get('min', None):
            fs = '> %s' % specs['min']
        elif specs.get('max', None):
            fs = '< %s' % specs['max']
        andict['formatted_specs'] = formatDecimalMark(fs, decimalmark)
        andict['formatted_uncertainty'] = format_uncertainty(analysis, analysis.getResult(), decimalmark=decimalmark, sciformat=int(scinot))

        # Out of range?
        andict['outofrange'] = is_out_of_range(analysis)[0]
        return andict
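A minimal usage sketch for the helper above (hypothetical: it assumes a view that exposes _analysis_data and receives a list of analysis objects), showing how the returned dicts might be collected and the out-of-range rows singled out:

    def _analyses_data(self, analyses):
        """Hypothetical companion helper: one dict per analysis"""
        rows = [self._analysis_data(an) for an in analyses]
        # Example: separate the rows whose result is out of range
        out_of_range = [row for row in rows if row['outofrange']]
        return rows, out_of_range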
Example #5
    def get_slot_header_data(self, obj):
        """Prepare the data for the slot header template
        """

        item_obj = None
        item_title = ""
        item_url = ""
        item_img = ""
        item_img_url = ""
        item_img_text = ""
        additional_item_icons = []

        parent_obj = None
        parent_title = ""
        parent_url = ""
        parent_img = ""
        parent_img_text = ""
        additional_parent_icons = []

        sample_type_obj = None
        sample_type_title = ""
        sample_type_url = ""
        sample_type_img = ""
        sample_type_img_text = ""

        if IDuplicateAnalysis.providedBy(obj):
            # item
            request = obj.getRequest()
            item_obj = request
            item_title = api.get_id(request)
            item_url = api.get_url(request)
            item_img = "duplicate.png"
            item_img_url = api.get_url(request)
            item_img_text = t(_("Duplicate"))
            # additional item icons
            additional_item_icons.append(self.render_remarks_tag(request))
            # parent
            client = request.getClient()
            parent_obj = client
            parent_title = api.get_title(client)
            parent_url = api.get_url(client)
            parent_img = "client.png"
            parent_img_text = t(_("Client"))
            # sample type
            sample_type = request.getSampleType()
            sample_type_title = request.getSampleTypeTitle()
            sample_type_url = api.get_url(sample_type)
            sample_type_img = "sampletype.png"
            sample_type_img_text = t(_("Sample Type"))

        elif IReferenceAnalysis.providedBy(obj):
            # item
            sample = obj.getSample()
            item_obj = sample
            item_title = api.get_id(sample)
            item_url = api.get_url(sample)
            item_img_url = api.get_url(sample)
            item_img = "control.png"
            item_img_text = t(_("Control"))
            if obj.getReferenceType() == "b":
                item_img = "blank.png"
                item_img_text = t(_("Blank"))
            # parent
            supplier = obj.getSupplier()
            parent_obj = supplier
            parent_title = api.get_title(supplier)
            parent_url = api.get_url(supplier)
            parent_img = "supplier.png"
            parent_img_text = t(_("Supplier"))
        elif IRoutineAnalysis.providedBy(obj):
            # item
            request = obj.getRequest()
            item_obj = request
            item_title = api.get_id(request)
            item_url = api.get_url(request)
            item_img = "sample.png"
            item_img_url = api.get_url(request)
            item_img_text = t(_("Sample"))
            # additional item icons
            additional_item_icons.append(self.render_remarks_tag(request))

            # parent
            client = obj.getClient()
            parent_obj = client
            parent_title = api.get_title(client)
            parent_url = api.get_url(client)
            parent_img = "client.png"
            parent_img_text = t(_("Client"))
            # sample type
            client_ref = request.getClientReference()
            client_sid = request.getClientSampleID()
            tokens = filter(None, [client_ref, client_sid])
            sample_type_title = ' / '.join(tokens)
            sample_type = obj.getSampleType()
            sample_type_img = "sampletype.png"
            sample_type_img_text = t(_("Tank / Blend ID"))

        return {
            # item
            "item_obj": item_obj,
            "item_title": item_title,
            "item_url": item_url,
            "item_img": get_image(item_img, title=item_img_text),
            "item_img_url": item_img_url,
            "additional_item_icons": additional_item_icons,
            # parent
            "parent_obj": parent_obj,
            "parent_title": parent_title,
            "parent_url": parent_url,
            "parent_img": get_image(parent_img, title=parent_img_text),
            "additional_parent_icons": additional_parent_icons,
            # sample type
            "sample_type_obj": sample_type_obj,
            "sample_type_title": sample_type_title,
            "sample_type_url": sample_type_url,
            "sample_type_img": get_image(sample_type_img,
                                         title=sample_type_img_text),
        }
Example #6
    def __call__(self, action, objects):
        # Store invalid instruments-ref.analyses
        invalid_instrument_refs = defaultdict(set)

        # Get interims data
        interims_data = self.get_interims_data()

        for analysis in objects:
            uid = api.get_uid(analysis)

            # Need to save remarks?
            remarks = self.get_form_value("Remarks", uid, default="")
            analysis.setRemarks(remarks)

            # Need to save the instrument?
            instrument = self.get_form_value("Instrument", uid, None)
            if instrument is not None:
                # Could be an empty string
                instrument = instrument or None
                analysis.setInstrument(instrument)
                if instrument and IReferenceAnalysis.providedBy(analysis):
                    if is_out_of_range(analysis)[0]:
                        # This reference analysis is out of range, so we have
                        # to retract all analyses assigned to this same
                        # instrument that are awaiting for verification
                        invalid_instrument_refs[uid].add(analysis)
                    else:
                        # The reference result is valid, so make the instrument
                        # available again for further analyses
                        instrument.setDisposeUntilNextCalibrationTest(False)

            # Need to save the method?
            method = self.get_form_value("Method", uid, default=None)
            if method is not None:
                method = method or None
                analysis.setMethod(method)

            # Need to save analyst?
            analyst = self.get_form_value("Analyst", uid, default=None)
            if analyst is not None:
                analysis.setAnalyst(analyst)

            # Save uncertainty
            uncertainty = self.get_form_value("Uncertainty", uid, "")
            analysis.setUncertainty(uncertainty)

            # Save detection limit
            dlimit = self.get_form_value("DetectionLimit", uid, "")
            analysis.setDetectionLimitOperand(dlimit)

            # Interim fields
            interims = interims_data.get(uid, analysis.getInterimFields())
            analysis.setInterimFields(interims)

            # Save Hidden
            hidden = self.get_form_value("Hidden", uid, "")
            analysis.setHidden(hidden == "on")

            # Result
            result = self.get_form_value("Result",
                                         uid,
                                         default=analysis.getResult())
            analysis.setResult(result)

        # Submit all analyses
        transitioned = self.do_action(action, objects)
        if not transitioned:
            return self.redirect(message=_("No changes made"), level="warning")

        # If a reference analysis with an out-of-range result and instrument
        # assigned has been submitted, retract the routine analyses that are
        # awaiting verification and have the same instrument associated
        retracted = list()
        for invalid_instrument_uid in invalid_instrument_refs.keys():
            query = dict(
                getInstrumentUID=invalid_instrument_uid,
                portal_type=['Analysis', 'DuplicateAnalysis'],
                review_state='to_be_verified',
                cancellation_state='active',
            )
            brains = api.search(query, CATALOG_ANALYSIS_LISTING)
            for brain in brains:
                analysis = api.get_object(brain)
                failed_msg = '{0}: {1}'.format(
                    ulocalized_time(DateTime(), long_format=1),
                    _("Instrument failed reference test"))
                an_remarks = analysis.getRemarks()
                analysis.setRemarks('. '.join([an_remarks, failed_msg]))
                retracted.append(analysis)

        # If some analyses have been retracted because instrument failed a
        # reference test, then generate a pdf report
        if self.do_action("retract", retracted):
            # Create the Retracted Analyses List
            portal_url = api.get_url(api.get_portal())
            report = AnalysesRetractedListReport(self.context, self.request,
                                                 portal_url,
                                                 'Retracted analyses',
                                                 retracted)

            # Attach the pdf to all ReferenceAnalysis that failed (accessible
            # from the Instrument's Internal Calibration Tests list)
            pdf = report.toPdf()
            for refs in invalid_instrument_refs.values():
                for ref in refs:
                    ref.setRetractedAnalysesPdfReport(pdf)

            # Send the email
            try:
                report.sendEmail()
            except Exception as err_msg:
                message = "Unable to send email: {}".format(err_msg)
                logger.warn(message)

        # Redirect to success view
        return self.success(transitioned)
Example #7
def is_out_of_range(brain_or_object, result=_marker):
    """Checks if the result for the analysis passed in is out of range and/or
    out of shoulders range.

            min                                                   max
            warn            min                   max             warn
    ·········|---------------|=====================|---------------|·········
    ----- out-of-range -----><----- in-range ------><----- out-of-range -----
             <-- shoulder --><----- in-range ------><-- shoulder -->

    :param brain_or_object: A single catalog brain or content object
    :param result: Tentative result. If not provided, the analysis result is used
    :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
    :returns: Tuple of two elements. The first value is `True` if the result is
    out of range and `False` if it is in range. The second value is `True` if
    the result is out of shoulder range and `False` if it is in shoulder range
    :rtype: (bool, bool)
    """
    analysis = api.get_object(brain_or_object)
    if not IAnalysis.providedBy(analysis) and \
            not IReferenceAnalysis.providedBy(analysis):
        api.fail("{} is not supported. Needs to be IAnalysis or "
                 "IReferenceAnalysis".format(repr(analysis)))

    if result is _marker:
        result = api.safe_getattr(analysis, "getResult", None)

    if result in [None, '']:
        # Empty result
        return False, False

    if IDuplicateAnalysis.providedBy(analysis):
        # The result range for duplicate analyses is calculated from the
        # original result, applying a variation % as shoulders. If the analysis
        # has result options enabled or string results enabled, the system
        # returns an empty result range for the duplicate: the result must
        # match the original result 100%
        original = analysis.getAnalysis()
        original_result = original.getResult()

        # Does original analysis have a valid result?
        if original_result in [None, '']:
            return False, False

        # Does the original result type match the duplicate result type?
        if api.is_floatable(result) != api.is_floatable(original_result):
            return True, True

        # Does the analysis have result options enabled or a non-floatable result?
        if analysis.getResultOptions() or not api.is_floatable(original_result):
            # Always assume the result is 'out of shoulders', because we
            # consider the shoulders to be precisely the duplicate variation %
            out_of_range = original_result != result
            return out_of_range, out_of_range

    elif not api.is_floatable(result):
        # A non-duplicate with non-floatable result. There is no chance to know
        # if the result is out-of-range
        return False, False

    # Convert result to a float
    result = api.to_float(result)

    # Note that routine analyses, duplicates and reference analyses all
    # implement getResultsRange:
    # - For routine analyses, the function returns the valid range based on the
    #   specs assigned during the creation process.
    # - For duplicates, the valid range is the result of the analysis the
    #   duplicate was generated from +/- the duplicate variation.
    # - For reference analyses, getResultsRange returns the valid range as
    #   indicated in the Reference Sample from which the analysis was created.
    result_range = api.safe_getattr(analysis, "getResultsRange", None)
    if not result_range:
        # No result range defined or the passed in object does not suit
        return False, False

    # Maybe there is a custom adapter
    adapters = getAdapters((analysis,), IResultOutOfRange)
    for name, adapter in adapters:
        ret = adapter(result=result, specification=result_range)
        if not ret or not ret.get('out_of_range', False):
            continue
        if not ret.get('acceptable', True):
            # Out of range + out of shoulders
            return True, True
        # Out of range, but in shoulders
        return True, False

    result_range = ResultsRangeDict(result_range)

    # The assignment of the result as default fallback for min and max
    # guarantees the result will also be in range when no min/max values
    # are defined
    specs_min = api.to_float(result_range.min, result)
    specs_max = api.to_float(result_range.max, result)

    in_range = False
    min_operator = result_range.min_operator
    if min_operator == "geq":
        in_range = result >= specs_min
    else:
        in_range = result > specs_min

    max_operator = result_range.max_operator
    if in_range:
        if max_operator == "leq":
            in_range = result <= specs_max
        else:
            in_range = result < specs_max

    # If in range, no need to check shoulders
    if in_range:
        return False, False

    # Out of range, check shoulders. If no explicit warn_min or warn_max have
    # been defined, no shoulders must be considered for this analysis. Thus, use
    # specs' min and max as default fallback values
    warn_min = api.to_float(result_range.warn_min, specs_min)
    warn_max = api.to_float(result_range.warn_max, specs_max)
    in_shoulder = warn_min <= result <= warn_max
    return True, not in_shoulder
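A minimal usage sketch of the helper above (the analysis object is assumed to be an already-fetched routine, duplicate or reference analysis from the same module):

def range_status(analysis):
    """Hypothetical helper: map the (out_of_range, out_of_shoulders) tuple
    returned by is_out_of_range to a human-readable label
    """
    out_of_range, out_of_shoulders = is_out_of_range(analysis)
    if not out_of_range:
        return "in range"
    if not out_of_shoulders:
        # Outside min/max, but still within the warn_min/warn_max shoulders
        return "in shoulder range"
    return "out of range"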
Example #8
def is_out_of_range(brain_or_object, result=_marker):
    """Checks if the result for the analysis passed in is out of range and/or
    out of shoulders range.

            min                                                   max
            warn            min                   max             warn
    ·········|---------------|=====================|---------------|·········
    ----- out-of-range -----><----- in-range ------><----- out-of-range -----
             <-- shoulder --><----- in-range ------><-- shoulder -->

    :param brain_or_object: A single catalog brain or content object
    :param result: Tentative result. If not provided, the analysis result is used
    :type brain_or_object: ATContentType/DexterityContentType/CatalogBrain
    :returns: Tuple of two elements. The first value is `True` if the result is
    out of range and `False` if it is in range. The second value is `True` if
    the result is out of shoulder range and `False` if it is in shoulder range
    :rtype: (bool, bool)
    """
    analysis = api.get_object(brain_or_object)
    if not IAnalysis.providedBy(analysis) and \
            not IReferenceAnalysis.providedBy(analysis):
        api.fail("{} is not supported. Needs to be IAnalysis or "
                 "IReferenceAnalysis".format(repr(analysis)))

    if result is _marker:
        result = api.safe_getattr(analysis, "getResult", None)
    if not api.is_floatable(result):
        # Result is empty/None or not a valid number
        return False, False

    result = api.to_float(result)

    # Note that routine analyses, duplicates and reference analyses all
    # implement getResultsRange:
    # - For routine analyses, the function returns the valid range based on the
    #   specs assigned during the creation process.
    # - For duplicates, the valid range is the result of the analysis the
    #   duplicate was generated from +/- the duplicate variation.
    # - For reference analyses, getResultsRange returns the valid range as
    #   indicated in the Reference Sample from which the analysis was created.
    result_range = api.safe_getattr(analysis, "getResultsRange", None)
    if not result_range:
        # No result range defined or the passed in object does not suit
        return False, False

    # Maybe there is a custom adapter
    adapters = getAdapters((analysis, ), IResultOutOfRange)
    for name, adapter in adapters:
        ret = adapter(result=result, specification=result_range)
        if not ret or not ret.get('out_of_range', False):
            continue
        if not ret.get('acceptable', True):
            # Out of range + out of shoulders
            return True, True
        # Out of range, but in shoulders
        return True, False

    # The assignment of the result as default fallback for min and max
    # guarantees the result will also be in range when no min/max values
    # are defined
    specs_min = api.to_float(result_range.get('min', result), result)
    specs_max = api.to_float(result_range.get('max', result), result)
    if specs_min <= result <= specs_max:
        # In range, no need to check shoulders
        return False, False

    # Out of range, check shoulders. If no explicit warn_min or warn_max have
    # been defined, no shoulders must be considered for this analysis. Thus, use
    # specs' min and max as default fallback values
    warn_min = api.to_float(result_range.get('warn_min', specs_min), specs_min)
    warn_max = api.to_float(result_range.get('warn_max', specs_max), specs_max)
    in_shoulder = warn_min <= result <= warn_max
    return True, not in_shoulder
Example #9
def assign_retracted_to_retests(portal):
    logger.info("Reassigning retracted to retests ...")
    # Note this is confusing: the getRetested index tells us whether the
    # analysis is a retest, not the other way round (that the analysis has
    # been retested)!
    catalog = api.get_tool(CATALOG_ANALYSIS_LISTING)
    if "getRetested" not in catalog.indexes():
        return

    processed = list()
    query = dict(getRetested="True")
    brains = api.search(query, CATALOG_ANALYSIS_LISTING)
    total = len(brains)
    for num, brain in enumerate(brains):
        retest = api.get_object(brain)
        retest_uid = api.get_uid(retest)
        if retest.getRetestOf():
            # We've already resolved this inconsistency
            total -= 1
            continue
        # Look for the retracted analysis this retest was generated from
        if IDuplicateAnalysis.providedBy(retest):
            worksheet = retest.getWorksheet()
            if not worksheet:
                total -= 1
                continue
            for dup in worksheet.get_duplicates_for(retest.getAnalysis()):
                if api.get_uid(dup) != retest_uid \
                        and api.get_workflow_status_of(dup) == "retracted":
                    retest.setRetestOf(dup)
                    processed.append(retest)
                    break
        elif IReferenceAnalysis.providedBy(retest):
            worksheet = retest.getWorksheet()
            if not worksheet:
                total -= 1
                continue
            ref_type = retest.getReferenceType()
            slot = worksheet.get_slot_position(retest.getSample(), ref_type)
            for ref in worksheet.get_analyses_at(slot):
                if api.get_uid(ref) != retest_uid \
                        and api.get_workflow_status_of(ref) == "retracted":
                    retest.setRetestOf(ref)
                    processed.append(retest)
                    break
        else:
            request = retest.getRequest()
            keyword = retest.getKeyword()
            analyses = request.getAnalyses(review_state="retracted",
                                           getKeyword=keyword)
            if not analyses:
                total -= 1
                continue
            retest.setRetestOf(analyses[-1])
            processed.append(retest)

        if num % 100 == 0:
            logger.info("Reassigning retracted analysis: {}/{}".format(
                num, total))

    del_metadata(portal,
                 catalog_id=CATALOG_ANALYSIS_LISTING,
                 column="getRetested")

    add_metadata(portal,
                 catalog_id=CATALOG_ANALYSIS_LISTING,
                 column="getRetestOfUID")

    del_index(portal,
              catalog_id=CATALOG_ANALYSIS_LISTING,
              index_name="getRetested")

    add_index(portal,
              catalog_id=CATALOG_ANALYSIS_LISTING,
              index_name="isRetest",
              index_attribute="isRetest",
              index_metatype="BooleanIndex")

    total = len(processed)
    for num, analysis in enumerate(processed):
        if num % 100 == 0:
            logger.info("Reindexing retests: {}/{}".format(num, total))
        analysis.reindexObject(idxs="isRetest")
Example #10
    def workflow_action_submit(self):
        uids = self.get_selected_uids()
        if not uids:
            message = _('No items selected.')
            self.context.plone_utils.addPortalMessage(message, 'info')
            self.request.response.redirect(self.context.absolute_url())
            return

        if not is_active(self.context):
            message = _('Item is inactive.')
            self.context.plone_utils.addPortalMessage(message, 'info')
            self.request.response.redirect(self.context.absolute_url())
            return

        form = self.request.form
        remarks = form.get('Remarks', [{}])[0]
        results = form.get('Result', [{}])[0]
        retested = form.get('retested', {})
        methods = form.get('Method', [{}])[0]
        instruments = form.get('Instrument', [{}])[0]
        analysts = self.request.form.get('Analyst', [{}])[0]
        uncertainties = self.request.form.get('Uncertainty', [{}])[0]
        dlimits = self.request.form.get('DetectionLimit', [{}])[0]

        # XXX combine data from multiple bika listing tables.
        # TODO: Is this necessary?
        item_data = {}
        if 'item_data' in form:
            if type(form['item_data']) == list:
                for i_d in form['item_data']:
                    for i, d in json.loads(i_d).items():
                        item_data[i] = d
            else:
                item_data = json.loads(form['item_data'])

        # Store affected Analysis Requests
        affected_ars = set()

        # Store affected Worksheets
        affected_ws = set()

        # Store invalid instruments-ref.analyses
        invalid_instrument_refs = dict()

        # We manually query by all analyses UIDs at once here instead of using
        # _get_selected_items from the base class, because that function
        # fetches the objects by UID sequentially, one by one
        query = dict(UID=uids)
        for brain in api.search(query, CATALOG_ANALYSIS_LISTING):
            uid = api.get_uid(brain)
            analysis = api.get_object(brain)

            # If not active, do nothing
            if not is_active(brain):
                continue

            # Need to save remarks?
            if uid in remarks:
                analysis.setRemarks(remarks[uid])

            # Retested?
            if uid in retested:
                analysis.setRetested(retested[uid])

            # Need to save the instrument?
            if uid in instruments:
                instrument = instruments[uid] or None
                analysis.setInstrument(instrument)
                if instrument and IReferenceAnalysis.providedBy(analysis):
                    if is_out_of_range(analysis)[0]:
                        # This reference analysis is out of range, so we have
                        # to retract all analyses assigned to this same
                        # instrument that are awaiting for verification
                        if uid not in invalid_instrument_refs:
                            invalid_instrument_refs[uid] = set()
                        invalid_instrument_refs[uid].add(analysis)
                    else:
                        # The reference result is valid, so make the instrument
                        # available again for further analyses
                        instrument.setDisposeUntilNextCalibrationTest(False)

            # Need to save the method?
            if uid in methods:
                method = methods[uid] or None
                analysis.setMethod(method)

            # Need to save the analyst?
            if uid in analysts:
                analysis.setAnalyst(analysts[uid])

            # Need to save the uncertainty?
            if uid in uncertainties:
                analysis.setUncertainty(uncertainties[uid])

            # Need to save the detection limit?
            if uid in dlimits and dlimits[uid]:
                analysis.setDetectionLimitOperand(dlimits[uid])

            # Need to save results?
            submitted = False
            if uid in results and results[uid]:
                interims = item_data.get(uid, [])
                analysis.setInterimFields(interims)
                analysis.setResult(results[uid])

                # Can the analysis be submitted?
                # An analysis can only be submitted if all its dependencies
                # are valid and have been submitted already
                can_submit = True
                invalid_states = [
                    'to_be_sampled', 'to_be_preserved', 'sample_due',
                    'sample_received'
                ]
                for dependency in analysis.getDependencies():
                    if in_state(dependency, invalid_states):
                        can_submit = False
                        break
                if can_submit:
                    # doActionFor transitions the analysis to verif pending,
                    # so must only be done when results are submitted.
                    doActionFor(analysis, 'submit')
                    submitted = True
                    if IRequestAnalysis.providedBy(analysis):
                        # Store the AR uids to be reindexed later.
                        affected_ars.add(brain.getParentUID)

                    if brain.worksheetanalysis_review_state == 'assigned':
                        worksheet_uid = analysis.getWorksheetUID()
                        if worksheet_uid:
                            affected_ws.add(worksheet_uid)

            if not submitted:
                # Analysis has not been submitted, so we need to reindex the
                # object manually, to update catalog's metadata.
                analysis.reindexObject()

        # If a reference analysis with an out-of-range result and instrument
        # assigned has been submitted, retract the routine analyses that are
        # awaiting verification and have the same instrument associated
        retracted = list()
        for invalid_instrument_uid in invalid_instrument_refs.keys():
            query = dict(
                getInstrumentUID=invalid_instrument_uid,
                portal_type=['Analysis', 'DuplicateAnalysis'],
                review_state='to_be_verified',
                cancellation_state='active',
            )
            brains = api.search(query, CATALOG_ANALYSIS_LISTING)
            for brain in brains:
                analysis = api.get_object(brain)
                failed_msg = '{0}: {1}'.format(
                    ulocalized_time(DateTime(), long_format=1),
                    _("Instrument failed reference test"))
                an_remarks = analysis.getRemarks()
                analysis.setRemarks('. '.join([an_remarks, failed_msg]))
                doActionFor(analysis, 'retract')
                retracted.append(analysis)

        # If some analyses have been retracted because instrument failed a
        # reference test, then generate a pdf report
        if retracted:
            # Create the Retracted Analyses List
            report = AnalysesRetractedListReport(self.context, self.request,
                                                 self.portal_url,
                                                 'Retracted analyses',
                                                 retracted)

            # Attach the pdf to all ReferenceAnalysis that failed (accessible
            # from the Instrument's Internal Calibration Tests list)
            pdf = report.toPdf()
            for refs in invalid_instrument_refs.values():
                for ref in refs:
                    ref.setRetractedAnalysesPdfReport(pdf)

            # Send the email
            try:
                report.sendEmail()
            except:
                pass

        # Finally, when we are done processing all applicable analyses, we must
        # attempt to initiate the submit transition on the ARs and Worksheets
        # the processed analyses belong to.
        # We stick only to affected_ars, and affected_ws

        # Reindex the Analysis Requests for which at least one Analysis has
        # been submitted. We do this here because one AR can contain multiple
        # Analyses, so better to just reindex the AR once instead of each time.
        # The AR Catalog contains some metadata that relies on the Analyses an
        # Analysis Request contains.
        if affected_ars:
            query = dict(UID=list(affected_ars), portal_type="AnalysisRequest")
            for ar_brain in api.search(query,
                                       CATALOG_ANALYSIS_REQUEST_LISTING):
                if ar_brain.review_state == 'to_be_verified':
                    continue
                ar = api.get_object(ar_brain)
                if isTransitionAllowed(ar, "submit"):
                    doActionFor(ar, "submit")
                else:
                    ar.reindexObject()

        if affected_ws:
            query = dict(UID=list(affected_ws), portal_type="Worksheet")
            for ws_brain in api.search(query, CATALOG_WORKSHEET_LISTING):
                if ws_brain.review_state == 'to_be_verified':
                    continue
                ws = api.get_object(ws_brain)
                if isTransitionAllowed(ws, "submit"):
                    doActionFor(ws, "submit")

        message = PMF("Changes saved.")
        self.context.plone_utils.addPortalMessage(message, 'info')
        self.destination_url = self.request.get_header(
            "referer", self.context.absolute_url())
        self.request.response.redirect(self.destination_url)
Example #11
    def workflow_action_submit(self):
        uids = self.get_selected_uids()
        if not uids:
            message = _('No items selected.')
            self.context.plone_utils.addPortalMessage(message, 'info')
            self.request.response.redirect(self.context.absolute_url())
            return

        if not is_active(self.context):
            message = _('Item is inactive.')
            self.context.plone_utils.addPortalMessage(message, 'info')
            self.request.response.redirect(self.context.absolute_url())
            return

        form = self.request.form
        remarks = form.get('Remarks', [{}])[0]
        results = form.get('Result', [{}])[0]
        methods = form.get('Method', [{}])[0]
        instruments = form.get('Instrument', [{}])[0]
        analysts = self.request.form.get('Analyst', [{}])[0]
        uncertainties = self.request.form.get('Uncertainty', [{}])[0]
        dlimits = self.request.form.get('DetectionLimit', [{}])[0]

        # XXX combine data from multiple bika listing tables.
        # TODO: Is this necessary?
        item_data = {}
        if 'item_data' in form:
            if type(form['item_data']) == list:
                for i_d in form['item_data']:
                    for i, d in json.loads(i_d).items():
                        item_data[i] = d
            else:
                item_data = json.loads(form['item_data'])

        # Store invalid instruments-ref.analyses
        invalid_instrument_refs = dict()

        # We manually query by all analyses UIDs at once here instead of using
        # _get_selected_items from the base class, because that function
        # fetches the objects by UID sequentially, one by one
        actions_pool = ActionsPool()
        query = dict(UID=uids, cancellation_state="active")
        for brain in api.search(query, CATALOG_ANALYSIS_LISTING):
            uid = api.get_uid(brain)
            analysis = api.get_object(brain)

            # Need to save remarks?
            if uid in remarks:
                analysis.setRemarks(remarks[uid])

            # Need to save the instrument?
            if uid in instruments:
                instrument = instruments[uid] or None
                analysis.setInstrument(instrument)
                if instrument and IReferenceAnalysis.providedBy(analysis):
                    if is_out_of_range(analysis)[0]:
                        # This reference analysis is out of range, so we have
                        # to retract all analyses assigned to this same
                        # instrument that are awaiting for verification
                        if uid not in invalid_instrument_refs:
                            invalid_instrument_refs[uid] = set()
                        invalid_instrument_refs[uid].add(analysis)
                    else:
                        # The reference result is valid, so make the instrument
                        # available again for further analyses
                        instrument.setDisposeUntilNextCalibrationTest(False)

            # Need to save the method?
            if uid in methods:
                method = methods[uid] or None
                analysis.setMethod(method)

            # Need to save the analyst?
            if uid in analysts:
                analysis.setAnalyst(analysts[uid])

            # Need to save the uncertainty?
            if uid in uncertainties:
                analysis.setUncertainty(uncertainties[uid])

            # Need to save the detection limit?
            analysis.setDetectionLimitOperand(dlimits.get(uid, ""))

            interims = item_data.get(uid, analysis.getInterimFields())
            analysis.setInterimFields(interims)
            analysis.setResult(results.get(uid, analysis.getResult()))

            # Add this analysis to the actions pool. We want to submit all them
            # together, when all have values set for results, interims, etc.
            actions_pool.add(analysis, "submit")

        # Submit all analyses
        actions_pool.resume()

        # If a reference analysis with an out-of-range result and instrument
        # assigned has been submitted, retract the routine analyses that are
        # awaiting verification and have the same instrument associated
        retracted = list()
        for invalid_instrument_uid in invalid_instrument_refs.keys():
            query = dict(
                getInstrumentUID=invalid_instrument_uid,
                portal_type=['Analysis', 'DuplicateAnalysis'],
                review_state='to_be_verified',
                cancellation_state='active',
            )
            brains = api.search(query, CATALOG_ANALYSIS_LISTING)
            for brain in brains:
                analysis = api.get_object(brain)
                failed_msg = '{0}: {1}'.format(
                    ulocalized_time(DateTime(), long_format=1),
                    _("Instrument failed reference test"))
                an_remarks = analysis.getRemarks()
                analysis.setRemarks('. '.join([an_remarks, failed_msg]))
                doActionFor(analysis, 'retract')
                retracted.append(analysis)

        # If some analyses have been retracted because instrument failed a
        # reference test, then generate a pdf report
        if retracted:
            # Create the Retracted Analyses List
            report = AnalysesRetractedListReport(self.context, self.request,
                                                 self.portal_url,
                                                 'Retracted analyses',
                                                 retracted)

            # Attach the pdf to all ReferenceAnalysis that failed (accessible
            # from the Instrument's Internal Calibration Tests list)
            pdf = report.toPdf()
            for refs in invalid_instrument_refs.values():
                for ref in refs:
                    ref.setRetractedAnalysesPdfReport(pdf)

            # Send the email
            try:
                report.sendEmail()
            except:
                pass

        message = PMF("Changes saved.")
        self.context.plone_utils.addPortalMessage(message, 'info')
        self.destination_url = self.request.get_header(
            "referer", self.context.absolute_url())
        self.request.response.redirect(self.destination_url)