Example #1
 def get_analysis_spec(self, analysis):
     keyword = analysis.getService().getKeyword()
     uid = analysis.UID()
     if hasattr(analysis.aq_parent, 'getResultsRange'):
         rr = dicts_to_dict(analysis.aq_parent.getResultsRange(), 'keyword')
         return rr.get(analysis.getKeyword(), None)
     if hasattr(analysis.aq_parent, 'getReferenceResults'):
         rr = dicts_to_dict(analysis.aq_parent.getReferenceResults(), 'uid')
         return rr.get(analysis.UID(), None)
     return {'keyword': keyword, 'uid': uid, 'min': '', 'max': '', 'error': ''}
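Throughout these examples, dicts_to_dict turns a list of result-range dicts into a lookup table keyed by one of their fields (usually 'keyword', or 'uid' for reference results). The following is only a minimal sketch of that assumed behaviour, inferred from the call sites rather than taken from the bika.lims source:

    # Illustrative sketch only: assumed behaviour of dicts_to_dict, inferred
    # from how these examples call it (not the actual bika.lims code).
    def dicts_to_dict_sketch(list_of_dicts, key_name):
        lookup = {}
        for item in list_of_dicts or []:
            lookup[item[key_name]] = item
        return lookup

    ranges = [{'keyword': 'Cu', 'min': 5, 'max': 10, 'error': 10},
              {'keyword': 'Mg', 'min': 6, 'max': 11, 'error': 11}]
    by_keyword = dicts_to_dict_sketch(ranges, 'keyword')
    # by_keyword['Cu'] -> {'keyword': 'Cu', 'min': 5, 'max': 10, 'error': 10}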
Example #2
 def get_results_range(self):
     """Get the results Range from the AR
     """
     spec = self.context.getResultsRange()
     if spec:
         return dicts_to_dict(spec, "keyword")
     return ResultsRangeDict()
Example #3
 def __call__(self, result=None, specification=None):
     workflow = getToolByName(self.context, 'portal_workflow')
     astate = workflow.getInfoFor(self.context, 'review_state')
     if astate == 'retracted':
         return None
     result = result is not None and str(result) or self.context.getResult()
     if result == '':
         return None
     # if analysis result is not a number, then we assume in range:
     try:
         result = float(str(result))
     except ValueError:
         return None
     # The spec is found in the parent AR's ResultsRange field.
     if not specification:
         rr = dicts_to_dict(self.context.aq_parent.getResultsRange(), 'keyword')
         specification = rr.get(self.context.getKeyword(), None)
         # No specs available, assume in range:
         if not specification:
             return None
     outofrange, acceptable = \
         self.isOutOfRange(result,
                           specification.get('min', ''),
                           specification.get('max', ''),
                           specification.get('error', ''))
     return {
         'out_of_range': outofrange,
         'acceptable': acceptable,
         'spec_values': specification
     }
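Examples #3 and #5 hand the range's min, max and error values to self.isOutOfRange and get an (out_of_range, acceptable) pair back. The sketch below is not the bika.lims implementation; it only mirrors the shoulder computation spelled out inline in Example #14, where a result just outside the limits still counts as acceptable when it falls within the error percentage:

    # Illustrative sketch only (assumed semantics), mirroring the shoulder
    # computation shown in Example #14.
    def is_out_of_range_sketch(result, spec_min, spec_max, error):
        try:
            result, spec_min, spec_max = \
                float(result), float(spec_min), float(spec_max)
        except (TypeError, ValueError):
            return False, True  # non-numeric values: assume in range
        try:
            error = float(error)
        except (TypeError, ValueError):
            error = 0.0
        if spec_min <= result <= spec_max:
            return False, True  # within range
        error_amount = (result / 100.0) * error
        in_shoulder = (result < spec_min and result + error_amount >= spec_min) \
            or (result > spec_max and result - error_amount <= spec_max)
        return True, in_shoulder  # out of range; acceptable only in shoulder

    # e.g. is_out_of_range_sketch(10.4, 5, 10, 10) -> (True, True)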
Example #4
 def get_analysis_spec(self, analysis):
     if analysis.portal_type == 'DuplicateAnalysis':
         # No specs for duplicates, ever. This should not be necessary,
         # but during rendering worksheets, duplicates are passed in here.
         return None
     if hasattr(analysis, 'getResultsRange'):
         return analysis.getResultsRange()
     if hasattr(analysis.aq_parent, 'getResultsRange'):
         rr = dicts_to_dict(analysis.aq_parent.getResultsRange(), 'keyword')
         return rr.get(analysis.getKeyword(), None)
     if hasattr(analysis.aq_parent, 'getReferenceResults'):
         rr = dicts_to_dict(analysis.aq_parent.getReferenceResults(), 'uid')
         return rr.get(analysis.UID(), None)
     keyword = analysis.getService().getKeyword()
     uid = analysis.UID()
     return {'keyword': keyword, 'uid': uid, 'min': '', 'max': '', 'error': ''}
Example #5
 def __call__(self, result=None, specification=None):
     workflow = getToolByName(self.context, 'portal_workflow')
     astate = workflow.getInfoFor(self.context, 'review_state')
     if astate == 'retracted':
         return None
     result = result is not None and str(result) or self.context.getResult()
     if result == '':
         return None
     # if analysis result is not a number, then we assume in range:
     try:
         result = float(str(result))
     except ValueError:
         return None
     # The spec is found in the parent AR's ResultsRange field.
     if not specification:
         rr = dicts_to_dict(self.context.aq_parent.getResultsRange(),
                            'keyword')
         specification = rr.get(self.context.getKeyword(), None)
         # No specs available, assume in range:
         if not specification:
             return None
     outofrange, acceptable = \
         self.isOutOfRange(result,
                           specification.get('min', ''),
                           specification.get('max', ''),
                           specification.get('error', ''))
     return {
         'out_of_range': outofrange,
         'acceptable': acceptable,
         'spec_values': specification
     }
Example #6
 def update(self):
     """Update hook
     """
     super(AnalysisSpecificationView, self).update()
     self.allow_edit = self.is_edit_allowed()
     results_range = self.context.getResultsRange()
     self.specification = dicts_to_dict(results_range, "keyword")
     self.dynamic_spec = self.context.getDynamicAnalysisSpec()
Example #7
    def get_specs_from_request(self, dicts_to_dict_rr=None):
        """Specifications for analyses are given on the request in *Spec

        >>> portal = layer['portal']
        >>> portal_url = portal.absolute_url()
        >>> from plone.app.testing import SITE_OWNER_NAME
        >>> from plone.app.testing import SITE_OWNER_PASSWORD

        >>> browser = layer['getBrowser'](portal, loggedIn=True, username=SITE_OWNER_NAME, password=SITE_OWNER_PASSWORD)
        >>> browser.open(portal_url+"/@@API/create", "&".join([
        ... "obj_type=AnalysisRequest",
        ... "Client=portal_type:Client|id:client-1",
        ... "SampleType=portal_type:SampleType|title:Apple Pulp",
        ... "Contact=portal_type:Contact|getFullname:Rita Mohale",
        ... "Services:list=portal_type:AnalysisService|title:Calcium",
        ... "Services:list=portal_type:AnalysisService|title:Copper",
        ... "Services:list=portal_type:AnalysisService|title:Magnesium",
        ... "SamplingDate=2013-09-29",
        ... "Specification=portal_type:AnalysisSpec|title:Apple Pulp",
        ... 'ResultsRange=[{"keyword":"Cu","min":5,"max":10,"error":10},{"keyword":"Mg","min":6,"max":11,"error":11}]',
        ... ]))
        >>> browser.contents
        '{..."success": true...}'

        """

        # valid output for ResultsRange goes here.
        specs = []

        context = self.context
        request = self.request
        brains = resolve_request_lookup(context, request, "Specification")
        spec_rr = brains[0].getObject().getResultsRange() if brains else {}
        spec_rr = dicts_to_dict(spec_rr, 'keyword')
        #
        bsc = getToolByName(context, "bika_setup_catalog")
        req_rr = request.get('ResultsRange', "[]")
        try:
            req_rr = json.loads(req_rr)
        except ValueError:
            raise BadRequest("Invalid value for ResultsRange (%s)" % req_rr)
        req_rr = dicts_to_dict(req_rr, 'keyword')
        #
        spec_rr.update(req_rr)

        return spec_rr.values()
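The keyed update above is what gives the request-supplied ranges precedence: both the selected specification's ranges and the ResultsRange posted on the request are keyed by 'keyword', so dict.update() replaces the spec's defaults wherever the request provides a value. A small illustration with hypothetical values:

    # Hypothetical values, keyed by 'keyword' as in the method above.
    spec_rr = {'Cu': {'keyword': 'Cu', 'min': 1, 'max': 20, 'error': 5}}
    req_rr = {'Cu': {'keyword': 'Cu', 'min': 5, 'max': 10, 'error': 10}}
    spec_rr.update(req_rr)
    # spec_rr['Cu'] -> {'keyword': 'Cu', 'min': 5, 'max': 10, 'error': 10}
    # i.e. the range posted in the request wins over the spec's default.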
Example #8
    def get_results_range(self):
        """Get the results Range from the Sample, but gives priority to the
        result ranges set in analyses. This guarantees that result ranges for
        already present analyses are not overriden after form submission
        """
        # Extract the result ranges from Sample analyses
        analyses = self.analyses.values()
        analyses_rrs = map(lambda an: an.getResultsRange(), analyses)
        analyses_rrs = filter(None, analyses_rrs)
        rrs = dicts_to_dict(analyses_rrs, "keyword")

        # Discard ranges from the Sample that are already present in analyses
        sample_rrs = self.context.getResultsRange()
        sample_rrs = filter(lambda rr: rr["keyword"] not in rrs, sample_rrs)
        sample_rrs = dicts_to_dict(sample_rrs, "keyword")

        # Extend result ranges with those from Sample
        rrs.update(sample_rrs)
        return rrs
Example #9
 def get_spec_from_ar(self, ar, keyword):
     empty = {'min': '', 'max': '', 'error': '', 'keyword': keyword}
     spec = ar.getResultsRange()
     if spec:
         return dicts_to_dict(spec, 'keyword').get(keyword, empty)
     return empty
Example #10
 def get_analysis_spec(self, analysis):
     rr = dicts_to_dict(analysis.aq_parent.getResultsRange(), 'keyword')
     return rr.get(analysis.getKeyword(), None)
Example #11
    def __call__(self):

        form = self.request.form
        plone.protect.CheckAuthenticator(self.request.form)
        plone.protect.PostOnly(self.request.form)
        came_from = 'came_from' in form and form['came_from'] or 'add'
        wftool = getToolByName(self.context, 'portal_workflow')
        uc = getToolByName(self.context, 'uid_catalog')
        bsc = getToolByName(self.context, 'bika_setup_catalog')

        errors = {}

        form_parts = json.loads(self.request.form['parts'])

        # First make a list of non-empty columns
        columns = []
        for column in range(int(form['col_count'])):
            name = 'ar.%s' % column
            ar = form.get(name, None)
            if ar and 'Analyses' in ar.keys():
                columns.append(column)

        if len(columns) == 0:
            ajax_form_error(errors, message=t(_("No analyses have been selected")))
            return json.dumps({'errors': errors})

        # Now some basic validation
        required_fields = [field.getName() for field
                           in AnalysisRequestSchema.fields()
                           if field.required]

        for column in columns:
            formkey = "ar.%s" % column
            ar = form[formkey]

            # check that required fields have values
            for field in required_fields:
                # This one is still special.
                if field in ['RequestID']:
                    continue
                # And these are not required if this is a secondary AR
                if ar.get('Sample', '') != '' and field in [
                    'SamplingDate',
                    'SampleType'
                ]:
                    continue
                if not ar.get(field, ''):
                    ajax_form_error(errors, field, column)
        # Return errors if there are any
        if errors:
            return json.dumps({'errors': errors})

        # Get the prices from the form data
        prices = form.get('Prices', None)
        # Initialize the Analysis Request collection
        ARs = []
        # if a new profile is created automatically,
        # this flag triggers the status message
        new_profile = None
        # The actual submission
        for column in columns:
            # Get partitions from the form data
            if form_parts:
                partitions = form_parts[str(column)]
            else:
                partitions = []
            # Get the form data using the appropriate form key
            formkey = "ar.%s" % column
            values = form[formkey].copy()
            # resolved_values is formatted so it is acceptable to the
            # Archetypes widget machinery
            resolved_values = {}
            for k, v in values.items():
                # Analyses, we handle that specially.
                if k == 'Analyses':
                    continue
                # Insert the reference *_uid values instead of titles.
                if "_uid" in k:
                    v = values[k]
                    v = v.split(",") if v and "," in v else v
                    fname = k.replace("_uid", "")
                    resolved_values[fname] = v
                    continue
                # we want to write the UIDs and ignore the title values
                if k+"_uid" in values:
                    continue
                resolved_values[k] = values[k]
            # Get the analyses from the form data
            analyses = values["Analyses"]

            # Gather the specifications from the form
            specs = json.loads(form['copy_to_new_specs']).get(str(column), {})
            if not specs:
                specs = json.loads(form['specs']).get(str(column), {})
            if specs:
                specs = dicts_to_dict(specs, 'keyword')
            # Modify the spec with all manually entered values
            for service_uid in analyses:
                min_element_name = "ar.%s.min.%s" % (column, service_uid)
                max_element_name = "ar.%s.max.%s" % (column, service_uid)
                error_element_name = "ar.%s.error.%s" % (column, service_uid)
                service_keyword = bsc(UID=service_uid)[0].getKeyword
                if min_element_name in form:
                    if service_keyword not in specs:
                        specs[service_keyword] = {}
                    specs[service_keyword]["keyword"] = service_keyword
                    specs[service_keyword]["min"] = form[min_element_name]
                    specs[service_keyword]["max"] = form[max_element_name]
                    specs[service_keyword]["error"] = form[error_element_name]

            # Selecting a template sets the hidden 'parts' field to template values.
            # Selecting a profile will allow ar_add.js to fill in the parts field.
            # The result is the same once we are here.
            if not partitions:
                partitions = [{
                    'services': [],
                    'container': None,
                    'preservation': '',
                    'separate': False
                }]
            # Apply DefaultContainerType to partitions without a container
            default_container_type = resolved_values.get(
                'DefaultContainerType', None
            )
            if default_container_type:
                container_type = bsc(UID=default_container_type)[0].getObject()
                containers = container_type.getContainers()
                for partition in partitions:
                    if not partition.get("container", None):
                        partition['container'] = containers
            # Retrieve the catalogue reference to the client
            client = uc(UID=resolved_values['Client'])[0].getObject()
            # Create the Analysis Request
            ar = create_analysisrequest(
                client,
                self.request,
                resolved_values,
                analyses=analyses,
                partitions=partitions,
                specifications=specs.values(),
                prices=prices
            )
            # Add the created analysis request to the list
            ARs.append(ar.getId())
        # Display the appropriate message after creation
        if len(ARs) > 1:
            message = _("Analysis requests ${ARs} were successfully created.",
                        mapping={'ARs': safe_unicode(', '.join(ARs))})
        else:
            message = _("Analysis request ${AR} was successfully created.",
                        mapping={'AR': safe_unicode(ARs[0])})
        self.context.plone_utils.addPortalMessage(message, 'info')
        # Automatic label printing
        # Won't print labels for Register on Secondary ARs
        new_ars = None
        if came_from == 'add':
            new_ars = [ar for ar in ARs if ar[-2:] == '01']
        if 'register' in self.context.bika_setup.getAutoPrintStickers() and new_ars:
            return json.dumps({
                'success': message,
                'stickers': new_ars,
                'stickertemplate': self.context.bika_setup.getAutoStickerTemplate()
            })
        else:
            return json.dumps({'success': message})
Example #12
 def get_spec_from_ar(self, ar, keyword):
     empty = ResultsRangeDict(keyword=keyword)
     spec = ar.getResultsRange()
     if spec:
         return dicts_to_dict(spec, 'keyword').get(keyword, empty)
     return empty
Example #13
    def analysis_specification(self):
        ar = self.context.aq_parent
        rr = dicts_to_dict(ar.getResultsRange(), 'keyword')

        return rr[self.context.getService().getKeyword()]
Example #14
    def __call__(self):
        bsc = getToolByName(self.context, 'bika_setup_catalog')
        bac = getToolByName(self.context, 'bika_analysis_catalog')
        self.report_content = {}
        parms = []
        headings = {}
        headings['header'] = _("Analyses out of range")
        headings['subheader'] = _("Analyses results out of specified range")

        count_all = 0

        query = {"portal_type": "Analysis",
                 "sort_order": "reverse"}

        spec_uid = self.request.form.get("spec", False)
        spec_obj = None
        spec_title = ""
        if spec_uid:
            brains = bsc(UID=spec_uid)
            if brains:
                spec_obj = brains[0].getObject()
                spec_title = spec_obj.Title()
        parms.append(
            {"title": _("Range spec"),
             "value": spec_title,
             "type": "text"})

        date_query = formatDateQuery(self.context, 'Received')
        if date_query:
            query['getDateReceived'] = date_query
            received = formatDateParms(self.context, 'Received')
        else:
            received = 'Undefined'
        parms.append(
            {'title': _('Received'),
             'value': received,
             'type': 'text'})

        wf_tool = getToolByName(self.context, 'portal_workflow')
        if self.request.form.has_key('bika_analysis_workflow'):
            query['review_state'] = self.request.form['bika_analysis_workflow']
            review_state = wf_tool.getTitleForStateOnType(
                self.request.form['bika_analysis_workflow'], 'Analysis')
        else:
            review_state = 'Undefined'
        parms.append(
            {'title': _('Status'),
             'value': review_state,
             'type': 'text'})

        if self.request.form.has_key('bika_cancellation_workflow'):
            query['cancellation_state'] = self.request.form[
                'bika_cancellation_workflow']
            cancellation_state = wf_tool.getTitleForStateOnType(
                self.request.form['bika_cancellation_workflow'], 'Analysis')
        else:
            cancellation_state = 'Undefined'
        parms.append(
            {'title': _('Active'),
             'value': cancellation_state,
             'type': 'text'})

        if self.request.form.has_key('bika_worksheetanalysis_workflow'):
            query['worksheetanalysis_review_state'] = self.request.form[
                'bika_worksheetanalysis_workflow']
            ws_review_state = wf_tool.getTitleForStateOnType(
                self.request.form['bika_worksheetanalysis_workflow'], 'Analysis')
        else:
            ws_review_state = 'Undefined'
        parms.append(
            {'title': _('Assigned to worksheet'),
             'value': ws_review_state,
             'type': 'text'})

        # and now lets do the actual report lines
        col_heads = [_('Client'),
                     _('Request'),
                     _('Sample type'),
                     _('Sample point'),
                     _('Category'),
                     _('Analysis'),
                     _('Result'),
                     _('Min'),
                     _('Max'),
                     _('Status'),
        ]
        if isAttributeHidden('Sample', 'SamplePoint'):
            col_heads.remove(_('Sample point'))

        formats = {'columns': 10,
                   'col_heads': col_heads,
                   'class': '',
        }

        datalines = []

        for a_proxy in bac(query):
            analysis = a_proxy.getObject()
            if analysis.getResult():
                try:
                    result = float(analysis.getResult())
                except (TypeError, ValueError):
                    continue
            else:
                continue

            keyword = analysis.getKeyword()

            # determine which specs to use for this particular analysis
            # 1) if a spec is given in the query form, use it.
            # 2) otherwise, use the ranges set on the parent AR.
            # If neither provides a range for this keyword, skip the analysis.
            spec_dict = False
            if spec_obj:
                rr = spec_obj.getResultsRangeDict()
                if keyword in rr:
                    spec_dict = rr[keyword]
            else:
                ar = analysis.aq_parent
                rr = dicts_to_dict(ar.getResultsRange(), 'keyword')
                if keyword in rr:
                    spec_dict = rr[keyword]
                else:
                    continue
            if not spec_dict:
                continue
            try:
                spec_min = float(spec_dict['min'])
                spec_max = float(spec_dict['max'])
            except ValueError:
                continue
            if spec_min <= result <= spec_max:
                continue

            # check if in shoulder: out of range, but in acceptable
            # error percentage
            shoulder = False
            error = 0
            try:
                error = float(spec_dict.get('error', '0'))
            except (TypeError, ValueError):
                error = 0
            error_amount = (result / 100) * error
            error_min = result - error_amount
            error_max = result + error_amount
            if ((result < spec_min) and (error_max >= spec_min)) or \
                    ((result > spec_max) and (error_min <= spec_max)):
                shoulder = True

            dataline = []

            dataitem = {'value': analysis.getClientTitle()}
            dataline.append(dataitem)

            dataitem = {'value': analysis.getRequestID()}
            dataline.append(dataitem)

            dataitem = {'value': analysis.aq_parent.getSampleTypeTitle()}
            dataline.append(dataitem)

            if not isAttributeHidden('Sample', 'SamplePoint'):
                dataitem = {'value': analysis.aq_parent.getSamplePointTitle()}
                dataline.append(dataitem)

            dataitem = {'value': analysis.getCategoryTitle()}
            dataline.append(dataitem)

            dataitem = {'value': analysis.Title()}
            dataline.append(dataitem)

            if shoulder:
                dataitem = {'value': analysis.getResult(),
                            'img_after': '++resource++bika.lims.images/exclamation.png'}
            else:
                dataitem = {'value': analysis.getResult()}

            dataline.append(dataitem)

            dataitem = {'value': spec_dict['min']}
            dataline.append(dataitem)

            dataitem = {'value': spec_dict['max']}
            dataline.append(dataitem)

            state = wf_tool.getInfoFor(analysis, 'review_state', '')
            review_state = wf_tool.getTitleForStateOnType(
                state, 'Analysis')
            dataitem = {'value': review_state}
            dataline.append(dataitem)

            datalines.append(dataline)

            count_all += 1

        # table footer data
        footlines = []
        footline = []
        footitem = {'value': _('Number of analyses out of range for period'),
                    'colspan': 9,
                    'class': 'total_label'}
        footline.append(footitem)
        footitem = {'value': count_all}
        footline.append(footitem)
        footlines.append(footline)

        # report footer data
        footnotes = []
        footline = []
        footitem = {'value': _('Analysis result within error range'),
                    'img_before': '++resource++bika.lims.images/exclamation.png'
        }
        footline.append(footitem)
        footnotes.append(footline)

        self.report_content = {
            'headings': headings,
            'parms': parms,
            'formats': formats,
            'datalines': datalines,
            'footings': footlines,
            'footnotes': footnotes}

        title = t(headings['header'])

        return {'report_title': title,
                'report_data': self.template()}
Example #15
    def __call__(self):

        form = self.request.form
        plone.protect.CheckAuthenticator(self.request.form)
        plone.protect.PostOnly(self.request.form)
        came_from = 'came_from' in form and form['came_from'] or 'add'
        wftool = getToolByName(self.context, 'portal_workflow')
        uc = getToolByName(self.context, 'uid_catalog')
        bsc = getToolByName(self.context, 'bika_setup_catalog')

        errors = {}

        form_parts = json.loads(self.request.form['parts'])

        # First make a list of non-empty columns
        columns = []
        for column in range(int(form['col_count'])):
            name = 'ar.%s' % column
            ar = form.get(name, None)
            if ar and 'Analyses' in ar.keys():
                columns.append(column)

        if len(columns) == 0:
            ajax_form_error(errors,
                            message=t(_("No analyses have been selected")))
            return json.dumps({'errors': errors})

        # Now some basic validation
        required_fields = [
            field.getName() for field in AnalysisRequestSchema.fields()
            if field.required
        ]

        for column in columns:
            formkey = "ar.%s" % column
            ar = form[formkey]

            # check that required fields have values
            for field in required_fields:
                # This one is still special.
                if field in ['RequestID']:
                    continue
                # And these are not required if this is a secondary AR
                if ar.get('Sample', '') != '' and field in [
                        'SamplingDate', 'SampleType'
                ]:
                    continue
                if not ar.get(field, ''):
                    ajax_form_error(errors, field, column)
        # Return errors if there are any
        if errors:
            return json.dumps({'errors': errors})

        # Get the prices from the form data
        prices = form.get('Prices', None)
        # Initialize the Analysis Request collection
        ARs = []
        # if a new profile is created automatically,
        # this flag triggers the status message
        new_profile = None
        # The actual submission
        for column in columns:
            # Get partitions from the form data
            if form_parts:
                partitions = form_parts[str(column)]
            else:
                partitions = []
            # Get the form data using the appropriate form key
            formkey = "ar.%s" % column
            values = form[formkey].copy()
            # resolved_values is formatted so it is acceptable to the
            # Archetypes widget machinery
            resolved_values = {}
            for k, v in values.items():
                # Analyses, we handle that specially.
                if k == 'Analyses':
                    continue
                # Insert the reference *_uid values instead of titles.
                if "_uid" in k:
                    v = values[k]
                    v = v.split(",") if v and "," in v else v
                    fname = k.replace("_uid", "")
                    resolved_values[fname] = v
                    continue
                # we want to write the UIDs and ignore the title values
                if k + "_uid" in values:
                    continue
                resolved_values[k] = values[k]
            # Get the analyses from the form data
            analyses = values["Analyses"]

            # Gather the specifications from the form
            specs = json.loads(form['copy_to_new_specs']).get(str(column), {})
            if not specs:
                specs = json.loads(form['specs']).get(str(column), {})
            if specs:
                specs = dicts_to_dict(specs, 'keyword')
            # Modify the spec with all manually entered values
            for service_uid in analyses:
                min_element_name = "ar.%s.min.%s" % (column, service_uid)
                max_element_name = "ar.%s.max.%s" % (column, service_uid)
                error_element_name = "ar.%s.error.%s" % (column, service_uid)
                service_keyword = bsc(UID=service_uid)[0].getKeyword
                if min_element_name in form:
                    if service_keyword not in specs:
                        specs[service_keyword] = {}
                    specs[service_keyword]["keyword"] = service_keyword
                    specs[service_keyword]["min"] = form[min_element_name]
                    specs[service_keyword]["max"] = form[max_element_name]
                    specs[service_keyword]["error"] = form[error_element_name]

            # Selecting a template sets the hidden 'parts' field to template values.
            # Selecting a profile will allow ar_add.js to fill in the parts field.
            # The result is the same once we are here.
            if not partitions:
                partitions = [{
                    'services': [],
                    'container': None,
                    'preservation': '',
                    'separate': False
                }]
            # Apply DefaultContainerType to partitions without a container
            default_container_type = resolved_values.get(
                'DefaultContainerType', None)
            if default_container_type:
                container_type = bsc(UID=default_container_type)[0].getObject()
                containers = container_type.getContainers()
                for partition in partitions:
                    if not partition.get("container", None):
                        partition['container'] = containers
            # Retrieve the catalogue reference to the client
            client = uc(UID=resolved_values['Client'])[0].getObject()
            # Create the Analysis Request
            ar = create_analysisrequest(client,
                                        self.request,
                                        resolved_values,
                                        analyses=analyses,
                                        partitions=partitions,
                                        specifications=specs.values(),
                                        prices=prices)
            # Add the created analysis request to the list
            ARs.append(ar.getId())
        # Display the appropriate message after creation
        if len(ARs) > 1:
            message = _("Analysis requests ${ARs} were successfully created.",
                        mapping={'ARs': safe_unicode(', '.join(ARs))})
        else:
            message = _("Analysis request ${AR} was successfully created.",
                        mapping={'AR': safe_unicode(ARs[0])})
        self.context.plone_utils.addPortalMessage(message, 'info')
        # Automatic label printing
        # Won't print labels for Register on Secondary ARs
        new_ars = None
        if came_from == 'add':
            new_ars = [ar for ar in ARs if ar[-2:] == '01']
        if 'register' in self.context.bika_setup.getAutoPrintStickers() \
                and new_ars:
            return json.dumps({
                'success': message,
                'stickers': new_ars,
                'stickertemplate':
                    self.context.bika_setup.getAutoStickerTemplate()
            })
        else:
            return json.dumps({'success': message})
Example #16
    def analysis_specification(self):
        ar = self.context.aq_parent
        rr = dicts_to_dict(ar.getResultsRange(), 'keyword')

        return rr[self.context.getService().getKeyword()]
Example #17
 def get_spec_from_ar(self, ar, keyword):
     empty = {'min': '', 'max': '', 'error': '', 'keyword': keyword}
     spec = ar.getResultsRange()
     if spec:
         return dicts_to_dict(spec, 'keyword').get(keyword, empty)
     return empty
Example #18
 def get_analysis_spec(self, analysis):
     rr = dicts_to_dict(analysis.aq_parent.getResultsRange(), 'keyword')
     return rr.get(analysis.getKeyword(), None)
Example #19
    def create_metrc_csv(self, ars):
        analyses = []
        output = StringIO.StringIO()
        writer = csv.writer(output)
        for ar in ars:
            ar_id = ar.id
            date_published = ar.getDatePublished()
            if date_published:
                date_published = date_published.split(' ')[0]
            else:
                date_published = self.ulocalized_time(DateTime(), long_format=0)

            client_sampleid = to_utf8(ar.getClientSampleID())
            as_keyword = ''
            result = ''
            is_in_range = True
            unit_and_ar_id = ''
            sample_type_uid = ar.getSampleType().UID()
            bsc = getToolByName(self, 'bika_setup_catalog')
            analysis_specs = bsc(portal_type='AnalysisSpec',
                          getSampleTypeUID=sample_type_uid)
            dmk = ar.bika_setup.getResultsDecimalMark()

            lines = []
            analyses = ar.getAnalyses(full_objects=True)
            for analysis in analyses:
                service = analysis.getService()
                if service.getHidden():
                    continue
                specification = analysis.getResultsRange()
                result = analysis.getFormattedResult(html=False)
                if not specification:
                    rr = dicts_to_dict(analysis.aq_parent.getResultsRange(), 'keyword')
                    specification = rr.get(analysis.getKeyword(), None)
                    # No specs available, assume in range:
                    if not specification:
                        is_in_range = True
                else:
                    minimum = specification.get('min', '')
                    maximum = specification.get('max', '')
                    error = specification.get('error', '')
                    if minimum == '' and maximum == '' and error == '':
                        is_in_range = True
                    else:
                        outofrange, acceptable = \
                            isOutOfRange(result, minimum, maximum, error)
                        is_in_range = not outofrange

                unit = service.getUnit()
                unit = '({})-'.format(unit) if unit else ''
                unit_and_ar_id = '{}{}'.format(unit, ar_id)

                # Check unit conversion
                if sample_type_uid:
                    for unit_conversion in service.getUnitConversions():
                        if unit_conversion.get('SampleType') and \
                           unit_conversion.get('Unit') and \
                           unit_conversion.get('SampleType') == sample_type_uid:
                            conv = ploneapi.content.get(
                                UID=unit_conversion['Unit'])
                            unit_and_ar_id = '({})-{}'.format(
                                conv.converted_unit, ar_id)
                            result = convert_unit(
                                analysis.getResult(),
                                conv.formula,
                                dmk,
                                analysis.getPrecision())
                            break

                line = {'date_published': date_published,
                        'client_sampleid': client_sampleid,
                        'as_keyword': service.getShortTitle(),
                        'result': result,
                        'is_in_range': is_in_range,
                        'unit_and_ar_id': unit_and_ar_id,
                        }
                lines.append(line)

            for l in lines:
                writer.writerow([l['date_published'], l['client_sampleid'],
                                l['as_keyword'], l['result'],
                                l['is_in_range'], l['unit_and_ar_id'],
                                ])

        return output.getvalue()