Exemple #1
0
    def __init__(self, context, request):
        """Prepare the view: selection macros, toolbar icon and the list
        of users holding analyst-capable roles.

        Also disables the Plone border for this request.
        """
        BrowserView.__init__(self, context, request)
        self.selection_macros = SelectionMacrosView(context, request)
        self.icon = "++resource++bika.lims.images/query_big.png"
        analyst_roles = ['Manager', 'LabManager', 'Analyst']
        self.getAnalysts = getUsers(context, analyst_roles)
        request.set('disable_border', 1)
Exemple #2
0
    def __call__(self):
        """Gather all registered IAdministrationReport adapters and render
        the view template.

        Each adapter is called with (context, request) and must return a
        dict; the adapter's registration name is stored under 'id'.
        """
        self.selection_macros = SelectionMacrosView(self.context, self.request)
        self.icon = self.portal_url + "/++resource++bika.lims.images/report_big.png"

        self.additional_reports = []
        for adapter_name, factory in getAdapters((self.context, ),
                                                 IAdministrationReport):
            entry = factory(self.context, self.request)
            entry['id'] = adapter_name
            self.additional_reports.append(entry)

        return self.template()
Exemple #3
0
    def __call__(self):
        """Gather all registered IProductivityReport adapters and render
        the view template.

        Also exposes the analyst-capable users to the template via
        ``self.getAnalysts``.
        """
        self.selection_macros = SelectionMacrosView(self.context, self.request)
        self.icon = self.portal_url + "/++resource++bika.lims.images/report_big.png"
        analyst_roles = ['Manager', 'LabManager', 'Analyst']
        self.getAnalysts = getUsers(self.context, analyst_roles)

        self.additional_reports = []
        for adapter_name, factory in getAdapters((self.context, ),
                                                 IProductivityReport):
            entry = factory(self.context, self.request)
            entry['id'] = adapter_name
            self.additional_reports.append(entry)

        return self.template()
Exemple #4
0
    def __call__(self):
        """Create and render the report selected by the ``report_id``
        request parameter, turn it into a PDF, store it as a Report
        content object and stream the PDF back as a download.

        Returns the error template when no report id is given or the
        report module cannot be imported; returns the report's raw output
        when it is a plain string; otherwise returns None after writing
        the PDF to the response.
        """

        # if there's an error, we return productivity.pt which requires these.
        self.selection_macros = SelectionMacrosView(self.context, self.request)
        self.additional_reports = []
        adapters = getAdapters((self.context, ), IProductivityReport)
        for name, adapter in adapters:
            report_dict = adapter(self.context, self.request)
            report_dict['id'] = name
            self.additional_reports.append(report_dict)

        report_id = self.request.get('report_id', '')
        if not report_id:
            message = _("No report specified in request")
            self.logger.error(message)
            self.context.plone_utils.addPortalMessage(message, 'error')
            return self.template()

        # Reporter metadata shown on the rendered report.
        self.date = DateTime()
        username = self.context.portal_membership.getAuthenticatedMember(
        ).getUserName()
        self.reporter = self.user_fullname(username)
        self.reporter_email = self.user_email(username)

        # signature image: use the signature of the LabContact whose
        # username matches the authenticated member, if there is one.
        self.reporter_signature = ""
        c = [
            x for x in self.bika_setup_catalog(portal_type='LabContact')
            if x.getObject().getUsername() == username
        ]
        if c:
            sf = c[0].getObject().getSignature()
            if sf:
                self.reporter_signature = sf.absolute_url() + "/Signature"

        # Laboratory details for the report header.
        lab = self.context.bika_setup.laboratory
        self.laboratory = lab
        self.lab_title = lab.getName()
        self.lab_address = lab.getPrintAddress()
        self.lab_email = lab.getEmailAddress()
        self.lab_url = lab.getLabURL()

        # Client details when a client user is logged in; clientuid is
        # saved on the generated Report object below.
        client = logged_in_client(self.context)
        if client:
            clientuid = client.UID()
            self.client_title = client.Title()
            self.client_address = client.getPrintAddress()
        else:
            clientuid = None
            self.client_title = None
            self.client_address = None

        # Render form output

        # the report can add file names to this list; they will be deleted
        # once the PDF has been generated.  temporary plot image files, etc.
        self.request['to_remove'] = []

        if "report_module" in self.request:
            module = self.request["report_module"]
        else:
            module = "bika.lims.browser.reports.%s" % report_id
        try:
            # NOTE(review): 'module' can come straight from the request,
            # so this exec-based import will import whatever module name
            # the client supplies - confirm this view is only reachable
            # by trusted users.
            exec("from %s import Report" % module)
            # required during error redirect: the report must have a copy of
            # additional_reports, because it is used as a surrogate view.
            Report.additional_reports = self.additional_reports
        except ImportError:
            message = "Report %s.Report not found (shouldn't happen)" % module
            self.logger.error(message)
            self.context.plone_utils.addPortalMessage(message, 'error')
            return self.template()

        # Report must return dict with:
        # - report_title - title string for pdf/history listing
        # - report_data - rendered report
        output = Report(self.context, self.request)()

        # if CSV output is chosen, report returns None
        if not output:
            return

        # Plain-string output is returned as-is after cleanup.
        # (Python 2: 'unicode' is a builtin here.)
        if type(output) in (str, unicode, bytes):
            # remove temporary files
            for f in self.request['to_remove']:
                os.remove(f)
            return output

        # The report output gets pulled through report_frame.pt
        self.reportout = output['report_data']
        framed_output = self.frame_template()

        # this is the good part
        result = createPdf(framed_output, False)

        # remove temporary files
        for f in self.request['to_remove']:
            os.remove(f)

        if result:
            # Create new report object
            reportid = self.aq_parent.generateUniqueId('Report')
            report = _createObjectByType("Report", self.aq_parent, reportid)
            report.edit(Client=clientuid)
            report.processForm()

            # write pdf to report object
            report.edit(title=output['report_title'], ReportFile=result)
            report.reindexObject()

            # Download filename: "<short date> - <report title>"
            fn = "%s - %s" % (self.date.strftime(
                self.date_format_short), _u(output['report_title']))

            setheader = self.request.RESPONSE.setHeader
            setheader('Content-Type', 'application/pdf')
            setheader("Content-Disposition",
                      "attachment;filename=\"%s\"" % _c(fn))
            self.request.RESPONSE.write(result)

        return
class Report(BrowserView):
    """Productivity report: samples received vs. reported, per month.

    Renders ``productivity_samplereceivedvsreported.pt`` with per-month
    counts of received and published samples plus a footer with the
    totals.  Falls back to the generic productivity template when no
    samples match the date filter.
    """
    implements(IViewView)
    default_template = ViewPageTemplateFile("templates/productivity.pt")
    template = ViewPageTemplateFile(
        "templates/productivity_samplereceivedvsreported.pt")

    def __init__(self, context, request, report=None):
        super(Report, self).__init__(context, request)
        self.report = report
        self.selection_macros = SelectionMacrosView(self.context, self.request)

    def __call__(self):
        parms = []
        titles = []

        self.contentFilter = {'portal_type': 'Sample',
                              'review_state': ['sample_received', 'expired',
                                               'disposed'],
                              'sort_on': 'getDateReceived'}

        val = self.selection_macros.parse_daterange(self.request,
                                                    'getDateReceived',
                                                    _('Date Received'))
        if val:
            self.contentFilter[val['contentFilter'][0]] = val['contentFilter'][1]
            parms.append(val['parms'])
            titles.append(val['titles'])

        # Query the catalog; bail out with the generic template when no
        # samples match.
        samples = self.bika_catalog(self.contentFilter)
        if not samples:
            message = _("No samples matched your query")
            self.context.plone_utils.addPortalMessage(message, "error")
            return self.default_template()

        datalines = {}
        footlines = {}
        total_received_count = 0
        total_published_count = 0
        for sample in samples:
            sample = sample.getObject()

            # A sample counts as 'published' when any of its analyses has
            # a publication date set.
            published = False
            for analysis in sample.getAnalyses({}):
                analysis = analysis.getObject()
                if analysis.getDateAnalysisPublished() is not None:
                    published = True
                    break

            datereceived = sample.getDateReceived()
            monthyear = datereceived.strftime("%B") + " " + \
                datereceived.strftime("%Y")
            received = 1
            publishedcnt = 1 if published else 0
            if monthyear in datalines:
                received = datalines[monthyear]['ReceivedCount'] + 1
                publishedcnt = datalines[monthyear]['PublishedCount']
                if published:
                    publishedcnt += 1
            # BUGFIX: use float division.  Under Python 2, the original
            # integer division truncated the ratio to 0 for any month not
            # fully published.  Also use '{0:.0f}' for the percentage: the
            # original '{0:.1g}' rendered 100 as '1e+02'.  Both now match
            # the other productivity reports in this file.
            ratio = float(publishedcnt) / received
            datalines[monthyear] = {
                'MonthYear': monthyear,
                'ReceivedCount': received,
                'PublishedCount': publishedcnt,
                'UnpublishedCount': received - publishedcnt,
                'Ratio': ratio,
                'RatioPercentage': '{0:.0f}'.format(ratio * 100) + "%"}

            total_received_count += 1
            if published:
                total_published_count += 1

        # Footer total data (total_received_count > 0 is guaranteed by the
        # empty-result check above).
        ratio = float(total_published_count) / total_received_count
        footlines['Total'] = {
            'ReceivedCount': total_received_count,
            'PublishedCount': total_published_count,
            'UnpublishedCount': total_received_count - total_published_count,
            'Ratio': ratio,
            'RatioPercentage': '{0:.0f}'.format(ratio * 100) + "%"}

        self.report_data = {
            'parameters': parms,
            'datalines': datalines,
            'footlines': footlines}

        return {'report_title': _('Samples received vs. reported'),
                'report_data': self.template()}
class Report(BrowserView):
    """Quality-control report: analysis results per sample point and
    analysis service, with an optional gnuplot trend chart per service.
    """
    implements(IViewView)
    template = ViewPageTemplateFile(
        "templates/qualitycontrol_resultspersamplepoint.pt")
    # if unsuccessful we return here:
    default_template = ViewPageTemplateFile("templates/qualitycontrol.pt")

    def __init__(self, context, request, report=None):
        super(Report, self).__init__(context, request)
        self.report = report
        self.selection_macros = SelectionMacrosView(self.context, self.request)

    def get_analysis_spec(self, analysis):
        """Return the results-range dict for this analysis' keyword from
        its parent's ResultsRange, or None when no range is defined."""
        rr = dicts_to_dict(analysis.aq_parent.getResultsRange(), 'keyword')
        return rr.get(analysis.getKeyword(), None)

    def ResultOutOfRange(self, analysis):
        """Template wants to know: is this analysis out of range?

        Scans IResultOutOfRange adapters and returns True as soon as one
        triggers.  Returns False when an adapter exists but the analysis
        has no specification; returns None (implicitly) when there are no
        adapters or none of them trigger.
        """
        adapters = getAdapters((analysis, ), IResultOutOfRange)
        spec = self.get_analysis_spec(analysis)
        for name, adapter in adapters:
            if not spec:
                return False
            if adapter(specification=spec):
                return True

    def __call__(self):

        MinimumResults = self.context.bika_setup.getMinimumResults()
        warning_icon = "<img " + \
                       "src='" + self.portal_url + "/++resource++bika.lims.images/warning.png' " + \
                       "height='9' width='9'/>"
        error_icon = "<img " + \
                     "src='" + self.portal_url + "/++resource++bika.lims.images/exclamation.png' " + \
                     "height='9' width='9'/>"

        header = _("Results per sample point")
        subheader = _(
            "Analysis results for per sample point and analysis service")

        self.contentFilter = {'portal_type': 'Analysis',
                              'review_state': ['verified', 'published']}

        parms = []
        titles = []

        val = self.selection_macros.parse_client(self.request)
        if val:
            self.contentFilter[val['contentFilter'][0]] = val['contentFilter'][1]
            parms.append(val['parms'])
            titles.append(val['titles'])

        # NOTE(review): sp_uid / st_uid below are assigned but never read
        # afterwards in this method.
        val = self.selection_macros.parse_samplepoint(self.request)
        sp_uid = val
        if val:
            self.contentFilter[val['contentFilter'][0]] = val['contentFilter'][1]
            parms.append(val['parms'])
            titles.append(val['titles'])

        val = self.selection_macros.parse_sampletype(self.request)
        st_uid = val
        if val:
            self.contentFilter[val['contentFilter'][0]] = val['contentFilter'][1]
            parms.append(val['parms'])
            titles.append(val['titles'])

        # An analysis service selection is mandatory.
        val = self.selection_macros.parse_analysisservice(self.request)
        if val:
            self.contentFilter[val['contentFilter'][0]] = val['contentFilter'][1]
            parms.append(val['parms'])
        else:
            message = _("No analysis services were selected.")
            self.context.plone_utils.addPortalMessage(message, 'error')
            return self.default_template()

        val = self.selection_macros.parse_daterange(self.request,
                                                    'getDateSampled',
                                                    'DateSampled')
        if val:
            self.contentFilter[val['contentFilter'][0]] = val['contentFilter'][1]
            parms.append(val['parms'])
            titles.append(val['titles'])

        val = self.selection_macros.parse_state(self.request,
                                                'bika_worksheetanalysis_workflow',
                                                'worksheetanalysis_review_state',
                                                'Worksheet state')
        if val:
            self.contentFilter[val['contentFilter'][0]] = val['contentFilter'][1]
            parms.append(val['parms'])

        # Query the catalog and store analysis data in a dict keyed by
        # service title.
        analyses = {}
        out_of_range_count = 0
        in_shoulder_range_count = 0
        analysis_count = 0

        proxies = self.bika_analysis_catalog(self.contentFilter)

        if not proxies:
            message = _("No analyses matched your query")
            self.context.plone_utils.addPortalMessage(message, 'error')
            return self.default_template()

        # # Compile a list of dictionaries, with all relevant analysis data
        for analysis in proxies:
            analysis = analysis.getObject()
            result = analysis.getResult()
            # NOTE(review): 'client' and 'uid' are computed but unused.
            client = analysis.aq_parent.aq_parent
            uid = analysis.UID()
            keyword = analysis.getKeyword()
            service_title = "%s (%s)" % (analysis.Title(), keyword)
            # NOTE(review): misleading name - this holds the result of
            # ResultOutOfRange(), i.e. truthy when OUT of range.
            result_in_range = self.ResultOutOfRange(analysis)

            if service_title not in analyses.keys():
                analyses[service_title] = []
            try:
                result = float(analysis.getResult())
            except:
                # XXX Unfloatable analysis results should be indicated
                continue
            analyses[service_title].append({
                # The report should not mind taking 'analysis' in place of
                # 'service' - the service field values are placed in analysis.
                'service': analysis,
                'obj': analysis,
                'Request ID': analysis.aq_parent.getId(),
                'Analyst': analysis.getAnalyst(),
                'Result': result,
                'Sampled': analysis.getDateSampled(),
                'Captured': analysis.getResultCaptureDate(),
                'Uncertainty': analysis.getUncertainty(),
                'result_in_range': result_in_range,
                'Unit': analysis.getUnit(),
                'Keyword': keyword,
                'icons': '',
            })
            analysis_count += 1

        # Python 2: dict.keys() returns a list, sorted in place here.
        keys = analyses.keys()
        keys.sort()

        parms += [
            {"title": _("Total analyses"), "value": analysis_count},
        ]

        ## This variable is output to the TAL
        self.report_data = {
            'header': header,
            'subheader': subheader,
            'parms': parms,
            'tables': [],
            'footnotes': [],
        }

        # gnuplot script template; %(...)s placeholders are filled per
        # service below.  'gpw_DATAFILE_gpw' is substituted by plot().
        plotscript = """
        set terminal png transparent truecolor enhanced size 700,350 font "Verdana, 8"
        set title "%(title)s"
        set xlabel "%(xlabel)s"
        set ylabel "%(ylabel)s"
        set key off
        #set logscale
        set timefmt "%(date_format_long)s"
        set xdata time
        set format x "%(date_format_short)s\\n%(time_format)s"
        set xrange ["%(x_start)s":"%(x_end)s"]
        set auto fix
        set offsets graph 0, 0, 1, 1
        set xtics border nomirror rotate by 90 font "Verdana, 5" offset 0,-3
        set ytics nomirror

        f(x) = mean_y
        fit f(x) 'gpw_DATAFILE_gpw' u 1:3 via mean_y
        stddev_y = sqrt(FIT_WSSR / (FIT_NDF + 1))

        plot mean_y-stddev_y with filledcurves y1=mean_y lt 1 lc rgb "#efefef",\
             mean_y+stddev_y with filledcurves y1=mean_y lt 1 lc rgb "#efefef",\
             mean_y with lines lc rgb '#ffffff' lw 3,\
             "gpw_DATAFILE_gpw" using 1:3 title 'data' with points pt 7 ps 1 lc rgb '#0000ee' lw 2,\
               '' using 1:3 smooth unique lc rgb '#aaaaaa' lw 2,\
               '' using 1:4 with lines lc rgb '#000000' lw 1,\
               '' using 1:5 with lines lc rgb '#000000' lw 1"""

        ## Compile plots and format data for display
        for service_title in keys:
            # used to calculate XY axis ranges
            # NOTE(review): result_values is computed but never used.
            result_values = [int(o['Result']) for o in analyses[service_title]]
            result_dates = [o['Sampled'] for o in analyses[service_title]]

            parms = []
            plotdata = str()

            # NOTE(review): range_min / range_max are never used.
            range_min = ''
            range_max = ''

            for a in analyses[service_title]:

                a['Sampled'] = a['Sampled'].strftime(self.date_format_long) if a[
                    'Sampled'] else ''
                a['Captured'] = a['Captured'].strftime(self.date_format_long) if \
                a['Captured'] else ''

                R = a['Result']
                U = a['Uncertainty']

                a['Result'] = a['obj'].getFormattedResult()

                in_range = a['result_in_range']
                # result out of range
                if str(in_range) == 'False':
                    out_of_range_count += 1
                    a['Result'] = "%s %s" % (a['Result'], error_icon)
                # result almost out of range
                if str(in_range) == '1':
                    in_shoulder_range_count += 1
                    a['Result'] = "%s %s" % (a['Result'], warning_icon)

                spec = {}
                if hasattr(a["obj"], 'specification') and a["obj"].specification:
                    spec = a["obj"].specification

                # Tab-separated line: date, result, min, max, uncertainty.
                plotdata += "%s\t%s\t%s\t%s\t%s\n" % (
                    a['Sampled'],
                    R,
                    spec.get("min", ""),
                    spec.get("max", ""),
                    U and U or 0,
                )
                # NOTE(review): encode() returns a new string which is
                # discarded here - this line is a no-op.
                plotdata.encode('utf-8')

            unit = analyses[service_title][0]['Unit']
            # Only plot when there are at least MinimumResults distinct
            # sample dates.
            if MinimumResults <= len(dict([(d, d) for d in result_dates])):
                _plotscript = str(plotscript) % {
                    'title': "",
                    'xlabel': t(_("Date Sampled")),
                    'ylabel': unit and unit or '',
                    'x_start': "%s" % min(result_dates).strftime(
                        self.date_format_long),
                    'x_end': "%s" % max(result_dates).strftime(
                        self.date_format_long),
                    'date_format_long': self.date_format_long,
                    'date_format_short': self.date_format_short,
                    'time_format': self.time_format,
                }

                plot_png = plot(str(plotdata),
                                plotscript=str(_plotscript),
                                usefifo=False)

                # Temporary PNG data file; registered for deletion after
                # the PDF is generated (see the dispatcher view).
                fh, data_fn = tempfile.mkstemp(suffix='.png')
                os.write(fh, plot_png)
                plot_url = data_fn
                self.request['to_remove'].append(data_fn)

                # NOTE(review): redundant re-assignment of plot_url.
                plot_url = data_fn
            else:
                plot_url = ""

            table = {
                'title': "%s: %s" % (
                    t(_("Analysis Service")),
                    service_title),
                'parms': parms,
                'columns': ['Request ID',
                            'Analyst',
                            'Result',
                            'Sampled',
                            'Captured'],
                'data': analyses[service_title],
                'plot_url': plot_url,
            }

            self.report_data['tables'].append(table)

        # NOTE(review): 'translate' is assigned but never used.
        translate = self.context.translate

        ## footnotes
        if out_of_range_count:
            msgid = _("Analyses out of range")
            self.report_data['footnotes'].append(
                "%s %s" % (error_icon, t(msgid)))
        if in_shoulder_range_count:
            msgid = _("Analyses in error shoulder range")
            self.report_data['footnotes'].append(
                "%s %s" % (warning_icon, t(msgid)))

        self.report_data['parms'].append(
            {"title": _("Analyses out of range"),
             "value": out_of_range_count})
        self.report_data['parms'].append(
            {"title": _("Analyses in error shoulder range"),
             "value": in_shoulder_range_count})

        title = t(header)
        if titles:
            title += " (%s)" % " ".join(titles)
        return {
            'report_title': title,
            'report_data': self.template(),
        }
class Report(BrowserView):
    """Productivity report: analyses per department.

    Counts requested / performed / published analyses, grouped by an
    optional period (Day/Week/Month/Year) and by department, and renders
    ``productivity_analysesperdepartment.pt``.  When the request asks for
    ``output_format == 'CSV'`` the data is written straight to the
    response instead (and the call returns None, as before).
    """
    implements(IViewView)
    default_template = ViewPageTemplateFile("templates/productivity.pt")
    template = ViewPageTemplateFile(
        "templates/productivity_analysesperdepartment.pt")

    def __init__(self, context, request, report=None):
        super(Report, self).__init__(context, request)
        self.report = report
        self.selection_macros = SelectionMacrosView(self.context, self.request)

    def __call__(self):
        parms = []
        titles = []

        # Apply filters
        self.contentFilter = {'portal_type': 'Analysis'}
        val = self.selection_macros.parse_daterange(self.request,
                                                    'getDateRequested',
                                                    _('Date Requested'))
        if val:
            self.contentFilter[val['contentFilter'][0]] = val['contentFilter'][1]
            parms.append(val['parms'])
            titles.append(val['titles'])

        val = self.selection_macros.parse_state(self.request,
                                                'bika_analysis_workflow',
                                                'getAnalysisState',
                                                _('Analysis State'))
        if val:
            self.contentFilter[val['contentFilter'][0]] = val['contentFilter'][1]
            parms.append(val['parms'])
            titles.append(val['titles'])

        # Query the catalog; bail out early when nothing matches.
        analyses = self.bika_analysis_catalog(self.contentFilter)
        if not analyses:
            message = _("No analyses matched your query")
            self.context.plone_utils.addPortalMessage(message, "error")
            return self.default_template()

        groupby = self.request.form.get('GroupingPeriod', '')
        if groupby != '':
            parms.append({"title": _("Grouping period"), "value": _(groupby)})

        # PERF: the workflow tool lookup is loop-invariant; the original
        # fetched it once per analysis inside the loop below.
        workflow = getToolByName(self.context, 'portal_workflow')

        datalines = {}
        footlines = {}
        totalcount = len(analyses)
        totalpublishedcount = 0
        totalperformedcount = 0
        for analysis in analyses:
            analysis = analysis.getObject()
            analysisservice = analysis.getService()
            department = analysisservice.getDepartment()
            department = department.Title() if department else ''
            daterequested = analysis.created()

            # Build the grouping key from the creation date.
            if groupby == 'Day':
                group = self.ulocalized_time(daterequested)
            elif groupby == 'Week':
                group = daterequested.strftime(
                    "%Y") + ", " + daterequested.strftime("%U")
            elif groupby == 'Month':
                group = daterequested.strftime(
                    "%B") + " " + daterequested.strftime("%Y")
            elif groupby == 'Year':
                group = daterequested.strftime("%Y")
            else:
                group = ''

            # Fetch (or initialise) the running counters for this group,
            # and for this department within the group.
            dataline = {'Group': group, 'Requested': 0, 'Performed': 0,
                        'Published': 0, 'Departments': {}}
            deptline = {'Department': department, 'Requested': 0,
                        'Performed': 0, 'Published': 0}
            if group in datalines:
                dataline = datalines[group]
                if department in dataline['Departments']:
                    deptline = dataline['Departments'][department]

            grouptotalcount = dataline['Requested'] + 1
            groupperformedcount = dataline['Performed']
            grouppublishedcount = dataline['Published']

            depttotalcount = deptline['Requested'] + 1
            deptperformedcount = deptline['Performed']
            deptpublishedcount = deptline['Published']

            # 'published' is decided by the parent AR's workflow state.
            arstate = workflow.getInfoFor(analysis.aq_parent, 'review_state', '')
            if arstate == 'published':
                deptpublishedcount += 1
                grouppublishedcount += 1
                totalpublishedcount += 1

            # 'performed' means the analysis has a result.
            if analysis.getResult():
                deptperformedcount += 1
                groupperformedcount += 1
                totalperformedcount += 1

            group_performedrequested_ratio = float(groupperformedcount) / float(
                grouptotalcount)
            group_publishedperformed_ratio = (
                float(grouppublishedcount) / float(groupperformedcount)
                if groupperformedcount > 0 else 0)

            dept_performedrequested_ratio = float(deptperformedcount) / float(
                depttotalcount)
            dept_publishedperformed_ratio = (
                float(deptpublishedcount) / float(deptperformedcount)
                if deptperformedcount > 0 else 0)

            dataline['Requested'] = grouptotalcount
            dataline['Performed'] = groupperformedcount
            dataline['Published'] = grouppublishedcount
            dataline['PerformedRequestedRatio'] = group_performedrequested_ratio
            dataline['PerformedRequestedRatioPercentage'] = '{0:.0f}'.format(
                group_performedrequested_ratio * 100) + "%"
            dataline['PublishedPerformedRatio'] = group_publishedperformed_ratio
            dataline['PublishedPerformedRatioPercentage'] = '{0:.0f}'.format(
                group_publishedperformed_ratio * 100) + "%"

            deptline['Requested'] = depttotalcount
            deptline['Performed'] = deptperformedcount
            deptline['Published'] = deptpublishedcount
            deptline['PerformedRequestedRatio'] = dept_performedrequested_ratio
            deptline['PerformedRequestedRatioPercentage'] = '{0:.0f}'.format(
                dept_performedrequested_ratio * 100) + "%"
            deptline['PublishedPerformedRatio'] = dept_publishedperformed_ratio
            deptline['PublishedPerformedRatioPercentage'] = '{0:.0f}'.format(
                dept_publishedperformed_ratio * 100) + "%"

            dataline['Departments'][department] = deptline
            datalines[group] = dataline

        # Footer totals across all groups/departments (totalcount > 0 is
        # guaranteed by the empty-result check above).
        total_performedrequested_ratio = float(totalperformedcount) / float(
            totalcount)
        total_publishedperformed_ratio = (
            float(totalpublishedcount) / float(totalperformedcount)
            if totalperformedcount > 0 else 0)

        footline = {'Requested': totalcount,
                    'Performed': totalperformedcount,
                    'Published': totalpublishedcount,
                    'PerformedRequestedRatio': total_performedrequested_ratio,
                    'PerformedRequestedRatioPercentage': '{0:.0f}'.format(
                        total_performedrequested_ratio * 100) + "%",
                    'PublishedPerformedRatio': total_publishedperformed_ratio,
                    'PublishedPerformedRatioPercentage': '{0:.0f}'.format(
                        total_publishedperformed_ratio * 100) + "%"}

        footlines['Total'] = footline

        self.report_data = {'parameters': parms,
                            'datalines': datalines,
                            'footlines': footlines}

        if self.request.get('output_format', '') == 'CSV':
            # Returns None after writing to the response, matching the
            # original behavior of this branch.
            return self._write_csv(datalines)
        return {'report_title': _('Analyses summary per department'),
                'report_data': self.template()}

    def _write_csv(self, datalines):
        """Write the grouped counters to the response as a CSV download."""
        # Local imports as in the original (Python 2 StringIO).
        import csv
        import StringIO
        import datetime

        fieldnames = [
            'Group',
            'Department',
            'Requested',
            'Performed',
            'Published',
        ]
        output = StringIO.StringIO()
        dw = csv.DictWriter(output, extrasaction='ignore',
                            fieldnames=fieldnames)
        dw.writerow(dict((fn, fn) for fn in fieldnames))
        for group_name, group in datalines.items():
            for dept_name, dept in group['Departments'].items():
                dw.writerow({
                    'Group': group_name,
                    'Department': dept_name,
                    'Requested': dept['Requested'],
                    'Performed': dept['Performed'],
                    'Published': dept['Published'],
                })
        report_data = output.getvalue()
        output.close()
        date = datetime.datetime.now().strftime("%Y%m%d%H%M")
        setheader = self.request.RESPONSE.setHeader
        setheader('Content-Type', 'text/csv')
        setheader("Content-Disposition",
                  "attachment;filename=\"analysesperdepartment_%s.csv\"" % date)
        self.request.RESPONSE.write(report_data)
Exemple #8
0
class Report(BrowserView):
    """Productivity report: one data line per analysis belonging to the
    samples received within the selected date range ("Daily samples
    received").

    Renders ``productivity_dailysamplesreceived.pt`` on success and
    falls back to ``productivity.pt`` when the query matches nothing.
    When the request carries ``output_format=CSV`` the report is written
    straight to the response as a CSV attachment instead.
    """
    implements(IViewView)
    default_template = ViewPageTemplateFile("templates/productivity.pt")
    template = ViewPageTemplateFile(
        "templates/productivity_dailysamplesreceived.pt")

    def __init__(self, context, request, report=None):
        super(Report, self).__init__(context, request)
        # Optional pre-built report payload handed in by the caller.
        self.report = report
        self.selection_macros = SelectionMacrosView(self.context, self.request)

    def __call__(self):
        """Build the report data and render it.

        :returns: a dict with ``report_title`` and ``report_data`` for
            the publishing machinery, the fallback template when no
            samples match, or ``None`` after streaming CSV output.
        """
        parms = []
        titles = []

        self.contentFilter = {
            'portal_type': 'Sample',
            'review_state': ['sample_received', 'expired', 'disposed'],
            'sort_on': 'getDateReceived'
        }

        val = self.selection_macros.parse_daterange(self.request,
                                                    'getDateReceived',
                                                    _('Date Received'))
        if val:
            self.contentFilter[val['contentFilter']
                               [0]] = val['contentFilter'][1]
            parms.append(val['parms'])
            titles.append(val['titles'])

        # Query the catalog and store results in a dictionary
        samples = self.bika_catalog(self.contentFilter)
        if not samples:
            message = _("No samples matched your query")
            self.context.plone_utils.addPortalMessage(message, "error")
            return self.default_template()

        # Loop-invariant: the sampling-workflow switch cannot change
        # while we iterate, so read it once instead of per analysis.
        sampling_enabled = \
            self.context.bika_setup.getSamplingWorkflowEnabled()

        datalines = []
        analyses_count = 0
        for sample in samples:
            sample = sample.getObject()

            # Sample-level fields are identical for every analysis of
            # the sample; compute them once per sample instead of once
            # per analysis (the original re-fetched them in the inner
            # loop).
            sample_id = sample.getSampleID()
            sample_type = sample.getSampleType().Title()
            date_received = self.ulocalized_time(sample.getDateReceived(),
                                                 long_format=1)
            date_sampled = self.ulocalized_time(sample.getDateSampled(),
                                                long_format=1)
            sampling_date = self.ulocalized_time(sample.getSamplingDate(),
                                                 long_format=1)

            # For each sample, retrieve the analyses and generate
            # a data line for each one
            analyses = sample.getAnalyses({})
            for analysis in analyses:
                analysis = analysis.getObject()
                dataline = {
                    'AnalysisKeyword': analysis.getKeyword(),
                    'AnalysisTitle': analysis.Title(),
                    'SampleID': sample_id,
                    'SampleType': sample_type,
                    'DateReceived': date_received,
                    'DateSampled': date_sampled,
                }
                if sampling_enabled:
                    dataline['SamplingDate'] = sampling_date
                datalines.append(dataline)
                analyses_count += 1

        # Footer total data
        footlines = []
        footline = {'TotalCount': analyses_count}
        footlines.append(footline)

        self.report_data = {
            'parameters': parms,
            'datalines': datalines,
            'footlines': footlines
        }

        if self.request.get('output_format', '') == 'CSV':
            import csv
            import StringIO
            import datetime

            fieldnames = [
                'SampleID',
                'SampleType',
                'DateSampled',
                'DateReceived',
                'AnalysisTitle',
                'AnalysisKeyword',
            ]
            if sampling_enabled:
                fieldnames.append('SamplingDate')
            output = StringIO.StringIO()
            dw = csv.DictWriter(output, fieldnames=fieldnames)
            # Header row (csv.DictWriter.writeheader needs Python 2.7+;
            # this spelling matches the rest of the file).
            dw.writerow(dict((fn, fn) for fn in fieldnames))
            for row in datalines:
                dw.writerow(row)
            report_data = output.getvalue()
            output.close()
            date = datetime.datetime.now().strftime("%Y%m%d%H%M")
            setheader = self.request.RESPONSE.setHeader
            setheader('Content-Type', 'text/csv')
            setheader(
                "Content-Disposition",
                "attachment;filename=\"dailysamplesreceived_%s.csv\"" % date)
            self.request.RESPONSE.write(report_data)
        else:
            return {
                'report_title': _('Daily samples received'),
                'report_data': self.template()
            }
class Report(BrowserView):
    """Quality-control report: histogram of the verified/published
    results of one analysis service on one reference sample.

    Renders ``qualitycontrol_referenceanalysisqc.pt`` on success; any
    validation failure falls back to ``qualitycontrol.pt``.
    """
    implements(IViewView)
    template = ViewPageTemplateFile("templates/qualitycontrol_referenceanalysisqc.pt")
    # if unsuccessful we return here:
    default_template = ViewPageTemplateFile("templates/qualitycontrol.pt")

    def __init__(self, context, request, report=None):
        super(Report, self).__init__(context, request)
        # Optional pre-built report payload handed in by the caller.
        self.report = report
        self.selection_macros = SelectionMacrosView(self.context, self.request)

    def __call__(self):
        """Validate the form selection, collect the reference analyses,
        plot a result histogram with gnuplot and assemble
        ``self.report_data`` for the template.
        """
        MinimumResults = self.context.bika_setup.getMinimumResults()
        warning_icon = "<img " +\
            "src='"+self.portal_url+"/++resource++bika.lims.images/warning.png' " +\
            "height='9' width='9'/>"
        error_icon = "<img " +\
            "src='"+self.portal_url+"/++resource++bika.lims.images/exclamation.png' " +\
            "height='9' width='9'/>"

        header = _("Reference analysis QC")
        subheader = _("Reference analysis quality control graphs ")

        self.contentFilter = {'portal_type': 'ReferenceAnalysis',
                              'review_state': ['verified', 'published'],
                              }

        self.parms = []
        titles = []

        sample_uid = self.request.form.get('ReferenceSampleUID', '')
        sample = self.reference_catalog.lookupObject(sample_uid)
        if not sample:
            message = _("No reference sample was selected.")
            self.context.plone_utils.addPortalMessage(message, 'error')
            return self.default_template()
        self.parms.append({'title': _("Reference Sample"),
                           'value': sample.Title()})
        titles.append(sample.Title())

        service_uid = self.request.form.get('ReferenceServiceUID', '')
        service = self.reference_catalog.lookupObject(service_uid)
        if not service:
            message = _("No analysis services were selected.")
            self.context.plone_utils.addPortalMessage(message, 'error')
            return self.default_template()

        # Restrict the query to analyses contained in the sample itself.
        self.contentFilter['path'] = {"query": "/".join(sample.getPhysicalPath()),
                                      "level": 0}
        service_title = "%s (%s)" % (service.Title(), service.getKeyword())
        try:
            precision = str(service.getPrecision())
        except Exception:
            # A service without a usable precision falls back to two
            # decimal places.
            precision = "2"
        self.parms.append({'title': _("Analysis Service"),
                           'value': service.Title()})
        titles.append(service.Title())

        val = self.selection_macros.parse_daterange(self.request,
                                                    'getDateVerified',
                                                    'DateVerified')
        if val:
            self.contentFilter[val['contentFilter'][0]] = val['contentFilter'][1]
            self.parms.append(val['parms'])
            titles.append(val['titles'])

        proxies = self.bika_analysis_catalog(self.contentFilter)
        if not proxies:
            message = _("No analyses matched your query")
            self.context.plone_utils.addPortalMessage(message, 'error')
            return self.default_template()

        ## Compile a list with all relevant analysis data
        analyses = []
        out_of_range_count = 0
        in_shoulder_range_count = 0
        formatted_results = []
        results = []
        tabledata = []

        for analysis in proxies:
            analysis = analysis.getObject()
            try:
                result = float(analysis.getResult())
            except (TypeError, ValueError):
                # Skip analyses with a non-numeric result.  The original
                # fell through here and reused the previous iteration's
                # value -- or raised NameError on the first iteration.
                continue
            analyses.append(analysis)
            results.append(result)
            captured = self.ulocalized_time(analysis.getResultCaptureDate(),
                                            long_format=1)
            analyst = analysis.getAnalyst()
            title = analysis.getId()
            formatted_result = str("%." + precision + "f") % result
            formatted_results.append(formatted_result)
            tabledata.append({_("Analysis"): title,
                              _("Result"): formatted_result,
                              _("Analyst"): analyst,
                              _("Captured"): captured})
        # One formatted result per line: the datafile fed to gnuplot.
        plotdata = "\n".join(formatted_results)

        ### CHECK RANGES

        self.parms += [
            {"title": _("Total analyses"), "value": len(analyses)},
        ]

        ## This variable is output to the TAL
        self.report_data = {
            'header': header,
            'subheader': subheader,
            'parms': self.parms,
            'tables': [],
            'footnotes': [],
        }

        # Histogram script.  The original template used bare
        # "%(highest)-%(lowest)" (missing the "s" conversions), an
        # unsubstituted "highest:lowest" xrange and a call to the
        # undefined gnuplot function "binnumber"; all fixed here.
        plotscript = """
        set terminal png transparent truecolor enhanced size 700,350 font "Verdana, 8"
        set title "%(title)s"
        set xlabel "%(xlabel)s"
        set ylabel "%(ylabel)s"
        set yzeroaxis
        set xrange [%(lowest)s:%(highest)s]
        set xtics border nomirror rotate by 90 font "Verdana, 5" offset 0,-3
        set ytics nomirror

        binwidth = (%(highest)s - %(lowest)s) / 100.0
        scale = (binwidth / (%(highest)s - %(lowest)s))

        bin_number(x) = floor(x / binwidth)
        rounded(x) = binwidth * (bin_number(x) + 0.5)

        plot "gpw_DATAFILE_gpw" using (rounded($1)):(1) smooth frequency
        """

        if MinimumResults <= len(analyses):
            _plotscript = str(plotscript) % \
                {'title': "",
                 'xlabel': "",
                 'ylabel': "",
                 'highest': max(results),
                 'lowest': min(results)}

            plot_png = plot(str(plotdata),
                            plotscript=str(_plotscript),
                            usefifo=False)

            # Temporary PNG data file; the request's 'to_remove' queue
            # deletes it once the response has been rendered.
            fh, data_fn = tempfile.mkstemp(suffix='.png')
            os.write(fh, plot_png)
            os.close(fh)  # the original leaked this file descriptor
            plot_url = data_fn
            self.request['to_remove'].append(data_fn)
        else:
            plot_url = ""

        table = {
            'title': "%s: %s" % (
                self.context.translate(_("Analysis Service")),
                service_title),
            'columns': [_('Analysis'),
                        _('Result'),
                        _('Analyst'),
                        _('Captured')],
            'parms': [],
            'data': tabledata,
            'plot_url': plot_url,
        }

        self.report_data['tables'].append(table)

        ## footnotes
        # NOTE: range checking is not implemented in this report, so
        # both counters are currently always zero; the footnote
        # machinery is kept in place for when it is.  ``translate`` is
        # bound unconditionally -- the original only bound it inside the
        # first branch and raised NameError when only the second ran.
        translate = self.context.translate
        if out_of_range_count:
            msgid = _("Analyses out of range")
            self.report_data['footnotes'].append(
                "%s %s" % (error_icon, translate(msgid)))
        if in_shoulder_range_count:
            msgid = _("Analyses in error shoulder range")
            self.report_data['footnotes'].append(
                "%s %s" % (warning_icon, translate(msgid)))

        self.report_data['parms'].append(
            {"title": _("Analyses out of range"),
             "value": out_of_range_count})
        self.report_data['parms'].append(
            {"title": _("Analyses in error shoulder range"),
             "value": in_shoulder_range_count})

        title = self.context.translate(header)
        if titles:
            title += " (%s)" % " ".join(titles)
        return {
            'report_title': title,
            'report_data': self.template(),
        }
class Report(BrowserView):
    """Productivity report: one data line per Analysis Request created
    within the selected date range ("Data entry day book")."""
    implements(IViewView)
    default_template = ViewPageTemplateFile("templates/productivity.pt")
    template = ViewPageTemplateFile("templates/productivity_dataentrydaybook.pt")

    def __init__(self, context, request, report=None):
        super(Report, self).__init__(context, request)
        # Optional pre-built report payload handed in by the caller.
        self.report = report
        self.selection_macros = SelectionMacrosView(self.context, self.request)

    def __call__(self):
        """Build ``self.report_data`` and render the template.

        :returns: a dict with ``report_title`` and ``report_data``, or
            the fallback template when no Analysis Requests match.
        """
        parms = []
        titles = []

        # Apply filters
        self.contentFilter = {'portal_type': 'AnalysisRequest'}
        val = self.selection_macros.parse_daterange(self.request,
                                                    'getDateCreated',
                                                    _('Date Created'))
        if val:
            self.contentFilter[val['contentFilter'][0]] = val['contentFilter'][1]
            parms.append(val['parms'])
            titles.append(val['titles'])

        # Query the catalog and store results in a dictionary
        ars = self.bika_catalog(self.contentFilter)
        if not ars:
            message = _("No Analysis Requests matched your query")
            self.context.plone_utils.addPortalMessage(message, "error")
            return self.default_template()

        datalines = {}
        footlines = {}
        totalcreatedcount = len(ars)
        totalreceivedcount = 0
        totalpublishedcount = 0
        totalanlcount = 0
        totalreceptionlag = 0
        totalpublicationlag = 0

        for ar in ars:
            ar = ar.getObject()
            datecreated = ar.created()
            datereceived = ar.getDateReceived()
            datepublished = ar.getDatePublished()
            # NOTE(review): the lags are placeholders -- they are never
            # derived from the dates above, so every lag column is 0.
            receptionlag = 0
            publicationlag = 0
            anlcount = len(ar.getAnalyses())

            dataline = {
                "AnalysisRequestID": ar.getRequestID(),
                "DateCreated": self.ulocalized_time(datecreated),
                "DateReceived": self.ulocalized_time(datereceived),
                "DatePublished": self.ulocalized_time(datepublished),
                "ReceptionLag": receptionlag,
                "PublicationLag": publicationlag,
                "TotalLag": receptionlag + publicationlag,
                "BatchID": ar.getBatch(),
                "SampleID": ar.getSample().Title(),
                "SampleType": ar.getSampleTypeTitle(),
                "NumAnalyses": anlcount,
                "ClientID": ar.aq_parent.id,
                "Creator": ar.Creator(),
                "Remarks": ar.getRemarks()
            }

            datalines[ar.getRequestID()] = dataline

            # Reuse the dates fetched above instead of calling the
            # accessors a second time.
            if datereceived:
                totalreceivedcount += 1
            if datepublished:
                totalpublishedcount += 1
            totalanlcount += anlcount
            totalreceptionlag += receptionlag
            totalpublicationlag += publicationlag

        # Footer total data.  totalcreatedcount >= 1 is guaranteed (the
        # empty case returned above), but totalreceivedcount may be 0:
        # the original raised ZeroDivisionError then.
        totalreceivedcreated_ratio = \
            float(totalreceivedcount) / totalcreatedcount
        totalpublishedcreated_ratio = \
            float(totalpublishedcount) / totalcreatedcount
        if totalreceivedcount:
            totalpublishedreceived_ratio = \
                float(totalpublishedcount) / totalreceivedcount
        else:
            totalpublishedreceived_ratio = 0.0

        # Average lags use true (float) division; the original used
        # Python 2 integer division.
        footline = {'Created': totalcreatedcount,
                    'Received': totalreceivedcount,
                    'Published': totalpublishedcount,
                    'ReceivedCreatedRatio': totalreceivedcreated_ratio,
                    'ReceivedCreatedRatioPercentage':
                        '{0:.0f}'.format(totalreceivedcreated_ratio * 100) + "%",
                    'PublishedCreatedRatio': totalpublishedcreated_ratio,
                    'PublishedCreatedRatioPercentage':
                        '{0:.0f}'.format(totalpublishedcreated_ratio * 100) + "%",
                    'PublishedReceivedRatio': totalpublishedreceived_ratio,
                    'PublishedReceivedRatioPercentage':
                        '{0:.0f}'.format(totalpublishedreceived_ratio * 100) + "%",
                    'AvgReceptionLag':
                        '{0:.1f}'.format(float(totalreceptionlag) / totalcreatedcount),
                    'AvgPublicationLag':
                        '{0:.1f}'.format(float(totalpublicationlag) / totalcreatedcount),
                    'AvgTotalLag':
                        '{0:.1f}'.format(float(totalreceptionlag + totalpublicationlag) / totalcreatedcount),
                    'NumAnalyses': totalanlcount
                    }

        footlines['Total'] = footline

        self.report_data = {'parameters': parms,
                            'datalines': datalines,
                            'footlines': footlines}

        return {'report_title': _('Data entry day book'),
                'report_data': self.template()}
        
Exemple #11
0
 def __init__(self, context, request, report=None):
     """Initialize the report view.

     :param context: the Plone content object the view is acquired on.
     :param request: the current HTTP request.
     :param report: optional pre-built report payload; stored as-is.
     """
     BrowserView.__init__(self, context, request)
     self.report = report
     self.selection_macros = SelectionMacrosView(self.context, self.request)
     # Spreadsheet-building state -- presumably populated later by the
     # export code outside this fragment; confirm against the caller.
     self.cells = dict()
     self.workbook = None
class Report(BrowserView):
    """Productivity report: analyses counted per department, optionally
    grouped by day/week/month/year ("Analyses per department")."""
    implements(IViewView)
    default_template = ViewPageTemplateFile("templates/productivity.pt")
    template = ViewPageTemplateFile("templates/productivity_analysesperdepartment.pt")

    def __init__(self, context, request, report=None):
        super(Report, self).__init__(context, request)
        # Optional pre-built report payload handed in by the caller.
        self.report = report
        self.selection_macros = SelectionMacrosView(self.context, self.request)

    def __call__(self):
        """Build ``self.report_data`` (requested/performed/published
        counts and ratios per grouping period and department) and render
        the template.
        """
        parms = []
        titles = []

        # Apply filters
        self.contentFilter = {'portal_type': 'Analysis'}
        val = self.selection_macros.parse_daterange(self.request,
                                                    'getDateRequested',
                                                    _('Date Requested'))
        if val:
            self.contentFilter[val['contentFilter'][0]] = val['contentFilter'][1]
            parms.append(val['parms'])
            titles.append(val['titles'])

        val = self.selection_macros.parse_state(self.request,
                                                'bika_analysis_workflow',
                                                'getAnalysisState',
                                                _('Analysis State'))
        if val:
            self.contentFilter[val['contentFilter'][0]] = val['contentFilter'][1]
            parms.append(val['parms'])
            titles.append(val['titles'])

        # Query the catalog and store results in a dictionary
        analyses = self.bika_analysis_catalog(self.contentFilter)
        if not analyses:
            message = _("No analyses matched your query")
            self.context.plone_utils.addPortalMessage(message, "error")
            return self.default_template()

        groupby = self.request.form.get('GroupingPeriod', '')
        if groupby != '':
            parms.append({"title": _("Grouping period"), "value": _(groupby)})

        # Hoisted out of the loop: the workflow tool does not change per
        # analysis (the original fetched it on every iteration).
        workflow = getToolByName(self.context, 'portal_workflow')

        datalines = {}
        footlines = {}
        totalcount = len(analyses)
        totalpublishedcount = 0
        totalperformedcount = 0
        for analysis in analyses:
            analysis = analysis.getObject()
            analysisservice = analysis.getService()
            department = analysisservice.getDepartment().Title()
            daterequested = analysis.created()

            # Derive the grouping key from the creation date.
            if groupby == 'Day':
                group = self.ulocalized_time(daterequested)
            elif groupby == 'Week':
                group = daterequested.strftime("%Y") + ", " + \
                    daterequested.strftime("%U")
            elif groupby == 'Month':
                group = daterequested.strftime("%B") + " " + \
                    daterequested.strftime("%Y")
            elif groupby == 'Year':
                group = daterequested.strftime("%Y")
            else:
                group = ''

            # Fetch (or seed) the accumulators for this group/department.
            dataline = datalines.get(
                group, {'Group': group, 'Requested': 0, 'Performed': 0,
                        'Published': 0, 'Departments': {}})
            deptline = dataline['Departments'].get(
                department, {'Department': department, 'Requested': 0,
                             'Performed': 0, 'Published': 0})

            grouptotalcount = dataline['Requested'] + 1
            groupperformedcount = dataline['Performed']
            grouppublishedcount = dataline['Published']

            depttotalcount = deptline['Requested'] + 1
            deptperformedcount = deptline['Performed']
            deptpublishedcount = deptline['Published']

            # An analysis counts as published when its parent AR is.
            arstate = workflow.getInfoFor(analysis.aq_parent,
                                          'review_state', '')
            if arstate == 'published':
                deptpublishedcount += 1
                grouppublishedcount += 1
                totalpublishedcount += 1

            # An analysis counts as performed when it has a result.
            if analysis.getResult():
                deptperformedcount += 1
                groupperformedcount += 1
                totalperformedcount += 1

            group_performedrequested_ratio = \
                float(groupperformedcount) / grouptotalcount
            group_publishedperformed_ratio = groupperformedcount > 0 and \
                float(grouppublishedcount) / groupperformedcount or 0

            anl_performedrequested_ratio = \
                float(deptperformedcount) / depttotalcount
            anl_publishedperformed_ratio = deptperformedcount > 0 and \
                float(deptpublishedcount) / deptperformedcount or 0

            dataline['Requested'] = grouptotalcount
            dataline['Performed'] = groupperformedcount
            dataline['Published'] = grouppublishedcount
            dataline['PerformedRequestedRatio'] = group_performedrequested_ratio
            dataline['PerformedRequestedRatioPercentage'] = \
                '{0:.0f}'.format(group_performedrequested_ratio * 100) + "%"
            dataline['PublishedPerformedRatio'] = group_publishedperformed_ratio
            dataline['PublishedPerformedRatioPercentage'] = \
                '{0:.0f}'.format(group_publishedperformed_ratio * 100) + "%"

            deptline['Requested'] = depttotalcount
            deptline['Performed'] = deptperformedcount
            deptline['Published'] = deptpublishedcount
            deptline['PerformedRequestedRatio'] = anl_performedrequested_ratio
            deptline['PerformedRequestedRatioPercentage'] = \
                '{0:.0f}'.format(anl_performedrequested_ratio * 100) + "%"
            deptline['PublishedPerformedRatio'] = anl_publishedperformed_ratio
            deptline['PublishedPerformedRatioPercentage'] = \
                '{0:.0f}'.format(anl_publishedperformed_ratio * 100) + "%"

            dataline['Departments'][department] = deptline
            datalines[group] = dataline

        # Footer total data (totalcount >= 1: the empty case returned
        # above).
        total_performedrequested_ratio = \
            float(totalperformedcount) / totalcount
        total_publishedperformed_ratio = totalperformedcount > 0 and \
            float(totalpublishedcount) / totalperformedcount or 0

        footline = {'Requested': totalcount,
                    'Performed': totalperformedcount,
                    'Published': totalpublishedcount,
                    'PerformedRequestedRatio': total_performedrequested_ratio,
                    'PerformedRequestedRatioPercentage':
                        '{0:.0f}'.format(total_performedrequested_ratio * 100) + "%",
                    'PublishedPerformedRatio': total_publishedperformed_ratio,
                    'PublishedPerformedRatioPercentage':
                        '{0:.0f}'.format(total_publishedperformed_ratio * 100) + "%"}

        footlines['Total'] = footline

        self.report_data = {'parameters': parms,
                            'datalines': datalines,
                            'footlines': footlines}

        return {'report_title': _('Analyses summary per department'),
                'report_data': self.template()}
class Report(BrowserView):
    """Productivity report: one data line per Analysis Request created
    within the selected date range ("Data entry day book")."""
    implements(IViewView)
    default_template = ViewPageTemplateFile("templates/productivity.pt")
    template = ViewPageTemplateFile(
        "templates/productivity_dataentrydaybook.pt")

    def __init__(self, context, request, report=None):
        super(Report, self).__init__(context, request)
        # Optional pre-built report payload handed in by the caller.
        self.report = report
        self.selection_macros = SelectionMacrosView(self.context, self.request)

    def __call__(self):
        """Build ``self.report_data`` and render the template.

        :returns: a dict with ``report_title`` and ``report_data``, or
            the fallback template when no Analysis Requests match.
        """
        parms = []
        titles = []

        # Apply filters
        self.contentFilter = {'portal_type': 'AnalysisRequest'}
        val = self.selection_macros.parse_daterange(self.request,
                                                    'getDateCreated',
                                                    _('Date Created'))
        if val:
            self.contentFilter[val['contentFilter']
                               [0]] = val['contentFilter'][1]
            parms.append(val['parms'])
            titles.append(val['titles'])

        # Query the catalog and store results in a dictionary
        ars = self.bika_catalog(self.contentFilter)
        if not ars:
            message = _("No Analysis Requests matched your query")
            self.context.plone_utils.addPortalMessage(message, "error")
            return self.default_template()

        datalines = {}
        footlines = {}
        totalcreatedcount = len(ars)
        totalreceivedcount = 0
        totalpublishedcount = 0
        totalanlcount = 0
        totalreceptionlag = 0
        totalpublicationlag = 0

        for ar in ars:
            ar = ar.getObject()
            datecreated = ar.created()
            datereceived = ar.getDateReceived()
            datepublished = ar.getDatePublished()
            # NOTE(review): the lags are placeholders -- they are never
            # derived from the dates above, so every lag column is 0.
            receptionlag = 0
            publicationlag = 0
            anlcount = len(ar.getAnalyses())

            dataline = {
                "AnalysisRequestID": ar.getRequestID(),
                "DateCreated": self.ulocalized_time(datecreated),
                "DateReceived": self.ulocalized_time(datereceived),
                "DatePublished": self.ulocalized_time(datepublished),
                "ReceptionLag": receptionlag,
                "PublicationLag": publicationlag,
                "TotalLag": receptionlag + publicationlag,
                "BatchID": ar.getBatch(),
                "SampleID": ar.getSample().Title(),
                "SampleType": ar.getSampleTypeTitle(),
                "NumAnalyses": anlcount,
                "ClientID": ar.aq_parent.id,
                "Creator": ar.Creator(),
                "Remarks": ar.getRemarks()
            }

            datalines[ar.getRequestID()] = dataline

            # Reuse the dates fetched above instead of calling the
            # accessors a second time.
            if datereceived:
                totalreceivedcount += 1
            if datepublished:
                totalpublishedcount += 1
            totalanlcount += anlcount
            totalreceptionlag += receptionlag
            totalpublicationlag += publicationlag

        # Footer total data.  totalcreatedcount >= 1 is guaranteed (the
        # empty case returned above), but totalreceivedcount may be 0:
        # the original raised ZeroDivisionError then.
        totalreceivedcreated_ratio = \
            float(totalreceivedcount) / totalcreatedcount
        totalpublishedcreated_ratio = \
            float(totalpublishedcount) / totalcreatedcount
        if totalreceivedcount:
            totalpublishedreceived_ratio = \
                float(totalpublishedcount) / totalreceivedcount
        else:
            totalpublishedreceived_ratio = 0.0

        # Average lags use true (float) division; the original used
        # Python 2 integer division.
        footline = {
            'Created': totalcreatedcount,
            'Received': totalreceivedcount,
            'Published': totalpublishedcount,
            'ReceivedCreatedRatio': totalreceivedcreated_ratio,
            'ReceivedCreatedRatioPercentage':
                '{0:.0f}'.format(totalreceivedcreated_ratio * 100) + "%",
            'PublishedCreatedRatio': totalpublishedcreated_ratio,
            'PublishedCreatedRatioPercentage':
                '{0:.0f}'.format(totalpublishedcreated_ratio * 100) + "%",
            'PublishedReceivedRatio': totalpublishedreceived_ratio,
            'PublishedReceivedRatioPercentage':
                '{0:.0f}'.format(totalpublishedreceived_ratio * 100) + "%",
            'AvgReceptionLag':
                '{0:.1f}'.format(float(totalreceptionlag) / totalcreatedcount),
            'AvgPublicationLag':
                '{0:.1f}'.format(float(totalpublicationlag) / totalcreatedcount),
            'AvgTotalLag':
                '{0:.1f}'.format(
                    float(totalreceptionlag + totalpublicationlag) /
                    totalcreatedcount),
            'NumAnalyses': totalanlcount
        }

        footlines['Total'] = footline

        self.report_data = {
            'parameters': parms,
            'datalines': datalines,
            'footlines': footlines
        }

        return {
            'report_title': _('Data entry day book'),
            'report_data': self.template()
        }
class Report(BrowserView):
    """Quality-control report: result graphs for the reference analyses of a
    selected reference sample / analysis service ("Reference analysis QC").

    On success renders ``templates/qualitycontrol_referenceanalysisqc.pt``;
    a bad selection or an empty query falls back to the generic
    ``templates/qualitycontrol.pt`` form with a portal error message.
    """
    implements(IViewView)
    template = ViewPageTemplateFile(
        "templates/qualitycontrol_referenceanalysisqc.pt")
    # if unsuccessful we return here:
    default_template = ViewPageTemplateFile("templates/qualitycontrol.pt")

    def __init__(self, context, request, report=None):
        """``report`` is optional and only stored; the selection-macros view
        parses the report form's filter fields.
        """
        super(Report, self).__init__(context, request)
        self.report = report
        self.selection_macros = SelectionMacrosView(self.context, self.request)

    def __call__(self):
        """Build ``self.report_data`` (header, parms, tables, footnotes) and
        return ``{'report_title', 'report_data'}``; returns the rendered
        fallback template instead when the selection or query is unusable.
        """
        MinimumResults = self.context.bika_setup.getMinimumResults()
        # Inline <img> markers used in the table footnotes.
        warning_icon = "<img " +\
            "src='"+self.portal_url+"/++resource++bika.lims.images/warning.png' " +\
            "height='9' width='9'/>"
        error_icon = "<img " +\
            "src='"+self.portal_url+"/++resource++bika.lims.images/exclamation.png' " +\
            "height='9' width='9'/>"

        header = _("Reference analysis QC")
        subheader = _("Reference analysis quality control graphs ")

        # Only completed (verified/published) reference analyses.
        self.contentFilter = {
            'portal_type': 'ReferenceAnalysis',
            'review_state': ['verified', 'published'],
        }

        self.parms = []
        titles = []

        # The reference sample is mandatory; bail out to the default form
        # when it is missing.
        sample_uid = self.request.form.get('ReferenceSampleUID', '')
        sample = self.reference_catalog.lookupObject(sample_uid)
        if not sample:
            message = _("No reference sample was selected.")
            self.context.plone_utils.addPortalMessage(message, 'error')
            return self.default_template()
        self.parms.append({
            'title': _("Reference Sample"),
            'value': sample.Title()
        })
        titles.append(sample.Title())

        # The analysis service is mandatory as well.
        service_uid = self.request.form.get('ReferenceServiceUID', '')
        service = self.reference_catalog.lookupObject(service_uid)
        if not service:
            message = _("No analysis services were selected.")
            self.context.plone_utils.addPortalMessage(message, 'error')
            return self.default_template()

        # Restrict the query to analyses contained in the selected sample.
        self.contentFilter['path'] = {
            "query": "/".join(sample.getPhysicalPath()),
            "level": 0
        }
        keyword = service.getKeyword()
        unit = service.getUnit()
        service_title = "%s (%s)" % (service.Title(), service.getKeyword())
        # NOTE(review): bare except — presumably guards services without a
        # precision field; confirm a narrower exception would not suffice.
        try:
            precision = str(service.getPrecision())
        except:
            precision = "2"
        self.parms.append({
            'title': _("Analysis Service"),
            'value': service.Title()
        })
        titles.append(service.Title())

        # Optional "Date Verified" range from the request form.
        val = self.selection_macros.parse_daterange(self.request,
                                                    'getDateVerified',
                                                    'DateVerified')
        if val:
            self.contentFilter[val['contentFilter']
                               [0]] = val['contentFilter'][1]
            self.parms.append(val['parms'])
            titles.append(val['titles'])

        # GET min/max for range checking

        proxies = self.bika_analysis_catalog(self.contentFilter)
        if not proxies:
            message = _("No analyses matched your query")
            self.context.plone_utils.addPortalMessage(message, 'error')
            return self.default_template()

        ## Compile a list with all relevant analysis data
        analyses = []
        out_of_range_count = 0
        in_shoulder_range_count = 0
        plot_data = ""  # NOTE(review): unused; the loop below builds 'plotdata'
        formatted_results = []
        results = []
        tabledata = []

        for analysis in proxies:
            analysis = analysis.getObject()
            analyses.append(analysis)
            # NOTE(review): a non-numeric result is silently skipped, so
            # 'result' keeps the previous iteration's value (and raises
            # NameError on the very first iteration) — confirm intended.
            try:
                result = float(analysis.getResult())
            except ValueError:
                pass
            results.append(result)
            captured = self.ulocalized_time(analysis.getResultCaptureDate(),
                                            long_format=1)
            analyst = analysis.getAnalyst()
            title = analysis.getId()
            # Format to the service's precision, e.g. "%.2f".
            formatted_result = str("%." + precision + "f") % result
            formatted_results.append(formatted_result)
            tabledata.append({
                _("Analysis"): title,
                _("Result"): formatted_result,
                _("Analyst"): analyst,
                _("Captured"): captured
            })
        # One formatted result per line: the gnuplot data-file contents.
        plotdata = "\n".join(formatted_results)
        plotdata.encode('utf-8')  # NOTE(review): return value discarded; no-op

        ### CHECK RANGES

        self.parms += [
            {
                "title": _("Total analyses"),
                "value": len(analyses)
            },
        ]

        ## This variable is output to the TAL
        self.report_data = {
            'header': header,
            'subheader': subheader,
            'parms': self.parms,
            'tables': [],
            'footnotes': [],
        }

        # gnuplot histogram script template, filled via %-interpolation below.
        # NOTE(review): "%(highest)" / "%(lowest)" in the binwidth/scale lines
        # lack the trailing "s" conversion, and "binnumber" is undefined (the
        # helper is "bin_number") — the script looks broken; confirm against
        # a working report.
        plotscript = """
        set terminal png transparent truecolor enhanced size 700,350 font "Verdana, 8"
        set title "%(title)s"
        set xlabel "%(xlabel)s"
        set ylabel "%(ylabel)s"
        set yzeroaxis
        #set logscale
        set xrange [highest:lowest]
        set xtics border nomirror rotate by 90 font "Verdana, 5" offset 0,-3
        set ytics nomirror

        binwidth = %(highest)-%(lowest)/100
        scale = (binwidth/(%(highest)-%(lowest)))

        bin_number(x) = floor(x/binwidth)
        rounded(x) = binwidth * ( binnumber(x) + 0.5 )

        #f(x) = mean_x
        #fit f(x) 'gpw_DATAFILE_gpw' u 1:2 via mean_x
        #stddev_x = sqrt(FIT_WSSR / (FIT_NDF + 1))
        #
        #plot mean_y-stddev_y with lines y1=mean_y lt 1 lc rgb "#afafaf",\
        #     mean_y+stddev_y with lines y1=mean_y lt 1 lc rgb "#afafaf",\
        #     mean_y with lines lc rgb '#000000' lw 1,\
        plot "gpw_DATAFILE_gpw" using (rounded($1)):(1) smooth frequency
        """

        # Only render a plot when enough results exist (lab-configured
        # minimum); otherwise the table gets an empty plot_url.
        if MinimumResults <= len(analyses):
            _plotscript = str(plotscript)%\
            {'title': "",
             'xlabel': "",
             'ylabel': "",
             'highest': max(results),
             'lowest': min(results)}

            plot_png = plot(str(plotdata),
                            plotscript=str(_plotscript),
                            usefifo=False)

            # NOTE(review): debug leftovers (Python 2 print statements).
            print plotdata
            print _plotscript
            print "-------"

            # Temporary PNG data file
            fh, data_fn = tempfile.mkstemp(suffix='.png')
            os.write(fh, plot_png)
            plot_url = data_fn
            # Scheduled for cleanup after the response is rendered.
            self.request['to_remove'].append(data_fn)

            plot_url = data_fn  # NOTE(review): duplicate assignment
        else:
            plot_url = ""

        table = {
            'title':
            "%s: %s" %
            (self.context.translate(_("Analysis Service")), service_title),
            'columns':
            [_('Analysis'),
             _('Result'),
             _('Analyst'),
             _('Captured')],
            'parms': [],
            'data':
            tabledata,
            'plot_url':
            plot_url,
        }

        self.report_data['tables'].append(table)

        ## footnotes
        # NOTE(review): both counters are never incremented (the "CHECK
        # RANGES" section above is empty), so these branches are currently
        # dead; also 'translate' is only bound inside the first branch.
        if out_of_range_count:
            msgid = _("Analyses out of range")
            translate = self.context.translate
            self.report_data['footnotes'].append(
                "%s %s" % (error_icon, translate(msgid)))
        if in_shoulder_range_count:
            msgid = _("Analyses in error shoulder range")
            self.report_data['footnotes'].append(
                "%s %s" % (warning_icon, translate(msgid)))

        self.report_data['parms'].append({
            "title": _("Analyses out of range"),
            "value": out_of_range_count
        })
        self.report_data['parms'].append({
            "title":
            _("Analyses in error shoulder range"),
            "value":
            in_shoulder_range_count
        })

        title = self.context.translate(header)
        if titles:
            title += " (%s)" % " ".join(titles)
        return {
            'report_title': title,
            'report_data': self.template(),
        }
 def __init__(self, context, request, report=None):
     """Initialise the view, prime the selection-macros helper and keep the
     optional ``report`` argument for later use.
     """
     super(Report, self).__init__(context, request)
     self.selection_macros = SelectionMacrosView(self.context, self.request)
     self.report = report
# Example #16
# 0
 def __call__(self):
     """Render the report template after priming the selection macros, the
     analyst list and the view icon used by the template.
     """
     self.selection_macros = SelectionMacrosView(self.context, self.request)
     self.getAnalysts = getUsers(self.context,
                                 ['Manager', 'LabManager', 'Analyst'])
     self.icon = self.portal_url + "/++resource++bika.lims.images/report_big.png"
     return self.template()
class Report(BrowserView):
    """Productivity report: per-month counts of samples received vs. samples
    with at least one published analysis ("Samples received vs. reported").

    Renders ``templates/productivity_samplereceivedvsreported.pt`` on
    success, or the generic ``templates/productivity.pt`` when no sample
    matches the query.
    """
    implements(IViewView)
    default_template = ViewPageTemplateFile("templates/productivity.pt")
    template = ViewPageTemplateFile(
        "templates/productivity_samplereceivedvsreported.pt")

    def __init__(self, context, request, report=None):
        """``report`` is optional and only stored; the selection-macros view
        parses the report form's filter fields.
        """
        super(Report, self).__init__(context, request)
        self.report = report
        self.selection_macros = SelectionMacrosView(self.context, self.request)

    def __call__(self):
        """Build ``self.report_data`` (parameters, per-month datalines and a
        'Total' footline) and return ``{'report_title', 'report_data'}``;
        returns the rendered fallback template when nothing matches.
        """
        parms = []
        titles = []

        # Received (or later-state) samples, oldest first.
        self.contentFilter = {
            'portal_type': 'Sample',
            'review_state': ['sample_received', 'expired', 'disposed'],
            'sort_on': 'getDateReceived'
        }

        # Optional "Date Received" range from the request form.
        val = self.selection_macros.parse_daterange(self.request,
                                                    'getDateReceived',
                                                    _('Date Received'))
        if val:
            self.contentFilter[val['contentFilter']
                               [0]] = val['contentFilter'][1]
            parms.append(val['parms'])
            titles.append(val['titles'])

        # Query the catalog and store results in a dictionary
        samples = self.bika_catalog(self.contentFilter)
        if not samples:
            message = _("No samples matched your query")
            self.context.plone_utils.addPortalMessage(message, "error")
            return self.default_template()

        datalines = {}
        footlines = {}
        total_received_count = 0
        total_published_count = 0
        for sample in samples:
            sample = sample.getObject()

            # A sample counts as "published" when at least one of its
            # analyses has a publication date.
            published = False
            analyses = sample.getAnalyses({})
            for analysis in analyses:
                analysis = analysis.getObject()
                if not (analysis.getDateAnalysisPublished() is None):
                    published = True
                    break

            datereceived = sample.getDateReceived()
            monthyear = datereceived.strftime(
                "%B") + " " + datereceived.strftime("%Y")
            received = 1
            publishedcnt = published and 1 or 0
            if (monthyear in datalines):
                received = datalines[monthyear]['ReceivedCount'] + 1
                publishedcnt = published and datalines[monthyear][
                    'PublishedCount'] + 1 or datalines[monthyear][
                        'PublishedCount']
            # Force float division: with Python 2 integer operands the ratio
            # would truncate to 0 (or 1), making the percentage always "0%".
            ratio = float(publishedcnt) / received
            dataline = {
                'MonthYear': monthyear,
                'ReceivedCount': received,
                'PublishedCount': publishedcnt,
                'UnpublishedCount': received - publishedcnt,
                'Ratio': ratio,
                # '.1f', not '.1g': one significant digit would render e.g.
                # 85.7 as '9e+01%'.
                'RatioPercentage': ('{0:.1f}'.format(ratio * 100)) + "%"
            }
            datalines[monthyear] = dataline

            total_received_count += 1
            total_published_count = published and total_published_count + 1 or total_published_count

        # Footer total data (float division, as above).
        ratio = float(total_published_count) / total_received_count
        footline = {
            'ReceivedCount': total_received_count,
            'PublishedCount': total_published_count,
            'UnpublishedCount': total_received_count - total_published_count,
            'Ratio': ratio,
            'RatioPercentage': ('{0:.1f}'.format(ratio * 100)) + "%"
        }
        footlines['Total'] = footline

        self.report_data = {
            'parameters': parms,
            'datalines': datalines,
            'footlines': footlines
        }

        return {
            'report_title': _('Samples received vs. reported'),
            'report_data': self.template()
        }
class Report(BrowserView):
    """Productivity report: one line per analysis of every sample received
    in the selected period ("Daily samples received").

    Renders ``templates/productivity_dailysamplesreceived.pt`` on success,
    falls back to ``templates/productivity.pt`` when nothing matches, and
    streams a CSV download when ``output_format=CSV`` is requested.
    """
    implements(IViewView)
    default_template = ViewPageTemplateFile("templates/productivity.pt")
    template = ViewPageTemplateFile(
        "templates/productivity_dailysamplesreceived.pt")

    def __init__(self, context, request, report=None):
        """Store the optional ``report`` and prime the selection-macros view
        that parses the report form's filter fields.
        """
        super(Report, self).__init__(context, request)
        self.report = report
        self.selection_macros = SelectionMacrosView(self.context, self.request)

    def __call__(self):
        """Build ``self.report_data`` and render the template, or write a
        CSV attachment straight to the response.
        """
        parms = []
        titles = []

        # Received (or later-state) samples, oldest first.
        self.contentFilter = {'portal_type': 'Sample',
                              'review_state': ['sample_received', 'expired',
                                               'disposed'],
                              'sort_on': 'getDateReceived'}

        # Optional "Date Received" range taken from the request form.
        date_filter = self.selection_macros.parse_daterange(
            self.request, 'getDateReceived', _('Date Received'))
        if date_filter:
            flt = date_filter['contentFilter']
            self.contentFilter[flt[0]] = flt[1]
            parms.append(date_filter['parms'])
            titles.append(date_filter['titles'])

        brains = self.bika_catalog(self.contentFilter)
        if not brains:
            self.context.plone_utils.addPortalMessage(
                _("No samples matched your query"), "error")
            return self.default_template()

        # One data line per analysis of every matching sample.
        datalines = []
        analyses_count = 0
        for brain in brains:
            sample = brain.getObject()
            for analysis_brain in sample.getAnalyses({}):
                analysis = analysis_brain.getObject()
                sampling_date = sample.getSamplingDate()
                datalines.append({
                    'AnalysisKeyword': analysis.getKeyword(),
                    'AnalysisTitle': analysis.getServiceTitle(),
                    'SampleID': sample.getSampleID(),
                    'SampleType': sample.getSampleType().Title(),
                    'SampleDateReceived': self.ulocalized_time(
                        sample.getDateReceived(), long_format=1),
                    'SampleSamplingDate': self.ulocalized_time(
                        sampling_date, long_format=1) if sampling_date else '',
                })
                analyses_count += 1

        # Footer: total number of analyses listed.
        footlines = [{'TotalCount': analyses_count}]

        self.report_data = {
            'parameters': parms,
            'datalines': datalines,
            'footlines': footlines}

        if self.request.get('output_format', '') == 'CSV':
            import csv
            import StringIO
            import datetime

            fieldnames = [
                'SampleID',
                'SampleType',
                'SampleSamplingDate',
                'SampleDateReceived',
                'AnalysisTitle',
                'AnalysisKeyword',
            ]
            buf = StringIO.StringIO()
            writer = csv.DictWriter(buf, fieldnames=fieldnames)
            # Header row, written by hand (pre-writeheader() idiom).
            writer.writerow(dict((name, name) for name in fieldnames))
            for line in datalines:
                writer.writerow(line)
            report_data = buf.getvalue()
            buf.close()
            stamp = datetime.datetime.now().strftime("%Y%m%d%H%M")
            setheader = self.request.RESPONSE.setHeader
            setheader('Content-Type', 'text/csv')
            setheader("Content-Disposition",
                      "attachment;filename=\"dailysamplesreceived_%s.csv\"" % stamp)
            self.request.RESPONSE.write(report_data)
        else:
            return {'report_title': _('Daily samples received'),
                    'report_data': self.template()}
# Example #19
# 0
class Report(BrowserView):
    """Report view producing one line per analysis, enriched with patient,
    analysis-request and facility data, rendered via
    ``templates/report_out.pt`` or downloadable as CSV
    (``output_format=CSV``).
    """
    implements(IViewView)
    template = ViewPageTemplateFile("templates/report_out.pt")

    def __init__(self, context, request, report=None):
        BrowserView.__init__(self, context, request)
        self.report = report
        self.selection_macros = SelectionMacrosView(self.context, self.request)
        # UID -> catalog brain cache, filled by map_uids_to_brains().
        self.uids_map = dict()

    def __call__(self):
        """Query analyses matching the form filters, build one data line per
        analysis, then either render the template or stream a CSV file.
        """
        # get all the data into datalines
        catalog = api.get_tool(CATALOG_ANALYSIS_LISTING, self.context)
        self.report_content = {}
        parms = []
        headings = {}
        headings['header'] = ""
        count_all = 0
        query = {}
        # Getting the query filters
        val = self.selection_macros.parse_client(self.request)
        if val:
            query[val['contentFilter'][0]] = val['contentFilter'][1]
            parms.append(val['parms'])

        val = self.selection_macros.parse_sampletype(self.request)
        if val:
            query[val['contentFilter'][0]] = val['contentFilter'][1]
            parms.append(val['parms'])

        val = self.selection_macros.parse_analysisservice(self.request)
        if val:
            query[val['contentFilter'][0]] = val['contentFilter'][1]
            parms.append(val['parms'])

        val = self.selection_macros.parse_daterange(self.request, 'created',
                                                    'Created')
        if val:
            query[val['contentFilter'][0]] = val['contentFilter'][1]
            parms.append(val['parms'])

        val = self.selection_macros.parse_daterange(self.request,
                                                    'getDatePublished',
                                                    'Date Published')

        if val:
            query[val['contentFilter'][0]] = val['contentFilter'][1]
            parms.append(val['parms'])

        val = self.selection_macros.parse_daterange(self.request,
                                                    'getDateReceived',
                                                    'Date Received')

        if val:
            query[val['contentFilter'][0]] = val['contentFilter'][1]
            parms.append(val['parms'])

        # NOTE(review): 'columns' says 25 but col_heads lists 23 headings,
        # and the loop below appends fewer items than that per dataline —
        # confirm the template copes with the mismatch.
        formats = {
            'columns':
            25,
            'col_heads': [
                _('Lab Number'),
                _('Testing Lab'),
                _('First Name'),
                _('Middle Name'),
                _('Last Name'),
                _('Gender'),
                _('Age'),
                _('Age Type'),
                _('Town'),
                _('Reporting County'),
                _('Reporting District'),
                _('Reporting Facility'),
                _('Date Onset'),
                _('Date of Reporting'),
                _('Was Specimen collected? '),
                _('Date specimen collected'),
                _('Type of Specimen'),
                _('Date Specimen Sent to Lab'),
                _('Date Specimen Received in Lab'),
                _('Date Published'),
                _('Condition of Specimen'),
                _('Comment'),
                _('Test Result')
            ],
        }
        # and now lets do the actual report lines
        datalines = []
        laboratory = self.context.bika_setup.laboratory

        # Get analyses brains
        logger.info("Searching Analyses: {}".format(repr(query)))
        brains = catalog(query)

        # Collect all AR uids and Patient UIDs so only one query to get all
        # them will be needed
        ar_uids = list(set([brain.getParentUID for brain in brains]))
        ar_uids = filter(None, ar_uids)
        self.map_uids_to_brains(ar_uids)

        logger.info("Filling datalines with {} Analyses".format(len(brains)))
        for analysis in brains:
            # We get the AR and the patient of the
            # analysis here to avoid having to get them
            # inside each of the following method calls.
            # If they are not found its value will be None
            ar_brain = self.get_ar_brain(analysis)
            patient_brain = self.get_patient_brain(analysis)

            dataline = []

            # Lab Number
            dataitem = self.get_lab_number(analysis)
            dataline.append(dataitem)

            # Testing Lab
            dataline.append({'value': laboratory.Title()})

            #First Name
            dataitem = self.get_firstname(patient_brain)
            dataline.append(dataitem)

            #Middle Name
            dataitem = self.get_middlename(patient_brain)
            dataline.append(dataitem)

            #Last Name
            dataitem = self.get_lastname(patient_brain)
            dataline.append(dataitem)

            #Gender
            dataitem = self.get_gender(patient_brain)
            dataline.append(dataitem)

            #Age
            dataitem = self.get_age(patient_brain)
            dataline.append(dataitem)

            #AgeType
            dataitem = self.get_agetype(patient_brain)
            dataline.append(dataitem)

            # Facility Province
            dataitem = self.get_facility_province(ar_brain)
            dataline.append(dataitem)

            # Facility District
            dataitem = self.get_facility_district(ar_brain)
            dataline.append(dataitem)

            # Facility
            dataitem = self.get_client_name(ar_brain)
            dataline.append(dataitem)

            # Date of Collection - Onset
            dataitem = self.get_date_of_collection(ar_brain)
            dataline.append(dataitem)

            # Date Reporting
            dataitem = self.get_date_of_dispatch(ar_brain)
            dataline.append(dataitem)

            # NOTE(review): the collection date is appended three times in a
            # row here (Onset / "Was Specimen collected?" / "Date specimen
            # collected") — confirm this matches the intended columns.
            # Specimen Collected
            dataitem = self.get_date_of_collection(ar_brain)
            dataline.append(dataitem)

            # Date of Collection - Onset
            dataitem = self.get_date_of_collection(ar_brain)
            dataline.append(dataitem)

            # Specimen Type
            dataitem = self.get_specimentype(ar_brain)
            dataline.append(dataitem)

            # Date of Dispatch
            dataitem = self.get_date_of_dispatch(ar_brain)
            dataline.append(dataitem)

            # Date of Receiving
            dataitem = self.get_date_of_receiving(ar_brain)
            dataline.append(dataitem)

            # Date of Publication
            dataitem = self.get_date_published(analysis)
            dataline.append(dataitem)

            # Condition of Specimen
            #dataitem = self.get_date_published(analysis)
            #dataline.append(dataitem)

            # Comment
            #dataitem = self.get_date_published(analysis)
            ##dataline.append(dataitem)

            # Sex
            #dataitem = self.get_patient_sex(patient_brain)
            #dataline.append(dataitem)

            # Date Of Birth
            #dataitem = self.get_patient_dob(patient_brain)
            #dataline.append(dataitem)

            # Date of Testing
            #dataitem = self.get_date_of_testing(analysis)
            #dataline.append(dataitem)

            #Test Result
            dataitem = self.get_result(analysis)
            dataline.append(dataitem)

            count_all += 1
            datalines.append(dataline)

        logger.info("Generating output")

        # footer data
        footlines = []
        footline = []
        footitem = {'value': _('Total'), 'class': 'total_label'}
        footline.append(footitem)
        footitem = {'value': count_all}
        footline.append(footitem)
        footlines.append(footline)

        self.report_content = {
            'headings': headings,
            'parms': parms,
            'formats': formats,
            'datalines': datalines,
            'footings': footlines
        }

        if self.request.get('output_format', '') == 'CSV':
            fieldnames = formats.get('col_heads')
            output = StringIO.StringIO()
            dw = csv.DictWriter(output,
                                extrasaction='ignore',
                                fieldnames=fieldnames)
            dw.writerow(dict((fn, fn) for fn in fieldnames))
            # NOTE(review): each dataline holds fewer items than fieldnames,
            # so row[row_idx] looks like it raises IndexError for the
            # trailing columns — confirm the CSV export actually works.
            for row in datalines:
                dict_row = {}
                row_idx = 0
                for column in fieldnames:
                    dict_row[column] = row[row_idx]['value']
                    row_idx += 1
                dw.writerow(dict_row)

            report_data = output.getvalue()
            output.close()
            date = datetime.datetime.now().strftime("%Y%m%d%H%M")
            setheader = self.request.RESPONSE.setHeader
            setheader('Content-Type', 'text/csv')
            setheader(
                "Content-Disposition",
                "attachment;filename=\"analysisresultbyclient_%s.csv"
                "\"" % date)
            self.request.RESPONSE.write(report_data)
        else:
            return {
                'report_title': t(headings['header']),
                'report_data': self.template()
            }

    @viewcache.memoize
    def get_brain(self, uid, catalog):
        """Return the brain for ``uid``, preferring the prefetched
        ``uids_map``; falls back to a catalog query (memoized per view).
        """
        brain = self.uids_map.get(uid, None)
        if brain:
            return brain

        logger.warning("UID not found in brains map: {}".format(uid))
        cat = api.get_tool(catalog)
        brain = cat(UID=uid)
        if not brain or len(brain) == 0:
            return None
        return brain[0]

    @viewcache.memoize
    def get_object(self, brain_or_object_or_uid):
        """Get the full content object. Returns None if the param passed in is
        not a valid, not a valid object or not found

        :param brain_or_object_or_uid: UID/Catalog brain/content object
        :returns: content object
        """
        if api.is_uid(brain_or_object_or_uid):
            brain = self.uids_map.get(brain_or_object_or_uid, None)
            if brain:
                return self.get_object(brain)
            return api.get_object_by_uid(brain_or_object_or_uid, default=None)
        if api.is_object(brain_or_object_or_uid):
            return api.get_object(brain_or_object_or_uid)
        return None

    # NOTE(review): despite the inner docstring "Client Sample ID", this
    # returns the laboratory tax number for every analysis, with a bare
    # except falling back to 'MPH' — confirm intended.
    def get_lab_number(self, analysis):
        try:
            """Client Sample ID"""
            return {'value': self.context.bika_setup.laboratory.getTaxNumber()}
        except:
            return {'value': 'MPH'}

    def get_firstname(self, patient):
        if not patient:
            return {'value': ''}
        return {'value': patient.getFirstname}

    def get_middlename(self, patient):
        if not patient:
            return {'value': ''}
        return {'value': patient.getMiddlename}

    def get_lastname(self, patient):
        if not patient:
            return {'value': ''}
        return {'value': patient.getSurname}

    def get_gender(self, patient):
        if not patient:
            return {'value': ''}
        return {'value': patient.getGender}

    def get_age(self, patient):
        if not patient:
            return {'value': ''}
        return {'value': patient.getAgeSplittedStr}

    # NOTE(review): same metadata as get_age (getAgeSplittedStr) — looks
    # like a copy-paste; confirm the intended "age type" source.
    def get_agetype(self, patient):
        if not patient:
            return {'value': ''}
        return {'value': patient.getAgeSplittedStr}

    def get_facility_province(self, ar):
        """Client province"""
        if not ar:
            return {'value': ''}
        return {'value': ar.getProvince}

    def get_facility_district(self, ar):
        """Client district"""
        if not ar:
            return {'value': ''}
        return {'value': ar.getDistrict}

    def get_client_name(self, ar):
        """Client name"""
        if not ar:
            return {'value': ''}
        return {'value': ar.getClientTitle}

    def get_patient_sex(self, patient):
        """Patient gender"""
        if not patient:
            return {'value': 'U'}
        genders = {'male': 'M', 'female': 'F'}
        return {'value': genders.get(patient.getGender, patient.getGender)}

    def get_patient_dob(self, patient):
        """Patient Date Of Birth"""
        if not patient:
            return {'value': ''}
        return {'value': self.ulocalized_time(patient.getBirthDate)}

    def get_date_of_collection(self, ar):
        """Patient Date Of Collection"""
        if not ar:
            return {'value': ''}
        return {'value': self.ulocalized_time(ar.getDateSampled)}

    def get_specimentype(self, ar):
        """Specimen Type"""
        if not ar:
            return {'value': ''}
        return {'value': ar.getSampleType}

    def get_date_of_receiving(self, ar):
        """Patient Date Of Receiving"""
        if not ar:
            return {'value': ''}
        return {'value': self.ulocalized_time(ar.getDateReceived)}

    def get_date_of_dispatch(self, ar):
        """Patient Date Of Publication"""
        if not ar:
            return {'value': ''}
        return {'value': self.ulocalized_time(ar.getDatePublished)}

    def get_date_of_testing(self, analysis):
        """Date Of Testing"""
        # NOTE(review): bare except hides attribute errors — confirm.
        try:
            date = analysis.getResultCaptureDate
            date = self.ulocalized_time(date)
            return {'value': date}
        except:
            return {'value': ''}

    def get_result(self, analysis):
        """Result"""
        # Unescape HTML-escaped comparison operators in the result string.
        dataitem = {
            'value': analysis.getResult.replace('&lt;',
                                                '<').replace('&gt;', '>')
        }
        return dataitem

    def get_ar_brain(self, analysis_brain):
        """
        Get the brain of the Analysis request the analysis
        is coming from.

        :param analysis_brain: The analysis brain from which
        we want to get its analysis request brain
        :return: Analysis Request brain if found else None
        """
        ar_uid = analysis_brain.getParentUID
        if not ar_uid:
            return None
        return self.get_brain(ar_uid, CATALOG_ANALYSIS_REQUEST_LISTING)

    def get_patient_brain(self, analysis_brain):
        """
        Get the brain of the patient the analysis is assigned to.

        :param analysis_brain: The analysis brain from which
        we want to get the patient it is assigned to
        :return: Patient brain if found else None
        """
        ar = self.get_ar_brain(analysis_brain)
        if not ar:
            return None
        patient_uid = ar.getPatientUID
        if not patient_uid:
            return None
        patient = self.get_brain(patient_uid, CATALOG_PATIENT_LISTING)
        return patient

    def map_uids_to_brains(self, ar_uids=None):
        """Fetches AR brains and patients and stores them in a generalist map
        where the key is the brain's uid and the value is the brain"""
        if not ar_uids:
            return
        logger.info("Mapping UIDs to brains for {} AR UIDs".format(
            len(ar_uids)))
        self.uids_map = dict()
        pat_uids = set()
        query = dict(UID=ar_uids)
        ar_brains = api.search(query, CATALOG_ANALYSIS_REQUEST_LISTING)
        for ar_brain in ar_brains:
            pat_uid = ar_brain.getPatientUID
            if pat_uid:
                pat_uids.add(pat_uid)
            self.uids_map[api.get_uid(ar_brain)] = ar_brain

        logger.info("Mapping UIDs to brains for {} Patient UIDs".format(
            len(pat_uids)))
        query = dict(UID=list(pat_uids))
        pat_brains = api.search(query, CATALOG_PATIENT_LISTING)
        self.uids_map.update({api.get_uid(pat): pat for pat in pat_brains})
# Example #20
# 0
class Report(BrowserView):
    """Quality-control report: analysis results per sample point.

    Renders ``templates/qualitycontrol_resultspersamplepoint.pt`` with one
    table (and, given enough data points, one gnuplot graph) per analysis
    service; falls back to ``qualitycontrol.pt`` on selection errors.
    """
    implements(IViewView)
    template = ViewPageTemplateFile(
        "templates/qualitycontrol_resultspersamplepoint.pt")
    # if unsuccessful we return here:
    default_template = ViewPageTemplateFile("templates/qualitycontrol.pt")

    def __init__(self, context, request, report=None):
        super(Report, self).__init__(context, request)
        self.report = report
        self.selection_macros = SelectionMacrosView(self.context, self.request)

    def get_analysis_spec(self, analysis):
        """Return the results-range dict configured for this analysis'
        keyword on its parent, or None when no range is configured.
        """
        rr = dicts_to_dict(analysis.aq_parent.getResultsRange(), 'keyword')
        return rr.get(analysis.getKeyword(), None)

    def ResultOutOfRange(self, analysis):
        """ Template wants to know, is this analysis out of range?
        We scan IResultOutOfRange adapters, and return True if any IAnalysis
        adapters trigger a result.
        """
        # NOTE: returns False when no spec is configured, True when any
        # adapter triggers, and (implicitly) None otherwise.
        adapters = getAdapters((analysis, ), IResultOutOfRange)
        spec = self.get_analysis_spec(analysis)
        for name, adapter in adapters:
            if not spec:
                return False
            if adapter(specification=spec):
                return True

    def __call__(self):
        """Collect verified/published analyses matching the request's
        selection filters and build ``self.report_data`` for the template.
        """

        MinimumResults = self.context.bika_setup.getMinimumResults()
        warning_icon = "<img " + \
                       "src='" + self.portal_url + "/++resource++bika.lims.images/warning.png' " + \
                       "height='9' width='9'/>"
        error_icon = "<img " + \
                     "src='" + self.portal_url + "/++resource++bika.lims.images/exclamation.png' " + \
                     "height='9' width='9'/>"

        header = _("Results per sample point")
        subheader = _(
            "Analysis results for per sample point and analysis service")

        self.contentFilter = {'portal_type': 'Analysis',
                              'review_state': ['verified', 'published']}

        parms = []
        titles = []

        # Each parse_* helper returns a dict with 'contentFilter' (a
        # key/value pair), 'parms' and 'titles', or a falsy value when the
        # corresponding form field was not submitted.
        val = self.selection_macros.parse_client(self.request)
        if val:
            self.contentFilter[val['contentFilter'][0]] = val['contentFilter'][1]
            parms.append(val['parms'])
            titles.append(val['titles'])

        val = self.selection_macros.parse_samplepoint(self.request)
        # NOTE(review): sp_uid holds the whole parse-result dict, not a
        # bare UID, and is not read again below -- confirm before use.
        sp_uid = val
        if val:
            self.contentFilter[val['contentFilter'][0]] = val['contentFilter'][1]
            parms.append(val['parms'])
            titles.append(val['titles'])

        val = self.selection_macros.parse_sampletype(self.request)
        # NOTE(review): same caveat as sp_uid above.
        st_uid = val
        if val:
            self.contentFilter[val['contentFilter'][0]] = val['contentFilter'][1]
            parms.append(val['parms'])
            titles.append(val['titles'])

        # An analysis-service selection is mandatory for this report.
        val = self.selection_macros.parse_analysisservice(self.request)
        if val:
            self.contentFilter[val['contentFilter'][0]] = val['contentFilter'][1]
            parms.append(val['parms'])
        else:
            message = _("No analysis services were selected.")
            self.context.plone_utils.addPortalMessage(message, 'error')
            return self.default_template()

        val = self.selection_macros.parse_daterange(self.request,
                                                    'getDateSampled',
                                                    'DateSampled')
        if val:
            self.contentFilter[val['contentFilter'][0]] = val['contentFilter'][1]
            parms.append(val['parms'])
            titles.append(val['titles'])

        val = self.selection_macros.parse_state(self.request,
                                                'bika_worksheetanalysis_workflow',
                                                'worksheetanalysis_review_state',
                                                'Worksheet state')
        if val:
            self.contentFilter[val['contentFilter'][0]] = val['contentFilter'][1]
            parms.append(val['parms'])

        # Query the catalog and store analysis data in a dict
        analyses = {}
        out_of_range_count = 0
        in_shoulder_range_count = 0
        analysis_count = 0

        proxies = self.bika_analysis_catalog(self.contentFilter)

        if not proxies:
            message = _("No analyses matched your query")
            self.context.plone_utils.addPortalMessage(message, 'error')
            return self.default_template()

        # # Compile a list of dictionaries, with all relevant analysis data
        # grouped per "<service title> (<keyword>)" key.
        for analysis in proxies:
            analysis = analysis.getObject()
            result = analysis.getResult()
            client = analysis.aq_parent.aq_parent
            uid = analysis.UID()
            service = analysis.getService()
            keyword = service.getKeyword()
            service_title = "%s (%s)" % (service.Title(), keyword)
            result_in_range = self.ResultOutOfRange(analysis)

            if service_title not in analyses.keys():
                analyses[service_title] = []
            try:
                result = float(analysis.getResult())
            except:
                # XXX Unfloatable analysis results should be indicated
                continue
            analyses[service_title].append({
                'service': service,
                'obj': analysis,
                'Request ID': analysis.aq_parent.getId(),
                'Analyst': analysis.getAnalyst(),
                'Result': result,
                'Sampled': analysis.getDateSampled(),
                'Captured': analysis.getResultCaptureDate(),
                'Uncertainty': analysis.getUncertainty(),
                'result_in_range': result_in_range,
                'Unit': service.getUnit(),
                'Keyword': keyword,
                'icons': '',
            })
            analysis_count += 1

        # Python 2 idiom: keys() returns a list we can sort in place.
        keys = analyses.keys()
        keys.sort()

        parms += [
            {"title": _("Total analyses"), "value": analysis_count},
        ]

        ## This variable is output to the TAL
        self.report_data = {
            'header': header,
            'subheader': subheader,
            'parms': parms,
            'tables': [],
            'footnotes': [],
        }

        # gnuplot script template; the %(...)s placeholders are filled
        # per service below and gpw_DATAFILE_gpw is substituted by plot().
        plotscript = """
        set terminal png transparent truecolor enhanced size 700,350 font "Verdana, 8"
        set title "%(title)s"
        set xlabel "%(xlabel)s"
        set ylabel "%(ylabel)s"
        set key off
        #set logscale
        set timefmt "%(date_format_long)s"
        set xdata time
        set format x "%(date_format_short)s\\n%(time_format)s"
        set xrange ["%(x_start)s":"%(x_end)s"]
        set auto fix
        set offsets graph 0, 0, 1, 1
        set xtics border nomirror rotate by 90 font "Verdana, 5" offset 0,-3
        set ytics nomirror

        f(x) = mean_y
        fit f(x) 'gpw_DATAFILE_gpw' u 1:3 via mean_y
        stddev_y = sqrt(FIT_WSSR / (FIT_NDF + 1))

        plot mean_y-stddev_y with filledcurves y1=mean_y lt 1 lc rgb "#efefef",\
             mean_y+stddev_y with filledcurves y1=mean_y lt 1 lc rgb "#efefef",\
             mean_y with lines lc rgb '#ffffff' lw 3,\
             "gpw_DATAFILE_gpw" using 1:3 title 'data' with points pt 7 ps 1 lc rgb '#0000ee' lw 2,\
               '' using 1:3 smooth unique lc rgb '#aaaaaa' lw 2,\
               '' using 1:4 with lines lc rgb '#000000' lw 1,\
               '' using 1:5 with lines lc rgb '#000000' lw 1"""

        ## Compile plots and format data for display
        for service_title in keys:
            # used to calculate XY axis ranges
            result_values = [int(o['Result']) for o in analyses[service_title]]
            result_dates = [o['Sampled'] for o in analyses[service_title]]

            parms = []
            plotdata = str()

            range_min = ''
            range_max = ''

            for a in analyses[service_title]:

                a['Sampled'] = a['Sampled'].strftime(self.date_format_long) if a[
                    'Sampled'] else ''
                a['Captured'] = a['Captured'].strftime(self.date_format_long) if \
                a['Captured'] else ''

                R = a['Result']
                U = a['Uncertainty']

                a['Result'] = a['obj'].getFormattedResult()

                in_range = a['result_in_range']
                # NOTE(review): ResultOutOfRange() returns True/False/None
                # above, yet in_range[0] indexes it like a sequence --
                # this looks inconsistent with that return type; confirm
                # which adapter contract is actually in play here.
                # result out of range
                if str(in_range[0]) == 'False':
                    out_of_range_count += 1
                    a['Result'] = "%s %s" % (a['Result'], error_icon)
                # result almost out of range
                if str(in_range[0]) == '1':
                    in_shoulder_range_count += 1
                    a['Result'] = "%s %s" % (a['Result'], warning_icon)

                spec = {}
                if hasattr(a["obj"], 'specification') and a["obj"].specification:
                    spec = a["obj"].specification

                # Tab-separated gnuplot row: date, result, min, max,
                # uncertainty.
                plotdata += "%s\t%s\t%s\t%s\t%s\n" % (
                    a['Sampled'],
                    R,
                    spec.get("min", ""),
                    spec.get("max", ""),
                    U and U or 0,
                )
                # NOTE(review): encode() returns a new string; its result
                # is discarded here (no-op).
                plotdata.encode('utf-8')

            unit = analyses[service_title][0]['Unit']
            # Only plot when at least MinimumResults distinct sample dates
            # are available for this service.
            if MinimumResults <= len(dict([(d, d) for d in result_dates])):
                _plotscript = str(plotscript) % {
                    'title': "",
                    'xlabel': t(_("Date Sampled")),
                    'ylabel': unit and unit or '',
                    'x_start': "%s" % min(result_dates).strftime(
                        self.date_format_long),
                    'x_end': "%s" % max(result_dates).strftime(
                        self.date_format_long),
                    'date_format_long': self.date_format_long,
                    'date_format_short': self.date_format_short,
                    'time_format': self.time_format,
                }

                plot_png = plot(str(plotdata),
                                plotscript=str(_plotscript),
                                usefifo=False)

                # Temporary PNG data file
                fh, data_fn = tempfile.mkstemp(suffix='.png')
                os.write(fh, plot_png)
                plot_url = data_fn
                # Queue the temp file for removal after rendering.
                self.request['to_remove'].append(data_fn)

                plot_url = data_fn
            else:
                plot_url = ""

            table = {
                'title': "%s: %s" % (
                    t(_("Analysis Service")),
                    service_title),
                'parms': parms,
                'columns': ['Request ID',
                            'Analyst',
                            'Result',
                            'Sampled',
                            'Captured'],
                'data': analyses[service_title],
                'plot_url': plot_url,
            }

            self.report_data['tables'].append(table)

        # NOTE(review): assigned but not used below.
        translate = self.context.translate

        ## footnotes
        if out_of_range_count:
            msgid = _("Analyses out of range")
            self.report_data['footnotes'].append(
                "%s %s" % (error_icon, t(msgid)))
        if in_shoulder_range_count:
            msgid = _("Analyses in error shoulder range")
            self.report_data['footnotes'].append(
                "%s %s" % (warning_icon, t(msgid)))

        self.report_data['parms'].append(
            {"title": _("Analyses out of range"),
             "value": out_of_range_count})
        self.report_data['parms'].append(
            {"title": _("Analyses in error shoulder range"),
             "value": in_shoulder_range_count})

        title = t(header)
        if titles:
            title += " (%s)" % " ".join(titles)
        return {
            'report_title': title,
            'report_data': self.template(),
        }
Exemple #21
0
 # NOTE(review): this __init__ sits at an odd one-space indent and is
 # detached from any visible class -- it looks like a paste/extraction
 # artifact of the Report __init__ plus a uids_map attribute; confirm
 # which class it belongs to before relying on it.
 def __init__(self, context, request, report=None):
     """Store the report, set up selection macros and the uid->brain map."""
     BrowserView.__init__(self, context, request)
     self.report = report
     self.selection_macros = SelectionMacrosView(self.context, self.request)
     # uid -> brain cache, filled by map_uids_to_brains()
     self.uids_map = dict()
class Report(BrowserView):
    """Productivity report: samples received vs. reported, per month.

    For every month in the selected date range, counts how many samples
    were received and how many of those had at least one analysis with a
    publication date, then renders
    ``templates/productivity_samplereceivedvsreported.pt`` — or streams a
    CSV when the request asks for ``output_format=CSV``.
    """

    implements(IViewView)
    default_template = ViewPageTemplateFile("templates/productivity.pt")
    template = ViewPageTemplateFile(
        "templates/productivity_samplereceivedvsreported.pt")

    def __init__(self, context, request, report=None):
        super(Report, self).__init__(context, request)
        self.report = report
        self.selection_macros = SelectionMacrosView(self.context, self.request)

    def __call__(self):
        """Build ``self.report_data`` and render the template (or CSV)."""

        parms = []
        titles = []

        self.contentFilter = {'portal_type': 'Sample',
                              'review_state': ['sample_received', 'expired',
                                               'disposed'],
                              'sort_on': 'getDateReceived'}

        # Optional date-received range filter taken from the request form.
        val = self.selection_macros.parse_daterange(self.request,
                                                    'getDateReceived',
                                                    _('Date Received'))
        if val:
            self.contentFilter[val['contentFilter'][0]] = val['contentFilter'][1]
            parms.append(val['parms'])
            titles.append(val['titles'])

        # Query the catalog and store results in a dictionary
        samples = self.bika_catalog(self.contentFilter)
        if not samples:
            message = _("No samples matched your query")
            self.context.plone_utils.addPortalMessage(message, "error")
            return self.default_template()

        datalines = {}
        footlines = {}
        total_received_count = 0
        total_published_count = 0
        for sample in samples:
            sample = sample.getObject()

            # A sample counts as "published" as soon as any one of its
            # analyses carries a publication date.
            published = False
            analyses = sample.getAnalyses({})
            for analysis in analyses:
                analysis = analysis.getObject()
                if analysis.getDateAnalysisPublished() is not None:
                    published = True
                    break

            datereceived = sample.getDateReceived()
            monthyear = datereceived.strftime("%B") + " " + datereceived.strftime(
                "%Y")
            received = 1
            publishedcnt = published and 1 or 0
            if (monthyear in datalines):
                received = datalines[monthyear]['ReceivedCount'] + 1
                publishedcnt = published and datalines[monthyear][
                                                 'PublishedCount'] + 1 or \
                               datalines[monthyear]['PublishedCount']
            # BUGFIX: force float division; under Python 2 the original
            # integer division truncated 'Ratio' to 0 (or 1), disagreeing
            # with the float-based 'RatioPercentage' below.
            ratio = float(publishedcnt) / received
            dataline = {'MonthYear': monthyear,
                        'ReceivedCount': received,
                        'PublishedCount': publishedcnt,
                        'UnpublishedCount': received - publishedcnt,
                        'Ratio': ratio,
                        'RatioPercentage': '%02d' % (
                        100 * (float(publishedcnt) / float(received))) + '%'}
            datalines[monthyear] = dataline

            total_received_count += 1
            if published:
                total_published_count += 1

        # Footer total data (total_received_count > 0 is guaranteed by the
        # empty-result guard above).
        ratio = float(total_published_count) / total_received_count
        footline = {'ReceivedCount': total_received_count,
                    'PublishedCount': total_published_count,
                    'UnpublishedCount': total_received_count - total_published_count,
                    'Ratio': ratio,
                    'RatioPercentage': '%02d' % (100 * (
                    float(total_published_count) / float(
                        total_received_count))) + '%'
        }
        footlines['Total'] = footline

        self.report_data = {
            'parameters': parms,
            'datalines': datalines,
            'footlines': footlines}

        if self.request.get('output_format', '') == 'CSV':
            import csv
            import StringIO
            import datetime

            fieldnames = [
                'MonthYear',
                'ReceivedCount',
                'PublishedCount',
                'RatioPercentage',
            ]
            output = StringIO.StringIO()
            dw = csv.DictWriter(output, extrasaction='ignore',
                                fieldnames=fieldnames)
            # Header row, then one row per month.
            dw.writerow(dict((fn, fn) for fn in fieldnames))
            for row in datalines.values():
                dw.writerow(row)
            report_data = output.getvalue()
            output.close()
            date = datetime.datetime.now().strftime("%Y%m%d%H%M")
            setheader = self.request.RESPONSE.setHeader
            setheader('Content-Type', 'text/csv')
            setheader("Content-Disposition",
                      "attachment;filename=\"receivedvspublished_%s.csv\"" % date)
            # CSV is written straight to the response; no dict is returned
            # in this branch (matches the original behaviour).
            self.request.RESPONSE.write(report_data)
        else:
            return {'report_title': _('Samples received vs. reported'),
                    'report_data': self.template()}
class Report(BrowserView):
    """Productivity report: one line per analysis of each sample received
    in the selected date range.
    """
    implements(IViewView)
    default_template = ViewPageTemplateFile("templates/productivity.pt")
    template = ViewPageTemplateFile("templates/productivity_dailysamplesreceived.pt")

    def __init__(self, context, request, report=None):
        super(Report, self).__init__(context, request)
        self.report = report
        self.selection_macros = SelectionMacrosView(self.context, self.request)

    def __call__(self):
        """Collect the data lines and render the report template."""
        param_list = []
        title_list = []

        self.contentFilter = {'portal_type': 'Sample',
                              'review_state': ['sample_received', 'expired', 'disposed'],
                              'sort_on': 'getDateReceived'}

        # Optional date-received range filter from the request form.
        date_filter = self.selection_macros.parse_daterange(self.request,
                                                            'getDateReceived',
                                                            _('Date Received'))
        if date_filter:
            self.contentFilter[date_filter['contentFilter'][0]] = \
                date_filter['contentFilter'][1]
            param_list.append(date_filter['parms'])
            title_list.append(date_filter['titles'])

        # Query the catalog; bail out to the default template when empty.
        brains = self.bika_catalog(self.contentFilter)
        if not brains:
            message = _("No samples matched your query")
            self.context.plone_utils.addPortalMessage(message, "error")
            return self.default_template()

        # One data line per analysis of each matching sample.
        datalines = []
        analyses_count = 0
        for brain in brains:
            sample = brain.getObject()
            for analysis_brain in sample.getAnalyses({}):
                analysis = analysis_brain.getObject()
                datalines.append({
                    'AnalysisKeyword': analysis.getKeyword(),
                    'AnalysisTitle': analysis.getServiceTitle(),
                    'SampleID': sample.getSampleID(),
                    'SampleType': sample.getSampleType().Title(),
                    'SampleDateReceived': self.ulocalized_time(
                        sample.getDateReceived(), long_format=1),
                    'SampleSamplingDate': self.ulocalized_time(
                        sample.getSamplingDate()),
                })
                analyses_count += 1

        # Footer: total number of analyses listed.
        footlines = [{'TotalCount': analyses_count}]

        self.report_data = {
            'parameters': param_list,
            'datalines': datalines,
            'footlines': footlines}

        return {'report_title': _('Daily samples received'),
                'report_data': self.template()}
class Report(BrowserView):
    """Quality-control report for a single reference sample and service.

    Renders ``templates/qualitycontrol_referenceanalysisqc.pt`` with one
    result table and (given enough data) one gnuplot control graph;
    falls back to ``qualitycontrol.pt`` on selection errors.
    """
    implements(IViewView)

    template = ViewPageTemplateFile(
        "templates/qualitycontrol_referenceanalysisqc.pt")
    # if unsuccessful we return here:
    default_template = ViewPageTemplateFile("templates/qualitycontrol.pt")

    def __init__(self, context, request, report=None):
        super(Report, self).__init__(context, request)
        self.report = report
        self.selection_macros = SelectionMacrosView(self.context, self.request)

    def __call__(self):
        """Collect verified/published reference analyses for the selected
        reference sample + service and build ``self.report_data``.
        """

        header = _("Reference analysis QC")
        subheader = _("Reference analysis quality control graphs ")

        MinimumResults = self.context.bika_setup.getMinimumResults()

        warning_icon = "<img src='" + self.portal_url + "/++resource++bika.lims.images/warning.png' height='9' width='9'/>"
        error_icon = "<img src='" + self.portal_url + "/++resource++bika.lims.images/exclamation.png' height='9' width='9'/>"

        self.parms = []
        titles = []

        # Both a reference sample and a service must be selected in the
        # request form; otherwise fall back to the default template.
        sample_uid = self.request.form.get('ReferenceSampleUID', '')
        sample = self.reference_catalog.lookupObject(sample_uid)
        if not sample:
            message = _("No reference sample was selected.")
            self.context.plone_utils.addPortalMessage(message, 'error')
            return self.default_template()

        self.parms.append({
            'title': _("Reference Sample"),
            'value': sample.Title()
        })
        titles.append(sample.Title())

        service_uid = self.request.form.get('ReferenceServiceUID', '')
        service = self.reference_catalog.lookupObject(service_uid)
        if not service:
            message = _("No analysis services were selected.")
            self.context.plone_utils.addPortalMessage(message, 'error')
            return self.default_template()

        # Only reference analyses located inside the selected sample.
        self.contentFilter = {
            'portal_type': 'ReferenceAnalysis',
            'review_state': ['verified', 'published'],
            'path': {
                "query": "/".join(sample.getPhysicalPath()),
                "level": 0
            }
        }

        self.parms.append({
            'title': _("Analysis Service"),
            'value': service.Title()
        })
        titles.append(service.Title())

        val = self.selection_macros.parse_daterange(self.request,
                                                    'getDateVerified',
                                                    'DateVerified')
        if val:
            self.contentFilter[val['contentFilter']
                               [0]] = val['contentFilter'][1]
            self.parms.append(val['parms'])
            titles.append(val['titles'])

        proxies = self.bika_analysis_catalog(self.contentFilter)
        if not proxies:
            message = _("No analyses matched your query")
            self.context.plone_utils.addPortalMessage(message, 'error')
            return self.default_template()

        # Compile a list with all relevant analysis data
        analyses = []

        out_of_range_count = 0
        results = []
        capture_dates = []

        plotdata = ""
        tabledata = []

        for analysis in proxies:
            analysis = analysis.getObject()
            service = analysis.getService()
            # Reference result range configured for this service on the
            # sample (assumes exactly one entry matches service_uid).
            resultsrange = \
            [x for x in sample.getReferenceResults() if x['uid'] == service_uid][
                0]
            try:
                result = float(analysis.getResult())
                results.append(result)
            except:
                result = analysis.getResult()
            capture_dates.append(analysis.getResultCaptureDate())

            # NOTE(review): when the result is not floatable this compares
            # a string against floats -- Python 2 allows it, but the
            # outcome is arbitrary; confirm the intended behaviour.
            if result < float(resultsrange['min']) or result > float(
                    resultsrange['max']):
                out_of_range_count += 1

            try:
                precision = str(analysis.getPrecision())
            except:
                # fall back to two decimal places
                precision = "2"

            try:
                formatted_result = str("%." + precision + "f") % result
            except:
                formatted_result = result

            tabledata.append({
                _("Analysis"):
                analysis.getId(),
                _("Result"):
                formatted_result,
                _("Analyst"):
                analysis.getAnalyst(),
                _("Captured"):
                analysis.getResultCaptureDate().strftime(self.date_format_long)
            })

            # Tab-separated gnuplot row: date, result, min, max.
            plotdata += "%s\t%s\t%s\t%s\n" % (
                analysis.getResultCaptureDate().strftime(
                    self.date_format_long), result, resultsrange['min'],
                resultsrange['max'])
        # NOTE(review): encode() returns a new string; its result is
        # discarded here (no-op).
        plotdata.encode('utf-8')

        # NOTE(review): result_values is computed but never used below.
        result_values = [int(r) for r in results]
        result_dates = [c for c in capture_dates]

        self.parms += [
            {
                "title": _("Total analyses"),
                "value": len(proxies)
            },
        ]

        # # This variable is output to the TAL
        self.report_data = {
            'header': header,
            'subheader': subheader,
            'parms': self.parms,
            'tables': [],
            'footnotes': [],
        }

        # Only render a control graph when enough results are available;
        # gpw_DATAFILE_gpw is substituted by plot().
        if MinimumResults <= len(proxies):
            plotscript = """
            set terminal png transparent truecolor enhanced size 700,350 font "Verdana, 8"
            set title "%(title)s"
            set xlabel "%(xlabel)s"
            set ylabel "%(ylabel)s"
            set key off
            #set logscale
            set timefmt "%(timefmt)s"
            set xdata time
            set format x "%(xformat)s"
            set xrange ["%(x_start)s":"%(x_end)s"]
            set auto fix
            set offsets graph 0, 0, 1, 1
            set xtics border nomirror rotate by 90 font "Verdana, 5" offset 0,-3
            set ytics nomirror

            f(x) = mean_y
            fit f(x) 'gpw_DATAFILE_gpw' u 1:3 via mean_y
            stddev_y = sqrt(FIT_WSSR / (FIT_NDF + 1))

            plot mean_y-stddev_y with filledcurves y1=mean_y lt 1 lc rgb "#efefef",\
                 mean_y+stddev_y with filledcurves y1=mean_y lt 1 lc rgb "#efefef",\
                 mean_y with lines lc rgb '#ffffff' lw 3,\
                 "gpw_DATAFILE_gpw" using 1:3 title 'data' with points pt 7 ps 1 lc rgb '#0000ee' lw 2,\
                   '' using 1:3 smooth unique lc rgb '#aaaaaa' lw 2,\
                   '' using 1:4 with lines lc rgb '#000000' lw 1,\
                   '' using 1:5 with lines lc rgb '#000000' lw 1""" % \
                         {
                             'title': "",
                             'xlabel': "",
                             'ylabel': service.getUnit(),
                             'x_start': "%s" % min(result_dates).strftime(
                                 self.date_format_short),
                             'x_end': "%s" % max(result_dates).strftime(
                                 self.date_format_short),
                             'timefmt': r'%Y-%m-%d %H:%M',
                             'xformat': '%%Y-%%m-%%d\n%%H:%%M',
                         }

            plot_png = plot(str(plotdata),
                            plotscript=str(plotscript),
                            usefifo=False)

            # Temporary PNG data file
            fh, data_fn = tempfile.mkstemp(suffix='.png')
            os.write(fh, plot_png)
            plot_url = data_fn
            # Queue the temp file for removal after rendering.
            self.request['to_remove'].append(data_fn)
            plot_url = data_fn
        else:
            plot_url = ""

        table = {
            'title':
            "%s: %s (%s)" %
            (t(_("Analysis Service")), service.Title(), service.getKeyword()),
            'columns':
            [_('Analysis'),
             _('Result'),
             _('Analyst'),
             _('Captured')],
            'parms': [],
            'data':
            tabledata,
            'plot_url':
            plot_url,
        }

        self.report_data['tables'].append(table)

        # NOTE(review): assigned but not used below.
        translate = self.context.translate

        ## footnotes
        if out_of_range_count:
            msgid = _("Analyses out of range")
            self.report_data['footnotes'].append("%s %s" %
                                                 (error_icon, t(msgid)))

        self.report_data['parms'].append({
            "title": _("Analyses out of range"),
            "value": out_of_range_count
        })

        title = t(header)
        if titles:
            title += " (%s)" % " ".join(titles)
        return {
            'report_title': title,
            'report_data': self.template(),
        }

    def isSamplePointHidden(self):
        """Whether the SamplePoint attribute is hidden for AnalysisRequests."""
        return isAttributeHidden('AnalysisRequest', 'SamplePoint')
class Report(BrowserView):
    """QC report: analysis results per sample point and analysis service.

    Compiles one table per analysis service from verified/published
    analyses matching the form selection; when at least ``MinimumResults``
    distinct sampling dates exist for a service, a gnuplot trend graph
    (mean +/- stddev band) is attached.  Falls back to the generic
    ``qualitycontrol.pt`` form when the selection is incomplete or the
    catalog query matches nothing.
    """
    implements(IViewView)
    template = ViewPageTemplateFile(
        "templates/qualitycontrol_resultspersamplepoint.pt")
    # if unsuccessful we return here:
    default_template = ViewPageTemplateFile("templates/qualitycontrol.pt")

    def __init__(self, context, request, report=None):
        super(Report, self).__init__(context, request)
        self.report = report
        self.selection_macros = SelectionMacrosView(self.context, self.request)

    def __call__(self):
        """Build ``self.report_data`` and render the report.

        Returns a dict with 'report_title' and 'report_data' (rendered
        HTML) on success, or the rendered ``default_template`` on error.
        """
        MinimumResults = self.context.bika_setup.getMinimumResults()
        warning_icon = "<img " +\
            "src='"+self.portal_url+"/++resource++bika.lims.images/warning.png' " +\
            "height='9' width='9'/>"
        error_icon = "<img " +\
            "src='"+self.portal_url+"/++resource++bika.lims.images/exclamation.png' " +\
            "height='9' width='9'/>"

        header = _("Results per sample point")
        subheader = _(
            "Analysis results for per sample point and analysis service")

        self.contentFilter = {
            'portal_type': 'Analysis',
            'review_state': ['verified', 'published'],
            'sort_on': "getDateSampled"
        }

        # Use lab or client specifications, as chosen on the request form.
        spec = self.request.form.get('spec', 'lab')
        spec_title = (spec == 'lab') and _("Lab") or _("Client")

        parms = []
        titles = []

        val = self.selection_macros.parse_client(self.request)
        if val:
            self.contentFilter[val['contentFilter'][0]] = \
                val['contentFilter'][1]
            parms.append(val['parms'])
            titles.append(val['titles'])

        val = self.selection_macros.parse_samplepoint(self.request)
        if val:
            self.contentFilter[val['contentFilter'][0]] = \
                val['contentFilter'][1]
            parms.append(val['parms'])
            titles.append(val['titles'])

        val = self.selection_macros.parse_sampletype(self.request)
        # st_uid constrains the AnalysisSpec catalog lookup in lookup_spec()
        # below.  NOTE(review): the whole parse result (a dict, or None) is
        # passed as getSampleTypeUID, mirroring the original behaviour --
        # confirm against the catalog index semantics.
        st_uid = val
        if val:
            self.contentFilter[val['contentFilter'][0]] = \
                val['contentFilter'][1]
            parms.append(val['parms'])
            titles.append(val['titles'])

        val = self.selection_macros.parse_analysisservice(self.request)
        if val:
            self.contentFilter[val['contentFilter'][0]] = \
                val['contentFilter'][1]
            parms.append(val['parms'])
        else:
            # An analysis service selection is mandatory for this report.
            message = _("No analysis services were selected.")
            self.context.plone_utils.addPortalMessage(message, 'error')
            return self.default_template()

        val = self.selection_macros.parse_daterange(self.request,
                                                    'getDateSampled',
                                                    'DateSampled')
        if val:
            self.contentFilter[val['contentFilter'][0]] = \
                val['contentFilter'][1]
            parms.append(val['parms'])
            titles.append(val['titles'])

        val = self.selection_macros.parse_state(
            self.request, 'bika_worksheetanalysis_workflow',
            'worksheetanalysis_review_state', 'Worksheet state')
        if val:
            self.contentFilter[val['contentFilter'][0]] = \
                val['contentFilter'][1]
            parms.append(val['parms'])

        # Query the catalog and store analysis data in a dict
        analyses = {}
        out_of_range_count = 0
        in_shoulder_range_count = 0
        analysis_count = 0

        proxies = self.bika_analysis_catalog(self.contentFilter)

        if not proxies:
            message = _("No analyses matched your query")
            self.context.plone_utils.addPortalMessage(message, 'error')
            return self.default_template()

        cached_specs = {}  # keyed by parent_folder

        def lookup_spec(analysis):
            # If an analysis is OUT OF RANGE, the failed spec values are passed
            # back from the result_in_range function. But if the analysis result
            # is IN RANGE, we need to look it up.
            analysis = analysis['obj']
            if spec == "client":
                parent = analysis.aq_parent.aq_parent
            else:
                parent = self.context.bika_setup.bika_analysisspecs
            if parent.UID() not in cached_specs:
                proxies = self.bika_setup_catalog(
                    portal_type='AnalysisSpec',
                    getSampleTypeUID=st_uid,
                    path={
                        "query": "/".join(parent.getPhysicalPath()),
                        "level": 0
                    })
                if proxies:
                    spec_obj = proxies[0].getObject()
                    this_spec = spec_obj.getResultsRangeDict()
                else:
                    this_spec = {'min': None, 'max': None}
                cached_specs[parent.UID()] = this_spec
            else:
                this_spec = cached_specs[parent.UID()]
            return this_spec

        ## Compile a list of dictionaries, with all relevant analysis data
        for analysis in proxies:
            analysis = analysis.getObject()
            service = analysis.getService()
            keyword = service.getKeyword()
            service_title = "%s (%s)" % (service.Title(), service.getKeyword())
            result_in_range = analysis.result_in_range(specification=spec)
            try:
                precision = str(service.getPrecision())
            except Exception:
                precision = "2"

            if service_title not in analyses:
                analyses[service_title] = []
            try:
                result = float(analysis.getResult())
            except (TypeError, ValueError):
                # XXX Unfloatable analysis results should be indicated
                continue
            analyses[service_title].append({
                'service': service,
                'obj': analysis,
                'Request ID': analysis.aq_parent.getId(),
                'Analyst': analysis.getAnalyst(),
                'Result': result,
                'precision': precision,
                'Sampled': analysis.getDateSampled(),
                'Captured': analysis.getResultCaptureDate(),
                'Uncertainty': analysis.getUncertainty(),
                'result_in_range': result_in_range,
                'Unit': service.getUnit(),
                'Keyword': keyword,
                'icons': '',
            })
            analysis_count += 1

        keys = sorted(analyses.keys())

        parms += [
            {
                "title": _("Total analyses"),
                "value": analysis_count
            },
            {
                "title": _("Analysis specification"),
                "value": spec_title
            },
        ]

        ## This variable is output to the TAL
        self.report_data = {
            'header': header,
            'subheader': subheader,
            'parms': parms,
            'tables': [],
            'footnotes': [],
        }

        plotscript = """
        set terminal png transparent truecolor enhanced size 700,350 font "Verdana, 8"
        set title "%(title)s"
        set xlabel "%(xlabel)s"
        set ylabel "%(ylabel)s"
        set key off
        #set logscale
        set timefmt "%(date_format_long)s"
        set xdata time
        set format x "%(date_format_short)s\\n%(time_format)s"
        set xrange ["%(x_start)s":"%(x_end)s"]
        set auto fix
        set offsets graph 0, 0, 1, 1
        set xtics border nomirror rotate by 90 font "Verdana, 5" offset 0,-3
        set ytics nomirror

        f(x) = mean_y
        fit f(x) 'gpw_DATAFILE_gpw' u 1:3 via mean_y
        stddev_y = sqrt(FIT_WSSR / (FIT_NDF + 1))

        plot mean_y-stddev_y with filledcurves y1=mean_y lt 1 lc rgb "#efefef",\
             mean_y+stddev_y with filledcurves y1=mean_y lt 1 lc rgb "#efefef",\
             mean_y with lines lc rgb '#ffffff' lw 3,\
             "gpw_DATAFILE_gpw" using 1:3 title 'data' with points pt 7 ps 1 lc rgb '#0000ee' lw 2,\
               '' using 1:3 smooth unique lc rgb '#aaaaaa' lw 2,\
               '' using 1:4 with lines lc rgb '#000000' lw 1,\
               '' using 1:5 with lines lc rgb '#000000' lw 1"""

        ## Compile plots and format data for display
        for service_title in keys:
            # used to calculate XY axis ranges
            result_values = [int(o['Result']) for o in analyses[service_title]]
            result_dates = [o['Sampled'] for o in analyses[service_title]]

            # per-table parameter list; report_data['parms'] above keeps a
            # reference to the original list, so this rebinding is safe.
            parms = []
            plotdata = str()

            range_min = ''
            range_max = ''

            for a in analyses[service_title]:

                a['Sampled'] = a['Sampled'].strftime(self.date_format_long)
                a['Captured'] = a['Captured'].strftime(self.date_format_long)

                R = a['Result']
                U = a['Uncertainty']

                # Format with the precision captured for THIS analysis (the
                # original used the loop-leaked `precision` variable from the
                # compilation loop above, i.e. the last analysis's precision).
                a['Result'] = str("%." + a['precision'] + "f") % a['Result']

                in_range = a['result_in_range']
                # in-range: lookup spec, if possible
                if in_range[1] is None:
                    this_spec_results = lookup_spec(a)
                    if this_spec_results and a['Keyword'] in this_spec_results:
                        this_spec = this_spec_results[a['Keyword']]
                        # BUGFIX: the original compared (==) instead of
                        # assigning, silently discarding the looked-up spec.
                        in_range = [in_range[0], this_spec]
                # If no specs are supplied, fake them
                # and do not print specification values or errors
                a['range_min'] = in_range[1] and in_range[1]['min'] or ''
                a['range_max'] = in_range[1] and in_range[1]['max'] or ''
                if a['range_min'] and a['range_max']:
                    range_min = a['range_min']
                    range_max = a['range_max']
                    # result out of range
                    if str(in_range[0]) == 'False':
                        out_of_range_count += 1
                        a['Result'] = "%s %s" % (a['Result'], error_icon)
                    # result almost out of range
                    if str(in_range[0]) == '1':
                        in_shoulder_range_count += 1
                        a['Result'] = "%s %s" % (a['Result'], warning_icon)
                else:
                    a['range_min'] = min(result_values)
                    a['range_max'] = max(result_values)

                # gnuplot data row: date, result, range min/max, uncertainty
                plotdata += "%s\t%s\t%s\t%s\t%s\n" % (
                    a['Sampled'],
                    R,
                    range_min,
                    range_max,
                    U and U or 0,
                )

            if range_min and range_max:
                spec_str = "%s: %s, %s: %s" % (
                    self.context.translate(_("Range min")),
                    range_min,
                    self.context.translate(_("Range max")),
                    range_max,
                )
                parms.append({
                    'title': _('Specification'),
                    'value': spec_str,
                })

            unit = analyses[service_title][0]['Unit']
            # Only plot when enough distinct sampling dates are available.
            if MinimumResults <= len(dict([(d, d) for d in result_dates])):
                _plotscript = str(plotscript) % {
                    'title': "",
                    'xlabel': self.context.translate(_("Date Sampled")),
                    'ylabel': unit and unit or '',
                    'x_start': "%s" % min(result_dates).strftime(
                        self.date_format_long),
                    'x_end': "%s" % max(result_dates).strftime(
                        self.date_format_long),
                    'date_format_long': self.date_format_long,
                    'date_format_short': self.date_format_short,
                    'time_format': self.time_format,
                }

                plot_png = plot(str(plotdata),
                                plotscript=str(_plotscript),
                                usefifo=False)

                # Temporary PNG data file, queued for removal after render.
                fh, data_fn = tempfile.mkstemp(suffix='.png')
                os.write(fh, plot_png)
                os.close(fh)  # the original leaked this descriptor
                self.request['to_remove'].append(data_fn)
                plot_url = data_fn
            else:
                plot_url = ""

            table = {
                'title': "%s: %s" % (
                    self.context.translate(_("Analysis Service")),
                    service_title),
                'parms': parms,
                'columns': ['Request ID', 'Analyst', 'Result',
                            'Sampled', 'Captured'],
                'data': analyses[service_title],
                'plot_url': plot_url,
            }

            self.report_data['tables'].append(table)

        ## footnotes
        if out_of_range_count:
            msgid = _("Analyses out of range")
            self.report_data['footnotes'].append(
                "%s %s" % (error_icon, self.context.translate(msgid)))
        if in_shoulder_range_count:
            msgid = _("Analyses in error shoulder range")
            self.report_data['footnotes'].append(
                "%s %s" % (warning_icon, self.context.translate(msgid)))

        self.report_data['parms'].append({
            "title": _("Analyses out of range"),
            "value": out_of_range_count
        })
        self.report_data['parms'].append({
            "title": _("Analyses in error shoulder range"),
            "value": in_shoulder_range_count
        })

        title = self.context.translate(header)
        if titles:
            title += " (%s)" % " ".join(titles)
        return {
            'report_title': title,
            'report_data': self.template(),
        }
class Report(BrowserView):
    """Productivity report: samples received vs. reported, per month.

    Counts received samples (grouped by month of receipt) and how many of
    them have at least one analysis with a published date.  Renders the
    report template, or streams a CSV attachment when the form requests
    ``output_format=CSV``.
    """
    implements(IViewView)
    default_template = ViewPageTemplateFile("templates/productivity.pt")
    template = ViewPageTemplateFile(
        "templates/productivity_samplereceivedvsreported.pt")

    def __init__(self, context, request, report=None):
        super(Report, self).__init__(context, request)
        self.report = report
        self.selection_macros = SelectionMacrosView(self.context, self.request)

    def __call__(self):
        """Build self.report_data and render it.

        Returns {'report_title', 'report_data'} for the normal HTML path;
        the CSV path writes directly to the response and returns None.
        On an empty query, returns the rendered ``default_template``.
        """
        parms = []
        titles = []

        self.contentFilter = {
            'portal_type': 'Sample',
            'review_state': ['sample_received', 'expired', 'disposed'],
            'sort_on': 'getDateReceived'
        }

        val = self.selection_macros.parse_daterange(self.request,
                                                    'getDateReceived',
                                                    _('Date Received'))
        if val:
            self.contentFilter[val['contentFilter'][0]] = \
                val['contentFilter'][1]
            parms.append(val['parms'])
            titles.append(val['titles'])

        # Query the catalog and store results in a dictionary
        samples = self.bika_catalog(self.contentFilter)
        if not samples:
            message = _("No samples matched your query")
            self.context.plone_utils.addPortalMessage(message, "error")
            return self.default_template()

        datalines = {}
        footlines = {}
        total_received_count = 0
        total_published_count = 0
        for sample in samples:
            sample = sample.getObject()

            # For each sample, check whether it has published results,
            # and add it to datalines
            published = False
            analyses = sample.getAnalyses({})
            for analysis in analyses:
                analysis = analysis.getObject()
                if analysis.getDateAnalysisPublished() is not None:
                    published = True
                    break

            datereceived = sample.getDateReceived()
            monthyear = datereceived.strftime(
                "%B") + " " + datereceived.strftime("%Y")
            # Accumulate onto the month's existing counts, if any.
            received = 1
            publishedcnt = 1 if published else 0
            if monthyear in datalines:
                received = datalines[monthyear]['ReceivedCount'] + 1
                publishedcnt = datalines[monthyear]['PublishedCount']
                if published:
                    publishedcnt += 1
            # BUGFIX: the original used Python 2 integer division here,
            # truncating 'Ratio' to 0 or 1.
            ratio = float(publishedcnt) / received
            dataline = {
                'MonthYear': monthyear,
                'ReceivedCount': received,
                'PublishedCount': publishedcnt,
                'UnpublishedCount': received - publishedcnt,
                'Ratio': ratio,
                'RatioPercentage':
                    '%02d' % (100 * (float(publishedcnt) / float(received)))
                    + '%'
            }
            datalines[monthyear] = dataline

            total_received_count += 1
            if published:
                total_published_count += 1

        # Footer total data (samples is non-empty, so the divisor is > 0)
        ratio = float(total_published_count) / total_received_count
        footline = {
            'ReceivedCount': total_received_count,
            'PublishedCount': total_published_count,
            'UnpublishedCount':
                total_received_count - total_published_count,
            'Ratio': ratio,
            'RatioPercentage':
                '%02d' % (100 * (float(total_published_count) /
                                 float(total_received_count))) + '%'
        }
        footlines['Total'] = footline

        self.report_data = {
            'parameters': parms,
            'datalines': datalines,
            'footlines': footlines
        }

        if self.request.get('output_format', '') == 'CSV':
            import csv
            import StringIO
            import datetime

            fieldnames = [
                'MonthYear',
                'ReceivedCount',
                'PublishedCount',
                'RatioPercentage',
            ]
            output = StringIO.StringIO()
            dw = csv.DictWriter(output,
                                extrasaction='ignore',
                                fieldnames=fieldnames)
            dw.writerow(dict((fn, fn) for fn in fieldnames))
            for row in datalines.values():
                dw.writerow(row)
            report_data = output.getvalue()
            output.close()
            date = datetime.datetime.now().strftime("%Y%m%d%H%M")
            setheader = self.request.RESPONSE.setHeader
            setheader('Content-Type', 'text/csv')
            setheader(
                "Content-Disposition",
                "attachment;filename=\"receivedvspublished_%s.csv\"" % date)
            self.request.RESPONSE.write(report_data)
        else:
            return {
                'report_title': _('Samples received vs. reported'),
                'report_data': self.template()
            }
class Report(BrowserView):
    """QC report: reference analysis results for one reference sample and
    analysis service.

    Tabulates verified/published reference analyses and, when at least
    ``MinimumResults`` exist, attaches a gnuplot trend graph (mean +/-
    stddev band).  Falls back to the generic ``qualitycontrol.pt`` form
    when the selection is incomplete or the query matches nothing.
    """
    implements(IViewView)

    template = ViewPageTemplateFile(
        "templates/qualitycontrol_referenceanalysisqc.pt")
    # if unsuccessful we return here:
    default_template = ViewPageTemplateFile("templates/qualitycontrol.pt")

    def __init__(self, context, request, report=None):
        super(Report, self).__init__(context, request)
        self.report = report
        self.selection_macros = SelectionMacrosView(self.context, self.request)

    def __call__(self):
        """Build the report; return {'report_title', 'report_data'} on
        success, or the rendered ``default_template`` on error."""

        header = _("Reference analysis QC")
        subheader = _("Reference analysis quality control graphs ")

        MinimumResults = self.context.bika_setup.getMinimumResults()

        warning_icon = "<img src='" + self.portal_url + "/++resource++bika.lims.images/warning.png' height='9' width='9'/>"
        error_icon = "<img src='" + self.portal_url + "/++resource++bika.lims.images/exclamation.png' height='9' width='9'/>"

        self.parms = []
        titles = []

        sample_uid = self.request.form.get('ReferenceSampleUID', '')
        sample = self.reference_catalog.lookupObject(sample_uid)
        if not sample:
            message = _("No reference sample was selected.")
            self.context.plone_utils.addPortalMessage(message, 'error')
            return self.default_template()

        self.parms.append(
            {'title': _("Reference Sample"), 'value': sample.Title()})
        titles.append(sample.Title())

        service_uid = self.request.form.get('ReferenceServiceUID', '')
        service = self.reference_catalog.lookupObject(service_uid)
        if not service:
            message = _("No analysis services were selected.")
            self.context.plone_utils.addPortalMessage(message, 'error')
            return self.default_template()

        # Only reference analyses contained inside the selected sample.
        self.contentFilter = {'portal_type': 'ReferenceAnalysis',
                              'review_state': ['verified', 'published'],
                              'path': {
                                  "query": "/".join(sample.getPhysicalPath()),
                                  "level": 0}}

        self.parms.append(
            {'title': _("Analysis Service"), 'value': service.Title()})
        titles.append(service.Title())

        val = self.selection_macros.parse_daterange(self.request,
                                                    'getDateVerified',
                                                    'DateVerified')
        if val:
            self.contentFilter[val['contentFilter'][0]] = \
                val['contentFilter'][1]
            self.parms.append(val['parms'])
            titles.append(val['titles'])

        proxies = self.bika_analysis_catalog(self.contentFilter)
        if not proxies:
            message = _("No analyses matched your query")
            self.context.plone_utils.addPortalMessage(message, 'error')
            return self.default_template()

        # Compile all relevant analysis data
        out_of_range_count = 0
        capture_dates = []

        plotdata = ""
        tabledata = []

        for analysis in proxies:
            analysis = analysis.getObject()
            service = analysis.getService()
            # The reference result range defined for the selected service.
            resultsrange = [x for x in sample.getReferenceResults()
                            if x['uid'] == service_uid][0]
            try:
                result = float(analysis.getResult())
                numeric = True
            except (TypeError, ValueError):
                # Keep the raw, non-numeric result for display only.
                result = analysis.getResult()
                numeric = False
            capture_dates.append(analysis.getResultCaptureDate())

            # BUGFIX: only numeric results can meaningfully be range-checked;
            # the original compared strings against floats (always "greater"
            # in Python 2), inflating the out-of-range count.
            if numeric and (result < float(resultsrange['min']) or
                            result > float(resultsrange['max'])):
                out_of_range_count += 1

            try:
                precision = str(analysis.getPrecision())
            except Exception:
                precision = "2"

            try:
                formatted_result = str("%." + precision + "f") % result
            except (TypeError, ValueError):
                # Non-numeric result: show it as-is.
                formatted_result = result

            tabledata.append({_("Analysis"): analysis.getId(),
                              _("Result"): formatted_result,
                              _("Analyst"): analysis.getAnalyst(),
                              _("Captured"):
                                  analysis.getResultCaptureDate().strftime(
                                      self.date_format_long)})

            # gnuplot data row: date, result, range min, range max
            plotdata += "%s\t%s\t%s\t%s\n" % (
                analysis.getResultCaptureDate().strftime(self.date_format_long),
                result,
                resultsrange['min'],
                resultsrange['max']
            )

        self.parms += [
            {"title": _("Total analyses"), "value": len(proxies)},
        ]

        # # This variable is output to the TAL
        self.report_data = {
            'header': header,
            'subheader': subheader,
            'parms': self.parms,
            'tables': [],
            'footnotes': [],
        }

        if MinimumResults <= len(proxies):
            plotscript = """
            set terminal png transparent truecolor enhanced size 700,350 font "Verdana, 8"
            set title "%(title)s"
            set xlabel "%(xlabel)s"
            set ylabel "%(ylabel)s"
            set key off
            #set logscale
            set timefmt "%(timefmt)s"
            set xdata time
            set format x "%(xformat)s"
            set xrange ["%(x_start)s":"%(x_end)s"]
            set auto fix
            set offsets graph 0, 0, 1, 1
            set xtics border nomirror rotate by 90 font "Verdana, 5" offset 0,-3
            set ytics nomirror

            f(x) = mean_y
            fit f(x) 'gpw_DATAFILE_gpw' u 1:3 via mean_y
            stddev_y = sqrt(FIT_WSSR / (FIT_NDF + 1))

            plot mean_y-stddev_y with filledcurves y1=mean_y lt 1 lc rgb "#efefef",\
                 mean_y+stddev_y with filledcurves y1=mean_y lt 1 lc rgb "#efefef",\
                 mean_y with lines lc rgb '#ffffff' lw 3,\
                 "gpw_DATAFILE_gpw" using 1:3 title 'data' with points pt 7 ps 1 lc rgb '#0000ee' lw 2,\
                   '' using 1:3 smooth unique lc rgb '#aaaaaa' lw 2,\
                   '' using 1:4 with lines lc rgb '#000000' lw 1,\
                   '' using 1:5 with lines lc rgb '#000000' lw 1""" % \
                         {
                             'title': "",
                             'xlabel': "",
                             'ylabel': service.getUnit(),
                             'x_start': "%s" % min(capture_dates).strftime(
                                 self.date_format_short),
                             'x_end': "%s" % max(capture_dates).strftime(
                                 self.date_format_short),
                             'timefmt': r'%Y-%m-%d %H:%M',
                             'xformat': '%%Y-%%m-%%d\n%%H:%%M',
                         }

            plot_png = plot(str(plotdata), plotscript=str(plotscript),
                            usefifo=False)

            # Temporary PNG data file, queued for removal after render.
            fh, data_fn = tempfile.mkstemp(suffix='.png')
            os.write(fh, plot_png)
            os.close(fh)  # the original leaked this descriptor
            self.request['to_remove'].append(data_fn)
            plot_url = data_fn
        else:
            plot_url = ""

        table = {
            'title': "%s: %s (%s)" % (
                t(_("Analysis Service")),
                service.Title(),
                service.getKeyword()
            ),
            'columns': [_('Analysis'),
                        _('Result'),
                        _('Analyst'),
                        _('Captured')],
            'parms': [],
            'data': tabledata,
            'plot_url': plot_url,
        }

        self.report_data['tables'].append(table)

        ## footnotes
        if out_of_range_count:
            msgid = _("Analyses out of range")
            self.report_data['footnotes'].append(
                "%s %s" % (error_icon, t(msgid)))

        self.report_data['parms'].append(
            {"title": _("Analyses out of range"),
             "value": out_of_range_count})

        title = t(header)
        if titles:
            title += " (%s)" % " ".join(titles)
        return {
            'report_title': title,
            'report_data': self.template(),
        }

    def isSamplePointHidden(self):
        """Tell the template whether the SamplePoint attribute is hidden
        for AnalysisRequest objects in this instance's configuration."""
        return isAttributeHidden('AnalysisRequest', 'SamplePoint')
class Report(BrowserView):
    """Quality Control report: verified/published analysis results listed
    per sample point and analysis service, with a gnuplot trend chart for
    each service that has enough distinct sampling dates.
    """
    implements(IViewView)
    template = ViewPageTemplateFile(
        "templates/qualitycontrol_resultspersamplepoint.pt")
    # if unsuccessful we return here:
    default_template = ViewPageTemplateFile("templates/qualitycontrol.pt")

    def __init__(self, context, request, report=None):
        super(Report, self).__init__(context, request)
        self.report = report
        self.selection_macros = SelectionMacrosView(self.context, self.request)

    def __call__(self):
        """Build ``self.report_data`` and render the report template.

        Falls back to ``default_template`` (with a portal error message)
        when no analysis service is selected or no analyses match the
        request filters.
        """
        MinimumResults = self.context.bika_setup.getMinimumResults()
        warning_icon = (
            "<img "
            + "src='"
            + self.portal_url
            + "/++resource++bika.lims.images/warning.png' "
            + "height='9' width='9'/>"
        )
        error_icon = (
            "<img "
            + "src='"
            + self.portal_url
            + "/++resource++bika.lims.images/exclamation.png' "
            + "height='9' width='9'/>"
        )

        header = _("Results per sample point")
        subheader = _("Analysis results for per sample point and analysis service")

        self.contentFilter = {
            "portal_type": "Analysis",
            "review_state": ["verified", "published"],
            "sort_on": "getDateSampled",
        }

        # Which specification to evaluate results against: lab or client
        spec = self.request.form.get("spec", "lab")
        spec_title = (spec == "lab") and _("Lab") or _("Client")

        parms = []
        titles = []

        val = self.selection_macros.parse_client(self.request)
        if val:
            self.contentFilter[val["contentFilter"][0]] = val["contentFilter"][1]
            parms.append(val["parms"])
            titles.append(val["titles"])

        val = self.selection_macros.parse_samplepoint(self.request)
        if val:
            self.contentFilter[val["contentFilter"][0]] = val["contentFilter"][1]
            parms.append(val["parms"])
            titles.append(val["titles"])

        val = self.selection_macros.parse_sampletype(self.request)
        # NOTE(review): st_uid holds the whole dict returned by
        # parse_sampletype() (or a falsy value), not a plain UID, and is
        # passed as getSampleTypeUID below -- confirm this is intended.
        st_uid = val
        if val:
            self.contentFilter[val["contentFilter"][0]] = val["contentFilter"][1]
            parms.append(val["parms"])
            titles.append(val["titles"])

        val = self.selection_macros.parse_analysisservice(self.request)
        if val:
            self.contentFilter[val["contentFilter"][0]] = val["contentFilter"][1]
            parms.append(val["parms"])
        else:
            message = _("No analysis services were selected.")
            self.context.plone_utils.addPortalMessage(message, "error")
            return self.default_template()

        val = self.selection_macros.parse_daterange(self.request, "getDateSampled", "DateSampled")
        if val:
            self.contentFilter[val["contentFilter"][0]] = val["contentFilter"][1]
            parms.append(val["parms"])
            titles.append(val["titles"])

        val = self.selection_macros.parse_state(
            self.request, "bika_worksheetanalysis_workflow", "worksheetanalysis_review_state", "Worksheet state"
        )
        if val:
            self.contentFilter[val["contentFilter"][0]] = val["contentFilter"][1]
            parms.append(val["parms"])

        # Query the catalog and store analysis data in a dict
        analyses = {}
        out_of_range_count = 0
        in_shoulder_range_count = 0
        analysis_count = 0

        proxies = self.bika_analysis_catalog(self.contentFilter)

        if not proxies:
            message = _("No analyses matched your query")
            self.context.plone_utils.addPortalMessage(message, "error")
            return self.default_template()

        cached_specs = {}  # keyed by parent folder UID

        def lookup_spec(analysis):
            # If an analysis is OUT OF RANGE, the failed spec values are passed
            # back from the result_in_range function. But if the analysis result
            # is IN RANGE, we need to look it up.
            analysis = analysis["obj"]
            if spec == "client":
                parent = analysis.aq_parent.aq_parent
            else:
                parent = self.context.bika_setup.bika_analysisspecs
            if parent.UID() not in cached_specs:
                proxies = self.bika_setup_catalog(
                    portal_type="AnalysisSpec",
                    getSampleTypeUID=st_uid,
                    path={"query": "/".join(parent.getPhysicalPath()), "level": 0},
                )
                if proxies:
                    spec_obj = proxies[0].getObject()
                    this_spec = spec_obj.getResultsRangeDict()
                else:
                    this_spec = {"min": None, "max": None}
                cached_specs[parent.UID()] = this_spec
            else:
                this_spec = cached_specs[parent.UID()]
            return this_spec

        ## Compile a list of dictionaries, with all relevant analysis data
        for analysis in proxies:
            analysis = analysis.getObject()
            service = analysis.getService()
            keyword = service.getKeyword()
            service_title = "%s (%s)" % (service.Title(), keyword)
            result_in_range = analysis.result_in_range(specification=spec)
            try:
                precision = str(service.getPrecision())
            except Exception:
                # services without a precision default to 2 decimals
                precision = "2"

            if service_title not in analyses:
                analyses[service_title] = []
            try:
                result = float(analysis.getResult())
            except Exception:
                # XXX Unfloatable analysis results should be indicated
                continue
            analyses[service_title].append(
                {
                    "service": service,
                    "obj": analysis,
                    "Request ID": analysis.aq_parent.getId(),
                    "Analyst": analysis.getAnalyst(),
                    "Result": result,
                    "precision": precision,
                    "Sampled": analysis.getDateSampled(),
                    "Captured": analysis.getResultCaptureDate(),
                    "Uncertainty": analysis.getUncertainty(),
                    "result_in_range": result_in_range,
                    "Unit": service.getUnit(),
                    "Keyword": keyword,
                    "icons": "",
                }
            )
            analysis_count += 1

        keys = sorted(analyses.keys())

        parms += [
            {"title": _("Total analyses"), "value": analysis_count},
            {"title": _("Analysis specification"), "value": spec_title},
        ]

        ## This variable is output to the TAL
        self.report_data = {"header": header, "subheader": subheader, "parms": parms, "tables": [], "footnotes": []}

        plotscript = """
        set terminal png transparent truecolor enhanced size 700,350 font "Verdana, 8"
        set title "%(title)s"
        set xlabel "%(xlabel)s"
        set ylabel "%(ylabel)s"
        set key off
        #set logscale
        set timefmt "%(date_format_long)s"
        set xdata time
        set format x "%(date_format_short)s\\n%(time_format)s"
        set xrange ["%(x_start)s":"%(x_end)s"]
        set auto fix
        set offsets graph 0, 0, 1, 1
        set xtics border nomirror rotate by 90 font "Verdana, 5" offset 0,-3
        set ytics nomirror

        f(x) = mean_y
        fit f(x) 'gpw_DATAFILE_gpw' u 1:3 via mean_y
        stddev_y = sqrt(FIT_WSSR / (FIT_NDF + 1))

        plot mean_y-stddev_y with filledcurves y1=mean_y lt 1 lc rgb "#efefef",\
             mean_y+stddev_y with filledcurves y1=mean_y lt 1 lc rgb "#efefef",\
             mean_y with lines lc rgb '#ffffff' lw 3,\
             "gpw_DATAFILE_gpw" using 1:3 title 'data' with points pt 7 ps 1 lc rgb '#0000ee' lw 2,\
               '' using 1:3 smooth unique lc rgb '#aaaaaa' lw 2,\
               '' using 1:4 with lines lc rgb '#000000' lw 1,\
               '' using 1:5 with lines lc rgb '#000000' lw 1"""

        ## Compile plots and format data for display
        for service_title in keys:
            # used to calculate XY axis ranges; results are truncated to ints
            # here for the no-spec axis fallback (preserved original behavior)
            result_values = [int(o["Result"]) for o in analyses[service_title]]
            result_dates = [o["Sampled"] for o in analyses[service_title]]

            parms = []
            plotdata = ""

            range_min = ""
            range_max = ""

            for a in analyses[service_title]:

                a["Sampled"] = a["Sampled"].strftime(self.date_format_long)
                a["Captured"] = a["Captured"].strftime(self.date_format_long)

                R = a["Result"]
                U = a["Uncertainty"]

                # BUGFIX: format with this analysis' own precision instead of
                # the leftover 'precision' variable from the loop above.
                a["Result"] = str("%." + a["precision"] + "f") % a["Result"]

                in_range = a["result_in_range"]
                # in-range: lookup spec, if possible.
                # BUGFIX: the original compared (==) instead of assigning, so
                # the looked-up spec was always discarded; use a local so we
                # also avoid mutating whatever result_in_range() returned.
                spec_dict = in_range[1]
                if spec_dict is None:
                    this_spec_results = lookup_spec(a)
                    if this_spec_results and a["Keyword"] in this_spec_results:
                        spec_dict = this_spec_results[a["Keyword"]]
                # If no specs are supplied, fake them
                # and do not print specification values or errors
                a["range_min"] = spec_dict and spec_dict["min"] or ""
                a["range_max"] = spec_dict and spec_dict["max"] or ""
                if a["range_min"] and a["range_max"]:
                    range_min = a["range_min"]
                    range_max = a["range_max"]
                    # result out of range
                    if str(in_range[0]) == "False":
                        out_of_range_count += 1
                        a["Result"] = "%s %s" % (a["Result"], error_icon)
                    # result almost out of range
                    if str(in_range[0]) == "1":
                        in_shoulder_range_count += 1
                        a["Result"] = "%s %s" % (a["Result"], warning_icon)
                else:
                    a["range_min"] = min(result_values)
                    a["range_max"] = max(result_values)

                # gnuplot data row: date, result, spec min/max, uncertainty
                plotdata += "%s\t%s\t%s\t%s\t%s\n" % (a["Sampled"], R, range_min, range_max, U and U or 0)

            if range_min and range_max:
                spec_str = "%s: %s, %s: %s" % (
                    self.context.translate(_("Range min")),
                    range_min,
                    self.context.translate(_("Range max")),
                    range_max,
                )
                parms.append({"title": _("Specification"), "value": spec_str})

            unit = analyses[service_title][0]["Unit"]
            # Only plot when there are at least MinimumResults distinct dates
            if MinimumResults <= len(set(result_dates)):
                _plotscript = str(plotscript) % {
                    "title": "",
                    "xlabel": self.context.translate(_("Date Sampled")),
                    "ylabel": unit and unit or "",
                    "x_start": "%s" % min(result_dates).strftime(self.date_format_long),
                    "x_end": "%s" % max(result_dates).strftime(self.date_format_long),
                    "date_format_long": self.date_format_long,
                    "date_format_short": self.date_format_short,
                    "time_format": self.time_format,
                }

                plot_png = plot(str(plotdata), plotscript=str(_plotscript), usefifo=False)

                # Temporary PNG data file, scheduled for removal after render
                fh, data_fn = tempfile.mkstemp(suffix=".png")
                os.write(fh, plot_png)
                # BUGFIX: close the descriptor returned by mkstemp (was leaked)
                os.close(fh)
                plot_url = data_fn
                self.request["to_remove"].append(data_fn)
            else:
                plot_url = ""

            table = {
                "title": "%s: %s" % (self.context.translate(_("Analysis Service")), service_title),
                "parms": parms,
                "columns": ["Request ID", "Analyst", "Result", "Sampled", "Captured"],
                "data": analyses[service_title],
                "plot_url": plot_url,
            }

            self.report_data["tables"].append(table)

        ## footnotes
        translate = self.context.translate
        if out_of_range_count:
            msgid = _("Analyses out of range")
            self.report_data["footnotes"].append("%s %s" % (error_icon, translate(msgid)))
        if in_shoulder_range_count:
            msgid = _("Analyses in error shoulder range")
            self.report_data["footnotes"].append("%s %s" % (warning_icon, translate(msgid)))

        self.report_data["parms"].append({"title": _("Analyses out of range"), "value": out_of_range_count})
        self.report_data["parms"].append(
            {"title": _("Analyses in error shoulder range"), "value": in_shoulder_range_count}
        )

        title = translate(header)
        if titles:
            title += " (%s)" % " ".join(titles)
        return {"report_title": title, "report_data": self.template()}
class Report(BrowserView):
    """Productivity report: 'Data entry day book' -- one line per
    AnalysisRequest created in the selected date range, with reception /
    publication counts and ratios in the footer.  Supports CSV export via
    the 'output_format' request parameter.
    """
    implements(IViewView)
    default_template = ViewPageTemplateFile("templates/productivity.pt")
    template = ViewPageTemplateFile("templates/productivity_dataentrydaybook.pt")

    def __init__(self, context, request, report=None):
        super(Report, self).__init__(context, request)
        self.report = report
        self.selection_macros = SelectionMacrosView(self.context, self.request)

    def __call__(self):
        """Build ``self.report_data`` and render the template, or stream a
        CSV attachment when output_format == 'CSV'.  Falls back to
        ``default_template`` when no Analysis Requests match the filters.
        """
        parms = []
        titles = []

        # Apply filters
        self.contentFilter = {'portal_type': 'AnalysisRequest'}
        val = self.selection_macros.parse_daterange(self.request,
                                                    'getDateCreated',
                                                    _('Date Created'))
        if val:
            self.contentFilter[val['contentFilter'][0]] = val['contentFilter'][1]
            parms.append(val['parms'])
            titles.append(val['titles'])

        # Query the catalog and store results in a dictionary
        ars = self.bika_catalog(self.contentFilter)
        if not ars:
            message = _("No Analysis Requests matched your query")
            self.context.plone_utils.addPortalMessage(message, "error")
            return self.default_template()

        datalines = {}
        footlines = {}
        totalcreatedcount = len(ars)
        totalreceivedcount = 0
        totalpublishedcount = 0
        totalanlcount = 0
        totalreceptionlag = 0
        totalpublicationlag = 0

        for ar in ars:
            ar = ar.getObject()
            datecreated = ar.created()
            datereceived = ar.getDateReceived()
            datepublished = ar.getDatePublished()
            # NOTE(review): the lags are never computed -- every line reports
            # 0 and the footer averages are always 0.0.  Presumably
            # unfinished; confirm before relying on these columns.
            receptionlag = 0
            publicationlag = 0
            anlcount = len(ar.getAnalyses())

            dataline = {
                        "AnalysisRequestID": ar.getRequestID(),
                        "DateCreated": self.ulocalized_time(datecreated),
                        "DateReceived": self.ulocalized_time(datereceived),
                        "DatePublished": self.ulocalized_time(datepublished),
                        "ReceptionLag": receptionlag,
                        "PublicationLag": publicationlag,
                        "TotalLag": receptionlag + publicationlag,
                        "BatchID": ar.getBatch(),
                        "SampleID": ar.getSample().Title(),
                        "SampleType": ar.getSampleTypeTitle(),
                        "NumAnalyses": anlcount,
                        "ClientID": ar.aq_parent.id,
                        "Creator": ar.Creator(),
                        "Remarks": ar.getRemarks()
                        }

            datalines[ar.getRequestID()] = dataline

            totalreceivedcount += datereceived and 1 or 0
            totalpublishedcount += datepublished and 1 or 0
            totalanlcount += anlcount
            totalreceptionlag += receptionlag
            totalpublicationlag += publicationlag

        # Footer total data.  totalcreatedcount > 0 is guaranteed (we bailed
        # out above on an empty result), but nothing guarantees that any AR
        # has been received yet.
        totalreceivedcreated_ratio = float(totalreceivedcount) / float(totalcreatedcount)
        totalpublishedcreated_ratio = float(totalpublishedcount) / float(totalcreatedcount)
        # BUGFIX: guard against ZeroDivisionError when no AR was received.
        if totalreceivedcount:
            totalpublishedreceived_ratio = float(totalpublishedcount) / float(totalreceivedcount)
        else:
            totalpublishedreceived_ratio = 0.0

        footline = {'Created': totalcreatedcount,
                    'Received': totalreceivedcount,
                    'Published': totalpublishedcount,
                    'ReceivedCreatedRatio': totalreceivedcreated_ratio,
                    'ReceivedCreatedRatioPercentage': ('{0:.0f}'.format(totalreceivedcreated_ratio * 100)) + "%",
                    'PublishedCreatedRatio': totalpublishedcreated_ratio,
                    'PublishedCreatedRatioPercentage': ('{0:.0f}'.format(totalpublishedcreated_ratio * 100)) + "%",
                    'PublishedReceivedRatio': totalpublishedreceived_ratio,
                    'PublishedReceivedRatioPercentage': ('{0:.0f}'.format(totalpublishedreceived_ratio * 100)) + "%",
                    'AvgReceptionLag': ('{0:.1f}'.format(totalreceptionlag / totalcreatedcount)),
                    'AvgPublicationLag': ('{0:.1f}'.format(totalpublicationlag / totalcreatedcount)),
                    'AvgTotalLag': ('{0:.1f}'.format((totalreceptionlag + totalpublicationlag) / totalcreatedcount)),
                    'NumAnalyses': totalanlcount
                    }

        footlines['Total'] = footline

        self.report_data = {'parameters': parms,
                            'datalines': datalines,
                            'footlines': footlines}

        if self.request.get('output_format', '') == 'CSV':
            import csv
            import StringIO
            import datetime
            fieldnames = [
                "AnalysisRequestID",
                "DateCreated",
                "DateReceived",
                "DatePublished",
                "ReceptionLag",
                "PublicationLag",
                "TotalLag",
                "BatchID",
                "SampleID",
                "SampleType",
                "NumAnalyses",
                "ClientID",
                "Creator",
                "Remarks",
             ]
            output = StringIO.StringIO()
            dw = csv.DictWriter(output, extrasaction='ignore', fieldnames=fieldnames)
            # header row (Py2 csv has no writeheader on older versions)
            dw.writerow(dict((fn, fn) for fn in fieldnames))
            # extrasaction='ignore' drops any keys outside fieldnames, so the
            # rows can be written directly
            for row in datalines.values():
                dw.writerow(row)
            report_data = output.getvalue()
            output.close()
            date = datetime.datetime.now().strftime("%Y%m%d%H%M")
            setheader = self.request.RESPONSE.setHeader
            setheader('Content-Type', 'text/csv')
            setheader("Content-Disposition",
                "attachment;filename=\"dataentrydaybook_%s.csv\"" % date)
            self.request.RESPONSE.write(report_data)
        else:
            return {'report_title': _('Data entry day book'),
                'report_data': self.template()}
class Report(BrowserView):
    """Administration report: 'Users history' -- workflow actions and
    repository versioning events across the main Bika content types,
    optionally filtered by modification date and by user.
    """
    implements(IViewView)
    default_template = ViewPageTemplateFile("templates/administration.pt")
    template = ViewPageTemplateFile("templates/administration_usershistory.pt")

    def __init__(self, context, request, report=None):
        super(Report, self).__init__(context, request)
        self.report = report
        self.selection_macros = SelectionMacrosView(self.context, self.request)

    def __call__(self):
        """Build ``self.report_data`` (datalines sorted newest-first) and
        render the template; fall back to ``default_template`` with a
        portal message when nothing matches.
        """
        parms = []
        titles = []

        rt = getToolByName(self.context, 'portal_repository')
        mt = getToolByName(self.context, 'portal_membership')

        # Apply filters
        # BUGFIX: 'Manufacturer' and 'Method' were missing a separating
        # comma, so implicit string concatenation produced a single bogus
        # 'ManufacturerMethod' entry and neither type was searched.
        self.contentFilter = {'portal_type': ('Analysis',
                                              'AnalysisCategory',
                                              'AnalysisProfile',
                                              'AnalysisRequest',
                                              'AnalysisService',
                                              'AnalysisSpec',
                                              'ARTemplate',
                                              'Attachment',
                                              'Batch',
                                              'Calculation',
                                              'Client',
                                              'Contact',
                                              'Container',
                                              'ContainerType',
                                              'Department',
                                              'DuplicateAnalysis',
                                              'Instrument',
                                              'InstrumentCalibration',
                                              'InstrumentCertification',
                                              'InstrumentMaintenanceTask',
                                              'InstrumentScheduledTask',
                                              'InstrumentType',
                                              'InstrumentValidation',
                                              'Manufacturer',
                                              'Method',
                                              'Preservation',
                                              'Pricelist',
                                              'ReferenceAnalysis',
                                              'ReferenceDefinition',
                                              'ReferenceSample',
                                              'Sample',
                                              'SampleMatrix',
                                              'SamplePoint',
                                              'SampleType',
                                              'Supplier',
                                              'SupplierContact',
                                              'Worksheet',
                                              'WorksheetTemplate'
                                              )}

        val = self.selection_macros.parse_daterange(self.request,
                                                    'getModificationDate',
                                                    _('Modification date'))
        if val:
            self.contentFilter['modified'] = val['contentFilter'][1]
            parms.append(val['parms'])
            titles.append(val['titles'])

        user = ''
        userfullname = ''
        if self.request.form.get('User', '') != '':
            user = self.request.form['User']
            userobj = mt.getMemberById(user)
            userfullname = userobj and userobj.getProperty('fullname') or ''
            parms.append({'title': _('User'),
                          'value': "%s (%s)" % (userfullname, user)})
            titles.append(userfullname)

        # Query the catalog and store results in a dictionary
        entities = self.bika_setup_catalog(self.contentFilter)

        if not entities:
            message = _("No historical actions matched your query")
            self.context.plone_utils.addPortalMessage(message, "error")
            return self.default_template()

        def actor_fullname(actor_id):
            # Resolve a display name for the acting user.  BUGFIX: the
            # original expression put the raw member object (or None) into
            # the report whenever no user filter was set; resolve the
            # actor's own fullname instead, falling back to the plain id.
            if userfullname:
                return userfullname
            member = mt.getMemberById(actor_id)
            return member and member.getProperty('fullname') or actor_id

        datalines = []
        # NOTE(review): keyed by timestamp, so two events with the exact
        # same time overwrite each other -- confirm acceptable.
        tmpdatalines = {}
        footlines = {}

        for entity in entities:
            entity = entity.getObject()
            entitytype = _(entity.__class__.__name__)

            # Workflow states retrieval
            for workflowid, workflow in entity.workflow_history.iteritems():
                for action in workflow:
                    if not action['action'] or action['action'] == 'create':
                        # creation entries of the inactive workflow are noise
                        if workflowid == 'bika_inactive_workflow':
                            continue
                        actiontitle = _('Created')
                    else:
                        actiontitle = _(action['action'])

                    if user == '' or action['actor'] == user:
                        dataline = {'EntityNameOrId': entity.title_or_id(),
                                    'EntityAbsoluteUrl': entity.absolute_url(),
                                    'EntityCreationDate': self.ulocalized_time(entity.CreationDate(), 1),
                                    'EntityModificationDate': self.ulocalized_time(entity.ModificationDate(), 1),
                                    'EntityType': entitytype,
                                    'Workflow': _(workflowid),
                                    'Action': actiontitle,
                                    'ActionDate': action['time'],
                                    'ActionDateStr': self.ulocalized_time(action['time'], 1),
                                    'ActionActor': action['actor'],
                                    'ActionActorFullName': actor_fullname(action['actor']),
                                    'ActionComments': action['comments']
                                    }
                        tmpdatalines[action['time']] = dataline

            # History versioning retrieval
            history = rt.getHistoryMetadata(entity)
            if history:
                hislen = history.getLength(countPurged=False)
                for index in range(hislen):
                    meta = history.retrieve(index)['metadata']['sys_metadata']
                    metatitle = _(meta['comment'])
                    if user == '' or meta['principal'] == user:
                        dataline = {'EntityNameOrId': entity.title_or_id(),
                                    'EntityAbsoluteUrl': entity.absolute_url(),
                                    'EntityCreationDate': self.ulocalized_time(entity.CreationDate(), 1),
                                    'EntityModificationDate': self.ulocalized_time(entity.ModificationDate(), 1),
                                    'EntityType': entitytype,
                                    'Workflow': '',
                                    'Action': metatitle,
                                    'ActionDate': meta['timestamp'],
                                    'ActionDateStr': self.ulocalized_time(meta['timestamp'], 1),
                                    'ActionActor': meta['principal'],
                                    'ActionActorFullName': actor_fullname(meta['principal']),
                                    'ActionComments': ''
                                    }
                        tmpdatalines[meta['timestamp']] = dataline

        if not tmpdatalines:
            # NOTE(review): interpolating before _() defeats i18n lookup of
            # the message id -- kept as-is to preserve the emitted string.
            message = _("No actions found for user %s" % userfullname)
            self.context.plone_utils.addPortalMessage(message, "error")
            return self.default_template()
        else:
            # Sort datalines newest-first
            for key in sorted(tmpdatalines.keys(), reverse=True):
                datalines.append(tmpdatalines[key])

            self.report_data = {'parameters': parms,
                                'datalines': datalines,
                                'footlines': footlines}

            return {'report_title': _('Users history'),
                    'report_data': self.template()}
Exemple #31
0
class Report(BrowserView):
    """Productivity report: analyses summary per department.

    Counts requested, performed and published analyses, optionally grouped
    by a time period (Day/Week/Month/Year) and broken down per laboratory
    department.
    """
    implements(IViewView)
    # Rendered instead of ``template`` when the query matches nothing.
    default_template = ViewPageTemplateFile("templates/productivity.pt")
    template = ViewPageTemplateFile(
        "templates/productivity_analysesperdepartment.pt")

    def __init__(self, context, request, report=None):
        """Store the optional *report* payload and prepare selection macros."""
        super(Report, self).__init__(context, request)
        self.report = report
        self.selection_macros = SelectionMacrosView(self.context, self.request)

    def __call__(self):
        """Build the report data and render the report template.

        Returns a dict with 'report_title' and rendered 'report_data', or
        renders ``default_template`` when no analysis matches the filters.
        """

        parms = []
        titles = []

        # Apply filters
        self.contentFilter = {'portal_type': 'Analysis'}
        val = self.selection_macros.parse_daterange(self.request,
                                                    'getDateRequested',
                                                    _('Date Requested'))
        if val:
            self.contentFilter[val['contentFilter']
                               [0]] = val['contentFilter'][1]
            parms.append(val['parms'])
            titles.append(val['titles'])

        val = self.selection_macros.parse_state(self.request,
                                                'bika_analysis_workflow',
                                                'getAnalysisState',
                                                _('Analysis State'))
        if val:
            self.contentFilter[val['contentFilter']
                               [0]] = val['contentFilter'][1]
            parms.append(val['parms'])
            titles.append(val['titles'])

        # Query the catalog and store results in a dictionary
        analyses = self.bika_analysis_catalog(self.contentFilter)
        if not analyses:
            message = _("No analyses matched your query")
            self.context.plone_utils.addPortalMessage(message, "error")
            return self.default_template()

        groupby = self.request.form.get('GroupingPeriod', '')
        if (groupby != ''):
            parms.append({"title": _("Grouping period"), "value": _(groupby)})

        # datalines: one entry per period group, each holding per-department
        # sub-lines.  footlines: grand totals across all groups.
        datalines = {}
        footlines = {}
        totalcount = len(analyses)
        totalpublishedcount = 0
        totalperformedcount = 0
        for analysis in analyses:
            analysis = analysis.getObject()
            analysisservice = analysis.getService()
            # NOTE(review): getDepartment() would raise AttributeError if a
            # service has no department assigned -- confirm this cannot occur.
            department = analysisservice.getDepartment().Title()
            daterequested = analysis.created()

            # Derive the grouping key from the analysis creation date.
            group = ''
            if groupby == 'Day':
                group = self.ulocalized_time(daterequested)
            elif groupby == 'Week':
                group = daterequested.strftime(
                    "%Y") + ", " + daterequested.strftime("%U")
            elif groupby == 'Month':
                group = daterequested.strftime(
                    "%B") + " " + daterequested.strftime("%Y")
            elif groupby == 'Year':
                group = daterequested.strftime("%Y")
            else:
                group = ''

            # Fresh zeroed lines; replaced below by the accumulated ones when
            # this group / department has been seen before.
            dataline = {
                'Group': group,
                'Requested': 0,
                'Performed': 0,
                'Published': 0,
                'Departments': {}
            }
            deptline = {
                'Department': department,
                'Requested': 0,
                'Performed': 0,
                'Published': 0
            }
            if (group in datalines):
                dataline = datalines[group]
                if (department in dataline['Departments']):
                    deptline = dataline['Departments'][department]

            # Every analysis counts as one more "requested" in its group and
            # department.
            grouptotalcount = dataline['Requested'] + 1
            groupperformedcount = dataline['Performed']
            grouppublishedcount = dataline['Published']

            depttotalcount = deptline['Requested'] + 1
            deptperformedcount = deptline['Performed']
            # NOTE(review): 'pubished' is a typo for 'published'; harmless
            # since the name is purely local.
            deptpubishedcount = deptline['Published']

            workflow = getToolByName(self.context, 'portal_workflow')
            # "Published" means the parent Analysis Request is published.
            arstate = workflow.getInfoFor(analysis.aq_parent, 'review_state',
                                          '')
            if (arstate == 'published'):
                deptpubishedcount += 1
                grouppublishedcount += 1
                totalpublishedcount += 1

            # "Performed" means the analysis has a (non-empty) result.
            if (analysis.getResult()):
                deptperformedcount += 1
                groupperformedcount += 1
                totalperformedcount += 1

            # Ratios are recomputed each iteration; the values stored last
            # for a given group/department are the final ones.
            group_performedrequested_ratio = float(
                groupperformedcount) / float(grouptotalcount)
            group_publishedperformed_ratio = groupperformedcount > 0 and float(
                grouppublishedcount) / float(groupperformedcount) or 0

            anl_performedrequested_ratio = float(deptperformedcount) / float(
                depttotalcount)
            anl_publishedperformed_ratio = deptperformedcount > 0 and float(
                deptpubishedcount) / float(deptperformedcount) or 0

            dataline['Requested'] = grouptotalcount
            dataline['Performed'] = groupperformedcount
            dataline['Published'] = grouppublishedcount
            dataline[
                'PerformedRequestedRatio'] = group_performedrequested_ratio
            dataline['PerformedRequestedRatioPercentage'] = ('{0:.0f}'.format(
                group_performedrequested_ratio * 100)) + "%"
            dataline[
                'PublishedPerformedRatio'] = group_publishedperformed_ratio
            dataline['PublishedPerformedRatioPercentage'] = ('{0:.0f}'.format(
                group_publishedperformed_ratio * 100)) + "%"

            deptline['Requested'] = depttotalcount
            deptline['Performed'] = deptperformedcount
            deptline['Published'] = deptpubishedcount
            deptline['PerformedRequestedRatio'] = anl_performedrequested_ratio
            deptline['PerformedRequestedRatioPercentage'] = ('{0:.0f}'.format(
                anl_performedrequested_ratio * 100)) + "%"
            deptline['PublishedPerformedRatio'] = anl_publishedperformed_ratio
            deptline['PublishedPerformedRatioPercentage'] = ('{0:.0f}'.format(
                anl_publishedperformed_ratio * 100)) + "%"

            dataline['Departments'][department] = deptline
            datalines[group] = dataline

        # Footer total data
        total_performedrequested_ratio = float(totalperformedcount) / float(
            totalcount)
        total_publishedperformed_ratio = totalperformedcount > 0 and float(
            totalpublishedcount) / float(totalperformedcount) or 0

        footline = {
            'Requested':
            totalcount,
            'Performed':
            totalperformedcount,
            'Published':
            totalpublishedcount,
            'PerformedRequestedRatio':
            total_performedrequested_ratio,
            'PerformedRequestedRatioPercentage':
            ('{0:.0f}'.format(total_performedrequested_ratio * 100)) + "%",
            'PublishedPerformedRatio':
            total_publishedperformed_ratio,
            'PublishedPerformedRatioPercentage':
            ('{0:.0f}'.format(total_publishedperformed_ratio * 100)) + "%"
        }

        footlines['Total'] = footline

        self.report_data = {
            'parameters': parms,
            'datalines': datalines,
            'footlines': footlines
        }

        return {
            'report_title': _('Analyses summary per department'),
            'report_data': self.template()
        }
# Example #32
# 0
    def __call__(self):
        """Create and render the selected report.

        Looks up the report module named by the ``report_id`` request
        parameter, runs it, wraps its HTML output in the report frame,
        converts the result to a PDF with pisa, stores the PDF on a newly
        created Report content object and streams it back to the browser.
        """
        import importlib
        import re

        # if there's an error, we return productivity.pt which requires these.
        self.selection_macros = SelectionMacrosView(self.context, self.request)

        report_id = self.request.get('report_id', '')
        if not report_id:
            message = "No report specified in request"
            self.logger.error(message)
            self.context.plone_utils.addPortalMessage(message, 'error')
            return self.template()

        # SECURITY: report_id comes straight from the request.  Restrict it
        # to a plain Python identifier so it can never smuggle code or dots
        # into the dynamic import below (the previous implementation passed
        # it to exec(), which allowed arbitrary code injection).
        if not re.match(r'^[A-Za-z_][A-Za-z0-9_]*$', report_id):
            message = "Report %s not found (shouldn't happen)" % report_id
            self.logger.error(message)
            self.context.plone_utils.addPortalMessage(message, 'error')
            return self.template()

        self.date = DateTime()
        username = self.context.portal_membership.getAuthenticatedMember(
        ).getUserName()
        self.reporter = self.user_fullname(username)
        self.reporter_email = self.user_email(username)

        # signature image: taken from the LabContact whose username matches
        # the authenticated user, if any.
        self.reporter_signature = ""
        c = [
            x for x in self.bika_setup_catalog(portal_type='LabContact')
            if x.getObject().getUsername() == username
        ]
        if c:
            sf = c[0].getObject().getSignature()
            if sf:
                self.reporter_signature = sf.absolute_url() + "/Signature"

        # Laboratory letterhead data used by the frame template.
        lab = self.context.bika_setup.laboratory
        self.laboratory = lab
        self.lab_title = lab.getName()
        self.lab_address = lab.getPrintAddress()
        self.lab_email = lab.getEmailAddress()
        self.lab_url = lab.getLabURL()

        client = logged_in_client(self.context)
        if client:
            clientuid = client.UID()
            self.client_title = client.Title()
            self.client_address = client.getPrintAddress()
        else:
            clientuid = None
            self.client_title = None
            self.client_address = None

        ## Render form output

        # the report can add file names to this list; they will be deleted
        # once the PDF has been generated.  temporary plot image files, etc.
        self.request['to_remove'] = []

        # Import the report class dynamically instead of exec()'ing
        # request-supplied text.
        try:
            module = importlib.import_module(
                "bika.lims.browser.reports.%s" % report_id)
            Report = module.Report
        except (ImportError, AttributeError):
            message = "Report %s not found (shouldn't happen)" % report_id
            self.logger.error(message)
            self.context.plone_utils.addPortalMessage(message, 'error')
            return self.template()

        # Report must return dict with:
        # - report_title - title string for pdf/history listing
        # - report_data - rendered report
        output = Report(self.context, self.request)()

        if isinstance(output, (str, unicode, bytes)):
            # Plain string output (e.g. a CSV response was already written):
            # clean up temp files and return it unchanged.
            for f in self.request['to_remove']:
                os.remove(f)
            return output

        ## The report output gets pulled through report_frame.pt
        self.reportout = output['report_data']
        framed_output = self.frame_template()

        # Convert the framed HTML into an in-memory PDF.
        pisa.showLogging()
        ramdisk = StringIO()
        pdf = pisa.CreatePDF(framed_output, ramdisk)
        result = ramdisk.getvalue()
        ramdisk.close()

        ## Create new report object
        reportid = self.aq_parent.generateUniqueId('Report')
        self.aq_parent.invokeFactory(id=reportid, type_name="Report")
        report = self.aq_parent._getOb(reportid)
        report.edit(Client=clientuid)
        report.processForm()

        ## write pdf to report object
        report.edit(title=output['report_title'], ReportFile=result)
        report.reindexObject()

        fn = "%s - %s" % (self.date.strftime(
            self.date_format_short), _u(output['report_title']))

        # remove temporary files registered by the report
        for f in self.request['to_remove']:
            os.remove(f)

        # Only stream the PDF when pisa reported no rendering errors.
        if not pdf.err:
            setheader = self.request.RESPONSE.setHeader
            setheader('Content-Type', 'application/pdf')
            setheader("Content-Disposition",
                      "attachment;filename=\"%s\"" % _c(fn))
            self.request.RESPONSE.write(result)

        return
class Report(BrowserView):
    """Productivity report: analyses performed as a percentage of total.

    Counts requested, performed and published analyses, optionally grouped
    by a time period (Day/Week/Month/Year) and broken down per analysis
    service (keyed by the service keyword).  Can also export the raw counts
    as CSV.
    """
    implements(IViewView)
    # Rendered instead of ``template`` when the query matches nothing.
    default_template = ViewPageTemplateFile("templates/productivity.pt")
    template = ViewPageTemplateFile(
        "templates/productivity_analysesperformedpertotal.pt")

    def __init__(self, context, request, report=None):
        """Store the optional *report* payload and prepare selection macros."""
        super(Report, self).__init__(context, request)
        self.report = report
        self.selection_macros = SelectionMacrosView(self.context, self.request)

    def __call__(self):
        """Build the report data and render it as HTML or stream CSV.

        Returns a dict with 'report_title'/'report_data' for HTML output.
        When the 'output_format' request parameter is 'CSV', the CSV is
        written straight to the response and the method implicitly returns
        None.
        """

        parms = []
        titles = []

        # Apply filters
        self.contentFilter = {'portal_type': 'Analysis'}
        val = self.selection_macros.parse_daterange(self.request,
                                                    'getDateRequested',
                                                    _('Date Requested'))
        if val:
            self.contentFilter[val['contentFilter']
                               [0]] = val['contentFilter'][1]
            parms.append(val['parms'])
            titles.append(val['titles'])

        # Query the catalog and store results in a dictionary
        analyses = self.bika_analysis_catalog(self.contentFilter)
        if not analyses:
            message = _("No analyses matched your query")
            self.context.plone_utils.addPortalMessage(message, "error")
            return self.default_template()

        groupby = self.request.form.get('GroupingPeriod', '')
        if (groupby != ''):
            parms.append({"title": _("Grouping period"), "value": _(groupby)})

        # datalines: one entry per period group, each holding per-service
        # sub-lines keyed by the service keyword.  footlines: grand totals.
        datalines = {}
        footlines = {}
        totalcount = len(analyses)
        totalpublishedcount = 0
        totalperformedcount = 0
        for analysis in analyses:
            analysis = analysis.getObject()
            ankeyword = analysis.getKeyword()
            antitle = analysis.Title()
            daterequested = analysis.created()

            # Derive the grouping key from the analysis creation date.
            group = ''
            if groupby == 'Day':
                group = self.ulocalized_time(daterequested)
            elif groupby == 'Week':
                group = daterequested.strftime(
                    "%Y") + ", " + daterequested.strftime("%U")
            elif groupby == 'Month':
                group = daterequested.strftime(
                    "%B") + " " + daterequested.strftime("%Y")
            elif groupby == 'Year':
                group = daterequested.strftime("%Y")
            else:
                group = ''

            # Fresh zeroed lines; replaced below by the accumulated ones when
            # this group / analysis keyword has been seen before.
            dataline = {
                'Group': group,
                'Requested': 0,
                'Performed': 0,
                'Published': 0,
                'Analyses': {}
            }
            anline = {
                'Analysis': antitle,
                'Requested': 0,
                'Performed': 0,
                'Published': 0
            }
            if (group in datalines):
                dataline = datalines[group]
                if (ankeyword in dataline['Analyses']):
                    anline = dataline['Analyses'][ankeyword]

            # Every analysis counts as one more "requested" in its group and
            # service line.
            grouptotalcount = dataline['Requested'] + 1
            groupperformedcount = dataline['Performed']
            grouppublishedcount = dataline['Published']

            anltotalcount = anline['Requested'] + 1
            anlperformedcount = anline['Performed']
            anlpublishedcount = anline['Published']

            workflow = getToolByName(self.context, 'portal_workflow')
            # "Published" means the parent Analysis Request is published.
            arstate = workflow.getInfoFor(analysis.aq_parent, 'review_state',
                                          '')
            if (arstate == 'published'):
                anlpublishedcount += 1
                grouppublishedcount += 1
                totalpublishedcount += 1

            # "Performed" means the analysis has a (non-empty) result.
            if (analysis.getResult()):
                anlperformedcount += 1
                groupperformedcount += 1
                totalperformedcount += 1

            # Ratios are recomputed each iteration; the values stored last
            # for a given group/service are the final ones.
            group_performedrequested_ratio = float(
                groupperformedcount) / float(grouptotalcount)
            group_publishedperformed_ratio = groupperformedcount > 0 and float(
                grouppublishedcount) / float(groupperformedcount) or 0

            anl_performedrequested_ratio = float(anlperformedcount) / float(
                anltotalcount)
            anl_publishedperformed_ratio = anlperformedcount > 0 and float(
                anlpublishedcount) / float(anlperformedcount) or 0

            dataline['Requested'] = grouptotalcount
            dataline['Performed'] = groupperformedcount
            dataline['Published'] = grouppublishedcount
            dataline[
                'PerformedRequestedRatio'] = group_performedrequested_ratio
            dataline['PerformedRequestedRatioPercentage'] = ('{0:.0f}'.format(
                group_performedrequested_ratio * 100)) + "%"
            dataline[
                'PublishedPerformedRatio'] = group_publishedperformed_ratio
            dataline['PublishedPerformedRatioPercentage'] = ('{0:.0f}'.format(
                group_publishedperformed_ratio * 100)) + "%"

            anline['Requested'] = anltotalcount
            anline['Performed'] = anlperformedcount
            anline['Published'] = anlpublishedcount
            anline['PerformedRequestedRatio'] = anl_performedrequested_ratio
            anline['PerformedRequestedRatioPercentage'] = ('{0:.0f}'.format(
                anl_performedrequested_ratio * 100)) + "%"
            anline['PublishedPerformedRatio'] = anl_publishedperformed_ratio
            anline['PublishedPerformedRatioPercentage'] = ('{0:.0f}'.format(
                anl_publishedperformed_ratio * 100)) + "%"

            dataline['Analyses'][ankeyword] = anline
            datalines[group] = dataline

        # Footer total data
        total_performedrequested_ratio = float(totalperformedcount) / float(
            totalcount)
        total_publishedperformed_ratio = totalperformedcount > 0 and float(
            totalpublishedcount) / float(totalperformedcount) or 0

        footline = {
            'Requested':
            totalcount,
            'Performed':
            totalperformedcount,
            'Published':
            totalpublishedcount,
            'PerformedRequestedRatio':
            total_performedrequested_ratio,
            'PerformedRequestedRatioPercentage':
            ('{0:.0f}'.format(total_performedrequested_ratio * 100)) + "%",
            'PublishedPerformedRatio':
            total_publishedperformed_ratio,
            'PublishedPerformedRatioPercentage':
            ('{0:.0f}'.format(total_publishedperformed_ratio * 100)) + "%"
        }

        footlines['Total'] = footline

        self.report_data = {
            'parameters': parms,
            'datalines': datalines,
            'footlines': footlines
        }

        # CSV export: write the raw counts to the response and fall through
        # (implicit None return) -- the caller treats string/None specially.
        if self.request.get('output_format', '') == 'CSV':
            import csv
            import StringIO
            import datetime

            fieldnames = [
                'Group',
                'Analysis',
                'Requested',
                'Performed',
                'Published',
            ]
            output = StringIO.StringIO()
            dw = csv.DictWriter(output,
                                extrasaction='ignore',
                                fieldnames=fieldnames)
            dw.writerow(dict((fn, fn) for fn in fieldnames))
            for group_name, group in datalines.items():
                for service_name, service in group['Analyses'].items():
                    dw.writerow({
                        'Group': group_name,
                        'Analysis': service_name,
                        'Requested': service['Requested'],
                        'Performed': service['Performed'],
                        'Published': service['Published'],
                    })
            report_data = output.getvalue()
            output.close()
            date = datetime.datetime.now().strftime("%Y%m%d%H%M")
            setheader = self.request.RESPONSE.setHeader
            setheader('Content-Type', 'text/csv')
            setheader(
                "Content-Disposition",
                "attachment;filename=\"analysesperformedpertotal_%s.csv\"" %
                date)
            self.request.RESPONSE.write(report_data)
        else:
            return {
                'report_title': _('Analyses performed as % of total'),
                'report_data': self.template()
            }
# Example #34
# 0
 def __call__(self):
     """Prepare the selection macros and view icon, then render the template."""
     context, request = self.context, self.request
     self.selection_macros = SelectionMacrosView(context, request)
     icon_path = "/++resource++bika.lims.images/report_big.png"
     self.icon = self.portal_url + icon_path
     return self.template()
class Report(BrowserView):
    """Productivity report: data entry day book.

    Lists every Analysis Request created in the selected period with its
    creation/reception/publication dates and lag figures, plus grand totals.
    Can also export the rows as CSV.
    """
    implements(IViewView)
    # Rendered instead of ``template`` when the query matches nothing.
    default_template = ViewPageTemplateFile("templates/productivity.pt")
    template = ViewPageTemplateFile(
        "templates/productivity_dataentrydaybook.pt")

    def __init__(self, context, request, report=None):
        """Store the optional *report* payload and prepare selection macros."""
        super(Report, self).__init__(context, request)
        self.report = report
        self.selection_macros = SelectionMacrosView(self.context, self.request)

    def __call__(self):
        """Build the day-book data and render it as HTML or stream CSV.

        Returns a dict with 'report_title'/'report_data' for HTML output.
        When the 'output_format' request parameter is 'CSV', the CSV is
        written straight to the response and the method implicitly returns
        None.
        """

        parms = []
        titles = []

        # Apply filters
        self.contentFilter = {'portal_type': 'AnalysisRequest'}
        val = self.selection_macros.parse_daterange(self.request,
                                                    'getDateCreated',
                                                    _('Date Created'))
        if val:
            self.contentFilter["created"] = val['contentFilter'][1]
            parms.append(val['parms'])
            titles.append(val['titles'])

        # Query the catalog and store results in a dictionary
        catalog = getToolByName(self.context, CATALOG_ANALYSIS_REQUEST_LISTING)
        ars = catalog(self.contentFilter)

        logger.info("Catalog Query '{}' returned {} results".format(
            self.contentFilter, len(ars)))

        if not ars:
            message = _("No Samples matched your query")
            self.context.plone_utils.addPortalMessage(message, "error")
            return self.default_template()

        datalines = {}
        footlines = {}
        totalcreatedcount = len(ars)
        totalreceivedcount = 0
        totalpublishedcount = 0
        totalanlcount = 0
        totalreceptionlag = 0
        totalpublicationlag = 0

        for ar in ars:
            ar = ar.getObject()
            datecreated = ar.created()
            datereceived = ar.getDateReceived()
            datepublished = getTransitionDate(ar, 'publish')
            # NOTE(review): receptionlag and publicationlag are initialised
            # to 0 and never computed from the dates above, so ReceptionLag,
            # PublicationLag, TotalLag and the Avg* footer figures are always
            # zero -- confirm whether the lag computation was lost.
            receptionlag = 0
            publicationlag = 0
            anlcount = len(ar.getAnalyses())

            dataline = {
                "AnalysisRequestID": ar.getId(),
                "DateCreated": self.ulocalized_time(datecreated),
                "DateReceived": self.ulocalized_time(datereceived),
                "DatePublished": self.ulocalized_time(datepublished),
                "ReceptionLag": receptionlag,
                "PublicationLag": publicationlag,
                "TotalLag": receptionlag + publicationlag,
                "BatchID": ar.getBatch().getId() if ar.getBatch() else '',
                "SampleID": ar.getId(),
                "SampleType": ar.getSampleTypeTitle(),
                "NumAnalyses": anlcount,
                "ClientID": ar.aq_parent.id,
                "Creator": ar.Creator(),
                "Remarks": ar.getRemarks()
            }

            datalines[ar.getId()] = dataline

            totalreceivedcount += ar.getDateReceived() and 1 or 0
            totalpublishedcount += 1 if datepublished else 0
            totalanlcount += anlcount
            totalreceptionlag += receptionlag
            totalpublicationlag += publicationlag

        # Footer total data.  totalcreatedcount is > 0 here (empty result
        # sets returned early above), so the divisions are safe.
        totalreceivedcreated_ratio = float(totalreceivedcount) / float(
            totalcreatedcount)
        totalpublishedcreated_ratio = float(totalpublishedcount) / float(
            totalcreatedcount)
        totalpublishedreceived_ratio = totalreceivedcount and float(
            totalpublishedcount) / float(totalreceivedcount) or 0

        footline = {
            'Created':
            totalcreatedcount,
            'Received':
            totalreceivedcount,
            'Published':
            totalpublishedcount,
            'ReceivedCreatedRatio':
            totalreceivedcreated_ratio,
            'ReceivedCreatedRatioPercentage':
            ('{0:.0f}'.format(totalreceivedcreated_ratio * 100)) + "%",
            'PublishedCreatedRatio':
            totalpublishedcreated_ratio,
            'PublishedCreatedRatioPercentage':
            ('{0:.0f}'.format(totalpublishedcreated_ratio * 100)) + "%",
            'PublishedReceivedRatio':
            totalpublishedreceived_ratio,
            'PublishedReceivedRatioPercentage':
            ('{0:.0f}'.format(totalpublishedreceived_ratio * 100)) + "%",
            # NOTE(review): under Python 2 these int/int divisions truncate
            # before formatting; moot while the lags are always 0 (above).
            'AvgReceptionLag':
            ('{0:.1f}'.format(totalreceptionlag / totalcreatedcount)),
            'AvgPublicationLag':
            ('{0:.1f}'.format(totalpublicationlag / totalcreatedcount)),
            'AvgTotalLag': ('{0:.1f}'.format(
                (totalreceptionlag + totalpublicationlag) /
                totalcreatedcount)),
            'NumAnalyses':
            totalanlcount
        }

        footlines['Total'] = footline

        self.report_data = {
            'parameters': parms,
            'datalines': datalines,
            'footlines': footlines
        }

        # CSV export: write the rows to the response and fall through
        # (implicit None return) -- the caller treats string/None specially.
        if self.request.get('output_format', '') == 'CSV':
            import csv
            import StringIO
            import datetime

            fieldnames = [
                "AnalysisRequestID",
                "DateCreated",
                "DateReceived",
                "DatePublished",
                "ReceptionLag",
                "PublicationLag",
                "TotalLag",
                "BatchID",
                "SampleID",
                "SampleType",
                "NumAnalyses",
                "ClientID",
                "Creator",
                "Remarks",
            ]
            output = StringIO.StringIO()
            dw = csv.DictWriter(output,
                                extrasaction='ignore',
                                fieldnames=fieldnames)
            dw.writerow(dict((fn, fn) for fn in fieldnames))
            for ar_id, row in datalines.items():
                dw.writerow({
                    "AnalysisRequestID": row["AnalysisRequestID"],
                    "DateCreated": row["DateCreated"],
                    "DateReceived": row["DateReceived"],
                    "DatePublished": row["DatePublished"],
                    "ReceptionLag": row["ReceptionLag"],
                    "PublicationLag": row["PublicationLag"],
                    "TotalLag": row["TotalLag"],
                    "BatchID": row["BatchID"],
                    "SampleID": row["SampleID"],
                    "SampleType": row["SampleType"],
                    "NumAnalyses": row["NumAnalyses"],
                    "ClientID": row["ClientID"],
                    "Creator": row["Creator"],
                    "Remarks": row["Remarks"],
                })
            report_data = output.getvalue()
            output.close()
            date = datetime.datetime.now().strftime("%Y%m%d%H%M")
            setheader = self.request.RESPONSE.setHeader
            setheader('Content-Type', 'text/csv')
            setheader("Content-Disposition",
                      "attachment;filename=\"dataentrydaybook_%s.csv\"" % date)
            self.request.RESPONSE.write(report_data)
        else:
            return {
                'report_title': _('Data entry day book'),
                'report_data': self.template()
            }
# Example #36
# 0
 def __init__(self, context, request, report=None):
     """Initialise the view, keeping the optional *report* payload and
     preparing the selection-criteria macros used by the template."""
     super(Report, self).__init__(context, request)
     self.selection_macros = SelectionMacrosView(self.context, self.request)
     self.report = report
class Report(BrowserView):
    """Productivity report: daily samples received.

    Lists one line per analysis of every sample received within the
    selected date range, with a single footer row holding the grand total
    of analysis lines.
    """
    implements(IViewView)
    default_template = ViewPageTemplateFile("templates/productivity.pt")
    template = ViewPageTemplateFile(
        "templates/productivity_dailysamplesreceived.pt")

    def __init__(self, context, request, report=None):
        """Keep the optional *report* payload and set up selection macros."""
        super(Report, self).__init__(context, request)
        self.report = report
        self.selection_macros = SelectionMacrosView(self.context, self.request)

    def __call__(self):
        """Query received samples and build one data line per analysis.

        Returns a dict with 'report_title' and rendered 'report_data', or
        renders ``default_template`` when no sample matches.
        """
        parms = []
        titles = []

        # Only samples that reached (or passed) reception, ordered by the
        # date they were received.
        self.contentFilter = {
            'portal_type': 'Sample',
            'review_state': ['sample_received', 'expired', 'disposed'],
            'sort_on': 'getDateReceived'
        }

        date_range = self.selection_macros.parse_daterange(
            self.request, 'getDateReceived', _('Date Received'))
        if date_range:
            flt = date_range['contentFilter']
            self.contentFilter[flt[0]] = flt[1]
            parms.append(date_range['parms'])
            titles.append(date_range['titles'])

        # Run the catalog query; bail out early with a portal message when
        # nothing matches.
        samples = self.bika_catalog(self.contentFilter)
        if not samples:
            message = _("No samples matched your query")
            self.context.plone_utils.addPortalMessage(message, "error")
            return self.default_template()

        datalines = []
        analyses_count = 0
        for brain in samples:
            sample = brain.getObject()
            # Sample-level fields are the same for every analysis line of
            # this sample, so compute them once per sample.
            sample_id = sample.getSampleID()
            sample_type = sample.getSampleType().Title()
            received = self.ulocalized_time(sample.getDateReceived(),
                                            long_format=1)
            sampled = self.ulocalized_time(sample.getSamplingDate())

            for analysis_brain in sample.getAnalyses({}):
                analysis = analysis_brain.getObject()
                datalines.append({
                    'AnalysisKeyword': analysis.getKeyword(),
                    'AnalysisTitle': analysis.getServiceTitle(),
                    'SampleID': sample_id,
                    'SampleType': sample_type,
                    'SampleDateReceived': received,
                    'SampleSamplingDate': sampled
                })
                analyses_count += 1

        # Single footer row carrying the grand total of analysis lines.
        footlines = [{'TotalCount': analyses_count}]

        self.report_data = {
            'parameters': parms,
            'datalines': datalines,
            'footlines': footlines
        }

        return {
            'report_title': _('Daily samples received'),
            'report_data': self.template()
        }