Example #1
def get_age(datetime_from, datetime_to=None):
    """Returns the elapsed time in years, months and days between the two
    dates passed in."""
    if not datetime_to:
        datetime_to = DateTime()

    if not bapi.is_date(datetime_from) or not bapi.is_date(datetime_to):
        bapi.fail("Only DateTime and datetype types are supported")

    dfrom = DT2dt(bapi.to_date(datetime_from)).replace(tzinfo=None)
    dto = DT2dt(bapi.to_date(datetime_to)).replace(tzinfo=None)

    diff = relativedelta(dto, dfrom)
    return (diff.years, diff.months, diff.days)
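For reference, the same years/months/days decomposition can be reproduced outside senaite with plain datetime plus dateutil's relativedelta (a minimal sketch; the sample dates are hypothetical):

from datetime import datetime
from dateutil.relativedelta import relativedelta

# hypothetical, timezone-naive dates, like the ones get_age() builds after
# stripping tzinfo
dfrom = datetime(1990, 5, 20)
dto = datetime(2021, 8, 3)

diff = relativedelta(dto, dfrom)
print((diff.years, diff.months, diff.days))  # -> (31, 2, 14)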
Example #2
def to_datetime(date_value, default=None, tzinfo=None):
    if isinstance(date_value, datetime):
        return date_value

    # Get the DateTime
    date_value = api.to_date(date_value, default=None)
    if not date_value:
        if default is None:
            return None
        return to_datetime(default, tzinfo=tzinfo)

    # Convert to datetime and strip
    return DT2dt(date_value).replace(tzinfo=tzinfo)
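The fallback-to-default behaviour of to_datetime can be illustrated standalone (a minimal sketch; datetime.strptime stands in for senaite's api.to_date and the values are hypothetical):

from datetime import datetime

def parse_or_default(value, default=None):
    # fall back to the default when the primary value cannot be parsed
    try:
        return datetime.strptime(value, "%Y-%m-%d")
    except (TypeError, ValueError):
        if default is None:
            return None
        return parse_or_default(default)

print(parse_or_default("2021-01-15"))                   # -> 2021-01-15 00:00:00
print(parse_or_default("oops", default="2021-01-01"))   # -> 2021-01-01 00:00:00
print(parse_or_default("oops"))                         # -> None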
Example #3
    def to_localized_time(self, date, **kw):
        """Converts the given date to a localized time string
        """
        date = api.to_date(date, default=None)
        if date is None:
            return ""
        # default options
        options = {
            "long_format": True,
            "time_only": False,
            "context": self.context,
            "request": self.request,
            "domain": "senaite.core",
        }
        options.update(kw)
        return ulocalized_time(date, **options)
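The defaults-plus-overrides pattern used by to_localized_time can be shown standalone (a minimal sketch; a plain strftime formatter stands in for Plone's ulocalized_time and the format strings are hypothetical):

from datetime import datetime

def localize(date_str, **kw):
    # merge caller overrides over the default options, as the method above does
    options = {"long_format": True, "time_only": False}
    options.update(kw)
    fmt = "%H:%M" if options["time_only"] else (
        "%Y-%m-%d %H:%M" if options["long_format"] else "%Y-%m-%d")
    return datetime.strptime(date_str, "%Y-%m-%d %H:%M").strftime(fmt)

print(localize("2021-06-30 14:15"))                  # -> 2021-06-30 14:15
print(localize("2021-06-30 14:15", time_only=True))  # -> 14:15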
Example #4
    def validate(self, field, condition):
        """Returns True if all required values for the condition are valid
        """
        required = field.required_subfields
        types = field.subfield_types

        for subfield in required:
            typ = types.get(subfield) or ""
            val = condition.get(subfield)
            val = val and val.strip()
            if "date" in typ:
                if not api.to_date(val, default=None):
                    # Not a valid date
                    return False
            elif not val:
                # Not a valid value
                return False

        # All checks passed
        return True
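The required-subfield check can be exercised outside the form framework (a minimal sketch; the subfields, types and condition values are hypothetical, and datetime.strptime stands in for api.to_date):

from datetime import datetime

def is_date(value):
    # stand-in for api.to_date(value, default=None): None when not parseable
    try:
        return datetime.strptime(value, "%Y-%m-%d")
    except (TypeError, ValueError):
        return None

required = ("volume", "expiry_date")   # hypothetical required subfields
types = {"expiry_date": "datepicker"}  # hypothetical subfield types
condition = {"volume": " 10 ", "expiry_date": "2021-06-30"}

valid = True
for subfield in required:
    typ = types.get(subfield) or ""
    val = (condition.get(subfield) or "").strip()
    if "date" in typ:
        valid = valid and bool(is_date(val))
    else:
        valid = valid and bool(val)
print(valid)  # -> True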
Example #5
    def __call__(self):

        parms = []
        titles = []

        rt = getToolByName(self.context, 'portal_repository')
        mt = getToolByName(self.context, 'portal_membership')

        # Apply filters
        self.contentFilter = {'portal_type': ('Analysis',
                                              'AnalysisCategory',
                                              'AnalysisProfile',
                                              'AnalysisRequest',
                                              'AnalysisService',
                                              'AnalysisSpec',
                                              'ARTemplate',
                                              'Attachment',
                                              'Batch',
                                              'Calculation',
                                              'Client',
                                              'Contact',
                                              'Container',
                                              'ContainerType',
                                              'Department',
                                              'DuplicateAnalysis',
                                              'Instrument',
                                              'InstrumentCalibration',
                                              'InstrumentCertification',
                                              'InstrumentMaintenanceTask',
                                              'InstrumentScheduledTask',
                                              'InstrumentType',
                                              'InstrumentValidation',
                                              'Manufacturer',
                                              'Method',
                                              'Preservation',
                                              'Pricelist',
                                              'ReferenceAnalysis',
                                              'ReferenceDefinition',
                                              'ReferenceSample',
                                              'SampleMatrix',
                                              'SamplePoint',
                                              'SampleType',
                                              'Supplier',
                                              'SupplierContact',
                                              'Worksheet',
                                              'WorksheetTemplate'
        )}

        val = self.selection_macros.parse_daterange(self.request,
                                                    'getModificationDate',
                                                    _('Modification date'))
        if val:
            self.contentFilter['modified'] = val['contentFilter'][1]
            parms.append(val['parms'])
            titles.append(val['titles'])

        user = ''
        userfullname = ''
        titles.append(user)
        if self.request.form.get('User', '') != '':
            user = self.request.form['User']
            userobj = mt.getMemberById(user)
            userfullname = userobj.getProperty('fullname') \
                           if userobj else ''
            parms.append(
                {'title': _('User'), 'value': ("%s (%s)" % (userfullname, user))})

        # Query the catalog and store results in a dictionary
        entities = self.bika_setup_catalog(self.contentFilter)

        if not entities:
            message = _("No historical actions matched your query")
            self.context.plone_utils.addPortalMessage(message, "error")
            return self.default_template()

        datalines = []
        tmpdatalines = {}
        footlines = {}

        for entity in entities:
            entity = entity.getObject()
            entitytype = _(entity.__class__.__name__)

            # Workflow states retrieval
            for workflowid, workflow in entity.workflow_history.iteritems():
                for action in workflow:
                    if not action['action'] or action['action'] == 'create':
                        actiontitle = _('Created')
                    else:
                        actiontitle = _(action['action'])

                    if (user == '' or action['actor'] == user):
                        actorfullname = userfullname == '' and mt.getMemberById(
                            user) or userfullname
                        dataline = {'EntityNameOrId': entity.title_or_id(),
                                    'EntityAbsoluteUrl': entity.absolute_url(),
                                    'EntityCreationDate': entity.CreationDate(),
                                    'EntityModificationDate': entity.ModificationDate(),
                                    'EntityType': entitytype,
                                    'Workflow': _(workflowid),
                                    'Action': actiontitle,
                                    'ActionDate': action['time'],
                                    'ActionDateStr': self.ulocalized_time(
                                        action['time'], 1),
                                    'ActionActor': action['actor'],
                                    'ActionActorFullName': actorfullname,
                                    'ActionComments': action['comments']
                        }
                        tmpdatalines[action['time']] = dataline

            # History versioning retrieval
            history = rt.getHistoryMetadata(entity)
            if history:
                hislen = history.getLength(countPurged=False)
                for index in range(hislen):
                    meta = history.retrieve(index)['metadata']['sys_metadata']
                    metatitle = _(meta['comment'])
                    if (user == '' or meta['principal'] == user):
                        actorfullname = userfullname == '' and \
                            mt.getMemberById(user) or userfullname
                        action_date = api.to_date(meta['timestamp'], None)
                        if not action_date:
                            logger.warn("Cannot convert date {}").format(meta['timestamp'])
                            action_date = "???"
                        else:
                            action_date = self.ulocalized_time(action_date, long_format=1)
                        dataline = {'EntityNameOrId': entity.title_or_id(),
                                    'EntityAbsoluteUrl': entity.absolute_url(),
                                    'EntityCreationDate': entity.CreationDate(),
                                    'EntityModificationDate': entity.ModificationDate(),
                                    'EntityType': entitytype,
                                    'Workflow': '',
                                    'Action': metatitle,
                                    'ActionDate': meta['timestamp'],
                                    'ActionDateStr': action_date,
                                    'ActionActor': meta['principal'],
                                    'ActionActorFullName': actorfullname,
                                    'ActionComments': ''
                        }
                        tmpdatalines[meta['timestamp']] = dataline
        if len(tmpdatalines) == 0:
            message = _(
                "No actions found for user ${user}",
                mapping={"user": userfullname})
            self.context.plone_utils.addPortalMessage(message, "error")
            return self.default_template()
        else:
            # Sort datalines
            tmpkeys = tmpdatalines.keys()
            tmpkeys.sort(reverse=True)
            for index in range(len(tmpkeys)):
                datalines.append(tmpdatalines[tmpkeys[index]])

            self.report_data = {'parameters': parms,
                                'datalines': datalines,
                                'footlines': footlines}

            return {'report_title': _('Users history'),
                    'report_data': self.template()}
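The timestamp-keyed sort at the end of the report can be reproduced standalone (a minimal sketch; ISO strings stand in for the DateTime keys and sorted() replaces the Python 2 keys()/sort() idiom):

tmpdatalines = {
    "2021-03-01T10:00:00": {"Action": "verify"},
    "2021-03-02T09:30:00": {"Action": "publish"},
    "2021-02-27T16:45:00": {"Action": "submit"},
}
# newest first, matching tmpkeys.sort(reverse=True) above
datalines = [tmpdatalines[key] for key in sorted(tmpdatalines, reverse=True)]
print([line["Action"] for line in datalines])  # -> ['publish', 'verify', 'submit']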
Example #6
def snapshot_created(instance):
    """Snapshot created date
    """
    last_snapshot = get_last_snapshot(instance)
    snapshot_created = get_created(last_snapshot)
    return api.to_date(snapshot_created)
Example #7
    def __call__(self):
        year = self.request.form.get('year', DateTime().year())
        quarter = self.request.form.get('quarter', "Q1")
        category_uid = self.request.form.get('CategoryUID', None)
        if not category_uid:
            return

        date_from = "{}-01-01"
        date_to = "{}-03-31"
        if quarter == "Q1":
            date_from = "{}-10-01"
            date_to = "{}-12-31"
        elif quarter == "Q3":
            date_from = "{}-04-01"
            date_to = "{}-06-30"
        elif quarter == "Q4":
            date_from = "{}-07-01"
            date_to = "{}-09-30"

        date_from = date_from.format(year)
        date_to = date_to.format(year)
        date_from = api.to_date(date_from, DateTime())
        date_to = api.to_date(date_to, DateTime())

        query = {
            'getCategoryUID': category_uid,
            'getDatePublished': {
                'query': [date_from, date_to],
                'range': 'min:max'
            },
            'review_state': ['verified', 'published'],
            'cancellation_state': 'active',
        }
        self.cells = dict()
        catalog = api.get_tool(CATALOG_ANALYSIS_LISTING)
        for analysis_brain in catalog(query):
            result = self.to_float(analysis_brain.getResult)
            if not result:
                continue
            patient_brain = self.get_patient_brain(analysis_brain)
            if not patient_brain:
                continue
            self.fill_reported(result)
            self.fill_results_by_sex(result, patient_brain)
            self.fill_results_by_age(result, patient_brain)
            self.fill_results_by_pregnancy(result, analysis_brain)

        # Build report
        this_dir = os.path.dirname(os.path.abspath(__file__))
        templates_dir = os.path.join(this_dir, 'excel_files')
        wb = load_workbook(templates_dir +
                           '/ViralLoadQuarterlyMonitoringToolv1.xlsx')
        # grab the NMRL worksheet by name
        ws = wb.get_sheet_by_name('NMRL')
        for cell_id, value in self.cells.items():
            ws[cell_id] = str(value)

        # Fill the header cells with laboratory and user details
        laboratory = self.context.bika_setup.laboratory
        address = laboratory.getPhysicalAddress()
        user = api.get_current_user()
        ws["I3"] = address and address.get('state', "") or ""
        ws["L3"] = address and address.get('city', "") or ""
        ws["D3"] = address and address.get('country', "") or ""
        ws["E5"] = laboratory.Title()
        ws["E7"] = laboratory.getTaxNumber()
        ws["I7"] = user.getProperty('fullname')
        ws["L7"] = user.getProperty('email')
        ws["H9"] = quarter

        # Save the file in memory
        return save_in_memory_and_return(wb)
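For reference, the quarter-to-date-range construction above can also be written as a lookup table (a minimal sketch, assuming the same October-based fiscal quarters; year and quarter are hypothetical):

QUARTERS = {
    "Q1": ("{}-10-01", "{}-12-31"),
    "Q2": ("{}-01-01", "{}-03-31"),  # the fall-through default above
    "Q3": ("{}-04-01", "{}-06-30"),
    "Q4": ("{}-07-01", "{}-09-30"),
}

year, quarter = 2021, "Q3"
date_from, date_to = (fmt.format(year) for fmt in QUARTERS[quarter])
print(date_from, date_to)  # -> 2021-04-01 2021-06-30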
Example #8
    def __call__(self):
        year = int(
            self.request.form.get('year_viralloadstatistics',
                                  DateTime().year()))
        month = int(self.request.form.get('month_viralloadstatistics', "1"))
        category_uid = self.request.form.get('CategoryUID', None)
        if not category_uid:
            return

        logger.warn("year:{}".format(year))
        logger.warn("month:{}".format(month))
        last_day = calendar.monthrange(year, month)[1]
        date_from = "{}-{}-01".format(year, month)
        date_to = "{}-{}-{}".format(year, month, last_day)
        date_from = api.to_date(date_from, DateTime())
        date_to = api.to_date(date_to, DateTime())

        this_dir = os.path.dirname(os.path.abspath(__file__))
        templates_dir = os.path.join(this_dir, 'excel_files')
        wb_path = '/'.join([templates_dir, XLS_TEMPLATE])
        self.workbook = load_workbook(wb_path)

        # Fill statistics' header sheet
        ws_stats = self.workbook.get_sheet_by_name(SHEET_STATISTICS)
        month_name = datetime.date(year, month, 1).strftime("%B")
        month_name = month_name.upper()
        ws_stats["C2"] = "REPORTING MONTH/YEAR: {}/{}".format(month_name, year)
        query = {
            'getCategoryUID': category_uid,
            'getDatePublished': {
                'query': [date_from, date_to],
                'range': 'min:max'
            },
            'review_state': ['verified', 'published'],
            'cancellation_state': 'active',
            'sort_on': 'getClientTitle',
            'sort_order': 'ascending'
        }

        catalog = api.get_tool(CATALOG_ANALYSIS_LISTING)
        for analysis_brain in catalog(query):
            patient_brain = self.get_patient_brain(analysis_brain)
            if not patient_brain:
                continue

            self.render_statistics_row(analysis_brain)

        # Fill statistics sheet
        row_num_start = 6
        row_num = row_num_start  # start on row number 6 (headers before)
        provinces_dict = self.cells.get(SHEET_STATISTICS, dict())
        provinces = provinces_dict.keys()
        provinces.sort()
        for province in provinces:
            logger.warn("Province: {}".format(province))
            districts_dict = provinces_dict.get(province)
            districts = districts_dict.keys()
            districts.sort()
            for district in districts:
                logger.warn("  District: {}".format(district))
                clients = districts_dict.get(district)
                for client_uid, row in clients.items():
                    logger.warn("    Client: {}".format(client_uid))
                    for column, cell_value in row.items():
                        cell_id = '{}{}'.format(column, row_num)
                        logger.warn("      {}: {}".format(cell_id, cell_value))
                        ws_stats[cell_id] = cell_value
                    cell_id = 'A{}'.format(row_num)
                    ws_stats[cell_id] = row_num - row_num_start + 1
                    row_num += 1

        # Save the file in memory
        return save_in_memory_and_return(self.workbook)
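The month-boundary computation in this example stands alone with calendar.monthrange (a minimal sketch with a hypothetical reporting month):

import calendar

year, month = 2021, 2
last_day = calendar.monthrange(year, month)[1]  # number of days in the month
date_from = "{}-{}-01".format(year, month)
date_to = "{}-{}-{}".format(year, month, last_day)
print(date_from, date_to)  # -> 2021-2-01 2021-2-28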
Example #9
def get_historicresults(patient):
    if not patient:
        return [], {}

    rows = {}
    dates = []

    # Retrieve the AR IDs for the current patient
    query = {
        "portal_type": "AnalysisRequest",
        "getPatientUID": api.get_uid(patient),
        "review_state": ["verified", "published"],
        "sort_on": "getDateSampled",
        "sort_order": "descending"
    }
    brains = api.search(query, CATALOG_ANALYSIS_REQUEST_LISTING)
    samples = map(api.get_object, brains)

    # Retrieve all analyses
    analyses = map(lambda samp: samp.objectValues("Analysis"), samples)
    analyses = list(itertools.chain.from_iterable(analyses))

    # Build the dictionary of rows
    for analysis in analyses:
        sample = analysis.aq_parent
        sample_type = sample.getSampleType()
        row = {
            "object": sample_type,
            "analyses": {},
        }
        sample_type_uid = api.get_uid(sample_type)
        if sample_type_uid in rows:
            row = rows.get(sample_type_uid)

        anrow = row.get("analyses")
        service_uid = analysis.getServiceUID()
        asdict = {
            "object": analysis,
            "title": api.get_title(analysis),
            "keyword": to_utf8(analysis.getKeyword()),
        }
        if service_uid in anrow:
            asdict = anrow.get(service_uid)

        if not asdict.get("units", None):
            asdict.update(
                {"units": format_supsub(to_utf8(analysis.getUnit()))})

        date = analysis.getResultCaptureDate() or analysis.created()
        date_time = DT2dt(to_date(date)).replace(tzinfo=None)
        date_time = datetime.strftime(date_time, "%Y-%m-%d %H:%M")

        # If more than one analysis of the same type has been
        # performed in the same datetime, get only the last one
        if date_time not in asdict.keys():
            asdict[date_time] = {
                "object": analysis,
                "result": analysis.getResult(),
                "formattedresult": analysis.getFormattedResult()
            }
            # Get the specs
            # Only the specs applied to the last analysis for that
            # sample type will be taken into consideration.
            # We assume specs from previous analyses are obsolete.
            if "specs" not in asdict.keys():
                specs = analysis.getResultsRange()
                asdict["specs"] = get_formatted_interval(specs, "")

            if date_time not in dates:
                dates.append(date_time)

        anrow[service_uid] = asdict
        row['analyses'] = anrow
        rows[sample_type_uid] = row
    dates.sort(reverse=False)
    return dates, rows
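The nested rows/dates accumulation above can be sketched standalone (hypothetical sample types, services and results stand in for the catalog objects):

rows = {}
dates = []
records = [
    ("blood", "glucose", "2021-06-01 14:30", "5.2"),
    ("blood", "glucose", "2021-06-02 09:10", "5.5"),
    ("urine", "pH", "2021-06-01 14:30", "6.8"),
]
for sample_type, service, date_time, result in records:
    row = rows.setdefault(sample_type, {"analyses": {}})
    asdict = row["analyses"].setdefault(service, {})
    if date_time not in asdict:  # first result per timestamp key wins
        asdict[date_time] = result
        if date_time not in dates:
            dates.append(date_time)
dates.sort()
print(dates)  # -> ['2021-06-01 14:30', '2021-06-02 09:10']
print(rows["blood"]["analyses"]["glucose"])
# -> {'2021-06-01 14:30': '5.2', '2021-06-02 09:10': '5.5'}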