Example #1
def get_date(context, value):
    """Tries to return a DateTime.DateTime object
    """
    if not value:
        return None
    if isinstance(value, DateTime):
        return value
    if isinstance(value, datetime):
        return dt2DT(value)
    if not isinstance(value, basestring):
        return None

    def try_parse(date_string, format):
        if not format:
            return None
        try:
            struct_time = strptime(date_string, format)
            return datetime(*struct_time[:6])
        except ValueError:
            pass
        return None

    def get_locale_format(key, context):
        format = context.translate(key, domain="senaite.core", mapping={})
        # TODO: Is this replacement below strictly necessary?
        return format.replace(r"${", '%').replace('}', '')

    # Try with prioritized formats
    formats = [
        get_locale_format("date_format_long", context),
        get_locale_format("date_format_short", context), "%Y-%m-%d %H:%M",
        "%Y-%m-%d", "%Y-%m-%d %H:%M:%S"
    ]
    for pri_format in formats:
        val = try_parse(value, pri_format)
        if not val:
            continue
        val = dt2DT(val)
        if val.timezoneNaive():
            # Use local timezone for tz naive strings
            # see http://dev.plone.org/plone/ticket/10141
            zone = val.localZone(safelocaltime(val.timeTime()))
            parts = val.parts()[:-1] + (zone, )
            val = DateTime(*parts)
        return val

    logger.warn("Unable to convert {} to datetime".format(value))
    return None
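The priority loop above returns on the first format that parses. A minimal
standalone sketch of the same idea, using only the standard library (the
format list and the sample string are illustrative):

    from datetime import datetime
    from time import strptime

    def parse_first(value, formats):
        # Return the first successful parse, or None, mirroring the
        # prioritized-formats loop in get_date above.
        for fmt in formats:
            try:
                return datetime(*strptime(value, fmt)[:6])
            except ValueError:
                continue
        return None

    # "%Y-%m-%d %H:%M:%S" fails first, then "%Y-%m-%d %H:%M" matches:
    parse_first("2021-03-01 14:30", ["%Y-%m-%d %H:%M:%S", "%Y-%m-%d %H:%M"])
    # -> datetime.datetime(2021, 3, 1, 14, 30)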
Example #2
 def getDataMinima(self):
     """ 
     """
     now = datetime.datetime.now()
     delta = datetime.timedelta(hours=2)
     DT = dt2DT(now+delta)
     return DT
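dt2DT converts a Python datetime into a Zope DateTime (in Plone codebases it
is commonly imported from Products.ATContentTypes.utils). A rough stand-in,
assuming the DateTime package is available and that naive datetimes should be
read in the server's local zone:

    from datetime import datetime, timedelta
    from DateTime import DateTime

    def dt2DT_sketch(dt):
        # Approximation only: Zope's DateTime also parses ISO-8601 strings,
        # and a naive datetime is interpreted in the local zone.
        return DateTime(dt.isoformat())

    dt2DT_sketch(datetime.now() + timedelta(hours=2))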
Example #3
 def getTimelineEvents(self):
     """Returns the data needed to build the timeline.
     """
     events = []
     previousSortDate = None
     previousSortDate_m = None
     for entry in self.brains:
         deadline = getattr(entry, 'deadline', None)
         sortDate = entry.created
         if deadline is not None:
             try:
                 sortDate = dt2DT(deadline)
             except Exception:
                 # fall back to the creation date if conversion fails
                 sortDate = entry.created
         sortDate_m = int(sortDate.millis())
         if previousSortDate is None:
             previousSortDate = sortDate - 1
         previousSortDate_m = int(previousSortDate.millis())
         event = {
             'end': sortDate_m,
             'id': entry.id,
             'prettyDate': pretty_date(sortDate),
             'start': previousSortDate_m,
             'timeSpan': sortDate_m - previousSortDate_m,
             'title': entry.Title,
         }
         events.append(event)
         previousSortDate = sortDate
     return events
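Each event's start is the previous event's end, so timeSpan is the gap
between consecutive dates in epoch milliseconds (the unit DateTime.millis()
returns). The chaining in isolation, with plain datetimes and illustrative
dates:

    from datetime import datetime

    def to_millis(dt, _epoch=datetime(1970, 1, 1)):
        # Same unit as DateTime.millis(): milliseconds since the epoch.
        return int((dt - _epoch).total_seconds() * 1000)

    dates = [datetime(2021, 3, 1), datetime(2021, 3, 3), datetime(2021, 3, 7)]
    spans = [to_millis(b) - to_millis(a) for a, b in zip(dates, dates[1:])]
    # -> [172800000, 345600000]  (2 days, then 4 days)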
Example #4
    def setUp(self):
        portal = self.layer['portal']
        login(portal, MANAGER_ID)

        # Create a bunch of questions with different times
        portal.invokeFactory(type_name="qna_forum", id="qna")
        forum = portal['qna']
        for i in range(1, 26):
            id = "question" + str(i)
            forum.invokeFactory(
                type_name="qna_question",
                id=id,
                title="Question " + str(i),
                # Tag content with what it's divisible by
                subject=(["divisible by %d" % j for j in range(2, 10)
                                                if i % j == 0])
            )
            forum[id].creation_date = dt2DT(datetime.now()
                                            - timedelta(hours=30 - i))
            for i in range(0, i % 5):
                # Answer the question i mod 5 times (i.e. no answers iff
                # divisible by 5)
                forum[id].invokeFactory(
                    type_name="qna_answer",
                    id="answer%d" % i,
                    answer="Answer %d" % i,
                )
            forum[id].reindexObject()
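The subject tagging above records every divisor of i between 2 and 9, while
i % 5 controls how many answers each question gets. With illustrative values:

    for i in (6, 10, 23):
        tags = ["divisible by %d" % j for j in range(2, 10) if i % j == 0]
        answers = i % 5  # number of qna_answer objects created
    # i=6:  1 answer,  tags ['divisible by 2', 'divisible by 3', 'divisible by 6']
    # i=10: 0 answers, tags ['divisible by 2', 'divisible by 5']
    # i=23: 3 answers, no tags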
Example #5
    def __call__(self):
        try:
            plone.protect.CheckAuthenticator(self.request)
        except Exception:
            return json.dumps({"error": "Form authenticator is invalid"})

        member = self.portal_membership.getAuthenticatedMember()
        if not member.has_role("Sampler"):
            return json.dumps({})

        query = {"query": dt2DT(datetime.datetime.now()), "range": "max"}

        ars = self.bika_catalog(portal_type="AnalysisRequest", review_state="sample_due", getSamplingDate=query)

        ret = []
        for ar in ars:
            ar = ar.getObject()
            sample = ar.getSample()
            if not hasattr(sample, "future_dated"):
                continue
            batch_title = ar.getBatch().Title() if ar.getBatch() else ""
            ret.append(
                {
                    "UID": ar.UID(),
                    "Title": "<a href='{0}'>{1}</a>".format(ar.absolute_url(), ar.Title()),
                    "Description": "Works Order ID: {0}".format(batch_title) if batch_title else "",
                }
            )

        return json.dumps(ret)
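The {"query": ..., "range": "max"} mapping is a ZCatalog date-index query: it
matches objects whose indexed date is at or before the given value. The
equivalent filter in plain Python, with illustrative data:

    import datetime

    now = datetime.datetime.now()
    sampling_dates = [now - datetime.timedelta(days=1),
                      now + datetime.timedelta(days=2)]
    # "range": "max" keeps dates <= the query value:
    due = [d for d in sampling_dates if d <= now]  # only the past date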
Example #6
    def get_items(self):
        now = time.time()
        url = self.data.url
        chooser = getUtility(ICacheChooser)
        cache = chooser('cs.portlet.icalendar')
        cached_data = cache.get(url, None)
        cache_timeout = int(self.data.cache_time)
        retrieve_data = True
        if cached_data is not None:
            timestamp, feed = cached_data
            if now - timestamp < cache_timeout:
                res = feed
                retrieve_data = False

        if retrieve_data:
            try:
                sock = urllib2.urlopen(url)
            except Exception:
                return []
            cal = Calendar.from_ical(sock.read())
            res = []
            for item in cal.walk():
                if item.name == 'VEVENT':
                    d = {}
                    d['text'] = safe_unicode(item.get('SUMMARY', ''))
                    start = item.get('DTSTART', None)
                    d['start'] = start and dt2DT(start.dt) or ''
                    end = item.get('DTEND', None)
                    d['end'] = end and dt2DT(end.dt) or ''
                    if d['start']:
                        d['start'] = d['start'].toZone(d['start'].localZone())
                    if d['end']:
                        d['end'] = d['end'].toZone(d['end'].localZone())
                    d['location'] = safe_unicode(item.get('LOCATION', ''))
                    d['subject'] = safe_unicode(item.get('CATEGORIES', ''))
                    res.append(d)

            # store the fetch time; the read path compares it to cache_timeout
            cache[url] = (now, res)

        if not self.data.limit:
            return res
        else:
            return res[:self.data.limit]
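The portlet caches the parsed feed as a (timestamp, value) pair and refetches
once the entry is older than cache_time. The same pattern in isolation,
assuming a plain dict in place of the ICacheChooser cache:

    import time

    _cache = {}

    def cached(key, loader, timeout=300):
        # Store (fetch_time, value); recompute when the entry is stale.
        now = time.time()
        entry = _cache.get(key)
        if entry is not None and now - entry[0] < timeout:
            return entry[1]
        value = loader()
        _cache[key] = (now, value)
        return value

    cached('feed', lambda: ['event1', 'event2'])  # computes and stores
    cached('feed', lambda: ['ignored'])           # served from the cache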
Example #7
 def getDueDate(self):
     """Used to populate getDueDate index and metadata.
     This adds the maximum turnaround time to the time the analysis
     processing started. If the analysis has no turnaround time set or is
     not yet ready for processing, returns None.
     """
     tat = self.getMaxTimeAllowed()
     if not tat:
         return None
     start = self.getStartProcessDate()
     if not start:
         return None
     return dt2DT(DT2dt(start) + timedelta(minutes=api.to_minutes(**tat)))
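getMaxTimeAllowed presumably returns a days/hours/minutes mapping that
api.to_minutes flattens. A hedged sketch of that arithmetic (the to_minutes
signature is an assumption, not taken from the source):

    from datetime import datetime, timedelta

    def to_minutes(days=0, hours=0, minutes=0, **kw):
        # Assumed behaviour of api.to_minutes: flatten a turnaround time.
        return int(days) * 24 * 60 + int(hours) * 60 + int(minutes)

    tat = {'days': 1, 'hours': 4, 'minutes': 30}
    start = datetime(2021, 3, 1, 9, 0)
    due = start + timedelta(minutes=to_minutes(**tat))
    # -> datetime.datetime(2021, 3, 2, 13, 30)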
Example #8
    def disposal_date(self):
        """ Calculate the disposal date by returning the latest
            disposal date in this sample's partitions """

        parts = self.objectValues("SamplePartition")
        dates = []
        for part in parts:
            date = part.getDisposalDate()
            if date:
                dates.append(date)
        if dates:
            dis_date = dt2DT(max([DT2dt(date) for date in dates]))
        else:
            dis_date = None
        return dis_date
Example #9
    def getDueDate(self):
        """Used to populate getDueDate index and metadata.
        This adds the maximum turnaround time to the time the analysis
        processing started. If the analysis has no turnaround time set or is
        not yet ready for processing, returns None.
        """
        tat = self.getMaxTimeAllowed()
        if not tat:
            return None
        start = self.getStartProcessDate()
        if not start:
            return None

        # delta time when the first analysis is considered as late
        delta = timedelta(minutes=api.to_minutes(**tat))

        # calculated due date
        end = dt2DT(DT2dt(start) + delta)

        # delta is within one day, return immediately
        if delta.days == 0:
            return end

        # get the laboratory workdays
        setup = api.get_setup()
        workdays = setup.getWorkdays()

        # every day is a workday, no need for calculation
        if workdays == tuple(map(str, range(7))):
            return end

        # strip the whole days from the due date, then re-add them one at a
        # time, counting only configured workdays
        due_date = end - delta.days

        days = 0
        while days < delta.days:
            # add one day to the new due date
            due_date += 1
            # skip if the weekday is a non working day
            if str(due_date.asdatetime().weekday()) not in workdays:
                continue
            days += 1

        return due_date
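The workday walk can be isolated: add one calendar day at a time and count
only days whose weekday is configured as a workday. A standalone sketch
(Monday == 0, matching datetime.weekday()):

    from datetime import date, timedelta

    def add_workdays(start, days, workdays=(0, 1, 2, 3, 4)):
        current, counted = start, 0
        while counted < days:
            current += timedelta(days=1)
            if current.weekday() in workdays:
                counted += 1
        return current

    add_workdays(date(2021, 3, 5), 2)  # Friday + 2 workdays -> Tue 2021-03-09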
Example #10
    def disposal_date(self):
        """Returns the date the retention period ends for this sample based on
        the retention period from the Sample Type. If the sample hasn't been
        collected yet, returns None
        """
        date_sampled = self.getDateSampled()
        if not date_sampled:
            return None

        # TODO Preservation - preservation's retention period has priority over
        # sample type's preservation period

        retention_period = self.getSampleType().getRetentionPeriod() or {}
        retention_period_delta = timedelta(
            days=int(retention_period.get("days", 0)),
            hours=int(retention_period.get("hours", 0)),
            minutes=int(retention_period.get("minutes", 0)))
        return dt2DT(DT2dt(date_sampled) + retention_period_delta)
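The retention period components are cast individually before building the
timedelta, since missing keys default to zero. The arithmetic in isolation,
with illustrative values:

    from datetime import datetime, timedelta

    retention_period = {'days': '30'}
    delta = timedelta(days=int(retention_period.get('days', 0)),
                      hours=int(retention_period.get('hours', 0)),
                      minutes=int(retention_period.get('minutes', 0)))
    datetime(2021, 3, 1) + delta  # -> datetime.datetime(2021, 3, 31, 0, 0)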
Example #11
    def disposal_date(self):
        """ return disposal date """

        DateSampled = self.getDateSampled()

        # fallback to sampletype retention period
        st_retention = self.aq_parent.getSampleType().getRetentionPeriod()

        # but prefer retention period from preservation
        pres = self.getPreservation()
        pres_retention = pres and pres.getRetentionPeriod() or None

        rp = pres_retention or st_retention

        td = timedelta(days='days' in rp and int(rp['days']) or 0,
                       hours='hours' in rp and int(rp['hours']) or 0,
                       minutes='minutes' in rp and int(rp['minutes']) or 0)

        dis_date = DateSampled and dt2DT(DT2dt(DateSampled) + td) or None
        return dis_date
Example #12
    def set(self, instance, value, **kwargs):
        """
        Check if value is an actual date/time value. If not, attempt
        to convert it to one; otherwise, set to None. Assign all
        properties passed as kwargs to object.
        """
        val = value
        if not value:
            val = None
        elif isinstance(value, basestring):
            for fmt in ['date_format_long', 'date_format_short']:
                fmtstr = instance.translate(fmt, domain='bika', mapping={})
                fmtstr = fmtstr.replace(r"${", '%').replace('}', '')
                try:
                    val = strptime(value, fmtstr)
                except ValueError:
                    continue
                try:
                    # struct_time[:6] keeps year through second
                    val = DateTime(*list(val)[:6])
                except DateTimeError:
                    continue
                if val.timezoneNaive():
                    # Use local timezone for tz naive strings
                    # see http://dev.plone.org/plone/ticket/10141
                    zone = val.localZone(safelocaltime(val.timeTime()))
                    parts = val.parts()[:-1] + (zone, )
                    val = DateTime(*parts)
                break
            else:
                try:
                    # The following will handle an rfc822 string.
                    value = value.split(" +", 1)[0]
                    val = DateTime(value)
                except Exception:
                    logger.warning("DateTimeField failed to format date "
                                   "string '%s' with '%s'" % (value, fmtstr))
        elif isinstance(value, datetime.datetime):
            val = dt2DT(value)

        super(DateTimeField, self).set(instance, val, **kwargs)
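The rfc822 fallback strips a trailing " +HHMM" zone before handing the rest
to DateTime's string parser. The same split, checked with the standard
library's rfc822 parser and an illustrative string:

    from datetime import datetime
    from email.utils import parsedate

    raw = "Mon, 01 Mar 2021 14:30:00 +0000"
    stripped = raw.split(" +", 1)[0]  # "Mon, 01 Mar 2021 14:30:00"
    parsed = datetime(*parsedate(stripped)[:6])
    # -> datetime.datetime(2021, 3, 1, 14, 30)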
Example #13
 def getRaw(self, instance, **kwargs):
     bb = ''
     sample = instance.getSample() \
         if instance.portal_type == 'AnalysisRequest' else instance
     if not sample:
         # portal_factory ARs have no sample.
         return ''
     sampletype = sample.getSampleType()
     FromDate = sample.getDateSampled()
     if not FromDate:
         FromDate = sample.getSamplingDate()
     months = sampletype.Schema().getField('ShelfLife').get(sampletype)
     if FromDate and months:
         FromDate = DT2dt(FromDate)
         try:
             months = int(months)
             bb = add_months(FromDate, months)
             bb = dt2DT(bb)
         except ValueError:
             bb = ''
     else:
         bb = ''
     return bb
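add_months is a helper from the surrounding codebase; a plausible
implementation shifts the month and clamps the day to the target month's
length (this sketch is an assumption, not the original helper):

    import calendar
    from datetime import datetime

    def add_months(dt, months):
        # Assumed behaviour: 31 January + 1 month clamps to 28/29 February.
        month_index = dt.month - 1 + months
        year = dt.year + month_index // 12
        month = month_index % 12 + 1
        day = min(dt.day, calendar.monthrange(year, month)[1])
        return dt.replace(year=year, month=month, day=day)

    add_months(datetime(2021, 1, 31), 1)  # -> datetime.datetime(2021, 2, 28, 0, 0)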
Example #14
    def getRoles(self, principal_id):
        """Returns the roles for the given principal in context"""
        request = self.context.REQUEST
        response = request.RESPONSE

        token = request.get('token', None)
        if not token:
            token = request.cookies.get('token', None)

        tr_annotate = ITokenRolesAnnotate(self.context, None)
        if tr_annotate and token in tr_annotate.token_dict:
            expire_date = tr_annotate.token_dict[token].get('token_end')
            roles_to_assign = tr_annotate.token_dict[token].get('token_roles', ('Reader',))
            if expire_date.replace(tzinfo=None) > datetime.now():
                physical_path = self.context.getPhysicalPath()
                # Is there a better way to calculate the url_path?
                url_path = '/' + '/'.join(request.physicalPathToVirtualPath(physical_path))
                response.setCookie(name='token',
                                   value=token,
                                   expires=dt2DT(expire_date).toZone('GMT').rfc822(),
                                   path=url_path)
                return roles_to_assign
        return ()
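Cookie expiry values must be RFC-822 dates in GMT, which is why the code
converts through dt2DT before formatting. A quick check of that formatting
with DateTime's own methods:

    from DateTime import DateTime

    expire = DateTime('2021/03/01 14:30:00 GMT')
    expire.toZone('GMT').rfc822()  # -> 'Mon, 01 Mar 2021 14:30:00 +0000'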
Example #15
    def __call__(self):
        try:
            plone.protect.CheckAuthenticator(self.request)
        except Exception:
            return json.dumps({'error': 'Form authenticator is invalid'})

        member = self.portal_membership.getAuthenticatedMember()
        if not member.has_role('Sampler'):
            return json.dumps({})

        query = {'query': dt2DT(datetime.datetime.now()), 'range': 'max'}

        ars = self.bika_catalog(
            portal_type="AnalysisRequest",
            review_state="sample_due",
            getSamplingDate=query,
        )

        ret = []
        for ar in ars:
            ar = ar.getObject()
            sample = ar.getSample()
            if not hasattr(sample, 'future_dated'):
                continue
            batch_title = ar.getBatch().Title() if ar.getBatch() else ''
            ret.append({
                'UID': ar.UID(),
                'Title': "<a href='{0}'>{1}</a>".format(
                    ar.absolute_url(), ar.Title()),
                'Description': ("Works Order ID: {0}".format(batch_title)
                                if batch_title else ''),
            })

        return json.dumps(ret)
Example #16
    def parse_raw_data(self):
        """Parse the UW-format of import file, do some limited parsing,
        and store the values

        This is intentionally brittle, the UW form requires certain
        fields at certain positions, and is fixed.  Further modifications
        to the input file require modifications to this code.
        """

        context = self.context
        bika_catalog = getToolByName(context, 'bika_catalog')
        bika_setup_catalog = getToolByName(context, 'bika_setup_catalog')

        blob = context.Schema()['RawData'].get(context)

        wb, fn = self.load_workbook(blob.data)
        ws = wb.worksheets[0]

        # Header data
        _clientname = ws['C2'].value
        _clientid = ws['D2'].value
        _contactname = ws['E2'].value
        _clientordernumber = ws['F2'].value
        _clientreference = ws['G2'].value
        # Batch data
        _batchtitle = ws['B4'].value
        _batchid = ws['C4'].value
        _batchdescription = ws['D4'].value
        _clientbatchid = ws['E4'].value
        returnsampletoclient = ws['F4'].value
        # "Batch meta" (just more batch data really)
        _clientbatchcomment = ws['B6'].value
        _labbatchcomment = ws['C6'].value
        # analytes (profile titles, service titles, service keywords, CAS nr)
        _analytes = [
            ws[chr(x) + '8'].value for x in range(66, 91)  # B=66, Z=90
            if ws[chr(x) + '8'].value
        ]
        # Samples "meta" (common values for all samples)
        try:
            _datesampled = str(dt2DT(ws['B10'].value))
        except Exception:
            _datesampled = ''
        _sampletype = ws['C10'].value
        _samplesite = ws['D10'].value
        _activitysampled = ws['E10'].value
        # count the number of sample rows
        nr_samples = 0
        while ws['B{}'.format(12 + nr_samples)].value:
            nr_samples += 1

        # If batch already exists, link it now.
        brains = bika_catalog(portal_type='Batch', title=_batchtitle)
        batch = brains[0].getObject() if brains else None

        # Lookup sample type and point
        sampletype = None
        sampletypes = bika_setup_catalog(portal_type='SampleType',
                                         title=_sampletype)
        if len(sampletypes) == 1:
            sampletype = sampletypes[0].getObject()

        # Write applicable values to ARImport schema
        # These are values that will be used in all created objects,
        # and are set only once.
        arimport_values = {
            'ClientName': _clientname,
            'ClientID': _clientid,
            'ClientOrderNumber': _clientordernumber,
            'ClientReference': _clientreference,
            'ContactName': _contactname,
            'CCContacts': [],
            'SampleType': sampletype,
            # SampleSite field: extenders/arimport,sample,analysisrequest.py
            'SampleSite': _samplesite,
            'ActivitySampled': _activitysampled,
            'BatchTitle': _batchtitle,
            'BatchDescription': _batchdescription,
            'BatchID': _batchid,
            'ClientBatchID': _clientbatchid,
            'LabBatchComment': _labbatchcomment,
            'ClientBatchComment': _clientbatchcomment,
            'Batch': batch,
            'NrSamples': nr_samples,
        }
        # Write initial values to ARImport schema
        for fieldname, fieldvalue in arimport_values.items():
            context.Schema()[fieldname].set(context, fieldvalue)

        itemdata = []
        for sample_nr in range(nr_samples):
            clientsampleid = ws['B{}'.format(12 + sample_nr)].value
            amountsampled = ws['C{}'.format(12 + sample_nr)].value
            metric = ws['D{}'.format(12 + sample_nr)].value
            remarks = ws['E{}'.format(12 + sample_nr)].value
            values = {
                'ClientSampleID': str(clientsampleid),
                'AmountSampled': str(amountsampled),
                'AmountSampledMetric': str(metric),
                'DateSampled': _datesampled,
                'Analyses': _analytes,
                'Remarks': remarks,
                # 'Profile': self.profile,
            }
            itemdata.append(values)
            context.Schema()['ItemData'].set(context, itemdata)
        context.reindexObject()

        # Close worksheet and remove the tmp file.
        wb.close()
        os.unlink(fn)
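The analyte scan walks spreadsheet columns by character code, and the sample
counter walks rows until the first empty cell. Both bits in isolation (the
dict-backed sheet is a stand-in for the openpyxl worksheet):

    # Cells scanned for analytes on row 8:
    columns = [chr(x) + '8' for x in range(66, 91)]  # B=66 ... Z=90
    columns[0], columns[-1]  # -> ('B8', 'Z8')

    # Contiguous sample rows, counted from row 12 down:
    sheet = {'B12': 'S-1', 'B13': 'S-2'}
    nr_samples = 0
    while sheet.get('B{}'.format(12 + nr_samples)):
        nr_samples += 1
    # nr_samples -> 2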
Example #17
def sort_date(context):
    deadline = getattr(context.aq_base, 'deadline', None)
    if deadline is None:
        return context.created()
    else:
        return dt2DT(deadline)