def test_datetime_from_timestamp(self):
    self.assertEqual(util.datetime_from_timestamp(0), datetime.datetime(1970, 1, 1))

    # large negative; test PYTHON-110 workaround for windows
    self.assertEqual(util.datetime_from_timestamp(-62135596800), datetime.datetime(1, 1, 1))
    self.assertEqual(util.datetime_from_timestamp(-62135596199), datetime.datetime(1, 1, 1, 0, 10, 1))

    self.assertEqual(util.datetime_from_timestamp(253402300799), datetime.datetime(9999, 12, 31, 23, 59, 59))

    self.assertEqual(util.datetime_from_timestamp(0.123456), datetime.datetime(1970, 1, 1, 0, 0, 0, 123456))
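# A minimal sketch of the pattern the PYTHON-110 test above exercises: instead of
# datetime.utcfromtimestamp(), which rejects negative timestamps on Windows, the
# datetime is computed as a timedelta offset from the Unix epoch. This is an
# illustration of the technique, not necessarily the driver's exact code.
import datetime

DATETIME_EPOC = datetime.datetime(1970, 1, 1)

def datetime_from_timestamp_sketch(timestamp):
    # timedelta handles negative and fractional seconds uniformly, so pre-1970
    # and sub-second timestamps work on all platforms
    return DATETIME_EPOC + datetime.timedelta(seconds=timestamp)

assert datetime_from_timestamp_sketch(-62135596800) == datetime.datetime(1, 1, 1)
assert datetime_from_timestamp_sketch(0.123456) == datetime.datetime(1970, 1, 1, 0, 0, 0, 123456)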
def strftime(time_format, seconds, microseconds=0, timezone=None):
    ret_dt = datetime_from_timestamp(seconds) + datetime.timedelta(microseconds=microseconds)
    ret_dt = ret_dt.replace(tzinfo=UTC())
    if timezone:
        ret_dt = ret_dt.astimezone(timezone)
    return ret_dt.strftime(time_format)
def format_value_timestamp(val, colormap, date_time_format, quote=False, **_):
    tzless_dt = datetime_from_timestamp(calendar.timegm(val.utctimetuple())) \
        + datetime.timedelta(microseconds=val.microsecond)
    bval = tzless_dt.replace(tzinfo=UTC()).strftime(date_time_format.timestamp_format)
    if quote:
        bval = "'%s'" % bval
    return colorme(bval, colormap, 'timestamp')
def test_month_rounding_creation_failure(self):
    """
    @jira_ticket PYTHON-912
    """
    feb_stamp = ms_timestamp_from_datetime(datetime.datetime(2018, 2, 25, 18, 59, 59, 0))
    dr = DateRange(OPEN_BOUND, DateRangeBound(feb_stamp, DateRangePrecision.MONTH))
    dt = datetime_from_timestamp(dr.upper_bound.milliseconds / 1000)
    self.assertEqual(dt.day, 28)

    # Leap year
    feb_stamp_leap_year = ms_timestamp_from_datetime(datetime.datetime(2016, 2, 25, 18, 59, 59, 0))
    dr = DateRange(OPEN_BOUND, DateRangeBound(feb_stamp_leap_year, DateRangePrecision.MONTH))
    dt = datetime_from_timestamp(dr.upper_bound.milliseconds / 1000)
    self.assertEqual(dt.day, 29)
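# Why the assertions above expect 28 and 29: rounding a MONTH-precision upper
# bound lands on the last day of that month, which is what calendar.monthrange
# reports. A sketch of that reasoning, not the driver's internal rounding code:
import calendar

assert calendar.monthrange(2018, 2)[1] == 28  # ordinary February
assert calendar.monthrange(2016, 2)[1] == 29  # leap-year February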
def write_crawl_data(self):
    bound_stmt = self.add_crawl_prepped_stmt.bind([
        self.crawl_data['id'],
        self.crawl_data['url'],
        datetime_from_timestamp(self.crawl_data['crawled_at']),
        self.crawl_data['failure'],
        self.crawl_data['title'],
        self.crawl_data['body'],
        self.crawl_data['internal_links'],
        self.crawl_data['outbound_links']])
    self.session.execute(bound_stmt)
def strftime(time_format, seconds, timezone=None):
    ret_dt = datetime_from_timestamp(seconds).replace(tzinfo=UTC())
    if timezone:
        ret_dt = ret_dt.astimezone(timezone)
    try:
        return ret_dt.strftime(time_format)
    except ValueError:
        # CASSANDRA-13185: if the date cannot be formatted as a string, return a string with the
        # milliseconds since the epoch. cqlsh does the exact same thing for values below
        # datetime.MINYEAR (1) or above datetime.MAXYEAR (9999). Some versions of strftime() also
        # have problems for dates between MINYEAR and 1900. cqlsh COPY assumes milliseconds from
        # the epoch if it fails to parse a datetime string, and so it is able to correctly import
        # timestamps exported as milliseconds since the epoch.
        return '%d' % (seconds * 1000.0)
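# Illustrative only: the arithmetic of the CASSANDRA-13185 fallback above. For a
# datetime some platforms' strftime() cannot format (e.g. year 1), the function
# degrades to milliseconds since the epoch, which a consumer such as cqlsh COPY
# can recover by dividing by 1000. The values here are examples, not driver output.
seconds = -62135596800  # 0001-01-01 00:00:00 UTC
fallback = '%d' % (seconds * 1000.0)  # what the except branch would return
assert fallback == '-62135596800000'
assert float(fallback) / 1000.0 == seconds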
def test_uuid_from_time(self):
    t = time.time()
    seq = 0x2aa5
    node = uuid.getnode()
    u = util.uuid_from_time(t, node, seq)
    # using AlmostEqual because time precision is different for
    # some platforms
    self.assertAlmostEqual(util.unix_time_from_uuid1(u), t, 4)
    self.assertEqual(u.node, node)
    self.assertEqual(u.clock_seq, seq)

    # random node
    u1 = util.uuid_from_time(t, clock_seq=seq)
    u2 = util.uuid_from_time(t, clock_seq=seq)
    self.assertAlmostEqual(util.unix_time_from_uuid1(u1), t, 4)
    self.assertAlmostEqual(util.unix_time_from_uuid1(u2), t, 4)
    self.assertEqual(u.clock_seq, seq)
    # not impossible, but we shouldn't get the same value twice
    self.assertNotEqual(u1.node, u2.node)

    # random seq
    u1 = util.uuid_from_time(t, node=node)
    u2 = util.uuid_from_time(t, node=node)
    self.assertAlmostEqual(util.unix_time_from_uuid1(u1), t, 4)
    self.assertAlmostEqual(util.unix_time_from_uuid1(u2), t, 4)
    self.assertEqual(u.node, node)
    # not impossible, but we shouldn't get the same value twice
    self.assertNotEqual(u1.clock_seq, u2.clock_seq)

    # node too large
    with self.assertRaises(ValueError):
        u = util.uuid_from_time(t, node=2 ** 48)

    # clock_seq too large
    with self.assertRaises(ValueError):
        u = util.uuid_from_time(t, clock_seq=0x4000)

    # construct from datetime
    dt = util.datetime_from_timestamp(t)
    u = util.uuid_from_time(dt, node, seq)
    self.assertAlmostEqual(util.unix_time_from_uuid1(u), t, 4)
    self.assertEqual(u.node, node)
    self.assertEqual(u.clock_seq, seq)
async def main(session):
    aiosession(session)
    redis = await aioredis.create_redis("redis://localhost")
    last_id = await redis.get(LAST_ID_KEY)
    if last_id:
        log.info(f"Starting consuming messages from {last_id}")
        last_id = [last_id]
    else:
        log.info("Starting consuming messages from the beginning")
        last_id = [0]
    streams = redis.streams.consumer([STREAM], latest_ids=last_id, encoding="utf-8")
    async for message in streams:
        # message is a tuple (stream, id, fields_dict)
        log.info(f"Got message from device {message[2]['device']}")
        last_id = streams.last_ids_for_stream[STREAM]
        log.info(f"Set last id to: {last_id}")
        await redis.set(LAST_ID_KEY, last_id)

        data = dict(**message[2])
        deviceid = data.pop("device")
        timestamp = util.datetime_from_timestamp(float(data.pop("timestamp")))
        query = session.prepare("""
            INSERT INTO devices ("deviceid", "timestamp", "data")
            VALUES (?, ?, ?)
        """)
        await session.execute_future(query, (UUID(deviceid), timestamp, json.dumps(data)))
        await asyncio.sleep(2)
def datetime_from_timestamp(timestamp):
    msg = ("'cassandra.cqltypes.datetime_from_timestamp' has moved to 'cassandra.util'. "
           "This entry point will be removed in the next major version.")
    warnings.warn(msg, DeprecationWarning)
    log.warning(msg)
    return util.datetime_from_timestamp(timestamp)
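# Usage note for the deprecation shim above: new code should import from
# cassandra.util directly rather than going through cassandra.cqltypes.
from cassandra.util import datetime_from_timestamp

dt = datetime_from_timestamp(0)  # datetime.datetime(1970, 1, 1, 0, 0)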
def deserialize(byts, protocol_version):
    timestamp = int64_unpack(byts) / 1000.0
    return util.datetime_from_timestamp(timestamp)
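# An illustrative round trip for the deserializer above, assuming int64_unpack
# reads a big-endian signed 64-bit integer of milliseconds since the epoch
# (struct stands in for it here; the constants are examples):
import datetime
import struct

byts = struct.pack('>q', 86400123)  # one day and 123 ms after the epoch
timestamp = struct.unpack('>q', byts)[0] / 1000.0
assert datetime.datetime(1970, 1, 1) + datetime.timedelta(seconds=timestamp) == \
    datetime.datetime(1970, 1, 2, 0, 0, 0, 123000)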
def strftime(time_format, seconds):
    tzless_dt = datetime_from_timestamp(seconds)
    return tzless_dt.replace(tzinfo=UTC()).strftime(time_format)
def strftime(time_format, seconds, timezone=None):
    ret_dt = datetime_from_timestamp(seconds).replace(tzinfo=UTC())
    if timezone:
        ret_dt = ret_dt.astimezone(timezone)
    return ret_dt.strftime(time_format)
def date_now():
    return datetime_from_timestamp(time.time())