def test_datetime_copy_pickle(self):
    d = datetime.datetime(2010, 5, 5, tzinfo=utc)
    t = Timestamp(d, 0)

    dc = copy.deepcopy(d)
    self.assertEqual(dc, t.as_datetime())

    dp = pickle.loads(pickle.dumps(d))
    self.assertEqual(dp, t.as_datetime())
def test_datetime_copy_pickle(self):
    d = datetime.datetime(2010, 5, 5, tzinfo=utc)
    t = Timestamp(d, 0)

    dc = copy.deepcopy(d)
    self.assertEqual(dc, t.as_datetime())

    for protocol in [0, 1, 2, -1]:
        pkl = pickle.dumps(d, protocol=protocol)
        dp = pickle.loads(pkl)
        self.assertEqual(dp, t.as_datetime())
def test_update_max_6(monty_update, mongo_update):
    docs = [
        {"a": Timestamp(10, 5)}
    ]
    spec = {"$max": {"a": Timestamp(10, 10)}}

    monty_c = monty_update(docs, spec)
    mongo_c = mongo_update(docs, spec)

    assert next(mongo_c) == next(monty_c)
    monty_c.rewind()
    assert next(monty_c) == {"a": Timestamp(10, 10)}
def test_sort_11(monty_sort, mongo_sort):
    docs = [
        {"a": Timestamp(0, 1)},
        {"a": Timestamp(1, 1)}
    ]
    sort = [("a", -1)]

    monty_c = monty_sort(docs, sort)
    mongo_c = mongo_sort(docs, sort)

    for i in range(len(docs)):
        assert next(mongo_c)["_id"] == next(monty_c)["_id"]
def test_qop_lt_11(monty_find, mongo_find):
    ts_0 = Timestamp(0, 1)
    ts_1 = Timestamp(1, 1)
    docs = [{"a": ts_0}, {"a": ts_1}]
    spec = {"a": {"$lt": ts_1}}

    monty_c = monty_find(docs, spec)
    mongo_c = mongo_find(docs, spec)

    assert mongo_c.count() == 1
    assert monty_c.count() == mongo_c.count()
    assert next(mongo_c) == next(monty_c)
def test_qop_gt_11(monty_find, mongo_find):
    ts_0 = Timestamp(0, 1)
    ts_1 = Timestamp(1, 1)
    docs = [{"a": ts_0}, {"a": ts_1}]
    spec = {"a": {"$gt": ts_0}}

    monty_c = monty_find(docs, spec)
    mongo_c = mongo_find(docs, spec)

    assert FieldWalker(docs[1]).go("a").get().value == [ts_1]
    assert mongo_c.count() == 1
    assert monty_c.count() == mongo_c.count()
    assert next(mongo_c) == next(monty_c)
def create_password_reset_token(self, user):
    import os
    from bson.timestamp import Timestamp
    from datetime import datetime
    from hashlib import md5
    from backend.config import SAVVY_LOGIN_EXPIRATION

    expires = Timestamp(datetime.now() + SAVVY_LOGIN_EXPIRATION, 1)
    token = md5(os.urandom(512)).hexdigest()[-8:].upper()
    self.db.users.update_one(
        {"username": user.username},
        {"$set": {"password_reset_token": (token, expires)}})
    return token, expires.as_datetime()
def test_qop_gte_11(monty_find, mongo_find):
    ts_0 = Timestamp(0, 1)
    ts_1 = Timestamp(1, 1)
    docs = [
        {"a": ts_0},
        {"a": ts_1}
    ]
    spec = {"a": {"$gte": ts_0}}

    monty_c = monty_find(docs, spec)
    mongo_c = mongo_find(docs, spec)

    assert mongo_c.count() == 2
    assert monty_c.count() == mongo_c.count()
    for i in range(2):
        assert next(mongo_c) == next(monty_c)
def object_hook(dct, json_options=DEFAULT_JSON_OPTIONS):
    if "$oid" in dct:
        return ObjectId(str(dct["$oid"]))
    if "$ref" in dct:
        return DBRef(dct["$ref"], dct["$id"], dct.get("$db", None))
    if "$date" in dct:
        return _get_date(dct, json_options)
    if "$regex" in dct:
        flags = 0
        # PyMongo always adds $options but some other tools may not.
        for opt in dct.get("$options", ""):
            flags |= _RE_OPT_TABLE.get(opt, 0)
        return Regex(dct["$regex"], flags)
    if "$minKey" in dct:
        return MinKey()
    if "$maxKey" in dct:
        return MaxKey()
    if "$binary" in dct:
        return _get_binary(dct, json_options)
    if "$code" in dct:
        return Code(dct["$code"], dct.get("$scope"))
    if "$uuid" in dct:
        return uuid.UUID(dct["$uuid"])
    if "$undefined" in dct:
        return None
    if "$numberLong" in dct:
        return Int64(dct["$numberLong"])
    if "$timestamp" in dct:
        tsp = dct["$timestamp"]
        return Timestamp(tsp["t"], tsp["i"])
    if "$numberDecimal" in dct:
        return Decimal128(dct["$numberDecimal"])
    return dct
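# A quick usage sketch (not from the original source): json.loads calls the
# hook on every parsed object bottom-up, so extended-JSON values such as
# {"$timestamp": ...} decode into their BSON types.
import json

doc = json.loads('{"ts": {"$timestamp": {"t": 4, "i": 13}}}',
                 object_hook=object_hook)
assert doc == {"ts": Timestamp(4, 13)}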
def long_to_bson_ts(val):
    """Convert integer into BSON timestamp."""
    seconds = val >> 32
    increment = val & 0xffffffff
    return Timestamp(seconds, increment)
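# The inverse helper, a minimal sketch: pack the seconds into the high
# 32 bits and the increment into the low 32 bits (mongo-connector ships
# an equivalent bson_ts_to_long).
def bson_ts_to_long(timestamp):
    """Convert BSON timestamp into a 64-bit integer."""
    return (timestamp.time << 32) + timestamp.inc

# Round-trip check:
assert long_to_bson_ts(bson_ts_to_long(Timestamp(12, 34))) == Timestamp(12, 34)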
def _get_timestamp(data, position, as_class, tz_aware, uuid_subtype,
                   compile_re):
    # In BSON the increment (the low 32 bits) is stored before the seconds.
    inc, position = _get_int(data, position, unsigned=True)
    inc = ctypes.c_int32(inc).value
    timestamp, position = _get_int(data, position, unsigned=True)
    timestamp = ctypes.c_int32(timestamp).value
    return Timestamp(timestamp, inc), position
def __init__(self, client, db_map, queue, ts=None):
    super(OpLogExecutor, self).__init__()
    assert isinstance(client, MongoClient), type(client)
    self.client = client

    if isinstance(db_map, dict):
        self.db_map = db_map
    elif isinstance(db_map, Iterable):
        self.db_map = {name: name for name in db_map}
    else:
        raise TypeError("Type of db_map should be dict or Iterable, not %s"
                        % type(db_map))

    self.methods = {
        "i": self.insert,
        "u": self.update,
        "d": self.delete,
        "c": self.create,
    }
    self.looping = False
    self.queue = queue

    from datetime import datetime
    self.ts = ts if isinstance(ts, Timestamp) else Timestamp(
        int(datetime.now().timestamp()), 1)
    self.daemon = True
def parse_currentDate(field, value, array_filters):
    date_type = {
        "date": datetime.utcnow(),
        "timestamp": Timestamp(datetime.utcnow(), 1),
    }

    if not isinstance(value, bool):
        if not is_duckument_type(value):
            msg = ("{} is not valid type for $currentDate. Please use a "
                   "boolean ('true') or a $type expression ({{$type: "
                   "'timestamp/date'}}).".format(type(value).__name__))
            raise WriteError(msg, code=2)

        for k, v in value.items():
            if k != "$type":
                msg = "Unrecognized $currentDate option: {}".format(k)
                raise WriteError(msg, code=2)
            if v not in date_type:
                msg = ("The '$type' string field is required to be 'date' "
                       "or 'timestamp': {$currentDate: {field : {$type: "
                       "'date'}}}")
                raise WriteError(msg, code=2)
            value = date_type[v]
    else:
        value = date_type["date"]

    def _currentDate(fieldwalker):
        parse_set(field, value, array_filters)(fieldwalker)

    return _currentDate
def test_timestamp(self):
    dct = {"ts": Timestamp(4, 13)}
    res = json_util.dumps(dct, default=json_util.default)
    self.assertEqual('{"ts": {"$timestamp": {"t": 4, "i": 13}}}', res)
    rtdct = json_util.loads(res)
    self.assertEqual(dct, rtdct)
def format(self, record):
    """Formats LogRecord into python dictionary."""
    # Standard document
    document = {
        'timestamp': Timestamp(int(record.created), int(record.msecs)),
        'level': record.levelname,
        'thread': record.thread,
        'message': record.getMessage(),
        'loggerName': record.name,
        'fileName': record.pathname,
        'method': record.funcName,
        'lineNumber': record.lineno,
    }
    # Standard document decorated with exception info
    if record.exc_info is not None:
        document.update({
            'exception': {
                'message': str(record.exc_info[1]),
                'code': 0,
                'stackTrace': self.formatException(record.exc_info),
            }
        })
    # Standard document decorated with extra contextual information
    if len(self.DEFAULT_PROPERTIES) != len(record.__dict__):
        contextual_extra = set(record.__dict__).difference(
            set(self.DEFAULT_PROPERTIES))
        if contextual_extra:
            for key in contextual_extra:
                document[key] = record.__dict__[key]
    return document
def test_timestamp_highorder_bits(self):
    doc = {'a': Timestamp(0xFFFFFFFF, 0xFFFFFFFF)}
    doc_bson = (b'\x10\x00\x00\x00'
                b'\x11a\x00\xff\xff\xff\xff\xff\xff\xff\xff'
                b'\x00')
    self.assertEqual(doc_bson, BSON.encode(doc))
    self.assertEqual(doc, BSON(doc_bson).decode())
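# A small decoding sketch (not part of the test suite): the expected bytes
# above unpack per the BSON spec as a 4-byte document length, the type tag
# 0x11 (timestamp), the key name "a", then a little-endian uint64 whose low
# half is the increment and whose high half is the seconds.
import struct

raw = (b'\x10\x00\x00\x00'
       b'\x11a\x00\xff\xff\xff\xff\xff\xff\xff\xff'
       b'\x00')
length, = struct.unpack_from('<i', raw, 0)     # total document length: 16
inc, secs = struct.unpack_from('<II', raw, 7)  # increment first, then time
assert (length, secs, inc) == (16, 0xFFFFFFFF, 0xFFFFFFFF)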
def test_read_oplog_progress(self):
    """Test read_oplog_progress
    """
    conn = Connector(address='%s:%d' % (mongo_host, self.primary_p),
                     oplog_checkpoint=None,
                     target_url=None,
                     ns_set=['test.test'],
                     u_key='_id',
                     auth_key=None)

    # testing with no file
    self.assertEqual(conn.read_oplog_progress(), None)

    try:
        os.unlink("temp_config.txt")
    except OSError:
        pass
    open("temp_config.txt", "w").close()

    conn.oplog_checkpoint = "temp_config.txt"

    # testing with empty file
    self.assertEqual(conn.read_oplog_progress(), None)

    oplog_dict = conn.oplog_progress.get_dict()

    # add a value to the file, delete the dict, and then read in the value
    oplog_dict['oplog1'] = Timestamp(12, 34)
    conn.write_oplog_progress()
    del oplog_dict['oplog1']
    self.assertEqual(len(oplog_dict), 0)

    conn.read_oplog_progress()
    oplog_dict = conn.oplog_progress.get_dict()
    self.assertTrue('oplog1' in oplog_dict.keys())
    self.assertEqual(oplog_dict['oplog1'], Timestamp(12, 34))

    oplog_dict['oplog1'] = Timestamp(55, 11)

    # re-reading the file should overwrite the in-memory value with the
    # checkpointed one
    conn.read_oplog_progress()
    self.assertEqual(oplog_dict['oplog1'], Timestamp(12, 34))

    os.unlink("temp_config.txt")
def timestamp(self):
    """Return a BSON Timestamp for the current time."""
    return Timestamp(datetime.datetime.now(), 0)
def test_read_oplog_progress(self):
    """Test read_oplog_progress
    """
    conn = Connector(
        mongo_address=self.repl_set.uri,
        oplog_checkpoint=None,
        **connector_opts
    )

    # testing with no file
    self.assertEqual(conn.read_oplog_progress(), None)

    try:
        os.unlink("temp_oplog.timestamp")
    except OSError:
        pass
    open("temp_oplog.timestamp", "w").close()

    conn.oplog_checkpoint = "temp_oplog.timestamp"

    # testing with empty file
    self.assertEqual(conn.read_oplog_progress(), None)

    oplog_dict = conn.oplog_progress.get_dict()

    # add a value to the file, delete the dict, and then read in the value
    oplog_dict['oplog1'] = Timestamp(12, 34)
    conn.write_oplog_progress()
    del oplog_dict['oplog1']
    self.assertEqual(len(oplog_dict), 0)

    conn.read_oplog_progress()
    oplog_dict = conn.oplog_progress.get_dict()
    self.assertTrue('oplog1' in oplog_dict.keys())
    self.assertEqual(oplog_dict['oplog1'], Timestamp(12, 34))

    oplog_dict['oplog1'] = Timestamp(55, 11)

    # re-reading the file should restore the checkpointed value
    conn.read_oplog_progress()
    self.assertEqual(oplog_dict['oplog1'], Timestamp(12, 34))

    os.unlink("temp_oplog.timestamp")
def initial_sync(self):
    # Method that starts the initial collection dump and then spawns the writer
    print(self.dbnames)
    time_t = time.time()
    time_log = Timestamp(int(time_t) - 1, 0)
    times = Timestamp(int(time_t), 0)
    curr_time = times.as_datetime()

    self.target_mongos['sync_log']['init_sync'].insert({'ts': time_log})
    self.master_mongos['sync_log']['init_sync'].insert({'ts': time_log})
    for name in self.replnames:
        print(name)
        self.target_mongos['sync_log'][name].insert({'ts': time_log})
        self.master_mongos['sync_log'][name].insert({'ts': time_log})

    # set last sync time to the current time and push it to the database
    self.last_sync = time_log

    threads = []
    for shard in self.master_shards:
        # loop through all databases that should be replicated
        for dbname in self.dbnames:
            if dbname in shard.database_names():  # the database is on this shard
                identity = shard.address
                print("Replicating database: %s , on Shard: %s: %s"
                      % (dbname, identity[0], identity[1]))
                db = shard[dbname]
                colls = db.collection_names(include_system_collections=False)
                # spawn collection dumper threads for all collections
                # within the database
                for coll in colls:
                    coll_dumper = Thread(target=self.dump_collection,
                                         args=(db, dbname, coll))
                    threads.append(coll_dumper)
                    coll_dumper.start()

    # wait on all dumper threads before moving on to write oplog operations
    for thread in threads:
        thread.join()

    print("Finished initial sync, took")
    print(time.time() - time_t)
    self.start_listening()  # start tailing on all shards
def _resume_using_oplog_lastentry(self):
    lastentry = Timestamp(
        self.db["sync"].find_one(
            {"collection": self._colname},
            {"oplog_last_entry": 1})["oplog_last_entry"],
        0)
    logger.info("reading from last entry of op log: %s" % lastentry)
    self._read_from_changestream(start_at_operation_time=lastentry)
def test_timestamp_pickling(self):
    ts = Timestamp(0, 1)
    pickled_with_3 = (
        b'\x80\x04\x95Q\x00\x00\x00\x00\x00\x00\x00\x8c'
        b'\x0ebson.timestamp\x94\x8c\tTimestamp\x94\x93\x94)'
        b'\x81\x94}\x94('
        b'\x8c\x10_Timestamp__time\x94K\x00\x8c'
        b'\x0f_Timestamp__inc\x94K\x01ub.')
    self.round_trip_pickle(ts, pickled_with_3)
def test_timestamp(self):
    dct = {"ts": Timestamp(4, 13)}
    res = bsonjs_dumps(dct)
    self.assertEqual('{ "ts" : { "$timestamp" : { "t" : 4, "i" : 13 } } }',
                     res)
    rtdct = bsonjs_loads(res)
    self.assertEqual(dct, rtdct)
def get_ts(time):
    if isinstance(time, str):
        return get_ts(datetime.strptime(time.replace("-", ""), "%Y%m%d"))
    elif isinstance(time, datetime):
        return get_ts(int(time.timestamp()))
    elif isinstance(time, int):
        return Timestamp(time, 1)
    else:
        return None
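# Usage sketch: the three accepted input forms all normalize to a Timestamp
# with increment 1. Note that datetime.timestamp() interprets a naive
# datetime as local time, so the epoch value for the first two forms is
# timezone-dependent.
assert get_ts("2020-01-02") == get_ts(datetime(2020, 1, 2))
assert get_ts(1577923200) == Timestamp(1577923200, 1)
assert get_ts(3.5) is None  # unsupported types fall through to None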
def test_exceptions(self):
    self.assertRaises(TypeError, Timestamp)
    self.assertRaises(TypeError, Timestamp, None, 123)
    self.assertRaises(TypeError, Timestamp, 1.2, 123)
    self.assertRaises(TypeError, Timestamp, 123, None)
    self.assertRaises(TypeError, Timestamp, 123, 1.2)
    self.assertRaises(ValueError, Timestamp, 0, -1)
    self.assertRaises(ValueError, Timestamp, -1, 0)
    self.assertTrue(Timestamp(0, 0))
def get_consistent_end_ts(self):
    ts = None
    for host in self.tailed_oplogs:
        for port in self.tailed_oplogs[host]:
            instance = self.tailed_oplogs[host][port]
            if 'last_ts' in instance and instance['last_ts'] is not None:
                # keep the smallest last_ts seen across all tailed oplogs
                if ts is None or instance['last_ts'].time < ts.time:
                    ts = Timestamp(instance['last_ts'].time, 0)
    return ts
def test_qop_type_17(monty_find, mongo_find):
    docs = [{"a": Timestamp(0, 1)}]
    spec = {"a": {"$type": 17}}  # timestamp

    monty_c = monty_find(docs, spec)
    mongo_c = mongo_find(docs, spec)

    assert mongo_c.count() == 1
    assert monty_c.count() == mongo_c.count()
def test_sort_19(monty_sort, mongo_sort):
    docs = [
        {"a": ["x", True]},
        {"a": None},
        {"a": []},
        {"a": [5, []]},
        {"a": {"s": 7}},
        {"a": {"s": [9]}},
        {"a": {"s": 10}},
        {"a": 6},
        {"a": 4},
        {"a": [5, None]},
        {"a": [5, [1]]},
        {"a": [Decimal128("4.5"), Binary(b"0")]},
        {"a": [{"s": 5}, False]},
        {"a": [{"s": 9}]},
        {"a": [True, "y"]},
        {"a": Binary(b"a")},
        {"a": b"bytes"},
        {"a": ["abc"]},
        {"a": "banana"},
        {"a": "appple"},
        {"a": [Regex("^a", "ix")]},
        {"a": Regex("^b")},
        {"a": Code("x", {"m": 0})},
        {"a": Code("y")},
        {"a": Code("y", {})},
        {"a": Code("y", {"m": 0})},
        {"a": MinKey()},
        {"a": MaxKey()},
        {"a": Timestamp(0, 1)},
        {"a": Timestamp(1, 1)},
        {"a": ObjectId(b"000000000000")},
        {"a": ObjectId(b"000000000001")},
        {"a": datetime(1900, 1, 1)},
        {"a": datetime(1900, 1, 2)},
    ]
    sort = [("a", 1)]

    monty_c = monty_sort(docs, sort)
    mongo_c = mongo_sort(docs, sort)

    for i in range(len(docs)):
        assert next(mongo_c)["_id"] == next(monty_c)["_id"]
def convert_to_bson_timestamp(ts):
    """Convert a Unix epoch timestamp to a BSON Timestamp.

    :param ts: seconds since the epoch, e.g. from ``time.time()``
    :type ts: float
    :return: bson Timestamp
    :rtype: bson.timestamp.Timestamp
    """
    lowpart = int(ts)
    return Timestamp(lowpart, 1)
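# Usage sketch with the current wall-clock time; the fractional part of
# the float is discarded and the increment is always 1:
import time

bson_now = convert_to_bson_timestamp(time.time())
assert bson_now.inc == 1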
def test_timestamp(self):
    res = json_util.dumps({"ts": Timestamp(4, 13)},
                          default=json_util.default)
    if not PY24:
        # Check order.
        self.assertEqual('{"ts": {"t": 4, "i": 13}}', res)
    dct = json_util.loads(res)
    self.assertEqual(dct['ts']['t'], 4)
    self.assertEqual(dct['ts']['i'], 13)
def read(self):
    """Read optime.

    Return optime if OK else None.
    """
    if self.filesize != 8:
        return None
    self._fd.seek(0, os.SEEK_SET)
    time = struct.unpack('I', self._fd.read(4))[0]
    inc = struct.unpack('I', self._fd.read(4))[0]
    return Timestamp(time, inc)
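# A hypothetical companion writer (not in the original source), assuming
# the same fixed 8-byte layout the reader above expects: two packed
# uint32 values, time first, then increment.
def write(self, ts):
    """Persist a bson Timestamp as two packed uint32 values (time, inc)."""
    self._fd.seek(0, os.SEEK_SET)
    self._fd.write(struct.pack('I', ts.time) + struct.pack('I', ts.inc))
    self._fd.flush()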
def getLog(self):
    configParser = ConfigParser.ConfigParser()
    configParser.read(
        '/data/home/workcron/qing.gu/swallow-monitor/monitor.log')
    sessions = configParser.sections()

    if 'alarm' in sessions:
        self.logPreAsyncCumulated = configParser.getint(
            'alarm', 'asynccumulated')
        self.logPreProduceFailed = configParser.getint(
            'alarm', 'producefailed')
        self.logPreSaveFailed = configParser.getint('alarm', 'savefailed')
        self.logPreSumCumulated = configParser.getint(
            'alarm', 'sumcumulated')
        self.logPreSumDelay = configParser.getint('alarm', 'sumdelay')
        self.logLastSmsTime = configParser.getint('alarm', 'logLastSmsTime')
    else:
        self.logPreAsyncCumulated = 0
        self.logPreProduceFailed = 0
        self.logPreSaveFailed = 0
        self.logPreSumCumulated = 0
        self.logPreSumDelay = 0
        self.logLastSmsTime = 0

    if 'mongo' in sessions:
        logMongoTopicList = configParser.items('mongo')
        for topicName, messageId in logMongoTopicList:
            # message ids are stored as "time,inc" pairs
            timeAndInc = messageId.split(',')
            mid = Timestamp(int(timeAndInc[0]), int(timeAndInc[1]))
            self.logMongoTopic[topicName] = mid

    for session in sessions:
        if str(session).startswith('consume-'):
            consumeItems = configParser.items(session)
            topicName = str(session)[8:]
            consumerDict = dict()
            for option, value in consumeItems:
                timeAndInc = str(value).split(',')
                consumerDict[option] = Timestamp(int(timeAndInc[0]),
                                                 int(timeAndInc[1]))
            self.logMongoConsumeStatus[topicName] = consumerDict
def test_qop_lt_14(monty_find, mongo_find):
    ts_0 = Timestamp(0, 1)
    dt_1 = datetime(1900, 1, 2)
    docs = [{"a": ts_0}, {"a": dt_1}]
    spec = {"a": {"$lt": dt_1}}

    monty_c = monty_find(docs, spec)
    mongo_c = mongo_find(docs, spec)

    assert mongo_c.count() == 0  # They don't sort together
    assert monty_c.count() == mongo_c.count()
def test_datetime(self):
    d = datetime.datetime(2010, 5, 5, tzinfo=utc)
    t = Timestamp(d, 0)
    self.assertEqual(1273017600, t.time)
    self.assertEqual(d, t.as_datetime())