def _instrument(self, **kwargs):
    """Integrate with pymongo to trace it using event listener.

    https://api.mongodb.com/python/current/api/pymongo/monitoring.html

    Args:
        tracer_provider: The `TracerProvider` to use. If none is passed the
            current configured one is used.
        request_hook: Callable invoked for command-start events; defaults
            to ``dummy_callback``.
        response_hook: Callable invoked for command-success events; defaults
            to ``dummy_callback``.
        failed_hook: Callable invoked for command-failure events; defaults
            to ``dummy_callback``.

    Note:
        The ``CommandTracer`` is created and registered with pymongo only on
        the first call; later calls just re-enable the existing instance, so
        hooks passed on subsequent calls are ignored.
    """
    tracer_provider = kwargs.get("tracer_provider")
    request_hook = kwargs.get("request_hook", dummy_callback)
    response_hook = kwargs.get("response_hook", dummy_callback)
    failed_hook = kwargs.get("failed_hook", dummy_callback)
    # Create and register a CommandTracer only the first time
    if self._commandtracer_instance is None:
        tracer = get_tracer(__name__, __version__, tracer_provider)
        self._commandtracer_instance = CommandTracer(
            tracer,
            request_hook=request_hook,
            response_hook=response_hook,
            failed_hook=failed_hook,
        )
        monitoring.register(self._commandtracer_instance)
    # If already created, just enable it
    self._commandtracer_instance.is_enabled = True
async def mongo_connect():
    """Connect to MongoDB, optionally enabling command monitoring first.

    Reads host/port/db-name from the module-level ``settings`` object. When
    ``settings.MONGO_MONITORING`` is truthy, registers a ``CommandLogger``
    with pymongo before connecting (listeners only apply to clients created
    after registration).
    """
    if settings.MONGO_MONITORING:
        # fix: was an f-string with no placeholders (F541)
        logger.info("configuring mongo command monitoring")
        monitoring.register(CommandLogger())
    logger.info(f"connecting to mongo at {settings.MONGODB_HOST}:{settings.MONGODB_PORT}")
    connect(settings.MONGODB_DBNAME, host=settings.MONGODB_HOST, port=settings.MONGODB_PORT)
    logger.info("Connected to mongodb")
def database_init(app, options):
    """Attach MongoEngine to the Flask app and optionally enable query logging.

    Args:
        app: Flask application instance.
        options: Configuration mapping; ``options['database']['settings']``
            is copied into ``app.config['MONGODB_SETTINGS']``. The
            ``'--dev'`` flag or a ``'debug'`` logging level turns on pymongo
            command logging.

    Raises:
        ProcessException: If the database service is not running.
    """
    if not check_service():
        raise ProcessException('Database is not running...')
    app.config['MONGODB_SETTINGS'] = options['database']['settings']
    # fix: MongoEngine(app) already calls init_app(app); calling init_app
    # again initialized the extension twice. Create unbound, init once.
    database = MongoEngine()
    database.init_app(app)
    if options['--dev'] or options['logging']['level'] == 'debug':
        # Log every MongoDB command in development/debug mode.
        monitoring.register(CommandLogger())
def __init__(self, config):
    """Open a MongoEngine connection described by *config*.

    Args:
        config: Mapping with keys ``'db'`` (database name), ``'user'`` and
            ``'pwd'``; an empty/absent user means an unauthenticated connect.
    """
    mdb = config['db']
    username = config['user']
    password = config['pwd']
    # Listener must be registered before the client is created to take effect.
    monitoring.register(CommandLogger())
    # fix: idiomatic truthiness instead of len(username) > 0 (which would
    # also raise TypeError if username were None); dropped dead commented-out
    # code.
    if username:
        connect(mdb, username=username, password=password)
    else:
        connect(mdb)
def monitor(self):
    """Register this instance as a pymongo listener and print the active config."""
    monitoring.register(self)
    banner = "\n\t{} {}.\n".format(
        colored.stylize("mongomon", colored.fg("black") + colored.bg("magenta")),
        colored.stylize("active", colored.fg("green")),
    )
    self.config.print_fn(banner)
    # One line per configuration field, key highlighted in magenta.
    for key, value in asdict(self.config).items():
        line = "\t{}: {}".format(
            colored.stylize(key, colored.fg("magenta")),
            pretty(pformat(value)).strip(),
        )
        self.config.print_fn(line)
    self.config.print_fn("")
def trace_integration(tracer=None):
    """Wrap the pymongo connection to trace it."""
    log.info('Integrated module: {}'.format(MODULE_NAME))
    monitoring.register(CommandTracer())
    # When running Pymongo under Tornado, all commands are run on a different
    # thread pool. In order to get the correct context for the listener, we
    # need to pass the context across the thread boundaries using some of the
    # code from the 'threading' integration. We also could just use the
    # 'threading' integration, but we don't want to start a span for every
    # thread.
    original_submit = futures.ThreadPoolExecutor.submit
    setattr(
        futures.ThreadPoolExecutor,
        original_submit.__name__,
        wrap_submit(original_submit),
    )
def init_mongo_logger(app):
    """Register a pymongo command listener that logs through ``app.logger``.

    Only activates when ``app.config["DEBUG"]`` is truthy; start events are
    intentionally not logged.
    """

    class CommandLogger(monitoring.CommandListener):
        def started(self, event):
            # Kept quiet on purpose — only outcomes are logged.
            pass

        def succeeded(self, event):
            app.logger.info("Command {0.command_name} with request id "
                            "{0.request_id} on server {0.connection_id} "
                            "succeeded in {0.duration_micros} "
                            "microseconds".format(event))

        def failed(self, event):
            app.logger.info("Command {0.command_name} with request id "
                            "{0.request_id} on server {0.connection_id} "
                            "failed in {0.duration_micros} "
                            "microseconds".format(event))

    if app.config["DEBUG"]:
        monitoring.register(CommandLogger())
def create_logging(filename):
    """
    Logging for server stats, connection error and also performance
    :param filename:
    :return:
    """
    # Drop handlers left over from earlier configuration so the new file
    # handler actually takes effect (basicConfig is a no-op otherwise).
    for existing in list(logging.root.handlers):
        logging.root.removeHandler(existing)
    logging.basicConfig(
        filename=os.path.join(log_path, filename),
        filemode='a',
        format='%(asctime)s,%(msecs)d %(name)s %(levelname)s %(message)s',
        datefmt='%H:%M:%S',
        level=logging.DEBUG,
    )
    # Register command, server and topology listeners with pymongo.
    for listener in (CommandLogger(), ServerLogger(), TopologyLogger()):
        monitoring.register(listener)
def trace_integration(tracer=None):
    """Integrate with pymongo to trace it using event listener."""
    listener = MongoCommandListener(tracer=tracer)
    log.info('Integrated module: {}'.format(MODULE_NAME))
    monitoring.register(listener)
def trace_integration(tracer=None):
    """Integrate with pymongo to trace it using event listener.

    https://api.mongodb.com/python/current/api/pymongo/monitoring.html
    """
    command_tracer = CommandTracer(tracer)
    monitoring.register(command_tracer)
def trigger_db(self, register_class):
    """Register *register_class* (a pymongo event-listener instance) with
    pymongo's global monitoring machinery."""
    monitoring.register(register_class)
def trace_integration(tracer=None):
    """Wrap the pymongo connection to trace it."""
    log.info('Integrated module: {}'.format(MODULE_NAME))
    tracer_listener = CommandTracer()
    monitoring.register(tracer_listener)
def patch(record_full_documents=False):
    """Install the X-Ray pymongo command listener (idempotent).

    A marker attribute on the ``monitoring`` module guards against
    registering the listener twice.
    """
    if hasattr(monitoring, '_xray_enabled'):
        # Already patched — nothing to do.
        return
    monitoring._xray_enabled = True
    monitoring.register(XrayCommandListener(record_full_documents))
# ========= # = Email = # ========= ANYMAIL = { "MAILGUN_API_KEY": MAILGUN_ACCESS_KEY, "MAILGUN_SENDER_DOMAIN": MAILGUN_SERVER_NAME, } # ========= # = Mongo = # ========= MONGO_COMMAND_LOGGER = MongoCommandLogger() monitoring.register(MONGO_COMMAND_LOGGER) MONGO_DB_DEFAULTS = { 'name': 'newsblur', 'host': f'db_mongo:{MONGO_PORT}', 'alias': 'default', 'unicode_decode_error_handler': 'ignore', 'connect': False, } MONGO_DB = dict(MONGO_DB_DEFAULTS, **MONGO_DB) MONGO_DB_NAME = MONGO_DB.pop('name') # MONGO_URI = 'mongodb://%s' % (MONGO_DB.pop('host'),) # if MONGO_DB.get('read_preference', pymongo.ReadPreference.PRIMARY) != pymongo.ReadPreference.PRIMARY: # MONGO_PRIMARY_DB = MONGO_DB.copy() # MONGO_PRIMARY_DB.update(read_preference=pymongo.ReadPreference.PRIMARY)
"started".format(event)) def connection_check_out_failed(self, event): self.logger.info("[pool {0.address}] connection check out " "failed, reason: {0.reason}".format(event)) def connection_checked_out(self, event): self.logger.info("[pool {0.address}][conn #{0.connection_id}] " "connection checked out of pool".format(event)) def connection_checked_in(self, event): self.logger.info("[pool {0.address}][conn #{0.connection_id}] " "connection checked into pool".format(event)) monitoring.register(ConnectionPoolListener(init_logger('tuixue_mongo_conn', './logs', True))) class VisaStatus: """ MongoDB operations for storing: 1. All fetched visa status by (visa_type, embassy_code), *only successful fetching* 2. Overview of available appointment date of a given write date, *only successful fetching* 3. Latest written time and data, *including failed one* The successfully fetched visa status will be stored in Mongo collection `'visa_status'` and the latest written time will be stored in Mongo collection `'latest_written'`. The schema of documents for `'visa_status'` is as follow: ```python {
"{0.connection_id}".format(event)) def succeeded(self, event): log.debug("Command {0.command_name} with request id " "{0.request_id} on server {0.connection_id} " "succeeded in {0.duration_micros} " "microseconds".format(event)) def failed(self, event): log.debug("Command {0.command_name} with request id " "{0.request_id} on server {0.connection_id} " "failed in {0.duration_micros} " "microseconds".format(event)) monitoring.register(CommandLogger()) class Jedi(Document): name = StringField() connect() log.info('GO!') log.info('Saving an item through MongoEngine...') Jedi(name='Obi-Wan Kenobii').save() log.info('Querying through MongoEngine...') obiwan = Jedi.objects.first()
"{0.connection_id}".format(event)) def succeeded(self, event): logging.info("Command {0.command_name} with request id " "{0.request_id} on server {0.connection_id} " "succeeded in {0.duration_micros} " "microseconds".format(event)) def failed(self, event): logging.info("Command {0.command_name} with request id " "{0.request_id} on server {0.connection_id} " "failed in {0.duration_micros} " "microseconds".format(event)) monitoring.register(CommandLogger()) class ServerLogger(monitoring.ServerListener): def opened(self, event): logging.info("Server {0.server_address} added to topology " "{0.topology_id}".format(event)) def description_changed(self, event): previous_server_type = event.previous_description.server_type new_server_type = event.new_description.server_type if new_server_type != previous_server_type: # server_type_name was added in PyMongo 3.4 logging.info("Server {0.server_address} changed type from " "{0.previous_description.server_type_name} to " "{0.new_description.server_type_name}".format(event))
def wrap_pymongo():
    """Register the Lumigo pymongo command listener.

    Runs inside ``lumigo_safe_execute`` so instrumentation failures never
    break the host application; a no-op when ``monitoring`` is unavailable.
    """
    with lumigo_safe_execute("wrap pymogno"):
        if not monitoring:
            return
        get_logger().debug("wrapping pymongo")
        listener = LumigoMongoMonitoring()
        monitoring.register(listener)
def succeeded(self, event): logging.info("Command {0.command_name} with request id " "{0.request_id} on server {0.connection_id} " "succeeded in {0.duration_micros} " "microseconds".format(event)) def failed(self, event): logging.info("Command {0.command_name} with request id " "{0.request_id} on server {0.connection_id} " "failed in {0.duration_micros} " "microseconds".format(event)) # command logger end # command logger register start monitoring.register(CommandLogger()) # command logger register end # motorclient start from tornado import gen, ioloop from motor import MotorClient client = MotorClient() async def do_insert(): await client.test.collection.insert({'message': 'hi!'}) # For this example, wait 10 seconds for more monitoring events to fire. await gen.sleep(10)
logging.info("Command {0.command_name} with request id " "{0.request_id} on server {0.connection_id} " "succeeded in {0.duration_micros} " "microseconds".format(event)) def failed(self, event): logging.info("Command {0.command_name} with request id " "{0.request_id} on server {0.connection_id} " "failed in {0.duration_micros} " "microseconds".format(event)) # command logger end # command logger register start monitoring.register(CommandLogger()) # command logger register end # motorclient start from tornado import gen, ioloop from motor import MotorClient client = MotorClient() async def do_insert(): await client.test.collection.insert({'message': 'hi!'}) # For this example, wait 10 seconds for more monitoring events to fire. await gen.sleep(10)
# The location of command documents within the command object depends on the name # of this command. This is the name -> command object key mapping cmd_doc_locations = { "insert": "documents", "update": "updates", "delete": "deletes", "aggregate": "pipeline" } cmd_doc = None if cmd in cmd_doc_locations: cmd_doc = event.command.get(cmd_doc_locations[cmd]) elif cmd.lower( ) == "mapreduce": # mapreduce command was renamed to mapReduce in pymongo 3.9.0 # mapreduce command consists of two mandatory parts: map and reduce cmd_doc = { "map": event.command.get("map"), "reduce": event.command.get("reduce") } if cmd_doc is not None: span.set_tag("json", json_util.dumps(cmd_doc)) monitoring.register(MongoCommandTracer()) logger.debug("Instrumenting pymongo") except ImportError: pass
# # log query
# connection[db][query_history_collection].aggregate(query)
# # run query
# return connection[db][collection].aggregate(query)
#
#
# def listen_to_queries():
#     pipeline = [
#         {'$match': {'operationType': 'insert'}}
#     ]
#     history_collection = connection[db][query_history_collection]
#     for document in history_collection.watch(pipeline=pipeline, full_document='updateLookup'):
#         document['fullDocument']['email']


class QueryLogger(monitoring.CommandListener):
    """pymongo command listener that prints every command lifecycle event.

    fix: the ``CommandListener`` base's ``started``/``succeeded``/``failed``
    raise ``NotImplementedError`` when not overridden, so a listener that
    only defines a custom ``log`` method would crash on every command and
    ``log`` would never be called by the driver. Delegate all three required
    callbacks to ``log``.
    """

    def log(self, event):
        print("an event was logged", event)

    def started(self, event):
        self.log(event)

    def succeeded(self, event):
        self.log(event)

    def failed(self, event):
        self.log(event)


monitoring.register(QueryLogger())


def main():
    r = connection[db]["test"].insert_one({"title": "Fight Club"})
    # fix: InsertOneResult is not iterable — list(r) raised TypeError.
    # Report the id of the inserted document instead.
    print("query:", r.inserted_id)


if __name__ == '__main__':
    main()
def trace_integration(tracer=None):
    """Integrate with pymongo to trace it using event listener."""
    log.info('Integrated module: {}'.format(MODULE_NAME))
    listener = MongoCommandListener(tracer=tracer)
    monitoring.register(listener)
    # Mark the pymongo integration as active in the registry.
    # pylint: disable=protected-access
    integrations.add_integration(integrations._Integrations.PYMONGO)