def _init_env(self):
    if self.environment == 'Production':
        self.app.logger.addHandler(watchtower.CloudWatchLogHandler())
        return ProductionConfig()
    elif self.environment == 'Staging':
        self.app.logger.addHandler(watchtower.CloudWatchLogHandler())
        return StagingConfig()
    else:
        return DevelopmentConfig()
def initLogging(loglevel, partitionTargetValue, LogStreamName):
    # Map the requested level name to a logging constant (defaults to INFO)
    loggingLevelSelected = logging.INFO
    if loglevel == 'critical':
        loggingLevelSelected = logging.CRITICAL
    elif loglevel == 'error':
        loggingLevelSelected = logging.ERROR
    elif loglevel == 'warning':
        loggingLevelSelected = logging.WARNING
    elif loglevel == 'info':
        loggingLevelSelected = logging.INFO
    elif loglevel == 'debug':
        loggingLevelSelected = logging.DEBUG
    elif loglevel == 'notset':
        loggingLevelSelected = logging.NOTSET

    filenameVal = 'Orchestrator_' + partitionTargetValue + '.log'
    log_formatter = logging.Formatter(
        '[%(asctime)s][P:%(process)d][%(levelname)s][%(module)s:%(funcName)s()][%(lineno)d]%(message)s'
    )

    # Add the rotating file handler
    handler = logging.handlers.RotatingFileHandler(
        filename=filenameVal, mode='a', maxBytes=128 * 1024, backupCount=10)
    handler.setFormatter(log_formatter)
    logger.addHandler(handler)
    logger.setLevel(loggingLevelSelected)

    # Audit handler: dispatches audit records to CloudWatch.
    auditlogger.addHandler(
        watchtower.CloudWatchLogHandler(log_group='Scheduler', stream_name='Audit')
    )
    # Never raise the audit threshold above INFO, so audit messages are always dispatched.
    if loggingLevelSelected > logging.INFO or loggingLevelSelected == logging.NOTSET:
        auditlogger.setLevel(logging.INFO)
    else:
        auditlogger.setLevel(loggingLevelSelected)

    # Scheduler log handler: dispatches scheduler log messages to CloudWatch.
    # The stream name identifies which instance/scheduler the logs came from.
    cloud_handler = watchtower.CloudWatchLogHandler(log_group='Scheduler',
                                                    stream_name=LogStreamName)
    logger.addHandler(cloud_handler)
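# A hypothetical call site for initLogging() above -- the level, partition
# value, and stream name are illustrative only, and the module-level `logger`
# and `auditlogger` objects that the function mutates are assumed to exist.
initLogging('debug', 'Partition0', 'scheduler-i-0abc123')
logger.debug('rotating-file and CloudWatch handlers installed')
auditlogger.info('audit stream started')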
def cloudwatch_handler():
    if os.environ.get("CLOWDER_ENABLED", "").lower() == "true":
        f = clowder_config
    else:
        f = non_clowder_config
    aws_access_key_id, aws_secret_access_key, aws_region_name, aws_log_group, create_log_group = f()

    if all((aws_access_key_id, aws_secret_access_key, aws_region_name)):
        aws_log_stream = os.getenv("AWS_LOG_STREAM", _get_hostname())
        print(f"Configuring watchtower logging (log_group={aws_log_group}, stream_name={aws_log_stream})")
        boto3_session = Session(
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key,
            region_name=aws_region_name,
        )
        return watchtower.CloudWatchLogHandler(
            boto3_session=boto3_session,
            log_group=aws_log_group,
            stream_name=aws_log_stream,
            create_log_group=create_log_group,
        )
    else:
        print("Unable to configure watchtower logging. Please verify watchtower logging configuration!")
        return NullHandler()
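# Possible usage of cloudwatch_handler() above (a sketch): attach whatever it
# returns -- a CloudWatch handler or the NullHandler fallback -- to the root
# logger, so callers never need to branch on whether CloudWatch is configured.
import logging

root = logging.getLogger()
root.addHandler(cloudwatch_handler())
root.setLevel(logging.INFO)
root.info("logging configured")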
def setup_cw_logging(main_logger):
    """Setup CloudWatch logging"""
    logger = get_logger(__name__)
    if not CFG.cw_enabled:
        logger.info('CloudWatch logging disabled')
        return
    key_id = CFG.cw_aws_access_key_id
    secret = CFG.cw_aws_secret_access_key
    if not (key_id and secret):
        logger.info('CloudWatch logging disabled due to missing access key')
        return

    session = Session(
        aws_access_key_id=key_id,
        aws_secret_access_key=secret,
        region_name=CFG.cw_aws_region,
    )
    try:
        handler = watchtower.CloudWatchLogHandler(
            boto3_session=session,
            log_group=CFG.cw_aws_log_group,
            stream_name=CFG.hostname,
        )
    except ClientError:
        logger.exception("Unable to enable CloudWatch logging: ")
    else:  # pragma: no cover
        main_logger.addHandler(handler)
        logger.info('CloudWatch logging ENABLED!')
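# Minimal sketch of wiring setup_cw_logging() above into an entry point;
# assumes CFG and get_logger come from the same module's config helpers.
import logging

main_logger = logging.getLogger('app')
setup_cw_logging(main_logger)  # silently no-ops unless CFG.cw_enabled and keys are set
main_logger.info('application started')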
def setup_logging(job_name, settings):
    log = logging.getLogger('kameris')
    log.setLevel(logging.INFO)
    formatter = logging.Formatter('%(levelname)-8s %(message)s')

    console_logger = logging.StreamHandler(stream=sys.stdout)
    console_logger.setFormatter(formatter)
    log.addHandler(console_logger)

    if 'remote_logging' in settings:
        remote_log_settings = settings['remote_logging']
        if remote_log_settings['destination'] != 'cloudwatch':
            log.warning('*** unknown log destination %s, skipping',
                        remote_log_settings['destination'])
            return log, formatter

        aws_session = boto3.session.Session(
            **_make_aws_args(remote_log_settings)
        )
        log_stream_name = '{}-{}'.format(job_name, int(time.time()))
        log.info('*** logging to AWS CloudWatch stream %s', log_stream_name)

        aws_logger = watchtower.CloudWatchLogHandler(
            log_group=remote_log_settings['log_group'],
            stream_name=log_stream_name,
            boto3_session=aws_session,
            send_interval=5
        )
        aws_logger.setFormatter(formatter)
        log.addHandler(aws_logger)

    return log, formatter
def _configure_watchtower_logging_handler():
    aws_access_key_id = os.getenv("AWS_ACCESS_KEY_ID", None)
    aws_secret_access_key = os.getenv("AWS_SECRET_ACCESS_KEY", None)
    aws_region_name = os.getenv("AWS_REGION_NAME", None)
    log_group = "platform"
    stream_name = _get_aws_logging_stream_name(OPENSHIFT_ENVIRONMENT_NAME_FILE)
    log_level = os.getenv("INVENTORY_LOG_LEVEL", "WARNING").upper()

    if all([aws_access_key_id, aws_secret_access_key, aws_region_name, stream_name]):
        print(f"Configuring watchtower logging (log_group={log_group}, stream_name={stream_name})")
        boto3_session = Session(aws_access_key_id=aws_access_key_id,
                                aws_secret_access_key=aws_secret_access_key,
                                region_name=aws_region_name)
        root = logging.getLogger()
        handler = watchtower.CloudWatchLogHandler(boto3_session=boto3_session,
                                                  log_group=log_group,
                                                  stream_name=stream_name)
        handler.setFormatter(logstash_formatter.LogstashFormatterV1())
        root.addHandler(handler)
        for logger_name in ("app", "app.models", "api", "api.host"):
            app_logger = logging.getLogger(logger_name)
            app_logger.setLevel(log_level)
    else:
        print("Unable to configure watchtower logging. Please "
              "verify watchtower logging configuration!")
def setup_cw_logging(main_logger):
    """Setup CloudWatch logging"""
    logger = get_logger(__name__)
    key_id = os.environ.get('CW_AWS_ACCESS_KEY_ID')
    secret = os.environ.get('CW_AWS_SECRET_ACCESS_KEY')
    if not (key_id and secret):
        logger.info('CloudWatch logging disabled due to missing access key')
        return

    try:
        with open('/var/run/secrets/kubernetes.io/serviceaccount/namespace', 'r') as namespace_file:
            namespace = namespace_file.read()  # pragma: no cover
    except Exception:  # pylint: disable=broad-except
        namespace = 'vulnerability-engine-unknown'

    session = Session(
        aws_access_key_id=key_id,
        aws_secret_access_key=secret,
        region_name=os.environ.get('AWS_REGION', 'us-east-1'),
    )
    try:
        handler = watchtower.CloudWatchLogHandler(
            boto3_session=session,
            log_group=os.environ.get('CW_LOG_GROUP', 'platform-dev'),
            stream_name=namespace
        )
    except ClientError:
        logger.exception("Unable to enable CloudWatch logging: ")
    else:  # pragma: no cover
        main_logger.addHandler(handler)
        logger.info('CloudWatch logging ENABLED!')
def server(aws_region, max_run_time, run_dir, sqs_queue_name,
           max_idle_time, idle_terminate_granularity,
           queue_receive_message_timeout):
    session = boto3.session.Session(region_name=aws_region)

    # make boto quiet locally FIXME is there a better way of doing this?
    logging.getLogger('boto').setLevel(logging.CRITICAL)
    logging.getLogger('boto3').setLevel(logging.CRITICAL)
    logging.getLogger('botocore').setLevel(logging.CRITICAL)

    instance = get_my_ec2_instance(aws_region)
    ec2_metadata = get_my_ec2_meta(instance)
    server_name = ec2_metadata['Name']

    log_format_str = '{} %(asctime)s - %(name)s - %(levelname)s - %(message)s'.format(server_name)
    log_stream_prefix = ec2_metadata['instance_id']
    formatter = logging.Formatter(log_format_str, "%Y-%m-%d %H:%M:%S")
    handler = watchtower.CloudWatchLogHandler(send_interval=20,
                                              log_group="pywren.standalone",
                                              stream_name=log_stream_prefix + "-{logger_name}",
                                              boto3_session=session,
                                              max_batch_count=10)
    handler.setFormatter(formatter)
    logger.addHandler(handler)

    #config = pywren.wrenconfig.default()

    server_runner(aws_region, sqs_queue_name,
                  max_run_time, os.path.abspath(run_dir),
                  server_name, log_stream_prefix,
                  max_idle_time, idle_terminate_granularity,
                  queue_receive_message_timeout)
def set_context(self, ti):
    super().set_context(ti)
    self.handler = watchtower.CloudWatchLogHandler(
        log_group=self.log_group,
        stream_name=self._render_filename(ti, ti.try_number),
        boto3_session=self.hook.get_session(self.region_name),
    )
def setup_cw_logging(logger):  # pragma: no cover
    """
    initialize cloudwatch logging
    from https://github.com/RedHatInsights/cloudwatch-test
    """
    key_id = os.environ.get("CW_AWS_ACCESS_KEY_ID")
    secret = os.environ.get("CW_AWS_SECRET_ACCESS_KEY")
    if not (key_id and secret):
        logger.info("CloudWatch logging disabled due to missing access key")
        return

    session = Session(
        aws_access_key_id=key_id,
        aws_secret_access_key=secret,
        region_name=os.environ.get("AWS_REGION", "us-east-1"),
    )
    try:
        with open("/var/run/secrets/kubernetes.io/serviceaccount/namespace", "r") as f:
            namespace = f.read()
    except Exception:
        namespace = "unknown"

    handler = watchtower.CloudWatchLogHandler(
        boto3_session=session,
        log_group=os.environ.get("CW_LOG_GROUP", "platform-dev"),
        stream_name=namespace,
    )
    logger.addHandler(handler)
    logger.info("CloudWatch logging ENABLED!")
def setup_logger(filename: str, source: str):
    formatter = logging.Formatter(
        "%(asctime)s %(name)-12s %(levelname)-8s %(message)s")
    cw_formatter = logging.Formatter(
        "%(asctime)s %(name)-12s %(levelname)-8s %(funcName)s:%(lineno)s %(message)s"
    )

    file_handler = logging.handlers.TimedRotatingFileHandler(
        filename=filename, when="midnight", backupCount=MAX_BACKUPS)
    file_handler.setFormatter(formatter)
    file_handler.setLevel(logging.DEBUG)

    cloud_watch_handler = watchtower.CloudWatchLogHandler(stream_name=source)
    cloud_watch_handler.setFormatter(cw_formatter)
    cloud_watch_handler.setLevel(logging.WARN)

    stdout_handler = logging.StreamHandler(sys.stdout)
    stdout_handler.setFormatter(formatter)
    stdout_handler.setLevel(logging.DEBUG)

    root_logger = logging.getLogger()
    root_logger.setLevel(logging.DEBUG)
    root_logger.addHandler(file_handler)
    root_logger.addHandler(cloud_watch_handler)

    LOGGER.setLevel(logging.DEBUG)
    LOGGER.addHandler(stdout_handler)
    LOGGER.addHandler(file_handler)
    LOGGER.addHandler(cloud_watch_handler)
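# Example use of setup_logger() above (the file name and source are
# illustrative). With this setup, WARNING and above reach CloudWatch, while
# DEBUG and above reach the rotating file; LOGGER additionally echoes to stdout.
setup_logger(filename='service.log', source='worker-1')
logging.getLogger(__name__).warning('this record is shipped to CloudWatch')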
def _configure_watchtower_logging_handler():
    aws_access_key_id = os.getenv("AWS_ACCESS_KEY_ID", None)
    aws_secret_access_key = os.getenv("AWS_SECRET_ACCESS_KEY", None)
    aws_region_name = os.getenv("AWS_REGION_NAME", None)
    log_group = os.getenv("AWS_LOG_GROUP", "platform")
    stream_name = _get_aws_logging_stream_name(OPENSHIFT_ENVIRONMENT_NAME_FILE)

    if all([aws_access_key_id, aws_secret_access_key, aws_region_name, stream_name]):
        print(f"Configuring watchtower logging (log_group={log_group}, stream_name={stream_name})")
        boto3_session = Session(aws_access_key_id=aws_access_key_id,
                                aws_secret_access_key=aws_secret_access_key,
                                region_name=aws_region_name)
        root = logging.getLogger()
        handler = watchtower.CloudWatchLogHandler(boto3_session=boto3_session,
                                                  log_group=log_group,
                                                  stream_name=stream_name)
        handler.setFormatter(logstash_formatter.LogstashFormatterV1())
        root.addHandler(handler)
    else:
        print("Unable to configure watchtower logging. Please "
              "verify watchtower logging configuration!")
def setup_cw_logging(main_logger):
    """Setup CloudWatch logging"""
    logger = get_logger(__name__)
    cfg = Config()
    if not strtobool(os.getenv('CW_ENABLED', 'FALSE')):
        logger.info('CloudWatch logging disabled')
        return
    key_id = cfg.cw_aws_access_key_id
    secret = cfg.cw_aws_secret_access_key
    if not (key_id and secret):
        logger.info('CloudWatch logging disabled due to missing access key')
        return

    session = Session(aws_access_key_id=key_id,
                      aws_secret_access_key=secret,
                      region_name=cfg.cw_aws_region)
    try:
        handler = watchtower.CloudWatchLogHandler(
            boto3_session=session,
            log_group=cfg.cw_aws_log_group,
            stream_name=os.environ.get('HOSTNAME', 'vmaas'))
    except ClientError:
        logger.exception("Unable to enable CloudWatch logging: ")
    else:  # pragma: no cover
        main_logger.addHandler(handler)
        logger.info('CloudWatch logging ENABLED!')
def setup_cw_logging(main_logger):
    """Setup CloudWatch logging"""
    logger = get_logger(__name__)
    if not strtobool(os.getenv('CW_ENABLED', 'FALSE')):
        logger.info('CloudWatch logging disabled')
        return
    key_id = os.environ.get('CW_AWS_ACCESS_KEY_ID')
    secret = os.environ.get('CW_AWS_SECRET_ACCESS_KEY')
    if not (key_id and secret):
        logger.info('CloudWatch logging disabled due to missing access key')
        return

    session = Session(
        aws_access_key_id=key_id,
        aws_secret_access_key=secret,
        region_name=os.environ.get('AWS_REGION', 'us-east-1'),
    )
    try:
        handler = watchtower.CloudWatchLogHandler(
            boto3_session=session,
            log_group=os.environ.get('CW_LOG_GROUP', 'platform-dev'),
            stream_name=os.environ.get('HOSTNAME', 'vulnerability-engine'))
    except ClientError:
        logger.exception("Unable to enable CloudWatch logging: ")
    else:  # pragma: no cover
        main_logger.addHandler(handler)
        logger.info('CloudWatch logging ENABLED!')
def commence_cw_log_streaming(stream_name):
    logger = get_logger(__name__)
    root_logger = logging.getLogger()

    if CW_ENABLED is False:
        logger.warning(f"{module_prefix} - Disabled")
        return

    if all((AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, AWS_REGION_NAME, AWS_LOG_GROUP)) is False:
        logger.error(f"{module_prefix} - Insufficient constant values")
        return

    try:
        boto3_client = boto3.client(
            'logs',
            aws_access_key_id=AWS_ACCESS_KEY_ID,
            aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
            region_name=AWS_REGION_NAME
        )
        watchtower_handler = watchtower.CloudWatchLogHandler(
            boto3_client=boto3_client,
            log_group=AWS_LOG_GROUP,
            stream_name=stream_name
        )
    except ClientError as e:
        logger.exception(f"{module_prefix} - Failed; error: {e}")
    else:
        logger.info(f"{module_prefix} - Streaming in progress - Log group: {AWS_LOG_GROUP}")
        watchtower_handler.setLevel(logging.INFO)
        watchtower_handler.setFormatter(logging.Formatter(fmt=CW_LOGGING_FORMAT))
        root_logger.addHandler(watchtower_handler)
def get_logger() -> logging.Logger:
    """
    Initializes the appropriate logger using the environment variable "RUNTIME_ENV".

    This environment variable is set to PROD by default and is overwritten to "DEV"
    by the docker-run task when the debug configuration "Docker: Python - General"
    is run.

    If RUNTIME_ENV == PROD: the logger is initialized with the watchtower handler,
    so any logs recorded using the returned logger are sent to CloudWatch.

    If RUNTIME_ENV != PROD: the program is assumed to be running in development or
    testing, where logs should not be sent to CloudWatch.
    """
    logging.basicConfig(format=LOG_FORMAT, datefmt=DATE_FORMAT, level=logging.INFO)
    logger = logging.getLogger("app_name")

    if os.environ["RUNTIME_ENV"] == "PROD":
        watchtower_log_handler = watchtower.CloudWatchLogHandler()
        # setting a new handler resets the format to default, so specify it again here
        watchtower_log_handler.setFormatter(
            logging.Formatter(LOG_FORMAT, datefmt=DATE_FORMAT))
        logger.addHandler(watchtower_log_handler)
        # overwrite the default excepthook so uncaught exceptions are sent to CloudWatch
        sys.excepthook = lambda exc_type, exc_value, exc_traceback: logger.critical(
            "Uncaught exception", exc_info=(exc_type, exc_value, exc_traceback))

    return logger
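# Sketch of calling get_logger() above. RUNTIME_ENV would normally be set by
# the container/task definition; it is set inline here purely for illustration.
import os

os.environ['RUNTIME_ENV'] = 'DEV'  # 'PROD' would attach the watchtower handler
log = get_logger()
log.info('hello from development')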
def _configure_watchtower_logging_handler():
    aws_access_key_id = os.getenv("AWS_ACCESS_KEY_ID", None)
    aws_secret_access_key = os.getenv("AWS_SECRET_ACCESS_KEY", None)
    aws_region_name = os.getenv("AWS_REGION_NAME", None)
    log_group = os.getenv("AWS_LOG_GROUP", "platform")
    stream_name = os.getenv("AWS_LOG_STREAM", _get_hostname())  # default to hostname
    create_log_group = str(os.getenv("AWS_CREATE_LOG_GROUP")).lower() == "true"

    if all([aws_access_key_id, aws_secret_access_key, aws_region_name, stream_name]):
        print(f"Configuring watchtower logging (log_group={log_group}, stream_name={stream_name})")
        boto3_session = Session(
            aws_access_key_id=aws_access_key_id,
            aws_secret_access_key=aws_secret_access_key,
            region_name=aws_region_name,
        )
        root = logging.getLogger()
        handler = watchtower.CloudWatchLogHandler(
            boto3_session=boto3_session,
            log_group=log_group,
            stream_name=stream_name,
            create_log_group=create_log_group,
        )
        handler.setFormatter(logstash_formatter.LogstashFormatterV1())
        root.addHandler(handler)
    else:
        print("Unable to configure watchtower logging. Please verify watchtower logging configuration!")
def init(cls, reset=True, **kwargs):
    """
    Class init method to set all vars

    :param bool reset:
    :param kwargs:
    """
    #print("AppSettings.init(reset={}, {})".format(reset, kwargs))
    if cls.dirty and reset:
        # AppSettings.db_close()
        reset_class(AppSettings)
    if 'prefix' in kwargs and kwargs['prefix'] != cls.prefix:
        cls.__prefix_vars(kwargs['prefix'])
    cls.set_vars(**kwargs)

    test_mode_flag = os.getenv('TEST_MODE', '')
    travis_flag = os.getenv('TRAVIS_BRANCH', '')
    log_group_name = f"{'' if test_mode_flag or travis_flag else cls.prefix}tX" \
                     f"{'_DEBUG' if debug_mode_flag else ''}" \
                     f"{'_TEST' if test_mode_flag else ''}" \
                     f"{'_TravisCI' if travis_flag else ''}"
    boto3_client = boto3.client(
        "logs",
        aws_access_key_id=cls.aws_access_key_id,
        aws_secret_access_key=cls.aws_secret_access_key,
        region_name=cls.aws_region_name)
    cls.watchtower_log_handler = watchtower.CloudWatchLogHandler(
        boto3_client=boto3_client,
        log_group_name=log_group_name,
        stream_name=cls.name)
    setup_logger(cls.logger, cls.watchtower_log_handler,
                 logging.DEBUG if debug_mode_flag else logging.INFO)
    cls.logger.debug(
        f"Logging to AWS CloudWatch group '{log_group_name}' using key '…{cls.aws_access_key_id[-2:]}'."
    )
def get_logger(roster_row=None):
    logging.basicConfig(level=logging.INFO)
    if roster_row:
        logger = logging.getLogger('%s_%s' % (roster_row['State'], roster_row['County']))
    else:
        logger = logging.getLogger('noncounty')
    logger.addHandler(watchtower.CloudWatchLogHandler())
    return logger
def config_cloudwatch(logger):
    CW_SESSION = Session(aws_access_key_id=config.AWS_ACCESS_KEY_ID,
                         aws_secret_access_key=config.AWS_SECRET_ACCESS_KEY,
                         region_name=config.AWS_REGION_NAME)
    cw_handler = watchtower.CloudWatchLogHandler(boto3_session=CW_SESSION,
                                                 log_group=config.LOG_GROUP,
                                                 stream_name=config.HOSTNAME)
    cw_handler.setFormatter(LogstashFormatterV1())
    logger.addHandler(cw_handler)
def create(name):
    logging.basicConfig(level=logging.INFO)
    boto3_session = get_aws_session()
    logger = logging.getLogger(name=name)
    logger.addHandler(
        watchtower.CloudWatchLogHandler(log_group='Typonator',
                                        stream_name='TyponatorLog',
                                        boto3_session=boto3_session))
    return logger
def set_stream_logger(self):
    logger = logging.getLogger(self.name)
    logger.setLevel(self.level)
    now = datetime.datetime.now()
    stream_name = "{}/{}/{}".format(now.year, now.month, now.day)
    logger.addHandler(
        watchtower.CloudWatchLogHandler(
            log_group='/cis/{}'.format(self.log_group_name),
            stream_name=stream_name))
    self.logger = logger
def setup_logger(
    log_level="INFO",
    log_file=None,
    log_cloudwatch=False,
    service="Testing",
    np_precision=3,
):
    """
    Creates and configures a logger object.

    Args:
        log_level (str): the logging level. Defaults to 'INFO'.
        log_file (str): optional local log file. Defaults to None.
        log_cloudwatch (bool): will log to a CloudWatch topic if set to True.
            Defaults to False.
        service (str): service name.
        np_precision (int): numpy precision.

    Returns:
        logging.Logger: python Logger object.
    """
    # numpy float precision when printing
    np.set_printoptions(precision=np_precision)

    # Python logger config ('' or None is the root logger)
    logger = logging.getLogger(__name__ + "." + service)

    # Remove all previous filters and handlers
    logger.handlers = []
    logger.filters = []

    # Get handler on log
    if log_file is not None:
        hdlr = logging.FileHandler(log_file, "a")
    else:
        hdlr = logging.StreamHandler(sys.stdout)
    logger.addHandler(hdlr)

    # Add cloudwatch logging if requested
    if log_cloudwatch:
        hdlr = watchtower.CloudWatchLogHandler()
        logger.addHandler(hdlr)

    # Format template with color placeholders (expects a `cols` mapping when formatted)
    fmt_str = "# {cols[y]}%(asctime)s{cols[reset]}"
    fmt_str += " %(levelname)-8s"
    fmt_str += " {cols[c]}%(funcName)-3s{cols[reset]} %(message)s"

    logger.setLevel(1 if log_level == "ALL" else getattr(logging, log_level))
    logger.propagate = False
    return logger
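# Illustrative call of setup_logger() above: stdout logging by default, with
# CloudWatch added only when log_cloudwatch=True and AWS credentials resolve.
log = setup_logger(log_level="DEBUG", service="ingest", log_cloudwatch=False)
log.debug("pipeline starting")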
def job_handler(job, job_i, run_dir, aws_region, server_name,
                log_stream_prefix, extra_context=None, delete_taskdir=True):
    """
    Run a deserialized job in run_dir

    Just for debugging
    """
    session = boto3.session.Session(region_name=aws_region)

    # we do this here instead of in the global context
    # because of how multiprocessing works
    handler = watchtower.CloudWatchLogHandler(send_interval=20,
                                              log_group="pywren.standalone",
                                              stream_name=log_stream_prefix + "-{logger_name}",
                                              boto3_session=session,
                                              max_batch_count=10)
    log_format_str = '{} %(asctime)s - %(name)s - %(levelname)s - %(message)s'.format(server_name)
    formatter = logging.Formatter(log_format_str, "%Y-%m-%d %H:%M:%S")
    handler.setFormatter(formatter)

    wren_log = pywren.wrenhandler.logger  # logging.getLogger('pywren.wrenhandler')
    wren_log.setLevel(logging.DEBUG)
    wren_log.propagate = 0
    wren_log.addHandler(handler)

    original_dir = os.getcwd()
    task_run_dir = os.path.join(run_dir, str(job_i))
    shutil.rmtree(task_run_dir, True)  # delete old modules
    os.makedirs(task_run_dir)
    copy_runtime(task_run_dir)

    context = {'jobnum': job_i}
    if extra_context is not None:
        context.update(extra_context)

    os.chdir(task_run_dir)
    try:
        wrenhandler.generic_handler(job, context)
    finally:
        if delete_taskdir:
            shutil.rmtree(task_run_dir)
        os.chdir(original_dir)
        handler.flush()
def server(aws_region, max_run_time, run_dir, sqs_queue_name,
           max_idle_time, idle_terminate_granularity,
           queue_receive_message_timeout):
    rand_sleep = random.random() * STARTUP_JITTER_SEC
    time.sleep(rand_sleep)

    session = boto3.session.Session(region_name=aws_region)

    # make boto quiet locally FIXME is there a better way of doing this?
    logging.getLogger('boto').setLevel(logging.CRITICAL)
    logging.getLogger('boto3').setLevel(logging.CRITICAL)
    logging.getLogger('botocore').setLevel(logging.CRITICAL)

    # NOTE : This assumes EC2 but in the future we could run on
    # millennium if we set the log stream correctly
    instance = get_my_ec2_instance(aws_region)
    ec2_metadata = get_my_ec2_meta(instance)
    server_name = ec2_metadata['Name']
    log_stream_prefix = ec2_metadata['instance_id']

    log_format_str = '{} %(asctime)s - %(name)s - %(levelname)s - %(message)s'.format(server_name)
    formatter = logging.Formatter(log_format_str, "%Y-%m-%d %H:%M:%S")
    handler = watchtower.CloudWatchLogHandler(send_interval=20,
                                              log_group="pywren.standalone",
                                              stream_name=log_stream_prefix + "-{logger_name}",
                                              boto3_session=session,
                                              max_batch_count=10)
    debug_stream_handler = logging.StreamHandler()
    handler.setFormatter(formatter)
    logger.addHandler(handler)
    logger.setLevel(logging.DEBUG)

    wren_log = pywren.wrenhandler.logger
    wren_log.addHandler(handler)
    wren_log.addHandler(debug_stream_handler)

    #config = pywren.wrenconfig.default()

    server_runner(aws_region, sqs_queue_name,
                  max_run_time, os.path.abspath(run_dir),
                  #server_name,
                  log_stream_prefix,
                  max_idle_time, idle_terminate_granularity,
                  queue_receive_message_timeout)
def config_cloudwatch(logger):
    CW_SESSION = Session(
        aws_access_key_id=config.aws_access_key_id,
        aws_secret_access_key=config.aws_secret_access_key,
        region_name=config.aws_region_name,
    )
    cw_handler = watchtower.CloudWatchLogHandler(
        boto3_session=CW_SESSION,
        log_group=config.log_group,
        stream_name=config.namespace,
    )
    cw_handler.setFormatter(LogstashFormatterV1())
    logger.addHandler(cw_handler)
def add_cw_handler(logger: logging.Logger, log_level: str,
                   log_group: str, log_stream: str) -> logging.Logger:
    handler = watchtower.CloudWatchLogHandler(log_group=log_group,
                                              stream_name=log_stream,
                                              create_log_stream=False,
                                              create_log_group=False,
                                              send_interval=1)
    formatter = logging.Formatter(
        f'[%(levelname)s] | %(asctime)s | %(message)s | Logger: {logger.name} | Function: %(funcName)s | LineNumber: %(lineno)s | '
    )
    handler.setFormatter(formatter)
    handler.setLevel(log_level)
    logger.addHandler(handler)
    return logger
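# Hypothetical usage of add_cw_handler() above; the group and stream names are
# placeholders and must already exist, since the handler is created with both
# create_log_group and create_log_stream disabled.
import logging

log = add_cw_handler(logging.getLogger('jobs'), 'INFO', '/my/app', 'worker-1')
log.info('CloudWatch handler attached')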
def _get_watchtower_handler():
    session = boto3.Session(
        aws_access_key_id=AWS_ACCESS_KEY_ID,
        aws_secret_access_key=AWS_SECRET_ACCESS_KEY,
        region_name=AWS_REGION,
    )
    handler = watchtower.CloudWatchLogHandler(
        log_group=CLOUDWATCH_LOG_GROUP,
        stream_name=CLOUDWATCH_LOG_STREAM,
        send_interval=5,  # every 5 sec
        boto3_session=session,
    )
    handler.setFormatter(_log_formatter)
    return handler
def async_log_setup():
    '''
    None of this stuff should be on the critical path to launching an
    instance. Instances should start dequeuing from the SQS queue as soon
    as possible and shouldn't have to wait for the rest of the spot cluster
    to come up so they have a valid ec2_metadata['Name'].

    If there are any exceptions in this function, we should exponentially
    back off and try again until we succeed. This is critical because if
    it doesn't happen we end up clogging all EC2 resources.

    This function is called once per pywren executor process.
    '''
    success = False
    backoff_time = EXP_BACKOFF_FACTOR
    while not success:
        try:
            time.sleep(backoff_time)
            instance = get_my_ec2_instance(aws_region)
            ec2_metadata = get_my_ec2_meta(instance)
            server_name = ec2_metadata['Name']
            log_stream_prefix = ec2_metadata['instance_id']

            log_format_str = '{} %(asctime)s - %(name)s - %(levelname)s - %(message)s'.format(server_name)
            formatter = logging.Formatter(log_format_str, "%Y-%m-%d %H:%M:%S")
            stream_name = log_stream_prefix + "-{logger_name}"
            handler = watchtower.CloudWatchLogHandler(
                send_interval=20,
                log_group="pywren.standalone",
                stream_name=stream_name,
                boto3_session=session,
                max_batch_count=10)
            debug_stream_handler = logging.StreamHandler()
            handler.setFormatter(formatter)
            logger.addHandler(handler)
            logger.setLevel(logging.DEBUG)

            wren_log = pywren.wrenhandler.logger
            wren_log.addHandler(handler)
            wren_log.addHandler(debug_stream_handler)
            success = True
        except Exception as e:
            logger.error('Logging setup error: ' + str(e))
            backoff_time *= 2
def main(use_cloudwatch=True):
    logger = logging.getLogger(__name__)
    logging.basicConfig(level=logging.INFO,
                        format='%(asctime)s - %(message)s',
                        datefmt='%Y-%m-%d %H:%M:%S')

    d = dirname(dirname(abspath(__file__)))
    config = yaml.safe_load(open(os.path.join(d, "config.yml")))
    s3_client = boto3.client('s3')
    bucket = config["s3"]["bucket"]
    bucket_dir = config["s3"]["bucket_dir"]
    unprocessed_dir = config["unprocessed_dir"]
    done_dir = config["done_dir"]
    error_dir = config["error_dir"]
    lambda_client = boto3.client('lambda')
    upload_device_id = config["upload_device_id"]
    lambda_arn = config["lambda_arn"]

    # Setup remote logging
    if use_cloudwatch:
        watchtower_handler = watchtower.CloudWatchLogHandler(
            log_group=config["cloudwatch"]["log_group"],
            stream_name=config["cloudwatch"]["stream_name"],
            send_interval=config["cloudwatch"]["send_interval"],
            create_log_group=False)
        logger.addHandler(watchtower_handler)

    # Check for any preexisting files in the "unprocessed" folder
    preexisting = get_preexisting_files(unprocessed_dir)

    # Setup the watchdog handler for new files that are added while the script is running
    event_handler = S3EventHandler(s3_client, bucket, bucket_dir,
                                   unprocessed_dir, done_dir, error_dir,
                                   lambda_client, upload_device_id, lambda_arn)
    observer = Observer()
    observer.schedule(event_handler, unprocessed_dir, recursive=True)
    observer.start()

    # Upload & process any preexisting files
    for file_path in preexisting:
        process(file_path, s3_client, bucket, bucket_dir, done_dir, error_dir,
                unprocessed_dir, lambda_client, upload_device_id, lambda_arn)

    # Keep the main thread running so the watchdog handler can still be called
    keep_running(observer, config["send_heartbeat"], config["heartbeat_seconds"])