Example 1
def test_flush_safing(self):
    handler = CloudWatchLogHandler()
    logger = logging.getLogger("l")
    logger.addHandler(handler)
    handler.flush()          # flushing before anything was logged must not raise
    logger.critical("msg")
    handler.close()
    logger.critical("msg")   # logging after close() must not raise either
Example 2
def test_flush_safing(self):
    handler = CloudWatchLogHandler()
    logger = logging.getLogger("l")
    logger.addHandler(handler)
    handler.flush()          # flushing before anything was logged must not raise
    logger.critical("msg")
    handler.close()
    # Logging after close() must not raise, but it should warn that the
    # message arrived after shutdown.
    with self.assertWarns(WatchtowerWarning) as cm:
        logger.critical("msg")
    self.assertEqual(
        str(cm.warning),
        "Received message after logging system shutdown",
    )
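
The same check can be run as a stand-alone sketch outside of unittest. This assumes AWS credentials are available in the environment (as the tests above assume) and that WatchtowerWarning is importable from the watchtower package; the logger name is a placeholder.

import logging
import warnings

from watchtower import CloudWatchLogHandler, WatchtowerWarning

handler = CloudWatchLogHandler()
logger = logging.getLogger("flush_safing_demo")
logger.addHandler(handler)

logger.critical("msg")   # delivered normally
handler.close()          # shuts the handler down and flushes any queued batches

with warnings.catch_warnings(record=True) as caught:
    warnings.simplefilter("always")
    logger.critical("msg")   # arrives after shutdown, so the handler warns instead of sending

assert any(issubclass(w.category, WatchtowerWarning) for w in caught)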
Example 3
import logging
import os
import sys

# Make the repo root importable so the local watchtower package is found
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), '..')))  # noqa
from watchtower import CloudWatchLogHandler

handler = CloudWatchLogHandler(stream_name='run_logging')
logger = logging.getLogger('run_logging')
logger.setLevel(logging.INFO)
logger.addHandler(handler)
logger.info('msg')
handler.close()  # flush queued records to CloudWatch before the script exits
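
A hedged variant of the same script, pointing the handler at an explicit log group and boto3 session, which is the pattern the job handlers below rely on. The keyword names (boto3_session, log_group, stream_name) follow the older watchtower API used throughout these examples, and 'my-app' is a placeholder log group, not something from the original source.

import logging

from boto3 import Session
from watchtower import CloudWatchLogHandler

boto3_session = Session(region_name='us-west-2')  # credentials are taken from the environment
handler = CloudWatchLogHandler(boto3_session=boto3_session,
                               log_group='my-app',          # placeholder log group name
                               stream_name='run_logging')
logger = logging.getLogger('run_logging')
logger.setLevel(logging.INFO)
logger.addHandler(handler)
logger.info('msg')
handler.close()  # flush queued records to CloudWatch before the process exits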
Example 4
def job(queued_json_payload: Dict[str, Any]) -> None:
    """
    This function is called by the rq package to process a job in the queue(s).
        (Don't rename this function.)

    The job is removed from the queue before the job is started,
        but if the job throws an exception or times out (timeout specified in enqueue process)
            then the job gets added to the 'failed' queue.
    """
    logger.info(MY_NAME_VERSION_STRING)
    logger.debug("tX PDF JobHandler received a job" +
                 (" (in debug mode)" if debug_mode_flag else ""))
    start_time = time()
    stats_client.incr(f'{job_handler_stats_prefix}.jobs.OBSPDF.attempted')

    logger.info(f"Clearing /tmp folder…")
    empty_folder(
        '/tmp/',
        only_prefix='tX_')  # Stops failed jobs from accumulating in /tmp

    # logger.info(f"Updating queue statistics…")
    our_queue = Queue(webhook_queue_name,
                      connection=get_current_job().connection)
    len_our_queue = len(our_queue)  # Should normally sit at zero here
    # logger.debug(f"Queue '{webhook_queue_name}' length={len_our_queue}")
    stats_client.gauge(
        f'{tx_stats_prefix}.enqueue-job.queue.OBSPDF.length.current',
        len_our_queue)
    logger.info(
        f"Updated stats for '{tx_stats_prefix}.enqueue-job.queue.OBSPDF.length.current' to {len_our_queue}"
    )

    # Save some stats
    stats_client.incr(
        f"{job_handler_stats_prefix}.jobs.OBSPDF.input.{queued_json_payload['input_format']}"
    )
    stats_client.incr(
        f"{job_handler_stats_prefix}.jobs.OBSPDF.subject.{queued_json_payload['resource_type']}"
    )

    try:
        job_descriptive_name = process_PDF_job(prefix, queued_json_payload)
    except Exception as e:
        # Catch most exceptions here so we can log them to CloudWatch
        prefixed_name = f"{prefix}tX_PDF_Job_Handler"
        logger.critical(
            f"{prefixed_name} threw an exception while processing: {queued_json_payload}"
        )
        logger.critical(f"{e}: {traceback.format_exc()}")
        main_watchtower_log_handler.close()  # Ensure queued logs are uploaded to AWS CloudWatch
        # Now attempt to log it to an additional, separate FAILED log
        logger2 = logging.getLogger(prefixed_name)
        log_group_name = f"FAILED_{'' if test_mode_flag or travis_flag else prefix}tX" \
                         f"{'_DEBUG' if debug_mode_flag else ''}" \
                         f"{'_TEST' if test_mode_flag else ''}" \
                         f"{'_TravisCI' if travis_flag else ''}"
        boto3_session = Session(aws_access_key_id=aws_access_key_id,
                                aws_secret_access_key=aws_secret_access_key,
                                region_name='us-west-2')
        failure_watchtower_log_handler = CloudWatchLogHandler(
            boto3_session=boto3_session,
            use_queues=False,
            log_group=log_group_name,
            stream_name=prefixed_name)
        logger2.addHandler(failure_watchtower_log_handler)
        logger2.setLevel(logging.DEBUG)
        logger2.info(
            f"Logging to AWS CloudWatch group '{log_group_name}' using key '…{aws_access_key_id[-2:]}'."
        )
        logger2.critical(
            f"{prefixed_name} threw an exception while processing: {queued_json_payload}"
        )
        logger2.critical(f"{e}: {traceback.format_exc()}")
        failure_watchtower_log_handler.close()
        raise e  # We raise the exception again so it goes into the failed queue

    elapsed_milliseconds = round((time() - start_time) * 1000)
    stats_client.timing(f'{job_handler_stats_prefix}.job.OBSPDF.duration',
                        elapsed_milliseconds)
    if elapsed_milliseconds < 2000:
        logger.info(
            f"{prefix}tX job handling for {job_descriptive_name} PDF completed in {elapsed_milliseconds:,} milliseconds."
        )
    else:
        logger.info(
            f"{prefix}tX job handling for {job_descriptive_name} PDF completed in {round(time() - start_time)} seconds."
        )

    stats_client.incr(f'{job_handler_stats_prefix}.jobs.OBSPDF.completed')
    main_watchtower_log_handler.close()  # Ensure queued logs are uploaded to AWS CloudWatch
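
For context, a minimal sketch of how a payload might be enqueued so that this job() function picks it up, matching the docstring's note that the timeout is set when the job is enqueued. The queue name, Redis URL, dotted function path, and payload fields are placeholders rather than values from the original source; job_timeout is the modern rq keyword (older rq versions spelled it timeout).

from redis import Redis
from rq import Queue

redis_connection = Redis.from_url('redis://localhost:6379/0')
webhook_queue = Queue('tx_job_handler', connection=redis_connection)  # placeholder queue name

payload = {'input_format': 'md', 'resource_type': 'obs'}  # illustrative fields only
webhook_queue.enqueue('webhook.job',       # dotted path to the job() function above
                      payload,
                      job_timeout='10m')   # on timeout or exception the job moves to the failed queue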
Example 5
def job(queued_json_payload: Dict[str, Any]) -> None:
    """
    This function is called by the rq package to process a job in the queue(s).
        (Don't rename this function.)

    The job is removed from the queue before the job is started,
        but if the job throws an exception or times out (timeout specified in enqueue process)
            then the job gets added to the 'failed' queue.
    """
    AppSettings.logger.info("Door43-Job-Handler received a callback" + (" (in debug mode)" if debug_mode_flag else ""))
    start_time = time.time()
    stats_client.incr(f'{callback_stats_prefix}.jobs.attempted')

    current_job = get_current_job()
    #print(f"Current job: {current_job}") # Mostly just displays the job number and payload
    #print("id",current_job.id) # Displays job number
    #print("origin",current_job.origin) # Displays queue name
    #print("meta",current_job.meta) # Empty dict

    # AppSettings.logger.info(f"Updating queue statistics…")
    our_queue = Queue(callback_queue_name, connection=current_job.connection)
    len_our_queue = len(our_queue) # Should normally sit at zero here
    # AppSettings.logger.debug(f"Queue '{callback_queue_name}' length={len_our_queue}")
    stats_client.gauge(f'"{door43_stats_prefix}.enqueue-job.callback.queue.length.current', len_our_queue)
    AppSettings.logger.info(f"Updated stats for '{door43_stats_prefix}.enqueue-job.callback.queue.length.current' to {len_our_queue}")

    #print(f"Got a job from {current_job.origin} queue: {queued_json_payload}")
    #print(f"\nGot job {current_job.id} from {current_job.origin} queue")
    #queue_prefix = 'dev-' if current_job.origin.startswith('dev-') else ''
    #assert queue_prefix == prefix
    try:
        job_descriptive_name, door43_webhook_received_at = \
                process_callback_job(prefix, queued_json_payload, current_job.connection)
    except Exception as e:
        # Catch most exceptions here so we can log them to CloudWatch
        prefixed_name = f"{prefix}Door43_Callback"
        AppSettings.logger.critical(f"{prefixed_name} threw an exception while processing: {queued_json_payload}")
        AppSettings.logger.critical(f"{e}: {traceback.format_exc()}")
        AppSettings.close_logger() # Ensure queued logs are uploaded to AWS CloudWatch
        # Now attempt to log it to an additional, separate FAILED log
        import logging
        from boto3 import Session
        from watchtower import CloudWatchLogHandler
        logger2 = logging.getLogger(prefixed_name)
        test_mode_flag = os.getenv('TEST_MODE', '')
        travis_flag = os.getenv('TRAVIS_BRANCH', '')
        log_group_name = f"FAILED_{'' if test_mode_flag or travis_flag else prefix}tX" \
                         f"{'_DEBUG' if debug_mode_flag else ''}" \
                         f"{'_TEST' if test_mode_flag else ''}" \
                         f"{'_TravisCI' if travis_flag else ''}"
        aws_access_key_id = os.environ['AWS_ACCESS_KEY_ID']
        boto3_session = Session(aws_access_key_id=aws_access_key_id,
                                aws_secret_access_key=os.environ['AWS_SECRET_ACCESS_KEY'],
                                region_name='us-west-2')
        failure_watchtower_log_handler = CloudWatchLogHandler(
            boto3_session=boto3_session,
            use_queues=False,
            log_group=log_group_name,
            stream_name=prefixed_name)
        logger2.addHandler(failure_watchtower_log_handler)
        logger2.setLevel(logging.DEBUG)
        logger2.info(f"Logging to AWS CloudWatch group '{log_group_name}' using key '…{aws_access_key_id[-2:]}'.")
        logger2.critical(f"{prefixed_name} threw an exception while processing: {queued_json_payload}")
        logger2.critical(f"{e}: {traceback.format_exc()}")
        failure_watchtower_log_handler.close()
        # NOTE: following line removed as stats recording used too much disk space
        # stats_client.gauge(user_projects_invoked_string, 1) # Mark as 'failed'
        stats_client.gauge(project_types_invoked_string, 1) # Mark as 'failed'
        raise e # We raise the exception again so it goes into the failed queue

    elapsed_milliseconds = round((time.time() - start_time) * 1000)
    stats_client.timing(f'{callback_stats_prefix}.job.duration', elapsed_milliseconds)
    if elapsed_milliseconds < 2000:
        AppSettings.logger.info(f"{prefix}Door43 callback handling for {job_descriptive_name} completed in {elapsed_milliseconds:,} milliseconds.")
    else:
        AppSettings.logger.info(f"{prefix}Door43 callback handling for {job_descriptive_name} completed in {round(time.time() - start_time)} seconds.")

    # Calculate total elapsed time for the job
    total_elapsed_time = datetime.utcnow() - \
                         datetime.strptime(door43_webhook_received_at,
                                           '%Y-%m-%dT%H:%M:%SZ')
    AppSettings.logger.info(f"{prefix}Door43 total job for {job_descriptive_name} completed in {round(total_elapsed_time.total_seconds())} seconds.")
    stats_client.timing(f'{job_handler_stats_prefix}.total.job.duration', round(total_elapsed_time.total_seconds() * 1000))

    # NOTE: following line removed as stats recording used too much disk space
    # stats_client.gauge(user_projects_invoked_string, 0) # Mark as 'succeeded'
    stats_client.gauge(project_types_invoked_string, 0) # Mark as 'succeeded'
    stats_client.incr(f'{callback_stats_prefix}.jobs.succeeded')
    AppSettings.close_logger() # Ensure queued logs are uploaded to AWS CloudWatch
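
Both job handlers above repeat the same failure-logging pattern when an exception is caught. The following is a hedged distillation of that pattern into a helper, not code from either project: log_failure is a hypothetical name, the keyword arguments follow the older watchtower API used in these examples, and the environment variable names match the ones the handlers read.

import logging
import os

from boto3 import Session
from watchtower import CloudWatchLogHandler


def log_failure(prefixed_name: str, log_group_name: str, message: str) -> None:
    """Write one critical record to a separate FAILED_* CloudWatch log group."""
    boto3_session = Session(aws_access_key_id=os.environ['AWS_ACCESS_KEY_ID'],
                            aws_secret_access_key=os.environ['AWS_SECRET_ACCESS_KEY'],
                            region_name='us-west-2')
    failure_handler = CloudWatchLogHandler(boto3_session=boto3_session,
                                           use_queues=False,  # send synchronously so nothing is lost
                                           log_group=log_group_name,
                                           stream_name=prefixed_name)
    failure_logger = logging.getLogger(prefixed_name)
    failure_logger.addHandler(failure_handler)
    failure_logger.setLevel(logging.DEBUG)
    try:
        failure_logger.critical(message)
    finally:
        failure_handler.close()  # make sure the record reaches CloudWatch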