Example #1
# Import assumed by this fragment: ServiceUnavailable is werkzeug's HTTP 503
# exception; utils and get_db_connection are DIRBS helpers already in scope.
from werkzeug.exceptions import ServiceUnavailable


def verify_schema():
    """Verify the database schema before the first request is served."""
    with get_db_connection() as conn:
        try:
            # Raises if the schema is missing, invalid or needs a migration
            utils.verify_db_schema(conn, 'dirbs_core_api')
        except utils.DatabaseSchemaException:
            raise ServiceUnavailable(description='Invalid database schema or database schema requires upgrade')
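The docstring says this runs "before the first request", which suggests it is registered as a web-app startup hook. A minimal sketch of how that wiring might look, assuming a Flask app object and a Flask version old enough to still provide before_first_request (both assumptions; the fragment above only shows the hook body):

from flask import Flask

app = Flask(__name__)

# Hypothetical wiring: run the schema check once, lazily, before the first
# request; any schema mismatch surfaces to clients as HTTP 503.
# (before_first_request was removed in Flask 2.3; newer apps would call
# verify_schema() during app setup instead.)
app.before_first_request(verify_schema)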
Example #2
        def decorated(ctx, *args, **kwargs):
            # Derive the command and logger names, falling back to the name of
            # the invoked script
            _command = command or os.path.basename(sys.argv[0])
            _logger_name = logger_name or _command.replace('-', '.')
            # metrics_root can be a static string or a callable computed from
            # the CLI arguments
            if callable(metrics_root):
                _metrics_root = metrics_root(ctx, *args, **kwargs)
            else:
                _metrics_root = metrics_root
            # Default the metrics root to the logger name (plus the subcommand,
            # if any)
            if _metrics_root is None:
                _metrics_root = _logger_name + '.'
                if subcommand is not None:
                    _metrics_root = _metrics_root + subcommand + '.'

            config = ensure_config(ctx)
            statsd = ensure_statsd(ctx)
            logger = logging.getLogger(_logger_name)
            metrics_run_root = None
            run_id = -1
            metadata_conn = None
            inited_file_logging = False

            try:
                # Store the start time so that we can track a metric for total job runtime
                st = time.time()

                # Get metadata connection in autocommit mode
                metadata_conn = utils.create_db_connection(config.db_config, autocommit=True)

                try:
                    # Verify DB schema
                    utils.verify_db_schema(metadata_conn, required_role)
                except (utils.DatabaseSchemaException, utils.DatabaseRoleCheckException) as ex:
                    logger.error(str(ex))
                    sys.exit(1)

                # Store metadata and get run_id
                run_id = metadata.store_job_metadata(metadata_conn, _command, logger, job_subcommand=subcommand)

                # Now that we have a run_id, we can set up file logging
                if subcommand is not None:
                    log_filename = '{0}_{1}_run_id_{2:d}'.format(_command, subcommand, run_id)
                else:
                    log_filename = '{0}_run_id_{1:d}'.format(_command, run_id)
                inited_file_logging = dirbs.logging.setup_file_logging(config.log_config, log_filename)

                # Get metrics run root based on run_id
                metrics_run_root = '{0}runs.{1:d}.'.format(_metrics_root, run_id)

                # Validate that any exempted device types occur in the imported GSMA TAC DB
                utils.validate_exempted_device_types(metadata_conn, config)

                # Run the decorated function with injected args: config, statsd,
                # logger, run_id, conn, metadata_conn, command name and metrics roots
                with utils.create_db_connection(config.db_config) as conn:
                    # Call CLI function with injected args
                    f(ctx,
                      config,
                      statsd,
                      logger,
                      run_id,
                      conn,
                      metadata_conn,
                      _command,
                      _metrics_root,
                      metrics_run_root,
                      *args,
                      **kwargs)

                # Update the last success timestamp
                statsd.gauge('{0}last_success'.format(_metrics_root), int(time.time()))
                metadata.log_job_success(metadata_conn, _command, run_id)
            except BaseException:
                # Make sure we track the last failure timestamp for any exception and re-raise
                statsd.gauge('{0}last_failure'.format(_metrics_root), int(time.time()))
                # Log metadata in job_metadata table
                if run_id != -1:
                    metadata.log_job_failure(metadata_conn, _command, run_id, logger)
                raise
            finally:
                # As a last resort, init file logging with a placeholder filename
                # so that any buffered log output still gets flushed to a file
                if not inited_file_logging:
                    if subcommand is not None:
                        log_filename = '{0}_{1}_run_id_unknown'.format(_command, subcommand)
                    else:
                        log_filename = '{0}_run_id_unknown'.format(_command)
                    dirbs.logging.setup_file_logging(config.log_config, log_filename)

                # Only track StatsD metrics for run time if we at least retrieved a run id, as this
                # forms part of the key
                dt = int((time.time() - st) * 1000)
                if metrics_run_root is not None:
                    statsd.gauge('{0}runtime.total'.format(metrics_run_root), dt)

                # If there was a duration_callback set, call it here with the calculated dt
                if duration_callback is not None:
                    duration_callback(dt)

                # Clean up the metadata connection manually; it was opened outside
                # a with statement so it stays usable in the except block above
                if metadata_conn is not None:
                    try:
                        metadata_conn.close()
                    except (psycopg2.InterfaceError, psycopg2.OperationalError) as e:
                        logger.error(str(e))
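For orientation, decorated() above is the innermost function of a decorator factory: command, subcommand, required_role, metrics_root, logger_name, duration_callback and f are all closed over from enclosing scopes that this extract omits. A minimal sketch of that enclosing shape, with hypothetical names chosen to match the closure variables:

import functools

def cli_job_wrapper(command=None, subcommand=None, required_role=None,
                    metrics_root=None, logger_name=None, duration_callback=None):
    """Hypothetical factory; only the skeleton around decorated() is shown."""
    def decorator(f):
        @functools.wraps(f)
        def decorated(ctx, *args, **kwargs):
            ...  # body as in the example above
        return decorated
    return decorator

A wrapped CLI entry point would then receive config, statsd, logger, run_id, conn, metadata_conn, the command name and the two metric roots as injected positional arguments, in the order shown in the call to f() above.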