Example #1
File: db.py  Project: fossabot/DIRBS-Core-1
def _store_job_metadata(config, subcommand):
    """
    Utility method to store metadata about a dirbs-db invocation in the database.

    :param config: DIRBS config object
    :param subcommand: sub-command name
    """
    logger = logging.getLogger('dirbs.db')
    with utils.create_db_connection(config.db_config, autocommit=True) as conn:
        # We can only really store successful database installs/upgrades as we can't store
        # anything in an unknown schema version. Therefore, we can store at the end of the job
        # and mark it as successfully complete immediately
        run_id = metadata.store_job_metadata(conn,
                                             'dirbs-db',
                                             logger,
                                             job_subcommand=subcommand)
        metadata.log_job_success(conn, 'dirbs-db', run_id)
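This helper records only successful runs, since a failed install or upgrade leaves no known schema version to write metadata into. For jobs that can fail part-way through, the same primitives support a success/failure split; a minimal sketch, assuming a placeholder do_work callable and an 'upgrade' sub-command name, reusing the call signatures visible in these examples:

# Sketch only: do_work and the 'upgrade' sub-command name are assumptions.
with utils.create_db_connection(config.db_config, autocommit=True) as conn:
    run_id = metadata.store_job_metadata(conn, 'dirbs-db', logger,
                                         job_subcommand='upgrade')
    try:
        do_work(conn)  # placeholder for the actual job body
        metadata.log_job_success(conn, 'dirbs-db', run_id)
    except Exception:
        metadata.log_job_failure(conn, 'dirbs-db', run_id, logger)
        raise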
Example #2
def get_importer(importer_type, db_conn, metadata_db_conn, db_config, tmpdir,
                 logger, statsd, importer_data_params):
    """Helper function for constructor an importer object with the supplied parameters."""
    subcommand_lookup = {
        GSMADataImporter: 'gsma_tac',
        PairingListImporter: 'pairing_list',
        StolenListImporter: 'stolen_list',
        RegistrationListImporter: 'registration_list',
        GoldenListImporter: 'golden_list',
        OperatorDataImporter: 'operator',
        BarredListImporter: 'barred_list',
        BarredTacListImporter: 'barred_tac_list',
        SubscribersListImporter: 'subscribers_registration_list',
        DeviceAssociationListImporter: 'device_association_list',
        MonitoringListImporter: 'monitoring_list'
    }
    subcommand = subcommand_lookup[importer_type]

    import_id = metadata.store_job_metadata(metadata_db_conn,
                                            'dirbs-import',
                                            logger,
                                            job_subcommand=subcommand)

    if importer_type == OperatorDataImporter:
        metrics_root = 'dirbs.import.operator.{0}.'.format(
            importer_data_params.operator)
    else:
        metrics_root = 'dirbs.import.{0}.'.format(subcommand)

    imp = importer_type(conn=db_conn,
                        metadata_conn=metadata_db_conn,
                        import_id=import_id,
                        db_config=db_config,
                        input_filename=importer_data_params.file(tmpdir),
                        logger=logger,
                        statsd=statsd,
                        metrics_root=metrics_root,
                        metrics_run_root='{0}runs.{1}.'.format(
                            metrics_root, import_id),
                        **importer_data_params.kwparams_as_dict())
    return imp
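A hedged usage sketch for this helper: the FakeImporterParams class below is hypothetical and mimics only the attributes get_importer actually touches (file, operator and kwparams_as_dict); the connections, db_config, tmpdir, logger and statsd client are assumed to come from the caller's fixtures:

import os

class FakeImporterParams:
    """Hypothetical stand-in exposing only what get_importer reads."""

    operator = 'operator1'  # consulted only for OperatorDataImporter

    def file(self, tmpdir):
        # Path to the staged input file (name is illustrative)
        return os.path.join(tmpdir, 'gsma_dump.txt')

    def kwparams_as_dict(self):
        # Extra keyword arguments forwarded to the importer constructor
        return {}

importer = get_importer(GSMADataImporter, db_conn, metadata_db_conn, db_config,
                        tmpdir, logger, statsd, FakeImporterParams())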
Example #3
        def decorated(ctx, *args, **kwargs):
            _command = command or os.path.basename(sys.argv[0])
            _logger_name = logger_name or _command.replace('-', '.')
            if callable(metrics_root):
                _metrics_root = metrics_root(ctx, args, **kwargs)
            else:
                _metrics_root = metrics_root
            if _metrics_root is None:
                _metrics_root = _logger_name + '.'
                if subcommand is not None:
                    _metrics_root = _metrics_root + subcommand + '.'

            config = ensure_config(ctx)
            statsd = ensure_statsd(ctx)
            logger = logging.getLogger(_logger_name)
            metrics_run_root = None
            run_id = -1
            metadata_conn = None
            inited_file_logging = False

            try:
                # Store start time so that we can track metrics for total job run time
                st = time.time()

                # Get metadata connection in autocommit mode
                metadata_conn = utils.create_db_connection(config.db_config, autocommit=True)

                try:
                    # Verify DB schema
                    utils.verify_db_schema(metadata_conn, required_role)
                except (utils.DatabaseSchemaException, utils.DatabaseRoleCheckException) as ex:
                    logger.error(str(ex))
                    sys.exit(1)

                # Store metadata and get run_id
                run_id = metadata.store_job_metadata(metadata_conn, _command, logger, job_subcommand=subcommand)

                # Now that we have a run_id, we can setup logging
                if subcommand is not None:
                    log_filename = '{0}_{1}_run_id_{2:d}'.format(_command, subcommand, run_id)
                else:
                    log_filename = '{0}_run_id_{1:d}'.format(_command, run_id)
                inited_file_logging = dirbs.logging.setup_file_logging(config.log_config, log_filename)

                # Get metrics run root based on run_id
                metrics_run_root = '{0}runs.{1:d}.'.format(_metrics_root, run_id)

                # Validate that any exempted device types occur in the imported GSMA TAC DB
                utils.validate_exempted_device_types(metadata_conn, config)

                # Run the actual decorated function with injected args for config, conn, statsd, logger,
                # run_id and metadata_conn
                with utils.create_db_connection(config.db_config) as conn:
                    # Call CLI function with injected args
                    f(ctx,
                      config,
                      statsd,
                      logger,
                      run_id,
                      conn,
                      metadata_conn,
                      _command,
                      _metrics_root,
                      metrics_run_root,
                      *args,
                      **kwargs)

                # Update the last success timestamp
                statsd.gauge('{0}last_success'.format(_metrics_root), int(time.time()))
                metadata.log_job_success(metadata_conn, _command, run_id)
            except:  # noqa: E722
                # Make sure we track the last failure timestamp for any exception and re-raise
                statsd.gauge('{0}last_failure'.format(_metrics_root), int(time.time()))
                # Log metadata in job_metadata table
                if run_id != -1:
                    metadata.log_job_failure(metadata_conn, _command, run_id, logger)
                raise
            finally:
                # Make sure we init file logging as a last resort so that we still flush our
                # buffered log output
                if not inited_file_logging:
                    if subcommand is not None:
                        log_filename = '{0}_{1}_run_id_unknown'.format(_command, subcommand)
                    else:
                        log_filename = '{0}_run_id_unknown'.format(_command)
                    dirbs.logging.setup_file_logging(config.log_config, log_filename)

                # Only track StatsD metrics for run time if we at least retrieved a run id, as this
                # forms part of the key
                dt = int((time.time() - st) * 1000)
                if metrics_run_root is not None:
                    statsd.gauge('{0}runtime.total'.format(metrics_run_root), dt)

                # If there was a duration_callback set, call it here with the calculated dt
                if duration_callback is not None:
                    duration_callback(dt)

                # Cleanup metadata connection (not in with statement)
                if metadata_conn is not None:
                    try:
                        metadata_conn.close()
                    except (psycopg2.InterfaceError, psycopg2.OperationalError) as e:
                        logger.error(str(e))
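The enclosing decorator factory is not shown in this snippet, so the wiring below is a sketch: the factory name cli_wrapper, the click integration and the role name are all assumptions, and the parameter order simply mirrors the arguments injected into f above:

import click

@click.command()
@cli_wrapper(command='dirbs-listgen',  # assumed factory name and parameters
             required_role='dirbs_core_listgen')
def cli(ctx, config, statsd, logger, run_id, conn, metadata_conn,
        command, metrics_root, metrics_run_root):
    # Arguments arrive in the order decorated() injects them
    logger.info('%s run_id %d starting', command, run_id)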