Example #1
    def get_total_lead_time(self, results):
        """
        Get the total lead time - the time from the start of a story until its current state.

        :param list results: contains inflated results from Neo4j
        :return: the total lead time in seconds, or None if sufficient data is not available
        :rtype: float or None
        """
        first_artifact = results[0]
        last_artifact = results[-1]
        times = {
            'BugzillaBug': ['creation_time', None],
            'DistGitCommit': ['commit_date', None],
            'Advisory': ['created_at', None],
            'ContainerAdvisory': ['created_at', None],
            'FreshmakerEvent': ['time_created', 'time_done'],
            'KojiBuild': ['creation_time', 'completion_time'],
            'ModuleKojiBuild': ['creation_time', 'completion_time'],
            'ContainerKojiBuild': ['creation_time', 'completion_time']
        }

        start_time_key = times[first_artifact.__label__][0]
        start_time = getattr(first_artifact, start_time_key)
        if not start_time:
            id_num = getattr(first_artifact,
                             first_artifact.unique_id_property + '_')
            log.warning(
                'While calculating the total lead time, a %s with ID %s was encountered '
                'without a creation time.', first_artifact.__label__, id_num)
            return
        end_time_key = times[last_artifact.__label__][1]

        if end_time_key:
            end_time = getattr(last_artifact, end_time_key)
            if not end_time:
                end_time = datetime.utcnow()
        elif last_artifact.__label__.endswith('Advisory'):
            if last_artifact.state in ['SHIPPED_LIVE', 'DROPPED_NO_SHIP']:
                end_time = getattr(last_artifact, 'status_time')
            else:
                end_time = datetime.utcnow()
        else:
            # The artifact type has no separate completion time, so fall back to its own
            # creation time (e.g. a DistGitCommit's commit_date)
            end_time = getattr(last_artifact, times[last_artifact.__label__][0])

        # Remove timezone info so that both are offset naive and thus able to be subtracted
        start_time = start_time.replace(tzinfo=None)
        end_time = end_time.replace(tzinfo=None)
        total = end_time - start_time
        if total.total_seconds() < 0:
            first_id_num = getattr(first_artifact,
                                   first_artifact.unique_id_property + '_')
            last_id_num = getattr(last_artifact,
                                  last_artifact.unique_id_property + '_')
            log.warning(
                'A negative total lead time was calculated, in a story starting with a %s with ID '
                '%s and ending with a %s with ID %s.',
                first_artifact.__label__, first_id_num,
                last_artifact.__label__, last_id_num)
            return 0
        return total.total_seconds()
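
A minimal, self-contained sketch of how get_total_lead_time() can be exercised with stand-in objects. The real method operates on inflated neomodel nodes, so the attributes below (__label__, unique_id_property, creation_time, completion_time) simply mirror what the method reads; story_utils is a hypothetical instance of the class that defines the method and is not part of the snippet above.

from datetime import datetime
from types import SimpleNamespace

# Stand-ins for inflated Neo4j nodes; only the attributes read by
# get_total_lead_time() are populated.
bug = SimpleNamespace(
    __label__='BugzillaBug',
    unique_id_property='id',
    id_='12345',
    creation_time=datetime(2023, 1, 1, 8, 0),
)
build = SimpleNamespace(
    __label__='KojiBuild',
    unique_id_property='id',
    id_='67890',
    creation_time=datetime(2023, 1, 1, 9, 0),
    completion_time=datetime(2023, 1, 1, 10, 30),
)

# `story_utils` is a hypothetical instance of the class that defines
# get_total_lead_time(); the class itself is not shown above.
lead_time = story_utils.get_total_lead_time([bug, build])
print(lead_time)  # 9000.0 -> 2.5 hours from bug creation to build completion
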
Example #2
def create_app(config_obj=None):
    """
    Create a Flask application object.

    :return: a Flask application object
    :rtype: flask.Flask
    """
    app = Flask(__name__)
    if config_obj:
        app.config.from_object(config_obj)
    else:
        load_config(app)

    if app.config['PRODUCTION'] and app.secret_key == 'replace-me-with-something-random':
        raise RuntimeError(
            'You need to change the app.secret_key value for production')
    elif app.config['ENABLE_AUTH']:
        base_error = 'The "{0}" configuration must be set if authentication is enabled'
        if not app.config['OIDC_INTROSPECT_URL']:
            raise RuntimeError(base_error.format('OIDC_INTROSPECT_URL'))
        elif not app.config['OIDC_CLIENT_ID']:
            raise RuntimeError(base_error.format('OIDC_CLIENT_ID'))
        elif not app.config['OIDC_CLIENT_SECRET']:
            raise RuntimeError(base_error.format('OIDC_CLIENT_SECRET'))

    # Set the Neo4j connection URI based on the Flask config
    neomodel_config.DATABASE_URL = app.config.get('NEO4J_URI')

    if app.config['ENABLE_AUTH']:
        # Import this here so that flask_oidc isn't required to run the app if authentication is
        # disabled
        from estuary.auth import EstuaryOIDC
        app.oidc = EstuaryOIDC(app)

    init_logging(app)

    for status_code in default_exceptions.keys():
        app.register_error_handler(status_code, json_error)
    app.register_error_handler(ValidationError, json_error)
    app.register_error_handler(ServiceUnavailable, json_error)
    app.register_error_handler(AuthError, json_error)
    app.register_blueprint(api_v1, url_prefix='/api/v1')
    app.add_url_rule('/healthcheck', view_func=health_check)
    try:
        from estuary.api.monitoring import configure_monitoring, monitoring_api
        app.register_blueprint(monitoring_api, url_prefix='/monitoring')
        configure_monitoring(app)
    except ImportError as e:
        # If prometheus_client isn't installed, then don't register the monitoring blueprint;
        # any other import error is unexpected and is re-raised
        if 'prometheus_client' not in str(e):
            raise
        log.warning(
            'The prometheus_client package is not installed, so metrics will be disabled')

    app.after_request(insert_headers)

    return app
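
A minimal sketch of using the factory locally. The module path estuary.app and the DevConfig class are assumptions made for illustration and are not taken from the snippet; only the configuration keys that create_app() reads are set.

# The import path `estuary.app` and the DevConfig class are assumptions made
# for this sketch; adjust them to wherever create_app() actually lives.
from estuary.app import create_app


class DevConfig(object):
    PRODUCTION = False
    ENABLE_AUTH = False
    NEO4J_URI = 'bolt://neo4j:neo4j@localhost:7687'


app = create_app(DevConfig)

if __name__ == '__main__':
    # The built-in development server; not suitable for production deployments
    app.run(host='127.0.0.1', port=8080, debug=True)
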
Example #3
    def get_connection(self, db_name, force_new=False, retry=None):
        """
        Return an existing psycopg2 connection and establish it if needed.

        :param str db_name: the database name to get a connection to
        :kwarg bool force_new: forces a new database connection even if one
            already exists
        :kwarg int retry: the number of times to retry a failed connection. If this
            is not set, then the Teiid connection attempt will be repeated until it is successful.
        :return: a connection to Teiid
        :rtype: psycopg2 connection
        """
        if not force_new and db_name in self._connections:
            return self._connections[db_name]
        if retry is not None and retry < 1:
            raise ValueError(
                'The retry keyword must contain a value greater than 0')

        log.debug('Connecting to Teiid host {0}:{1}'.format(
            self.host, self.port))
        attempts = 0
        while True:
            attempts += 1
            try:
                conn = psycopg2.connect(database=db_name,
                                        host=self.host,
                                        port=str(self.port),
                                        user=self.username,
                                        password=self.password,
                                        connect_timeout=300)
                break
            except psycopg2.OperationalError as e:
                if retry and attempts > retry:
                    raise
                else:
                    log.exception(e)
                    log.warning(
                        'The Teiid connection failed on attempt {0}. Sleeping for 60 '
                        'seconds.'.format(attempts))
                    sleep(60)

        # Teiid does not support setting this value at all, and unless we
        # specify ISOLATION_LEVEL_AUTOCOMMIT (zero), psycopg2 will send a
        # SET command that the Teiid server doesn't understand.
        conn.set_isolation_level(0)

        self._connections[db_name] = conn
        return conn
Example #4
    def query(self, sql, db='public', retry=None):
        """
        Send the SQL query to Teiid and return the rows as a list.

        :param str sql: the SQL query to send to the database
        :kwarg str db: the database name to query on
        :kwarg int retry: the number of times to retry a failed query. If this
            is not set, then the Teiid query will be repeated until it is successful.
        :return: a list of rows from Teiid. Each row is a dictionary
            with the column headers as the keys.
        :rtype: list
        """
        con = self.get_connection(db)
        cursor = con.cursor()
        if retry is not None and retry < 1:
            raise ValueError(
                'The retry keyword must contain a value greater than 0')

        # Throttle queries so that consecutive queries are at least half a second apart
        if self._last_query_dt:
            min_interval = timedelta(seconds=0.5)
            now_and_last_diff = datetime.utcnow() - self._last_query_dt
            if now_and_last_diff < min_interval:
                # Sleep for the remainder of the interval, not the time already elapsed
                sleep((min_interval - now_and_last_diff).total_seconds())

        log.debug('Querying Teiid DB "{0}" with SQL:\n{1}'.format(db, sql))

        fifteen_mins = 15 * 60
        backoff = 30
        attempts = 0
        while True:
            attempts += 1
            try:
                if attempts > 1:
                    # Restart the database connection after failed queries
                    con = self.get_connection(db, force_new=True)
                    cursor = con.cursor()
                cursor.execute(sql)
                self._last_query_dt = datetime.utcnow()
                break
            except psycopg2.OperationalError as e:
                if retry and attempts > retry:
                    raise
                else:
                    log.exception(e)
                    # Double the backoff time, capped at 15 minutes
                    backoff = min(backoff * 2, fifteen_mins)
                    log.warning(
                        'The Teiid query failed on attempt {0}. Sleeping for {1} seconds.'
                        .format(attempts, backoff))
                    sleep(backoff)

        data = cursor.fetchall()
        # column header names
        cols = [t[0] for t in cursor.description or []]
        log.debug('Found the following columns: {}'.format(cols))
        log.debug('Received {} rows from Teiid'.format(len(data)))
        # build a return array with all columns
        return [dict(zip(cols, row)) for row in data]
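
A hedged usage sketch of get_connection() and query(). The class name Teiid and its constructor arguments are assumptions inferred from the attributes the methods reference (host, port, username, password); the table and column names are purely illustrative.

# `Teiid` and its constructor signature are assumptions for this sketch; the
# snippet above only shows two methods of the class.
teiid = Teiid(host='teiid.example.com', port=5432,
              username='readonly', password='secret')

# Retry a failed query up to three times before giving up
rows = teiid.query('SELECT id, name FROM packages LIMIT 10', db='public', retry=3)
for row in rows:
    print(row['id'], row['name'])
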
Example #5
    def get_total_processing_time(self, results):
        """
        Get the total time spent processing the story.

        :param list results: contains inflated results from Neo4j
        :return: a tuple of the total processing time in seconds and a flag that is True when
            the calculation may be inaccurate due to missing data
        :rtype: tuple
        """
        flag = False
        total = 0
        # If there is a build in the story, it will be assigned here so that it can later be
        # checked to see if it was attached to an advisory in the story
        build = None
        timed_processes = {
            'FreshmakerEvent': ['time_created', 'time_done'],
            'KojiBuild': ['creation_time', 'completion_time'],
            'ModuleKojiBuild': ['creation_time', 'completion_time'],
            'ContainerKojiBuild': ['creation_time', 'completion_time'],
            'Advisory': ['created_at', 'status_time'],
            'ContainerAdvisory': ['created_at', 'status_time']
        }
        for index, artifact in enumerate(results):
            if artifact.__label__ not in timed_processes:
                continue

            creation_time = getattr(artifact,
                                    timed_processes[artifact.__label__][0])
            if not creation_time:
                id_num = getattr(artifact, artifact.unique_id_property + '_')
                log.warning(
                    'While calculating the total processing time, a %s with ID %s was encountered '
                    'without a creation time.', artifact.__label__, id_num)
                flag = True
                continue

            if artifact.__label__.endswith('KojiBuild'):
                build = artifact

            if artifact.__label__.endswith('Advisory'):
                if artifact.state in ['SHIPPED_LIVE', 'DROPPED_NO_SHIP']:
                    completion_time = getattr(
                        artifact, timed_processes[artifact.__label__][1])
                else:
                    completion_time = datetime.utcnow()
                if build:
                    creation_time = artifact.attached_build_time(
                        artifact, build)
                    if not creation_time:
                        creation_time = getattr(
                            build, timed_processes[build.__label__][1])
                if not build or not creation_time:
                    log.warning(
                        'While calculating the processing time, a %s with ID %s was '
                        'encountered without a build or creation time.',
                        artifact.__label__,
                        getattr(artifact, artifact.unique_id_property + '_'))
                    flag = True
                    continue

            # We do not want the processing time of the entire FreshmakerEvent, just the
            # processing time until the displayed ContainerKojiBuild is created
            elif artifact.__label__ == 'FreshmakerEvent':
                if index != len(results) - 1:
                    next_artifact = results[index + 1]
                    completion_time = getattr(
                        next_artifact,
                        timed_processes[next_artifact.__label__][0])
                elif artifact.state_name in [
                        'COMPLETE', 'SKIPPED', 'FAILED', 'CANCELED'
                ]:
                    completion_time = getattr(
                        artifact, timed_processes['FreshmakerEvent'][1])
                    if completion_time is None:
                        id_num = getattr(artifact,
                                         artifact.unique_id_property + '_')
                        log.warning(
                            'While calculating the total processing time, a %s with ID %s was '
                            'encountered without a completion time or subsequent build.',
                            artifact.__label__, id_num)
                        flag = True
                        continue
                else:
                    completion_time = datetime.utcnow()

            else:
                completion_time = getattr(
                    artifact, timed_processes[artifact.__label__][1])
                if not completion_time:
                    completion_time = datetime.utcnow()

            # Remove timezone info so that both are offset naive and thus able to be subtracted
            creation_time = creation_time.replace(tzinfo=None)
            completion_time = completion_time.replace(tzinfo=None)
            processing_time = completion_time - creation_time

            if processing_time.total_seconds() < 0:
                id_num = getattr(artifact, artifact.unique_id_property + '_')
                log.warning(
                    'A negative processing time was calculated, with a %s with ID %s.',
                    artifact.__label__, id_num)
            else:
                total += processing_time.total_seconds()

        return total, flag
Example #6
    def get_wait_times(self, results):
        """
        Get the wait time between two artifacts for each pair of them, and the sum of these times.

        :param list results: contains inflated results from Neo4j
        :return: tuple with a list of wait times in seconds in order of the story (oldest to
            newest, with None where a wait time could not be calculated), and the total wait
            time. If results has fewer than two elements, ([0], 0) is returned.
        :rtype: tuple
        """
        len_story = len(results)
        if len_story < 2:
            return [0], 0

        # Some services do not have a real completion time because they perform a single action
        # that takes a negligible amount of time
        completion_times = {
            'BugzillaBug': 'creation_time',
            'DistGitCommit': 'commit_date',
            'Advisory': 'status_time',
            'ContainerAdvisory': 'status_time',
            # Although Freshmaker has a duration, we need to see how long it takes to trigger a
            # ContainerKojiBuild from when it started
            'FreshmakerEvent': 'time_created',
            'KojiBuild': 'completion_time',
            'ModuleKojiBuild': 'completion_time',
            'ContainerKojiBuild': 'completion_time'
        }

        total_wait_time = 0
        wait_times = [None] * (len_story - 1)

        for index in range(len_story - 1):
            artifact = results[index]
            next_artifact = results[index + 1]
            property_name = completion_times[artifact.__label__]
            completion_time = getattr(artifact, property_name)
            if not completion_time or not next_artifact.timeline_datetime:
                continue

            if next_artifact.__label__.endswith('Advisory'):
                # Query for the attached build time once and reuse the result
                next_artifact_start_time = next_artifact.attached_build_time(
                    next_artifact, artifact)
                if not next_artifact_start_time:
                    id_num = getattr(next_artifact,
                                     next_artifact.unique_id_property + '_')
                    log.warning(
                        'While calculating the wait time, a %s with ID %s was '
                        'encountered without an attached build time.',
                        next_artifact.__label__, id_num)
                    continue
            else:
                next_artifact_start_time = next_artifact.timeline_datetime

            # Remove timezone info so that both are offset naive and thus able to be subtracted
            next_artifact_start_time = next_artifact_start_time.replace(
                tzinfo=None)
            completion_time = completion_time.replace(tzinfo=None)

            # Ensure that the artifacts are sequential
            if completion_time > next_artifact_start_time:
                continue

            # Find the time between when the current artifact completes and the next one starts
            wait_time = next_artifact_start_time - completion_time
            wait_times[index] = wait_time.total_seconds()

            # The 'wait time' between a FreshmakerEvent and a ContainerKojiBuild is still a part of
            # the processing in a FreshmakerEvent, so we do not count it towards the total wait time
            if artifact.__label__ != 'FreshmakerEvent':
                total_wait_time += wait_time.total_seconds()

        return wait_times, total_wait_time