Beispiel #1
0
 def _has_signature_report(self, signature, connection):
     """Return True when at least one row in ``reports`` carries *signature*.

     Relies on single_row_sql raising SQLDidNotReturnSingleRow for an
     empty result rather than inspecting the returned row.
     """
     try:
         single_row_sql(
             connection, """
             SELECT 1 FROM reports
             WHERE signature = %s LIMIT 1""", (signature,))
     except SQLDidNotReturnSingleRow:
         # The helper found no row -> no report with this signature.
         return False
     else:
         return True
Beispiel #2
0
 def _has_signature_report(self, signature, connection):
     """Tell whether the reports table contains any row for *signature*."""
     found = True
     try:
         single_row_sql(
             connection,
             """
             SELECT 1 FROM reports
             WHERE signature = %s LIMIT 1""",
             (signature,)
         )
     except SQLDidNotReturnSingleRow:
         # No row came back; the helper signals that via this exception.
         found = False
     return found
Beispiel #3
0
    def test_single_value_sql5(self):
        """single_row_sql must forward the SQL text and params to cursor.execute."""
        cursor = Mock()
        cursor.execute = Mock()
        cursor.fetchall = Mock(return_value=((17, 22),))
        connection = Mock()
        connection.cursor.return_value = cursor

        dbapi2_util.single_row_sql(connection, "select 17, 22", (1, 2, 3))
        eq_(connection.cursor.call_count, 1)
        eq_(cursor.execute.call_count, 1)
        cursor.execute.assert_called_once_with("select 17, 22", (1, 2, 3))
Beispiel #4
0
    def test_single_value_sql5(self):
        """Verify single_row_sql passes both statement and bind params through."""
        fake_cursor = Mock()
        fake_cursor.execute = Mock()
        fake_cursor.fetchall = Mock(return_value=((17, 22),))
        fake_conn = Mock()
        fake_conn.cursor.return_value = fake_cursor

        dbapi2_util.single_row_sql(fake_conn, "select 17, 22", (1, 2, 3))
        eq_(fake_conn.cursor.call_count, 1)
        eq_(fake_cursor.execute.call_count, 1)
        fake_cursor.execute.assert_called_once_with("select 17, 22", (1, 2, 3))
Beispiel #5
0
    def test_single_value_sql4(self):
        """An empty fetchall must surface as SQLDidNotReturnSingleRow."""
        cursor = MagicMock()
        cursor.execute = Mock()
        cursor.fetchall = Mock(return_value=None)
        conn = MagicMock()
        conn.cursor.return_value.__enter__.return_value = cursor

        with pytest.raises(dbapi2_util.SQLDidNotReturnSingleRow):
            dbapi2_util.single_row_sql(conn, 'select 17, 22', (1, 2, 3))
        assert conn.cursor.call_count == 1
        assert cursor.execute.call_count == 1
        cursor.execute.assert_called_once_with("select 17, 22", (1, 2, 3))
    def test_single_value_sql4(self):
        """The helper still executes the statement once before raising."""
        mocked_cursor = MagicMock()
        mocked_cursor.execute = Mock()
        mocked_cursor.fetchall = Mock(return_value=None)
        mocked_conn = MagicMock()
        mocked_conn.cursor.return_value.__enter__.return_value = mocked_cursor

        with pytest.raises(dbapi2_util.SQLDidNotReturnSingleRow):
            dbapi2_util.single_row_sql(mocked_conn, 'select 17, 22', (1, 2, 3))
        assert mocked_conn.cursor.call_count == 1
        assert mocked_cursor.execute.call_count == 1
        mocked_cursor.execute.assert_called_once_with("select 17, 22", (1, 2, 3))
Beispiel #7
0
    def update_crashstats_signature(self, connection, signature, report_date,
                                    report_build):
        """Upsert a crashstats_signature row for *signature*.

        Keeps the smallest (earliest) first_build / first_date seen so far.
        EAFP upsert: try the SELECT first and fall back to an INSERT when
        single_row_sql signals that no row exists yet.
        """
        try:
            sig = single_row_sql(
                connection, """
                SELECT signature, first_build, first_date
                FROM crashstats_signature
                WHERE signature=%s
                """, (signature,))
        except SQLDidNotReturnSingleRow:
            # First sighting of this signature: insert the reported values.
            sql = """
            INSERT INTO crashstats_signature (signature, first_build, first_date)
            VALUES (%s, %s, %s)
            """
            params = (signature, report_build, report_date)
        else:
            # Row exists: keep whichever build/date is earlier.
            sql = """
            UPDATE crashstats_signature
            SET first_build=%s, first_date=%s
            WHERE signature=%s
            """
            earliest_build = min(sig[1], int(report_build))
            earliest_date = min(sig[2], string_to_datetime(report_date))
            params = (earliest_build, earliest_date, sig[0])

        execute_no_results(connection, sql, params)
Beispiel #8
0
    def test_single_row_sql1(self):
        """single_row_sql returns the first fetched row; params default to None."""
        cursor = Mock()
        cursor.execute = Mock()
        cursor.fetchall = Mock(return_value=((17, 22),))
        connection = Mock()
        connection.cursor.return_value = cursor

        row = dbapi2_util.single_row_sql(connection, "select 17, 22")
        self.assertEqual(row, (17, 22))
        self.assertEqual(connection.cursor.call_count, 1)
        self.assertEqual(cursor.execute.call_count, 1)
        cursor.execute.assert_called_once_with('select 17, 22', None)
Beispiel #9
0
    def test_single_row_sql1(self):
        """A one-row result is returned as-is when no params are given."""
        fake_cursor = MagicMock()
        fake_cursor.execute = Mock()
        fake_cursor.fetchall = Mock(return_value=((17, 22),))
        fake_conn = MagicMock()
        fake_conn.cursor.return_value.__enter__.return_value = fake_cursor

        row = dbapi2_util.single_row_sql(fake_conn, "select 17, 22")
        assert row == (17, 22)
        assert fake_conn.cursor.call_count == 1
        assert fake_cursor.execute.call_count == 1
        fake_cursor.execute.assert_called_once_with('select 17, 22', None)
Beispiel #10
0
    def test_single_row_sql1(self):
        """Happy path: one row fetched, execute called with params=None."""
        mocked_cursor = MagicMock()
        mocked_cursor.execute = Mock()
        mocked_cursor.fetchall = Mock(return_value=((17, 22),))
        mocked_conn = MagicMock()
        mocked_conn.cursor.return_value.__enter__.return_value = mocked_cursor

        result = dbapi2_util.single_row_sql(mocked_conn, "select 17, 22")
        eq_(result, (17, 22))
        eq_(mocked_conn.cursor.call_count, 1)
        eq_(mocked_cursor.execute.call_count, 1)
        mocked_cursor.execute.assert_called_once_with('select 17, 22', None)
Beispiel #11
0
    def inner_transaction(self, connection, bug_id, status, resolution,
                          short_desc, signature_set):
        """Synchronize one bug's row and its signature associations.

        Runs as a single unit of work on *connection*:

        - empty *signature_set*: delete the bug row entirely and return;
        - existing bug: update status/resolution/short_desc if changed and
          delete associations whose signature is no longer in the set;
        - missing bug (single_row_sql raises): insert it fresh;
        - finally, add associations for new signatures that actually have
          reports (checked via _has_signature_report).

        Raises NothingUsefulHappened when no useful change was made —
        presumably so the surrounding transaction wrapper rolls back
        (TODO confirm against the caller).
        """
        self.config.logger.debug("bug %s (%s, %s) %s: %s", bug_id, status,
                                 resolution, short_desc, signature_set)
        if not signature_set:
            # No signatures at all: the bug is not interesting; purge it.
            execute_no_results(connection, "DELETE FROM bugs WHERE id = %s",
                               (bug_id, ))
            return
        # 'useful' tracks whether anything worth keeping happened;
        # 'insert_made' whether the bug row was newly created.
        useful = False
        insert_made = False
        try:
            status_db, resolution_db, short_desc_db = single_row_sql(
                connection, """SELECT status, resolution, short_desc
                FROM bugs
                WHERE id = %s""", (bug_id, ))
            # Bug exists: update only when some field actually changed.
            if (status_db != status or resolution_db != resolution
                    or short_desc_db != short_desc):
                execute_no_results(
                    connection, """
                    UPDATE bugs SET
                        status = %s, resolution = %s, short_desc = %s
                    WHERE id = %s""", (status, resolution, short_desc, bug_id))
                self.config.logger.info("bug status updated: %s - %s, %s",
                                        bug_id, status, resolution)
                useful = True

            signature_rows = execute_query_fetchall(
                connection,
                "SELECT signature FROM bug_associations WHERE bug_id = %s",
                (bug_id, ))
            signatures_db = [x[0] for x in signature_rows]

            # Drop associations for signatures no longer attached to the bug.
            for signature in signatures_db:
                if signature not in signature_set:
                    execute_no_results(
                        connection, """
                        DELETE FROM bug_associations
                        WHERE signature = %s and bug_id = %s""",
                        (signature, bug_id))
                    self.config.logger.info('association removed: %s - "%s"',
                                            bug_id, signature)
                    useful = True
        except SQLDidNotReturnSingleRow:
            # Bug row does not exist yet: create it; there can be no
            # pre-existing associations, hence the empty signatures_db.
            execute_no_results(
                connection, """
                INSERT INTO bugs
                (id, status, resolution, short_desc)
                VALUES (%s, %s, %s, %s)""",
                (bug_id, status, resolution, short_desc))
            insert_made = True
            signatures_db = []

        # Add associations for signatures new to this bug, but only when at
        # least one crash report with that signature exists.
        for signature in signature_set:
            if signature not in signatures_db:
                if self._has_signature_report(signature, connection):
                    execute_no_results(
                        connection, """
                        INSERT INTO bug_associations (signature, bug_id)
                        VALUES (%s, %s)""", (signature, bug_id))
                    self.config.logger.info('new association: %s - "%s"',
                                            bug_id, signature)
                    useful = True
                else:
                    self.config.logger.info(
                        'rejecting association (no reports with this '
                        'signature): %s - "%s"', bug_id, signature)

        if useful:
            if insert_made:
                self.config.logger.info('new bug: %s - %s, %s, "%s"', bug_id,
                                        status, resolution, short_desc)
        else:
            if insert_made:
                self.config.logger.info(
                    'rejecting bug (no useful information): '
                    '%s - %s, %s, "%s"', bug_id, status, resolution,
                    short_desc)
            else:
                self.config.logger.info(
                    'skipping bug (no new information): '
                    '%s - %s, %s, "%s"', bug_id, status, resolution,
                    short_desc)
            # Signal the caller that this transaction produced nothing.
            raise NothingUsefulHappened('nothing useful done')
Beispiel #12
0
    def inner_transaction(
        self,
        connection,
        bug_id,
        status,
        resolution,
        short_desc,
        signature_set
    ):
        """Synchronize one bug's row and its signature associations.

        Runs as a single unit of work on *connection*:

        - empty *signature_set*: delete the bug row entirely and return;
        - existing bug: update status/resolution/short_desc if changed and
          delete associations whose signature left the set;
        - missing bug (single_row_sql raises): insert it fresh;
        - finally, add associations for new signatures that actually have
          reports (checked via _has_signature_report).

        Raises NothingUsefulHappened when no useful change was made —
        presumably so the surrounding transaction wrapper rolls back
        (TODO confirm against the caller).
        """
        self.config.logger.debug(
            "bug %s (%s, %s) %s: %s",
            bug_id, status, resolution, short_desc, signature_set)
        if not signature_set:
            # No signatures: this bug carries no information; purge it.
            execute_no_results(
                connection,
                "DELETE FROM bugs WHERE id = %s",
                (bug_id,)
            )
            return
        # 'useful' tracks whether anything worth keeping happened;
        # 'insert_made' whether the bug row was newly created.
        useful = False
        insert_made = False
        try:
            status_db, resolution_db, short_desc_db = single_row_sql(
                connection,
                """SELECT status, resolution, short_desc
                FROM bugs
                WHERE id = %s""",
                (bug_id,)
            )
            # Bug exists: update only when some field actually changed.
            if (status_db != status
                or resolution_db != resolution
                or short_desc_db != short_desc):
                execute_no_results(
                    connection,
                    """
                    UPDATE bugs SET
                        status = %s, resolution = %s, short_desc = %s
                    WHERE id = %s""",
                    (status, resolution, short_desc, bug_id)
                )
                self.config.logger.info(
                    "bug status updated: %s - %s, %s",
                    bug_id,
                    status,
                    resolution
                )
                useful = True

            signature_rows = execute_query_fetchall(
                connection,
                "SELECT signature FROM bug_associations WHERE bug_id = %s",
                (bug_id,)
            )
            signatures_db = [x[0] for x in signature_rows]

            # Drop associations for signatures no longer attached to the bug.
            for signature in signatures_db:
                if signature not in signature_set:
                    execute_no_results(
                        connection,
                        """
                        DELETE FROM bug_associations
                        WHERE signature = %s and bug_id = %s""",
                        (signature, bug_id)
                    )
                    self.config.logger.info(
                        'association removed: %s - "%s"',
                        bug_id, signature)
                    useful = True
        except SQLDidNotReturnSingleRow:
            # Bug row does not exist yet: create it; there can be no
            # pre-existing associations, hence the empty signatures_db.
            execute_no_results(
                connection,
                """
                INSERT INTO bugs
                (id, status, resolution, short_desc)
                VALUES (%s, %s, %s, %s)""",
                (bug_id, status, resolution, short_desc)
            )
            insert_made = True
            signatures_db = []

        # Add associations for signatures new to this bug, but only when at
        # least one crash report with that signature exists.
        for signature in signature_set:
            if signature not in signatures_db:
                if self._has_signature_report(signature, connection):
                    execute_no_results(
                        connection,
                        """
                        INSERT INTO bug_associations (signature, bug_id)
                        VALUES (%s, %s)""",
                        (signature, bug_id)
                    )
                    self.config.logger.info(
                        'new association: %s - "%s"',
                        bug_id,
                        signature
                    )
                    useful = True
                else:
                    self.config.logger.info(
                        'rejecting association (no reports with this '
                        'signature): %s - "%s"',
                        bug_id,
                        signature
                    )

        if useful:
            if insert_made:
                self.config.logger.info(
                    'new bug: %s - %s, %s, "%s"',
                    bug_id,
                    status,
                    resolution,
                    short_desc
                )
        else:
            if insert_made:
                self.config.logger.info(
                    'rejecting bug (no useful information): '
                    '%s - %s, %s, "%s"',
                    bug_id, status, resolution, short_desc)
            else:
                self.config.logger.info(
                    'skipping bug (no new information): '
                    '%s - %s, %s, "%s"',
                    bug_id,
                    status,
                    resolution,
                    short_desc
                )
            # Signal the caller that this transaction produced nothing.
            raise NothingUsefulHappened('nothing useful done')
Beispiel #13
0
    def _sweep_dead_processors_transaction(self, connection):
        """Reap dead processors in a single database transaction.

        Finds processors that have not checked in recently, reassigns their
        queued jobs to live processors in equal time-based chunks, moves
        their stalled priority jobs back to the shared priorityjobs table,
        then deletes the dead registrations and their per-processor
        priority tables.  Sets ``self.quit`` when no processors remain and
        the configuration demands a shutdown.
        """
        self.config.logger.info("looking for dead processors")
        try:
            self.config.logger.info("threshold %s",
                                    self.config.registrar.check_in_frequency)
            # A processor is considered dead when it has not checked in
            # within grace period + check-in frequency.
            threshold = single_value_sql(
                connection, "select now() - %s - %s",
                (self.config.registrar.processor_grace_period,
                 self.config.registrar.check_in_frequency))
            dead_processors = execute_query_fetchall(
                connection,
                "select id from processors where lastSeenDateTime < %s",
                (threshold, ))
            if dead_processors:
                self.config.logger.info("found dead processor(s):")
                for a_dead_processor in dead_processors:
                    self.config.logger.info("%d is dead", a_dead_processor[0])

                self.config.logger.debug("getting list of live processor(s):")
                live_processors = execute_query_fetchall(
                    connection,
                    "select id from processors where lastSeenDateTime >= %s",
                    (threshold, ))
                if not live_processors:
                    if self.config.registrar.quit_if_no_processors:
                        raise NoProcessorsRegisteredError(
                            "There are no processors registered")
                    else:
                        self.config.logger.critical(
                            'There are no live processors, nothing to do. '
                            'Waiting for processors to come on line.')
                        return
                number_of_live_processors = len(live_processors)

                self.config.logger.debug(
                    "getting range of queued date for jobs associated with "
                    "dead processor(s):")
                dead_processor_ids_str = ", ".join(
                    [str(x[0]) for x in dead_processors])
                earliest_dead_job, latest_dead_job = single_row_sql(
                    connection,
                    "select min(queueddatetime), max(queueddatetime) from jobs "
                    "where owner in (%s)" % dead_processor_ids_str)
                # take dead processor jobs and reallocate them to live
                # processors in equal sized chunks
                if (earliest_dead_job is not None
                        and latest_dead_job is not None):
                    time_increment = ((latest_dead_job - earliest_dead_job) /
                                      number_of_live_processors)
                    for x, live_processor_id in enumerate(live_processors):
                        low_queued_time = (x * time_increment +
                                           earliest_dead_job)
                        high_queued_time = ((x + 1) * time_increment +
                                            earliest_dead_job)
                        self.config.logger.info(
                            "assigning jobs from %s to %s to processor %s:",
                            low_queued_time, high_queued_time,
                            live_processor_id)
                        # why is the range >= at both ends? the range must be
                        # inclusive, the risk of moving a job twice is low and
                        # consequences low, too.
                        # 1st step: take any jobs of a dead processor that were
                        # in progress and reset them to unprocessed
                        execute_no_results(
                            connection, "update jobs set"
                            "    starteddatetime = NULL "
                            "where"
                            "    %%s >= queueddatetime"
                            "    and queueddatetime >= %%s"
                            "    and owner in (%s)"
                            "    and success is NULL" % dead_processor_ids_str,
                            (high_queued_time, low_queued_time))
                        # 2nd step: take all jobs of a dead processor and give
                        # them to a new owner
                        # BUG FIX: this statement previously concatenated to
                        # "update jobs set    set owner = %s" -- the
                        # duplicated SET keyword made the SQL invalid.
                        execute_no_results(
                            connection, "update jobs set"
                            "    owner = %%s "
                            "where"
                            "    %%s >= queueddatetime"
                            "    and queueddatetime >= %%s"
                            "    and owner in (%s)" % dead_processor_ids_str,
                            (live_processor_id, high_queued_time,
                             low_queued_time))

                # transfer stalled priority jobs to new processors
                for dead_processor_tuple in dead_processors:
                    self.config.logger.info(
                        "re-assigning priority jobs from processor %d:",
                        dead_processor_tuple[0])
                    # BUG FIX: format with the id column, not the whole row
                    # tuple; consistent with the log call above and the
                    # drop-table statement below.
                    execute_no_results(
                        connection,
                        "insert into priorityjobs (uuid) select uuid "
                        "from priority_jobs_%d" % dead_processor_tuple[0])

                self.config.logger.info("removing all dead processors")
                execute_no_results(
                    connection,
                    "delete from processors where lastSeenDateTime < %s",
                    (threshold, ))
                # remove dead processors' priority tables
                for a_dead_processor in dead_processors:
                    execute_no_results(
                        connection, "drop table if exists priority_jobs_%d" %
                        a_dead_processor[0])
        except NoProcessorsRegisteredError:
            self.quit = True
            self.config.logger.critical('there are no live processors')
Beispiel #14
0
    def _sweep_dead_processors_transaction(self, connection):
        """Reap dead processors in a single database transaction.

        Finds processors that have not checked in recently, reassigns their
        queued jobs to live processors in equal time-based chunks, moves
        their stalled priority jobs back to the shared priorityjobs table,
        then deletes the dead registrations and their per-processor
        priority tables.  Sets ``self.quit`` when no processors remain and
        the configuration demands a shutdown.
        """
        self.config.logger.info("looking for dead processors")
        try:
            self.config.logger.info(
              "threshold %s",
              self.config.registrar.check_in_frequency
            )
            # A processor is considered dead when it has not checked in
            # within grace period + check-in frequency.
            threshold = single_value_sql(
              connection,
              "select now() - %s - %s",
              (self.config.registrar.processor_grace_period,
               self.config.registrar.check_in_frequency)
            )
            dead_processors = execute_query_fetchall(
              connection,
              "select id from processors where lastSeenDateTime < %s",
              (threshold,)
            )
            if dead_processors:
                self.config.logger.info("found dead processor(s):")
                for a_dead_processor in dead_processors:
                    self.config.logger.info("%d is dead", a_dead_processor[0])

                self.config.logger.debug("getting list of live processor(s):")
                live_processors = execute_query_fetchall(
                  connection,
                  "select id from processors where lastSeenDateTime >= %s",
                  (threshold,)
                )
                if not live_processors:
                    if self.config.registrar.quit_if_no_processors:
                        raise NoProcessorsRegisteredError(
                          "There are no processors registered"
                        )
                    else:
                        self.config.logger.critical(
                          'There are no live processors, nothing to do. '
                          'Waiting for processors to come on line.'
                        )
                        return
                number_of_live_processors = len(live_processors)

                self.config.logger.debug(
                  "getting range of queued date for jobs associated with "
                  "dead processor(s):"
                )
                dead_processor_ids_str = ", ".join(
                  [str(x[0]) for x in dead_processors]
                )
                earliest_dead_job, latest_dead_job = single_row_sql(
                  connection,
                  "select min(queueddatetime), max(queueddatetime) from jobs "
                      "where owner in (%s)" % dead_processor_ids_str
                )
                # take dead processor jobs and reallocate them to live
                # processors in equal sized chunks
                if (earliest_dead_job is not None and
                  latest_dead_job is not None):
                    time_increment = (
                      (latest_dead_job - earliest_dead_job) /
                      number_of_live_processors
                    )
                    for x, live_processor_id in enumerate(live_processors):
                        low_queued_time = (
                          x * time_increment + earliest_dead_job
                        )
                        high_queued_time = (
                          (x + 1) * time_increment + earliest_dead_job
                        )
                        self.config.logger.info(
                          "assigning jobs from %s to %s to processor %s:",
                          low_queued_time,
                          high_queued_time,
                          live_processor_id
                        )
                        # why is the range >= at both ends? the range must be
                        # inclusive, the risk of moving a job twice is low and
                        # consequences low, too.
                        # 1st step: take any jobs of a dead processor that were
                        # in progress and reset them to unprocessed
                        execute_no_results(
                          connection,
                          "update jobs set"
                          "    starteddatetime = NULL "
                          "where"
                          "    %%s >= queueddatetime"
                          "    and queueddatetime >= %%s"
                          "    and owner in (%s)"
                          "    and success is NULL" % dead_processor_ids_str,
                          (high_queued_time, low_queued_time)
                        )
                        # 2nd step: take all jobs of a dead processor and give
                        # them to a new owner
                        # BUG FIX: this statement previously concatenated to
                        # "update jobs set    set owner = %s" -- the
                        # duplicated SET keyword made the SQL invalid.
                        execute_no_results(
                          connection,
                          "update jobs set"
                          "    owner = %%s "
                          "where"
                          "    %%s >= queueddatetime"
                          "    and queueddatetime >= %%s"
                          "    and owner in (%s)" % dead_processor_ids_str,
                          (live_processor_id, high_queued_time,
                           low_queued_time)
                        )

                # transfer stalled priority jobs to new processors
                for dead_processor_tuple in dead_processors:
                    self.config.logger.info(
                      "re-assigning priority jobs from processor %d:",
                      dead_processor_tuple[0]
                    )
                    # BUG FIX: format with the id column, not the whole row
                    # tuple; consistent with the log call above and the
                    # drop-table statement below.
                    execute_no_results(
                      connection,
                      "insert into priorityjobs (uuid) select uuid "
                      "from priority_jobs_%d" % dead_processor_tuple[0]
                    )

                self.config.logger.info("removing all dead processors")
                execute_no_results(
                  connection,
                  "delete from processors where lastSeenDateTime < %s",
                  (threshold,)
                )
                # remove dead processors' priority tables
                for a_dead_processor in dead_processors:
                    execute_no_results(
                      connection,
                      "drop table if exists priority_jobs_%d" %
                        a_dead_processor[0]
                    )
        except NoProcessorsRegisteredError:
            self.quit = True
            self.config.logger.critical('there are no live processors')