Example #1
0
    def _read_signature_rules_from_database(self, connection):
        """Load signature-classification rules from the database onto self.

        For each (category, attribute-name) pair below, all ``skiplist``
        rules in that category are OR-ed together and compiled into a single
        regex stored via setattr.  Sentinel rules are then loaded from
        ``csignature_rules``.

        :param connection: an open database connection accepted by
            execute_query_fetchall
        """
        for category, category_re in (
            ('prefix', 'prefix_signature_re'),
            ('irrelevant', 'irrelevant_signature_re'),
            ('line_number', 'signatures_with_line_numbers_re')
        ):
            # each row is a one-element tuple holding a rule fragment
            rule_element_list = [
                a_rule
                for (a_rule,) in execute_query_fetchall(
                    connection,
                    "select rule from skiplist "
                    "where category = %s",
                    (category, )
                )
            ]
            # NOTE(review): if a category has no rules, '|'.join yields ''
            # and re.compile('') matches every string — confirm intended
            setattr(
                self,
                category_re,
                re.compile('|'.join(rule_element_list))
            )

        # get sentinel rules
        # SECURITY NOTE(review): eval() executes arbitrary code read from
        # the database; if csignature_rules can be written by untrusted
        # parties this is code injection.  If sentinel rules are only
        # quoted strings/tuples of literals, ast.literal_eval would be
        # safer — confirm what shapes the rules actually take.
        # NOTE(review): sentinel_rule[0] raises IndexError on an empty
        # rule string — assumes rules are non-empty.
        self.signature_sentinels = [
            eval(sentinel_rule)  # eval quoted strings and tuples
                if sentinel_rule[0] in "'\"(" else
            sentinel_rule  # already a string, don't need to eval
            for (sentinel_rule,) in execute_query_fetchall(
                connection,
                "select rule from csignature_rules where category = 'sentinel'"
            )
        ]
Example #2
0
    def _read_signature_rules_from_database(self, connection):
        """Build compiled signature-rule regexes and sentinel rules on self.

        Reads the ``skiplist`` table once per category, stores an OR-ed,
        compiled regex under the corresponding attribute name, then loads
        sentinel rules from ``csignature_rules``.
        """
        category_to_attribute = (
            ('prefix', 'prefix_signature_re'),
            ('irrelevant', 'irrelevant_signature_re'),
            ('line_number', 'signatures_with_line_numbers_re'),
        )
        for category, attribute_name in category_to_attribute:
            rows = execute_query_fetchall(
                connection,
                "select rule from skiplist "
                "where category = %s",
                (category, )
            )
            # OR every rule fragment for this category into one pattern
            combined_pattern = '|'.join(row[0] for row in rows)
            setattr(self, attribute_name, re.compile(combined_pattern))

        # get sentinel rules
        sentinels = []
        for (sentinel_rule,) in execute_query_fetchall(
            connection,
            "select rule from csignature_rules where category = 'sentinel'"
        ):
            if sentinel_rule[0] in "'\"(":
                # eval quoted strings and tuples
                sentinels.append(eval(sentinel_rule))
            else:
                # already a string, don't need to eval
                sentinels.append(sentinel_rule)
        self.signature_sentinels = sentinels
    def test_basic_run_job(self, rget):
        """Integration test for the featured-versions-sync crontabber job.

        The remote versions service is mocked to report 24.5.0 as the only
        featured Firefox version; after the job runs, the featured flag
        should move from 15.0a1 to 24.5.0 in both ``product_versions`` and
        the ``product_info`` view.
        """
        config_manager = self._setup_config_manager()

        # canned HTTP response standing in for the remote versions service
        def mocked_get(url):
            return Response({
                'hits': [
                    {
                        'product': 'Firefox',
                        'is_featured': True,
                        'version': '24.5.0'
                    },
                ],
                'total':
                1
            })

        rget.side_effect = mocked_get

        # fixture state before the job runs: 15.0a1 is the featured version
        rows = execute_query_fetchall(
            self.conn, 'select product_name, version_string, featured_version '
            'from product_versions')
        eq_(sorted(rows), [('Firefox', '15.0a1', True),
                           ('Firefox', '24.5.0', False)])
        # and the view `product_info`...
        rows = execute_query_fetchall(
            self.conn, 'select product_name, version_string, is_featured '
            'from product_info')
        eq_(sorted(rows), [('Firefox', '15.0a1', True),
                           ('Firefox', '24.5.0', False)])
        # This is necessary so we get a new cursor when we do other
        # selects after the crontabber app has run.
        self.conn.commit()

        with config_manager.context() as config:
            tab = CronTabber(config)
            tab.run_all()

            # the job must have completed without recording an error
            information = self._load_structure()
            assert information['featured-versions-sync']
            assert not information['featured-versions-sync']['last_error']
            assert information['featured-versions-sync']['last_success']

            config.logger.info.assert_called_with(
                'Set featured versions for Firefox %r' % ([u'24.5.0'], ))

        # after the run the featured flag has flipped to 24.5.0
        rows = execute_query_fetchall(
            self.conn, 'select product_name, version_string, featured_version '
            'from product_versions')
        eq_(sorted(rows), [('Firefox', '15.0a1', False),
                           ('Firefox', '24.5.0', True)])
        # and the view `product_info`...
        rows = execute_query_fetchall(
            self.conn, 'select product_name, version_string, is_featured '
            'from product_info')
        eq_(sorted(rows), [('Firefox', '15.0a1', False),
                           ('Firefox', '24.5.0', True)])
Example #4
0
    def inner_transaction(self, connection, bug_id, signature_set):
        """Synchronize bug_associations rows for one bug with signature_set.

        An empty ``signature_set`` removes every association for the bug.
        Otherwise associations missing from the set are deleted and new
        members of the set are inserted, logging one line per change.
        """
        self.config.logger.debug("bug %s: %s", bug_id, signature_set)
        if not signature_set:
            # nothing should remain associated with this bug
            execute_no_results(
                connection,
                "DELETE FROM bug_associations WHERE bug_id = %s",
                (bug_id, )
            )
            return

        try:
            existing_rows = execute_query_fetchall(
                connection,
                "SELECT signature FROM bug_associations WHERE bug_id = %s",
                (bug_id, )
            )
            signatures_db = [row[0] for row in existing_rows]

            # drop associations that the incoming set no longer mentions
            for known_signature in signatures_db:
                if known_signature not in signature_set:
                    execute_no_results(
                        connection, """
                        DELETE FROM bug_associations
                        WHERE signature = %s and bug_id = %s""",
                        (known_signature, bug_id)
                    )
                    self.config.logger.info('association removed: %s - "%s"',
                                            bug_id, known_signature)
        except SQLDidNotReturnSingleRow:
            signatures_db = []

        # insert associations that are new in the incoming set
        for wanted_signature in signature_set:
            if wanted_signature not in signatures_db:
                execute_no_results(
                    connection, """
                    INSERT INTO bug_associations (signature, bug_id)
                    VALUES (%s, %s)""",
                    (wanted_signature, bug_id)
                )
                self.config.logger.info('new association: %s - "%s"', bug_id,
                                        wanted_signature)
Example #5
0
 def _get_priority_jobs_transaction(self, connection):
     """Run a single transaction returning the set of priority jobs.

     The first column of every row in ``priorityjobs`` is collected
     into a set.
     """
     rows = execute_query_fetchall(connection, "select * from priorityjobs")
     return {row[0] for row in rows}
Example #6
0
 def _get_live_processors_transaction(self, connection):
     """this transaction just fetches a list of live processors"""
     # a processor is "live" when lastSeenDateTime falls within the
     # registrar's check_in_frequency window
     processor_ids = execute_query_fetchall(
         connection, "select id, name from processors "
         "where lastSeenDateTime > now() - interval %s",
         (self.config.registrar.check_in_frequency, ))
     # rebuild each row as a plain (id, name) tuple — despite the
     # variable name, this returns (id, name) pairs, not bare ids
     return [(a_row[0], a_row[1]) for a_row in processor_ids]
Example #7
0
 def _get_live_processors_transaction(self, connection):
     """Fetch (id, name) pairs for recently-seen processors.

     A processor counts as live when its lastSeenDateTime falls inside
     the registrar's check_in_frequency window.
     """
     live_rows = execute_query_fetchall(
         connection,
         "select id, name from processors "
         "where lastSeenDateTime > now() - interval %s",
         (self.config.registrar.check_in_frequency,)
     )
     # rebuild each database row as a plain (id, name) tuple
     return [(row[0], row[1]) for row in live_rows]
    def test_basic_run_job(self, rget):
        """Integration test for the featured-versions-sync crontabber job.

        The remote versions service is mocked to report 24.5.0 as the only
        featured Firefox version; after the job runs, the featured flag
        should move from 15.0a1 to 24.5.0 in both ``product_versions`` and
        the ``product_info`` view.
        """
        config_manager = self._setup_config_manager()

        # canned HTTP response standing in for the remote versions service
        def mocked_get(url):
            return Response({"hits": [{"product": "Firefox", "is_featured": True, "version": "24.5.0"}], "total": 1})

        rget.side_effect = mocked_get

        # fixture state before the job runs: 15.0a1 is the featured version
        rows = execute_query_fetchall(
            self.conn, "select product_name, version_string, featured_version " "from product_versions"
        )
        eq_(sorted(rows), [("Firefox", "15.0a1", True), ("Firefox", "24.5.0", False)])
        # and the view `product_info`...
        rows = execute_query_fetchall(
            self.conn, "select product_name, version_string, is_featured " "from product_info"
        )
        eq_(sorted(rows), [("Firefox", "15.0a1", True), ("Firefox", "24.5.0", False)])
        # This is necessary so we get a new cursor when we do other
        # selects after the crontabber app has run.
        self.conn.commit()

        with config_manager.context() as config:
            tab = CronTabber(config)
            tab.run_all()

            # the job must have completed without recording an error
            information = self._load_structure()
            assert information["featured-versions-sync"]
            assert not information["featured-versions-sync"]["last_error"]
            assert information["featured-versions-sync"]["last_success"]

            config.logger.info.assert_called_with("Set featured versions for Firefox %r" % ([u"24.5.0"],))

        # after the run the featured flag has flipped to 24.5.0
        rows = execute_query_fetchall(
            self.conn, "select product_name, version_string, featured_version " "from product_versions"
        )
        eq_(sorted(rows), [("Firefox", "15.0a1", False), ("Firefox", "24.5.0", True)])
        # and the view `product_info`...
        rows = execute_query_fetchall(
            self.conn, "select product_name, version_string, is_featured " "from product_info"
        )
        eq_(sorted(rows), [("Firefox", "15.0a1", False), ("Firefox", "24.5.0", True)])
Example #9
0
 def _get_processors_and_loads_transaction(self, connection):
     """Fetch live processors along with their current job counts.

     Returns one mutable [id, job_count, name] list per processor that
     has checked in within the configured frequency window.
     """
     # CTE restricts to processors seen recently; the left join counts
     # each one's unfinished (success is null) jobs
     sql = ("with live_processors as "
            "    (select * from processors where "
            "     lastSeenDateTime > now() - %s)"
            "select"
            "    p.id,"
            "    count(j.owner),"
            "    p.name "
            "from"
            "    live_processors p left join jobs j "
            "        on p.id = j.owner"
            "           and j.success is null "
            "group by p.id, p.name")
     check_in_window = (self.config.registrar.check_in_frequency, )
     rows = execute_query_fetchall(connection, sql, check_in_window)
     # each row becomes a mutable [id, job_count, name] list
     return [list(row[:3]) for row in rows]
Example #10
0
    def inner_transaction(self, connection, bug_id, signature_set):
        """Synchronize bug_associations rows for one bug with signature_set.

        An empty ``signature_set`` deletes every association for the bug.
        Otherwise associations missing from the set are removed and new
        members of the set are inserted, with one log line per change.

        :param connection: open database connection used by the execute_*
            helpers
        :param bug_id: id of the bug whose associations are updated
        :param signature_set: the complete desired set of signatures for
            this bug
        """
        self.config.logger.debug("bug %s: %s", bug_id, signature_set)
        if not signature_set:
            # nothing should remain associated with this bug
            execute_no_results(
                connection,
                "DELETE FROM bug_associations WHERE bug_id = %s",
                (bug_id,)
            )
            return

        try:
            signature_rows = execute_query_fetchall(
                connection,
                "SELECT signature FROM bug_associations WHERE bug_id = %s",
                (bug_id,)
            )
            signatures_db = [x[0] for x in signature_rows]

            # remove associations the incoming set no longer contains
            for signature in signatures_db:
                if signature not in signature_set:
                    execute_no_results(
                        connection,
                        """
                        DELETE FROM bug_associations
                        WHERE signature = %s and bug_id = %s""",
                        (signature, bug_id)
                    )
                    self.config.logger.info('association removed: %s - "%s"', bug_id, signature)
        except SQLDidNotReturnSingleRow:
            # treat "no rows" as an empty set of existing associations
            signatures_db = []

        # insert associations that are new in the incoming set
        for signature in signature_set:
            if signature not in signatures_db:
                execute_no_results(
                    connection,
                    """
                    INSERT INTO bug_associations (signature, bug_id)
                    VALUES (%s, %s)""",
                    (signature, bug_id)
                )
                self.config.logger.info('association added: %s - "%s"', bug_id, signature)
Example #11
0
    def update_bug_data(self, connection, bug_id, signature_set):
        """Bring crashstats_bugassociation in line with ``signature_set``.

        An empty set deletes every association for the bug; otherwise
        stale associations are removed and missing ones inserted, with
        one log line per change.
        """
        self.config.logger.debug('bug %s: %s', bug_id, signature_set)

        # If there's no associated signatures, delete everything for this bug id
        if not signature_set:
            execute_no_results(
                connection,
                """
                DELETE FROM crashstats_bugassociation WHERE bug_id = %s
                """,
                (bug_id, )
            )
            return

        try:
            rows = execute_query_fetchall(
                connection,
                """
                SELECT signature FROM crashstats_bugassociation WHERE bug_id = %s
                """,
                (bug_id, )
            )
            signatures_db = [row[0] for row in rows]

            # remove associations the new signature set no longer contains
            for existing in signatures_db:
                if existing not in signature_set:
                    execute_no_results(
                        connection,
                        """
                        DELETE FROM crashstats_bugassociation
                        WHERE signature = %s and bug_id = %s
                        """,
                        (existing, bug_id)
                    )
                    self.config.logger.info('association removed: %s - "%s"',
                                            bug_id, existing)

        except SQLDidNotReturnSingleRow:
            signatures_db = []

        # insert associations that are new for this bug
        for wanted in signature_set:
            if wanted not in signatures_db:
                execute_no_results(
                    connection,
                    """
                    INSERT INTO crashstats_bugassociation (signature, bug_id)
                    VALUES (%s, %s)
                    """,
                    (wanted, bug_id)
                )
                self.config.logger.info('association added: %s - "%s"', bug_id,
                                        wanted)
Example #12
0
 def _get_processors_and_loads_transaction(self, connection):
     """this transaction fetches a list of live processors and how many
     jobs each currently has assigned to it"""
     # the CTE restricts to processors seen within check_in_frequency;
     # the left join counts each one's unfinished (success is null) jobs
     sql = ("with live_processors as "
            "    (select * from processors where "
            "     lastSeenDateTime > now() - %s)"
            "select"
            "    p.id,"
            "    count(j.owner),"
            "    p.name "
            "from"
            "    live_processors p left join jobs j "
            "        on p.id = j.owner"
            "           and j.success is null "
            "group by p.id, p.name")
     processors_and_load = execute_query_fetchall(
       connection,
       sql,
       (self.config.registrar.check_in_frequency,)
     )
     # convert row tuples to mutable [id, job_count, name] lists
     return [[a_row[0], a_row[1], a_row[2]]
             for a_row in processors_and_load]
    def test_basic_run_job(self, rget):
        """Integration test for the featured-versions-sync crontabber job.

        The mocked service returns hits keyed by product (Firefox and
        Camino), with 24.5.0 as the only featured Firefox version; after
        the job runs, the featured flag should move from 15.0a1 to 24.5.0
        in both ``product_versions`` and the ``product_info`` view.
        """
        config_manager = self._setup_config_manager()

        # canned HTTP response standing in for the remote versions service
        def mocked_get(url):
            return Response({
                'hits': {
                    'Firefox': [
                        {'featured': True, 'version': '24.5.0'},
                        {'featured': False, 'version': '15.0'},
                    ],
                    'Camino': [
                        {'featured': False, 'version': '2.0.2'},
                    ]
                },
                'total': 1
            })

        rget.side_effect = mocked_get

        # fixture state before the job runs: 15.0a1 is the featured version
        rows = execute_query_fetchall(
            self.conn,
            'select product_name, version_string, featured_version '
            'from product_versions'
        )
        eq_(
            sorted(rows),
            [('Firefox', '15.0a1', True), ('Firefox', '24.5.0', False)]
        )
        # and the view `product_info`...
        rows = execute_query_fetchall(
            self.conn,
            'select product_name, version_string, is_featured '
            'from product_info'
        )
        eq_(
            sorted(rows),
            [('Firefox', '15.0a1', True), ('Firefox', '24.5.0', False)]
        )
        # This is necessary so we get a new cursor when we do other
        # selects after the crontabber app has run.
        self.conn.commit()

        with config_manager.context() as config:
            tab = CronTabber(config)
            tab.run_all()

            # the job must have completed without recording an error
            information = self._load_structure()
            assert information['featured-versions-sync']
            assert not information['featured-versions-sync']['last_error']
            assert information['featured-versions-sync']['last_success']

            config.logger.info.assert_called_with(
                'Set featured versions for Firefox %r' % (
                    [u'24.5.0'],
                )
            )

        # after the run the featured flag has flipped to 24.5.0
        rows = execute_query_fetchall(
            self.conn,
            'select product_name, version_string, featured_version '
            'from product_versions'
        )
        eq_(
            sorted(rows),
            [('Firefox', '15.0a1', False), ('Firefox', '24.5.0', True)]
        )
        # and the view `product_info`...
        rows = execute_query_fetchall(
            self.conn,
            'select product_name, version_string, is_featured '
            'from product_info'
        )
        eq_(
            sorted(rows),
            [('Firefox', '15.0a1', False), ('Firefox', '24.5.0', True)]
        )
    def test_basic_run_job(self, rget):
        """Integration test for the featured-versions-automatic job.

        Mocks the three product-details JSON endpoints (Firefox, mobile,
        Thunderbird) and checks that the job flips the featured flags in
        ``product_versions`` to the versions the service reports.
        """
        config_manager = self._setup_config_manager()

        # serve a canned payload per product-details endpoint
        def mocked_get(url):
            if 'firefox_versions.json' in url:
                return Response({
                    'FIREFOX_NIGHTLY': '52.0a1',
                    'FIREFOX_AURORA': '51.0a2',
                    'FIREFOX_ESR': '45.4.0esr',
                    'FIREFOX_ESR_NEXT': '',
                    'LATEST_FIREFOX_DEVEL_VERSION': '50.0b7',
                    'LATEST_FIREFOX_OLDER_VERSION': '3.6.28',
                    'LATEST_FIREFOX_RELEASED_DEVEL_VERSION': '50.0b7',
                    'LATEST_FIREFOX_VERSION': '49.0.1'
                })
            elif 'mobile_versions.json' in url:
                return Response({
                    'nightly_version': '52.0a1',
                    'alpha_version': '51.0a2',
                    'beta_version': '50.0b6',
                    'version': '49.0',
                    'ios_beta_version': '6.0',
                    'ios_version': '5.0'
                })
            elif 'thunderbird_versions.json' in url:
                return Response({
                    'LATEST_THUNDERBIRD_VERSION': '45.4.0',
                    'LATEST_THUNDERBIRD_DEVEL_VERSION': '50.0b1',
                    'LATEST_THUNDERBIRD_ALPHA_VERSION': '51.0a2',
                    'LATEST_THUNDERBIRD_NIGHTLY_VERSION': '52.0a1',
                })
            else:
                # fail loudly on any URL the test didn't anticipate
                raise NotImplementedError(url)

        rget.side_effect = mocked_get

        # Check what's set up in the fixture
        rows = execute_query_fetchall(
            self.conn, 'select product_name, version_string, featured_version '
            'from product_versions order by version_string')
        assert sorted(rows) == [
            ('Firefox', '15.0a1', True),
            ('Firefox', '24.5.0', True),
            ('Firefox', '49.0.1', False),
            ('Firefox', '50.0b', False),
            ('Firefox', '51.0a2', False),
            ('Firefox', '52.0a1', False),
        ]

        # This is necessary so we get a new cursor when we do other
        # selects after the crontabber app has run.
        self.conn.commit()

        with config_manager.context() as config:
            tab = CronTabber(config)
            tab.run_all()

            # the job must have completed without recording an error
            information = self._load_structure()
            assert information['featured-versions-automatic']
            assert not information['featured-versions-automatic']['last_error']
            assert information['featured-versions-automatic']['last_success']

            config.logger.info.assert_called_with(
                'Set featured versions for Thunderbird to: '
                '45.4.0, 50.0b1, 51.0a2, 52.0a1')

        # after the run the featured set matches the mocked service data
        rows = execute_query_fetchall(
            self.conn, 'select product_name, version_string, featured_version '
            'from product_versions')
        eq_(sorted(rows), [
            ('Firefox', '15.0a1', False),
            ('Firefox', '24.5.0', False),
            ('Firefox', '49.0.1', True),
            ('Firefox', '50.0b', True),
            ('Firefox', '51.0a2', True),
            ('Firefox', '52.0a1', True),
        ])
Example #15
0
    def test_basic_run_job(self, req_mock):
        """Integration test for the featured-versions-automatic job.

        Registers canned payloads for the three product-details JSON
        endpoints and verifies that the job updates the featured flags in
        ``product_versions`` — in particular that the Aurora version is no
        longer treated as featured.
        """
        config_manager = self._setup_config_manager()

        req_mock.get(
            'http://example.com/firefox_versions.json',
            json={
                'FIREFOX_NIGHTLY': '52.0a1',
                # Kept for legacy and smooth transition. We USED to consider
                # the latest AURORA version a featured version but we no
                # longer build aurora so Socorro shouldn't pick this up any
                # more even if product-details.mozilla.org supplies it.
                'FIREFOX_AURORA': '51.0a2',
                'FIREFOX_ESR': '45.4.0esr',
                'FIREFOX_ESR_NEXT': '',
                'LATEST_FIREFOX_DEVEL_VERSION': '50.0b7',
                'LATEST_FIREFOX_OLDER_VERSION': '3.6.28',
                'LATEST_FIREFOX_RELEASED_DEVEL_VERSION': '50.0b7',
                'LATEST_FIREFOX_VERSION': '49.0.1',
            })
        req_mock.get('http://example.com/mobile_versions.json',
                     json={
                         'nightly_version': '52.0a1',
                         'alpha_version': '51.0a2',
                         'beta_version': '50.0b6',
                         'version': '49.0',
                         'ios_beta_version': '6.0',
                         'ios_version': '5.0',
                     })
        req_mock.get('http://example.com/thunderbird_versions.json',
                     json={
                         'LATEST_THUNDERBIRD_VERSION': '45.4.0',
                         'LATEST_THUNDERBIRD_DEVEL_VERSION': '50.0b1',
                         'LATEST_THUNDERBIRD_ALPHA_VERSION': '51.0a2',
                         'LATEST_THUNDERBIRD_NIGHTLY_VERSION': '52.0a1',
                     })

        # Check what's set up in the fixture
        rows = execute_query_fetchall(
            self.conn, 'select product_name, version_string, featured_version '
            'from product_versions order by version_string')
        assert sorted(rows) == [
            ('Firefox', '15.0a1', True),
            ('Firefox', '24.5.0', True),
            ('Firefox', '49.0.1', False),
            ('Firefox', '50.0b', False),
            ('Firefox', '51.0a2', False),
            ('Firefox', '52.0a1', False),
        ]

        # This is necessary so we get a new cursor when we do other selects
        # after the crontabber app has run.
        self.conn.commit()

        with config_manager.context() as config:
            tab = CronTabberApp(config)
            tab.run_all()

            # the job must have completed without recording an error
            information = self._load_structure()
            assert information['featured-versions-automatic']
            assert not information['featured-versions-automatic']['last_error']
            assert information['featured-versions-automatic']['last_success']

            config.logger.info.assert_called_with(
                'Set featured versions for Thunderbird to: '
                '45.4.0, 50.0b1, 52.0a1')

        rows = execute_query_fetchall(
            self.conn, 'select product_name, version_string, featured_version '
            'from product_versions')
        expected = [
            ('Firefox', '15.0a1', False),
            ('Firefox', '24.5.0', False),
            ('Firefox', '49.0.1', True),
            ('Firefox', '50.0b', True),
            # Note that the 'Aurora' branch is still mentioned but note that it's NOT featured
            # (hence 'False').
            ('Firefox', '51.0a2', False),
            ('Firefox', '52.0a1', True),
        ]
        assert sorted(rows) == expected
Example #16
0
    def inner_transaction(self, connection, bug_id, status, resolution,
                          short_desc, signature_set):
        """Update one bug's row and its signature associations.

        An empty ``signature_set`` deletes the bug row entirely.  Otherwise
        the bug's status/resolution/short_desc are refreshed if changed,
        stale associations removed, and new ones inserted (but only for
        signatures that have at least one report).  Tracks whether anything
        useful happened; if not, raises NothingUsefulHappened — presumably
        so the enclosing transaction is rolled back (confirm with caller).

        :raises NothingUsefulHappened: when no change worth keeping was made
        """
        self.config.logger.debug("bug %s (%s, %s) %s: %s", bug_id, status,
                                 resolution, short_desc, signature_set)
        if not signature_set:
            # no signatures at all: remove the bug row
            execute_no_results(connection, "DELETE FROM bugs WHERE id = %s",
                               (bug_id, ))
            return
        # useful: did we change anything worth committing?
        useful = False
        # insert_made: did we create the bugs row in this call?
        insert_made = False
        try:
            status_db, resolution_db, short_desc_db = single_row_sql(
                connection, """SELECT status, resolution, short_desc
                FROM bugs
                WHERE id = %s""", (bug_id, ))
            # refresh the bug row only if something actually changed
            if (status_db != status or resolution_db != resolution
                    or short_desc_db != short_desc):
                execute_no_results(
                    connection, """
                    UPDATE bugs SET
                        status = %s, resolution = %s, short_desc = %s
                    WHERE id = %s""", (status, resolution, short_desc, bug_id))
                self.config.logger.info("bug status updated: %s - %s, %s",
                                        bug_id, status, resolution)
                useful = True

            signature_rows = execute_query_fetchall(
                connection,
                "SELECT signature FROM bug_associations WHERE bug_id = %s",
                (bug_id, ))
            signatures_db = [x[0] for x in signature_rows]

            # remove associations the incoming set no longer contains
            for signature in signatures_db:
                if signature not in signature_set:
                    execute_no_results(
                        connection, """
                        DELETE FROM bug_associations
                        WHERE signature = %s and bug_id = %s""",
                        (signature, bug_id))
                    self.config.logger.info('association removed: %s - "%s"',
                                            bug_id, signature)
                    useful = True
        except SQLDidNotReturnSingleRow:
            # the bug row did not exist yet — create it now
            execute_no_results(
                connection, """
                INSERT INTO bugs
                (id, status, resolution, short_desc)
                VALUES (%s, %s, %s, %s)""",
                (bug_id, status, resolution, short_desc))
            insert_made = True
            signatures_db = []

        # add new associations, but only for signatures with actual reports
        for signature in signature_set:
            if signature not in signatures_db:
                if self._has_signature_report(signature, connection):
                    execute_no_results(
                        connection, """
                        INSERT INTO bug_associations (signature, bug_id)
                        VALUES (%s, %s)""", (signature, bug_id))
                    self.config.logger.info('new association: %s - "%s"',
                                            bug_id, signature)
                    useful = True
                else:
                    self.config.logger.info(
                        'rejecting association (no reports with this '
                        'signature): %s - "%s"', bug_id, signature)

        if useful:
            if insert_made:
                self.config.logger.info('new bug: %s - %s, %s, "%s"', bug_id,
                                        status, resolution, short_desc)
        else:
            # nothing of value happened: log why and signal the caller
            if insert_made:
                self.config.logger.info(
                    'rejecting bug (no useful information): '
                    '%s - %s, %s, "%s"', bug_id, status, resolution,
                    short_desc)
            else:
                self.config.logger.info(
                    'skipping bug (no new information): '
                    '%s - %s, %s, "%s"', bug_id, status, resolution,
                    short_desc)
            raise NothingUsefulHappened('nothing useful done')
Example #17
0
    def _sweep_dead_processors_transaction(self, connection):
        """this function is a single database transaction: look for dead
        processors - find all the jobs of dead processors and assign them to
        live processors then delete the dead processor registrations"""
        self.config.logger.info("looking for dead processors")
        try:
            self.config.logger.info(
              "threshold %s",
              self.config.registrar.check_in_frequency
            )
            threshold = single_value_sql(
              connection,
              "select now() - %s - %s",
              (self.config.registrar.processor_grace_period,
               self.config.registrar.check_in_frequency)
            )
            dead_processors = execute_query_fetchall(
              connection,
              "select id from processors where lastSeenDateTime < %s",
              (threshold,)
            )
            if dead_processors:
                self.config.logger.info("found dead processor(s):")
                for a_dead_processor in dead_processors:
                    self.config.logger.info("%d is dead", a_dead_processor[0])

                self.config.logger.debug("getting list of live processor(s):")
                live_processors = execute_query_fetchall(
                  connection,
                  "select id from processors where lastSeenDateTime >= %s",
                  (threshold,)
                )
                if not live_processors:
                    if self.config.registrar.quit_if_no_processors:
                        raise NoProcessorsRegisteredError(
                          "There are no processors registered"
                        )
                    else:
                        self.config.logger.critical(
                          'There are no live processors, nothing to do. '
                          'Waiting for processors to come on line.'
                        )
                        return
                number_of_live_processors = len(live_processors)

                self.config.logger.debug(
                  "getting range of queued date for jobs associated with "
                  "dead processor(s):"
                )
                dead_processor_ids_str = ", ".join(
                  [str(x[0]) for x in dead_processors]
                )
                earliest_dead_job, latest_dead_job = single_row_sql(
                  connection,
                  "select min(queueddatetime), max(queueddatetime) from jobs "
                      "where owner in (%s)" % dead_processor_ids_str
                )
                # take dead processor jobs and reallocate them to live
                # processors in equal sized chunks
                if (earliest_dead_job is not None and
                  latest_dead_job is not None):
                    time_increment = (
                      (latest_dead_job - earliest_dead_job) /
                      number_of_live_processors
                    )
                    for x, live_processor_id in enumerate(live_processors):
                        low_queued_time = (
                          x * time_increment + earliest_dead_job
                        )
                        high_queued_time = (
                          (x + 1) * time_increment + earliest_dead_job
                        )
                        self.config.logger.info(
                          "assigning jobs from %s to %s to processor %s:",
                          low_queued_time,
                          high_queued_time,
                          live_processor_id
                        )
                        # why is the range >= at both ends? the range must be
                        # inclusive, the risk of moving a job twice is low and
                        # consequences low, too.
                        # 1st step: take any jobs of a dead processor that were
                        # in progress and reset them to unprocessed
                        execute_no_results(
                          connection,
                          "update jobs set"
                          "    starteddatetime = NULL "
                          "where"
                          "    %%s >= queueddatetime"
                          "    and queueddatetime >= %%s"
                          "    and owner in (%s)"
                          "    and success is NULL" % dead_processor_ids_str,
                          (high_queued_time, low_queued_time)
                        )
                        # 2nd step: take all jobs of a dead processor and give
                        # them to a new owner
                        execute_no_results(
                          connection,
                          "update jobs set"
                          "    set owner = %%s "
                          "where"
                          "    %%s >= queueddatetime"
                          "    and queueddatetime >= %%s"
                          "    and owner in (%s)" % dead_processor_ids_str,
                          (live_processor_id, high_queued_time,
                           low_queued_time)
                        )

                # transfer stalled priority jobs to new processors
                for dead_processor_tuple in dead_processors:
                    self.config.logger.info(
                      "re-assigning priority jobs from processor %d:",
                      dead_processor_tuple[0]
                    )
                    execute_no_results(
                      connection,
                      "insert into priorityjobs (uuid) select uuid "
                      "from priority_jobs_%d" % dead_processor_tuple
                    )

                self.config.logger.info("removing all dead processors")
                execute_no_results(
                  connection,
                  "delete from processors where lastSeenDateTime < %s",
                  (threshold,)
                )
                # remove dead processors' priority tables
                for a_dead_processor in dead_processors:
                    execute_no_results(
                      connection,
                      "drop table if exists priority_jobs_%d" %
                        a_dead_processor[0]
                    )
        except NoProcessorsRegisteredError:
            self.quit = True
            self.config.logger.critical('there are no live processors')
Example #18
0
    def inner_transaction(
        self,
        connection,
        bug_id,
        status,
        resolution,
        short_desc,
        signature_set
    ):
        """Mirror one bug and its signature associations into the database.

        Parameters:
            connection - an open database connection; all statements run
                inside the caller's transaction
            bug_id - primary key of the bug in the 'bugs' table
            status, resolution, short_desc - current values from the bug
                tracker to be written into the 'bugs' table
            signature_set - the signatures that should be associated with
                this bug; an empty/falsy value means the bug is no longer
                of interest and is deleted outright

        Raises:
            NothingUsefulHappened - when nothing of value was changed
                (no insert and no useful update), signalling the caller
                that this transaction accomplished nothing
        """
        self.config.logger.debug(
            "bug %s (%s, %s) %s: %s",
            bug_id, status, resolution, short_desc, signature_set)
        # a bug with no signatures is not worth tracking: delete it and quit
        if not signature_set:
            execute_no_results(
                connection,
                "DELETE FROM bugs WHERE id = %s",
                (bug_id,)
            )
            return
        # 'useful' records whether any meaningful change was made;
        # 'insert_made' distinguishes a brand-new bug row from an update
        useful = False
        insert_made = False
        try:
            # fetch the existing row; raises SQLDidNotReturnSingleRow when
            # the bug is not yet in the database (handled below as insert)
            status_db, resolution_db, short_desc_db = single_row_sql(
                connection,
                """SELECT status, resolution, short_desc
                FROM bugs
                WHERE id = %s""",
                (bug_id,)
            )
            # update only when at least one tracked field actually changed
            if (status_db != status
                or resolution_db != resolution
                or short_desc_db != short_desc):
                execute_no_results(
                    connection,
                    """
                    UPDATE bugs SET
                        status = %s, resolution = %s, short_desc = %s
                    WHERE id = %s""",
                    (status, resolution, short_desc, bug_id)
                )
                self.config.logger.info(
                    "bug status updated: %s - %s, %s",
                    bug_id,
                    status,
                    resolution
                )
                useful = True

            # current associations recorded for this bug in the database
            signature_rows = execute_query_fetchall(
                connection,
                "SELECT signature FROM bug_associations WHERE bug_id = %s",
                (bug_id,)
            )
            signatures_db = [x[0] for x in signature_rows]

            # remove stale associations: signatures present in the db but
            # absent from the incoming signature_set
            for signature in signatures_db:
                if signature not in signature_set:
                    execute_no_results(
                        connection,
                        """
                        DELETE FROM bug_associations
                        WHERE signature = %s and bug_id = %s""",
                        (signature, bug_id)
                    )
                    self.config.logger.info(
                        'association removed: %s - "%s"',
                        bug_id, signature)
                    useful = True
        except SQLDidNotReturnSingleRow:
            # the bug does not exist yet - insert it; with no prior row
            # there are no existing associations either
            execute_no_results(
                connection,
                """
                INSERT INTO bugs
                (id, status, resolution, short_desc)
                VALUES (%s, %s, %s, %s)""",
                (bug_id, status, resolution, short_desc)
            )
            insert_made = True
            signatures_db = []

        # add any new associations, but only for signatures that have at
        # least one crash report on record
        for signature in signature_set:
            if signature not in signatures_db:
                if self._has_signature_report(signature, connection):
                    execute_no_results(
                        connection,
                        """
                        INSERT INTO bug_associations (signature, bug_id)
                        VALUES (%s, %s)""",
                        (signature, bug_id)
                    )
                    self.config.logger.info(
                        'new association: %s - "%s"',
                        bug_id,
                        signature
                    )
                    useful = True
                else:
                    self.config.logger.info(
                        'rejecting association (no reports with this '
                        'signature): %s - "%s"',
                        bug_id,
                        signature
                    )

        # summarize the outcome; raise when nothing useful was accomplished
        if useful:
            if insert_made:
                self.config.logger.info(
                    'new bug: %s - %s, %s, "%s"',
                    bug_id,
                    status,
                    resolution,
                    short_desc
                )
        else:
            if insert_made:
                self.config.logger.info(
                    'rejecting bug (no useful information): '
                    '%s - %s, %s, "%s"',
                    bug_id, status, resolution, short_desc)
            else:
                self.config.logger.info(
                    'skipping bug (no new information): '
                    '%s - %s, %s, "%s"',
                    bug_id,
                    status,
                    resolution,
                    short_desc
                )
            raise NothingUsefulHappened('nothing useful done')
    def test_basic_run_job(self, rget):
        """End-to-end run of the featured-versions crontabber job.

        Mocks the product-details.mozilla.org JSON endpoints, runs the
        crontabber app, and verifies that the featured_version flags in
        product_versions are updated as expected.
        """
        config_manager = self._setup_config_manager()

        def mocked_get(url):
            # serve canned product-details payloads keyed off the URL
            if 'firefox_versions.json' in url:
                return Response({
                    'FIREFOX_NIGHTLY': '52.0a1',
                    # Kept for legacy and smooth transition.
                    # We USED to consider the latest AURORA version a
                    # featured version but we no longer build aurora
                    # so Socorro shouldn't pick this up any more
                    # even if product-details.mozilla.org supplies it.
                    'FIREFOX_AURORA': '51.0a2',
                    'FIREFOX_ESR': '45.4.0esr',
                    'FIREFOX_ESR_NEXT': '',
                    'LATEST_FIREFOX_DEVEL_VERSION': '50.0b7',
                    'LATEST_FIREFOX_OLDER_VERSION': '3.6.28',
                    'LATEST_FIREFOX_RELEASED_DEVEL_VERSION': '50.0b7',
                    'LATEST_FIREFOX_VERSION': '49.0.1'
                })
            elif 'mobile_versions.json' in url:
                return Response({
                    'nightly_version': '52.0a1',
                    'alpha_version': '51.0a2',
                    'beta_version': '50.0b6',
                    'version': '49.0',
                    'ios_beta_version': '6.0',
                    'ios_version': '5.0'
                })
            elif 'thunderbird_versions.json' in url:
                return Response({
                    'LATEST_THUNDERBIRD_VERSION': '45.4.0',
                    'LATEST_THUNDERBIRD_DEVEL_VERSION': '50.0b1',
                    'LATEST_THUNDERBIRD_ALPHA_VERSION': '51.0a2',
                    'LATEST_THUNDERBIRD_NIGHTLY_VERSION': '52.0a1',
                })
            else:
                raise NotImplementedError(url)

        rget.side_effect = mocked_get

        # Check what's set up in the fixture
        rows = execute_query_fetchall(
            self.conn,
            'select product_name, version_string, featured_version '
            'from product_versions order by version_string'
        )
        assert sorted(rows) == [
            ('Firefox', '15.0a1', True),
            ('Firefox', '24.5.0', True),
            ('Firefox', '49.0.1', False),
            ('Firefox', '50.0b', False),
            ('Firefox', '51.0a2', False),
            ('Firefox', '52.0a1', False),
        ]

        # This is necessary so we get a new cursor when we do other
        # selects after the crontabber app has run.
        self.conn.commit()

        with config_manager.context() as config:
            tab = CronTabber(config)
            tab.run_all()

            information = self._load_structure()
            assert information['featured-versions-automatic']
            assert not information['featured-versions-automatic']['last_error']
            assert information['featured-versions-automatic']['last_success']

            config.logger.info.assert_called_with(
                'Set featured versions for Thunderbird to: '
                '45.4.0, 50.0b1, 52.0a1'
            )

        rows = execute_query_fetchall(
            self.conn,
            'select product_name, version_string, featured_version '
            'from product_versions'
        )
        # plain assert for consistency with the fixture check above
        # (previously used nose-style eq_() here)
        assert sorted(rows) == [
            ('Firefox', '15.0a1', False),
            ('Firefox', '24.5.0', False),
            ('Firefox', '49.0.1', True),
            ('Firefox', '50.0b', True),
            # Note that the 'Aurora' branch is still mentioned but
            # note that it's NOT featured (hence 'False').
            ('Firefox', '51.0a2', False),
            ('Firefox', '52.0a1', True),
        ]
Example #20
0
 def _get_priority_jobs_transaction(self, connection):
     """Run one transaction that fetches the current priority jobs.

     Returns the first column of every row of the 'priorityjobs' table
     as a set."""
     rows = execute_query_fetchall(
         connection, "select * from priorityjobs")
     return {row[0] for row in rows}
Example #21
0
    def _sweep_dead_processors_transaction(self, connection):
        """this function is a single database transaction: look for dead
        processors - find all the jobs of dead processors and assign them to
        live processors then delete the dead processor registrations

        parameters:
            connection - an open database connection; every statement below
                runs inside the caller's transaction

        NoProcessorsRegisteredError is caught here and converted into a quit
        request (self.quit = True); nothing propagates to the caller."""
        self.config.logger.info("looking for dead processors")
        try:
            self.config.logger.info("threshold %s",
                                    self.config.registrar.check_in_frequency)
            # a processor counts as dead when it hasn't checked in within
            # the grace period plus one check-in interval
            threshold = single_value_sql(
                connection, "select now() - %s - %s",
                (self.config.registrar.processor_grace_period,
                 self.config.registrar.check_in_frequency))
            dead_processors = execute_query_fetchall(
                connection,
                "select id from processors where lastSeenDateTime < %s",
                (threshold, ))
            if dead_processors:
                self.config.logger.info("found dead processor(s):")
                for a_dead_processor in dead_processors:
                    self.config.logger.info("%d is dead", a_dead_processor[0])

                self.config.logger.debug("getting list of live processor(s):")
                live_processors = execute_query_fetchall(
                    connection,
                    "select id from processors where lastSeenDateTime >= %s",
                    (threshold, ))
                if not live_processors:
                    if self.config.registrar.quit_if_no_processors:
                        raise NoProcessorsRegisteredError(
                            "There are no processors registered")
                    else:
                        self.config.logger.critical(
                            'There are no live processors, nothing to do. '
                            'Waiting for processors to come on line.')
                        return
                number_of_live_processors = len(live_processors)

                self.config.logger.debug(
                    "getting range of queued date for jobs associated with "
                    "dead processor(s):")
                # the ids come straight out of the processors table (ints),
                # so string interpolation into the SQL is safe here
                dead_processor_ids_str = ", ".join(
                    [str(x[0]) for x in dead_processors])
                earliest_dead_job, latest_dead_job = single_row_sql(
                    connection,
                    "select min(queueddatetime), max(queueddatetime) from jobs "
                    "where owner in (%s)" % dead_processor_ids_str)
                # take dead processor jobs and reallocate them to live
                # processors in equal sized chunks
                if (earliest_dead_job is not None
                        and latest_dead_job is not None):
                    time_increment = ((latest_dead_job - earliest_dead_job) /
                                      number_of_live_processors)
                    for x, live_processor_id in enumerate(live_processors):
                        low_queued_time = (x * time_increment +
                                           earliest_dead_job)
                        high_queued_time = ((x + 1) * time_increment +
                                            earliest_dead_job)
                        self.config.logger.info(
                            "assigning jobs from %s to %s to processor %s:",
                            low_queued_time, high_queued_time,
                            live_processor_id)
                        # why is the range >= at both ends? the range must be
                        # inclusive, the risk of moving a job twice is low and
                        # consequences low, too.
                        # 1st step: take any jobs of a dead processor that were
                        # in progress and reset them to unprocessed
                        execute_no_results(
                            connection, "update jobs set"
                            "    starteddatetime = NULL "
                            "where"
                            "    %%s >= queueddatetime"
                            "    and queueddatetime >= %%s"
                            "    and owner in (%s)"
                            "    and success is NULL" % dead_processor_ids_str,
                            (high_queued_time, low_queued_time))
                        # 2nd step: take all jobs of a dead processor and give
                        # them to a new owner
                        # BUGFIX: this statement previously concatenated to
                        # "update jobs set    set owner = %s ..." - the
                        # doubled SET keyword is invalid SQL and made the
                        # reassignment fail
                        execute_no_results(
                            connection, "update jobs set"
                            "    owner = %%s "
                            "where"
                            "    %%s >= queueddatetime"
                            "    and queueddatetime >= %%s"
                            "    and owner in (%s)" % dead_processor_ids_str,
                            (live_processor_id, high_queued_time,
                             low_queued_time))

                # transfer stalled priority jobs to new processors
                for dead_processor_tuple in dead_processors:
                    self.config.logger.info(
                        "re-assigning priority jobs from processor %d:",
                        dead_processor_tuple[0])
                    # index [0] explicitly (the row is a one-element tuple)
                    # for consistency with every other use in this method,
                    # instead of relying on implicit tuple %-formatting
                    execute_no_results(
                        connection,
                        "insert into priorityjobs (uuid) select uuid "
                        "from priority_jobs_%d" % dead_processor_tuple[0])

                self.config.logger.info("removing all dead processors")
                execute_no_results(
                    connection,
                    "delete from processors where lastSeenDateTime < %s",
                    (threshold, ))
                # remove dead processors' priority tables
                for a_dead_processor in dead_processors:
                    execute_no_results(
                        connection, "drop table if exists priority_jobs_%d" %
                        a_dead_processor[0])
        except NoProcessorsRegisteredError:
            self.quit = True
            self.config.logger.critical('there are no live processors')
    def test_basic_run_job(self, rget):
        """End-to-end run of the featured-versions crontabber job.

        Mocks the product-details.mozilla.org JSON endpoints, runs the
        crontabber app, and verifies that the featured_version flags in
        product_versions are updated as expected.
        """
        config_manager = self._setup_config_manager()

        def mocked_get(url):
            # serve canned product-details payloads keyed off the URL
            if 'firefox_versions.json' in url:
                return Response({
                    'FIREFOX_NIGHTLY': '52.0a1',
                    'FIREFOX_AURORA': '51.0a2',
                    'FIREFOX_ESR': '45.4.0esr',
                    'FIREFOX_ESR_NEXT': '',
                    'LATEST_FIREFOX_DEVEL_VERSION': '50.0b7',
                    'LATEST_FIREFOX_OLDER_VERSION': '3.6.28',
                    'LATEST_FIREFOX_RELEASED_DEVEL_VERSION': '50.0b7',
                    'LATEST_FIREFOX_VERSION': '49.0.1'
                })
            elif 'mobile_versions.json' in url:
                return Response({
                    'nightly_version': '52.0a1',
                    'alpha_version': '51.0a2',
                    'beta_version': '50.0b6',
                    'version': '49.0',
                    'ios_beta_version': '6.0',
                    'ios_version': '5.0'
                })
            elif 'thunderbird_versions.json' in url:
                return Response({
                    'LATEST_THUNDERBIRD_VERSION': '45.4.0',
                    'LATEST_THUNDERBIRD_DEVEL_VERSION': '50.0b1',
                    'LATEST_THUNDERBIRD_ALPHA_VERSION': '51.0a2',
                    'LATEST_THUNDERBIRD_NIGHTLY_VERSION': '52.0a1',
                })
            else:
                raise NotImplementedError(url)

        rget.side_effect = mocked_get

        # Check what's set up in the fixture
        rows = execute_query_fetchall(
            self.conn,
            'select product_name, version_string, featured_version '
            'from product_versions order by version_string'
        )
        assert sorted(rows) == [
            ('Firefox', '15.0a1', True),
            ('Firefox', '24.5.0', True),
            ('Firefox', '49.0.1', False),
            ('Firefox', '50.0b', False),
            ('Firefox', '51.0a2', False),
            ('Firefox', '52.0a1', False),
        ]

        # This is necessary so we get a new cursor when we do other
        # selects after the crontabber app has run.
        self.conn.commit()

        with config_manager.context() as config:
            tab = CronTabber(config)
            tab.run_all()

            information = self._load_structure()
            assert information['featured-versions-automatic']
            assert not information['featured-versions-automatic']['last_error']
            assert information['featured-versions-automatic']['last_success']

            config.logger.info.assert_called_with(
                'Set featured versions for Thunderbird to: '
                '45.4.0, 50.0b1, 51.0a2, 52.0a1'
            )

        rows = execute_query_fetchall(
            self.conn,
            'select product_name, version_string, featured_version '
            'from product_versions'
        )
        # plain assert for consistency with the fixture check above
        # (previously used nose-style eq_() here)
        assert sorted(rows) == [
            ('Firefox', '15.0a1', False),
            ('Firefox', '24.5.0', False),
            ('Firefox', '49.0.1', True),
            ('Firefox', '50.0b', True),
            ('Firefox', '51.0a2', True),
            ('Firefox', '52.0a1', True),
        ]