def update_crashstats_signature(self, signature, report_date, report_build):
    """Insert or update the first-seen build/date for a signature.

    Looks the signature up in crashstats_signature; if a row exists, lowers
    first_build/first_date to the earliest values seen, otherwise inserts a
    new row.

    :arg str signature: the crash signature
    :arg report_date: date of the report (string parseable by
        string_to_datetime)
    :arg report_build: build id of the report (int-convertible)
    """
    # Normalize once so both the update comparison and the insert store the
    # same types; previously the insert path stored the raw string values
    # while the update path compared against int/datetime.
    report_build = int(report_build)
    report_date = string_to_datetime(report_date)
    with transaction_context(self.database) as connection:
        # Pull the data from the db. If it's there, then do an update. If it's
        # not there, then do an insert.
        try:
            sql = """
            SELECT signature, first_build, first_date
            FROM crashstats_signature
            WHERE signature=%s
            """
            sig = single_row_sql(connection, sql, (signature,))
            sql = """
            UPDATE crashstats_signature
            SET first_build=%s, first_date=%s
            WHERE signature=%s
            """
            params = (
                min(sig[1], report_build),
                min(sig[2], report_date),
                sig[0]
            )
        except SQLDidNotReturnSingleRow:
            # First report for this signature--insert a fresh row
            sql = """
            INSERT INTO crashstats_signature (signature, first_build, first_date)
            VALUES (%s, %s, %s)
            """
            params = (signature, report_build, report_date)
        execute_no_results(connection, sql, params)
def _remember_failure(self, class_, duration, exc_type, exc_value, exc_tb):
    """Record a failed job run in cron_log and emit a runtime gauge."""
    with transaction_context(self.database_class) as connection:
        formatted_traceback = ''.join(traceback.format_tb(exc_tb))
        app_name = class_.app_name
        insert_sql = """INSERT INTO cron_log (
            app_name,
            duration,
            exc_type,
            exc_value,
            exc_traceback,
            log_time
        ) VALUES (
            %s, %s, %s, %s, %s, %s
        )"""
        insert_params = (
            app_name,
            '%.5f' % duration,
            repr(exc_type),
            repr(exc_value),
            formatted_traceback,
            utc_now(),
        )
        execute_no_results(connection, insert_sql, insert_params)
        metrics.gauge('job_failure_runtime', value=duration, tags=['job:%s' % app_name])
def __getitem__(self, key):
    """return the job info or raise a KeyError"""
    field_names = (
        'next_run', 'first_run', 'last_run', 'last_success',
        'depends_on', 'error_count', 'last_error', 'ongoing'
    )
    sql = """
    SELECT next_run, first_run, last_run, last_success, depends_on,
        error_count, last_error, ongoing
    FROM cron_job
    WHERE app_name = %s
    """
    try:
        with transaction_context(self.database_class) as conn:
            record = single_row_sql(conn, sql, (key,))
    except SQLDidNotReturnSingleRow:
        raise KeyError(key)
    job_info = dict(zip(field_names, record))
    # last_error is stored as JSON text; hand callers a Python dict
    if job_info['last_error']:
        job_info['last_error'] = json.loads(job_info['last_error'])
    return job_info
def insert_build(self, product_name, release_channel, major_version,
                 release_version, version_string, build_id, archive_url):
    """Insert one build row into crashstats_productversion.

    Duplicates (IntegrityError) are silently skipped; any other database
    error is logged and swallowed. Successful inserts bump
    self.successful_inserts.
    """
    params = dict(
        product_name=product_name,
        release_channel=release_channel,
        major_version=major_version,
        release_version=release_version,
        version_string=version_string,
        build_id=build_id,
        archive_url=archive_url,
    )
    if self.config.verbose:
        self.logger.info('INSERTING: %s' % list(sorted(params.items())))
    sql = """
    INSERT INTO crashstats_productversion (
        product_name, release_channel, major_version, release_version,
        version_string, build_id, archive_url
    ) VALUES (
        %(product_name)s, %(release_channel)s, %(major_version)s,
        %(release_version)s, %(version_string)s, %(build_id)s,
        %(archive_url)s
    )
    """
    with transaction_context(self.database) as conn:
        cursor = conn.cursor()
        try:
            cursor.execute(sql, params)
            self.successful_inserts += 1
        except psycopg2.IntegrityError:
            # If it's an IntegrityError, we already have it and everything is fine
            pass
        except psycopg2.Error:
            self.logger.exception('failed to insert')
def insert_build(self, product_name, release_channel, major_version,
                 release_version, version_string, build_id, archive_url):
    """Insert a new build into the crashstats_productversion table."""
    params = dict(
        product_name=product_name,
        release_channel=release_channel,
        major_version=major_version,
        release_version=release_version,
        version_string=version_string,
        build_id=build_id,
        archive_url=archive_url,
    )
    if self.config.verbose:
        logger.info('INSERTING: %s' % list(sorted(params.items())))
    sql = """
    INSERT INTO crashstats_productversion (
        product_name, release_channel, major_version, release_version,
        version_string, build_id, archive_url
    ) VALUES (
        %(product_name)s, %(release_channel)s, %(major_version)s,
        %(release_version)s, %(version_string)s, %(build_id)s,
        %(archive_url)s
    )
    """
    with transaction_context(self.database) as conn:
        cursor = conn.cursor()
        try:
            cursor.execute(sql, params)
        except psycopg2.IntegrityError:
            # If it's an IntegrityError, we already have it and everything is fine
            pass
        except psycopg2.Error:
            logger.exception('failed to insert')
def __getitem__(self, key):
    """return the job info or raise a KeyError"""
    sql = """
    SELECT next_run, first_run, last_run, last_success, depends_on,
        error_count, last_error, ongoing
    FROM cron_job
    WHERE app_name = %s
    """
    columns = (
        'next_run', 'first_run', 'last_run', 'last_success',
        'depends_on', 'error_count', 'last_error', 'ongoing'
    )
    try:
        with transaction_context(self.database_class) as conn:
            record = single_row_sql(conn, sql, (key,))
    except SQLDidNotReturnSingleRow:
        # no such job -> mapping protocol demands KeyError
        raise KeyError(key)
    row = dict(zip(columns, record))
    # Unserialize last_error into a Python dict
    if row['last_error']:
        row['last_error'] = json.loads(row['last_error'])
    return row
def update_crashstats_signature(self, signature, report_date, report_build):
    """Insert or update the first-seen build/date for a signature.

    If the signature already exists, first_build/first_date are lowered to
    the earliest values seen; otherwise a new row is inserted.

    :arg str signature: the crash signature
    :arg report_date: report date string (parsed by string_to_datetime)
    :arg report_build: report build id (int-convertible)
    """
    # Normalize once up front so the insert path stores the same types the
    # update path compares against (previously the insert stored the raw
    # string build/date values).
    report_build = int(report_build)
    report_date = string_to_datetime(report_date)
    with transaction_context(self.database) as connection:
        # Pull the data from the db. If it's there, then do an update. If it's
        # not there, then do an insert.
        try:
            sql = """
            SELECT signature, first_build, first_date
            FROM crashstats_signature
            WHERE signature=%s
            """
            sig = single_row_sql(connection, sql, (signature,))
            sql = """
            UPDATE crashstats_signature
            SET first_build=%s, first_date=%s
            WHERE signature=%s
            """
            params = (
                min(sig[1], report_build),
                min(sig[2], report_date),
                sig[0]
            )
        except SQLDidNotReturnSingleRow:
            sql = """
            INSERT INTO crashstats_signature (signature, first_build, first_date)
            VALUES (%s, %s, %s)
            """
            params = (signature, report_build, report_date)
        execute_no_results(connection, sql, params)
def _remember_failure(self, class_, duration, exc_type, exc_value, exc_tb):
    """Write a failure record to cron_log and report the runtime metric."""
    with transaction_context(self.database_class) as connection:
        tb_text = ''.join(traceback.format_tb(exc_tb))
        app_name = class_.app_name
        execute_no_results(
            connection,
            """INSERT INTO cron_log (
                app_name,
                duration,
                exc_type,
                exc_value,
                exc_traceback,
                log_time
            ) VALUES (
                %s, %s, %s, %s, %s, %s
            )""",
            (
                app_name,
                '%.5f' % duration,
                repr(exc_type),
                repr(exc_value),
                tb_text,
                utc_now(),
            ),
        )
        metrics.gauge('job_failure_runtime', value=duration, tags=['job:%s' % app_name])
def __iter__(self):
    """Iterate over the app_name of every job stored in cron_job."""
    with transaction_context(self.database_class) as conn:
        rows = execute_query_fetchall(conn, 'SELECT app_name FROM cron_job')
        app_names = [row[0] for row in rows]
    return iter(app_names)
def __iter__(self):
    """Yield the app_name of each cron_job row."""
    app_names = []
    with transaction_context(self.database_class) as conn:
        for row in execute_query_fetchall(conn, 'SELECT app_name FROM cron_job'):
            app_names.append(row[0])
    return iter(app_names)
def test_commit_called(self):
    """The context manager commits when the block exits without error."""
    connection_context = mock.MagicMock()
    with transaction_context(connection_context):
        pass
    # Assert commit() gets called because the context exited cleanly
    # (no exception was raised in the block)
    assert connection_context.return_value.__enter__.return_value.commit.called
def test_commit_called(self):
    """The context manager commits when the block exits without error."""
    connection_context = mock.MagicMock()
    with transaction_context(connection_context):
        pass
    # Assert commit() gets called because the context exited cleanly
    # (no exception was raised in the block)
    assert connection_context.return_value.__enter__.return_value.commit.called
def __contains__(self, key):
    """return True if we have a job by this key"""
    lookup_sql = """SELECT app_name FROM cron_job WHERE app_name = %s"""
    try:
        with transaction_context(self.database_class) as conn:
            single_value_sql(conn, lookup_sql, (key,))
    except SQLDidNotReturnSingleValue:
        return False
    return True
def check_past_missing(self):
    """Check the table for missing crashes and check to see if they exist.

    Reads all crash ids still flagged is_processed=False, probes the S3
    bucket for each processed crash, and flips is_processed=True for any
    that have since appeared.
    """
    connection_source = self.crashstorage.connection_source
    bucket_name = connection_source.config.bucket_name
    boto_conn = connection_source._connect()

    # Fetch every crash id still marked as missing
    crash_ids = []
    with transaction_context(self.database) as conn:
        sql = """
        SELECT crash_id
        FROM crashstats_missingprocessedcrash
        WHERE is_processed=False
        """
        params = ()
        crash_ids = [item[0] for item in execute_query_fetchall(conn, sql, params)]

    # The bucket is the same for every crash id--fetch it once instead of
    # once per loop iteration
    bucket = boto_conn.get_bucket(bucket_name)
    no_longer_missing = []
    for crash_id in crash_ids:
        processed_crash_key = bucket.get_key(PROCESSED_CRASH_TEMPLATE % crash_id)
        if processed_crash_key is not None:
            no_longer_missing.append(crash_id)

    if no_longer_missing:
        with transaction_context(self.database) as conn:
            sql = """
            UPDATE crashstats_missingprocessedcrash
            SET is_processed=True
            WHERE crash_id IN %s
            """
            params = (tuple(no_longer_missing),)
            execute_no_results(conn, sql, params)
        self.logger.info(
            'Updated %s missing crashes which have since been processed',
            len(no_longer_missing)
        )
def __contains__(self, key):
    """return True if we have a job by this key"""
    try:
        with transaction_context(self.database_class) as conn:
            # value is irrelevant; only existence matters
            single_value_sql(
                conn,
                """SELECT app_name FROM cron_job WHERE app_name = %s""",
                (key,)
            )
        return True
    except SQLDidNotReturnSingleValue:
        return False
def test_rollback_called(self):
    """An exception inside the context triggers rollback and is re-raised."""
    connection_context = mock.MagicMock()
    exc = Exception('omg')
    with pytest.raises(Exception) as exc_info:
        with transaction_context(connection_context):
            raise exc
    # Assert rollback() gets called because an exception was thrown in the
    # context
    mocked_connection = connection_context.return_value.__enter__.return_value
    assert mocked_connection.rollback.called
    # Assert the exception is rethrown
    assert exc_info.value == exc
def test_rollback_called(self):
    """Raising inside the context rolls back and rethrows the exception."""
    connection_context = mock.MagicMock()
    exc = Exception('omg')
    with pytest.raises(Exception) as exc_info:
        with transaction_context(connection_context):
            raise exc
    # Assert rollback() gets called because an exception was thrown in the
    # context
    assert connection_context.return_value.__enter__.return_value.rollback.called
    # Assert the exception is rethrown
    assert exc_info.value == exc
def __delitem__(self, key):
    """remove the item by key or raise KeyError"""
    select_sql = """SELECT app_name FROM cron_job WHERE app_name = %s"""
    delete_sql = """DELETE FROM cron_job WHERE app_name = %s"""
    with transaction_context(self.database_class) as connection:
        try:
            # result intentionally ignored
            single_value_sql(connection, select_sql, (key,))
        except SQLDidNotReturnSingleValue:
            raise KeyError(key)
        # item exists
        execute_no_results(connection, delete_sql, (key,))
def _remember_success(self, class_, success_date, duration):
    """Record a successful job run in cron_log and emit a runtime gauge."""
    with transaction_context(self.database_class) as connection:
        app_name = class_.app_name
        insert_sql = """INSERT INTO cron_log (
            app_name,
            success,
            duration,
            log_time
        ) VALUES (
            %s, %s, %s, %s
        )"""
        execute_no_results(
            connection,
            insert_sql,
            (app_name, success_date, '%.5f' % duration, utc_now()),
        )
        metrics.gauge('job_success_runtime', value=duration, tags=['job:%s' % app_name])
def _remember_success(self, class_, success_date, duration):
    """Write a success record to cron_log and report the runtime metric."""
    with transaction_context(self.database_class) as connection:
        app_name = class_.app_name
        execute_no_results(
            connection,
            """INSERT INTO cron_log (
                app_name,
                success,
                duration,
                log_time
            ) VALUES (
                %s, %s, %s, %s
            )""",
            (
                app_name,
                success_date,
                '%.5f' % duration,
                utc_now(),
            ),
        )
        metrics.gauge('job_success_runtime', value=duration, tags=['job:%s' % app_name])
def update_bug_data(self, bug_id, signature_set):
    """Synchronize crashstats_bugassociation rows for ``bug_id``.

    Deletes associations no longer present in ``signature_set`` and inserts
    any new ones; if ``signature_set`` is empty, deletes every association
    for the bug.
    """
    with transaction_context(self.database) as connection:
        self.logger.debug('bug %s: %s', bug_id, signature_set)
        # If there's no associated signatures, delete everything for this bug id
        if not signature_set:
            sql = """
            DELETE FROM crashstats_bugassociation WHERE bug_id = %s
            """
            execute_no_results(connection, sql, (bug_id, ))
            return
        try:
            sql = """
            SELECT signature FROM crashstats_bugassociation WHERE bug_id = %s
            """
            signature_rows = execute_query_fetchall(
                connection, sql, (bug_id, ))
            signatures_db = [x[0] for x in signature_rows]
            # Remove associations that the bug no longer references
            for signature in signatures_db:
                if signature not in signature_set:
                    sql = """
                    DELETE FROM crashstats_bugassociation
                    WHERE signature = %s and bug_id = %s
                    """
                    execute_no_results(connection, sql, (signature, bug_id))
                    self.logger.info('association removed: %s - "%s"', bug_id, signature)
        except SQLDidNotReturnSingleRow:
            # NOTE(review): a fetchall-style helper typically returns an
            # empty list rather than raising this exception -- confirm
            # whether this branch is actually reachable.
            signatures_db = []
        # Add associations that are new in signature_set
        for signature in signature_set:
            if signature not in signatures_db:
                sql = """
                INSERT INTO crashstats_bugassociation (signature, bug_id)
                VALUES (%s, %s)
                """
                execute_no_results(connection, sql, (signature, bug_id))
                self.logger.info('association added: %s - "%s"', bug_id, signature)
def handle_missing(self, date, missing):
    """Report crash ids for missing processed crashes."""
    metrics.gauge('missing_processed', len(missing))
    if not missing:
        self.logger.info('All crashes for %s were processed.', date)
        return
    insert_sql = """
    INSERT INTO crashstats_missingprocessedcrash (crash_id, is_processed, created)
    VALUES (%s, False, current_timestamp)
    """
    for crash_id in missing:
        self.logger.info('Missing: %s', crash_id)
        with transaction_context(self.database) as conn:
            try:
                execute_no_results(conn, insert_sql, (crash_id,))
            except IntegrityError:
                # If there's already one, that's fine--just move on
                pass
def items(self):
    """return all the app_names and their values as tuples"""
    sql = """
    SELECT app_name, next_run, first_run, last_run, last_success,
        depends_on, error_count, last_error
    FROM cron_job"""
    columns = (
        'app_name', 'next_run', 'first_run', 'last_run',
        'last_success', 'depends_on', 'error_count', 'last_error'
    )
    results = []
    with transaction_context(self.database_class) as conn:
        for record in execute_query_fetchall(conn, sql):
            row = dict(zip(columns, record))
            # key is the app_name; everything else stays in the value dict
            results.append((row.pop('app_name'), row))
    return results
def update_bug_data(self, bug_id, signature_set):
    """Synchronize crashstats_bugassociation rows for ``bug_id``.

    Deletes associations no longer present in ``signature_set`` and inserts
    any new ones; if ``signature_set`` is empty, deletes every association
    for the bug.
    """
    with transaction_context(self.database) as connection:
        self.logger.debug('bug %s: %s', bug_id, signature_set)
        # If there's no associated signatures, delete everything for this bug id
        if not signature_set:
            sql = """
            DELETE FROM crashstats_bugassociation WHERE bug_id = %s
            """
            execute_no_results(connection, sql, (bug_id,))
            return
        try:
            sql = """
            SELECT signature FROM crashstats_bugassociation WHERE bug_id = %s
            """
            signature_rows = execute_query_fetchall(connection, sql, (bug_id,))
            signatures_db = [x[0] for x in signature_rows]
            # Remove associations that the bug no longer references
            for signature in signatures_db:
                if signature not in signature_set:
                    sql = """
                    DELETE FROM crashstats_bugassociation
                    WHERE signature = %s and bug_id = %s
                    """
                    execute_no_results(connection, sql, (signature, bug_id))
                    self.logger.info('association removed: %s - "%s"', bug_id, signature)
        except SQLDidNotReturnSingleRow:
            # NOTE(review): a fetchall-style helper typically returns an
            # empty list rather than raising this exception -- confirm
            # whether this branch is actually reachable.
            signatures_db = []
        # Add associations that are new in signature_set
        for signature in signature_set:
            if signature not in signatures_db:
                sql = """
                INSERT INTO crashstats_bugassociation (signature, bug_id)
                VALUES (%s, %s)
                """
                execute_no_results(connection, sql, (signature, bug_id))
                self.logger.info('association added: %s - "%s"', bug_id, signature)
def copy(self):
    """Return a dict mapping each app_name to its job-state row."""
    sql = """SELECT app_name, next_run, first_run, last_run, last_success,
        depends_on, error_count, last_error, ongoing
    FROM cron_job
    """
    columns = (
        'app_name', 'next_run', 'first_run', 'last_run', 'last_success',
        'depends_on', 'error_count', 'last_error', 'ongoing'
    )
    # 'jobs' rather than 'all' so we don't shadow the builtin
    jobs = {}
    with transaction_context(self.database_class) as connection:
        for record in execute_query_iter(connection, sql):
            row = dict(zip(columns, record))
            jobs[row.pop('app_name')] = row
    return jobs
def get_max_major_version(self, product_name):
    """Retrieve the max major version for this product.

    :arg str product_name: the name of the product

    :returns: maximum major version as an int or None

    """
    sql = """
    SELECT max(major_version)
    FROM crashstats_productversion
    WHERE product_name = %s
    """
    with transaction_context(self.database) as conn:
        cursor = conn.cursor()
        cursor.execute(sql, (product_name,))
        rows = cursor.fetchall()
    if not rows:
        return None
    return rows[0][0]
def get_max_major_version(self, product_name):
    """Retrieves the max major version for this product

    :arg str product_name: the name of the product

    :returns: maximum major version as an int or None

    """
    with transaction_context(self.database) as conn:
        cursor = conn.cursor()
        cursor.execute(
            """
            SELECT max(major_version)
            FROM crashstats_productversion
            WHERE product_name = %s
            """,
            (product_name,)
        )
        rows = cursor.fetchall()
        # No rows at all (not even a NULL aggregate) -> no answer
        return rows[0][0] if rows else None
def __delitem__(self, key):
    """remove the item by key or raise KeyError"""
    with transaction_context(self.database_class) as connection:
        try:
            # existence check; the returned value is not used
            single_value_sql(
                connection,
                """SELECT app_name FROM cron_job WHERE app_name = %s""",
                (key,)
            )
        except SQLDidNotReturnSingleValue:
            raise KeyError(key)
        # the row exists, so delete it
        execute_no_results(
            connection,
            """DELETE FROM cron_job WHERE app_name = %s""",
            (key,)
        )
def items(self):
    """return all the app_names and their values as tuples"""
    sql = """
    SELECT app_name, next_run, first_run, last_run, last_success,
        depends_on, error_count, last_error
    FROM cron_job"""
    columns = (
        'app_name', 'next_run', 'first_run', 'last_run',
        'last_success', 'depends_on', 'error_count', 'last_error'
    )
    with transaction_context(self.database_class) as conn:
        rows = [dict(zip(columns, rec)) for rec in execute_query_fetchall(conn, sql)]
    # Pair each row with its app_name key, removing it from the value dict
    return [(row.pop('app_name'), row) for row in rows]
def copy(self):
    """Return a snapshot dict of every job row keyed by app_name."""
    with transaction_context(self.database_class) as connection:
        sql = """SELECT app_name, next_run, first_run, last_run, last_success,
            depends_on, error_count, last_error, ongoing
        FROM cron_job
        """
        columns = (
            'app_name', 'next_run', 'first_run', 'last_run', 'last_success',
            'depends_on', 'error_count', 'last_error', 'ongoing'
        )
        # named 'snapshot' to avoid shadowing the builtin all()
        snapshot = {}
        for record in execute_query_iter(connection, sql):
            row = dict(zip(columns, record))
            snapshot[row.pop('app_name')] = row
        return snapshot
def has_data(self):
    """Return True if the cron_job table contains at least one row."""
    count_sql = 'SELECT count(*) FROM cron_job'
    with transaction_context(self.database_class) as conn:
        row_count = single_value_sql(conn, count_sql)
        return bool(row_count)
def __setitem__(self, key, value):
    """Insert or update the cron_job row whose app_name is ``key``.

    Takes a row-level lock (SELECT ... FOR UPDATE NOWAIT) to decide between
    UPDATE and INSERT. Raises RowLevelLockError if another transaction holds
    the lock, or if a concurrent insert wins the race on the unique
    app_name index.
    """
    class LastErrorEncoder(json.JSONEncoder):
        # last_error may contain exception *classes*; serialize them as repr()
        def default(self, obj):
            if isinstance(obj, type):
                return repr(obj)
            return json.JSONEncoder.default(self, obj)
    with transaction_context(self.database_class) as connection:
        try:
            single_value_sql(
                connection,
                """
                SELECT ongoing
                FROM cron_job
                WHERE app_name = %s
                FOR UPDATE NOWAIT
                """,
                (key, ))
            # If the above single_value_sql() didn't raise a
            # SQLDidNotReturnSingleValue exception, it means
            # there is a row by this app_name.
            # Therefore, the next SQL is an update.
            next_sql = """
            UPDATE cron_job
            SET
                next_run = %(next_run)s,
                first_run = %(first_run)s,
                last_run = %(last_run)s,
                last_success = %(last_success)s,
                depends_on = %(depends_on)s,
                error_count = %(error_count)s,
                last_error = %(last_error)s,
                ongoing = %(ongoing)s
            WHERE app_name = %(app_name)s
            """
        except OperationalError as exception:
            # Another transaction holds the row lock (NOWAIT failed)
            if 'could not obtain lock' in exception.args[0]:
                raise RowLevelLockError(exception.args[0])
            else:
                raise
        except SQLDidNotReturnSingleValue:
            # the key does not exist, do an insert
            next_sql = """
            INSERT INTO cron_job (
                app_name,
                next_run,
                first_run,
                last_run,
                last_success,
                depends_on,
                error_count,
                last_error,
                ongoing
            ) VALUES (
                %(app_name)s,
                %(next_run)s,
                %(first_run)s,
                %(last_run)s,
                %(last_success)s,
                %(depends_on)s,
                %(error_count)s,
                %(last_error)s,
                %(ongoing)s
            )
            """
        # serialize last_error if it's a {}
        last_error = value['last_error']
        if isinstance(last_error, dict):
            last_error = json.dumps(value['last_error'], cls=LastErrorEncoder)
        parameters = {
            'app_name': key,
            'next_run': value['next_run'],
            'first_run': value['first_run'],
            'last_run': value['last_run'],
            'last_success': value.get('last_success'),
            'depends_on': value['depends_on'],
            'error_count': value['error_count'],
            'last_error': last_error,
            'ongoing': value.get('ongoing'),
        }
        try:
            execute_no_results(connection, next_sql, parameters)
        except IntegrityError as exception:
            # See CREATE_CRONTABBER_APP_NAME_UNIQUE_INDEX for why
            # we know to look for this mentioned in the error message.
            if 'crontabber_unique_app_name_idx' in exception.args[0]:
                raise RowLevelLockError(exception.args[0])
            raise
def __setitem__(self, key, value):
    """Insert or update the cron_job row whose app_name is ``key``.

    Takes a row-level lock (SELECT ... FOR UPDATE NOWAIT) to decide between
    UPDATE and INSERT. Raises RowLevelLockError if another transaction holds
    the lock, or if a concurrent insert wins the race on the unique
    app_name index.
    """
    class LastErrorEncoder(json.JSONEncoder):
        # last_error may contain exception *classes*; serialize them as repr()
        def default(self, obj):
            if isinstance(obj, type):
                return repr(obj)
            return json.JSONEncoder.default(self, obj)
    with transaction_context(self.database_class) as connection:
        try:
            single_value_sql(
                connection,
                """
                SELECT ongoing
                FROM cron_job
                WHERE app_name = %s
                FOR UPDATE NOWAIT
                """,
                (key,)
            )
            # If the above single_value_sql() didn't raise a
            # SQLDidNotReturnSingleValue exception, it means
            # there is a row by this app_name.
            # Therefore, the next SQL is an update.
            next_sql = """
            UPDATE cron_job
            SET
                next_run = %(next_run)s,
                first_run = %(first_run)s,
                last_run = %(last_run)s,
                last_success = %(last_success)s,
                depends_on = %(depends_on)s,
                error_count = %(error_count)s,
                last_error = %(last_error)s,
                ongoing = %(ongoing)s
            WHERE app_name = %(app_name)s
            """
        except OperationalError as exception:
            # Another transaction holds the row lock (NOWAIT failed)
            if 'could not obtain lock' in exception.args[0]:
                raise RowLevelLockError(exception.args[0])
            else:
                raise
        except SQLDidNotReturnSingleValue:
            # the key does not exist, do an insert
            next_sql = """
            INSERT INTO cron_job (
                app_name,
                next_run,
                first_run,
                last_run,
                last_success,
                depends_on,
                error_count,
                last_error,
                ongoing
            ) VALUES (
                %(app_name)s,
                %(next_run)s,
                %(first_run)s,
                %(last_run)s,
                %(last_success)s,
                %(depends_on)s,
                %(error_count)s,
                %(last_error)s,
                %(ongoing)s
            )
            """
        # serialize last_error if it's a {}
        last_error = value['last_error']
        if isinstance(last_error, dict):
            last_error = json.dumps(value['last_error'], cls=LastErrorEncoder)
        parameters = {
            'app_name': key,
            'next_run': value['next_run'],
            'first_run': value['first_run'],
            'last_run': value['last_run'],
            'last_success': value.get('last_success'),
            'depends_on': value['depends_on'],
            'error_count': value['error_count'],
            'last_error': last_error,
            'ongoing': value.get('ongoing'),
        }
        try:
            execute_no_results(connection, next_sql, parameters)
        except IntegrityError as exception:
            # See CREATE_CRONTABBER_APP_NAME_UNIQUE_INDEX for why
            # we know to look for this mentioned in the error message.
            if 'crontabber_unique_app_name_idx' in exception.args[0]:
                raise RowLevelLockError(exception.args[0])
            raise
def has_data(self):
    """True when at least one job row exists in cron_job."""
    with transaction_context(self.database_class) as conn:
        total = single_value_sql(conn, 'SELECT count(*) FROM cron_job')
        return total > 0 if isinstance(total, int) else bool(total)