def test_cast_array_to_bigint():
    conn = _create_connection()
    cursor = conn.cursor()
    cursor.execute(
        "SELECT cast_array_to_bigint_safe(ARRAY[-9223372036854775809, 9223372036854775808, 12]);"
    )
    res = cursor.fetchall()
    # Values outside the bigint range are clamped to the bigint minimum/maximum.
    assert res == [([-9223372036854775808L, 9223372036854775807L, 12L],)]


def test_aggregate_histograms():
    conn = _create_connection()
    cursor = conn.cursor()
    cursor.execute("""
        SELECT aggregate_histograms(t.histos) AS aggregates
        FROM (VALUES (ARRAY[1,1,1,1]), (ARRAY[1,1,1,1,1])) AS t(histos)
    """)
    res = cursor.fetchall()
    assert res == [([2, 2, 1, 2, 2],)]


def test_notice_logging_cursor():
    conn = _create_connection()
    cursor = conn.cursor(cursor_factory=NoticeLoggingCursor)
    expected = ('py4j', 'WARNING',
                'WARNING: Truncating positive value(s) too large for bigint in array: {9223372036854775808}')
    with LogCapture("py4j") as lc:
        cursor.execute("SELECT cast_array_to_bigint_safe(ARRAY[9223372036854775808]);")
        lc.check(expected)


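# NoticeLoggingCursor is defined elsewhere in this project. Below is a minimal
# sketch of what such a psycopg2 cursor factory could look like, given only the
# expectations in test_notice_logging_cursor above; the class name, logger name,
# and log level here are illustrative assumptions, not the project's actual code.
import logging

import psycopg2.extensions


class _ExampleNoticeLoggingCursor(psycopg2.extensions.cursor):  # hypothetical sketch
    def execute(self, query, vars=None):
        try:
            return super(_ExampleNoticeLoggingCursor, self).execute(query, vars)
        finally:
            # psycopg2 appends server NOTICE/WARNING messages to connection.notices;
            # forward them to the logger the test captures, then clear the list.
            for notice in self.connection.notices:
                logging.getLogger("py4j").warning(notice.strip())
            del self.connection.notices[:]

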
def test_new_db_functions_backwards_compatible():
    conn = _create_connection()
    cursor = conn.cursor()

    old_query = 'SELECT * FROM batched_get_metric(%s, %s, %s, %s, %s)'
    cursor.execute(old_query, (
        'submission_date', 'nightly', '41', [SUBMISSION_DATE_1.strftime(DATE_FMT)],
        json.dumps({'metric': 'GC_MAX_PAUSE_MS_2', 'child': 'true'})))
    # Just 1 result since this is 1 date and not a keyed histogram
    result = cursor.fetchall()
    assert len(result) == 1, result

    new_query = 'SELECT * FROM batched_get_metric(%s, %s, %s, %s, %s, %s)'
    cursor.execute(new_query, (
        'submission_date', 'nightly', '41', [SUBMISSION_DATE_1.strftime(DATE_FMT)],
        json.dumps({'metric': 'GC_MAX_PAUSE_MS_2', 'child': 'true'}),
        json.dumps({'metric': 'DEVTOOLS_PERFTOOLS_RECORDING_FEATURES_USED'})))
    # 1 for the non-keyed histogram, 1 for the 1 key of the keyed histogram
    # Note we don't actually use batched_get_metric for multiple metrics,
    # but this behavior is expected
    assert len(cursor.fetchall()) == 2


def test_connection():
    db = _create_connection()
    assert db


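# _create_connection is a helper defined elsewhere in this module. A minimal
# sketch of what it could look like, assuming psycopg2 and a local test
# database, is shown below; the helper name, database name, and connection
# options here are assumptions for illustration, not the project's actual
# configuration.
import psycopg2


def _example_create_connection(dbname="postgres", host="localhost"):
    # Hypothetical helper: open a psycopg2 connection suitable for the tests above.
    conn = psycopg2.connect(dbname=dbname, host=host)
    conn.autocommit = True  # assumption: each test statement stands on its own
    return conn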