async def test_logs_batch_query_additional_workspaces():
    """Batch of three cross-workspace queries; every sub-response should contain rows for both workspaces."""
    client = LogsQueryClient(_credential())
    query = "union * | where TimeGenerated > ago(100d) | project TenantId | summarize count() by TenantId"
    primary = os.environ['LOG_WORKSPACE_ID']
    secondary = os.environ['SECONDARY_WORKSPACE_ID']
    requests = [
        LogsQueryRequest(query, timespan="PT1H", workspace_id=primary,
                         additional_workspaces=[secondary]),
        LogsQueryRequest(query, timespan="PT1H", workspace_id=primary,
                         additional_workspaces=[secondary]),
        LogsQueryRequest(query, workspace_id=primary,
                         additional_workspaces=[secondary]),
    ]
    response = await client.batch_query(requests)
    assert len(response.responses) == 3
    # One aggregated TenantId row per workspace => exactly two rows each.
    for resp in response.responses:
        assert len(resp.body.tables[0].rows) == 2
async def test_logs_query_batch_default():
    """query_batch keeps request order; a malformed query yields a LogsQueryError entry."""
    client = LogsQueryClient(_credential())
    workspace = os.environ['LOG_WORKSPACE_ID']
    requests = [
        LogsBatchQuery(query="AzureActivity | summarize count()", timespan=timedelta(hours=1), workspace_id=workspace),
        LogsBatchQuery(query="""AppRequests | take 10 | summarize avgRequestDuration=avg(DurationMs) by bin(TimeGenerated, 10m), _ResourceId""", timespan=timedelta(hours=1), workspace_id=workspace),
        LogsBatchQuery(query="Wrong query | take 2", workspace_id=workspace, timespan=None),
    ]
    response = await client.query_batch(requests)
    assert len(response) == 3
    # First query projects a single aggregate column.
    assert response[0].tables[0].columns == ['count_']
    # Second query: check the leading column names in order.
    leading_columns = ['TimeGenerated', '_ResourceId', 'avgRequestDuration']
    for position, expected in enumerate(leading_columns):
        assert response[1].tables[0].columns[position] == expected
    # Malformed query surfaces as an error object, not an exception.
    assert response[2].__class__ == LogsQueryError
async def test_logs_single_query_fatal_exception():
    """Querying an invalid workspace id must raise HttpResponseError outright."""
    client = LogsQueryClient(_credential())
    with pytest.raises(HttpResponseError):
        await client.query_workspace('bad_workspace_id', 'AppRequests', timespan=None)
async def test_logs_server_timeout():
    """A 1-second server timeout on an expensive query should fail with a gateway timeout.

    Bug fixes vs. original:
    - The assertion lived inside the ``with pytest.raises`` block, so it never
      executed (the awaited call raises first); it is moved after the block.
    - ``e`` is a pytest ``ExceptionInfo`` — the raised exception is ``e.value``,
      and Python strings have no ``.contains()`` method; use the ``in`` operator.
    """
    client = LogsQueryClient(_credential())
    with pytest.raises(HttpResponseError) as e:
        await client.query(
            os.environ['LOG_WORKSPACE_ID'],
            "range x from 1 to 10000000000 step 1 | count",
            server_timeout=1,
        )
    assert 'Gateway timeout' in e.value.message
async def test_logs_auth_no_timespan():
    """Omitting the required ``timespan`` argument must raise TypeError."""
    client = LogsQueryClient(_credential())
    kusto = """AppRequests | where TimeGenerated > ago(12h) | summarize avgRequestDuration=avg(DurationMs) by bin(TimeGenerated, 10m), _ResourceId"""
    with pytest.raises(TypeError):
        await client.query_workspace(os.environ['LOG_WORKSPACE_ID'], kusto)
async def test_logs_auth():
    """A basic authenticated query returns a non-empty result object with tables."""
    client = LogsQueryClient(_credential())
    kusto = """AppRequests | where TimeGenerated > ago(12h) | summarize avgRequestDuration=avg(DurationMs) by bin(TimeGenerated, 10m), _ResourceId"""
    # returns LogsQueryResults
    result = await client.query(os.environ['LOG_WORKSPACE_ID'], kusto)
    assert result is not None
    assert result.tables is not None
async def test_logs_single_query_with_render():
    """Passing include_visualization=True should populate the visualization payload."""
    client = LogsQueryClient(_credential())
    result = await client.query_workspace(
        os.environ['LOG_WORKSPACE_ID'],
        """AppRequests | take 10""",
        timespan=None,
        include_visualization=True,
    )
    assert result.visualization is not None
async def test_logs_single_query_partial_exception_not_allowed():
    """A query that only partially succeeds comes back as LogsQueryPartialResult with a typed error."""
    client = LogsQueryClient(_credential())
    kusto = """let Weight = 92233720368547758; range x from 1 to 3 step 1 | summarize percentilesw(x, Weight * 100, 50)"""
    result = await client.query_workspace(
        os.environ['LOG_WORKSPACE_ID'], kusto, timespan=timedelta(days=1))
    assert result.__class__ == LogsQueryPartialResult
    partial = result.partial_error
    assert partial is not None
    assert partial.code == 'PartialError'
    assert partial.__class__ == LogsQueryError
async def test_logs_single_query_additional_workspaces_async():
    """A single query fanned out to a secondary workspace returns rows for both tenants."""
    client = LogsQueryClient(_credential())
    kusto = "union * | where TimeGenerated > ago(100d) | project TenantId | summarize count() by TenantId"
    # returns LogsQueryResults
    result = await client.query(
        os.environ['LOG_WORKSPACE_ID'],
        kusto,
        additional_workspaces=[os.environ["SECONDARY_WORKSPACE_ID"]],
    )
    assert result
    # One aggregated row per workspace tenant.
    assert len(result.tables[0].rows) == 2
async def test_logs_batch_query():
    """batch_query returns exactly one response per submitted request."""
    client = LogsQueryClient(_credential())
    workspace = os.environ['LOG_WORKSPACE_ID']
    requests = [
        LogsQueryRequest(query="AzureActivity | summarize count()", timespan="PT1H", workspace=workspace),
        LogsQueryRequest(query="""AppRequests | take 10 | summarize avgRequestDuration=avg(DurationMs) by bin(TimeGenerated, 10m), _ResourceId""", timespan="PT1H", workspace=workspace),
        LogsQueryRequest(query="AppRequests | take 2", workspace=workspace),
    ]
    response = await client.batch_query(requests)
    assert len(response.responses) == 3
async def logs_query():
    """Sample: run a Kusto query and print the results, with and without pandas.

    Reads service-principal credentials (``AZURE_CLIENT_ID``,
    ``AZURE_CLIENT_SECRET``, ``AZURE_TENANT_ID``) and the target workspace id
    (``LOG_WORKSPACE_ID``) from environment variables.

    Bug fixes vs. original:
    - Removed a dangling ``\"\"\"`` at the end of the function that opened an
      unterminated string literal (a syntax error); the sample-output text it
      belonged to is now an ordinary comment.
    - Row items can be non-strings (e.g. float durations), so concatenating
      ``item + " | "`` raised TypeError; items are now converted with ``str()``.
    """
    credential = DefaultAzureCredential(
        client_id=os.environ['AZURE_CLIENT_ID'],
        client_secret=os.environ['AZURE_CLIENT_SECRET'],
        tenant_id=os.environ['AZURE_TENANT_ID'])
    client = LogsQueryClient(credential)

    # Response time trend: request duration over the last 12 hours.
    query = """AppRequests | where TimeGenerated > ago(12h) | summarize avgRequestDuration=avg(DurationMs) by bin(TimeGenerated, 10m), _ResourceId"""

    # returns LogsQueryResult
    async with client:
        response = await client.query(os.environ['LOG_WORKSPACE_ID'], query, timespan=None)

    if not response.tables:
        print("No results for the query")

    for table in response.tables:
        df = pd.DataFrame(table.rows, columns=[col.name for col in table.columns])
        print(df)
    # Example output:
    #        TimeGenerated                                        _ResourceId  avgRequestDuration
    # 0 2021-05-27T08:40:00Z  /subscriptions/faa080af-c1d8-40ad-9cce-e1a450c...  27.307699999999997
    # 1 2021-05-27T08:50:00Z  /subscriptions/faa080af-c1d8-40ad-9cce-e1a450c...            18.11655
    # 2 2021-05-27T09:00:00Z  /subscriptions/faa080af-c1d8-40ad-9cce-e1a450c...             24.5271

    # If you don't want to use pandas, process response.tables directly.
    for table in response.tables:
        for col in table.columns:  # LogsQueryResultColumn
            print(col.name + "/" + col.type + " | ", end="")
        print("\n")
        for row in table.rows:
            for item in row:
                print(str(item) + " | ", end="")
            print("\n")
async def test_logs_query_result_row_type():
    """Iterating a result yields LogsTable objects whose rows are LogsTableRow."""
    client = LogsQueryClient(_credential())
    result = await client.query_workspace(
        os.environ['LOG_WORKSPACE_ID'],
        "AppRequests | take 5",
        timespan=None,
    )
    # The result object itself iterates over its tables.
    for table in result:
        assert table.__class__ == LogsTable
        for row in table.rows:
            assert row.__class__ == LogsTableRow
async def test_logs_query_result_iterate_over_tables():
    """A multi-statement query yields two tables plus statistics and visualization."""
    client = LogsQueryClient(_credential())
    result = await client.query_workspace(
        os.environ['LOG_WORKSPACE_ID'],
        "AppRequests | take 10; AppRequests | take 5",
        timespan=None,
        include_statistics=True,
        include_visualization=True,
    )
    # Iterating the result walks its tables.
    for entry in result:
        assert entry.__class__ == LogsTable
    assert result.statistics is not None
    assert result.visualization is not None
    assert len(result.tables) == 2
    assert result.__class__ == LogsQueryResult
async def logs_query():
    """Sample: query a workspace and print each table, handling partial results and fatal errors."""
    client = LogsQueryClient(DefaultAzureCredential())
    try:
        response = await client.query_workspace(
            os.environ['LOGS_WORKSPACE_ID'],
            """AppRequests | take 5""",
            timespan=timedelta(days=1))
        if response.status == LogsQueryStatus.PARTIAL:
            # Partial success: report the error but keep the partial data.
            error = response.partial_error
            data = response.partial_data
            print(error.message)
        elif response.status == LogsQueryStatus.SUCCESS:
            data = response.tables
        for table in data:
            df = pd.DataFrame(data=table.rows, columns=table.columns)
            print(df)
    except HttpResponseError as err:
        print("something fatal happened")
        print(err)
async def test_logs_batch_query_partial_exception_not_allowed():
    """Mixed batch: a success, a hard error, and a partial result, each typed accordingly."""
    client = LogsQueryClient(_credential())
    workspace = os.environ['LOG_WORKSPACE_ID']
    requests = [
        LogsBatchQuery(query="AzureActivity | summarize count()", timespan=timedelta(hours=1), workspace_id=workspace),
        LogsBatchQuery(query="""bad query | take 10""", timespan=(datetime(2021, 6, 2), timedelta(days=1)), workspace_id=workspace),
        LogsBatchQuery(query="""let Weight = 92233720368547758; range x from 1 to 3 step 1 | summarize percentilesw(x, Weight * 100, 50)""", workspace_id=workspace, timespan=(datetime(2021, 6, 2), datetime(2021, 6, 3)), include_statistics=True),
    ]
    responses = await client.query_batch(requests)
    # Each request maps positionally onto its expected result type.
    expected_types = [LogsQueryResult, LogsQueryError, LogsQueryPartialResult]
    for actual, expected in zip(responses, expected_types):
        assert actual.__class__ == expected
async def test_logs_batch_query_fatal_exception():
    """A bad client secret must make the entire batch fail with HttpResponseError."""
    credential = ClientSecretCredential(
        client_id=os.environ['AZURE_CLIENT_ID'],
        client_secret='bad_secret',
        tenant_id=os.environ['AZURE_TENANT_ID'])
    client = LogsQueryClient(credential)
    workspace = os.environ['LOG_WORKSPACE_ID']
    requests = [
        LogsBatchQuery(query="AzureActivity | summarize count()", timespan=timedelta(hours=1), workspace_id=workspace),
        LogsBatchQuery(query="""AppRequestsss | take 10""", timespan=(datetime(2021, 6, 2), timedelta(days=1)), workspace_id=workspace),
        LogsBatchQuery(query="""let Weight = 92233720368547758; range x from 1 to 3 step 1 | summarize percentilesw(x, Weight * 100, 50)""", workspace_id=workspace, timespan=(datetime(2021, 6, 2), datetime(2021, 6, 3)), include_statistics=True),
    ]
    with pytest.raises(HttpResponseError):
        await client.query_batch(requests)