def get_pdts_buildtimes(self) -> Tuple[int, int, int]:
    """Bucket PDTs build times into categories.

    Runs a system__activity query over the last 24 hours of completed PDT
    builds and counts how many fall into each build-time bucket via Looker
    table calculations.

    Returns:
        build_less_30: The number of PDTs building in less than 30min.
        build_30_60: The number of PDTs building between 30 and 60min.
        build_more_60: The number of PDTs building in more than 60min.
    """
    body = models.WriteQuery(
        model="system__activity",
        view="pdt_builds",
        fields=[
            "pdt_builds.view_name", "pdt_builds.connection",
            "pdt_builds.average_build_time_minutes"
        ],
        filters={
            "pdt_builds.start_date": "24 hours",
            "pdt_builds.status": "done"
        },
        sorts=["pdt_builds.average_build_time_minutes desc"],
        limit="500",
        dynamic_fields=
        '[{\"table_calculation\":\"build_less_30\",\"label\":\"build_less_30\",\"expression\":\"sum(if(${pdt_builds.average_build_time_minutes} < 30 AND NOT is_null(${pdt_builds.average_build_time_minutes}), 1, 0))\",\"value_format\":null,\"value_format_name\":null,\"_kind_hint\":\"measure\",\"_type_hint\":\"number\"},{\"table_calculation\":\"build_30_60\",\"label\":\"build_30_60\",\"expression\":\"sum(if(${pdt_builds.average_build_time_minutes} >= 30 AND ${pdt_builds.average_build_time_minutes}<60,\\n 1, 0))\",\"value_format\":null,\"value_format_name\":null,\"_kind_hint\":\"measure\",\"_type_hint\":\"number\"},{\"table_calculation\":\"build_more_60\",\"label\":\"build_more_60\",\"expression\":\"sum(if(${pdt_builds.average_build_time_minutes} >=60,\\n 1, 0))\",\"value_format\":null,\"value_format_name\":null,\"_kind_hint\":\"measure\",\"_type_hint\":\"number\"}]'
    )
    pdt_build_times_query = self.looker_client.create_query(body)
    pdt_build_times = self.looker_client.run_query(
        pdt_build_times_query.id, result_format='json')
    # Decode the payload once; the original called json.loads three times
    # on the same response string.
    rows = json.loads(pdt_build_times)
    if not rows:
        # No PDT builds in the window — indexing rows[0] would raise.
        return 0, 0, 0
    buckets = rows[0]
    # Table calculations come back as None when no PDT falls in a bucket,
    # hence the `or 0` fallback before int().
    return (int(buckets['build_less_30'] or 0),
            int(buckets['build_30_60'] or 0),
            int(buckets['build_more_60'] or 0))
def get_pdts_status(self) -> Tuple[Optional[List[str]], int]:
    """Finds PDTs with issues.

    Queries the last 24 hours of the PDT event log for error actions and
    deduplicates the failures into human-readable messages.

    Returns:
        cleaned_errors: The list of unique errors found for the PDTs, or
            None when no failures were found.
        The number of failed PDTs.
    """
    body = models.WriteQuery(
        model="system__activity",
        view="pdt_event_log",
        fields=["pdt_event_log.view_name", "pdt_event_log.connection"],
        filters={
            "pdt_event_log.action": "%error%",
            "pdt_event_log.created_time": "24 hours"
        },
        sorts=["pdt_event_log.connection"],
        limit="5000")
    failed_pdts = self.looker_client.create_query(body)
    failed_pdts_list = self.looker_client.run_query(failed_pdts.id,
                                                    result_format='json')
    # Decode once; the original parsed the payload twice and iterated the
    # decoded list *before* its truthiness check, so a falsy payload would
    # have crashed json.loads.
    failures = json.loads(failed_pdts_list) if failed_pdts_list else []
    if not failures:
        return None, 0
    # A set comprehension removes duplicate error messages.
    cleaned_errors = list({
        "PDT '{}' failed on connection: {}".format(
            elem['pdt_event_log.view_name'],
            elem['pdt_event_log.connection']) for elem in failures
    })
    return cleaned_errors, len(failures)
def nodes_matching(self) -> List[str]:
    """For clusters, checks if the nodes are on same Looker version.

    Returns:
        diff_node_version: The list with information about nodes and
            versions.
    """
    body = models.WriteQuery(
        model="system__activity",
        view="history",
        # Note: the original listed node.last_heartbeat_time twice.
        fields=["node.id", "node.version", "node.last_heartbeat_time"],
        filters={"node.last_heartbeat_date": "1 days"},
        sorts=["node.last_heartbeat_time desc"],
        limit="500",
        vis_config={
            "hidden_fields": [
                "node.id", "node.version", "node.last_heartbeat_time",
                "most_recent_heartbeat", "node.count"
            ]
        },
        dynamic_fields=
        "[{\"table_calculation\":\"most_recent_heartbeat\",\"label\":\"most_recent_heartbeat\",\"expression\":\"diff_minutes(${node.last_heartbeat_time}, now())\",\"value_format\":null,\"value_format_name\":null,\"_kind_hint\":\"dimension\",\"_type_hint\":\"number\"},{\"table_calculation\":\"node_version_at_last_beat\",\"label\":\"node_version_at_last_beat\",\"expression\":\"if(diff_minutes(${node.last_heartbeat_time}, now()) > ${most_recent_heartbeat}*1.10 OR diff_minutes(${node.last_heartbeat_time}, now()) < ${most_recent_heartbeat}*0.90, ${node.version}, null)\",\"value_format\":null,\"value_format_name\":null,\"_kind_hint\":\"dimension\",\"_type_hint\":\"string\"}]"
    )
    node_check = self.looker_client.create_query(body)
    nodes_versions = self.looker_client.run_query(node_check.id,
                                                  result_format='json')
    # Collect the version *strings* for nodes with a recent heartbeat.
    # The original kept whole row dicts, which are unhashable and made
    # set(results) / Counter(results) raise TypeError.
    results = [
        row['node_version_at_last_beat']
        for row in json.loads(nodes_versions)
        if row['node_version_at_last_beat']  # skip stale (None) heartbeats
    ]
    diff_node_version = []
    if len(set(results)) == 1:
        diff_node_version.append(
            "All {} Nodes found on same Looker version".format(len(results)))
    else:
        for version, count in Counter(results).items():
            diff_node_version.append("{} nodes found on version {}".format(
                count, version))
    return diff_node_version
def test_enum(sdk: mtds.Looker40SDK):
    # TODO: there is currently no example in the Looker API of a "bare"
    # ForwardRef property on a model that is returned by the API. We
    # have unittests deserializing into "bare" ForwardRef properties,
    # that will have to do for now.
    dashboard_query = ml.WriteQuery(
        model="system__activity",
        view="dashboard",
        fields=["dashboard.id", "dashboard.title", "dashboard.count"],
    )
    created_query = sdk.create_query(dashboard_query)
    assert created_query.id
    query_task = sdk.create_query_task(
        ml.WriteCreateQueryTask(
            query_id=created_query.id,
            source="test",
            result_format=ml.ResultFormat.csv,
        ))
    # The API echoes result_format back as a plain str, not as the
    # ResultFormat enum member.
    assert ml.ResultFormat.csv.value == query_task.result_format
def unlimited_downloads(self) -> Tuple[Optional[str], str]:
    """Returns unlimited downloads information from instance.

    Looks for queries in the last 24 hours that requested more than 5000
    rows (excluding regenerator/suggest sources).

    Returns:
        results: The summary of unlimited results from query, or None when
            no such queries were found.
        unltd_downloads.share_url: The link to access result in Looker.
    """
    body = models.WriteQuery(
        model="system__activity",
        view="history",
        fields=[
            "history.created_time", "query.link", "user.id", "user.name",
            "history.source", "query.limit"
        ],
        filters={
            "history.created_time": "24 hours",
            "history.source": "-regenerator,-suggest",
            "query.limit": ">5000"
        },
        sorts=["history.created_time desc"],
        limit="500")
    unltd_downloads = self.looker_client.create_query(body)
    unlimited_downloads = self.looker_client.run_query(
        unltd_downloads.id, result_format='json')
    # Decode once; the original parsed the payload twice and tested the raw
    # JSON string, so an empty result set ("[]" is truthy) still produced a
    # misleading "0 users" summary instead of the None branch.
    rows = json.loads(unlimited_downloads) if unlimited_downloads else []
    if not rows:
        return None, unltd_downloads.share_url
    unltd_source = [query['history.source'] for query in rows]
    unltd_users = [query['user.id'] for query in rows]
    results = "{} users have ran queries with more than 5000 rows \
from these sources: {}".format(len(set(unltd_users)), list(set(unltd_source)))
    return results, unltd_downloads.share_url
def create_query_request(q, limit: Optional[str] = None) -> ml.WriteQuery:
    """Build a WriteQuery from a query-spec mapping.

    Args:
        q: Mapping with optional query attributes (missing keys become
            None on the resulting WriteQuery).
        limit: Fallback row limit applied when the spec has no "limit".
    """
    # NOTE(review): the spec keys "filter_expressions" and "subtotal" do not
    # match the WriteQuery field names (filter_expression / subtotals) —
    # confirm the mismatch is intentional before renaming either side.
    spec = {
        "model": q.get("model"),
        "view": q.get("view"),
        "fields": q.get("fields"),
        "pivots": q.get("pivots"),
        "fill_fields": q.get("fill_fields"),
        "filters": q.get("filters"),
        "filter_expression": q.get("filter_expressions"),
        "sorts": q.get("sorts"),
        "limit": q.get("limit") or limit,
        "column_limit": q.get("column_limit"),
        "total": q.get("total"),
        "row_total": q.get("row_total"),
        "subtotals": q.get("subtotal"),
        "vis_config": q.get("vis_config"),
        "filter_config": q.get("filter_config"),
        "visible_ui_sections": q.get("visible_ui_sections"),
        "dynamic_fields": q.get("dynamic_fields"),
        "client_id": q.get("client_id"),
        "query_timezone": q.get("query_timezone"),
    }
    return ml.WriteQuery(**spec)
def get_inactive_users(self) -> int:
    """Returns users without login data for past 90 days.

    Counts enabled, non-Looker-employee users whose last API and UI logins
    are both more than 90 days old.

    Returns:
        The number of inactive users in Looker.
    """
    body = models.WriteQuery(
        model="system__activity",
        view="user",
        fields=["user.id", "user.name"],
        filters={
            "user_facts.is_looker_employee": "no",
            "user.is_disabled": "no"
        },
        # The original wrapped this expression in {...}, accidentally
        # passing a one-element set where the API expects a string.
        filter_expression=(
            "diff_days(${user_facts.last_api_login_date}, now()) > 90 OR "
            "diff_days(${user_facts.last_ui_login_date}, now()) > 90"),
        sorts=["user.id"],
        limit="500")
    users_query = self.looker_client.create_query(body)
    inactive_users = self.looker_client.run_query(users_query.id,
                                                  result_format='json')
    # The original took len() of the raw JSON string, counting characters
    # rather than users; decode first and count the rows.
    return len(json.loads(inactive_users)) if inactive_users else 0
def check_if_clustered(self) -> bool:
    """Checks if Looker is using a clustered setup.

    Returns:
        True when more than one node exists and every node reports itself
        as clustered ("Yes"); False otherwise.
    """
    body = models.WriteQuery(
        model="system__activity",
        view="history",
        fields=["node.clustered", "node.mac_adress", "node.count"],
        filters={"node.mac_adress": "-null"},
        sorts=["node.count desc"],
        limit="500")
    cluster_check = self.looker_client.create_query(body)
    check_clustered = self.looker_client.run_query(cluster_check.id,
                                                   result_format='json')
    # Decode once; the original parsed the payload twice.
    nodes = json.loads(check_clustered)
    if len(nodes) <= 1:
        # Also guards the empty case, where the original's
        # list(set(...))[0] would raise IndexError.
        return False
    # Require every node to report "Yes". The original indexed an arbitrary
    # element of a set, which was nondeterministic when values were mixed.
    return all(node['node.clustered'] == "Yes" for node in nodes)
def get_failed_schedules(self) -> Tuple[Optional[List[str]], int]:
    """Finds failed email schedules.

    Queries this week's scheduled-job runs with a "failure" status
    (excluding run-once plans) and deduplicates them into messages.

    Returns:
        cleaned_errors: The list of unique errors found for failing
            schedules, or None when nothing failed.
        The number of failed schedules.
    """
    body = models.WriteQuery(
        model="system__activity",
        view="scheduled_plan",
        fields=[
            "scheduled_job.status", "scheduled_plan.id",
            "scheduled_job.name", "scheduled_plan_destination.format",
            "scheduled_plan_destination.type", "look.id", "dashboard.id"
        ],
        filters={
            "scheduled_plan.run_once": "no",
            "scheduled_job.status": "failure",
            "scheduled_job.created_date": "this week"
        },
        sorts=["scheduled_job.created_date", "scheduled_plan.id desc"],
        limit="500")
    schedules_query = self.looker_client.create_query(body)
    failed_schedules = self.looker_client.run_query(schedules_query.id,
                                                    result_format='json')
    # Decode once; the original parsed the payload twice and iterated the
    # decoded list *before* its truthiness check, so a falsy payload would
    # have crashed json.loads.
    failures = json.loads(failed_schedules) if failed_schedules else []
    if not failures:
        return None, 0
    # A set comprehension removes duplicate error messages.
    cleaned_errors = list({
        "Schedule '{}' failed to send to {}".format(
            elem['scheduled_job.name'],
            elem['scheduled_plan_destination.type']) for elem in failures
    })
    return cleaned_errors, len(failures)