def run(self):
    """Execute the query against its data source and persist the result.

    Returns the stored ``QueryResult`` id on success. Raises
    ``QueryExecutionError`` when the runner reported an error and
    produced no data. On success, resets the scheduled query's failure
    counter; on failure, records the failure via ``track_failure``.
    """
    signal.signal(signal.SIGINT, signal_handler)
    started_at = time.time()

    logger.debug("Executing query:\n%s", self.query)
    self._log_progress('executing_query')

    query_runner = self.data_source.query_runner
    annotated_query = self._annotate_query(query_runner)

    try:
        data, error = query_runner.run_query(annotated_query, self.user)
    except Exception as e:
        # Map the soft time limit to a user-friendly message; any other
        # exception surfaces its own text as the error.
        if isinstance(e, SoftTimeLimitExceeded):
            error = TIMEOUT_MESSAGE
        else:
            error = text_type(e)
        data = None
        # Fix: use the module-level logger (as the rest of this function
        # does) instead of the root logger.
        logger.warning('Unexpected error while running query:', exc_info=1)

    run_time = time.time() - started_at

    logger.info(u"task=execute_query query_hash=%s data_length=%s error=[%s]",
                self.query_hash, data and len(data), error)

    _unlock(self.query_hash, self.data_source.id)

    if error is not None and data is None:
        result = QueryExecutionError(error)
        if self.scheduled_query is not None:
            # Re-attach the (detached) scheduled query to this session
            # before recording the failure.
            self.scheduled_query = models.db.session.merge(
                self.scheduled_query, load=False)
            track_failure(self.scheduled_query, error)
        raise result
    else:
        if (self.scheduled_query and
                self.scheduled_query.schedule_failures > 0):
            # A successful run clears the failure streak.
            self.scheduled_query = models.db.session.merge(
                self.scheduled_query, load=False)
            self.scheduled_query.schedule_failures = 0
            models.db.session.add(self.scheduled_query)

        query_result = models.QueryResult.store_result(
            self.data_source.org_id, self.data_source, self.query_hash,
            self.query, data, run_time, utcnow())

        updated_query_ids = models.Query.update_latest_result(query_result)

        models.db.session.commit()  # make sure that alert sees the latest query result
        self._log_progress('checking_alerts')
        for query_id in updated_query_ids:
            check_alerts_for_query.delay(query_id)
        self._log_progress('finished')

        result = query_result.id
    models.db.session.commit()
    return result
def _apply_default_parameters(query):
    """Render the query text with its stored default parameter values.

    Returns the raw ``query.query_text`` untouched when no parameter has
    a truthy name. When rendering fails, records the failure on the
    query via ``track_failure`` and re-raises the original exception.
    """
    parameters = {p["name"]: p.get("value") for p in query.parameters}

    # Guard clause: nothing to substitute, return the text as-is.
    if not any(parameters):
        return query.query_text

    try:
        return query.parameterized.apply(parameters).query
    except InvalidParameterError as e:
        error = u"Skipping refresh of {} because of invalid parameters: {}".format(
            query.id, str(e))
        track_failure(query, error)
        raise
    except QueryDetachedFromDataSourceError as e:
        error = ("Skipping refresh of {} because a related dropdown "
                 "query ({}) is unattached to any datasource.").format(
                     query.id, e.query_id)
        track_failure(query, error)
        raise
def refresh_queries():
    """Enqueue execution jobs for every outdated scheduled query.

    Skips queries when the global refresh kill switch is on, the org is
    disabled, the data source is missing or paused, or the stored
    parameters fail to render (the latter is recorded as a failure).
    Publishes refresh statistics to statsd and Redis.
    """
    logger.info("Refreshing queries...")
    outdated_queries_count = 0
    query_ids = []

    with statsd_client.timer("manager.outdated_queries_lookup"):
        for query in models.Query.outdated_queries():
            if settings.FEATURE_DISABLE_REFRESH_QUERIES:
                # Global kill switch for scheduled refreshes.
                # Fix: use the module logger, not the root logger.
                logger.info("Disabled refresh queries.")
            elif query.org.is_disabled:
                logger.debug("Skipping refresh of %s because org is disabled.", query.id)
            elif query.data_source is None:
                logger.debug("Skipping refresh of %s because the datasource is none.", query.id)
            elif query.data_source.paused:
                logger.debug(
                    "Skipping refresh of %s because datasource - %s is paused (%s).",
                    query.id,
                    query.data_source.name,
                    query.data_source.pause_reason,
                )
            else:
                query_text = query.query_text

                parameters = {p["name"]: p.get("value") for p in query.parameters}
                if any(parameters):
                    try:
                        query_text = query.parameterized.apply(parameters).query
                    except InvalidParameterError as e:
                        error = u"Skipping refresh of {} because of invalid parameters: {}".format(
                            query.id, str(e))
                        track_failure(query, error)
                        continue
                    except QueryDetachedFromDataSourceError as e:
                        error = (
                            "Skipping refresh of {} because a related dropdown "
                            "query ({}) is unattached to any datasource."
                        ).format(query.id, e.query_id)
                        track_failure(query, error)
                        continue

                enqueue_query(
                    query_text,
                    query.data_source,
                    query.user_id,
                    scheduled_query=query,
                    metadata={
                        "Query ID": query.id,
                        "Username": "******"
                    },
                )

                query_ids.append(query.id)
                outdated_queries_count += 1

    statsd_client.gauge("manager.outdated_queries", outdated_queries_count)
    # Fix: pass lazy %-args to the logger instead of pre-formatting.
    logger.info("Done refreshing queries. Found %d outdated queries: %s",
                outdated_queries_count, query_ids)

    status = redis_connection.hgetall("redash:status")
    now = time.time()

    redis_connection.hmset(
        "redash:status",
        {
            "outdated_queries_count": outdated_queries_count,
            "last_refresh_at": now,
            "query_ids": json_dumps(query_ids),
        },
    )

    statsd_client.gauge("manager.seconds_since_refresh",
                        now - float(status.get("last_refresh_at", now)))
def run(self):
    """Run the query and store its result, emitting job-progress logs.

    Returns the new ``QueryResult`` id, or raises ``QueryExecutionError``
    when the runner produced an error and no data.
    """
    started_at = time.time()
    signal.signal(signal.SIGINT, signal_handler)

    # Time the job spent waiting in the queue before a worker picked it up.
    # NOTE(review): assumes "Enqueue Time" is an epoch timestamp; when it is
    # absent the 0.0 default makes waiting_time equal to started_at — confirm.
    enqueue_time = self.metadata.get("Enqueue Time", 0.0)
    waiting_time = started_at - enqueue_time
    self._log_progress("EXECUTING_QUERY", "waiting_time=%f" % waiting_time)

    query_runner = self.data_source.query_runner
    annotated_query = self._annotate_query(query_runner)

    try:
        data, error = query_runner.run_query(annotated_query, self.user)
    except Exception as exc:
        # Translate the job timeout into a friendly message; otherwise
        # surface the exception text itself.
        error = TIMEOUT_MESSAGE if isinstance(exc, JobTimeoutException) else str(exc)
        data = None
        elapsed = time.time() - started_at
        self._log_progress("UNEXPECTED_ERROR", "run_time=%f, error=[%s]" % (elapsed, error))

    run_time = time.time() - started_at
    self._log_progress(
        "AFTER_QUERY",
        "run_time=%f, error=[%s], data_length=%s" % (run_time, error, data and len(data)),
    )

    _unlock(self.query_hash, self.data_source.id)

    if error is not None and data is None:
        result = QueryExecutionError(error)
        if self.scheduled_query is not None:
            # Re-attach the detached scheduled query before recording the failure.
            self.scheduled_query = models.db.session.merge(self.scheduled_query, load=False)
            track_failure(self.scheduled_query, error)
        raise result

    if self.scheduled_query and self.scheduled_query.schedule_failures > 0:
        # A successful run clears the failure streak.
        self.scheduled_query = models.db.session.merge(self.scheduled_query, load=False)
        self.scheduled_query.schedule_failures = 0
        self.scheduled_query.skip_updated_at = True
        models.db.session.add(self.scheduled_query)

    query_result = models.QueryResult.store_result(
        self.data_source.org_id,
        self.data_source,
        self.query_hash,
        self.query,
        data,
        run_time,
        utcnow(),
    )

    updated_query_ids = models.Query.update_latest_result(query_result)

    models.db.session.commit()  # make sure that alert sees the latest query result
    self._log_progress("CHECKING_ALERTS")
    for query_id in updated_query_ids:
        check_alerts_for_query.delay(query_id)
    self._log_progress("FINISHED")

    result = query_result.id
    models.db.session.commit()
    return result
def run(self):
    """Execute the query (optionally pre-rendered via JinjaSql) and
    persist the result.

    When ``FEATURE_ALLOW_JINJA`` is enabled, the query text has its
    ``'-- '`` markers stripped and is passed through ``JinjaSql`` before
    execution. Returns the new ``QueryResult`` id, or raises
    ``QueryExecutionError`` on a failed run with no data.
    """
    signal.signal(signal.SIGINT, signal_handler)
    started_at = time.time()

    logger.debug("---Executing FEATURE_ALLOW_JINJA:\n%s", settings.FEATURE_ALLOW_JINJA)
    if settings.FEATURE_ALLOW_JINJA:
        # Strip '-- ' so commented-out Jinja directives become active,
        # then render the template with no bound parameters.
        stripped_query = self.query.replace('-- ', '')
        logger.debug("Executing Clean:\n%s", stripped_query)
        renderer = JinjaSql()
        rendered_query, _unused = renderer.prepare_query(stripped_query, "")
        self.query = rendered_query
        logger.debug("Executing query:\n%s", self.query)

    logger.debug("Executing query:\n%s", self.query)
    self._log_progress("executing_query")

    query_runner = self.data_source.query_runner
    annotated_query = self._annotate_query(query_runner)

    try:
        data, error = query_runner.run_query(annotated_query, self.user)
    except Exception as exc:
        # Job timeouts get a friendly message; everything else surfaces
        # its own text.
        if isinstance(exc, JobTimeoutException):
            error = TIMEOUT_MESSAGE
        else:
            error = str(exc)
        data = None
        logger.warning("Unexpected error while running query:", exc_info=1)

    run_time = time.time() - started_at

    logger.info(
        "job=execute_query query_hash=%s ds_id=%d data_length=%s error=[%s]",
        self.query_hash,
        self.data_source_id,
        data and len(data),
        error,
    )

    _unlock(self.query_hash, self.data_source.id)

    if error is not None and data is None:
        result = QueryExecutionError(error)
        if self.scheduled_query is not None:
            # Re-attach the detached scheduled query before recording the failure.
            self.scheduled_query = models.db.session.merge(self.scheduled_query, load=False)
            track_failure(self.scheduled_query, error)
        raise result

    if self.scheduled_query and self.scheduled_query.schedule_failures > 0:
        # A successful run clears the failure streak.
        self.scheduled_query = models.db.session.merge(self.scheduled_query, load=False)
        self.scheduled_query.schedule_failures = 0
        self.scheduled_query.skip_updated_at = True
        models.db.session.add(self.scheduled_query)

    query_result = models.QueryResult.store_result(
        self.data_source.org_id,
        self.data_source,
        self.query_hash,
        self.query,
        data,
        run_time,
        utcnow(),
    )

    updated_query_ids = models.Query.update_latest_result(query_result)

    models.db.session.commit()  # make sure that alert sees the latest query result
    self._log_progress("checking_alerts")
    for query_id in updated_query_ids:
        check_alerts_for_query.delay(query_id)
    self._log_progress("finished")

    result = query_result.id
    models.db.session.commit()
    return result