def run(self):
    signal.signal(signal.SIGINT, signal_handler)
    started_at = time.time()

    logger.debug("Executing query:\n%s", self.query)
    self._log_progress('executing_query')

    query_runner = self.data_source.query_runner
    annotated_query = self._annotate_query(query_runner)

    try:
        data, error = query_runner.run_query(annotated_query, self.user)
    except Exception as e:
        if isinstance(e, SoftTimeLimitExceeded):
            error = TIMEOUT_MESSAGE
        else:
            error = text_type(e)

        data = None
        logging.warning('Unexpected error while running query:', exc_info=1)

    run_time = time.time() - started_at

    logger.info(
        u"task=execute_query query_hash=%s data_length=%s error=[%s]",
        self.query_hash, data and len(data), error)

    # release the per-query execution lock before persisting the outcome
    _unlock(self.query_hash, self.data_source.id)

    if error is not None and data is None:
        result = QueryExecutionError(error)
        if self.scheduled_query is not None:
            # count the failure against the scheduled query and notify before re-raising
            self.scheduled_query = models.db.session.merge(
                self.scheduled_query, load=False)
            self.scheduled_query.schedule_failures += 1
            models.db.session.add(self.scheduled_query)
            notify_of_failure(error, self.scheduled_query)
        models.db.session.commit()
        raise result
    else:
        if (self.scheduled_query and
                self.scheduled_query.schedule_failures > 0):
            # a successful run clears any previous scheduled-run failures
            self.scheduled_query = models.db.session.merge(
                self.scheduled_query, load=False)
            self.scheduled_query.schedule_failures = 0
            models.db.session.add(self.scheduled_query)

        query_result, updated_query_ids = models.QueryResult.store_result(
            self.data_source.org_id, self.data_source, self.query_hash,
            self.query, data, run_time, utcnow())

        models.db.session.commit()  # make sure that alert sees the latest query result
        self._log_progress('checking_alerts')
        for query_id in updated_query_ids:
            check_alerts_for_query.delay(query_id)
        self._log_progress('finished')

        result = query_result.id
    models.db.session.commit()
    return result
def run(self):
    signal.signal(signal.SIGINT, signal_handler)
    self.tracker.update(started_at=time.time(), state='started')

    logger.debug("Executing query:\n%s", self.query)
    self._log_progress('executing_query')

    query_runner = self.data_source.query_runner
    annotated_query = self._annotate_query(query_runner)

    try:
        data, error = query_runner.run_query(annotated_query, self.user)
        # this variant's serialized result carries a 'data_consumed_mb' field
        dict_data = json.loads(data)
        data_consumed_mb = dict_data['data_consumed_mb']
    except Exception as e:
        error = unicode(e)
        data = None
        data_consumed_mb = 0.0
        logging.warning('Unexpected error while running query:', exc_info=1)

    run_time = time.time() - self.tracker.started_at
    self.tracker.update(error=error, run_time=run_time, state='saving_results')

    logger.info(
        u"task=execute_query query_hash=%s data_length=%s error=[%s]",
        self.query_hash, data and len(data), error)

    _unlock(self.query_hash, self.data_source.id)

    if error:
        self.tracker.update(state='failed')
        result = QueryExecutionError(error)
        if self.scheduled_query:
            self.scheduled_query.schedule_failures += 1
            models.db.session.add(self.scheduled_query)
    else:
        if (self.scheduled_query and
                self.scheduled_query.schedule_failures > 0):
            self.scheduled_query.schedule_failures = 0
            models.db.session.add(self.scheduled_query)

        query_result, updated_query_ids = models.QueryResult.store_result(
            self.data_source.org, self.data_source, self.query_hash,
            self.query, data, run_time, utils.utcnow())

        # persist per-run metadata (data consumed, who ran it, when) alongside the result
        query_result_metadata = models.QueryResultMetaData.store_result_metadata(
            updated_query_ids=updated_query_ids,
            query_result_id=query_result.id,
            data_consumed_mb=data_consumed_mb,
            data_source_id=self.data_source.id,
            query_hash=self.query_hash,
            run_by_user_id=self.user.id,
            run_at=utils.utcnow())

        self._log_progress('checking_alerts')
        for query_id in updated_query_ids:
            check_alerts_for_query.delay(query_id)
        self._log_progress('finished')

        result = query_result.id
    models.db.session.commit()
    return result
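The variant above extracts 'data_consumed_mb' inside the same try block that runs the query, so a result payload without that key is treated as a failed run. A minimal standalone sketch of the field extraction is shown below; the helper name extract_data_consumed_mb is hypothetical and not part of the code above, and the payload format is assumed to be the serialized JSON result produced by the query runner.

import json

def extract_data_consumed_mb(serialized_result, default=0.0):
    # Hypothetical helper: pull the fork-specific 'data_consumed_mb' field out
    # of the serialized query result, falling back to a default instead of
    # raising when the field (or the payload itself) is missing or malformed.
    try:
        return float(json.loads(serialized_result).get("data_consumed_mb", default))
    except (TypeError, ValueError):
        return default

print(extract_data_consumed_mb('{"rows": [], "data_consumed_mb": 12.5}'))  # 12.5
print(extract_data_consumed_mb('{"rows": []}'))                            # 0.0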
def run(self):
    signal.signal(signal.SIGINT, signal_handler)
    self.tracker.update(started_at=time.time(), state='started')

    logger.debug("Executing query:\n%s", self.query)
    self._log_progress('executing_query')

    query_runner = self.data_source.query_runner
    annotated_query = self._annotate_query(query_runner)

    try:
        data, error = query_runner.run_query(annotated_query, self.user)
    except Exception as e:
        error = text_type(e)
        data = None
        logging.warning('Unexpected error while running query:', exc_info=1)

    run_time = time.time() - self.tracker.started_at
    self.tracker.update(error=error, run_time=run_time, state='saving_results')

    logger.info(
        u"task=execute_query query_hash=%s data_length=%s error=[%s]",
        self.query_hash, data and len(data), error)

    _unlock(self.query_hash, self.data_source.id)

    if error:
        self.tracker.update(state='failed')
        result = QueryExecutionError(error)
        if self.scheduled_query is not None:
            self.scheduled_query = models.db.session.merge(
                self.scheduled_query, load=False)
            self.scheduled_query.schedule_failures += 1
            models.db.session.add(self.scheduled_query)
        models.db.session.commit()
        raise result
    else:
        if (self.scheduled_query and
                self.scheduled_query.schedule_failures > 0):
            self.scheduled_query = models.db.session.merge(
                self.scheduled_query, load=False)
            self.scheduled_query.schedule_failures = 0
            models.db.session.add(self.scheduled_query)

        query_result, updated_query_ids = models.QueryResult.store_result(
            self.data_source.org_id, self.data_source, self.query_hash,
            self.query, data, run_time, utcnow())

        models.db.session.commit()  # make sure that alert sees the latest query result
        self._log_progress('checking_alerts')
        for query_id in updated_query_ids:
            check_alerts_for_query.delay(query_id)
        self._log_progress('finished')

        result = query_result.id
    models.db.session.commit()
    return result
def run(self):
    signal.signal(signal.SIGINT, signal_handler)
    started_at = time.time()

    logger.debug("Executing query:\n%s", self.query)
    self._log_progress("executing_query")

    query_runner = self.data_source.query_runner
    annotated_query = self._annotate_query(query_runner)

    try:
        data, error = query_runner.run_query(annotated_query, self.user)
    except Exception as e:
        if isinstance(e, JobTimeoutException):
            error = TIMEOUT_MESSAGE
        else:
            error = str(e)

        data = None
        logger.warning("Unexpected error while running query:", exc_info=1)

    run_time = time.time() - started_at

    logger.info(
        "job=execute_query query_hash=%s data_length=%s error=[%s]",
        self.query_hash,
        data and len(data),
        error,
    )

    _unlock(self.query_hash, self.data_source.id)

    if error is not None and data is None:
        result = QueryExecutionError(error)
        if self.scheduled_query is not None:
            self.scheduled_query = models.db.session.merge(
                self.scheduled_query, load=False)
            track_failure(self.scheduled_query, error)
        raise result
    else:
        if self.scheduled_query and self.scheduled_query.schedule_failures > 0:
            self.scheduled_query = models.db.session.merge(
                self.scheduled_query, load=False)
            self.scheduled_query.schedule_failures = 0
            self.scheduled_query.skip_updated_at = True
            models.db.session.add(self.scheduled_query)

        query_result = models.QueryResult.store_result(
            self.data_source.org_id,
            self.data_source,
            self.query_hash,
            self.query,
            data,
            run_time,
            utcnow(),
        )

        updated_query_ids = models.Query.update_latest_result(query_result)

        models.db.session.commit()  # make sure that alert sees the latest query result
        self._log_progress("checking_alerts")
        for query_id in updated_query_ids:
            check_alerts_for_query.delay(query_id)
        self._log_progress("finished")

        result = query_result.id
    models.db.session.commit()
    return result
def run(self):
    started_at = time.time()
    signal.signal(signal.SIGINT, signal_handler)

    # how long the job sat in the queue before a worker picked it up
    enqueue_time = self.metadata.get("Enqueue Time", 0.0)
    waiting_time = started_at - enqueue_time
    message = "waiting_time=%f" % waiting_time
    self._log_progress("EXECUTING_QUERY", message)

    query_runner = self.data_source.query_runner
    annotated_query = self._annotate_query(query_runner)

    try:
        data, error = query_runner.run_query(annotated_query, self.user)
    except Exception as e:
        if isinstance(e, JobTimeoutException):
            error = TIMEOUT_MESSAGE
        else:
            error = str(e)

        data = None
        # get_logger().warning("Unexpected error while running query:", exc_info=1)
        run_time = time.time() - started_at
        message = "run_time=%f, error=[%s]" % (run_time, error)
        self._log_progress("UNEXPECTED_ERROR", message)

    run_time = time.time() - started_at
    message = "run_time=%f, error=[%s], data_length=%s" % (
        run_time, error, data and len(data))
    self._log_progress("AFTER_QUERY", message)

    _unlock(self.query_hash, self.data_source.id)

    if error is not None and data is None:
        result = QueryExecutionError(error)
        if self.scheduled_query is not None:
            self.scheduled_query = models.db.session.merge(
                self.scheduled_query, load=False
            )
            track_failure(self.scheduled_query, error)
        raise result
    else:
        if self.scheduled_query and self.scheduled_query.schedule_failures > 0:
            self.scheduled_query = models.db.session.merge(
                self.scheduled_query, load=False
            )
            self.scheduled_query.schedule_failures = 0
            self.scheduled_query.skip_updated_at = True
            models.db.session.add(self.scheduled_query)

        query_result = models.QueryResult.store_result(
            self.data_source.org_id,
            self.data_source,
            self.query_hash,
            self.query,
            data,
            run_time,
            utcnow(),
        )

        updated_query_ids = models.Query.update_latest_result(query_result)

        models.db.session.commit()  # make sure that alert sees the latest query result
        self._log_progress("CHECKING_ALERTS")
        for query_id in updated_query_ids:
            check_alerts_for_query.delay(query_id)
        self._log_progress("FINISHED")

        result = query_result.id
    models.db.session.commit()
    return result
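The last variant adds queue-wait instrumentation: it reads an "Enqueue Time" timestamp from the job's metadata and logs the difference to execution start. A minimal standalone sketch of that pattern follows; the helper name queue_wait_seconds is hypothetical, and "Enqueue Time" is assumed to be a Unix timestamp stamped on the metadata when the job was queued, with the same 0.0 fallback as above (which yields an epoch-sized, meaningless wait rather than an error when the key is absent).

import time

def queue_wait_seconds(metadata, now=None):
    # Hypothetical helper mirroring the wait-time calculation above:
    # seconds between enqueue and the moment a worker starts the job.
    enqueue_time = metadata.get("Enqueue Time", 0.0)
    now = time.time() if now is None else now
    return now - enqueue_time

meta = {"Enqueue Time": time.time() - 2.5}
print("waiting_time=%f" % queue_wait_seconds(meta))  # roughly 2.5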