def watch_query(request, id):
  """
  Wait for the query to finish and (by default) display the results of query id.

  It understands the optional GET params:

    on_success_url
      If given, it will be displayed when the query is successfully finished.
      Otherwise, it will display the view query results page by default.

    context
      A string of "name:data" that describes the context that generated this
      query result. It may be:
        - "table":"<table_name>"
        - "design":<design_id>

  All other GET params will be passed to on_success_url (if present).
  """
  # Coerce types; manage arguments
  id = int(id)
  query_history = authorized_get_history(request, id, must_exist=True)

  # GET param: context.
  context_param = request.GET.get('context', '')

  # GET param: on_success_url. Default to view_results
  results_url = urlresolvers.reverse(view_results, kwargs=dict(id=str(id), first_row=0, last_result_len=0))
  on_success_url = request.GET.get('on_success_url')
  if not on_success_url:
    on_success_url = results_url

  # Get the server_id
  server_id, state = _get_server_id_and_state(query_history)
  query_history.save_state(state)

  # Query finished?
  if state == QueryHistory.STATE.expired:
    raise PopupException(_("The result of this query has expired."))
  elif state == QueryHistory.STATE.available:
    return format_preserving_redirect(request, on_success_url, request.GET)
  elif state == QueryHistory.STATE.failed:
    # When we fetch, the Beeswax server will throw us a BeeswaxException,
    # which has the log we want to display.
    return format_preserving_redirect(request, results_url, request.GET)

  # Still running
  log = db_utils.db_client(query_history.get_query_server()).get_log(server_id)

  # Keep waiting
  # - Translate context into something more meaningful (type, data)
  context = _parse_query_context(context_param)

  return render('watch_wait.mako', request, {
    'query': query_history,
    'fwd_params': request.GET.urlencode(),
    'log': log,
    'hadoop_jobs': _parse_out_hadoop_jobs(log),
    'query_context': context,
  })
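# Illustration (not the actual Hue helper): the docstring above describes the
# optional "context" GET parameter as a "name:data" string such as
# "table:<table_name>" or "design:<design_id>". A minimal sketch of how a
# helper like _parse_query_context could split such a value into a
# (type, data) pair, assuming empty or malformed values map to None:
def parse_query_context_sketch(context_param):
  if not context_param:
    return None
  pair = context_param.split(':', 1)
  if len(pair) != 2 or not pair[1]:
    return None
  return tuple(pair)  # e.g. ('table', 'sample_07') or ('design', '42')

assert parse_query_context_sketch('table:sample_07') == ('table', 'sample_07')
assert parse_query_context_sketch('') is None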
def get_jobs(self, notebook, snippet, logs):
  job_ids = _parse_out_hadoop_jobs(logs)

  jobs = [{
    "name": job_id,
    "url": reverse("jobbrowser.views.single_job", kwargs={"job": job_id})
  } for job_id in job_ids]

  return jobs
def get_jobs(self, notebook, snippet, logs):
  jobs = []

  if snippet['type'] == 'hive':
    engine = self._get_hive_execution_engine(notebook, snippet)
    job_ids = _parse_out_hadoop_jobs(logs, engine=engine)

    jobs = [{
      'name': job_id,
      'url': reverse('jobbrowser.views.single_job', kwargs={'job': job_id})
    } for job_id in job_ids]

  return jobs
def get_jobs(self, notebook, snippet, logs):
  jobs = []

  if snippet['type'] == 'hive':
    engine = self._get_hive_execution_engine(notebook, snippet)
    jobs_with_state = _parse_out_hadoop_jobs(logs, engine=engine, with_state=True)

    jobs = [{
      'name': job.get('job_id', ''),
      'url': reverse('jobbrowser.views.single_job', kwargs={'job': job.get('job_id', '')}),
      'started': job.get('started', False),
      'finished': job.get('finished', False)
    } for job in jobs_with_state]

  return jobs
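# Hypothetical illustration of the input shape the with_state variant above
# relies on: _parse_out_hadoop_jobs(..., with_state=True) is expected to yield
# one dict per job carrying 'job_id', 'started' and 'finished' keys. The sample
# values and the inlined URL (standing in for the reverse(...) call) are
# invented for illustration only.
jobs_with_state = [
  {'job_id': 'job_201310221446_0003', 'started': True, 'finished': False},
  {'job_id': 'job_201310221446_0004', 'started': False, 'finished': False},
]

jobs = [{
  'name': job.get('job_id', ''),
  'url': '/jobbrowser/jobs/%s' % job.get('job_id', ''),
  'started': job.get('started', False),
  'finished': job.get('finished', False)
} for job in jobs_with_state]

assert jobs[0]['name'] == 'job_201310221446_0003' and jobs[0]['started']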
def _get_jobs(self, log):
  return _parse_out_hadoop_jobs(log)
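# None of the snippets here show _parse_out_hadoop_jobs itself. As a rough,
# hedged sketch of the kind of helper they rely on: scan the raw query log for
# MapReduce job IDs of the form job_<cluster_timestamp>_<sequence> and return
# them in first-seen order without duplicates. The regex and the de-duplication
# are assumptions for illustration, not the actual Hue implementation.
import re

_JOB_ID_RE = re.compile(r'\b(job_\d+_\d+)\b')

def parse_out_hadoop_jobs_sketch(log):
  seen = set()
  job_ids = []
  for job_id in _JOB_ID_RE.findall(log or ''):
    if job_id not in seen:
      seen.add(job_id)
      job_ids.append(job_id)
  return job_ids

# Typical Hive log lines mention each job several times; only distinct IDs come back.
sample_log = (
  "Starting Job = job_201310221446_0001, Tracking URL = http://jt:50030/...\n"
  "Starting Job = job_201310221446_0002, Tracking URL = http://jt:50030/...\n"
  "Ended Job = job_201310221446_0001\n"
)
assert parse_out_hadoop_jobs_sketch(sample_log) == ['job_201310221446_0001', 'job_201310221446_0002']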
    close_operation(request, id)
    query_history = db.execute_next_statement(query_history, request.POST.get('query-query'))
    handle, state = _get_query_handle_and_state(query_history)
  except QueryServerException, ex:
    raise ex
  except Exception, ex:
    LOG.exception(ex)
    handle, state = _get_query_handle_and_state(query_history)

  try:
    start_over = request.POST.get('log-start-over') == 'true'
    log = db.get_log(handle, start_over=start_over)
  except Exception, ex:
    log = str(ex)

  jobs = _parse_out_hadoop_jobs(log)
  job_urls = massage_job_urls_for_json(jobs)

  result = {
    'status': -1,
    'log': log,
    'jobs': jobs,
    'jobUrls': job_urls,
    'isSuccess': query_history.is_success(),
    'isFailure': query_history.is_failure(),
    'id': id,
    'statement': query_history.get_current_statement(),
    'watch_url': reverse(get_app_name(request) + ':api_watch_query_refresh_json', kwargs={'id': query_history.id}),
    'oldLogsApi': USE_GET_LOG_API.get()
  }
def view_results(request, id, first_row=0):
  """
  Returns the view for the results of the QueryHistory with the given id.

  The query results MUST be ready.
  To display query results, one should always go through the watch_query view.

  If ``first_row`` is 0, restarts (if necessary) the query read. Otherwise, just
  spits out a warning if first_row doesn't match the server's conception.
  Multiple readers will produce a confusing interaction here, and that's known.

  It understands the ``context`` GET parameter. (See watch_query().)
  """
  first_row = long(first_row)
  start_over = (first_row == 0)
  results = None
  data = None
  fetch_error = False
  error_message = ''
  log = ''
  app_name = get_app_name(request)

  query_history = authorized_get_history(request, id, must_exist=True)
  db = dbms.get(request.user, query_history.get_query_server_config())

  handle, state = _get_query_handle_and_state(query_history)
  context_param = request.GET.get('context', '')
  query_context = _parse_query_context(context_param)

  # To remove in Hue 2.3
  download = request.GET.get('download', '')

  # Update the status as expired should not be accessible
  expired = state == QueryHistory.STATE.expired
  if expired:
    state = QueryHistory.STATE.expired
    query_history.save_state(state)

  # Retrieve query results
  try:
    if not download:
      results = db.fetch(handle, start_over, 100)
      data = list(results.rows())  # Materialize results

      # We display the "Download" button only when we know that there are results:
      downloadable = first_row > 0 or data
    else:
      downloadable = True
      data = []
      results = type('Result', (object,), {
        'rows': 0,
        'columns': [],
        'has_more': False,
        'start_row': 0,
      })
    log = db.get_log(handle)
  except Exception as ex:
    fetch_error = True
    error_message, log = expand_exception(ex, db)

  # Handle errors
  error = fetch_error or results is None or expired

  context = {
    'error': error,
    'error_message': error_message,
    'has_more': True,
    'query': query_history,
    'results': data,
    'expected_first_row': first_row,
    'log': log,
    'hadoop_jobs': _parse_out_hadoop_jobs(log)[0],
    'query_context': query_context,
    'can_save': False,
    'context_param': context_param,
    'expired': expired,
    'app_name': app_name,
    'download': download,
  }

  if not error:
    download_urls = {}
    if downloadable:
      for format in common.DL_FORMATS:
        download_urls[format] = urlresolvers.reverse('beeswax' + ':download', kwargs=dict(id=str(id), format=format))

    save_form = SaveResultsForm()
    results.start_row = first_row

    context.update({
      'results': data,
      'has_more': results.has_more,
      'next_row': results.start_row + len(data),
      'start_row': results.start_row,
      'expected_first_row': first_row,
      'columns': results.columns,
      'download_urls': download_urls,
      'save_form': save_form,
      'can_save': query_history.owner == request.user and not download,
    })

  return render('watch_results.mako', request, context)
def watch_query(request, id, download_format=None):
  """
  Wait for the query to finish and (by default) display the results of query id.

  It understands the optional GET params:

    on_success_url
      If given, it will be displayed when the query is successfully finished.
      Otherwise, it will display the view query results page by default.

    context
      A string of "name:data" that describes the context that generated this
      query result. It may be:
        - "table":"<table_name>"
        - "design":<design_id>

  All other GET params will be passed to on_success_url (if present).
  """
  # Coerce types; manage arguments
  query_history = authorized_get_history(request, id, must_exist=True)
  db = dbms.get(request.user, query_history.get_query_server_config())

  # GET param: context.
  context_param = request.GET.get('context', '')

  # GET param: on_success_url. Default to view_results
  if request.session.get('dl_status', False) == False and download_format in common.DL_FORMATS:
    results_url = urlresolvers.reverse(get_app_name(request) + ':execute_query')
  else:
    results_url = urlresolvers.reverse(get_app_name(request) + ':view_results', kwargs={'id': id, 'first_row': 0})

  if request.GET.get('download', ''):
    results_url += '?download=true'

  on_success_url = request.GET.get('on_success_url')
  if not on_success_url:
    on_success_url = results_url

  # Go to next statement if asked to continue or when a statement with no dataset finished.
  if request.method == 'POST' or (not query_history.is_finished() and query_history.is_success() and not query_history.has_results):
    try:
      query_history = db.execute_next_statement(query_history)
    except Exception:
      pass

  # Check query state
  handle, state = _get_query_handle_and_state(query_history)
  query_history.save_state(state)

  if query_history.is_failure():
    # When we fetch, the Beeswax server will throw us a BeeswaxException,
    # which has the log we want to display.
    return format_preserving_redirect(request, results_url, request.GET)
  elif query_history.is_finished() or (query_history.is_success() and query_history.has_results):
    if request.session.get('dl_status', False):  # BUG-20020
      on_success_url = urlresolvers.reverse(get_app_name(request) + ':download', kwargs=dict(id=str(id), format=download_format))
      _clean_session(request)
    return format_preserving_redirect(request, on_success_url, request.GET)

  # Still running
  log = db.get_log(handle)

  # Keep waiting
  # - Translate context into something more meaningful (type, data)
  query_context = _parse_query_context(context_param)

  return render('watch_wait.mako', request, {
    'query': query_history,
    'fwd_params': request.GET.urlencode(),
    'log': log,
    'hadoop_jobs': _parse_out_hadoop_jobs(log)[0],
    'query_context': query_context,
    'download_format': download_format,  ## ExpV
  })
    # We display the "Download" button only when we know
    # that there are results:
    downloadable = (first_row > 0 or len(results.data) > 0)
    fetch_error = False
  except BeeswaxException, ex:
    fetch_error = True
    error_message, log = expand_exception(ex)

  # Handle errors
  if fetch_error:
    return render('watch_results.mako', request, {
      'query': query_history,
      'error': True,
      'error_message': error_message,
      'log': log,
      'hadoop_jobs': _parse_out_hadoop_jobs(log),
      'query_context': context,
      'can_save': False,
    })

  log = db_utils.db_client(query_history.get_query_server()).get_log(query_history.server_id)

  download_urls = {}
  if downloadable:
    for format in common.DL_FORMATS:
      download_urls[format] = urlresolvers.reverse(download, kwargs=dict(id=str(id), format=format))

  save_form = SaveResultsForm()

  has_more = True
  last_result_len = long(last_result_len)
  if (last_result_len != 0 and len(results.data) != last_result_len) or len(results.data) == 0:
    has_more = False