def create(request):
    """
    Create a new shell instance for the current user.

    POST is the AJAX flavour and returns JSON; GET renders a full page.
    If the server is not running under Spawning (required for shells —
    see _running_with_spawning), an error response is returned instead.
    """
    if not _running_with_spawning(request):
        if request.method == "POST":
            result = simplejson.dumps({constants.NOT_RUNNING_SPAWNING: True})
            return HttpResponse(result, mimetype="application/json")
        else:
            return render('not_running_spawning.mako', request, {})
    shell_manager = ShellManager.global_instance()
    user = request.user
    # The requested shell type arrives in POST for AJAX calls, GET otherwise.
    key_name = ""
    if request.method == "POST":
        key_name = request.POST.get(constants.KEY_NAME, "")
    else:
        key_name = request.GET.get(constants.KEY_NAME, "")
    # Audit log: who asked for which shell from where.
    SHELL_INPUT_LOGGER.info(
        "%s %s - Create '%s' shell" % (request.META.get('REMOTE_ADDR'), user.username, key_name))
    result = shell_manager.try_create(user, key_name)
    if request.method == "POST":
        return HttpResponse(
            simplejson.dumps(result), mimetype="application/json")
    else:
        if constants.SUCCESS in result:
            shell_types = shell_manager.available_shell_types(user)
            dict_for_template = {
                'shells': shell_types,
                'shell_id': result.get(constants.SHELL_ID)
            }
            return render('index.mako', request, dict_for_template)
        else:
            return render('failed_to_create.mako', request, {})
def create_database(request):
    """
    Show (GET) or process (POST) the HCatalog "create database" form.

    On a valid POST the database is created via HCatClient and the
    database-list page is rendered; on failure the form is re-rendered
    with the error text. An unchecked "use_default_location" checkbox is
    simply absent from the POST data, hence the setdefault below.
    """
    error = None
    if request.method == "POST":
        try:
            data = request.POST.copy()
            # Absent checkbox == "do not use the default location".
            data.setdefault("use_default_location", False)
            db = dbms.get(request.user)
            databases = db.get_databases()
            form = hcatalog.forms.CreateDatabaseForm(data)
            form.database_list = databases
            if form.is_valid():
                database = form.cleaned_data['db_name']
                comment = form.cleaned_data['comment']
                location = None
                if not form.cleaned_data['use_default_location']:
                    location = form.cleaned_data['external_location']
                hcat_cli = HCatClient(request.user.username)
                hcat_cli.create_database(database=database, comment=comment, location=location)
                return render("show_databases.mako", request, {})
        except Exception as ex:
            # Fix: str(ex) instead of ex.message — .message is deprecated
            # since Python 2.6 and not defined on many exception classes.
            # NOTE(review): if dbms.get()/get_databases() raise before the
            # form is bound, `form` is still unbound at the render below —
            # pre-existing behavior, left unchanged.
            error = str(ex)
    else:
        form = hcatalog.forms.CreateDatabaseForm()
    return render("create_database.mako", request, dict(
        database_form=form,
        error=error
    ))
def edit_permission(request, app=None, priv=None):
    """
    Display and process the edit form for a single app privilege.

    @type request: HttpRequest
    @param request: The request object
    @type app: string
    @param app: Default to None, specifies the app of the privilege
    @type priv: string
    @param priv Default to None, the action of the privilege

    Only superusers may modify permissions
    """
    if not request.user.is_superuser:
        raise PopupException(_("You must be a superuser to change permissions."), error_code=401)

    instance = HuePermission.objects.get(app=app, action=priv)

    if request.method != 'POST':
        form = PermissionsEditForm(instance=instance)
    else:
        form = PermissionsEditForm(request.POST, instance=instance)
        if form.is_valid():
            form.save()
            request.info(_('Permission information updated'))
            return render("list_permissions.mako", request, dict(permissions=HuePermission.objects.all()))

    # Invalid POST falls through here with the bound form so errors show.
    return render('edit_permissions.mako', request, dict(form=form, action=request.path, app=app, priv=priv))
def index(request):
    """Shell landing page: list the shell types available to this user."""
    if not _running_with_spawning(request):
        return render('not_running_spawning.mako', request, {})
    available = ShellManager.global_instance().available_shell_types(request.user)
    if available is None:
        # No such system user — cannot offer any shells.
        return render('no_such_user.mako', request, {})
    return render('index.mako', request, {'shells': available})
def log_view(request):
    """
    We have a log handler that retains the last X characters of log messages.
    If it is attached to the root logger, this view will display that history,
    otherwise it will report that it can't be found.
    """
    root_logger = logging.getLogger()
    for handler in root_logger.handlers:
        if isinstance(handler, desktop.log.log_buffer.FixedBufferHandler):
            # list(...) snapshots the ring buffer. The original
            # `[l for l in h.buf]` did the same but shadowed the logger
            # variable `l` with each buffered line.
            return render('logs.mako', request,
                          dict(log=list(handler.buf), query=request.GET.get("q", "")))
    return render('logs.mako', request, dict(log=[_("No logs found!")]))
def listdir(request, path, chooser):
    """
    Implements directory listing (or index).

    Intended to be called via view().

    TODO: Remove?
    """
    if not request.fs.isdir(path):
        raise PopupException(_("Not a directory: %(path)s") % {'path': path})

    # 'file_filter' restricts what the chooser offers for selection.
    file_filter = request.REQUEST.get('file_filter', 'any')
    assert file_filter in ['any', 'file', 'dir']

    home_dir_path = request.user.get_home_directory()
    breadcrumbs = parse_breadcrumbs(path)

    data = {
        'path': path,
        'file_filter': file_filter,
        'breadcrumbs': breadcrumbs,
        'current_dir_path': path,
        # These could also be put in automatically via
        # http://docs.djangoproject.com/en/dev/ref/templates/api/#django-core-context-processors-request,
        # but manually seems cleaner, since we only need it here.
        'current_request_path': request.path,
        # Pre-ternary `X and Y or Z` idiom: home dir only if it exists.
        'home_directory': request.fs.isdir(home_dir_path) and home_dir_path or None,
        'cwd_set': True,
        'is_superuser': request.user.username == request.fs.superuser,
        # Group/user lists are only exposed to the HDFS superuser (for chown/chgrp UI).
        'groups': request.user.username == request.fs.superuser and [str(x) for x in Group.objects.values_list('name', flat=True)] or [],
        'users': request.user.username == request.fs.superuser and [str(x) for x in User.objects.values_list('username', flat=True)] or [],
        'superuser': request.fs.superuser,
        # Tuple-index dance: the and/or idiom cannot yield a falsy literal
        # directly, so wrap booleans in one-tuples and take element 0.
        'show_upload': (request.REQUEST.get('show_upload') == 'false' and (False,) or (True,))[0]
    }

    stats = request.fs.listdir_stats(path)

    # Include parent dir, unless at filesystem root.
    if Hdfs.normpath(path) != posixpath.sep:
        parent_path = request.fs.join(path, "..")
        parent_stat = request.fs.stats(parent_path)
        # The 'path' field would be absolute, but we want its basename to be
        # actually '..' for display purposes. Encode it since _massage_stats expects byte strings.
        parent_stat['path'] = parent_path
        stats.insert(0, parent_stat)

    data['files'] = [_massage_stats(request, stat) for stat in stats]
    if chooser:
        return render('chooser.mako', request, data)
    else:
        return render('listdir.mako', request, data)
def listdir(request, path, chooser):
    """
    Implements directory listing (or index).

    Intended to be called via view().

    TODO: Remove?
    """
    if not request.fs.isdir(path):
        raise PopupException(_("Not a directory: %(path)s") % {'path': path})

    # 'file_filter' restricts what the chooser offers for selection.
    file_filter = request.REQUEST.get('file_filter', 'any')
    assert file_filter in ['any', 'file', 'dir']

    home_dir_path = request.user.get_home_directory()
    breadcrumbs = parse_breadcrumbs(path)

    data = {
        'path': path,
        'file_filter': file_filter,
        'breadcrumbs': breadcrumbs,
        'current_dir_path': path,
        'current_request_path': request.path,
        # Pre-ternary `X and Y or Z` idiom: home dir only if it exists.
        'home_directory': request.fs.isdir(home_dir_path) and home_dir_path or None,
        'cwd_set': True,
        'is_superuser': request.user.username == request.fs.superuser,
        # Group/user lists are only exposed to the HDFS superuser (for chown/chgrp UI).
        'groups': request.user.username == request.fs.superuser and [str(x) for x in Group.objects.values_list('name', flat=True)] or [],
        'users': request.user.username == request.fs.superuser and [str(x) for x in User.objects.values_list('username', flat=True)] or [],
        'superuser': request.fs.superuser,
        # Tuple-index dance: the and/or idiom cannot yield a falsy literal
        # directly, so wrap booleans in one-tuples and take element 0.
        'show_upload': (request.REQUEST.get('show_upload') == 'false' and (False,) or (True,))[0],
        # Config-driven toggles for the download/upload buttons.
        'show_download_button': SHOW_DOWNLOAD_BUTTON.get(),
        'show_upload_button': SHOW_UPLOAD_BUTTON.get()
    }

    stats = request.fs.listdir_stats(path)

    # Include parent dir, unless at filesystem root.
    if not request.fs.isroot(path):
        parent_path = request.fs.join(path, "..")
        parent_stat = request.fs.stats(parent_path)
        # The 'path' field would be absolute, but we want its basename to be
        # actually '..' for display purposes. Encode it since _massage_stats expects byte strings.
        parent_stat['path'] = parent_path
        stats.insert(0, parent_stat)

    data['files'] = [_massage_stats(request, stat) for stat in stats]
    if chooser:
        return render('chooser.mako', request, data)
    else:
        return render('listdir.mako', request, data)
def create_table(request, database=None):
    """
    Create a table by specifying its attributes manually.

    GET shows the (multi-part) creation form; a POST with 'createTable'
    renders the CREATE TABLE statement from the validated forms, executes
    it via HCatClient, and shows the table list on success. Any failure is
    re-rendered on the form page via the `error` string.
    """
    if database is None:
        database = _get_last_database(request, database)
    form = MultiForm(
        table=hcatalog.forms.CreateTableForm,
        columns=hcatalog.forms.ColumnTypeFormSet,
        partitions=hcatalog.forms.PartitionTypeFormSet)
    db = dbms.get(request.user)
    databases = db.get_databases()
    db_form = hcatalog.forms.DbForm(initial={'database': database}, databases=databases)
    error = None
    if request.method == "POST":
        form.bind(request.POST)
        form.table.table_list = _get_table_list(request)
        if form.is_valid() and 'createTable' in request.POST:
            try:
                columns = [f.cleaned_data for f in form.columns.forms]
                column_names = [col["column_name"] for col in columns]
                # Server-side column-name validation beyond the form checks.
                isTableValid, tableValidErrMsg = hcatalog.common.validateHiveTable(column_names)
                if not isTableValid:
                    raise Exception(tableValidErrMsg)
                partition_columns = [f.cleaned_data for f in form.partitions.forms]
                proposed_query = django_mako.render_to_string("create_table_statement.mako", {
                    'table': form.table.cleaned_data,
                    'columns': columns,
                    'partition_columns': partition_columns
                })
                # Mako outputs bytestring in utf8
                proposed_query = proposed_query.decode('utf-8')
                tablename = form.table.cleaned_data['name']
                hcat_cli = HCatClient(request.user.username)
                hcat_cli.create_table(database, tablename, proposed_query)
                databases = hcat_cli.get_databases(like="*")
                db_form = hcatalog.forms.DbForm(initial={'database': database}, databases=databases)
                return render("show_tables.mako", request, {
                    'database': database,
                    'db_form': db_form,
                })
            except Exception as ex:
                # Fix: str(ex) instead of ex.message — .message is deprecated
                # since Python 2.6 and not defined on many exception classes.
                error = str(ex)
    else:
        form.bind()
    return render("create_table_manually.mako", request, dict(
        database=database,
        db_form=db_form,
        table_form=form.table,
        columns_form=form.columns,
        partitions_form=form.partitions,
        error=error,
    ))
def sample_insert_interface(request):
    """
    Insert the data of one or multiple sample in the database
    """
    # NOTE(review): this view renders with locals(), so EVERY local variable
    # below becomes part of the template context — do not rename or remove
    # locals without checking sample.insert.interface.mako.
    error_get = False
    error_sample = False
    samples_quantity = 0
    # We take the file received
    if 'vcf' in request.GET and 'type' in request.GET:
        filename = request.GET['vcf']
    else:
        error_get = True
        return render('sample.insert.interface.mako', request, locals())
    # We take the files in the current user directory
    init_path = directory_current_user(request)
    files = list_directory_content(request, init_path, ".vcf", True)
    length = 0
    for f in files:
        # Strip the directory prefix once to compare against the requested name.
        new_name = f['path'].replace(init_path+"/","", 1)
        if new_name == filename:
            length = f['stats']['size']
            break
    if length == 0:
        # File not found
        error_get = True
        return render('sample.insert.interface.mako', request, locals())
    # We take the number of samples (and their name) in the vcf file
    samples = sample_insert_vcfinfo(request, filename, length)
    samples_quantity = len(samples)
    if samples_quantity == 0:
        error_sample = True
        return render('sample.insert.interface.mako', request, locals())
    # We take the list of questions the user has to answer, and as dict in python is not ordered, we use an intermediary list
    # We also receive the different files previously uploaded by the user
    questions, q, files = sample_insert_questions(request)
    if request.method == 'POST' or request.GET['type'] == 'Import directly':
        # Now we save the result
        fprint(str(request.POST))
        # HACK: 'Import directly' fakes a POST so sample_insert() can be
        # reused unchanged for the no-form path.
        if request.GET['type'] == 'Import directly':
            request.method = 'POST'
            request.POST = {'vcf_data':''}
        result = sample_insert(request)
        result = json_to_dict(result)
    # We display the form
    return render('sample.insert.interface.mako', request, locals())
def generic_op(form_class, request, op, parameter_names, piggyback=None, template="fileop.mako", data_extractor=default_data_extractor, arg_extractor=default_arg_extractor, initial_value_extractor=default_initial_value_extractor, extra_params=None):
    """
    Generic implementation for several operations.

    @param form_class form to instantiate
    @param request incoming request, used for parameters
    @param op callable with the filesystem operation
    @param parameter_names list of form parameters that are extracted and then passed to op
    @param piggyback list of form parameters whose file stats to look up after the operation
    @param data_extractor function that extracts POST data to be used by op
    @param arg_extractor function that extracts args from a given form or formset
    @param initial_value_extractor function that extracts the initial values of a form or formset
    @param extra_params dictionary of extra parameters to send to the template for rendering
    """
    # Use next for non-ajax requests, when available.
    next = request.GET.get("next", request.POST.get("next", None))
    ret = dict({
        'next': next
    })
    if extra_params is not None:
        ret['extra_params'] = extra_params
    # Echo the requested parameters back into the template context.
    for p in parameter_names:
        val = request.REQUEST.get(p)
        if val:
            ret[p] = val
    if request.method == 'POST':
        form = form_class(**data_extractor(request))
        ret['form'] = form
        if form.is_valid():
            args = arg_extractor(request, form, parameter_names)
            try:
                op(*args)
            except (IOError, WebHdfsException), e:
                msg = _("Cannot perform operation.")
                # Being a Hue admin does not imply HDFS superuser rights;
                # spell that out, since it is a common source of confusion.
                if request.user.is_superuser and not request.user == request.fs.superuser:
                    msg += _(' Note: you are a Hue admin but not a HDFS superuser (which is "%(superuser)s").') \
                        % {'superuser': request.fs.superuser}
                raise PopupException(msg, detail=e)
            if next:
                logging.debug("Next: %s" % next)
                # Doesn't need to be quoted: quoting is done by HttpResponseRedirect.
                return format_preserving_redirect(request, next)
            ret["success"] = True
            try:
                if piggyback:
                    piggy_path = form.cleaned_data[piggyback]
                    ret["result"] = _massage_stats(request, request.fs.stats(piggy_path))
            except Exception, e:
                # Hard to report these more naturally here. These happen either
                # because of a bug in the piggy-back code or because of a
                # race condition.
                logger.exception("Exception while processing piggyback data")
                ret["result_error"] = True
    ret['user'] = request.user
    return render(template, request, ret)
def edit_bundle(request, bundle):
    """Display and process the edit form for an Oozie bundle and its bundled coordinators."""
    history = History.objects.filter(submitter=request.user, job=bundle).order_by('-submission_date')
    BundledCoordinatorFormSet = inlineformset_factory(
        Bundle, BundledCoordinator, form=BundledCoordinatorForm,
        max_num=0, can_order=False, can_delete=True)

    form = BundleForm(instance=bundle)
    if request.method == 'POST':
        form = BundleForm(request.POST, instance=bundle)
        formset = BundledCoordinatorFormSet(request.POST, instance=bundle)
        if form.is_valid() and formset.is_valid():
            bundle = form.save()
            formset.save()
            request.info(_('Bundle saved.'))
            return redirect(reverse('oozie:list_bundles'))
    else:
        form = BundleForm(instance=bundle)
        formset = BundledCoordinatorFormSet(instance=bundle)

    return render('editor/edit_bundle.mako', request, {
        'bundle': bundle,
        'bundle_form': form,
        'bundled_coordinator_formset': formset,
        'bundled_coordinator_html_form': get_create_bundled_coordinator_html(request, bundle),
        'history': history
    })
def save_file(request):
    """
    The POST endpoint to save a file in the file editor.

    Does the save and then redirects back to the edit page.
    """
    form = EditorForm(request.POST)
    is_valid = form.is_valid()
    path = form.cleaned_data.get('path')

    if request.POST.get('save') == "Save As":
        if is_valid:
            return render("saveas.mako", request, {'form': form})
        return edit(request, path, form=form)

    if not path:
        raise PopupException(_("No path specified"))
    if not is_valid:
        return edit(request, path, form=form)

    contents = form.cleaned_data['contents']
    encoding = form.cleaned_data['encoding']
    if request.fs.exists(path):
        do_overwrite_save(request.fs, path, contents, encoding)
    else:
        do_newfile_save(request.fs, path, contents, encoding)

    messages.info(request, _('Saved %(path)s.') % {'path': os.path.basename(path)})
    # Point request.path at the edit view before delegating to it.
    request.path = reverse("filebrowser.views.edit", kwargs=dict(path=path))
    return edit(request, path, form)
def edit_group(request, name=None):
    """
    edit_group(request, name = None) -> reply

    @type request: HttpRequest
    @param request: The request object
    @type name: string
    @param name: Default to None, when creating a new group

    Only superusers may create a group
    """
    if not request.user.is_superuser:
        raise PopupException(_("You must be a superuser to add or edit a group."), error_code=401)

    # No name means we are creating a brand-new group.
    instance = Group.objects.get(name=name) if name is not None else None

    if request.method != 'POST':
        form = GroupEditForm(instance=instance)
    else:
        form = GroupEditForm(request.POST, instance=instance)
        if form.is_valid():
            form.save()
            request.info(_('Group information updated'))
            return list_groups(request)

    return render('edit_group.mako', request, dict(form=form, action=request.path, name=name))
def create_coordinator_dataset(request, coordinator):
    """Returns {'status' 0/1, data:html or url}

    POST with a valid form creates the dataset and returns the coordinator
    edit URL; otherwise the modal HTML (with the form) is returned so the
    client can display/retry it.
    """
    dataset = Dataset(coordinator=coordinator)
    response = {'status': -1, 'data': 'None'}

    if request.method == 'POST':
        dataset_form = DatasetForm(request.POST, instance=dataset, prefix='create')
        if dataset_form.is_valid():
            dataset_form.save()
            response['status'] = 0
            response['data'] = reverse('oozie:edit_coordinator', kwargs={'coordinator': coordinator.id}) + "#listDataset"
            request.info(_('Dataset created'))
    else:
        ## Bad
        # Bug fix: an unbound form must exist for the render below —
        # previously `dataset_form` was undefined here and every GET
        # raised NameError instead of returning the modal.
        dataset_form = DatasetForm(instance=dataset, prefix='create')
        response['data'] = _('A POST request is required.')

    if response['status'] != 0:
        response['data'] = render('editor/create_coordinator_dataset.mako', request, {
            'coordinator': coordinator,
            'dataset_form': dataset_form,
            'dataset': dataset,
        }, force_template=True).content
    return HttpResponse(json.dumps(response), mimetype="application/json")
def edit_coordinator_dataset(request, dataset):
    """Returns HTML for modal to edit datasets"""
    response = {'status': -1, 'data': 'None'}

    if request.method == 'POST':
        dataset_form = DatasetForm(request.POST, instance=dataset, prefix='edit')
        if not dataset_form.is_valid():
            response['data'] = dataset_form.errors
        else:
            dataset = dataset_form.save()
            response['status'] = 0
            response['data'] = reverse('oozie:edit_coordinator', kwargs={'coordinator': dataset.coordinator.id}) + "#listDataset"
            request.info(_('Dataset modified'))
            # Warn (but do not fail) on a suspicious date ordering.
            if dataset.start > dataset.coordinator.start:
                request.error(_('Beware: dataset start date was after the coordinator start date.'))
    else:
        dataset_form = DatasetForm(instance=dataset, prefix='edit')

    if response['status'] != 0:
        # Anything but success: hand back the modal HTML with the form.
        response['data'] = render('editor/edit_coordinator_dataset.mako', request, {
            'coordinator': dataset.coordinator,
            'dataset_form': dataset_form,
            'dataset': dataset,
            'path': request.path,
        }, force_template=True).content
    return HttpResponse(json.dumps(response), mimetype="application/json")
def submit_bundle(request, doc_id):
    """Show the parameter popup (GET) or submit the bundle (valid POST)."""
    bundle = Bundle(document=Document2.objects.get(id=doc_id))
    ParametersFormSet = formset_factory(ParameterForm, extra=0)

    if request.method == 'POST':
        params_form = ParametersFormSet(request.POST)
        if params_form.is_valid():
            mapping = dict((param['name'], param['value']) for param in params_form.cleaned_data)
            job_id = _submit_bundle(request, bundle, mapping)
            if request.POST.get('format') == 'json':
                return JsonResponse({'status': 0, 'job_id': job_id, 'type': 'bundle'}, safe=False)
            request.info(_('Bundle submitted.'))
            return redirect(reverse('oozie:list_oozie_bundle', kwargs={'job_id': job_id}))
        # Invalid form: report and fall through to re-render the popup.
        request.error(_('Invalid submission form: %s' % params_form.errors))
    else:
        parameters = bundle.find_all_parameters()
        initial_params = ParameterForm.get_initial_params(dict((param['name'], param['value']) for param in parameters))
        params_form = ParametersFormSet(initial=initial_params)

    popup = render('editor2/submit_job_popup.mako', request, {
        'params_form': params_form,
        'name': bundle.name,
        'action': reverse('oozie:editor_submit_bundle', kwargs={'doc_id': bundle.id}),
        'return_json': request.GET.get('format') == 'json',
        'show_dryrun': False
    }, force_template=True).content
    return JsonResponse(popup, safe=False)
def submit_workflow(request, workflow):
    """Show the parameter popup (GET) or submit the workflow (valid POST)."""
    ParametersFormSet = formset_factory(ParameterForm, extra=0)

    if request.method == 'POST':
        params_form = ParametersFormSet(request.POST)
        if params_form.is_valid():
            mapping = dict((param['name'], param['value']) for param in params_form.cleaned_data)
            job_id = _submit_workflow(request.user, request.fs, workflow, mapping)
            request.info(_('Workflow submitted'))
            return redirect(reverse('oozie:list_oozie_workflow', kwargs={'job_id': job_id}))
        # Invalid form: report and fall through to re-render the popup.
        request.error(_('Invalid submission form: %s' % params_form.errors))
    else:
        parameters = workflow.find_all_parameters()
        initial_params = ParameterForm.get_initial_params(dict((param['name'], param['value']) for param in parameters))
        params_form = ParametersFormSet(initial=initial_params)

    popup = render('editor/submit_job_popup.mako', request, {
        'params_form': params_form,
        'action': reverse('oozie:submit_workflow', kwargs={'workflow': workflow.id})
    }, force_template=True).content
    return HttpResponse(json.dumps(popup), mimetype="application/json")
def save_file(request):
    """
    The POST endpoint to save a file in the file editor.

    Does the save and then redirects back to the edit page.
    """
    form = EditorForm(request.POST)
    is_valid = form.is_valid()
    path = form.cleaned_data.get('path')

    if request.POST.get('save') == "Save As":
        if is_valid:
            data = dict(form=form)
            return render("saveas.mako", request, data)
        return edit(request, path, form=form)

    if not path:
        raise PopupException("No path specified")
    if not is_valid:
        return edit(request, path, form=form)

    contents = form.cleaned_data['contents']
    encoding = form.cleaned_data['encoding']
    if request.fs.exists(path):
        _do_overwrite_save(request.fs, path, contents, encoding)
    else:
        _do_newfile_save(request.fs, path, contents, encoding)

    messages.info(request, _('Saved %(path)s.') % {'path': os.path.basename(path)})
    """ Changing path to reflect the request path of the JFrame that will actually be returned."""
    request.path = urlresolvers.reverse("filebrowser.views.edit", kwargs=dict(path=path))
    return edit(request, path, form)
def edit_bundle(request):
    """Open the bundle editor for an existing (?bundle=<id>) or brand-new bundle."""
    bundle_id = request.GET.get('bundle')
    doc = None

    if bundle_id:
        doc = Document2.objects.get(id=bundle_id)
        bundle = Bundle(document=doc)
    else:
        bundle = Bundle()
        bundle.set_workspace(request.user)

    # Coordinators the bundle can reference, from whichever doc model is active.
    if USE_NEW_EDITOR.get():
        docs = Document2.objects.documents(request.user).search_documents(types=['oozie-coordinator2'])
        coordinators = [{'id': d.id, 'uuid': d.uuid, 'name': d.name} for d in docs]
    else:
        docs = Document.objects.get_docs(request.user, Document2, extra='coordinator2')
        coordinators = [{'id': d.content_object.id, 'uuid': d.content_object.uuid, 'name': d.content_object.name} for d in docs]

    if doc is None:
        can_edit_json = True
    elif USE_NEW_EDITOR.get():
        can_edit_json = doc.can_write(request.user)
    else:
        can_edit_json = doc.doc.get().is_editable(request.user)

    return render('editor2/bundle_editor.mako', request, {
        'bundle_json': bundle.to_json_for_html(),
        'coordinators_json': json.dumps(coordinators, cls=JSONEncoderForHTML),
        'doc_uuid': doc.uuid if doc else '',
        'is_embeddable': request.GET.get('is_embeddable', False),
        'can_edit_json': json.dumps(can_edit_json)
    })
def jobbrowser(request):
    """
    jobbrowser.jsp - a - like.
    """
    # TODO(bc): Is this view even reachable?
    status = request.jt.cluster_status()
    alljobs = get_matching_jobs(request)

    # Partition the job list by status (list comprehensions match the
    # Python 2 filter() result exactly).
    runningjobs = [job for job in alljobs if job.status == 'RUNNING']
    completedjobs = [job for job in alljobs if job.status == 'COMPLETED']
    failedjobs = [job for job in alljobs if job.status == 'FAILED']
    killedjobs = [job for job in alljobs if job.status == 'KILLED']
    jobqueues = request.jt.queues()

    return render("jobbrowser.html", request, {
        "clusterstatus": status,
        "queues": jobqueues,
        "alljobs": alljobs,
        "runningjobs": runningjobs,
        "failedjobs": failedjobs,
        "killedjobs": killedjobs,
        "completedjobs": completedjobs
    })
def jobs(request):
    """
    We get here from /jobs?filterargs
    """
    # Permission checks apply unless jobs are shared or the user is admin.
    check_permission = not conf.SHARE_JOBS.get() and not request.user.is_superuser
    user = request.GET.get('user', request.user.username)

    filters = {'user': user} if user != '' else {}
    matching_jobs = sort_if_necessary(request, get_matching_jobs(request, check_permission, **filters))

    state = request.GET.get('state', 'all')
    text = request.GET.get('text', '')
    retired = request.GET.get('retired', '')

    return render("jobs.mako", request, {
        'jobs': matching_jobs,
        'request': request,
        'state_filter': state,
        'user_filter': user,
        'text_filter': text,
        'retired': retired,
        'filtered': not (state == 'all' and user == '' and text == '')
    })
def single_task_attempt_logs(request, jobid, taskid, attemptid):
    """
    We get here from /jobs/jobid/tasks/taskid/attempts/attemptid/logs
    """
    job_link = JobLinkage(request.jt, jobid)
    task = job_link.get_task(taskid)

    try:
        attempt = task.get_attempt(attemptid)
    except KeyError:
        raise KeyError(_("Cannot find attempt '%(id)s' in task") % dict(id=attemptid))

    try:
        # First entry is the diagnostic log, then the task log sections.
        diagnostic_log = ", ".join(task.diagnosticMap[attempt.attemptId])
        logs = [diagnostic_log] + [section.strip() for section in attempt.get_task_log()]
    except TaskTrackerNotFoundException:
        # Four entries,
        # for diagnostic, stdout, stderr and syslog
        logs = [_("Failed to retrieve log. TaskTracker not found.")] * 4

    return render("attempt_logs.mako", request, {
        "attempt": attempt,
        "taskid": taskid,
        "joblnk": job_link,
        "task": task,
        "logs": logs
    })
def trackers(request):
    """
    We get here from /trackers
    """
    sorted_trackers = sort_if_necessary(request, get_tasktrackers(request))
    return render("tasktrackers.mako", request, {'trackers': sorted_trackers})
def single_job(request, job):
    """
    Single-job page: failed tasks sorted oldest-first, recent tasks
    newest-first. Returns JSON when ?format=json, HTML otherwise.
    Spark jobs are delegated to single_spark_job.
    """
    if job.applicationType == 'SPARK':
        return single_spark_job(request, job)

    # sort(key=...) replaces the Python-2-only cmp= comparator; the
    # ordering (and stability) is identical.
    exec_start_key = lambda task: task.execStartTimeMs

    failed_tasks = job.filter_tasks(task_states=('failed',))
    failed_tasks.sort(key=exec_start_key)
    recent_tasks = job.filter_tasks(task_states=('running', 'succeeded',))
    recent_tasks.sort(key=exec_start_key, reverse=True)

    if request.REQUEST.get('format') == 'json':
        json_failed_tasks = [massage_task_for_json(task) for task in failed_tasks]
        json_recent_tasks = [massage_task_for_json(task) for task in recent_tasks]
        json_job = {
            'job': massage_job_for_json(job, request),
            'failedTasks': json_failed_tasks,
            'recentTasks': json_recent_tasks
        }
        return JsonResponse(json_job, encoder=JSONEncoderForHTML)

    return render('job.mako', request, {
        'request': request,
        'job': job,
        # Only the five most relevant tasks of each kind are shown.
        'failed_tasks': failed_tasks and failed_tasks[:5] or [],
        'recent_tasks': recent_tasks and recent_tasks[:5] or [],
    })
def execute_and_watch(request):
    """Build a one-off 'Execute and watch' editor for a save-as-table / insert-as-query action."""
    notebook_id = request.GET.get('editor', request.GET.get('notebook'))
    snippet_id = int(request.GET['snippet'])
    action = request.GET['action']
    destination = request.GET['destination']

    notebook = Notebook(document=Document2.objects.get(id=notebook_id)).get_data()
    snippet = notebook['snippets'][snippet_id]
    editor_type = snippet['type']

    api = get_api(request, snippet)

    if action == 'save_as_table':
        sql, success_url = api.export_data_as_table(notebook, snippet, destination)
    elif action == 'insert_as_query':
        sql, success_url = api.export_large_data_to_hdfs(notebook, snippet, destination)
    else:
        raise PopupException(_('Action %s is unknown') % action)

    editor = make_notebook(name='Execute and watch', editor_type=editor_type, statement=sql, status='ready-execute')

    return render('editor.mako', request, {
        'notebooks_json': json.dumps([editor.get_data()]),
        'options_json': json.dumps({
            'languages': [{"name": "%s SQL" % editor_type.title(), "type": editor_type}],
            'mode': 'editor',
            'success_url': success_url
        }),
        'editor_type': editor_type,
    })
def list_query_history(request):
    """
    View the history of query (for the current user).

    We get here from /beeswax/query_history?filterargs, with the options being:
      page=<n>            - Controls pagination. Defaults to 1.
      user=<name>         - Show history items from a user. Default to current user only.
                            Also accepts ':all' to show all history items.
      type=<type>         - <type> is "report|hql", for design type. Default to show all.
      design_id=<id>      - Show history for this particular design id.
      sort=<key>          - Sort by the attribute <key>, which is one of:
                            "date", "state", "name" (design name), and "type" (design type)
                            Accepts the form "-date", which sort in descending order.
                            Default to "-date".
      auto_query=<bool>   - Show auto generated actions (drop table, read data, etc). Default False
    """
    DEFAULT_PAGE_SIZE = 10

    share_queries = conf.SHARE_SAVED_QUERIES.get() or request.user.is_superuser

    query_params = request.GET.copy()
    if not share_queries:
        # Without sharing, non-admins may only see their own history.
        query_params['user'] = request.user.username

    page, filter_params = _list_query_history(request.user, query_params, DEFAULT_PAGE_SIZE)

    return render('list_history.mako', request, {
        'request': request,
        'page': page,
        'filter_params': filter_params,
        'share_queries': share_queries,
    })
def load_table(request, table):
    """Render (GET) or process (POST) the LOAD DATA form for a table in the default database."""
    table_obj = dbms.get(request.user).get_table('default', table)

    if request.method != "POST":
        form = beeswax.forms.LoadDataForm(table_obj)
    else:
        form = beeswax.forms.LoadDataForm(table_obj, request.POST)
        if form.is_valid():
            # TODO(philip/todd): When PathField might refer to non-HDFS,
            # we need a pathfield.is_local function.
            hql_parts = ["LOAD DATA INPATH", " '%s'" % form.cleaned_data['path']]
            if form.cleaned_data['overwrite']:
                hql_parts.append(" OVERWRITE")
            hql_parts.append(" INTO TABLE ")
            hql_parts.append("`%s`" % (table,))
            if form.partition_columns:
                assignments = ["%s='%s'" % (column_name, form.cleaned_data[key])
                               for key, column_name in form.partition_columns.iteritems()]
                hql_parts.append(" PARTITION (" + ", ".join(assignments) + ")")
            hql = "".join(hql_parts)

            on_success_url = urlresolvers.reverse(describe_table, kwargs={'table': table})
            return confirm_query(request, hql, on_success_url)

    return render("load_table.mako", request,
                  dict(form=form, table=table, action=request.get_full_path()))
def editor(request):
    """Open the SQL editor on a saved document (?editor=<id>) or a blank query."""
    editor_id = request.GET.get('editor')
    editor_type = request.GET.get('type', 'hive')

    if not editor_id:
        # Create new editor
        notebook = Notebook()
        data = notebook.get_data()
        data['name'] = ''
        data['type'] = 'query-%s' % editor_type  # TODO: Add handling for non-SQL types
        notebook.data = json.dumps(data)
    else:
        # Open existing saved editor document
        notebook = Notebook(document=Document2.objects.get(id=editor_id))
        # Derive the dialect from the stored type, e.g. 'query-hive' -> 'hive'.
        editor_type = notebook.get_data()['type'].rsplit('-', 1)[-1]
        notebook = upgrade_session_properties(request, notebook=notebook)

    return render('editor.mako', request, {
        'notebooks_json': json.dumps([notebook.get_data()]),
        'options_json': json.dumps({
            'languages': [{"name": "%s SQL" % editor_type.title(), "type": editor_type}],
            'mode': 'editor',
            'is_optimizer_enabled': has_optimizer(),
        }),
        'editor_type': editor_type,
    })
def trackers(request):
    """
    We get here from /trackers
    """
    return render("tasktrackers.mako", request, {'trackers': get_tasktrackers(request)})
def delete_group(request, name):
    """
    Delete the named group (superusers only).

    GET renders a confirmation page; POST performs the deletion under
    __groups_lock, refusing to delete the default user group.
    """
    if not request.user.is_superuser:
        raise PopupException(_("You must be a superuser to delete groups."), error_code=401)

    if request.method != 'POST':
        return render("delete_group.mako", request, dict(path=request.path, groupname=name))

    try:
        # Fix: the original declared `global groups_lock`, a name that never
        # exists; the lock actually used is __groups_lock and it is never
        # rebound, so the statement was inert and has been removed.
        __groups_lock.acquire()
        try:
            # Get the default group before getting the group, because we may be
            # trying to delete the default group, and it may not have been created
            # yet
            default_group = get_default_user_group()
            group = Group.objects.get(name=name)
            if default_group is not None and default_group.name == name:
                raise PopupException(_("The default user group may not be deleted."), error_code=401)
            group.delete()
        finally:
            __groups_lock.release()
        request.info(_('The group was deleted.'))
        return redirect(reverse(list_groups))
    except Group.DoesNotExist:
        raise PopupException(_("Group not found."), error_code=404)
def query_history(request):
    """List the current user's jobs, most recently started first."""
    user_jobs = Job.objects.filter(script__user=request.user).order_by("-start_time").all()
    return render("query_history.mako", request, dict(jobs=user_jobs))
def workers_embedded(request):
    """Render the embeddable workers page with an empty context."""
    return render('workers_embedded.mako', request, dict())
def ace_sql_syntax_worker(request):
    """Serve the Ace SQL syntax worker script with a JavaScript content type."""
    body = render('ace_sql_syntax_worker.mako', request, None)
    return HttpResponse(body, content_type="application/javascript")
template = 'search.mako' if is_mobile: template = 'search_m.mako' return render( template, request, { 'collection': collection, 'query': json.dumps(query), 'initial': json.dumps({ 'collections': [], 'layout': DEFAULT_LAYOUT, 'qb_layout': QUEERY_BUILDER_LAYOUT, 'is_latest': _get_latest(), 'engines': get_engines(request.user) }), 'is_owner': collection_doc.can_write(request.user) if USE_NEW_EDITOR.get() else collection_doc.doc.get().can_write(request.user), 'can_edit_index': can_edit_index(request.user), 'is_embeddable': request.GET.get('is_embeddable', False), 'mobile': is_mobile, }) def index_m(request): return index(request, True)
return no_collections(request) try: collection = Collection.objects.get( id=collection_id) # TODO perms HUE-1987 except Exception, e: raise PopupException(e, title=_('Error while accessing the collection')) query = {'qs': [{'q': ''}], 'fqs': [], 'start': 0} return render( 'search.mako', request, { 'collection': collection, 'query': query, 'initial': json.dumps({ 'collections': [], 'layout': [] }), }) @allow_admin_only def new_search(request): collections = SearchController(request.user).get_all_indexes() if not collections: return no_collections(request) collection = Collection(name=collections[0], label=collections[0]) query = {'qs': [{'q': ''}], 'fqs': [], 'start': 0}
def path_forbidden(request):
  """Render the 403 (forbidden path) page."""
  is_embeddable = request.GET.get('is_embeddable', False)
  return render('403.mako', request, {'is_embeddable': is_embeddable})
def ko_editor(request):
  """Render the Knockout editor page with the apps visible to the user."""
  return render('ko_editor.mako', request, dict(apps=appmanager.get_apps_dict(request.user)))
def import_wizard(request):
  """
  Help users define a table based on a file they want to import to Hive.

  Limitations:
    - Rows are delimited (no serde).
    - No detection for map and array types.
    - No detection for the presence of column header in the first row.
    - No partition table.
    - Does not work with binary data.
  """
  encoding = i18n.get_site_encoding()

  if request.method == 'POST':
    # Have a while loop to allow an easy way to break
    for _ in range(1):
      #
      # General processing logic:
      # - We have 3 steps. Each requires the previous.
      #   * Step 1 : Table name and file location
      #   * Step 2a : Display sample with auto chosen delim
      #   * Step 2b : Display sample with user chosen delim (if user chooses one)
      #   * Step 3 : Display sample, and define columns
      # - Each step is represented by a different form. The form of an earlier step
      #   should be present when submitting to a later step.
      # - To preserve the data from the earlier steps, we send the forms back as
      #   hidden fields. This way, when users revisit a previous step, the data would
      #   be there as well.
      #
      delim_is_auto = False
      fields_list, n_cols = [[]], 0
      s3_col_formset = None

      # Everything requires a valid file form
      s1_file_form = hcatalog.forms.CreateByImportFileForm(request.POST)
      if not s1_file_form.is_valid():
        break

      # Which submit button was pressed tells us which wizard transition to run.
      do_s2_auto_delim = request.POST.get('submit_file')        # Step 1 -> 2
      do_s2_user_delim = request.POST.get('submit_preview')     # Step 2 -> 2
      do_s3_column_def = request.POST.get('submit_delim')       # Step 2 -> 3
      do_hive_create = request.POST.get('submit_create')        # Step 3 -> execute
      cancel_s2_user_delim = request.POST.get('cancel_delim')   # Step 2 -> 1
      cancel_s3_column_def = request.POST.get('cancel_create')  # Step 3 -> 2

      # Exactly one of these should be True
      assert len(filter(None, (do_s2_auto_delim, do_s2_user_delim, do_s3_column_def, do_hive_create, cancel_s2_user_delim, cancel_s3_column_def))) == 1, 'Invalid form submission'

      s2_delim_form = None
      #
      # Fix up what we should do in case any form is invalid
      #
      if not do_s2_auto_delim:
        # We should have a valid delim form
        s2_delim_form = hcatalog.forms.CreateByImportDelimForm(request.POST)
        if not s2_delim_form.is_valid():
          # Go back to picking delimiter
          do_s2_user_delim, do_s3_column_def, do_hive_create = True, False, False
      if do_hive_create:
        # We should have a valid columns formset
        s3_col_formset = hcatalog.forms.ColumnTypeFormSet(prefix='cols', data=request.POST)
        if not s3_col_formset.is_valid():
          # Go back to define columns
          do_s3_column_def, do_hive_create = True, False

      #
      # Go to step 2: We've just picked the file. Preview it.
      #
      if do_s2_auto_delim:
        delim_is_auto = True
        # Let the previewer try every known reader type/delimiter.
        fields_list, n_cols, col_names, s2_delim_form = _delim_preview(
            request.fs, s1_file_form, encoding, [reader.TYPE for reader in FILE_READERS], DELIMITERS,
            True, False, None)

      if (do_s2_user_delim or do_s3_column_def or cancel_s3_column_def) and s2_delim_form.is_valid():
        # Delimit based on input
        fields_list, n_cols, col_names, s2_delim_form = _delim_preview(
            request.fs, s1_file_form, encoding,
            (s2_delim_form.cleaned_data['file_type'],),
            (s2_delim_form.cleaned_data['delimiter'],),
            s2_delim_form.cleaned_data.get('parse_first_row_as_header'),
            s2_delim_form.cleaned_data.get('apply_excel_dialect'),
            s2_delim_form.cleaned_data['path_tmp'])

      if do_s2_auto_delim or do_s2_user_delim or cancel_s3_column_def:
        return render('choose_delimiter.mako', request, dict(
            action=urlresolvers.reverse(import_wizard),
            delim_readable=DELIMITER_READABLE.get(s2_delim_form['delimiter'].data[0], s2_delim_form['delimiter'].data[1]),
            initial=delim_is_auto,
            file_form=s1_file_form,
            delim_form=s2_delim_form,
            fields_list=fields_list,
            delimiter_choices=hcatalog.forms.TERMINATOR_CHOICES,
            col_names=col_names,
        ))

      #
      # Go to step 3: Define column.
      #
      if do_s3_column_def:
        if s3_col_formset is None:
          # First time at step 3: seed one column form per previewed column,
          # defaulting every type to 'string'.
          columns = []
          for col_name in col_names:
            columns.append(dict(
                column_name=col_name,
                column_type='string',
            ))
          s3_col_formset = hcatalog.forms.ColumnTypeFormSet(prefix='cols', initial=columns)
        return render('define_columns.mako', request, dict(
            action=urlresolvers.reverse(import_wizard),
            file_form=s1_file_form,
            delim_form=s2_delim_form,
            column_formset=s3_col_formset,
            fields_list=fields_list,
            n_cols=n_cols,
        ))

      #
      # Finale: Execute
      #
      if do_hive_create:
        delim = s2_delim_form.cleaned_data['delimiter']
        table_name = s1_file_form.cleaned_data['name']
        proposed_query = django_mako.render_to_string("create_table_statement.mako", {
            'table': dict(name=table_name,
                          comment=s1_file_form.cleaned_data['comment'],
                          row_format='Delimited',
                          field_terminator=delim),
            'columns': [f.cleaned_data for f in s3_col_formset.forms],
            'partition_columns': []
        })
        do_load_data = s1_file_form.cleaned_data.get('do_import')
        path = s1_file_form.cleaned_data['path']
        path_tmp = s2_delim_form.cleaned_data['path_tmp']
        # The preview step may have written a cleaned-up temp copy of the file;
        # load from it when importing, otherwise discard it.
        if request.fs.exists(path_tmp):
          if do_load_data:
            path = path_tmp
          else:
            request.fs.remove(path_tmp)
        return _submit_create_and_load(request, proposed_query, table_name, path, do_load_data)
  else:
    s1_file_form = hcatalog.forms.CreateByImportFileForm()

  # GET, or an invalid file form: (re)start at step 1.
  return render('choose_file.mako', request, dict(
      action=urlresolvers.reverse(import_wizard),
      file_form=s1_file_form,
  ))
) # Mako outputs bytestring in utf8 proposed_query = proposed_query.decode('utf-8') tablename = form.table.cleaned_data['name'] tables = [] try: hcat_client().create_table("default", proposed_query) tables = hcat_client().get_tables() except Exception, ex: raise PopupException('Error on creating table', title="Error on creating table", detail=str(ex)) return render("show_tables.mako", request, dict(tables=tables,)) else: form.bind() return render("create_table_manually.mako", request, dict( action="#", table_form=form.table, columns_form=form.columns, partitions_form=form.partitions, )) IMPORT_PEEK_SIZE = 8192 IMPORT_PEEK_NLINES = 10 DELIMITERS = [hive_val for hive_val, _, _ in hcatalog.common.TERMINATORS] DELIMITER_READABLE = {'\\001': 'ctrl-As', '\\002': 'ctrl-Bs', '\\003': 'ctrl-Cs', '\\t': 'tabs', ',': 'commas', ' ': 'spaces'} FILE_READERS = []
def no_collections(request):
  """Render the empty-state page shown when there are no search collections."""
  context = {'is_embeddable': request.GET.get('is_embeddable', False)}
  return render('no_collections.mako', request, context)
def queues(request):
  """We get here from /queues"""
  queuelist = request.jt.queues()
  return render("queues.html", request, {"queuelist": queuelist})
def dock_jobs(request):
  """Render the jobs dock widget with the current user's job counts by state."""
  matching_jobs = get_job_count_by_state(request, request.user.username)
  return render("jobs_dock_info.mako", request, {'jobs': matching_jobs}, force_template=True)
def job_attempt_logs(request, job, attempt_index=0):
  """Render the log page for one attempt (default: the first) of a job."""
  context = {"attempt_index": attempt_index, "job": job}
  return render("job_attempt_logs.mako", request, context)
def clusterstatus(request):
  """
  We get here from /clusterstatus

  The Cluster wrapper around the job tracker is passed directly as the
  template context.
  """
  status = Cluster(request.jt)
  return render("clusterstatus.html", request, status)
if request.GET.get('format') == 'json': result = {'status': -1, 'message': ''} try: get_api(request.user, request.jt).get_job(jobid=jobid) result['status'] = 0 except ApplicationNotRunning, e: result['status'] = 1 except Exception, e: result['message'] = _('Error polling job %s: %s') % (jobid, e) return HttpResponse(encode_json_for_js(result), mimetype="application/json") else: return render('job_not_assigned.mako', request, { 'jobid': jobid, 'path': path }) def jobs(request): user = request.GET.get('user', request.user.username) state = request.GET.get('state') text = request.GET.get('text') retired = request.GET.get('retired') if request.GET.get('format') == 'json': jobs = get_api(request.user, request.jt).get_jobs(user=request.user, username=user, state=state, text=text, retired=retired)
def job_counters(request, job):
  """Render the counters page for a single job."""
  return render("counters.html", request, dict(counters=job.counters))
def listdir_paged(request, path):
  """
  A paginated version of listdir.

  Query parameters:
    pagenum           - The page number to show. Defaults to 1.
    pagesize          - How many to show on a page. Defaults to 30.
    sortby=?          - Specify attribute to sort by. Accepts:
                          (type, name, atime, mtime, size, user, group)
                        Defaults to name.
    descending        - Specify a descending sort order.
                        Default to false.
    filter=?          - Specify a substring filter to search for in
                        the filename field.

  Raises PopupException if `path` is not a directory.
  """
  if not request.fs.isdir(path):
    raise PopupException("Not a directory: %s" % (path,))

  pagenum = int(request.GET.get('pagenum', 1))
  pagesize = int(request.GET.get('pagesize', 30))

  # Impersonation: superusers (or users with the security app's 'impersonate'
  # permission) may list as another user via ?doas=; a `doas` attribute set on
  # the request object takes precedence over the query parameter.
  do_as = None
  if request.user.is_superuser or request.user.has_hue_permission(action="impersonate", app="security"):
    do_as = request.GET.get('doas', request.user.username)
  if hasattr(request, 'doas'):
    do_as = request.doas

  home_dir_path = request.user.get_home_directory()
  breadcrumbs = parse_breadcrumbs(path)

  if do_as:
    all_stats = request.fs.do_as_user(do_as, request.fs.listdir_stats, path)
  else:
    all_stats = request.fs.listdir_stats(path)

  # Filter first
  filter_str = request.GET.get('filter', None)
  if filter_str:
    filtered_stats = filter(lambda sb: filter_str in sb['name'], all_stats)
    all_stats = filtered_stats

  # Sort next
  sortby = request.GET.get('sortby', None)
  descending_param = request.GET.get('descending', None)
  if sortby is not None:
    if sortby not in ('type', 'name', 'atime', 'mtime', 'user', 'group', 'size'):
      # Unknown sort key: log and fall through unsorted rather than failing.
      logger.info("Invalid sort attribute '%s' for listdir." % (sortby,))
    else:
      all_stats = sorted(all_stats, key=operator.attrgetter(sortby), reverse=coerce_bool(descending_param))

  # Do pagination
  page = paginator.Paginator(all_stats, pagesize).page(pagenum)
  shown_stats = page.object_list

  # Include parent dir always as second option, unless at filesystem root.
  if Hdfs.normpath(path) != posixpath.sep:
    parent_path = request.fs.join(path, "..")
    parent_stat = request.fs.stats(parent_path)
    # The 'path' field would be absolute, but we want its basename to be
    # actually '..' for display purposes. Encode it since _massage_stats expects byte strings.
    parent_stat['path'] = parent_path
    parent_stat['name'] = ".."
    shown_stats.insert(0, parent_stat)

  # Include same dir always as first option to see stats of the current folder
  current_stat = request.fs.stats(path)
  # The 'path' field would be absolute, but we want its basename to be
  # actually '.' for display purposes. Encode it since _massage_stats expects byte strings.
  current_stat['path'] = path
  current_stat['name'] = "."
  shown_stats.insert(1, current_stat)

  page.object_list = [ _massage_stats(request, s) for s in shown_stats ]

  is_fs_superuser = _is_hdfs_superuser(request)
  data = {
    'path': path,
    'breadcrumbs': breadcrumbs,
    'current_request_path': request.path,
    'files': page.object_list,
    'page': _massage_page(page),
    'pagesize': pagesize,
    'home_directory': request.fs.isdir(home_dir_path) and home_dir_path or None,
    'sortby': sortby,
    'descending': descending_param,
    # The following should probably be deprecated
    'cwd_set': True,
    'file_filter': 'any',
    'current_dir_path': path,
    'is_fs_superuser': is_fs_superuser,
    # Group/user pickers are only populated for HDFS superusers (chown UI).
    'groups': is_fs_superuser and [str(x) for x in Group.objects.values_list('name', flat=True)] or [],
    'users': is_fs_superuser and [str(x) for x in User.objects.values_list('username', flat=True)] or [],
    'superuser': request.fs.superuser,
    'supergroup': request.fs.supergroup,
    'is_sentry_managed': request.fs.is_sentry_managed(path),
    'apps': appmanager.get_apps_dict(request.user).keys()
  }
  return render('listdir.mako', request, data)
def display(request, path):
  """
  Implements displaying part of a file.

  GET arguments are length, offset, mode, compression and encoding
  with reasonable defaults chosen. A begin/end byte pair may be given
  instead of offset/length (begin is 1-indexed in the UI).

  Note that display by length and offset are on bytes, not on characters.

  TODO(philip): Could easily built-in file type detection
  (perhaps using something similar to file(1)), as well as
  more advanced binary-file viewing capability (de-serialize
  sequence files, decompress gzipped text files, etc.).
  There exists a python-magic package to interface with libmagic.
  """
  if not request.fs.isfile(path):
    raise PopupException(_("Not a file: '%(path)s'") % {'path': path})

  # display inline files just if it's not an ajax request
  if not request.is_ajax():
    mimetype = mimetypes.guess_type(path)[0]
    if mimetype is not None and INLINE_DISPLAY_MIMETYPE.search(mimetype):
      path_enc = urlencode(path)
      return redirect(reverse('filebrowser.views.download', args=[path_enc]) + '?disposition=inline')

  stats = request.fs.stats(path)
  encoding = request.GET.get('encoding') or i18n.get_site_encoding()

  # I'm mixing URL-based parameters and traditional
  # HTTP GET parameters, since URL-based parameters
  # can't naturally be optional.

  # Need to deal with possibility that length is not present
  # because the offset came in via the toolbar manual byte entry.
  end = request.GET.get("end")
  if end:
    end = int(end)
  begin = request.GET.get("begin", 1)
  if begin:
    # Subtract one to zero index for file read
    begin = int(begin) - 1
  if end:
    # begin/end pair wins over offset/length when both are given.
    offset = begin
    length = end - begin
    if begin >= end:
      raise PopupException(_("First byte to display must be before last byte to display."))
  else:
    length = int(request.GET.get("length", DEFAULT_CHUNK_SIZE_BYTES))
    # Display first block by default.
    offset = int(request.GET.get("offset", 0))

  mode = request.GET.get("mode")
  compression = request.GET.get("compression")

  if mode and mode not in ["binary", "text"]:
    raise PopupException(_("Mode must be one of 'binary' or 'text'."))
  if offset < 0:
    raise PopupException(_("Offset may not be less than zero."))
  if length < 0:
    raise PopupException(_("Length may not be less than zero."))
  if length > MAX_CHUNK_SIZE_BYTES:
    raise PopupException(_("Cannot request chunks greater than %(bytes)d bytes.") % {'bytes': MAX_CHUNK_SIZE_BYTES})

  # Do not decompress in binary mode.
  if mode == 'binary':
    compression = 'none'

  # Read out based on meta.
  compression, offset, length, contents =\
      read_contents(compression, path, request.fs, offset, length)

  # Get contents as string for text mode, or at least try
  uni_contents = None
  if not mode or mode == 'text':
    uni_contents = unicode(contents, encoding, errors='replace')
    # A replacement char in the decoded text means the bytes were not valid
    # in the requested encoding, i.e. the content looks binary.
    is_binary = uni_contents.find(i18n.REPLACEMENT_CHAR) != -1
    # Auto-detect mode
    if not mode:
      mode = is_binary and 'binary' or 'text'

  # Get contents as bytes
  if mode == "binary":
    xxd_out = list(xxd.xxd(offset, contents, BYTES_PER_LINE, BYTES_PER_SENTENCE))

  dirname = posixpath.dirname(path)
  # Start with index-like data:
  data = _massage_stats(request, request.fs.stats(path))
  # And add a view structure:
  data["success"] = True
  data["view"] = {
    'offset': offset,
    'length': length,
    'end': offset + len(contents),
    'dirname': dirname,
    'mode': mode,
    'compression': compression,
    'size': stats['size'],
    'max_chunk_size': str(MAX_CHUNK_SIZE_BYTES)
  }
  data["filename"] = os.path.basename(path)
  data["editable"] = stats['size'] < MAX_FILEEDITOR_SIZE
  if mode == "binary":
    # This might be the wrong thing for ?format=json; doing the
    # xxd'ing in javascript might be more compact, or sending a less
    # intermediate representation...
    logger.debug("xxd: " + str(xxd_out))
    data['view']['xxd'] = xxd_out
    data['view']['masked_binary_data'] = False
  else:
    data['view']['contents'] = uni_contents
    data['view']['masked_binary_data'] = is_binary

  data['breadcrumbs'] = parse_breadcrumbs(path)

  return render("display.mako", request, data)
def no_collections(request):
  """Render the page shown when the user has no collections to display."""
  return render('no_collections.mako', request, dict())
finally: f.close() else: current_contents = u"" form = EditorForm(dict(path=path, contents=current_contents, encoding=encoding)) data = dict( exists=(stats is not None), stats=stats, form=form, path=path, filename=os.path.basename(path), dirname=os.path.dirname(path), breadcrumbs = parse_breadcrumbs(path)) return render("edit.mako", request, data) def save_file(request): """ The POST endpoint to save a file in the file editor. Does the save and then redirects back to the edit page. """ form = EditorForm(request.POST) is_valid = form.is_valid() path = form.cleaned_data.get('path') if request.POST.get('save') == "Save As": if not is_valid: return edit(request, path, form=form)
'beeswax:watch_query_history', kwargs={ 'query_history_id': query_history.id }) + '?on_success_url=' + reverse('metastore:databases') return redirect(url) except Exception, ex: error_message, log = dbms.expand_exception(ex, db) error = _("Failed to remove %(databases)s. Error: %(error)s") % { 'databases': ','.join(databases), 'error': error_message } raise PopupException(error, title=_("Hive Error"), detail=log) else: title = _("Do you really want to delete the database(s)?") return render('confirm.mako', request, { 'url': request.path, 'title': title }) def get_database_metadata(request, database): db = dbms.get(request.user) response = {'status': -1, 'data': ''} try: db_metadata = db.get_database(database) response['status'] = 0 db_metadata['hdfs_link'] = location_to_url(db_metadata['location']) response['data'] = db_metadata except Exception, ex: response['status'] = 1 response['data'] = _("Cannot get metadata for database: %s") % ( database, )
def describe_partitions(request, database, table):
  """
  Show the partitions of a partitioned table.

  POST (or GET with ?format=json) returns the partition keys and values as
  JSON; a plain GET renders the describe_partitions page. Raises
  PopupException if the table is not partitioned.
  """
  db = dbms.get(request.user)

  table_obj = db.get_table(database, table)
  if not table_obj.partition_keys:
    raise PopupException(_("Table '%(table)s' is not partitioned.") % {'table': table})

  # NOTE(review): request.REQUEST merges GET and POST (deprecated in newer
  # Django); sort/filter values can come from either.
  reverse_sort = request.REQUEST.get("sort", "desc").lower() == "desc"

  if request.method == "POST":
    # Build a partition spec like "key1='v1',key2='v2'" from whichever
    # partition-key filters the client submitted.
    partition_filters = {}
    for part in table_obj.partition_keys:
      if request.REQUEST.get(part.name):
        partition_filters[part.name] = request.REQUEST.get(part.name)
    partition_spec = ','.join(["%s='%s'" % (k, v) for k, v in partition_filters.items()])
  else:
    partition_spec = ''

  partitions = db.get_partitions(database, table_obj, partition_spec, reverse_sort=reverse_sort)
  massaged_partitions = [_massage_partition(database, table_obj, partition) for partition in partitions]

  if request.method == "POST" or request.GET.get('format', 'html') == 'json':
    return JsonResponse({
      'partition_keys_json': [partition.name for partition in table_obj.partition_keys],
      'partition_values_json': massaged_partitions,
    })
  else:
    return render("describe_partitions.mako", request, {
      'breadcrumbs': [
        {
          'name': database,
          'url': reverse('metastore:show_tables', kwargs={'database': database})
        },
        {
          'name': table,
          'url': reverse('metastore:describe_table', kwargs={'database': database, 'table': table})
        },
        {
          'name': 'partitions',
          'url': reverse('metastore:describe_partitions', kwargs={'database': database, 'table': table})
        },
      ],
      'database': database,
      'table': table_obj,
      'partitions': partitions,
      'partition_keys_json': json.dumps([partition.name for partition in table_obj.partition_keys]),
      'partition_values_json': json.dumps(massaged_partitions),
      'request': request,
      'has_write_access': has_write_access(request.user)
    })
def serve_404_error(request, *args, **kwargs):
  """Registered handler for 404. We just return a simple error"""
  access_warn(request, "404 not found")
  context = dict(uri=request.build_absolute_uri())
  return render("404.mako", request, context, status=404)
def execute_and_watch(request):
  """
  Run a generated statement for one of three actions — save_as_table,
  insert_as_query, or index_query — and show (or redirect to) its progress.

  Reads editor/notebook id, snippet index, action and destination from the
  query string. index_query redirects to the indexing job; the other actions
  render the editor with a notebook set to 'ready-execute'.
  """
  notebook_id = request.GET.get('editor', request.GET.get('notebook'))
  snippet_id = int(request.GET['snippet'])
  action = request.GET['action']
  destination = request.GET['destination']

  notebook = Notebook(document=Document2.objects.get(id=notebook_id)).get_data()
  snippet = notebook['snippets'][snippet_id]
  editor_type = snippet['type']

  api = get_api(request, snippet)

  if action == 'save_as_table':
    sql, success_url = api.export_data_as_table(notebook, snippet, destination)
    editor = make_notebook(name='Execute and watch', editor_type=editor_type, statement=sql, status='ready-execute', database=snippet['database'])
  elif action == 'insert_as_query':
    # TODO: checks/workarounds in case of non impersonation or Sentry
    # TODO: keep older simpler way in case of known not many rows?
    sql, success_url = api.export_large_data_to_hdfs(notebook, snippet, destination)
    editor = make_notebook(name='Execute and watch', editor_type=editor_type, statement=sql, status='ready-execute', database=snippet['database'], on_success_url=success_url)
  elif action == 'index_query':
    # '__hue__' means "index into a managed temp destination": derive a table
    # name from the snippet and stream results live via the query handle.
    if destination == '__hue__':
      destination = _get_snippet_name(notebook, unique=True, table_format=True)
      live_indexing = True
    else:
      live_indexing = False

    sql, success_url = api.export_data_as_table(notebook, snippet, destination, is_temporary=True, location='')
    editor = make_notebook(name='Execute and watch', editor_type=editor_type, statement=sql, status='ready-execute')

    # Fetch a result sample to derive the indexer's column schema.
    sample = get_api(request, snippet).fetch_result(notebook, snippet, 0, start_over=True)

    from indexer.api3 import _index # Will be moved to the lib
    from indexer.file_format import HiveFormat
    from indexer.fields import Field

    file_format = {
        'name': 'col',
        'inputFormat': 'query',
        'format': {'quoteChar': '"', 'recordSeparator': '\n', 'type': 'csv', 'hasHeader': False, 'fieldSeparator': '\u0001'},
        "sample": '',
        "columns": [
            # Strip any 'table.' prefix from the column name and map the Hive
            # type to an indexer field type (default 'string').
            Field(col['name'].rsplit('.')[-1], HiveFormat.FIELD_TYPE_TRANSLATE.get(col['type'], 'string')).to_dict()
            for col in sample['meta']
        ]
    }

    if live_indexing:
      file_format['inputFormat'] = 'hs2_handle'
      file_format['fetch_handle'] = lambda rows, start_over: get_api(request, snippet).fetch_result(notebook, snippet, rows=rows, start_over=start_over)

    job_handle = _index(request, file_format, destination, query=notebook['uuid'])

    # Live indexing goes straight to the new collection; otherwise follow the
    # Oozie workflow that performs the indexing.
    if live_indexing:
      return redirect(reverse('search:browse', kwargs={'name': destination}))
    else:
      return redirect(reverse('oozie:list_oozie_workflow', kwargs={'job_id': job_handle['handle']['id']}))
  else:
    raise PopupException(_('Action %s is unknown') % action)

  return render('editor.mako', request, {
      'notebooks_json': json.dumps([editor.get_data()]),
      'options_json': json.dumps({
          'languages': [{"name": "%s SQL" % editor_type.title(), "type": editor_type}],
          'mode': 'editor',
          'editor_type': editor_type,
          'success_url': success_url
      }),
      'editor_type': editor_type,
  })
def assist_m(request):
  """Render the mobile assist panel; the template needs no context."""
  return render('assist_m.mako', request, None)
def csrf_failure(request, reason=None):
  """Registered handler for CSRF failures: log the reason, render a 403 page."""
  access_warn(request, reason)
  context = dict(uri=request.build_absolute_uri())
  return render("403_csrf.mako", request, context, status=403)
def unsupported(request):
  """Render the 'unsupported' page; the template needs no context."""
  return render('unsupported.mako', request, None)
def jasmine(request):
  """Render the Jasmine test-runner page; the template needs no context."""
  return render('jasmine.mako', request, None)
def ko_metastore(request):
  """Render the Knockout metastore page with the apps visible to the user."""
  apps = appmanager.get_apps_dict(request.user)
  return render('ko_metastore.mako', request, dict(apps=apps))
LOG.error(smart_str(e)) can_edit_json = doc is None or (doc.can_write(request.user) if USE_NEW_EDITOR.get() else doc.doc.get().is_editable(request.user)) return render( 'editor2/workflow_editor.mako', request, { 'layout_json': json.dumps(workflow_data['layout'], cls=JSONEncoderForHTML), 'workflow_json': json.dumps(workflow_data['workflow'], cls=JSONEncoderForHTML), 'credentials_json': json.dumps(credentials.credentials.keys(), cls=JSONEncoderForHTML), 'workflow_properties_json': json.dumps(WORKFLOW_NODE_PROPERTIES, cls=JSONEncoderForHTML), 'doc_uuid': doc.uuid if doc else '', 'subworkflows_json': json.dumps(_get_workflows(request.user), cls=JSONEncoderForHTML), 'can_edit_json': json.dumps(can_edit_json), 'is_embeddable': request.GET.get('is_embeddable', False), }) @check_editor_access_permission def new_workflow(request): doc = None workflow = Workflow(user=request.user)