def render_table_display_page(request, workflow, view, columns, ajax_url):
    """
    Render the table display page.

    Builds the template context taking into account the columns to show and
    the AJAX URL that dataTables will use to populate the table content.

    :param request: HTTP request
    :param workflow: Workflow object used to access the data frame
    :param view: View to use to render the table (or None)
    :param columns: Columns to display in the page
    :param ajax_url: URL to invoke to populate the table
    :return: HTTP Response
    """
    # Base context common to every rendering of the page
    context = {
        'workflow': workflow,
        'query_builder_ops': workflow.get_query_builder_ops_as_str(),
        'ajax_url': ajax_url,
        'views': workflow.views.all(),
        'no_actions': workflow.actions.count() == 0,
    }

    if not ops.workflow_id_has_table(workflow.id):
        # No data frame present: nothing to show
        context['columns'] = None
        context['columns_datatables'] = []
    else:
        # Escape dots in column names (dataTables interprets them as
        # nested-object accessors)
        datatables_cols = [{'data': 'Operations'}]
        for col in columns:
            datatables_cols.append({'data': col.name.replace('.', '\\.')})
        context['columns'] = columns
        context['columns_datatables'] = datatables_cols

    # If using a view, add it to the context
    if view:
        context['view'] = view

    return render(request, 'table/display.html', context)
def display_view_ss(request, pk):
    """
    AJAX endpoint serving the subset of the table defined by a view.

    :param request: HTTP request from dataTables
    :param pk: Primary key of the view to be used
    :return: AJAX response
    """
    workflow = get_workflow(request)
    if not workflow:
        return JsonResponse(
            {'error': _('Incorrect request. Unable to process')}
        )

    # Without a data frame there is nothing to serve.
    if not ops.workflow_id_has_table(workflow.id):
        return JsonResponse({'error': _('There is no data in the table')})

    try:
        view = View.objects.get(pk=pk, workflow=workflow)
    except ObjectDoesNotExist:
        # View not found (e.g. a stale session): report incorrect reference
        return JsonResponse({'error': _('Incorrect view reference')})

    return render_table_display_data(
        request,
        workflow,
        view.columns.all(),
        view.formula,
        view.id
    )
def get_context_data(self, **kwargs):
    """
    Extend the detail-view context with table information, the number of key
    columns, and a backward-compatibility repair of column positions.
    """
    context = super(WorkflowDetailView, self).get_context_data(**kwargs)

    # Table information is only available when a data frame exists
    context['table_info'] = None
    if ops.workflow_id_has_table(self.object.id):
        context['table_info'] = {
            'num_rows': self.object.nrows,
            'num_cols': self.object.ncols,
            'num_actions': self.object.actions.all().count(),
            'num_attributes': len(self.object.attributes),
        }

    # Put the number of key columns in the context
    context['num_key_columns'] = self.object.columns.filter(
        is_key=True
    ).count()

    # Guarantee that column position is set (backward compatibility with
    # workflows created before positions existed)
    columns = self.object.columns.all()
    if any(col.position == 0 for col in columns):
        # At least one column has position zero: renumber all of them
        for index, col in enumerate(columns):
            col.position = index + 1
            col.save()

    # Safety check for consistency (only in development)
    if settings.DEBUG:
        pandas_db.check_wf_df(self.object)

        # Columns are properly numbered 1..N
        positions = self.object.columns.all().values_list(
            'position', flat=True
        )
        assert sorted(positions) == list(range(1, len(positions) + 1))

    return context
def csvdownload(request, pk=None):
    """
    Serve the workflow table (optionally subset by a view) as a CSV download.

    :param request: HTML request
    :param pk: If given, the PK of the view to subset the table
    :return: Return a CSV download of the data in the table
    """
    # Get the appropriate workflow object
    workflow = get_workflow(request)
    if not workflow:
        return redirect('workflow:index')

    # Check if dataframe is present
    if not ops.workflow_id_has_table(workflow.id):
        # Go back to show the workflow detail
        return redirect(reverse('workflow:detail',
                                kwargs={'pk': workflow.id}))

    # Get the columns from the view (if given)
    view = None
    if pk:
        try:
            # FIX: also restrict the lookup to the workflow in the session
            # (consistent with display_view_ss). Previously a view pk from a
            # different accessible workflow would pass the user/shared check
            # and subset this workflow's table with mismatched columns.
            view = View.objects.filter(
                Q(workflow__user=request.user) |
                Q(workflow__shared=request.user)
            ).filter(workflow=workflow).distinct().prefetch_related(
                'columns'
            ).get(pk=pk)
        except ObjectDoesNotExist:
            # Go back to show the workflow detail
            return redirect(reverse('workflow:detail',
                                    kwargs={'pk': workflow.id}))

    # Fetch the data frame: column names come from the view if given,
    # otherwise all workflow columns
    if view:
        col_names = [x.name for x in view.columns.all()]
    else:
        col_names = workflow.get_column_names()
    data_frame = pandas_db.get_subframe(workflow.id, view, col_names)

    # Create the response object
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = \
        'attachment; filename="ontask_table.csv"'

    # Dump the data frame as the content of the response object
    data_frame.to_csv(path_or_buf=response,
                      sep=str(','),
                      index=False,
                      encoding='utf-8')

    return response
def display_ss(request):
    """
    AJAX endpoint returning a subset of the table for visualisation.

    :param request: HTTP request from dataTables
    :return: AJAX response
    """
    workflow = get_workflow(request)
    if not workflow:
        return JsonResponse(
            {'error': _('Incorrect request. Unable to process')})

    # Nothing to serve when the workflow has no data frame.
    if not ops.workflow_id_has_table(workflow.id):
        return JsonResponse({'error': _('There is no data in the table')})

    # Delegate to the common rendering routine using every workflow column
    # and no view formula.
    return render_table_display_data(request,
                                     workflow,
                                     workflow.columns.all(),
                                     None)
def upload_s2(request):
    """
    Step 2 of the upload process: select and rename the columns to upload.

    The four step process will populate the following dictionary with name
    upload_data (divided by steps in which they are set)

    ASSUMES:

    initial_column_names: List of column names in the initial file.
    column_types: List of column types as detected by pandas
    src_is_key_column: Boolean list with src columns that are unique
    step_1: URL name of the first step

    CREATES:

    rename_column_names: Modified column names to remove ambiguity when
                         merging.
    columns_to_upload: Boolean list denoting the columns in SRC that are
                       marked for upload.
    keep_key_column: Boolean list with those key columns that need to be
                     kept.

    :param request: Web request
    :return: the dictionary upload_data in the session object
    """
    workflow = get_workflow(request)
    if not workflow:
        return redirect('workflow:index')

    # The dictionary with information about the upload is stored in the
    # session.
    upload_data = request.session.get('upload_data', None)
    if not upload_data:
        # If there is no object, or it is an empty dict, it denotes a direct
        # jump to this step, get back to the dataops page
        return redirect('dataops:uploadmerge')

    # Get the column names, types, and those that are unique from the data
    # frame.
    try:
        # FIX: use item access instead of dict.get(). dict.get() never raises
        # KeyError, so the original except clause was dead code and an
        # out-of-order invocation fell through with None values (crashing
        # later with TypeError) instead of redirecting here.
        initial_columns = upload_data['initial_column_names']
        column_types = upload_data['column_types']
        src_is_key_column = upload_data['src_is_key_column']
    except KeyError:
        # The page has been invoked out of order
        return redirect(
            upload_data.get('step_1', reverse('dataops:uploadmerge')))

    # Get or create the list with the renamed column names
    rename_column_names = upload_data.get('rename_column_names', None)
    if rename_column_names is None:
        rename_column_names = initial_columns[:]
        upload_data['rename_column_names'] = rename_column_names

    # Get or create list of booleans identifying columns to be uploaded
    columns_to_upload = upload_data.get('columns_to_upload', None)
    if columns_to_upload is None:
        columns_to_upload = [True] * len(initial_columns)
        upload_data['columns_to_upload'] = columns_to_upload

    # Get or create list of booleans identifying key columns to be kept
    keep_key_column = upload_data.get('keep_key_column', None)
    if keep_key_column is None:
        keep_key_column = src_is_key_column[:]
        upload_data['keep_key_column'] = keep_key_column

    # Bind the form with the received data (remember unique columns)
    form = SelectColumnUploadForm(request.POST or None,
                                  column_names=rename_column_names,
                                  columns_to_upload=columns_to_upload,
                                  is_key=src_is_key_column,
                                  keep_key=keep_key_column)

    # Get a hold of the fields to create a list to be processed in the page
    load_fields = [f for f in form if f.name.startswith('upload_')]
    newname_fields = [f for f in form if f.name.startswith('new_name_')]
    src_key_fields = [
        form['make_key_%s' % idx] if src_is_key_column[idx] else None
        for idx in range(len(src_is_key_column))
    ]

    # Create one of the context elements for the form. Pack the lists so that
    # they can be iterated in the template
    df_info = [list(i) for i in zip(load_fields,
                                    initial_columns,
                                    newname_fields,
                                    column_types,
                                    src_key_fields)]

    # Process the initial loading of the form and return
    if request.method != 'POST' or not form.is_valid():
        # Update the dictionary with the session information
        request.session['upload_data'] = upload_data
        context = {'form': form,
                   'wid': workflow.id,
                   'prev_step': upload_data['step_1'],
                   'df_info': df_info}

        if not ops.workflow_id_has_table(workflow.id):
            # It is an upload, not a merge, set the next step to finish
            context['next_name'] = _('Finish')
        return render(request, 'dataops/upload_s2.html', context)

    # At this point we are processing a valid POST request. Modify
    # upload_data with the information received in the post.
    for i in range(len(initial_columns)):
        new_name = form.cleaned_data['new_name_%s' % i]
        upload_data['rename_column_names'][i] = new_name
        upload = form.cleaned_data['upload_%s' % i]
        upload_data['columns_to_upload'][i] = upload

        if src_is_key_column[i]:
            # If the column is key, check if the user wants to keep it
            keep_key_column[i] = form.cleaned_data['make_key_%s' % i]

    # Update the dictionary with the session information
    request.session['upload_data'] = upload_data

    # Load the existing DF or None if it doesn't exist
    existing_df = pandas_db.load_from_db(workflow.id)

    if existing_df is not None:
        # This is a merge operation, so move to Step 3
        return redirect('dataops:upload_s3')

    # This is an upload operation (not a merge) save the uploaded dataframe
    # in the DB and finish.

    # Get the uploaded data_frame
    try:
        data_frame = ops.load_upload_from_db(workflow.id)
    except Exception:
        return render(
            request,
            'error.html',
            {'message': _('Exception while retrieving the data frame')})

    # Update the data frame
    status = ops.perform_dataframe_upload_merge(workflow.id,
                                                existing_df,
                                                data_frame,
                                                upload_data)

    if status:
        # Something went wrong. Flag it and reload
        context = {'form': form,
                   'wid': workflow.id,
                   'prev_step': upload_data['step_1'],
                   'df_info': df_info}
        return render(request, 'dataops/upload_s2.html', context)

    # Nuke the temporary table
    pandas_db.delete_upload_table(workflow.id)

    # Log the event
    col_info = workflow.get_column_info()
    Log.objects.register(request.user,
                         Log.WORKFLOW_DATA_UPLOAD,
                         workflow,
                         {'id': workflow.id,
                          'name': workflow.name,
                          'num_rows': workflow.nrows,
                          'num_cols': workflow.ncols,
                          'column_names': col_info[0],
                          'column_types': col_info[1],
                          'column_unique': col_info[2]})

    # Go back to show the workflow detail
    return redirect(reverse('workflow:detail',
                            kwargs={'pk': workflow.id}))
def column_ss(request, pk):
    """
    Serve the DataTables server-side request with the list of columns.

    :param request: Http request received from DataTable
    :param pk: Workflow id
    :return: Data to visualize in the table
    """
    workflow = get_workflow(request)
    if not workflow:
        return JsonResponse(
            {'error': _('Incorrect request. Unable to process')})

    # If there is no DF, there are no columns to show, this should be
    # detected before this is executed
    if not ops.workflow_id_has_table(workflow.id):
        return JsonResponse({'error': _('There is no data in the workflow')})

    # Check that the POST parameters are correctly given
    try:
        draw = int(request.POST.get('draw', None))
        start = int(request.POST.get('start', None))
        length = int(request.POST.get('length', None))
        order_col = request.POST.get('order[0][column]', None)
        order_dir = request.POST.get('order[0][dir]', 'asc')
    except (ValueError, TypeError):
        # FIX: int(None) raises TypeError, not ValueError, so a request with
        # missing parameters previously escaped this handler and produced an
        # unhandled exception.
        return JsonResponse(
            {'error': _('Incorrect request. Unable to process')})

    # Get the column information from the request and the rest of values.
    search_value = request.POST.get('search[value]', None)

    # Get the initial set
    qs = workflow.columns.all()
    recordsTotal = len(qs)
    recordsFiltered = recordsTotal

    # Reorder if required
    if order_col:
        col_name = ['name', 'data_type', 'is_key'][int(order_col)]
        if order_dir == 'desc':
            col_name = '-' + col_name
        qs = qs.order_by(col_name)

    if search_value:
        qs = qs.filter(Q(name__icontains=search_value) |
                       Q(data_type__icontains=search_value))
        recordsFiltered = len(qs)

    # Creating the result
    final_qs = []
    for col in qs[start:start + length]:
        ops_string = render_to_string(
            'workflow/includes/workflow_column_operations.html',
            {'id': col.id, 'is_key': col.is_key})

        # The data type for integers or doubles is shown as 'number'
        col_data_type = col.data_type
        if col_data_type == 'integer' or col_data_type == 'double':
            col_data_type = 'number'

        final_qs.append({
            'number': render_to_string(
                'workflow/includes/workflow_column_movement.html',
                {'column': col}),
            'name': col.name,
            'description': col.description_text,
            'type': col_data_type,
            'key': '<span class="true">✔</span>' if col.is_key else '',
            'operations': ops_string
        })

        if len(final_qs) == length:
            break

    # Result to return as Ajax response
    data = {
        'draw': draw,
        'recordsTotal': recordsTotal,
        'recordsFiltered': recordsFiltered,
        'data': final_qs
    }

    return JsonResponse(data)
def run_survey_ss(request, pk):
    """
    Serve the AJAX requests to show the elements in the table that satisfy
    the filter and are between the given limits.

    :param request: HTTP request from dataTables
    :param pk: action id being run
    :return: AJAX response
    """
    workflow = get_workflow(request)
    if not workflow:
        return JsonResponse(
            {'error': _('Incorrect request. Unable to process')})

    # If there is not DF, go to workflow details.
    if not ops.workflow_id_has_table(workflow.id):
        return JsonResponse({'error': _('There is no data in the table')})

    # Get the action (owned by or shared with the requesting user)
    try:
        action = Action.objects.filter(
            Q(workflow__user=request.user) |
            Q(workflow__shared=request.user)).distinct().get(pk=pk)
    except ObjectDoesNotExist:
        return redirect('action:index')

    # Check that the POST parameters are correctly given
    try:
        draw = int(request.POST.get('draw', None))
        start = int(request.POST.get('start', None))
        length = int(request.POST.get('length', None))
        order_col = request.POST.get('order[0][column]', None)
        order_dir = request.POST.get('order[0][dir]', 'asc')
    except (ValueError, TypeError):
        # FIX: int(None) raises TypeError, not ValueError; a request with
        # missing parameters previously escaped this handler.
        return JsonResponse(
            {'error': _('Incorrect request. Unable to process')})

    # Get the column information from the request and the rest of values.
    search_value = request.POST.get('search[value]', None)

    # Get columns and the position of the first key
    columns = action.columns.all()
    column_names = [x.name for x in columns]
    # NOTE(review): assumes the action has at least one key column; otherwise
    # next() raises StopIteration — confirm against the data model.
    key_idx = next(idx for idx, c in enumerate(columns) if c.is_key)

    # See if an order column has been given.
    if order_col:
        order_col = columns[int(order_col)]

    # Get the search pairs of field, value
    cv_tuples = []
    if search_value:
        cv_tuples = [(c.name, search_value, c.data_type) for c in columns]

    # Filter
    cfilter = action.get_filter()

    # Get the query set (including the filter in the action)
    # FIX: guard the ordering column; when dataTables sends no ordering
    # information order_col is None and the original code dereferenced
    # None.name, raising AttributeError.
    qs = pandas_db.search_table_rows(
        workflow.id,
        cv_tuples,
        True,
        order_col.name if order_col else None,
        order_dir == 'asc',
        column_names,  # Column names in the action
        cfilter.formula if cfilter else None)

    # Post processing + adding operations
    final_qs = []
    items = 0
    for row in qs[start:start + length]:
        items += 1

        # Render the first element (the key) as the link to the page to
        # update the content.
        dst_url = reverse('action:run_survey_row', kwargs={'pk': action.id})
        url_parts = list(urlparse.urlparse(dst_url))
        query = dict(urlparse.parse_qs(url_parts[4]))
        query.update({'uatn': column_names[key_idx], 'uatv': row[key_idx]})
        url_parts[4] = urlencode(query)
        link_item = '<a href="{0}">{1}</a>'.format(
            urlparse.urlunparse(url_parts), row[key_idx])
        row = list(row)
        row[key_idx] = link_item

        # Add the row for rendering
        final_qs.append(row)

        if items == length:
            # We reached the number of requested elements, abandon loop
            break

    data = {
        'draw': draw,
        'recordsTotal': workflow.nrows,
        'recordsFiltered': len(qs),
        'data': final_qs
    }

    return JsonResponse(data)
def column_ss(request, pk):
    """
    Serve the DataTables server-side request with the list of columns
    (variant with position/description columns and icon-based types).

    :param request: Http request received from DataTable
    :param pk: Workflow id
    :return: Data to visualize in the table
    """
    workflow = get_workflow(request)
    if not workflow:
        return JsonResponse(
            {'error': _('Incorrect request. Unable to process')})

    # If there is no DF, there are no columns to show, this should be
    # detected before this is executed
    if not ops.workflow_id_has_table(workflow.id):
        return JsonResponse({'error': _('There is no data in the workflow')})

    # Check that the POST parameters are correctly given
    try:
        draw = int(request.POST.get('draw', None))
        start = int(request.POST.get('start', None))
        length = int(request.POST.get('length', None))
        order_col = request.POST.get('order[0][column]', None)
        order_dir = request.POST.get('order[0][dir]', 'asc')
    except (ValueError, TypeError):
        # FIX: int(None) raises TypeError, not ValueError, so a request with
        # missing parameters previously escaped this handler and produced an
        # unhandled exception.
        return JsonResponse(
            {'error': _('Incorrect request. Unable to process')})

    # Get the column information from the request and the rest of values.
    search_value = request.POST.get('search[value]', None)

    # Get the initial set
    qs = workflow.columns.all()
    recordsTotal = len(qs)
    recordsFiltered = recordsTotal

    # Reorder if required
    if order_col:
        col_name = ['position',
                    'name',
                    'description_text',
                    'data_type',
                    'is_key'][int(order_col)]
        if order_dir == 'desc':
            col_name = '-' + col_name
        qs = qs.order_by(col_name)

    if search_value:
        qs = qs.filter(Q(name__icontains=search_value) |
                       Q(data_type__icontains=search_value))
        recordsFiltered = len(qs)

    # Creating the result
    final_qs = []
    for col in qs[start:start + length]:
        ops_string = render_to_string(
            'workflow/includes/workflow_column_operations.html',
            {'id': col.id, 'is_key': col.is_key})

        # The data type is rendered as a font-awesome icon with a tooltip
        col_data_type = col.data_type
        col_data_type_str = """<div data-toggle="tooltip" title="{0}"> <span class="fa fa-{1}"></span></div>"""
        if col_data_type == 'string':
            col_data_type_str = col_data_type_str.format('Text', 'italic')
        elif col_data_type == 'integer' or col_data_type == 'double':
            col_data_type_str = col_data_type_str.format('Number', 'percent')
        elif col_data_type == 'boolean':
            col_data_type_str = col_data_type_str.format(
                'True/False', 'toggle-on')
        elif col_data_type == 'datetime':
            col_data_type_str = col_data_type_str.format(
                'Date/Time', 'calendar-o')

        final_qs.append({
            'number': col.position,
            'name': format_html(
                """<a href="#" class="js-workflow-column-edit" data-toggle="tooltip" data-url="{0}" title="Edit the parameters of this column">{1}</a>""",
                reverse('workflow:column_edit', kwargs={'pk': col.id}),
                col.name),
            'description': col.description_text,
            'type': format_html(col_data_type_str),
            'key': '<span class="true">✔</span>' if col.is_key else '',
            'operations': ops_string
        })

        if len(final_qs) == length:
            break

    # Result to return as Ajax response
    data = {
        'draw': draw,
        'recordsTotal': recordsTotal,
        'recordsFiltered': recordsFiltered,
        'data': final_qs
    }

    return JsonResponse(data)