Code example #1
def display_view_ss(request, pk):
    """
    AJAX function to provide a subset of the table for visualisation. The
    subset is defined by the elements in a view
    :param request: HTTP request from dataTables
    :param pk: Primary key of the view to be used
    :return: AJAX response
    """

    workflow = get_workflow(request)
    if not workflow:
        return JsonResponse(
            {'error': _('Incorrect request. Unable to process')})

    # If there is no DF, report the error in the JSON response.
    if not ops.workflow_id_has_table(workflow.id):
        return JsonResponse({'error': _('There is no data in the table')})

    try:
        view = View.objects.get(pk=pk, workflow=workflow)
    except ObjectDoesNotExist:
        # The view was not found, most likely because the session expired
        return JsonResponse({'error': _('Incorrect view reference')})

    return render_table_display_data(request, workflow, view.columns.all(),
                                     view.formula, view.id)
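
For context, an AJAX view like this is usually reached through a URL pattern that the DataTables widget polls. A minimal sketch of the wiring, assuming Django 1.x-style routing; the route regex and name below are illustrative assumptions, not taken from the project:

from django.conf.urls import url

from . import views

urlpatterns = [
    # DataTables POSTs here with the primary key of the view to render
    # (hypothetical route name, shown for illustration only)
    url(r'^(?P<pk>\d+)/display_view_ss/$',
        views.display_view_ss,
        name='display_view_ss'),
]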
Code example #2
File: views.py Project: uts-cic/ontask_b
    def get_context_data(self, **kwargs):

        context = super(WorkflowDetailView, self).get_context_data(**kwargs)

        workflow_id = self.request.session.get('ontask_workflow_id', None)
        if not workflow_id:
            return context

        # Get the table information (if it exists)
        context['table_info'] = None
        if ops.workflow_id_has_table(self.object.id):
            context['table_info'] = {
                'num_rows': self.object.nrows,
                'num_cols': self.object.ncols,
                'num_actions': self.object.actions.all().count(),
                'num_attributes': len(self.object.attributes)
            }

        # Add the number of key columns in the workflow to the context
        context['num_key_columns'] = Column.objects.filter(
            workflow__id=workflow_id, is_key=True).count()

        # Safety check for consistency (only in development)
        if settings.DEBUG:
            assert pandas_db.check_wf_df(self.object)

        return context
Code example #3
def render_table_display_page(request, workflow, view, columns, ajax_url):
    """
    Function to render the content of the display page taking into account
    the columns to show and the AJAX url to use to render the table content.
    :param request: HTTP request
    :param workflow: Workflow object used to access the data frame
    :param view: View to use to render the table (or None)
    :param columns: Columns to display in the page
    :param ajax_url: URL to invoke to populate the table
    :return: HTTP Response
    """
    # Create the initial context
    context = {
        'query_builder_ops': workflow.get_query_builder_ops_as_str(),
        'ajax_url': ajax_url,
        'views': workflow.views.all()
    }

    # If there is a DF, add the columns
    if ops.workflow_id_has_table(workflow.id):
        context['columns'] = columns

    # If using a view, add it to the context
    if view:
        context['view'] = view

    return render(request, 'table/display.html', context)
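
A minimal sketch of how a full-table display view might invoke this helper; the caller below and the 'table:display_ss' route name are assumptions for illustration:

def display(request):
    # Hypothetical caller of render_table_display_page (sketch only)
    workflow = get_workflow(request)
    if not workflow:
        return redirect('workflow:index')

    # No view restricting the subset: show every workflow column and let
    # DataTables pull the rows through the AJAX endpoint
    return render_table_display_page(
        request,
        workflow,
        None,                           # no view
        workflow.columns.all(),         # columns to show in the page
        reverse('table:display_ss'))    # assumed AJAX URL for DataTables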
Code example #4
def display_ss(request):
    """
    AJAX function to provide a subset of the table for visualisation
    :param request: HTTP request from dataTables
    :return: AJAX response
    """
    workflow = get_workflow(request)
    if not workflow:
        return JsonResponse({'error': 'Incorrect request. Unable to process'})

    # If there is no DF, report the error in the JSON response.
    if not ops.workflow_id_has_table(workflow.id):
        return JsonResponse({'error': 'There is no data in the table'})

    return render_table_display_data(request, workflow, workflow.columns.all(),
                                     None)
Code example #5
def csvdownload(request, pk=None):
    """

    :param request: HTML request
    :param pk: If given, the PK of the view to subset the table
    :return: Return a CSV download of the data in the table
    """

    # Get the appropriate workflow object
    workflow = get_workflow(request)
    if not workflow:
        return redirect('workflow:index')

    # Check if dataframe is present
    if not ops.workflow_id_has_table(workflow.id):
        # Go back to show the workflow detail
        return redirect(reverse('workflow:detail',
                                kwargs={'pk': workflow.id}))

    # Get the columns from the view (if given)
    view = None
    if pk:
        try:
            view = View.objects.filter(
                Q(workflow__user=request.user) |
                Q(workflow__shared=request.user)
            ).distinct().prefetch_related('columns').get(pk=pk)
        except ObjectDoesNotExist:
            # Go back to show the workflow detail
            return redirect(reverse('workflow:detail',
                                    kwargs={'pk': workflow.id}))

    # Fetch the data frame
    data_frame = pandas_db.get_subframe(
        workflow.id,
        view,
        [x.name for x in view.columns.all()] if view is not None else None)

    # Create the response object
    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = \
        'attachment; filename="ontask_table.csv"'

    # Dump the data frame as the content of the response object
    data_frame.to_csv(path_or_buf=response, sep=str(','), index=False)

    return response
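
For large tables the CSV body does not have to be materialised in memory before sending. A sketch of the same download using Django's StreamingHttpResponse; csv_row_iterator is a hypothetical helper, and a production version would need csv-module quoting for values containing commas:

from django.http import StreamingHttpResponse


def csv_row_iterator(data_frame):
    # Hypothetical helper: yield the header, then one CSV line per row.
    # Naive joining: values containing commas would need proper csv quoting.
    yield ','.join(str(c) for c in data_frame.columns) + '\n'
    for row in data_frame.itertuples(index=False):
        yield ','.join(str(v) for v in row) + '\n'


def csvdownload_streaming(data_frame):
    # Stream the rows instead of building the whole response body at once
    response = StreamingHttpResponse(csv_row_iterator(data_frame),
                                     content_type='text/csv')
    response['Content-Disposition'] = \
        'attachment; filename="ontask_table.csv"'
    return response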
Code example #6
File: views.py Project: AmitozeNandha/ontask_b
    def get_context_data(self, **kwargs):

        context = super(WorkflowDetailView, self).get_context_data(**kwargs)

        workflow_id = self.request.session.get('ontask_workflow_id', None)
        if not workflow_id:
            return context

        # Get the table information (if it exists)
        context['table_info'] = None
        if ops.workflow_id_has_table(self.object.id):
            context['table_info'] = {
                'num_rows': self.object.nrows,
                'num_cols': self.object.ncols,
                'num_actions': self.object.actions.all().count(),
                'num_attributes': len(self.object.attributes)
            }

        # Get the key columns
        columns = Column.objects.filter(workflow__id=workflow_id, is_key=True)

        # Add the number of key columns in the workflow to the context
        context['num_key_columns'] = columns.count()

        # Guarantee that column position is set for backward compatibility
        columns = self.object.columns.all()
        if any(x.position == 0 for x in columns):
            # At least one column has position equal to zero, so renumber all
            # of them
            for idx, c in enumerate(columns):
                c.position = idx + 1
                c.save()

        # Safety check for consistency (only in development)
        if settings.DEBUG:
            assert pandas_db.check_wf_df(self.object)

            # Columns are properly numbered
            cpos = Column.objects.filter(workflow__id=workflow_id).values_list(
                'position', flat=True)
            assert sorted(cpos) == list(range(1, len(cpos) + 1))

        return context
Code example #7
def upload_s2(request):
    """
    The four step process will populate the following dictionary with name
    upload_data (divided by steps in which they are set

    ASSUMES:

    initial_column_names: List of column names in the initial file.

    column_types: List of column types as detected by pandas

    src_is_key_column: Boolean list with src columns that are unique

    step_1: URL name of the first step

    CREATES:

    rename_column_names: Modified column names to remove ambiguity when
                          merging.

    columns_to_upload: Boolean list denoting the columns in SRC that are
                       marked for upload.

    keep_key_column: Boolean list with those key columns that need to be kept.

    :param request: Web request
    :return: the dictionary upload_data in the session object
    """
    workflow = get_workflow(request)
    if not workflow:
        return redirect('workflow:index')

    # Get the dictionary with the upload information stored in the session.
    upload_data = request.session.get('upload_data', None)
    if not upload_data:
        # If there is no object, or it is an empty dict, it denotes a direct
        # jump to this step, get back to the dataops page
        return redirect('dataops:uploadmerge')

    # Get the column names, types, and those that are unique from the data frame
    try:
        # Use indexing so a missing key raises KeyError (handled below)
        initial_columns = upload_data['initial_column_names']
        column_types = upload_data['column_types']
        src_is_key_column = upload_data['src_is_key_column']
    except KeyError:
        # The page has been invoked out of order
        return redirect(
            upload_data.get('step_1', reverse('dataops:uploadmerge')))

    # Get or create the list with the renamed column names
    rename_column_names = upload_data.get('rename_column_names', None)
    if rename_column_names is None:
        rename_column_names = initial_columns[:]
        upload_data['rename_column_names'] = rename_column_names

    # Get or create list of booleans identifying columns to be uploaded
    columns_to_upload = upload_data.get('columns_to_upload', None)
    if columns_to_upload is None:
        columns_to_upload = [True] * len(initial_columns)
        upload_data['columns_to_upload'] = columns_to_upload

    # Get or create list of booleans identifying key columns to be kept
    keep_key_column = upload_data.get('keep_key_column', None)
    if keep_key_column is None:
        keep_key_column = upload_data['src_is_key_column'][:]
        upload_data['keep_key_column'] = keep_key_column

    # Bind the form with the received data (remember unique columns)
    form = SelectColumnUploadForm(request.POST or None,
                                  column_names=rename_column_names,
                                  columns_to_upload=columns_to_upload,
                                  is_key=src_is_key_column,
                                  keep_key=keep_key_column)

    # Get a hold of the fields to create a list to be processed in the page
    load_fields = [f for f in form if f.name.startswith('upload_')]
    newname_fields = [f for f in form if f.name.startswith('new_name_')]
    src_key_fields = [
        form['make_key_%s' % idx] if src_is_key_column[idx] else None
        for idx in range(len(src_is_key_column))
    ]

    # Create one of the context elements for the form. Pack the lists so that
    # they can be iterated in the template
    df_info = [
        list(i) for i in zip(load_fields, initial_columns, newname_fields,
                             column_types, src_key_fields)
    ]

    # Process the initial loading of the form and return
    if request.method != 'POST' or not form.is_valid():
        # Update the dictionary with the session information
        request.session['upload_data'] = upload_data
        context = {
            'form': form,
            'wid': workflow.id,
            'prev_step': upload_data['step_1'],
            'df_info': df_info
        }

        if not ops.workflow_id_has_table(workflow.id):
            # It is an upload, not a merge, set the next step to finish
            context['next_name'] = _('Finish')
        return render(request, 'dataops/upload_s2.html', context)

    # At this point we are processing a valid POST request

    # We need to modify upload_data with the information received in the post
    for i in range(len(initial_columns)):
        new_name = form.cleaned_data['new_name_%s' % i]
        upload_data['rename_column_names'][i] = new_name
        upload = form.cleaned_data['upload_%s' % i]
        upload_data['columns_to_upload'][i] = upload

        if src_is_key_column[i]:
            # If the column is key, check if the user wants to keep it
            keep_key_column[i] = form.cleaned_data['make_key_%s' % i]

    # Update the dictionary with the session information
    request.session['upload_data'] = upload_data

    # Load the existing DF or None if it doesn't exist
    existing_df = pandas_db.load_from_db(workflow.id)

    if existing_df is not None:
        # This is a merge operation, so move to Step 3
        return redirect('dataops:upload_s3')

    # This is an upload operation (not a merge); save the uploaded dataframe
    # in the DB and finish.

    # Get the uploaded data_frame
    try:
        data_frame = ops.load_upload_from_db(workflow.id)
    except Exception:
        return render(
            request, 'error.html',
            {'message': _('Exception while retrieving the data frame')})

    # Update the data frame
    status = ops.perform_dataframe_upload_merge(workflow.id, existing_df,
                                                data_frame, upload_data)

    if status:
        # Something went wrong. Flag it and reload
        context = {
            'form': form,
            'wid': workflow.id,
            'prev_step': upload_data['step_1'],
            'df_info': df_info
        }
        return render(request, 'dataops/upload_s2.html', context)

    # Nuke the temporary table
    pandas_db.delete_upload_table(workflow.id)

    # Log the event
    col_info = workflow.get_column_info()
    logs.ops.put(
        request.user, 'workflow_data_upload', workflow, {
            'id': workflow.id,
            'name': workflow.name,
            'num_rows': workflow.nrows,
            'num_cols': workflow.ncols,
            'column_names': col_info[0],
            'column_types': col_info[1],
            'column_unique': col_info[2]
        })

    # Go back to show the workflow detail
    return redirect(reverse('workflow:detail', kwargs={'pk': workflow.id}))
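
Combining the ASSUMES and CREATES lists from the docstring, the session entry after this step has roughly the following shape; every concrete value below is made up for illustration:

upload_data = {
    # Set before this step (step 1)
    'initial_column_names': ['email', 'score'],      # illustrative names
    'column_types': ['string', 'double'],            # as detected by pandas
    'src_is_key_column': [True, False],
    'step_1': 'dataops:csvupload1',                  # assumed URL name
    # Set in this step (step 2)
    'rename_column_names': ['email', 'final_score'],
    'columns_to_upload': [True, True],
    'keep_key_column': [True, False],
}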
Code example #8
def run_ss(request, pk):
    """
    Serve the AJAX requests to show the elements in the table that satisfy
    the filter and between the given limits.
    :param request:
    :param pk: action id being run
    :return:
    """

    workflow = get_workflow(request)
    if not workflow:
        return JsonResponse({'error': 'Incorrect request. Unable to process'})

    # If there is not DF, go to workflow details.
    if not ops.workflow_id_has_table(workflow.id):
        return JsonResponse({'error': 'There is no data in the table'})

    # Get the action
    try:
        action = Action.objects.filter(
            Q(workflow__user=request.user) |
            Q(workflow__shared=request.user)).distinct().get(pk=pk)
    except ObjectDoesNotExist:
        return redirect('action:index')

    # Check that the POST parameters are correctly given
    try:
        draw = int(request.POST.get('draw', None))
        start = int(request.POST.get('start', None))
        length = int(request.POST.get('length', None))
        order_col = request.POST.get('order[0][column]', None)
        order_dir = request.POST.get('order[0][dir]', 'asc')
    except (ValueError, TypeError):
        # A missing parameter yields None, and int(None) raises TypeError
        return JsonResponse({'error': 'Incorrect request. Unable to process'})

    # Get the column information from the request and the rest of values.
    search_value = request.POST.get('search[value]', None)

    # Get columns
    columns = action.columns.all()
    column_names = [x.name for x in columns]

    # See if an order column has been given.
    if order_col:
        order_col = columns[int(order_col)]

    # Get the search pairs of field, value
    cv_tuples = []
    if search_value:
        cv_tuples = [(c.name, search_value, c.data_type) for c in columns]

    # Get the query set (including the filter in the action)
    qs = pandas_db.search_table_rows(
        workflow.id,
        cv_tuples,
        True,
        order_col.name if order_col else None,  # may be None if no ordering
        order_dir == 'asc',
        column_names,  # Column names in the action
        action.filter  # Filter in the action
    )

    # Post processing + adding operations
    final_qs = []
    items = 0
    for row in qs[start:start + length]:
        items += 1

        # Render the first element (the key) as the link to the page to update
        # the content.
        dst_url = reverse('action:run_row', kwargs={'pk': action.id})
        url_parts = list(urlparse.urlparse(dst_url))
        query = dict(urlparse.parse_qs(url_parts[4]))
        query.update({'uatn': column_names[0], 'uatv': row[0]})
        url_parts[4] = urlencode(query)
        link_item = '<a href="{0}">{1}</a>'.format(
            urlparse.urlunparse(url_parts), row[0]
        )

        # Add the row for rendering
        final_qs.append([link_item] + list(row)[1:])

        if items == length:
            # We reached the number of requested elements; abandon the loop
            break

    data = {
        'draw': draw,
        'recordsTotal': workflow.nrows,
        'recordsFiltered': len(qs),
        'data': final_qs
    }

    return JsonResponse(data)
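
The parameters read from request.POST follow the DataTables server-side protocol (draw/start/length plus ordering and search fields). A sketch of exercising the endpoint with Django's test client; the 'action:run_ss' route name is an assumption, and a real test would first log in and select a workflow in the session:

import json

from django.core.urlresolvers import reverse  # Django 1.x import path
from django.test import Client


def fetch_first_page(action_pk):
    # Simulate the POST that DataTables sends for the first page of rows
    client = Client()
    response = client.post(
        reverse('action:run_ss', kwargs={'pk': action_pk}),
        {
            'draw': 1,                # echo counter used by DataTables
            'start': 0,               # index of the first row in the page
            'length': 10,             # page size
            'order[0][column]': 0,    # sort by the first column
            'order[0][dir]': 'asc',
            'search[value]': '',      # no global search term
        })
    return json.loads(response.content)  # {'draw': ..., 'data': [...], ...}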
Code example #9
File: views.py Project: uts-cic/ontask_b
def column_ss(request, pk):
    """
    Given the workflow id and the request, return to DataTable the proper
    list of columns to be rendered.
    :param request: Http request received from DataTable
    :param pk: Workflow id
    :return: Data to visualize in the table
    """
    workflow = get_workflow(request)
    if not workflow:
        return JsonResponse({'error': 'Incorrect request. Unable to process'})

    # If there is no DF there are no columns to show; this should have been
    # detected before this view is executed
    if not ops.workflow_id_has_table(workflow.id):
        return JsonResponse({'error': 'There is no data in the workflow'})

    # Check that the POST parameters are correctly given
    try:
        draw = int(request.POST.get('draw', None))
        start = int(request.POST.get('start', None))
        length = int(request.POST.get('length', None))
        order_col = request.POST.get('order[0][column]', None)
        order_dir = request.POST.get('order[0][dir]', 'asc')
    except (ValueError, TypeError):
        # A missing parameter yields None, and int(None) raises TypeError
        return JsonResponse({'error': 'Incorrect request. Unable to process'})

    # Get the column information from the request and the rest of values.
    search_value = request.POST.get('search[value]', None)

    # Get the initial set
    qs = workflow.columns.all()
    recordsTotal = len(qs)
    recordsFiltered = recordsTotal

    # Reorder if required
    if order_col:
        col_name = ['name', 'data_type', 'is_key'][int(order_col)]
        if order_dir == 'desc':
            col_name = '-' + col_name
        qs = qs.order_by(col_name)

    if search_value:
        qs = qs.filter(
            Q(name__contains=search_value)
            | Q(data_type__contains=search_value))
        recordsFiltered = len(qs)

    # Creating the result
    final_qs = []
    for col in qs[start:start + length]:
        ops_string = render_to_string(
            'workflow/includes/workflow_column_operations.html', {
                'id': col.id,
                'is_key': col.is_key
            })

        final_qs.append([
            col.name,
            col.data_type,
            '<span class="true">✔</span>' if col.is_key \
                  else '<span class="true">✘</span>',
            ops_string
        ])

        if len(final_qs) == length:
            break

    # Result to return as Ajax response
    data = {
        'draw': draw,
        'recordsTotal': recordsTotal,
        'recordsFiltered': recordsFiltered,
        'data': final_qs
    }

    return JsonResponse(data)