Example #1
import csv

from django.http import StreamingHttpResponse

# Echo and subscribers_as_csv_rows are project helpers defined elsewhere
# (see the Echo sketch after this example).


def view(request):
    rows = subscribers_as_csv_rows()

    # we can refactor this later
    # copied from core/views.py
    pseudo_buffer = Echo()
    writer = csv.writer(pseudo_buffer, dialect=csv.excel)
    response = StreamingHttpResponse(
        (writer.writerow(row) for row in rows),
        content_type="text/csv;charset=UTF-8",
    )
    response["Content-Disposition"] = 'attachment; filename="newsletter_subscribers.csv"'
    response.encoding = "UTF-8"
    return response
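None of these examples define the Echo pseudo-buffer they stream through. A minimal sketch, assuming the project helper follows the streaming-CSV pattern from the Django documentation:

class Echo:
    """File-like object implementing only write().

    csv.writer calls write() once per formatted row; returning the value
    unchanged means nothing is buffered, so StreamingHttpResponse can send
    each row as soon as the generator yields it.
    """

    def write(self, value):
        return value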
Example #2
def dataset_detail(request, slug, tablename=""):
    try:
        dataset = Dataset.objects.get(slug=slug)
    except Dataset.DoesNotExist:
        context = {"message": "Dataset does not exist"}
        return render(request, "404.html", context, status=404)

    if not tablename:
        tablename = dataset.get_default_table().name
        return redirect(reverse("core:dataset-table-detail", kwargs={"slug": slug, "tablename": tablename},))

    try:
        allow_hidden = request.user.is_superuser
        table = dataset.get_table(tablename, allow_hidden=allow_hidden)
    except Table.DoesNotExist:
        context = {"message": "Table does not exist"}
        return render(request, "404.html", context, status=404)

    querystring = request.GET.copy()
    page_number = querystring.pop("page", ["1"])[0].strip() or "1"
    items_per_page = querystring.pop("items", [str(settings.ROWS_PER_PAGE)])[0].strip() or str(settings.ROWS_PER_PAGE)
    download_csv = querystring.pop("format", [""]) == ["csv"]
    try:
        page = int(page_number)
    except ValueError:
        context = {"message": "Invalid page number."}
        return render(request, "404.html", context, status=404)
    try:
        items_per_page = int(items_per_page)
    except ValueError:
        context = {"message": "Invalid items per page."}
        return render(request, "404.html", context, status=404)
    items_per_page = min(items_per_page, 1000)

    version = dataset.version_set.order_by("-order").first()
    fields = table.fields

    TableModel = table.get_model()
    query, search_query, order_by = TableModel.objects.parse_querystring(querystring)
    all_data = TableModel.objects.composed_query(query, search_query, order_by)

    if download_csv:
        agent = request.headers.get("User-Agent", "")
        block_agent = any(agent.startswith(a) for a in settings.BLOCKED_AGENTS)

        if not any([query, search_query]) or block_agent:  # user trying to download a CSV without custom filters
            context = {"html_content": "400-csv-without-filters.html", "download_url": table.version.download_url}
            return render(request, "404.html", context, status=400)

        if all_data.count() > max_export_rows:
            context = {"message": "Max rows exceeded."}
            return render(request, "404.html", context, status=400)

        filename = "{}-{}.csv".format(slug, uuid.uuid4().hex)
        pseudo_buffer = Echo()
        writer = csv.writer(pseudo_buffer, dialect=csv.excel)
        csv_rows = queryset_to_csv(all_data, fields)
        response = StreamingHttpResponse(
            (writer.writerow(row) for row in csv_rows), content_type="text/csv;charset=UTF-8",
        )
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(filename)
        response.encoding = "UTF-8"
        return response

    paginator = Paginator(all_data, items_per_page)
    data = paginator.get_page(page)

    for key, value in list(querystring.items()):
        if not value:
            del querystring[key]

    context = {
        "data": data,
        "dataset": dataset,
        "fields": fields,
        "max_export_rows": max_export_rows,
        "query_dict": querystring,
        "querystring": querystring.urlencode(),
        "slug": slug,
        "table": table,
        "table": table,
        "total_count": all_data.count(),
        "version": version,
    }
    return render(request, "dataset-detail.html", context)
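The queryset_to_csv helper is only ever called, never shown, in these examples. A rough sketch of the shape its output must have to feed writer.writerow above; the header row and the use of values_list are assumptions, not the project's actual implementation:

def queryset_to_csv(queryset, fields):
    # Hypothetical sketch: yield a header row, then one tuple of values per
    # record, streaming from the database with iterator() instead of loading
    # the whole queryset into memory.
    field_names = [field.name for field in fields]
    yield field_names
    for row in queryset.values_list(*field_names).iterator():
        yield row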
Example #3
def dataset_detail(request, slug, tablename=''):
    dataset = get_object_or_404(Dataset, slug=slug)
    if not tablename:
        tablename = dataset.get_default_table().name
        return redirect(reverse('core:dataset-table-detail',
                                kwargs={'slug': slug, 'tablename': tablename}))

    try:
        table = dataset.get_table(tablename)
    except Table.DoesNotExist:
        return HttpResponseBadRequest('Table does not exist.', status=404)

    version = dataset.version_set.order_by('-order').first()
    fields = table.fields
    all_data = table.get_model().objects
    querystring = request.GET.copy()
    page_number = querystring.pop('page', ['1'])[0].strip() or '1'
    search_query = request.GET.get('search')
    order_by = querystring.pop('order-by', [''])
    order_by = [field.strip().lower()
                for field in order_by[0].split(',')
                if field.strip()]

    if search_query:
        all_data = all_data.filter(search_data=SearchQuery(search_query))
    if querystring:
        keys = list(querystring.keys())
        for key in keys:
            if not querystring[key]:
                del querystring[key]
        all_data = all_data.apply_filters(querystring)

    all_data = all_data.apply_ordering(order_by)
    if (querystring.get('format', '') == 'csv' and
        0 < all_data.count() <= max_export_rows):
        filename = '{}-{}.csv'.format(slug, uuid.uuid4().hex)
        pseudo_buffer = Echo()
        writer = csv.writer(pseudo_buffer, dialect=csv.excel)
        csv_rows = queryset_to_csv(all_data, fields)
        response = StreamingHttpResponse(
            (writer.writerow(row) for row in csv_rows),
            content_type='text/csv;charset=UTF-8',
        )
        response['Content-Disposition'] = ('attachment; filename="{}"'
                                           .format(filename))
        response.encoding = 'UTF-8'
        return response

    paginator = Paginator(all_data, 20)
    try:
        page = int(page_number)
    except ValueError:
        return HttpResponseBadRequest('Invalid page number.')
    data = paginator.get_page(page)

    if order_by:
        querystring['order-by'] = ','.join(order_by)
    if search_query:
        querystring['search'] = search_query
    context = {
        'data': data,
        'dataset': dataset,
        'table': table,
        'fields': fields,
        'max_export_rows': max_export_rows,
        'query_dict': querystring,
        'querystring': querystring.urlencode(),
        'search_query': search_query,
        'slug': slug,
        'total_count': all_data.count(),
        'version': version,
    }
    return render(request, 'dataset-detail.html', context)
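Example #3 filters with all_data.filter(search_data=SearchQuery(search_query)), which only works if the dynamically generated model exposes a populated full-text search column. A minimal sketch of what that field could look like; the model and field names are stand-ins, not the project's real dynamic model:

from django.contrib.postgres.search import SearchQuery, SearchVectorField
from django.db import models


class ExampleRow(models.Model):
    # Hypothetical stand-in for the generated table model: search_data holds
    # a precomputed tsvector, so filter(search_data=SearchQuery(...)) performs
    # a full-text match against it.
    search_data = SearchVectorField(null=True)

    class Meta:
        abstract = True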
Example #4
def dataset_detail(request, slug, tablename=""):
    if len(request.GET) > 0 and not request.user.is_authenticated:
        return redirect(f"{settings.LOGIN_URL}?next={request.get_full_path()}")

    try:
        dataset = Dataset.objects.get(slug=slug)
    except Dataset.DoesNotExist:
        context = {"message": "Dataset does not exist"}
        return render(request, "404.html", context, status=404)

    if not tablename:
        tablename = dataset.get_default_table().name
        return redirect(
            reverse(
                "core:dataset-table-detail",
                kwargs={
                    "slug": slug,
                    "tablename": tablename
                },
            ))

    try:
        allow_hidden = request.user.is_superuser
        table = dataset.get_table(tablename, allow_hidden=allow_hidden)
    except Table.DoesNotExist:
        context = {"message": "Table does not exist"}
        try:
            # log 404 request only if hidden table exist
            hidden_table = dataset.get_table(tablename, allow_hidden=True)
            if hidden_table:
                log_blocked_request(request, 404)
        except Table.DoesNotExist:
            pass
        return render(request, "404.html", context, status=404)

    querystring = request.GET.copy()
    page_number = querystring.pop("page", ["1"])[0].strip() or "1"
    items_per_page = querystring.pop(
        "items", [str(settings.ROWS_PER_PAGE)]
    )[0].strip() or str(settings.ROWS_PER_PAGE)
    download_csv = querystring.pop("format", [""]) == ["csv"]
    try:
        page = int(page_number)
    except ValueError:
        context = {"message": "Invalid page number."}
        return render(request, "404.html", context, status=404)
    try:
        items_per_page = int(items_per_page)
    except ValueError:
        context = {"message": "Invalid items per page."}
        return render(request, "404.html", context, status=404)
    items_per_page = min(items_per_page, 1000)

    version = dataset.version_set.order_by("-order").first()

    TableModel = table.get_model()
    query, search_query, order_by = parse_querystring(querystring)

    DynamicForm = get_table_dynamic_form(table)
    filter_form = DynamicForm(data=query)
    if filter_form.is_valid():
        query = {k: v for k, v in filter_form.cleaned_data.items() if v != ""}
    else:
        query = {}

    all_data = TableModel.objects.composed_query(query, search_query, order_by)

    if download_csv:
        user_agent = request.headers.get("User-Agent", "")
        block_agent = any(agent.lower() in user_agent.lower()
                          for agent in settings.BLOCKED_AGENTS)

        if not any([query, search_query]) or not user_agent or block_agent:
            # User trying to download a CSV without custom filters or invalid
            # user-agent specified.
            context = {
                "html_code_snippet": "core/400-csv-without-filters.html",
                "download_url": dataset.files_url,
            }
            return render(request, "4xx.html", context, status=400)

        if all_data.count() > settings.CSV_EXPORT_MAX_ROWS:
            context = {
                "message": "Max rows exceeded.",
                "title_4xx": "Oops! Ocorreu um erro:"
            }
            return render(request, "4xx.html", context, status=400)

        filename = "{}-{}.csv".format(slug, uuid.uuid4().hex)
        pseudo_buffer = Echo()
        writer = csv.writer(pseudo_buffer, dialect=csv.excel)
        csv_rows = queryset_to_csv(all_data, table.fields)
        response = StreamingHttpResponse(
            (writer.writerow(row) for row in csv_rows),
            content_type="text/csv;charset=UTF-8",
        )
        response["Content-Disposition"] = 'attachment; filename="{}"'.format(
            filename)
        response.encoding = "UTF-8"
        return response

    paginator = Paginator(all_data, items_per_page)
    data = paginator.get_page(page)

    for key, value in list(querystring.items()):
        if not value:
            del querystring[key]

    context = {
        "data": data,
        "dataset": dataset,
        "filter_form": filter_form,
        "max_export_rows": settings.CSV_EXPORT_MAX_ROWS,
        "search_term": querystring.get("search", ""),
        "querystring": querystring.urlencode(),
        "slug": slug,
        "table": table,
        "total_count": all_data.count(),
        "version": version,
    }

    status = 200
    if filter_form.errors:
        status = 400
    return render(request, "core/dataset-detail.html", context, status=status)
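Examples #2 and #4 delegate querystring parsing to parse_querystring (a manager method in #2, a plain function in #4) without showing it. A rough sketch of the split it appears to perform, reconstructed only from how the three return values are consumed; the real helper may normalise keys and values differently:

def parse_querystring(querystring):
    # Hypothetical sketch. `querystring` is a mutable QueryDict copy with
    # page/items/format already popped by the view.
    qs = querystring.copy()
    search_query = qs.pop("search", [""])[0].strip()
    order_by = [
        field.strip().lower()
        for field in qs.pop("order-by", [""])[0].split(",")
        if field.strip()
    ]
    # Remaining non-empty values are treated as column filters.
    query = {key: value for key, value in qs.items() if value}
    return query, search_query, order_by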
Example #5
def dataset_detail(request, slug, tablename=""):
    dataset = get_object_or_404(Dataset, slug=slug)
    if not tablename:
        tablename = dataset.get_default_table().name
        return redirect(
            reverse(
                "core:dataset-table-detail",
                kwargs={
                    "slug": slug,
                    "tablename": tablename
                },
            ))

    try:
        table = dataset.get_table(tablename)
    except Table.DoesNotExist:
        return HttpResponseBadRequest("Table does not exist.", status=404)

    querystring = request.GET.copy()
    page_number = querystring.pop("page", ["1"])[0].strip() or "1"
    download_csv = querystring.pop("format", [""]) == ["csv"]
    try:
        page = int(page_number)
    except ValueError:
        return HttpResponseBadRequest("Invalid page number.", status=404)

    version = dataset.version_set.order_by("-order").first()
    fields = table.fields

    all_data = table.get_model().objects.filter_by_querystring(querystring)

    if not download_csv:
        fieldnames_to_show = [
            field.name for field in fields if field.show_on_frontend
        ]
        all_data = all_data.values(*fieldnames_to_show)
    else:
        if all_data.count() <= max_export_rows:
            filename = "{}-{}.csv".format(slug, uuid.uuid4().hex)
            pseudo_buffer = Echo()
            writer = csv.writer(pseudo_buffer, dialect=csv.excel)
            csv_rows = queryset_to_csv(all_data, fields)
            response = StreamingHttpResponse(
                (writer.writerow(row) for row in csv_rows),
                content_type="text/csv;charset=UTF-8",
            )
            response["Content-Disposition"] = (
                'attachment; filename="{}"'.format(filename))
            response.encoding = "UTF-8"
            return response
        else:
            return HttpResponseBadRequest("Max rows exceeded.", status=404)

    paginator = Paginator(all_data, 20)
    data = paginator.get_page(page)

    for key, value in list(querystring.items()):
        if not value:
            del querystring[key]
    context = {
        "data": data,
        "dataset": dataset,
        "table": table,
        "fields": fields,
        "max_export_rows": max_export_rows,
        "query_dict": querystring,
        "querystring": querystring.urlencode(),
        "slug": slug,
        "table": table,
        "total_count": all_data.count(),
        "version": version,
    }
    return render(request, "dataset-detail.html", context)
Example #6
def dataset_detail(request, slug, tablename=''):
    dataset = get_object_or_404(Dataset, slug=slug)
    if not tablename:
        tablename = dataset.get_default_table().name
        return redirect(
            reverse('core:dataset-table-detail',
                    kwargs={
                        'slug': slug,
                        'tablename': tablename
                    }))

    try:
        table = dataset.get_table(tablename)
    except Table.DoesNotExist:
        return HttpResponseBadRequest('Table does not exist.', status=404)

    querystring = request.GET.copy()
    page_number = querystring.pop('page', ['1'])[0].strip() or '1'
    download_csv = querystring.pop('format', ['']) == ['csv']
    try:
        page = int(page_number)
    except ValueError:
        return HttpResponseBadRequest('Invalid page number.', status=404)

    version = dataset.version_set.order_by('-order').first()
    fields = table.fields

    all_data = table.get_model().objects.filter_by_querystring(querystring)

    if not download_csv:
        fieldnames_to_show = [
            field.name for field in fields if field.show_on_frontend
        ]
        all_data = all_data.values(*fieldnames_to_show)
    else:
        if all_data.count() <= max_export_rows:
            filename = '{}-{}.csv'.format(slug, uuid.uuid4().hex)
            pseudo_buffer = Echo()
            writer = csv.writer(pseudo_buffer, dialect=csv.excel)
            csv_rows = queryset_to_csv(all_data, fields)
            response = StreamingHttpResponse(
                (writer.writerow(row) for row in csv_rows),
                content_type='text/csv;charset=UTF-8',
            )
            response['Content-Disposition'] = (
                'attachment; filename="{}"'.format(filename))
            response.encoding = 'UTF-8'
            return response
        else:
            return HttpResponseBadRequest('Max rows exceeded.', status=404)

    paginator = Paginator(all_data, 20)
    data = paginator.get_page(page)

    for key, value in list(querystring.items()):
        if not value:
            del querystring[key]
    context = {
        'data': data,
        'dataset': dataset,
        'table': table,
        'fields': fields,
        'max_export_rows': max_export_rows,
        'query_dict': querystring,
        'querystring': querystring.urlencode(),
        'slug': slug,
        'total_count': all_data.count(),
        'version': version,
    }
    return render(request, 'dataset-detail.html', context)