Example #1
    def dataset_recall(self, request, **kwargs):
        """
        Send an email to the site admin to recall a Dataset from the HSM system.
        """
        from .exceptions import HsmException

        self.method_check(request, allowed=['get'])
        self.is_authenticated(request)
        self.throttle_check(request)

        ds = Dataset.objects.get(id=kwargs['pk'])
        if not has_dataset_download_access(
                request=request, dataset_id=ds.id):
            return HttpResponseForbidden()

        """
        send an email to MyTardis admin
        """
        try:
            subject, content = email_dataset_recall_requested(ds, request.user)
            logger.info("sending email to %s", settings.RDSM_SUPPORT_EMAIL)
            email = EmailMessage(
                subject, content,
                to=[settings.RDSM_SUPPORT_EMAIL],
                cc=[request.user.email],
                from_email=settings.SUPPORT_EMAIL,
                connection=get_connection(fail_silently=True))
            email.send(fail_silently=True)
        except HsmException as err:
            return JsonResponse(
                {'error_message': "%s: %s" % (type(err), str(err))},
                status=HttpResponseServerError.status_code)

        return JsonResponse({
            "message": "Recall requested for Dataset %s" % ds.id
        })
Example #2
def view_full_dataset(request, dataset_id):
    """Displays a MX Dataset and associated information.

    Shows a full (hundreds of images) dataset, its metadata and a list
    of associated files with the option to show metadata of each file
    and ways to download those files.  With write permission this page
    also allows uploading and metadata editing.

    Settings for this view:
    INSTALLED_APPS += ("tardis.apps.mx_views",)
    DATASET_VIEWS = [("http://synchrotron.org.au/views/dataset/full",
                      "tardis.apps.mx_views.views.view_full_dataset"),]

    """
    dataset = Dataset.objects.get(id=dataset_id)

    def get_datafiles_page():
        # pagination was removed by someone in the interface but not here.
        # need to fix.
        pgresults = 100

        paginator = Paginator(dataset.dataset_file_set.all(), pgresults)

        try:
            page = int(request.GET.get('page', '1'))
        except ValueError:
            page = 1

        # If page request (9999) is out of range, deliver last page of results.

        try:
            return paginator.page(page)
        except (EmptyPage, InvalidPage):
            return paginator.page(paginator.num_pages)

    display_images = dataset.get_images()
    image_count = len(display_images)
    if image_count > 4:
        # take 4 evenly spaced images from the set
        display_images = display_images[0::image_count // 4][:4]  # integer slice step

    c = Context({
        'dataset': dataset,
        'datafiles': get_datafiles_page(),
        'parametersets': dataset.getParameterSets()
                                .exclude(schema__hidden=True),
        'has_download_permissions':
            authz.has_dataset_download_access(request, dataset_id),
        'has_write_permissions':
            authz.has_dataset_write(request, dataset_id),
        'from_experiment':
            get_experiment_referer(request, dataset_id),
        'other_experiments':
            authz.get_accessible_experiments_for_dataset(request, dataset_id),
        'display_images': display_images,
    })
    return HttpResponse(render_response_index(
        request, 'mx_views/view_full_dataset.html', c))
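The thumbnail selection in Example #2 picks four evenly spaced images with an extended slice; the slice step must be an integer, which is why floor division is used. A standalone sketch of the same idea, assuming a plain Python sequence:

def pick_evenly_spaced(items, count=4):
    """Return up to `count` items taken at evenly spaced positions.

    A sketch of the thumbnail-selection slice used in Example #2;
    works on any sliceable sequence.
    """
    if len(items) <= count:
        return list(items)
    step = len(items) // count  # slice steps must be integers
    return list(items[0::step][:count])


print(pick_evenly_spaced(list(range(10))))  # -> [0, 2, 4, 6]
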
Example #3
def view_full_dataset(request, dataset_id):
    """Displays a HRMC Dataset as a single scatter plot of x,y values
    from grfinalXX.dat and gerr.dat files

    Requires BDPMytardis with single

    Settings for this view:
    INSTALLED_APPS += ("tardis.apps.hrmc_views",)
    DATASET_VIEWS = [("http://rmit.edu.au/schemas/hrmcdataset",
                      "tardis.apps.hrmc_views.views.view_full_dataset"),]

    """
    logger.debug("got to hrmc views")
    dataset = Dataset.objects.get(id=dataset_id)

    # FIXME: as single image, can remove this
    def get_datafiles_page():
        # pagination was removed by someone in the interface but not here.
        # need to fix.
        pgresults = 100

        paginator = Paginator(dataset.dataset_file_set.all(), pgresults)

        try:
            page = int(request.GET.get('page', '1'))
        except ValueError:
            page = 1

        # If page request (9999) is out of range, deliver last page of results.

        try:
            return paginator.page(page)
        except (EmptyPage, InvalidPage):
            return paginator.page(paginator.num_pages)

    display_images = []
    image_to_show = get_image_to_show(dataset)
    if image_to_show:
        display_images.append(image_to_show)

    c = Context({
        'dataset': dataset,
        'datafiles': get_datafiles_page(),
        'parametersets': dataset.getParameterSets()
                                .exclude(schema__hidden=True),
        'has_download_permissions':
            authz.has_dataset_download_access(request, dataset_id),
        'has_write_permissions':
            authz.has_dataset_write(request, dataset_id),
        'from_experiment':
            get_experiment_referer(request, dataset_id),
        'other_experiments':
            authz.get_accessible_experiments_for_dataset(request, dataset_id),
        'display_images': display_images,
    })
    return HttpResponse(render_response_index(
        request, 'hrmc_views/view_full_dataset.html', c))
Example #4
def view_full_dataset(request, dataset_id):
    """Displays a HRMC Dataset as a single scatter plot of x,y values
    from grfinalXX.dat and gerr.dat files

    Requires BDPMytardis with single

    Settings for this view:
    INSTALLED_APPS += ("tardis.apps.hrmc_views",)
    DATASET_VIEWS = [("http://rmit.edu.au/schemas/hrmcdataset",
                      "tardis.apps.hrmc_views.views.view_full_dataset"),]

    """
    logger.debug("got to hrmc views")
    dataset = Dataset.objects.get(id=dataset_id)

    # FIXME: as single image, can remove this
    def get_datafiles_page():
        # pagination was removed by someone in the interface but not here.
        # need to fix.
        pgresults = 100

        paginator = Paginator(dataset.dataset_file_set.all(), pgresults)

        try:
            page = int(request.GET.get('page', '1'))
        except ValueError:
            page = 1

        # If page request (9999) is out of range, deliver last page of results.

        try:
            return paginator.page(page)
        except (EmptyPage, InvalidPage):
            return paginator.page(paginator.num_pages)

    display_images = []
    image_to_show = get_image_to_show(dataset)
    if image_to_show:
        display_images.append(image_to_show)

    c = Context({
        'dataset': dataset,
        'datafiles': get_datafiles_page(),
        'parametersets': dataset.getParameterSets()
                                .exclude(schema__hidden=True),
        'has_download_permissions':
            authz.has_dataset_download_access(request, dataset_id),
        'has_write_permissions':
            authz.has_dataset_write(request, dataset_id),
        'from_experiment':
            get_experiment_referer(request, dataset_id),
        'other_experiments':
            authz.get_accessible_experiments_for_dataset(request, dataset_id),
        'display_images': display_images,
    })
    return HttpResponse(
        render_response_index(request, 'hrmc_views/view_full_dataset.html', c))
Example #5
def retrieve_parameters(request, datafile_id):

    parametersets = DatafileParameterSet.objects.all()
    parametersets = parametersets.filter(datafile__pk=datafile_id)\
                                 .exclude(schema__hidden=True)

    datafile = DataFile.objects.get(id=datafile_id)
    dataset_id = datafile.dataset.id
    has_write_permissions = authz.has_dataset_write(request, dataset_id)

    c = {'parametersets': parametersets,
         'datafile': datafile,
         'has_write_permissions': has_write_permissions,
         'has_download_permissions':
         authz.has_dataset_download_access(request, dataset_id)}

    return HttpResponse(render_response_index(request,
                        'tardis_portal/ajax/parameters.html', c))
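retrieve_parameters is an AJAX view that takes the datafile id from the URL. A sketch of how such a view is typically wired up in a urls.py; the regex, URL name and import path are assumptions, not copied from MyTardis:

from django.conf.urls import url

# Import path is an assumption; adjust it to where retrieve_parameters
# lives in your tree.
from tardis.tardis_portal.views import retrieve_parameters

urlpatterns = [
    url(r'^ajax/parameters/(?P<datafile_id>\d+)/$', retrieve_parameters,
        name='retrieve_parameters'),
]
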
Example #6
def retrieve_parameters(request, datafile_id):

    parametersets = DatafileParameterSet.objects.all()
    parametersets = parametersets.filter(datafile__pk=datafile_id)\
                                 .exclude(schema__hidden=True)

    datafile = DataFile.objects.get(id=datafile_id)
    dataset_id = datafile.dataset.id
    has_write_permissions = authz.has_dataset_write(request, dataset_id)

    c = {
        'parametersets': parametersets,
        'datafile': datafile,
        'has_write_permissions': has_write_permissions,
        'has_download_permissions':
            authz.has_dataset_download_access(request, dataset_id)
    }

    return HttpResponse(
        render_response_index(request, 'tardis_portal/ajax/parameters.html',
                              c))
Example #7
def view_full_dataset(request, dataset_id):
    """Displays a MX Dataset and associated information.

    Shows a full (hundreds of images) dataset, its metadata and a list
    of associated files with the option to show metadata of each file
    and ways to download those files.  With write permission this page
    also allows uploading and metadata editing.

    Settings for this view:
    INSTALLED_APPS += ("tardis.apps.mx_views",)
    DATASET_VIEWS = [("http://synchrotron.org.au/views/dataset/full",
                      "tardis.apps.mx_views.views.view_full_dataset"),]

    """
    dataset = Dataset.objects.get(id=dataset_id)

    def get_datafiles_page():
        # pagination was removed by someone in the interface but not here.
        # need to fix.
        pgresults = 100

        paginator = Paginator(dataset.datafile_set.all(), pgresults)

        try:
            page = int(request.GET.get('page', '1'))
        except ValueError:
            page = 1

        # If page request (9999) is out of range, deliver last page of results.

        try:
            return paginator.page(page)
        except (EmptyPage, InvalidPage):
            return paginator.page(paginator.num_pages)

    display_images = dataset.get_images()
    image_count = len(display_images)
    if image_count > 4:
        # take 4 evenly spaced images from the set
        display_images = display_images[0::image_count // 4][:4]  # integer slice step

    upload_method = getattr(settings, "UPLOAD_METHOD", "uploadify")

    c = {
        'dataset': dataset,
        'datafiles': get_datafiles_page(),
        'parametersets': dataset.getParameterSets()
                                .exclude(schema__hidden=True),
        'has_download_permissions':
            authz.has_dataset_download_access(request, dataset_id),
        'has_write_permissions':
            authz.has_dataset_write(request, dataset_id),
        'from_experiment':
            get_experiment_referer(request, dataset_id),
        'other_experiments':
            authz.get_accessible_experiments_for_dataset(request, dataset_id),
        'display_images': display_images,
        'upload_method': upload_method,
        'default_organization':
            getattr(settings, 'DEFAULT_ARCHIVE_ORGANIZATION', 'classic'),
        'default_format':
            getattr(settings, 'DEFAULT_ARCHIVE_FORMATS', ['tgz', 'tar'])[0]
    }
    return HttpResponse(
        render_response_index(request, 'mx_views/view_full_dataset.html', c))
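Example #7 reads a few optional settings with getattr() fall-backs. A sketch of what the corresponding settings.py entries might look like; the values simply mirror the defaults used in the code above and are illustrative only:

# settings.py (illustrative values only)
UPLOAD_METHOD = "uploadify"
DEFAULT_ARCHIVE_ORGANIZATION = "classic"
DEFAULT_ARCHIVE_FORMATS = ["tgz", "tar"]
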
Example #8
def dataset_json(request, experiment_id=None, dataset_id=None):
    # Experiment ID is optional (but dataset_id is not)!
    dataset = Dataset.objects.get(id=dataset_id)

    experiment = None  # stays None when no experiment_id is supplied
    if experiment_id:
        try:
            # PUT is fine for non-existing resources, but GET/DELETE is not
            if request.method == 'PUT':
                experiment = Experiment.objects.get(id=experiment_id)
            else:
                experiment = dataset.experiments.get(id=experiment_id)
        except Experiment.DoesNotExist:
            return HttpResponseNotFound()

    # Convenience methods for permissions
    def can_update():
        return authz.has_dataset_ownership(request, dataset_id)
    can_delete = can_update

    def add_experiments(updated_experiments):
        current_experiments = \
            frozenset(dataset.experiments.values_list('id', flat=True))
        # Get all the experiments that currently aren't associated
        for experiment_id in updated_experiments - current_experiments:
            # You must own the experiment to assign datasets to it
            if authz.has_experiment_ownership(request, experiment_id):
                experiment = Experiment.safe.get(request.user, experiment_id)
                logger.info("Adding dataset #%d to experiment #%d" %
                            (dataset.id, experiment.id))
                dataset.experiments.add(experiment)

    # Update this experiment to add it to more experiments
    if request.method == 'PUT':
        # Obviously you can't do this if you don't own the dataset
        if not can_update():
            return HttpResponseForbidden()
        data = json.loads(request.body)
        # Detect if any experiments are new, and add the dataset to them
        add_experiments(frozenset(data['experiments']))
        # Include the experiment we PUT to, as it may also be new
        if experiment is not None:
            add_experiments(frozenset((experiment.id,)))
        dataset.save()

    # Remove this dataset from the given experiment
    if request.method == 'DELETE':
        # First, we need an experiment
        if experiment_id is None:
            # As the experiment is in the URL, this method will never be
            # allowed
            if can_update():
                return HttpResponseMethodNotAllowed(allow="GET PUT")
            return HttpResponseMethodNotAllowed(allow="GET")
        # Cannot remove if this is the last experiment or if it is being
        # removed from a publication
        if (not can_delete() or dataset.experiments.count() < 2 or
           experiment.is_publication()):
            return HttpResponseForbidden()
        dataset.experiments.remove(experiment)
        dataset.save()

    has_download_permissions = \
        authz.has_dataset_download_access(request, dataset_id)

    return HttpResponse(json.dumps(get_dataset_info(dataset,
                                                    has_download_permissions)),
                        content_type='application/json')
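The PUT branch of dataset_json expects a JSON body with an "experiments" list of experiment ids. A client-side sketch follows; the URL layout, the session cookie and the omitted CSRF handling are assumptions about a particular deployment, not taken from the code above.

import json

import requests

# Hypothetical URL and session cookie; CSRF handling is omitted here.
url = "https://mytardis.example.com/ajax/json/experiment/1/dataset/42/"
payload = {"experiments": [1, 7]}  # experiments to attach the dataset to

resp = requests.put(url, data=json.dumps(payload),
                    headers={"Content-Type": "application/json"},
                    cookies={"sessionid": "<your session id>"})
print(resp.status_code, resp.json())
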
Example #9
    def get_context_data(self, request, dataset, **kwargs):
        """
        Prepares the values to be passed to the default dataset view,
        respecting authorization rules. Returns a dict of values (the context).

        :param request: an HTTP request object
        :type request: :class:`django.http.HttpRequest`
        :param dataset: the Dataset model instance
        :type dataset: tardis.tardis_portal.models.dataset.Dataset
        :return: A dictionary of values for the view/template.
        :rtype: dict
        """

        def get_datafiles_page():
            # pagination was removed by someone in the interface but not here.
            # need to fix.
            pgresults = 100

            paginator = Paginator(dataset.datafile_set.all(), pgresults)

            try:
                page = int(request.GET.get('page', '1'))
            except ValueError:
                page = 1

            # If page request is out of range (eg 9999), deliver last page of
            # results.
            try:
                return paginator.page(page)
            except (EmptyPage, InvalidPage):
                return paginator.page(paginator.num_pages)

        c = super(DatasetView, self).get_context_data(**kwargs)

        dataset_id = dataset.id
        upload_method = getattr(settings, "UPLOAD_METHOD", False)
        max_images_in_carousel = getattr(settings, "MAX_IMAGES_IN_CAROUSEL", 0)
        if max_images_in_carousel:
            carousel_slice = ":%s" % max_images_in_carousel
        else:
            carousel_slice = ":"

        c.update(
            {'dataset': dataset,
             'datafiles': get_datafiles_page(),
             'parametersets': dataset.getParameterSets().exclude(
                     schema__hidden=True),
             'has_download_permissions': authz.has_dataset_download_access(
                 request, dataset_id),
             'has_write_permissions': authz.has_dataset_write(request,
                                                              dataset_id),
             'from_experiment': get_experiment_referer(request, dataset_id),
             'other_experiments': authz.get_accessible_experiments_for_dataset(
                 request,
                 dataset_id),
             'upload_method': upload_method,
             'push_to_enabled': PushToConfig.name in settings.INSTALLED_APPS,
             'carousel_slice': carousel_slice,
             }
        )

        # Enables UI elements for the push_to app
        if c['push_to_enabled']:
            push_to_args = {
                'dataset_id': dataset.pk
            }
            c['push_to_url'] = reverse(initiate_push_dataset,
                                       kwargs=push_to_args)

        _add_protocols_and_organizations(request, dataset, c)

        return c
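carousel_slice is a string such as ":5", shaped for Django's built-in slice template filter. A self-contained sketch of how a template could consume it; the markup is illustrative, not the actual MyTardis template:

import django
from django.conf import settings

if not settings.configured:
    settings.configure(TEMPLATES=[
        {"BACKEND": "django.template.backends.django.DjangoTemplates"},
    ])
    django.setup()

from django.template import Context, Template

tmpl = Template(
    "{% for img in display_images|slice:carousel_slice %}{{ img }} {% endfor %}"
)
rendered = tmpl.render(Context({
    "display_images": ["a.png", "b.png", "c.png", "d.png"],
    "carousel_slice": ":2",  # as built from MAX_IMAGES_IN_CAROUSEL above
}))
print(rendered)  # -> "a.png b.png "
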
Example #10
    def get_context_data(self, request, dataset, **kwargs):
        """
        Prepares the values to be passed to the default dataset view,
        respecting authorization rules. Returns a dict of values (the context).

        :param request: an HTTP request object
        :type request: :class:`django.http.HttpRequest`
        :param dataset: the Dataset model instance
        :type dataset: tardis.tardis_portal.models.dataset.Dataset
        :return: A dictionary of values for the view/template.
        :rtype: dict
        """
        def get_datafiles_page():
            # pagination was removed by someone in the interface but not here.
            # need to fix.
            pgresults = 100

            paginator = Paginator(dataset.datafile_set.all(), pgresults)

            try:
                page = int(request.GET.get('page', '1'))
            except ValueError:
                page = 1

            # If page request is out of range (eg 9999), deliver last page of
            # results.
            try:
                return paginator.page(page)
            except (EmptyPage, InvalidPage):
                return paginator.page(paginator.num_pages)

        c = super(DatasetView, self).get_context_data(**kwargs)

        dataset_id = dataset.id
        upload_method = getattr(settings, "UPLOAD_METHOD", False)
        max_images_in_carousel = getattr(settings, "MAX_IMAGES_IN_CAROUSEL", 0)
        if max_images_in_carousel:
            carousel_slice = ":%s" % max_images_in_carousel
        else:
            carousel_slice = ":"

        c.update({
            'dataset': dataset,
            'datafiles': get_datafiles_page(),
            'parametersets':
                dataset.getParameterSets().exclude(schema__hidden=True),
            'has_download_permissions':
                authz.has_dataset_download_access(request, dataset_id),
            'has_write_permissions':
                authz.has_dataset_write(request, dataset_id),
            'from_experiment': get_experiment_referer(request, dataset_id),
            'other_experiments':
                authz.get_accessible_experiments_for_dataset(
                    request, dataset_id),
            'upload_method': upload_method,
            'push_to_enabled': PushToConfig.name in settings.INSTALLED_APPS,
            'carousel_slice': carousel_slice,
        })

        # Enables UI elements for the push_to app
        if c['push_to_enabled']:
            push_to_args = {'dataset_id': dataset.pk}
            c['push_to_url'] = reverse(initiate_push_dataset,
                                       kwargs=push_to_args)

        _add_protocols_and_organizations(request, dataset, c)

        return c
Example #11
def dataset_json(request, experiment_id=None, dataset_id=None):
    # Experiment ID is optional (but dataset_id is not)!
    dataset = Dataset.objects.get(id=dataset_id)

    experiment = None  # stays None when no experiment_id is supplied
    if experiment_id:
        try:
            # PUT is fine for non-existing resources, but GET/DELETE is not
            if request.method == 'PUT':
                experiment = Experiment.objects.get(id=experiment_id)
            else:
                experiment = dataset.experiments.get(id=experiment_id)
        except Experiment.DoesNotExist:
            return HttpResponseNotFound()

    # Convenience methods for permissions
    def can_update():
        return authz.has_dataset_ownership(request, dataset_id)

    can_delete = can_update

    def add_experiments(updated_experiments):
        current_experiments = \
            frozenset(dataset.experiments.values_list('id', flat=True))
        # Get all the experiments that currently aren't associated
        for experiment_id in updated_experiments - current_experiments:
            # You must own the experiment to assign datasets to it
            if authz.has_experiment_ownership(request, experiment_id):
                experiment = Experiment.safe.get(request.user, experiment_id)
                logger.info("Adding dataset #%d to experiment #%d" %
                            (dataset.id, experiment.id))
                dataset.experiments.add(experiment)

    # Update this experiment to add it to more experiments
    if request.method == 'PUT':
        # Obviously you can't do this if you don't own the dataset
        if not can_update():
            return HttpResponseForbidden()
        data = json.loads(request.body)
        # Detect if any experiments are new, and add the dataset to them
        add_experiments(frozenset(data['experiments']))
        # Include the experiment we PUT to, as it may also be new
        if experiment is not None:
            add_experiments(frozenset((experiment.id, )))
        dataset.save()

    # Remove this dataset from the given experiment
    if request.method == 'DELETE':
        # First, we need an experiment
        if experiment_id is None:
            # As the experiment is in the URL, this method will never be
            # allowed
            if can_update():
                return HttpResponseMethodNotAllowed(allow="GET PUT")
            return HttpResponseMethodNotAllowed(allow="GET")
        # Cannot remove if this is the last experiment or if it is being
        # removed from a publication
        if (not can_delete() or dataset.experiments.count() < 2
                or experiment.is_publication()):
            return HttpResponseForbidden()
        dataset.experiments.remove(experiment)
        dataset.save()

    has_download_permissions = \
        authz.has_dataset_download_access(request, dataset_id)

    return HttpResponse(json.dumps(
        get_dataset_info(dataset, has_download_permissions)),
                        content_type='application/json')
Example #12
def retrieve_datafile_list(
        request,
        dataset_id,
        template_name='tardis_portal/ajax/datafile_list.html'):

    params = {}

    query = None
    highlighted_dsf_pks = []

    if 'query' in request.GET:
        search_query = FacetFixedSearchQuery()
        sqs = SearchQuerySet(query=search_query)
        query = SearchQueryString(request.GET['query'])
        results = sqs.raw_search(query.query_string() +
                                 ' AND dataset_id_stored:%i' %
                                 (int(dataset_id))).load_all()
        highlighted_dsf_pks = [
            int(r.pk) for r in results if r.model_name == 'datafile'
            and r.dataset_id_stored == int(dataset_id)
        ]

        params['query'] = query.query_string()

    elif 'datafileResults' in request.session and 'search' in request.GET:
        highlighted_dsf_pks = [
            r.pk for r in request.session['datafileResults']
        ]

    dataset_results = \
        DataFile.objects.filter(
            dataset__pk=dataset_id,
        ).order_by('filename')

    if request.GET.get('limit', False) and len(highlighted_dsf_pks):
        dataset_results = dataset_results.filter(pk__in=highlighted_dsf_pks)
        params['limit'] = request.GET['limit']

    filename_search = None

    if 'filename' in request.GET and len(request.GET['filename']):
        filename_search = request.GET['filename']
        dataset_results = \
            dataset_results.filter(filename__icontains=filename_search)

        params['filename'] = filename_search

    # pagination was removed by someone in the interface but not here.
    # need to fix.
    pgresults = 100

    paginator = Paginator(dataset_results, pgresults)

    try:
        page = int(request.GET.get('page', '1'))
    except ValueError:
        page = 1

    # If page request (9999) is out of range, deliver last page of results.

    try:
        dataset = paginator.page(page)
    except (EmptyPage, InvalidPage):
        dataset = paginator.page(paginator.num_pages)

    is_owner = False
    has_download_permissions = authz.has_dataset_download_access(
        request, dataset_id)
    has_write_permissions = False

    if request.user.is_authenticated():
        is_owner = authz.has_dataset_ownership(request, dataset_id)
        has_write_permissions = authz.has_dataset_write(request, dataset_id)

    immutable = Dataset.objects.get(id=dataset_id).immutable

    c = {
        'datafiles': dataset,
        'paginator': paginator,
        'immutable': immutable,
        'dataset': Dataset.objects.get(id=dataset_id),
        'filename_search': filename_search,
        'is_owner': is_owner,
        'highlighted_datafiles': highlighted_dsf_pks,
        'has_download_permissions': has_download_permissions,
        'has_write_permissions': has_write_permissions,
        'search_query': query,
        'params': urlencode(params),
    }
    _add_protocols_and_organizations(request, None, c)
    return HttpResponse(render_response_index(request, template_name, c))
Example #13
def retrieve_datafile_list(
        request, dataset_id,
        template_name='tardis_portal/ajax/datafile_list.html'):

    params = {}

    query = None
    highlighted_dsf_pks = []

    if 'query' in request.GET:
        search_query = FacetFixedSearchQuery()
        sqs = SearchQuerySet(query=search_query)
        query = SearchQueryString(request.GET['query'])
        results = sqs.raw_search(
            query.query_string() + ' AND dataset_id_stored:%i' %
            (int(dataset_id))).load_all()
        highlighted_dsf_pks = [int(r.pk) for r in results
                               if r.model_name == 'datafile' and
                               r.dataset_id_stored == int(dataset_id)]

        params['query'] = query.query_string()

    elif 'datafileResults' in request.session and 'search' in request.GET:
        highlighted_dsf_pks = [r.pk
                               for r in request.session['datafileResults']]

    dataset_results = \
        DataFile.objects.filter(
            dataset__pk=dataset_id,
        ).order_by('filename')

    if request.GET.get('limit', False) and len(highlighted_dsf_pks):
        dataset_results = dataset_results.filter(pk__in=highlighted_dsf_pks)
        params['limit'] = request.GET['limit']

    filename_search = None

    if 'filename' in request.GET and len(request.GET['filename']):
        filename_search = request.GET['filename']
        dataset_results = \
            dataset_results.filter(filename__icontains=filename_search)

        params['filename'] = filename_search

    # pagination was removed by someone in the interface but not here.
    # need to fix.
    pgresults = 100

    paginator = Paginator(dataset_results, pgresults)

    try:
        page = int(request.GET.get('page', '1'))
    except ValueError:
        page = 1

    # If page request (9999) is out of range, deliver last page of results.

    try:
        dataset = paginator.page(page)
    except (EmptyPage, InvalidPage):
        dataset = paginator.page(paginator.num_pages)

    is_owner = False
    has_download_permissions = authz.has_dataset_download_access(request,
                                                                 dataset_id)
    has_write_permissions = False

    if request.user.is_authenticated():
        is_owner = authz.has_dataset_ownership(request, dataset_id)
        has_write_permissions = authz.has_dataset_write(request, dataset_id)

    immutable = Dataset.objects.get(id=dataset_id).immutable

    c = {
        'datafiles': dataset,
        'paginator': paginator,
        'immutable': immutable,
        'dataset': Dataset.objects.get(id=dataset_id),
        'filename_search': filename_search,
        'is_owner': is_owner,
        'highlighted_datafiles': highlighted_dsf_pks,
        'has_download_permissions': has_download_permissions,
        'has_write_permissions': has_write_permissions,
        'search_query': query,
        'params': urlencode(params),
    }
    _add_protocols_and_organizations(request, None, c)
    return HttpResponse(render_response_index(request, template_name, c))