Example 1 (score: 0)
File: ajax.py — Project: cfe-lab/Kive
 def filter_granted(self, queryset):
     """ Filter a queryset to only include records explicitly granted.

     NOTE(review): the ``queryset`` argument is ignored — the method
     returns a fresh queryset built by ``Dataset.filter_by_user`` for the
     requesting user instead of narrowing the queryset passed in.
     Confirm this is intentional for this view's base queryset.
     """
     return Dataset.filter_by_user(self.request.user)
Example 2 (score: 0)
File: ajax.py — Project: cfe-lab/Kive
 def filter_granted(self, queryset):
     """ Filter a queryset to only include records explicitly granted.

     NOTE(review): the ``queryset`` argument is ignored — the method
     returns a fresh queryset built by ``Dataset.filter_by_user`` for the
     requesting user instead of narrowing the queryset passed in.
     Confirm this is intentional for this view's base queryset.
     """
     return Dataset.filter_by_user(self.request.user)
Example 3 (score: 0)
def dataset_view(request, dataset_id):
    """
    Display the file associated with the dataset in the browser, or update its name/description.

    GET renders the dataset with a template chosen by its state (missing
    data, raw file, or CSV); POST validates a DatasetDetailsForm, updates
    the dataset's name/description/permissions, and redirects back to the
    return URL.

    Raises Http404 if the dataset does not exist or is not accessible to
    the requesting user.
    """
    # Decide where to send the user afterwards: back to a run page if the
    # query string requests it, otherwise to the dataset list.
    return_to_run = request.GET.get('run_id', None)
    is_view_results = "view_results" in request.GET
    is_view_run = "view_run" in request.GET
    return_url = reverse("datasets")
    if return_to_run is not None:
        if is_view_run:
            return_url = reverse('view_run', kwargs={'run_id': return_to_run})
        elif is_view_results:
            return_url = reverse('view_results', kwargs={'run_id': return_to_run})

    try:
        # Administrators see every dataset; other users only those granted.
        if admin_check(request.user):
            accessible_datasets = Dataset.objects
        else:
            accessible_datasets = Dataset.filter_by_user(request.user)
        # Prefetch the structure chain so the template doesn't trigger a
        # query per compound-datatype member.
        dataset = accessible_datasets.prefetch_related(
            'structure',
            'structure__compounddatatype',
            'structure__compounddatatype__members',
            'structure__compounddatatype__members__datatype',
            'structure__compounddatatype__members__datatype__basic_constraints'
        ).get(pk=dataset_id)

    except ObjectDoesNotExist:
        raise Http404("ID {} cannot be accessed".format(dataset_id))

    # Figure out which users and groups could be given access to this Dataset.
    # If the Dataset is uploaded, it's anyone who doesn't already have access;
    # if it was generated, it's anyone who had access to the generating run.
    # NOTE(review): these values are not used further in this view.
    addable_users, addable_groups = dataset.other_users_groups()

    if dataset.file_source is None:
        generating_run = None
    else:
        generating_run = dataset.file_source.top_level_run
    container_dataset = dataset.containers.filter(argument__type='O').first()  # Output from which runs?
    if container_dataset is None:
        container_run = None
    else:
        container_run = container_dataset.run
    # Number of distinct runs that consumed this dataset as an input.
    inputs_count = dataset.containers.filter(
        argument__type='I').values('run_id').distinct().count()

    if request.method == "POST":
        # We are going to try and update this Dataset.
        dataset_form = DatasetDetailsForm(
            request.POST,
            access_limits=dataset.get_access_limits(),
            instance=dataset
        )
        try:
            if dataset_form.is_valid():
                dataset.name = dataset_form.cleaned_data["name"]
                dataset.description = dataset_form.cleaned_data["description"]
                dataset.clean()
                dataset.save()
                # Granting and re-validating permissions must succeed or
                # fail together, hence the transaction.
                with transaction.atomic():
                    dataset.grant_from_json(dataset_form.cleaned_data["permissions"])
                    dataset.validate_restrict_access(dataset.get_access_limits())

                return HttpResponseRedirect(return_url)
        except (AttributeError, ValidationError, ValueError) as e:
            # BUG FIX: exceptions have no ``.message`` attribute in
            # Python 3, so the previous ``e.message`` raised its own
            # AttributeError while logging. ``str(e)`` is safe everywhere.
            LOGGER.exception(str(e))
            dataset_form.add_error(None, e)

    else:
        # A DatasetForm which we can use to make submission and editing easier.
        dataset_form = DatasetDetailsForm(
            access_limits=dataset.get_access_limits(),
            initial={"name": dataset.name, "description": dataset.description}
        )

    c = {
        "is_admin": admin_check(request.user),
        "is_owner": dataset.user == request.user,
        "dataset": dataset,
        "return": return_url,
        "dataset_form": dataset_form,
        "generating_run": generating_run,
        "inputs_count": inputs_count,
        "container_run": container_run
    }

    if not dataset.has_data():
        # No file content at all: explain why.
        t = loader.get_template("librarian/missing_dataset_view.html")
        if dataset.external_path:
            c["missing_data_message"] = "This dataset's external file is missing or has "\
                                        "been modified (MD5 mismatch).  " \
                                        "Please consult your system administrator if this is unexpected."
        elif dataset.is_redacted():
            c["missing_data_message"] = "Data has been redacted."
        else:
            c["missing_data_message"] = "Data was not retained or has been purged."
        rendered_response = t.render(c, request)

    elif dataset.is_raw():
        t = loader.get_template("librarian/raw_dataset_view.html")

        # Test whether this is a binary file or not.
        # Read 1000 characters.
        data_handle = dataset.get_open_file_handle('r')
        if data_handle is None:
            c["missing_data_message"] = "Data has been removed or renamed."
        else:
            with data_handle:
                sample_content = data_handle.read(1000)
            c.update({"sample_content": sample_content})
        c["is_binary"] = False
        try:
            rendered_response = t.render(c, request)
        except DjangoUnicodeDecodeError:
            # Sample didn't decode as text: re-render as a binary file.
            c["is_binary"] = True
            del c["sample_content"]
            rendered_response = t.render(c, request)
    else:
        extra_errors = []
        # If we have a mismatched output, we do an alignment
        # over the columns.
        if dataset.content_matches_header:
            col_matching, processed_rows = None, dataset.rows(
                True,
                limit=settings.DATASET_DISPLAY_MAX,
                extra_errors=extra_errors)
        else:
            col_matching, insert = dataset.column_alignment()
            processed_rows = dataset.rows(data_check=True,
                                          insert_at=insert,
                                          limit=settings.DATASET_DISPLAY_MAX,
                                          extra_errors=extra_errors)
        t = loader.get_template("librarian/csv_dataset_view.html")
        processed_rows = list(processed_rows)
        c.update(
            {
                'column_matching': col_matching,
                'processed_rows': processed_rows,
                'extra_errors': extra_errors,
                "are_rows_truncated": len(processed_rows) >= settings.DATASET_DISPLAY_MAX
            }
        )
        rendered_response = t.render(c, request)
    return HttpResponse(rendered_response)
Example 4 (score: 0)
def dataset_view(request, dataset_id):
    """
    Display the file associated with the dataset in the browser, or update its name/description.

    GET renders the dataset (missing-data template or raw-file template);
    POST validates a DatasetDetailsForm, updates the dataset's
    name/description/permissions, and redirects back to the dataset list.

    Raises Http404 if the dataset does not exist or is not accessible to
    the requesting user.
    """
    return_url = reverse("datasets")

    try:
        # Administrators see every dataset; other users only those granted.
        if admin_check(request.user):
            accessible_datasets = Dataset.objects
        else:
            accessible_datasets = Dataset.filter_by_user(request.user)
        dataset = accessible_datasets.get(pk=dataset_id)

    except ObjectDoesNotExist:
        raise Http404("ID {} cannot be accessed".format(dataset_id))

    # Figure out which users and groups could be given access to this Dataset.
    # If the Dataset is uploaded, it's anyone who doesn't already have access;
    # if it was generated, it's anyone who had access to the generating run.
    # NOTE(review): these values are not used further in this view.
    addable_users, addable_groups = dataset.other_users_groups()

    generating_run = None
    container_dataset = dataset.containers.filter(
        argument__type='O').first()  # Output from which runs?
    if container_dataset is None:
        container_run = None
    else:
        container_run = container_dataset.run
    # Number of distinct runs that consumed this dataset as an input.
    inputs_count = dataset.containers.filter(
        argument__type='I').values('run_id').distinct().count()

    if request.method == "POST":
        # We are going to try and update this Dataset.
        dataset_form = DatasetDetailsForm(
            request.POST,
            access_limits=dataset.get_access_limits(),
            instance=dataset)
        try:
            if dataset_form.is_valid():
                dataset.name = dataset_form.cleaned_data["name"]
                dataset.description = dataset_form.cleaned_data["description"]
                dataset.clean()
                dataset.save()
                # Granting and re-validating permissions must succeed or
                # fail together, hence the transaction.
                with transaction.atomic():
                    dataset.grant_from_json(
                        dataset_form.cleaned_data["permissions"])
                    dataset.validate_restrict_access(
                        dataset.get_access_limits())

                return HttpResponseRedirect(return_url)
        except (AttributeError, ValidationError, ValueError) as e:
            # BUG FIX: exceptions have no ``.message`` attribute in
            # Python 3, so the previous ``e.message`` raised its own
            # AttributeError while logging. ``str(e)`` is safe everywhere.
            LOGGER.exception(str(e))
            dataset_form.add_error(None, e)

    else:
        # A DatasetForm which we can use to make submission and editing easier.
        dataset_form = DatasetDetailsForm(
            access_limits=dataset.get_access_limits(),
            initial={
                "name": dataset.name,
                "description": dataset.description
            })

    c = {
        "is_admin": admin_check(request.user),
        "is_owner": dataset.user == request.user,
        "dataset": dataset,
        "return": return_url,
        "dataset_form": dataset_form,
        "generating_run": generating_run,
        "inputs_count": inputs_count,
        "container_run": container_run
    }

    if not dataset.has_data():
        # No file content at all: explain why.
        t = loader.get_template("librarian/missing_dataset_view.html")
        if dataset.external_path:
            c["missing_data_message"] = "This dataset's external file is missing or has "\
                                        "been modified (MD5 mismatch).  " \
                                        "Please consult your system administrator if this is unexpected."
        elif dataset.is_redacted():
            c["missing_data_message"] = "Data has been redacted."
        else:
            c["missing_data_message"] = "Data was not retained or has been purged."
        rendered_response = t.render(c, request)

    else:
        t = loader.get_template("librarian/raw_dataset_view.html")

        # Test whether this is a binary file or not.
        # Read 1000 characters.
        data_handle = dataset.get_open_file_handle('r')
        if data_handle is None:
            c["missing_data_message"] = "Data has been removed or renamed."
        else:
            with data_handle:
                sample_content = data_handle.read(1000)
            c.update({"sample_content": sample_content})
        c["is_binary"] = False
        try:
            rendered_response = t.render(c, request)
        except DjangoUnicodeDecodeError:
            # Sample didn't decode as text: re-render as a binary file.
            c["is_binary"] = True
            del c["sample_content"]
            rendered_response = t.render(c, request)
    return HttpResponse(rendered_response)
Example 5 (score: 0)
def dataset_view(request, dataset_id):
    """
    Display the file associated with the dataset in the browser, or update its name/description.

    GET renders the dataset with a template chosen by its state (missing
    data, raw file, or CSV); POST validates a DatasetDetailsForm, updates
    the dataset's name/description/permissions, and redirects back to the
    return URL.

    Raises Http404 if the dataset does not exist or is not accessible to
    the requesting user.
    """
    # Decide where to send the user afterwards: back to a run page if the
    # query string requests it, otherwise to the dataset list.
    return_to_run = request.GET.get('run_id', None)
    is_view_results = "view_results" in request.GET
    is_view_run = "view_run" in request.GET
    return_url = reverse("datasets")
    if return_to_run is not None:
        if is_view_run:
            return_url = reverse('view_run', kwargs={'run_id': return_to_run})
        elif is_view_results:
            return_url = reverse('view_results',
                                 kwargs={'run_id': return_to_run})

    try:
        # Administrators see every dataset; other users only those granted.
        if admin_check(request.user):
            accessible_datasets = Dataset.objects
        else:
            accessible_datasets = Dataset.filter_by_user(request.user)
        # Prefetch the structure chain so the template doesn't trigger a
        # query per compound-datatype member.
        dataset = accessible_datasets.prefetch_related(
            'structure', 'structure__compounddatatype',
            'structure__compounddatatype__members',
            'structure__compounddatatype__members__datatype',
            'structure__compounddatatype__members__datatype__basic_constraints'
        ).get(pk=dataset_id)

    except ObjectDoesNotExist:
        raise Http404("ID {} cannot be accessed".format(dataset_id))

    # Figure out which users and groups could be given access to this Dataset.
    # If the Dataset is uploaded, it's anyone who doesn't already have access;
    # if it was generated, it's anyone who had access to the generating run.
    # NOTE(review): these values are not used further in this view.
    addable_users, addable_groups = dataset.other_users_groups()

    if dataset.file_source is None:
        generating_run = None
    else:
        generating_run = dataset.file_source.top_level_run
    container_dataset = dataset.containers.filter(
        argument__type='O').first()  # Output from which runs?
    if container_dataset is None:
        container_run = None
    else:
        container_run = container_dataset.run
    # Number of distinct runs that consumed this dataset as an input.
    inputs_count = dataset.containers.filter(
        argument__type='I').values('run_id').distinct().count()

    if request.method == "POST":
        # We are going to try and update this Dataset.
        dataset_form = DatasetDetailsForm(
            request.POST,
            access_limits=dataset.get_access_limits(),
            instance=dataset)
        try:
            if dataset_form.is_valid():
                dataset.name = dataset_form.cleaned_data["name"]
                dataset.description = dataset_form.cleaned_data["description"]
                dataset.clean()
                dataset.save()
                # Granting and re-validating permissions must succeed or
                # fail together, hence the transaction.
                with transaction.atomic():
                    dataset.grant_from_json(
                        dataset_form.cleaned_data["permissions"])
                    dataset.validate_restrict_access(
                        dataset.get_access_limits())

                return HttpResponseRedirect(return_url)
        except (AttributeError, ValidationError, ValueError) as e:
            # BUG FIX: exceptions have no ``.message`` attribute in
            # Python 3, so the previous ``e.message`` raised its own
            # AttributeError while logging. ``str(e)`` is safe everywhere.
            LOGGER.exception(str(e))
            dataset_form.add_error(None, e)

    else:
        # A DatasetForm which we can use to make submission and editing easier.
        dataset_form = DatasetDetailsForm(
            access_limits=dataset.get_access_limits(),
            initial={
                "name": dataset.name,
                "description": dataset.description
            })

    c = {
        "is_admin": admin_check(request.user),
        "is_owner": dataset.user == request.user,
        "dataset": dataset,
        "return": return_url,
        "dataset_form": dataset_form,
        "generating_run": generating_run,
        "inputs_count": inputs_count,
        "container_run": container_run
    }

    if not dataset.has_data():
        # No file content at all: explain why.
        t = loader.get_template("librarian/missing_dataset_view.html")
        if dataset.external_path:
            c["missing_data_message"] = "This dataset's external file is missing or has "\
                                        "been modified (MD5 mismatch).  " \
                                        "Please consult your system administrator if this is unexpected."
        elif dataset.is_redacted():
            c["missing_data_message"] = "Data has been redacted."
        else:
            c["missing_data_message"] = "Data was not retained or has been purged."
        rendered_response = t.render(c, request)

    elif dataset.is_raw():
        t = loader.get_template("librarian/raw_dataset_view.html")

        # Test whether this is a binary file or not.
        # Read 1000 characters.
        data_handle = dataset.get_open_file_handle('r')
        if data_handle is None:
            c["missing_data_message"] = "Data has been removed or renamed."
        else:
            with data_handle:
                sample_content = data_handle.read(1000)
            c.update({"sample_content": sample_content})
        c["is_binary"] = False
        try:
            rendered_response = t.render(c, request)
        except DjangoUnicodeDecodeError:
            # Sample didn't decode as text: re-render as a binary file.
            c["is_binary"] = True
            del c["sample_content"]
            rendered_response = t.render(c, request)
    else:
        extra_errors = []
        # If we have a mismatched output, we do an alignment
        # over the columns.
        if dataset.content_matches_header:
            col_matching, processed_rows = None, dataset.rows(
                True,
                limit=settings.DATASET_DISPLAY_MAX,
                extra_errors=extra_errors)
        else:
            col_matching, insert = dataset.column_alignment()
            processed_rows = dataset.rows(data_check=True,
                                          insert_at=insert,
                                          limit=settings.DATASET_DISPLAY_MAX,
                                          extra_errors=extra_errors)
        t = loader.get_template("librarian/csv_dataset_view.html")
        processed_rows = list(processed_rows)
        c.update({
            'column_matching': col_matching,
            'processed_rows': processed_rows,
            'extra_errors': extra_errors,
            "are_rows_truncated":
                len(processed_rows) >= settings.DATASET_DISPLAY_MAX
        })
        rendered_response = t.render(c, request)
    return HttpResponse(rendered_response)