Example #1
def _create_download_response(request, datafile_id, disposition='attachment'):  # too complex # noqa
    # Get datafile (and return 404 if absent)
    try:
        datafile = DataFile.objects.get(pk=datafile_id)
    except DataFile.DoesNotExist:
        return return_response_not_found(request)
    # Check the user has access to the datafile
    if not has_datafile_download_access(request=request,
                                        datafile_id=datafile.id):
        return return_response_error(request)
    # Send an image that can be seen in the browser
    if disposition == 'inline' and datafile.is_image():
        from tardis.tardis_portal.iiif import download_image
        args = (request, datafile.id, 'full', 'full', '0', 'native')
        # Send unconverted image if web-compatible
        if datafile.get_mimetype() in ('image/gif', 'image/jpeg', 'image/png'):
            return download_image(*args)
        # Send converted image
        return download_image(*args, format='png')
    # Send local file
    try:
        verified_only = True
        # Query parameter to allow download of unverified files
        ignore_verif = request.GET.get('ignore_verification_status', '0')
        # ignore_verification_status=1, =true, or a bare
        # ?ignore_verification_status disables the verified-only
        # filter; ignore_verification_status=0 leaves it enabled
        if ignore_verif.lower() in [u'', u'1', u'true']:
            verified_only = False

        # Get file object for datafile
        file_obj = datafile.get_file(verified_only=verified_only)
        if not file_obj:
            # No file object: report unverified (503) or missing (404)
            if verified_only:
                return render_error_message(request,
                                            "File is unverified, "
                                            "please try again later.",
                                            status=503)
            else:
                return return_response_not_found(request)
        wrapper = FileWrapper(file_obj, blksize=65535)
        response = StreamingHttpResponse(wrapper,
                                         content_type=datafile.get_mimetype())
        response['Content-Disposition'] = \
            '%s; filename="%s"' % (disposition, datafile.filename)
        return response
    except IOError:
        # If we can't read the file, return not found
        return return_response_not_found(request)
    except ValueError:
        # raised when the replica is not verified; TODO: custom exception
        redirect = request.META.get('HTTP_REFERER',
                                    'http://%s/' %
                                    request.META.get('HTTP_HOST'))
        message = """The file you are trying to access has not yet been
                     verified. Verification is an automated background process.
                     Please try again later or contact the system
                     administrator if the issue persists."""
        message = ' '.join(message.split())  # collapse whitespace/newlines
        redirect = redirect + '#error:' + message
        return HttpResponseRedirect(redirect)
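
For context, a helper like this is normally exposed through thin view wrappers that only choose the Content-Disposition. The wrapper names below are illustrative assumptions, not part of the example above.

def download_datafile(request, datafile_id):
    # Hypothetical wrapper: force a download dialog in the browser
    return _create_download_response(request, datafile_id)


def view_datafile(request, datafile_id):
    # Hypothetical wrapper: let web-compatible images render inline
    return _create_download_response(request, datafile_id,
                                     disposition='inline')
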
Example #2
def _streaming_downloader(request, datafiles, rootdir, filename,
                          comptype='tgz', organization=DEFAULT_ORGANIZATION):
    '''
    Private helper called by the download view wrappers; builds a
    streaming archive response for the given datafiles and names.
    '''
    mapper = _make_mapper(organization, rootdir)
    if not mapper:
        return render_error_message(
            request, 'Unknown download organization: %s' % organization,
            status=400)
    try:
        files = _get_datafile_details_for_archive(mapper, datafiles)
        tfs = UncachedTarStream(
            files,
            filename=filename,
            do_gzip=comptype != 'tar')
        return tfs.get_response()
    except ValueError:
        # raised when the replica is not verified; TODO: custom exception
        redirect = request.META.get('HTTP_REFERER',
                                    'http://%s/' %
                                    request.META.get('HTTP_HOST'))
        message = """The experiment you are trying to access has not yet been
                     verified completely.
                     Verification is an automated background process.
                     Please try again later or contact the system
                     administrator if the issue persists."""
        message = ' '.join(message.split())  # collapse whitespace/newlines
        redirect = redirect + '#error:' + message
        return HttpResponseRedirect(redirect)
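
A caller of this private helper might look like the sketch below: gather the accessible datafiles for a dataset, choose an archive name, and delegate. The wrapper name and the naming scheme are assumptions for illustration only.

def streaming_download_dataset(request, dataset_id, comptype='tgz'):
    # Hypothetical wrapper: collect the dataset's accessible files and
    # hand them to the private helper above
    datafiles = [df for df in DataFile.objects.filter(dataset=dataset_id)
                 if has_datafile_download_access(request=request,
                                                 datafile_id=df.id)]
    rootdir = 'dataset-%s' % dataset_id
    filename = '%s.tar' % rootdir
    return _streaming_downloader(request, datafiles, rootdir, filename,
                                 comptype=comptype)
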
Example #3
def change_user_permissions(request, experiment_id, username):

    try:
        user = User.objects.get(username=username)
    except User.DoesNotExist:
        return return_response_error(request)

    try:
        experiment = Experiment.objects.get(pk=experiment_id)
    except Experiment.DoesNotExist:
        return return_response_error(request)

    try:
        expt_acls = Experiment.safe.user_acls(experiment_id)
        acl = None
        for eacl in expt_acls:
            if eacl.pluginId == 'django_user' and \
               eacl.get_related_object().id == user.id:
                acl = eacl
        #acl = expt_acls.filter(entityId=str(user.id))
        if acl is None:
            raise ObjectACL.DoesNotExist
        owner_acls = [oacl for oacl in expt_acls if oacl.isOwner]
    except ObjectACL.DoesNotExist:
        return return_response_error(request)

    if request.method == 'POST':
        form = ChangeUserPermissionsForm(request.POST, instance=acl)

        if form.is_valid():
            if 'isOwner' in form.changed_data and \
                            form.cleaned_data['isOwner'] is False and \
                            len(owner_acls) == 1:
                owner = owner_acls[0].get_related_object()
                plugin = owner_acls[0].pluginId
                if plugin == 'django_user' and owner.id == user.id:
                    return render_error_message(
                        request,
                        'Cannot remove ownership, every experiment must have at '
                        'least one user owner.',
                        status=409)
            form.save()
            url = reverse('tardis.tardis_portal.views.control_panel')
            return HttpResponseRedirect(url)

    else:
        form = ChangeUserPermissionsForm(instance=acl)

    # Build the context outside the else branch so an invalid POST
    # re-renders the form instead of raising a NameError on 'c'
    c = {
        'form': form,
        'header': "Change User Permissions for '%s'" % user.username
    }

    return HttpResponse(
        render_response_index(request, 'tardis_portal/form_template.html', c))
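
This view expects the experiment id and username as URL keyword arguments; a URLconf entry along the lines below would wire it up. The route and regex here are illustrative; the project's actual pattern may differ.

from django.conf.urls import url

urlpatterns = [
    # Illustrative pattern only: capture experiment_id and username as
    # keyword arguments for change_user_permissions
    url(r'^experiment/control_panel/(?P<experiment_id>\d+)'
        r'/access_list/change/user/(?P<username>[\w.@+-]+)/$',
        change_user_permissions),
]
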
Example #4
def change_user_permissions(request, experiment_id, username):

    try:
        user = User.objects.get(username=username)
    except User.DoesNotExist:
        return return_response_error(request)

    try:
        experiment = Experiment.objects.get(pk=experiment_id)
    except Experiment.DoesNotExist:
        return return_response_error(request)

    try:
        expt_acls = Experiment.safe.user_acls(experiment_id)
        acl = None
        for eacl in expt_acls:
            if eacl.pluginId == 'django_user' and \
               eacl.get_related_object().id == user.id:
                acl = eacl
        #acl = expt_acls.filter(entityId=str(user.id))
        if acl is None:
            raise ObjectACL.DoesNotExist
        owner_acls = [oacl for oacl in expt_acls if oacl.isOwner]
    except ObjectACL.DoesNotExist:
        return return_response_error(request)

    if request.method == 'POST':
        form = ChangeUserPermissionsForm(request.POST, instance=acl)

        if form.is_valid():
            if 'isOwner' in form.changed_data and \
                            form.cleaned_data['isOwner'] is False and \
                            len(owner_acls) == 1:
                owner = owner_acls[0].get_related_object()
                plugin = owner_acls[0].pluginId
                if plugin == 'django_user' and owner.id == user.id:
                    return render_error_message(
                        request,
                        'Cannot remove ownership, every experiment must have at '
                        'least one user owner.', status=409)
            form.save()
            url = reverse('tardis.tardis_portal.views.control_panel')
            return HttpResponseRedirect(url)

    else:
        form = ChangeUserPermissionsForm(instance=acl)

    # Build the context outside the else branch so an invalid POST
    # re-renders the form instead of raising a NameError on 'c'
    c = {'form': form,
         'header':
         "Change User Permissions for '%s'" % user.username}

    return HttpResponse(render_response_index(
        request, 'tardis_portal/form_template.html', c))
Example #5
def _streaming_downloader(request,
                          datafiles,
                          rootdir,
                          filename,
                          comptype='tgz',
                          organization=DEFAULT_ORGANIZATION):
    '''
    Private helper called by the download view wrappers; builds a
    streaming archive response for the given datafiles and names.
    '''
    mapper = make_mapper(organization, rootdir)
    if not mapper:
        return render_error_message(request,
                                    'Unknown download organization: %s' %
                                    organization,
                                    status=400)
    try:
        files = _get_datafile_details_for_archive(mapper, datafiles)
        tfs = UncachedTarStream(files,
                                filename=filename,
                                do_gzip=comptype != 'tar')
        tracker_data = dict(label='tar',
                            session_id=request.COOKIES.get('_ga'),
                            ip=request.META.get('REMOTE_ADDR', ''),
                            user=request.user,
                            total_size=tfs.tar_size,
                            num_files=len(datafiles),
                            ua=request.META.get('HTTP_USER_AGENT', None))
        return tfs.get_response(tracker_data)
    except ValueError:
        # raised when the replica is not verified; TODO: custom exception
        redirect = request.META.get(
            'HTTP_REFERER', 'http://%s/' % request.META.get('HTTP_HOST'))
        message = """The experiment you are trying to access has not yet been
                     verified completely.
                     Verification is an automated background process.
                     Please try again later or contact the system
                     administrator if the issue persists."""
        message = ' '.join(message.split())  # collapse whitespace/newlines
        redirect = redirect + '#error:' + message
        return HttpResponseRedirect(redirect)
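
make_mapper is not shown in these examples. One plausible shape, assumed purely for illustration, is a registry lookup that returns None for an unknown organization, which is what triggers the 400 response above.

# Hypothetical registry of path-mapper factories, keyed by organization name
_MAPPER_REGISTRY = {}


def make_mapper(organization, rootdir):
    # Return a mapper callable for the organization, or None if unknown
    factory = _MAPPER_REGISTRY.get(organization)
    if factory is None:
        return None
    return factory(rootdir)
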
Example #6
def streaming_download_datafiles(request):  # too complex # noqa
    """
    Takes a string parameter "comptype" for the compression method.
    Currently implemented: "tgz" and "tar".
    The datafiles to be downloaded are selected using "datafile", "dataset"
    or "url" parameters.  An "expid" parameter may be supplied for use in
    the download archive name.  If "url" is used, the "expid" parameter
    is also used to limit the datafiles to be downloaded to a given experiment.
    """
    # Create the HttpResponse object with the appropriate headers.
    # TODO: handle no datafile, invalid filename, all http links
    # TODO: intelligent selection of temp file versus in-memory buffering.
    logger.error('In download_datafiles !!')
    comptype = getattr(settings, 'DEFAULT_ARCHIVE_FORMATS', ['tar'])[0]
    organization = getattr(settings, 'DEFAULT_PATH_MAPPER', 'classic')
    if 'comptype' in request.POST:
        comptype = request.POST['comptype']
    if 'organization' in request.POST:
        organization = request.POST['organization']

    if 'datafile' in request.POST or 'dataset' in request.POST:
        if request.POST.getlist('datafile') or request.POST.getlist('dataset'):

            datasets = request.POST.getlist('dataset')
            datafiles = request.POST.getlist('datafile')

            # Generator to produce datafiles from dataset id
            def get_dataset_datafiles(dsid):
                for datafile in DataFile.objects.filter(dataset=dsid):
                    if has_datafile_download_access(request=request,
                                                    datafile_id=datafile.id):
                        yield datafile

            # Generator to produce datafile from datafile id
            def get_datafile(dfid):
                datafile = DataFile.objects.get(pk=dfid)
                if has_datafile_download_access(request=request,
                                                datafile_id=datafile.id):
                    yield datafile

            # Take chained generators and turn them into a set of datafiles
            df_set = set(
                chain(
                    chain.from_iterable(map(get_dataset_datafiles, datasets)),
                    chain.from_iterable(map(get_datafile, datafiles))))
        else:
            return render_error_message(
                request,
                'No Datasets or Datafiles were selected for download',
                status=404)

    elif 'url' in request.POST:
        if not request.POST.getlist('url'):
            return render_error_message(
                request,
                'No Datasets or Datafiles were selected for download',
                status=404)

        # Collect every accessible datafile referenced by the supplied URLs
        # (initialising df_set first avoids a NameError when none are
        # accessible, and add() keeps more than just the last match)
        df_set = set()
        for url in request.POST.getlist('url'):
            url = urllib.unquote(url)
            raw_path = url.partition('//')[2]
            experiment_id = request.POST['expid']
            datafile = DataFile.objects.filter(
                url__endswith=raw_path,
                dataset__experiment__id=experiment_id)[0]
            if has_datafile_download_access(request=request,
                                            datafile_id=datafile.id):
                df_set.add(datafile)
    else:
        return render_error_message(
            request,
            'No Datasets or Datafiles were selected for download',
            status=404)

    logger.info('Files for archive command: %s' % df_set)

    if not df_set:
        return render_error_message(
            request, 'You do not have download access for any of the '
            'selected Datasets or Datafiles',
            status=403)

    try:
        expid = request.POST['expid']
        experiment = Experiment.objects.get(id=expid)
    except (KeyError, Experiment.DoesNotExist):
        experiment = iter(df_set).next().dataset.get_first_experiment()

    exp_title = get_filesystem_safe_experiment_name(experiment)
    filename = '%s-selection.tar' % exp_title
    rootdir = '%s-selection' % exp_title
    return _streaming_downloader(request, df_set, rootdir, filename, comptype,
                                 organization)
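
The view above is driven entirely by POST parameters. A minimal sketch of the kind of request it expects, using Django's test client with a placeholder URL and placeholder ids, follows.

from django.test import Client

client = Client()
# Placeholder URL and ids: select two datasets plus one extra datafile,
# compressed as gzipped tar, using the default 'classic' path layout
response = client.post('/download/datafiles/', {
    'expid': 1,
    'dataset': [101, 102],
    'datafile': [5001],
    'comptype': 'tgz',
    'organization': 'classic',
})
# On success, response streams the tar/tgz archive
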
Example #7
def streaming_download_datafiles(request):  # too complex # noqa
    """
    Takes a string parameter "comptype" for the compression method.
    Currently implemented: "tgz" and "tar".
    The datafiles to be downloaded are selected using "datafile", "dataset"
    or "url" parameters.  An "expid" parameter may be supplied for use in
    the download archive name.  If "url" is used, the "expid" parameter
    is also used to limit the datafiles to be downloaded to a given experiment.
    """
    # Create the HttpResponse object with the appropriate headers.
    # TODO: handle no datafile, invalid filename, all http links
    # TODO: intelligent selection of temp file versus in-memory buffering.
    logger.error('In download_datafiles !!')
    comptype = getattr(settings, 'DEFAULT_ARCHIVE_FORMATS', ['tar'])[0]
    organization = getattr(settings, 'DEFAULT_PATH_MAPPER', 'classic')
    if 'comptype' in request.POST:
        comptype = request.POST['comptype']
    if 'organization' in request.POST:
        organization = request.POST['organization']

    if 'datafile' in request.POST or 'dataset' in request.POST:
        if request.POST.getlist('datafile') or request.POST.getlist('dataset'):

            datasets = request.POST.getlist('dataset')
            datafiles = request.POST.getlist('datafile')

            # Generator to produce datafiles from dataset id
            def get_dataset_datafiles(dsid):
                for datafile in DataFile.objects.filter(dataset=dsid):
                    if has_datafile_download_access(
                            request=request, datafile_id=datafile.id):
                        yield datafile

            # Generator to produce datafile from datafile id
            def get_datafile(dfid):
                datafile = DataFile.objects.get(pk=dfid)
                if has_datafile_download_access(request=request,
                                                datafile_id=datafile.id):
                    yield datafile

            # Take chained generators and turn them into a set of datafiles
            df_set = set(chain(chain.from_iterable(map(get_dataset_datafiles,
                                                       datasets)),
                               chain.from_iterable(map(get_datafile,
                                                       datafiles))))
        else:
            return render_error_message(
                request,
                'No Datasets or Datafiles were selected for download',
                status=404)

    elif 'url' in request.POST:
        if not request.POST.getlist('url'):
            return render_error_message(
                request,
                'No Datasets or Datafiles were selected for download',
                status=404)

        # Collect every accessible datafile referenced by the supplied URLs
        # (initialising df_set first avoids a NameError when none are
        # accessible, and add() keeps more than just the last match)
        df_set = set()
        for url in request.POST.getlist('url'):
            url = urllib.unquote(url)
            raw_path = url.partition('//')[2]
            experiment_id = request.POST['expid']
            datafile = DataFile.objects.filter(
                url__endswith=raw_path,
                dataset__experiment__id=experiment_id)[0]
            if has_datafile_download_access(request=request,
                                            datafile_id=datafile.id):
                df_set.add(datafile)
    else:
        return render_error_message(
            request, 'No Datasets or Datafiles were selected for download',
            status=404)

    logger.info('Files for archive command: %s' % df_set)

    if not df_set:
        return render_error_message(
            request,
            'You do not have download access for any of the '
            'selected Datasets or Datafiles',
            status=403)

    try:
        expid = request.POST['expid']
        experiment = Experiment.objects.get(id=expid)
    except (KeyError, Experiment.DoesNotExist):
        experiment = iter(df_set).next().dataset.get_first_experiment()

    exp_title = get_filesystem_safe_experiment_name(experiment)
    filename = '%s-selection.tar' % exp_title
    rootdir = '%s-selection' % exp_title
    return _streaming_downloader(request, df_set, rootdir, filename,
                                 comptype, organization)