Example #1
    def __init__(self, *args, **kwargs):
        self._base_url = getattr(settings, 'FILE_STORAGE',
                                 '/uds/utility/files')
        if self._base_url[-1] != '/':
            self._base_url += '/'

        cacheName: str = getattr(settings, 'FILE_CACHE', 'memory')

        try:
            cache = caches[cacheName]
        except Exception:
            logger.info('No cache for FileStorage configured.')
            cache = None

        self.cache = cache
        if 'owner' in kwargs:
            self.owner = kwargs.get('owner')
            del kwargs['owner']
        else:
            self.owner = 'fstor'

        # On start, ensure the cache is empty to avoid surprises
        if self.cache is not None:
            self.cache._cache.flush_all()  # pylint: disable=protected-access

        Storage.__init__(self, *args, **kwargs)
Example #2
    def post(self, request, *args, **kwargs):
        qdict = QueryDict('', mutable=True)
        qdict.update(request.FILES)
        files = qdict.getlist("data_file[]")
        saved_files = []

        zpath = default_storage.get_available_name('Bioimp.zip')
        path_zip = os.path.join(settings.MEDIA_ROOT, zpath)
        zf = zipfile.ZipFile(path_zip, "w")

        p = re.compile(r'(\D),(\D)')

        for f in files:
            broken_file = p.sub(r'\1;\2', f.read().decode('utf-8'))
            repaired_file = pd.read_csv(io.StringIO(broken_file), sep=';')
            repaired_file = repaired_file.iloc[:, [0, 1, 2]]

            path = default_storage.get_available_name(
                os.path.join('tmp', f.name))
            file_name = os.path.join(settings.MEDIA_ROOT, path)
            repaired_file.to_csv(file_name, sep=';', index=False)

            saved_files.append({
                "name": f.name,
                "data": repaired_file.values.tolist()
            })

            zf.write(file_name, os.path.basename(file_name))
            os.remove(file_name)

        zf.close()
        """tmp_file = os.path.join(settings.MEDIA_ROOT, path)
        saved_files.append(tmp_file)


        ziped_files = filefixer.repair_csv(saved_files)  """
        """ qdict = QueryDict('', mutable=True)
        qdict.update(request.FILES)
        files = qdict.getlist("data_file[]")
        saved_files = []

        for f in files:
            path = default_storage.save(f'tmp/{f}', ContentFile(f.read()))
            tmp_file = os.path.join(settings.MEDIA_ROOT, path)
            saved_files.append(tmp_file)


        ziped_files = filefixer.repair_csv(saved_files) """

        responseData = {
            'initialPreview': [],
            'initialPreviewConfig': [],
            'initialPreviewThumbTags': [],
            'append': True,
            'urlDownload': os.path.basename(zf.filename),
            'graphData': saved_files,
        }

        return JsonResponse(responseData)
Example #3
def directory_path(instance, filename):
    """."""

    # In COR_PARAMETER, add a new parameter per company; here it is a dict
    base_folder_by_company = {1: 'ftpettc', 5: 'ftpnrcc'}

    # Concatenate the filename with the time to generate a new name
    filename_new = '{}{}'.format(filename, instance.date.strftime('%H:%M:%S'))
    filename_new = hashlib.md5(filename_new.encode()).hexdigest()

    company = base_folder_by_company[instance.company.id]
    file_path = '{}/{}/'.format(company, instance.document_type.folder_path)

    sub_folder_list_conf = instance.document_type.sub_folder_path.split(',') \
        if instance.document_type.sub_folder_path else []
    sub_folder_list = instance.folder_params.split(',') \
        if instance.folder_params else []
    sub_folder = ''

    # validate sub_folder structure
    if sub_folder_list_conf and len(sub_folder_list) == len(
            sub_folder_list_conf):
        storage = Storage()
        for sub_folder_item in sub_folder_list:
            valid_sub_folder = storage.get_valid_name(sub_folder_item)
            if valid_sub_folder != '':
                sub_folder += '{}/'.format(valid_sub_folder)
            else:
                # if it is not a valid subfolder, discard the whole subfolder
                sub_folder = ''
                break

    file_path += sub_folder
    file_path += filename_new
    return file_path
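
This function is intended as the upload_to callable of a FileField. A minimal sketch of a model wiring it up (the Document model and its fields are hypothetical, inferred from the attributes used above):

from django.db import models


class Document(models.Model):
    # Hypothetical model: directory_path receives this instance plus the
    # original filename, and the instance must provide the attributes used
    # above (date, company, document_type, folder_params).
    file = models.FileField(upload_to=directory_path)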
Example #4
 def __init__(self, hosts=None, base_url=None):
     DistributedStorageMixin.__init__(self, hosts, base_url)
     if not self.fatal_exceptions:
         logger.warning("You're using the DistributedStorage backend with "
                 "RESTO_FATAL_EXCEPTIONS = %r.", self.fatal_exceptions)
         logger.warning("This is prone to data-loss problems, and I won't "
                 "take any responsibility in what happens from now on.")
         logger.warning("You have been warned.")
     Storage.__init__(self)
Example #5
 def __init__(self, hosts=None, base_url=None):
     DistributedStorageMixin.__init__(self, hosts, base_url)
     if not self.fatal_exceptions:
         logger.warning(
             "You're using the DistributedStorage backend with "
             "RESTO_FATAL_EXCEPTIONS = %r.", self.fatal_exceptions)
         logger.warning(
             "This is prone to data-loss problems, and I won't "
             "take any responsibility in what happens from now on.")
         logger.warning("You have been warned.")
     Storage.__init__(self)
Example #6
def test_checksum(faker, storage: Storage):
    name = faker.file_name()
    sentence = bytes(faker.sentence(), 'utf-8')
    content = ContentFile(sentence)

    storage.save(name, content)

    h = hashlib.sha256()
    h.update(sentence)
    expected_sha256 = h.hexdigest()

    actual_sha256 = calculate_sha256_checksum(storage, name)

    assert actual_sha256 == expected_sha256
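
The calculate_sha256_checksum helper is not part of this example; a minimal sketch, assuming it simply streams the stored file in chunks, could look like:

import hashlib

from django.core.files.storage import Storage


def calculate_sha256_checksum(storage: Storage, name: str) -> str:
    # Hypothetical helper: stream the stored file and hash it in chunks
    # so large files do not need to fit in memory.
    sha256 = hashlib.sha256()
    with storage.open(name, 'rb') as f:
        for chunk in iter(lambda: f.read(65536), b''):
            sha256.update(chunk)
    return sha256.hexdigest()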
Example #7
def expiring_url(storage: Storage, name: str, expiration: timedelta) -> str:
    """
    Return an expiring URL to a file name on a `Storage`.

    `S3Boto3Storage` and `MinioStorage` are specifically supported.
    """
    # Each storage backend uses a slightly different API for URL expiration
    if isinstance(storage, S3Boto3Storage):
        return storage.url(name, expire=int(expiration.total_seconds()))
    elif isinstance(storage, MinioStorage):
        return storage.url(name, max_age=expiration)
    else:
        # Unsupported Storage type
        return storage.url(name)
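
For illustration, a caller could request a presigned URL valid for one hour (the file name here is made up):

from datetime import timedelta

from django.core.files.storage import default_storage

# Returns a presigned URL on S3Boto3Storage/MinioStorage and a plain
# storage URL on any other backend.
url = expiring_url(default_storage, 'reports/summary.pdf', timedelta(hours=1))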
Example #8
def file(request):
    user = request.user
    if request.method == "POST" and user.is_authenticated:
        filename = request.POST.get('ename')
        ufile = request.FILES.get('files')
        if not filename:
            messages.error(request, " Filename is required")
        elif len(filename) < 5:
            messages.error(request, " Filename must be at least 5 characters long")
        elif len(filename) > 30:
            messages.error(request, " Filename must be less than 30 characters")
        elif not ufile:
            messages.error(request, " File is required")
        elif ufile.size > 419430400:
            messages.error(request, " File must not be larger than 400 MB")
        else:
            modified_name = change_name(ufile.name)
            data = ufile.read()
            file_root, file_ext = os.path.splitext(modified_name)
            fal = Storage().get_alternative_name(file_root, file_ext)
            p2 = f'./media/uploads/{fal}'
            f = Filesystem(user=user, file=p2, filename=filename)
            f.save()
            with open(p2, 'wb') as out_file:
                out_file.write(data)
            messages.success(request, " File uploaded successfully ")
    return render(request, 'file.html')
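
The change_name helper used above is not shown; a minimal sketch, assuming it only replaces characters that are unsafe in file names, might be:

import re


def change_name(name):
    # Hypothetical implementation: keep word characters, dots and dashes,
    # replace everything else with an underscore.
    return re.sub(r'[^\w.\-]', '_', name)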
Example #9
def get_name_by_incrementing(
    instance: Storage,
    name: str,
    max_length: Optional[int] = None,
) -> str:
    """Generate usable file name for storage iterating if needed.

    Returns a filename that is available in the storage mechanism,
    taking the provided filename into account.

    This maintains the old behavior of get_available_name that was available
    prior to Django 1.5.9. This behavior increments the file name by adding _1,
    _2, etc., but was removed because incrementing the file names in this
    manner created a security vector if users were able to upload (many) files.

    We are only able to use it in places where users are not uploading files,
    and we are instead creating them programmatically (for example, via a
    scraper).

    For more detail, see:

    https://docs.djangoproject.com/en/1.8/releases/1.5.9/#file-upload-denial-of-service

    :param instance: The instance of the storage class being used
    :param name: File name of the object being saved
    :param max_length: Accepted for signature compatibility with
        ``get_available_name``; not enforced by this helper
    :return: The filepath
    """
    dir_name, file_name = os.path.split(name)
    file_root, file_ext = os.path.splitext(file_name)
    count = itertools.count(1)
    while instance.exists(name):
        # file_ext includes the dot.
        name = os.path.join(dir_name, f"{file_root}_{next(count)}{file_ext}")
    return name
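
A hypothetical call, assuming a FileSystemStorage rooted at /tmp/demo where report.pdf already exists:

from django.core.files.storage import FileSystemStorage

storage = FileSystemStorage(location='/tmp/demo')
# If 'report.pdf' exists, this returns 'report_1.pdf'; if that also
# exists, 'report_2.pdf', and so on.
new_name = get_name_by_incrementing(storage, 'report.pdf')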
Example #10
 def __init__(self, *args: Any, **kwargs: Any) -> None:
     super().__init__(*args, **kwargs)
     self.num_copied_files = 0
     self.tasks: List[Task] = []
     self.collectfast_enabled = settings.enabled
     self.strategy: Strategy = DisabledStrategy(Storage())
     self.found_files: Dict[str, Tuple[Storage, str]] = {}
Example #11
def repair_csv(files):
    p = re.compile(r'(\D),(\D)')
    path = Storage.get_available_name(default_storage, 'Bioimp.zip')
    path_zip = os.path.join(settings.MEDIA_ROOT, path)
    zf = zipfile.ZipFile(path_zip, "w")

    for f in files:
        with open(f, "r+") as open_file:
            new_content = "Frequency ;Impedance ;Phase"
            next(open_file)  # skip the original header line

            for line in open_file:
                if len(line.strip()) != 0:
                    line = line.strip()
                    # Turn column-separating commas into semicolons, then split
                    column = p.sub(r'\1;\2', line).split(';')
                    new_content = f"{new_content} \n{column[0]} ;{column[1]} ;{column[2]}"

            open_file.seek(0)
            open_file.write(new_content)
            open_file.truncate()

        zf.write(f, os.path.basename(f))
        os.remove(f)

    zf.close()
    return os.path.basename(zf.filename)
Example #12
 def __init__(self, *args, **kwargs):
     # type: (Any, Any) -> None
     super().__init__(*args, **kwargs)
     self.num_copied_files = 0
     self.tasks = []  # type: List[Task]
     self.collectfast_enabled = settings.enabled
     self.strategy = DisabledStrategy(Storage())  # type: Strategy
Example #13
def test_write_dandiset_yaml_already_exists(storage: Storage,
                                            version: Version):
    # Pretend like AssetBlob was defined with the given storage
    # The task piggybacks off of the AssetBlob storage to write the yamls
    AssetBlob.blob.field.storage = storage

    # Save an invalid file for the task to overwrite
    dandiset_yaml_path = (
        f'{settings.DANDI_DANDISETS_BUCKET_PREFIX}'
        f'dandisets/{version.dandiset.identifier}/{version.version}/dandiset.yaml'
    )
    storage.save(dandiset_yaml_path, ContentFile(b'wrong contents'))

    write_dandiset_yaml(version)
    expected = YAMLRenderer().render(version.metadata)

    with storage.open(dandiset_yaml_path) as f:
        assert f.read() == expected
Example #14
    def get_local_file_hash(self, path: str, local_storage: Storage) -> str:
        """Create md5 hash from file contents."""
        contents = local_storage.open(path).read()
        file_hash = hashlib.md5(contents).hexdigest()

        # Check if content should be gzipped and hash gzipped content
        content_type = mimetypes.guess_type(path)[0] or "application/octet-stream"
        if self.use_gzip and content_type in settings.gzip_content_types:
            file_hash = self.get_gzipped_local_file_hash(file_hash, path, contents)

        return file_hash
Example #15
def test_write_assets_yaml_already_exists(storage: Storage, version: Version,
                                          asset_factory):
    # Pretend like AssetBlob was defined with the given storage
    # The task piggybacks off of the AssetBlob storage to write the yamls
    AssetBlob.blob.field.storage = storage

    # Create a new asset in the version so there is information to write
    version.assets.add(asset_factory())

    # Save an invalid file for the task to overwrite
    assets_yaml_path = (
        f'{settings.DANDI_DANDISETS_BUCKET_PREFIX}'
        f'dandisets/{version.dandiset.identifier}/{version.version}/assets.yaml'
    )
    storage.save(assets_yaml_path, ContentFile(b'wrong contents'))

    write_assets_yaml(version)
    expected = YAMLRenderer().render(
        [asset.metadata for asset in version.assets.all()])

    with storage.open(assets_yaml_path) as f:
        assert f.read() == expected
Example #16
def test_write_manifest_files(storage: Storage, version: Version,
                              asset_factory):
    # Pretend like AssetBlob was defined with the given storage
    # The task piggybacks off of the AssetBlob storage to write the yamls
    AssetBlob.blob.field.storage = storage

    # Create a new asset in the version so there is information to write
    version.assets.add(asset_factory())

    # All of these files should be generated by the task
    assets_yaml_path = (
        f'{settings.DANDI_DANDISETS_BUCKET_PREFIX}'
        f'dandisets/{version.dandiset.identifier}/{version.version}/assets.yaml'
    )
    dandiset_yaml_path = (
        f'{settings.DANDI_DANDISETS_BUCKET_PREFIX}'
        f'dandisets/{version.dandiset.identifier}/{version.version}/dandiset.yaml'
    )
    assets_jsonld_path = (
        f'{settings.DANDI_DANDISETS_BUCKET_PREFIX}'
        f'dandisets/{version.dandiset.identifier}/{version.version}/assets.jsonld'
    )
    dandiset_jsonld_path = (
        f'{settings.DANDI_DANDISETS_BUCKET_PREFIX}'
        f'dandisets/{version.dandiset.identifier}/{version.version}/dandiset.jsonld'
    )
    collection_jsonld_path = (
        f'{settings.DANDI_DANDISETS_BUCKET_PREFIX}'
        f'dandisets/{version.dandiset.identifier}/{version.version}/collection.jsonld'
    )

    tasks.write_manifest_files(version.id)

    assert storage.exists(assets_yaml_path)
    assert storage.exists(dandiset_yaml_path)
    assert storage.exists(assets_jsonld_path)
    assert storage.exists(dandiset_jsonld_path)
    assert storage.exists(collection_jsonld_path)
Example #17
def test_write_dandiset_yaml(storage: Storage, version: Version):
    # Pretend like AssetBlob was defined with the given storage
    # The task piggybacks off of the AssetBlob storage to write the yamls
    AssetBlob.blob.field.storage = storage

    write_dandiset_yaml(version)
    expected = YAMLRenderer().render(version.metadata)

    dandiset_yaml_path = (
        f'{settings.DANDI_DANDISETS_BUCKET_PREFIX}'
        f'dandisets/{version.dandiset.identifier}/{version.version}/dandiset.yaml'
    )
    with storage.open(dandiset_yaml_path) as f:
        assert f.read() == expected
Example #18
    def __init__(self, *args, **kwargs):
        self._base_url = getattr(settings, 'FILE_STORAGE', '/files')
        if self._base_url[-1] != '/':
            self._base_url += '/'

        cacheName = getattr(settings, 'FILE_CACHE', 'memory')

        try:
            cache = caches[cacheName]
        except Exception:
            logger.info('No cache for FileStorage configured.')
            cache = None

        self.cache = cache
        if 'owner' in kwargs:
            self.owner = kwargs.get('owner')
            del kwargs['owner']
        else:
            self.owner = 'fstor'

        # On start, ensure the cache is empty to avoid surprises
        if self.cache is not None:
            self.cache._cache.flush_all()

        Storage.__init__(self, *args, **kwargs)  # @UndefinedVariable
Example #19
    def __init__(self, *args, **kwargs):
        self._base_url = getattr(settings, 'FILE_STORAGE', '/files')
        if self._base_url[-1] != '/':
            self._base_url += '/'

        cacheName = getattr(settings, 'FILE_CACHE', 'memory')

        try:
            cache = caches[cacheName]
        except Exception:
            cache = None

        self.cache = cache
        if 'owner' in kwargs:
            self.owner = kwargs.get('owner')
            del kwargs['owner']
        else:
            self.owner = 'fstor'

        # On start, ensure the cache is empty to avoid surprises
        if self.cache is not None:
            self.cache._cache.flush_all()

        # noinspection PyArgumentList
        Storage.__init__(self, *args, **kwargs)
Example #20
    def __init__(self, *args, **kwargs):
        self._base_url = getattr(settings, 'FILE_STORAGE', '/files')
        if self._base_url[-1] != '/':
            self._base_url += '/'

        cacheName = getattr(settings, 'FILE_CACHE', 'memory')

        try:
            cache = caches[cacheName]
        except Exception:
            logger.info('No cache for FileStorage configured.')
            cache = None

        self.cache = cache
        if 'owner' in kwargs:
            self.owner = kwargs.get('owner')
            del kwargs['owner']
        else:
            self.owner = 'fstor'

        # On start, ensure the cache is empty to avoid surprises
        if self.cache is not None:
            self.cache._cache.flush_all()

        Storage.__init__(self, *args, **kwargs)
Example #21
    def __init__(self, *args, **kwargs):
        self._base_url = getattr(settings, 'FILE_STORAGE', '/files')
        if self._base_url[-1] != '/':
            self._base_url += '/'

        cacheName = getattr(settings, 'FILE_CACHE', 'memory')

        try:
            cache = caches[cacheName]
        except Exception:
            cache = None

        self.cache = cache
        if 'owner' in kwargs:
            self.owner = kwargs.get('owner')
            del kwargs['owner']
        else:
            self.owner = 'fstor'

        # On start, ensure the cache is empty to avoid surprises
        if self.cache is not None:
            self.cache._cache.flush_all()

        # noinspection PyArgumentList
        Storage.__init__(self, *args, **kwargs)
Example #22
    def _check_is_init_files_deleted(self, video_name: str, storage: Storage):
        count_of_stream_init = 5

        stream_init_name = DashFilesNames.dash_init_files_mask(video_name)
        stream_init_name_mask = stream_init_name.replace(
            r"\$RepresentationID\$", "{0}")

        stream_init_files_checked = 0
        for stream_id in range(count_of_stream_init):
            init_name = stream_init_name_mask.format(stream_id)
            if not storage.exists(init_name):
                stream_init_files_checked += 1

        self.assertEqual(
            stream_init_files_checked, count_of_stream_init,
            "Not all DASH streams were deleted: "
            f"{count_of_stream_init - stream_init_files_checked} stream(s) remain."
        )
Example #23
    def test_storage_methods(self):
        """
        Make sure that QueuedStorage implements all the methods
        """
        storage = QueuedStorage('django.core.files.storage.FileSystemStorage',
                                'django.core.files.storage.FileSystemStorage')

        file_storage = Storage()

        for attr in dir(file_storage):
            method = getattr(file_storage, attr)

            if not callable(method):
                continue

            queued_method = getattr(storage, attr, False)
            self.assertTrue(callable(queued_method),
                            "QueuedStorage has no method '%s'" % attr)
Example #24
    def get(self, request, event, *args, **kwargs):
        netloc = urlparse(settings.SITE_URL).netloc
        speaker = self.get_object()
        slots = self.request.event.current_schedule.talks.filter(
            submission__speakers=speaker.user, is_visible=True)

        cal = vobject.iCalendar()
        cal.add('prodid').value = (
            f'-//pretalx//{netloc}//{request.event.slug}//{speaker.code}'
        )

        for slot in slots:
            slot.build_ical(cal)

        resp = HttpResponse(cal.serialize(), content_type='text/calendar')
        speaker_name = Storage().get_valid_name(name=speaker.user.name)
        resp['Content-Disposition'] = (
            f'attachment; filename="{request.event.slug}-{speaker_name}.ics"'
        )
        return resp
Example #25
def test_write_assets_jsonld(storage: Storage, version: Version,
                             asset_factory):
    # Pretend like AssetBlob was defined with the given storage
    # The task piggybacks off of the AssetBlob storage to write the yamls
    AssetBlob.blob.field.storage = storage

    # Create a new asset in the version so there is information to write
    version.assets.add(asset_factory())

    write_assets_jsonld(version)
    expected = JSONRenderer().render(
        [asset.metadata for asset in version.assets.all()])

    assets_jsonld_path = (
        f'{settings.DANDI_DANDISETS_BUCKET_PREFIX}'
        f'dandisets/{version.dandiset.identifier}/{version.version}/assets.jsonld'
    )
    with storage.open(assets_jsonld_path) as f:
        assert f.read() == expected
Example #26
def test_write_collection_jsonld(storage: Storage, version: Version, asset):
    # Pretend like AssetBlob was defined with the given storage
    # The task piggybacks off of the AssetBlob storage to write the yamls
    AssetBlob.blob.field.storage = storage

    version.assets.add(asset)

    write_collection_jsonld(version)
    expected = JSONRenderer().render({
        '@context': version.metadata['@context'],
        'id': version.metadata['id'],
        '@type': 'prov:Collection',
        'hasMember': [asset.metadata['id']],
    })

    collection_jsonld_path = (
        f'{settings.DANDI_DANDISETS_BUCKET_PREFIX}'
        f'dandisets/{version.dandiset.identifier}/{version.version}/collection.jsonld'
    )
    with storage.open(collection_jsonld_path) as f:
        assert f.read() == expected
Example #27
    def get(self, request, event, *args, **kwargs):
        netloc = urlparse(settings.SITE_URL).netloc
        speaker = self.get_object()
        slots = self.request.event.current_schedule.talks.filter(
            submission__speakers=speaker.user, is_visible=True
        ).select_related("room", "submission")

        cal = vobject.iCalendar()
        cal.add(
            "prodid"
        ).value = f"-//pretalx//{netloc}//{request.event.slug}//{speaker.code}"

        for slot in slots:
            slot.build_ical(cal)

        speaker_name = Storage().get_valid_name(name=speaker.user.name)
        return HttpResponse(
            cal.serialize(),
            content_type="text/calendar",
            headers={
                "Content-Disposition": f'attachment; filename="{request.event.slug}-{safe_filename(speaker_name)}.ics"'
            },
        )
Example #28
 def __init__(self, *args, **kwargs):
     Storage.__init__(self, *args, **kwargs)
     ensure_bucket_exists(StaticFilesBucket)
Example #29
import os
import sys
from django.conf import settings
from django.core.files.storage import Storage

DIRNAME = os.path.dirname(__file__)
settings.configure(
    DEBUG=True,
    DATABASES={'default': {
        'ENGINE': 'django.db.backends.sqlite3',
    }},
    DATABASE_NAME=os.path.join('database.db'),
    INSTALLED_APPS=(
        'django.contrib.auth',
        'django.contrib.contenttypes',
        'django.contrib.sessions',
        'django.contrib.admin',
        'filemignon',
        'tests',
    ),
    FILEMIGNON_STORAGE=Storage(),
)

from django.test.simple import DjangoTestSuiteRunner
test_runner = DjangoTestSuiteRunner(verbosity=1)

failures = test_runner.run_tests([
    'tests',
])
if failures:
    sys.exit(failures)
Example #30
 def __init__(self, *args, **kwargs):
     Storage.__init__(self, *args, **kwargs)
Example #31

# Storage location for uploaded images depends on environment.

if not settings.IN_PRODUCTION:
    # Local development environment upload
    upload_storage = FileSystemStorage(
            location=os.path.join(settings.MEDIA_ROOT, 'upload_images'),
            base_url=urlparse.urljoin(settings.MEDIA_URL, 'upload_images/'))
elif settings.IS_AWS_AUTHENTICATED:
    # Direct upload to S3
    upload_storage = S3BotoStorage(location='/upload_images',
            bucket=getattr(settings, 'AWS_STORAGE_BUCKET_NAME', 'newfs'))
else:
    # Direct upload to S3
    upload_storage = Storage()


def rename_image_by_type(instance, filename):
    user_name = instance.uploaded_by.username.lower()
    image_type = instance.image_type.lower()

    # Create and update a hash object for getting a unique name.
    md5 = hashlib.md5()
    md5.update(filename.encode())
    md5.update(user_name.encode())
    md5.update(image_type.encode())
    md5.update(str(datetime.datetime.now()).encode())

    new_name = '{0}_{1}.jpg'.format(user_name, md5.hexdigest())
    return os.path.join(image_type, new_name)