Example #1
    def rename_generic_file_to(self, generic_file, initial_filepath,
                               destination_filepath):
        # There is no need to verify that the destination directory exists,
        # or that a file with the same name is not already there: FUSE checks
        # those conditions before calling our implementation of the move.

        destination_directory = GenericFile.get_directory(
            filepath=destination_filepath)
        if not GenericFile.has_user_access_right(destination_directory,
                                                 GenericFile.WRITE_RIGHTS):
            print('No rights to write on folder ' +
                  destination_directory.filename)
            raise FuseOSError(errno.EACCES)

        # First we decrease the nlink count of the initial parent directory
        # (even if we end up staying in the same directory, that's not a big deal)
        initial_directory_id = GenericFile.get_directory_id(
            filepath=initial_filepath)
        self.add_nlink_directory(directory_id=initial_directory_id, value=-1)

        # We rename it
        destination_directory_id = destination_directory._id
        dest_filename = destination_filepath.split('/')[-1]
        Mongo.cache.find_one_and_update(
            self.files_coll, {'_id': generic_file._id}, {
                '$set': {
                    'directory_id': destination_directory_id,
                    'filename': dest_filename
                }
            })

        # We increase the nlink count of the destination directory
        self.add_nlink_directory(directory_id=destination_directory_id,
                                 value=1)
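
For context, here is a minimal sketch of what the add_nlink_directory helper used above might look like. The helper name comes from the snippets themselves, but its body here is an assumption: we suppose it simply applies an atomic MongoDB $inc to the directory's st_nlink metadata, through the same Mongo.cache API used everywhere else in these examples.

    def add_nlink_directory(self, directory_id, value):
        # Hypothetical sketch: atomically increment (or decrement, for a
        # negative value) the nlink counter stored in the file metadata.
        Mongo.cache.find_one_and_update(
            self.files_coll, {'_id': directory_id},
            {'$inc': {'metadata.st_nlink': value}})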
Example #2
    def rename_generic_file_to(self, generic_file, initial_filepath,
                               destination_filepath):
        # There is no need to verify that the destination directory exists,
        # or that a file with the same name is not already there: FUSE checks
        # those conditions before calling our implementation of the move.

        # First we decrease the nlink count of the initial parent directory
        # (even if we end up staying in the same directory, that's not a big deal)
        initial_directory_id = GenericFile.get_directory_id(
            filepath=initial_filepath)
        self.add_nlink_directory(directory_id=initial_directory_id, value=-1)

        # We rename it
        destination_directory_id = GenericFile.get_directory_id(
            filepath=destination_filepath)
        dest_filename = destination_filepath.split('/')[-1]
        Mongo.cache.find_one_and_update(
            self.files_coll, {'_id': generic_file._id}, {
                '$set': {
                    'directory_id': destination_directory_id,
                    'filename': dest_filename
                }
            })

        # We increase the nlink count of the destination directory
        self.add_nlink_directory(directory_id=destination_directory_id,
                                 value=1)
Example #3
    def test_is_generic_filename_available(self):
        self.utils.insert_file()

        is_available = GenericFile.is_generic_filepath_available(
            filepath=self.utils.file.filepath)
        self.assertFalse(is_available)

        is_available = GenericFile.is_generic_filepath_available(
            filepath=self.utils.file.filepath + '.something')
        self.assertTrue(is_available)
Example #4
    def test_get_directory_id(self):
        self.utils.insert_directory()
        self.utils.insert_directory_file()

        directory_id = GenericFile.get_directory_id(
            filepath=self.utils.directory_file.filepath)
        self.assertEqual(directory_id, self.utils.directory_file.directory_id)
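
For reference, a hypothetical sketch of the helper under test. Its real body is not shown in these snippets; we assume it resolves the parent directory through GenericFile.get_directory (used in Example #1) and returns that document's _id:

    @staticmethod
    def get_directory_id(filepath):
        # Hypothetical sketch: look up the parent directory of 'filepath'
        # and return its MongoDB _id.
        directory = GenericFile.get_directory(filepath=filepath)
        return directory._id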
Example #5
    def truncate(self, file, length):
        # We drop every unnecessary chunk
        chunk_size = file.chunkSize
        maximum_chunks = int(ceil(length / chunk_size))
        Mongo.cache.delete_many(self.chunks_coll, {
            'files_id': file._id,
            'n': {
                '$gte': maximum_chunks
            }
        })

        # If the new length falls inside a chunk, we truncate that last chunk
        if length % chunk_size != 0:
            last_chunk = Mongo.cache.find_one(self.chunks_coll, {
                'files_id': file._id,
                'n': maximum_chunks - 1
            })
            last_chunk['data'] = last_chunk['data'][0:length % chunk_size]
            Mongo.cache.find_one_and_update(
                self.chunks_coll, {'_id': last_chunk['_id']},
                {'$set': {
                    'data': last_chunk['data']
                }})

        # We update the total length and that's it
        Mongo.cache.find_one_and_update(self.files_coll, {'_id': file._id}, {
            '$set': {
                'length': length,
                'metadata.st_size': length,
                'metadata.st_blocks': GenericFile.size_to_blocks(length)
            }
        })
        return True
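
To make the chunk arithmetic concrete, here is a small worked example using the GridFS default chunk size of 255 KB; the numbers are purely illustrative:

    from math import ceil

    chunk_size = 255 * 1024                       # 261120 bytes, GridFS default
    length = 600000                               # new, smaller file length
    maximum_chunks = ceil(length / chunk_size)    # ceil(2.297...) == 3
    # Chunks with n >= 3 are deleted, and chunk n == 2 is cut down to
    # length % chunk_size == 77760 bytes.
    assert maximum_chunks == 3
    assert length % chunk_size == 77760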
Example #6
    def test_new_generic_file_file(self):
        # Creation of a basic file
        GenericFile.new_generic_file(filepath=self.utils.file.filepath,
                                     mode=0o755,
                                     file_type=GenericFile.FILE_TYPE)
        inserted_file = self.utils.files_coll.find_one({
            'directory_id': self.utils.file.directory_id,
            'filename': self.utils.file.filename
        })
        self.assertEqual(inserted_file['filename'], self.utils.file.filename)
        self.assertEqual(inserted_file['metadata']['st_mode'],
                         (S_IFREG | 0o755))
        self.assertEqual(inserted_file['generic_file_type'],
                         GenericFile.FILE_TYPE)
Example #7
    def test_new_generic_file_directory(self):
        # Creation of a directory
        GenericFile.new_generic_file(filepath=self.utils.directory.filepath,
                                     mode=0o755,
                                     file_type=GenericFile.DIRECTORY_TYPE)
        inserted_file = self.utils.files_coll.find_one({
            'directory_id': self.utils.directory.directory_id,
            'filename': self.utils.directory.filename
        })
        self.assertEqual(inserted_file['filename'],
                         self.utils.directory.filename)
        self.assertEqual(inserted_file['metadata']['st_mode'],
                         (S_IFDIR | 0o755))
        self.assertEqual(inserted_file['generic_file_type'],
                         GenericFile.DIRECTORY_TYPE)
Example #8
    def load_generic_file(json):
        if json['generic_file_type'] == GenericFile.FILE_TYPE:
            return File(json)
        elif json['generic_file_type'] == GenericFile.DIRECTORY_TYPE:
            return Directory(json)
        elif json['generic_file_type'] == GenericFile.SYMBOLIC_LINK_TYPE:
            return SymbolicLink(json)
        else:
            print('Unsupported file type!')
            return GenericFile(json)
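
A hypothetical usage of this factory, assuming a raw document fetched through the same Mongo.cache API as the other snippets (the filename is illustrative):

    # Fetch a raw document and dispatch it to the matching wrapper class;
    # 'mongo' is assumed to be an instance of the Mongo class shown below.
    raw = Mongo.cache.find_one(mongo.files_coll, {'filename': 'notes.txt'})
    generic_file = Mongo.load_generic_file(raw)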
Example #9
    def list_generic_files_in_directory(self, filepath):
        directory = self.get_generic_file(filepath=filepath)
        if not GenericFile.has_user_access_right(directory,
                                                 GenericFile.EXECUTE_RIGHTS):
            raise FuseOSError(errno.EACCES)

        files = []
        for elem in Mongo.cache.find(self.files_coll,
                                     {'directory_id': directory._id}):
            files.append(Mongo.load_generic_file(elem))
        return files
Example #10
    def __init__(self):
        # We reuse the same connection
        Mongo.configuration = Configuration()
        Mongo.cache = MongoCache()

        # Collection names
        self.gridfs_coll = Mongo.configuration.mongo_prefix() + 'files'
        self.files_coll = Mongo.configuration.mongo_prefix() + 'files.files'
        self.chunks_coll = Mongo.configuration.mongo_prefix() + 'files.chunks'

        # We need to make sure the root folder exists in MongoDB
        GenericFile.mongo = self
        GenericFile.new_generic_file(filepath='/',
                                     mode=0o755,
                                     file_type=GenericFile.DIRECTORY_TYPE)

        # Create the initial indexes
        self.create_indexes()

        # Temporary cache for the file data
        self.data_cache = {}
Example #11
    def test_new_generic_file_symbolic_link(self):
        # Creation of a symbolic link targeting the initial self.utils.file inserted just below
        self.utils.insert_file()

        GenericFile.new_generic_file(
            filepath=self.utils.symbolic_link.filepath,
            mode=0o755,
            file_type=GenericFile.SYMBOLIC_LINK_TYPE,
            target=self.utils.file.filename)
        inserted_file = self.utils.files_coll.find_one({
            'directory_id': self.utils.symbolic_link.directory_id,
            'filename': self.utils.symbolic_link.filename
        })
        self.assertEqual(inserted_file['filename'],
                         self.utils.symbolic_link.filename)
        self.assertEqual(inserted_file['metadata']['st_mode'],
                         (S_IFLNK | 0o755))
        self.assertEqual(inserted_file['generic_file_type'],
                         GenericFile.SYMBOLIC_LINK_TYPE)
        self.assertEqual(inserted_file['target'], self.utils.file.filename)
Example #12
    def __init__(self, do_clean_up=False):
        # We reuse the same connection
        Mongo.configuration = Configuration()
        Mongo.cache = MongoCache()

        # Collection names
        self.gridfs_coll = Mongo.configuration.mongo_prefix() + 'files'
        self.files_coll = Mongo.configuration.mongo_prefix() + 'files.files'
        self.chunks_coll = Mongo.configuration.mongo_prefix() + 'files.chunks'

        if do_clean_up:
            self.clean_database()

        # Create the initial indexes
        self.create_indexes()

        # Temporary cache for the file data
        self.data_cache = {}

        # Temporary cache for user information
        self.user_cache = ExpiringDict(max_len=1000, max_age_seconds=2)

        # Temporary cache for group information
        self.group_cache = ExpiringDict(max_len=1000, max_age_seconds=2)

        # We need to make sure the root folder exists in MongoDB
        GenericFile.mongo = self
        root = self.get_generic_file(filepath='/')
        default_root_mode = S_IFDIR | self.configuration.default_root_mode()
        if root is None:
            GenericFile.new_generic_file(filepath='/',
                                         mode=default_root_mode,
                                         file_type=GenericFile.DIRECTORY_TYPE)
        elif (self.configuration.force_root_mode()
              and root.metadata['st_mode'] != default_root_mode):
            root.metadata['st_mode'] = default_root_mode
            root.basic_save()
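
As a side note, ExpiringDict (from the expiringdict package) behaves like a bounded dict whose entries disappear after max_age_seconds, which is why it fits these short-lived user and group caches. A minimal illustration, with illustrative keys and values:

    import time
    from expiringdict import ExpiringDict

    cache = ExpiringDict(max_len=1000, max_age_seconds=2)
    cache['uid:1000'] = {'name': 'alice'}    # hypothetical cached user entry
    print(cache.get('uid:1000'))             # -> {'name': 'alice'}
    time.sleep(2.5)
    print(cache.get('uid:1000'))             # -> None: the entry has expired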
Example #13
    def remove_generic_file(self, generic_file):
        if not GenericFile.has_user_access_right(generic_file,
                                                 GenericFile.WRITE_RIGHTS):
            raise FuseOSError(errno.EACCES)

        # We cannot simply remove every file inside the directory (each one
        # would need its own permission checks, ...), so instead we require
        # the directory to be empty.
        if generic_file.is_dir():
            if Mongo.cache.find(self.files_coll, {
                    'directory_id': generic_file._id
            }).count() != 0:
                raise FuseOSError(errno.ENOTEMPTY)

        # First we delete the file (metadata + chunks)
        Mongo.cache.gridfs_delete(generic_file._id)

        # Then we decrease the nlink count of the directory above it
        self.add_nlink_directory(directory_id=generic_file.directory_id,
                                 value=-1)
Example #14
    def mkdir(self, path, mode):
        GenericFile.new_generic_file(filepath=path,
                                     mode=mode,
                                     file_type=GenericFile.DIRECTORY_TYPE)
Example #15
    def create(self, path, mode):
        file = GenericFile.new_generic_file(filepath=path,
                                            mode=mode,
                                            file_type=GenericFile.FILE_TYPE)
        return file.file_descriptor
Example #16
    def add_data(self, file, data, offset, use_cache=True):
        # Normally a GridFS document should not be updated in place but
        # rewritten entirely. There is no strong reason for that here, so we
        # update it in place anyway: we only rewrite the last chunks of the
        # file, or append new ones, while keeping the ~255KB/chunk limit.

        # We try to cache data
        if use_cache is True:
            return self.add_data_to_write(file=file, data=data, offset=offset)

        # Final size after the update
        total_size = offset + len(data)

        # Important note: the data we receive replaces any existing data from "offset" onwards.
        chunk_size = file.chunkSize
        total_chunks = int(ceil(file.length / chunk_size))
        starting_chunk = int(floor(offset / chunk_size))
        starting_byte = offset - starting_chunk * chunk_size
        if starting_byte < 0:
            print('Computation error for offset: ' + str(offset))
        for chunk in Mongo.cache.find(self.chunks_coll, {
                'files_id': file._id,
                'n': {
                    '$gte': starting_chunk
                }
        }):
            chunk['data'] = chunk['data'][0:starting_byte] + data[
                0:chunk_size - starting_byte]
            Mongo.cache.find_one_and_update(self.chunks_coll,
                                            {'_id': chunk['_id']},
                                            {'$set': {
                                                'data': chunk['data']
                                            }})

            # We have written a part of what we wanted, we only need to keep the remaining
            data = data[chunk_size - starting_byte:]

            # For the next chunks, we start to replace bytes from zero.
            starting_byte = 0

            # We might not need to go further to write the data
            if len(data) == 0:
                break

        # The code above was only to update a document, we might want to add new chunks
        if len(data) > 0:
            remaining_chunks = int(ceil(len(data) / chunk_size))
            chunks = []
            for i in range(0, remaining_chunks):
                chunk = {
                    "files_id": file._id,
                    "data": data[0:chunk_size],
                    "n": total_chunks
                }
                chunks.append(chunk)

                # We have written a part of what we wanted, we only keep the remaining
                data = data[chunk_size:]

                # Next entry
                total_chunks += 1
            Mongo.cache.insert_many(self.chunks_coll, chunks)

        # We update the total length and its date and that's it
        dt = time.time()
        Mongo.cache.find_one_and_update(self.files_coll, {'_id': file._id}, {
            '$set': {
                'length': total_size,
                'metadata.st_size': total_size,
                'metadata.st_blocks': GenericFile.size_to_blocks(total_size),
                'metadata.st_mtime': dt,
                'metadata.st_atime': dt,
                'metadata.st_ctime': dt
            }
        })

        return True
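
To make the offset arithmetic concrete, here is a worked example using the GridFS default chunk size; the numbers are purely illustrative:

    from math import floor

    chunk_size = 255 * 1024                               # 261120 bytes
    offset = 300000                                       # write position
    starting_chunk = floor(offset / chunk_size)           # == 1
    starting_byte = offset - starting_chunk * chunk_size  # == 38880
    # The write begins 38880 bytes into chunk n == 1: that chunk keeps its
    # first 38880 bytes and the incoming data replaces everything after.
    assert (starting_chunk, starting_byte) == (1, 38880)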
Example #17
    def symlink(self, source, target):
        GenericFile.new_generic_file(filepath=source,
                                     mode=0o777,
                                     file_type=GenericFile.SYMBOLIC_LINK_TYPE,
                                     target=target)
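
The mkdir, create and symlink methods above are FUSE operation callbacks. Assuming the surrounding class subclasses fusepy's Operations (an assumption, since the class definition is not shown in these snippets), mounting the filesystem would look roughly like this; the class and mount point names are hypothetical:

    from fuse import FUSE, Operations

    class MongoFuseOperations(Operations):  # hypothetical class name
        ...  # the handlers shown above

    if __name__ == '__main__':
        # '/mnt/mongofs' is an illustrative mount point.
        FUSE(MongoFuseOperations(), '/mnt/mongofs', foreground=True)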