Example #1
    def create_dir_object(self, dir_path):
        # TODO: what if the object already exists?
        if os.path.exists(dir_path) and not os.path.isdir(dir_path):
            self.logger.error("Deleting file %s", dir_path)
            do_unlink(dir_path)
        # If the dir already exists, just override the metadata.
        mkdirs(dir_path)
        do_chown(dir_path, self.uid, self.gid)
        create_object_metadata(dir_path)
        return True
Example #2
    def create_dir_object(self, dir_path):
        # TODO: what if the object already exists?
        if os.path.exists(dir_path) and not os.path.isdir(dir_path):
            self.logger.error("Deleting file %s", dir_path)
            do_unlink(dir_path)
        # If the dir already exists, just override the metadata.
        mkdirs(dir_path)
        do_chown(dir_path, self.uid, self.gid)
        create_object_metadata(dir_path)
        return True
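
Both examples above keep a directory object consistent on disk: any plain file occupying the path is removed, the directory is (re)created, ownership is set, and object metadata is written. Below is a rough standard-library-only sketch of the same idea; do_unlink, mkdirs, do_chown, and create_object_metadata are project helpers not shown here, so the names and signature below are stand-ins, not the real code.

    import logging
    import os

    def ensure_dir_object(dir_path, uid=-1, gid=-1,
                          logger=logging.getLogger(__name__)):
        # If a regular file occupies the path, remove it first.
        if os.path.exists(dir_path) and not os.path.isdir(dir_path):
            logger.error("Deleting file %s", dir_path)
            os.unlink(dir_path)
        # exist_ok=True mirrors mkdirs(): a no-op when the directory exists.
        os.makedirs(dir_path, exist_ok=True)
        # Passing -1 leaves uid/gid unchanged (POSIX only).
        os.chown(dir_path, uid, gid)
        # The real code would also (re)write the object metadata here.
        return True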
Example #3
    def list_objects_iter(self, limit, marker, end_marker,
                          prefix, delimiter, path):
        """
        Returns a list of (name, created_at, size, content_type, etag)
        entries for the objects in this container.
        """
        if path:
            prefix = path = path.rstrip('/') + '/'
            delimiter = '/'
        if delimiter and not prefix:
            prefix = ''

        objects = []
        object_count = 0
        bytes_used = 0
        container_list = []

        objects, object_count, bytes_used = get_container_details(self.datadir)

        if int(self.metadata[X_OBJECTS_COUNT][0]) != object_count or \
           int(self.metadata[X_BYTES_USED][0]) != bytes_used:
            self.metadata[X_OBJECTS_COUNT] = (object_count, 0)
            self.metadata[X_BYTES_USED] = (bytes_used, 0)
            self.update_container(self.metadata)

        if objects:
            objects.sort()

        if objects and prefix:
            objects = self.filter_prefix(objects, prefix)

        if objects and delimiter:
            objects = self.filter_delimiter(objects, delimiter, prefix)

        if objects and marker:
            objects = self.filter_marker(objects, marker)

        if objects and end_marker:
            objects = self.filter_end_marker(objects, end_marker)

        if objects and limit:
            if len(objects) > limit:
                objects = self.filter_limit(objects, limit)

        if objects:
            for obj in objects:
                list_item = []
                list_item.append(obj)
                obj_path = os.path.join(self.datadir, obj)
                metadata = read_metadata(obj_path)
                if not metadata or not validate_object(metadata):
                    metadata = create_object_metadata(obj_path)
                if metadata:
                    list_item.append(metadata[X_TIMESTAMP])
                    list_item.append(int(metadata[X_CONTENT_LENGTH]))
                    list_item.append(metadata[X_CONTENT_TYPE])
                    list_item.append(metadata[X_ETAG])
                container_list.append(list_item)

        return container_list
Example #4
    def list_objects_iter(self, limit, marker, end_marker, prefix, delimiter,
                          path):
        """
        Returns a list of (name, created_at, size, content_type, etag)
        entries for the objects in this container.
        """
        if path:
            prefix = path = path.rstrip('/') + '/'
            delimiter = '/'
        if delimiter and not prefix:
            prefix = ''

        objects = []
        object_count = 0
        bytes_used = 0
        container_list = []

        objects, object_count, bytes_used = get_container_details(self.datadir)

        if int(self.metadata[X_OBJECTS_COUNT]) != object_count or \
           int(self.metadata[X_BYTES_USED]) != bytes_used:
            self.metadata[X_OBJECTS_COUNT] = object_count
            self.metadata[X_BYTES_USED] = bytes_used
            self.update_container(self.metadata)

        if objects:
            objects.sort()

        if objects and prefix:
            objects = self.filter_prefix(objects, prefix)

        if objects and delimiter:
            objects = self.filter_delimiter(objects, delimiter, prefix)

        if objects and marker:
            objects = self.filter_marker(objects, marker)

        if objects and end_marker:
            objects = self.filter_end_marker(objects, end_marker)

        if objects and limit:
            if len(objects) > limit:
                objects = self.filter_limit(objects, limit)

        if objects:
            for obj in objects:
                list_item = []
                list_item.append(obj)
                metadata = read_metadata(self.datadir + '/' + obj)
                if not metadata or not validate_object(metadata):
                    metadata = create_object_metadata(self.datadir + '/' + obj)
                if metadata:
                    list_item.append(metadata[X_TIMESTAMP])
                    list_item.append(int(metadata[X_CONTENT_LENGTH]))
                    list_item.append(metadata[X_CONTENT_TYPE])
                    list_item.append(metadata[X_ETAG])
                container_list.append(list_item)

        return container_list
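
Both listing variants apply the same pipeline to the sorted object names: prefix, delimiter, marker, end_marker, and finally limit. The real filter_* helpers are methods on the container class and are not shown here; the snippet below is a self-contained emulation of those steps on a plain list of names, so its details (especially the pseudo-directory rollup) are an approximation.

    def filter_listing(names, prefix='', delimiter='', marker='',
                       end_marker='', limit=0):
        names = sorted(names)
        if prefix:
            names = [n for n in names if n.startswith(prefix)]
        if delimiter:
            # Collapse anything below the first delimiter past the prefix
            # into a single "subdir" entry, the way Swift listings report
            # pseudo-directories.
            seen, rolled = set(), []
            for n in names:
                rest = n[len(prefix):]
                if delimiter in rest:
                    subdir = prefix + rest.split(delimiter, 1)[0] + delimiter
                    if subdir not in seen:
                        seen.add(subdir)
                        rolled.append(subdir)
                else:
                    rolled.append(n)
            names = rolled
        if marker:
            names = [n for n in names if n > marker]
        if end_marker:
            names = [n for n in names if n < end_marker]
        if limit:
            names = names[:limit]
        return names

    print(filter_listing(['a/1', 'a/2', 'b'], delimiter='/', limit=10))
    # ['a/', 'b']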
Example #5
    def test_create_object_metadata_file(self):
        tf = tempfile.NamedTemporaryFile()
        tf.file.write('4567')
        tf.file.flush()
        r_md = utils.create_object_metadata(tf.name)

        xkey = _xkey(tf.name, utils.METADATA_KEY)
        assert len(_xattrs.keys()) == 1
        assert xkey in _xattrs
        assert _xattr_op_cnt['get'] == 1
        assert _xattr_op_cnt['set'] == 1
        md = pickle.loads(_xattrs[xkey])
        assert r_md == md

        for key in self.obj_keys:
            assert key in md, "Expected key %s in %r" % (key, md)
        assert md[utils.X_TYPE] == utils.OBJECT
        assert md[utils.X_OBJECT_TYPE] == utils.FILE
        assert md[utils.X_CONTENT_TYPE] == utils.FILE_TYPE
        assert md[utils.X_CONTENT_LENGTH] == os.path.getsize(tf.name)
        assert md[utils.X_TIMESTAMP] == normalize_timestamp(os.path.getctime(tf.name))
        assert md[utils.X_ETAG] == utils._get_etag(tf.name)
Example #6
    def test_create_object_metadata_dir(self):
        td = tempfile.mkdtemp()
        try:
            r_md = utils.create_object_metadata(td)

            xkey = _xkey(td, utils.METADATA_KEY)
            assert len(_xattrs.keys()) == 1
            assert xkey in _xattrs
            assert _xattr_op_cnt['get'] == 1
            assert _xattr_op_cnt['set'] == 1
            md = pickle.loads(_xattrs[xkey])
            assert r_md == md

            for key in self.obj_keys:
                assert key in md, "Expected key %s in %r" % (key, md)
            assert md[utils.X_TYPE] == utils.OBJECT
            assert md[utils.X_OBJECT_TYPE] == utils.DIR
            assert md[utils.X_CONTENT_TYPE] == utils.DIR_TYPE
            assert md[utils.X_CONTENT_LENGTH] == 0
            assert md[utils.X_TIMESTAMP] == normalize_timestamp(os.path.getctime(td))
            assert md[utils.X_ETAG] == hashlib.md5().hexdigest()
        finally:
            os.rmdir(td)
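
Both tests read the metadata back from a fake in-memory xattr store (_xattrs) and count get/set calls via _xattr_op_cnt. The real _xkey/_xattrs/_xattr_op_cnt doubles live in the test module and may differ in detail; the sketch below only illustrates the kind of test double the assertions imply, with a string key built from the path and attribute name.

    import pickle
    from collections import defaultdict

    _xattrs = {}
    _xattr_op_cnt = defaultdict(int)

    def _xkey(path, key):
        return '%s:%s' % (path, key)

    def fake_setxattr(path, key, value):
        _xattr_op_cnt['set'] += 1
        _xattrs[_xkey(path, key)] = value

    def fake_getxattr(path, key):
        _xattr_op_cnt['get'] += 1
        return _xattrs[_xkey(path, key)]

    # Round-trip a pickled metadata dict, mirroring what the tests verify.
    fake_setxattr('/tmp/obj', 'user.swift.metadata',
                  pickle.dumps({'X-Type': 'Object'}))
    assert pickle.loads(
        fake_getxattr('/tmp/obj', 'user.swift.metadata')) == {'X-Type': 'Object'}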
Example #7
    def __init__(
        self,
        path,
        device,
        partition,
        account,
        container,
        obj,
        logger,
        keep_data_fp=False,
        disk_chunk_size=65536,
        uid=DEFAULT_UID,
        gid=DEFAULT_GID,
    ):
        self.disk_chunk_size = disk_chunk_size
        device = account
        # Don't support obj names beginning or ending with '/', e.g. /a, a/, /a/b/.
        obj = obj.strip("/")
        if "/" in obj:
            self.obj_path, self.obj = obj.rsplit("/", 1)
        else:
            self.obj_path = ""
            self.obj = obj

        if self.obj_path:
            self.name = "/".join((container, self.obj_path))
        else:
            self.name = container
        # Absolute path for obj directory.
        self.datadir = os.path.join(path, device, self.name)

        self.device_path = os.path.join(path, device)
        if not check_mount(path, device):
            check_valid_account(account)

        self.container_path = os.path.join(path, device, container)
        self.tmpdir = os.path.join(path, device, "tmp")
        self.logger = logger
        self.metadata = {}
        self.meta_file = None
        self.data_file = None
        self.fp = None
        self.iter_etag = None
        self.started_at_0 = False
        self.read_to_eof = False
        self.quarantined_dir = None
        self.keep_cache = False
        self.is_dir = False
        self.is_valid = True
        self.uid = int(uid)
        self.gid = int(gid)
        if not os.path.exists(self.datadir + "/" + self.obj):
            return

        self.data_file = os.path.join(self.datadir, self.obj)
        self.metadata = read_metadata(self.datadir + "/" + self.obj)
        if not self.metadata:
            create_object_metadata(self.datadir + "/" + self.obj)
            self.metadata = read_metadata(self.datadir + "/" + self.obj)

        if not validate_object(self.metadata):
            create_object_metadata(self.datadir + "/" + self.obj)
            self.metadata = read_metadata(self.datadir + "/" + self.obj)

        self.filter_metadata()

        if os.path.isdir(self.datadir + "/" + self.obj):
            self.is_dir = True
        else:
            self.fp = do_open(self.data_file, "rb")
            if not keep_data_fp:
                self.close(verify_file=False)
Example #8
    def __init__(self,
                 path,
                 device,
                 partition,
                 account,
                 container,
                 obj,
                 logger,
                 keep_data_fp=False,
                 disk_chunk_size=65536,
                 uid=DEFAULT_UID,
                 gid=DEFAULT_GID,
                 fs_object=None):
        self.disk_chunk_size = disk_chunk_size
        device = account
        # Don't support obj names beginning or ending with '/', e.g. /a, a/, /a/b/.
        obj = obj.strip('/')
        if '/' in obj:
            self.obj_path, self.obj = obj.rsplit('/', 1)
        else:
            self.obj_path = ''
            self.obj = obj

        if self.obj_path:
            self.name = '/'.join((container, self.obj_path))
        else:
            self.name = container
        # Absolute path for obj directory.
        self.datadir = os.path.join(path, device, self.name)

        self.device_path = os.path.join(path, device)
        if not check_mount(path, device):
            check_valid_account(account, fs_object)

        self.container_path = os.path.join(path, device, container)
        self.tmpdir = os.path.join(path, device, 'tmp')
        self.logger = logger
        self.metadata = {}
        self.meta_file = None
        self.data_file = None
        self.fp = None
        self.iter_etag = None
        self.started_at_0 = False
        self.read_to_eof = False
        self.quarantined_dir = None
        self.keep_cache = False
        self.is_dir = False
        self.is_valid = True
        self.uid = int(uid)
        self.gid = int(gid)
        if not os.path.exists(self.datadir + '/' + self.obj):
            return

        self.data_file = os.path.join(self.datadir, self.obj)
        self.metadata = read_metadata(self.datadir + '/' + self.obj)
        if not self.metadata:
            create_object_metadata(self.datadir + '/' + self.obj)
            self.metadata = read_metadata(self.datadir + '/' + self.obj)

        if not validate_object(self.metadata):
            create_object_metadata(self.datadir + '/' + self.obj)
            self.metadata = read_metadata(self.datadir + '/' + self.obj)

        self.filter_metadata()

        if os.path.isdir(self.datadir + '/' + self.obj):
            self.is_dir = True
        else:
            self.fp = do_open(self.data_file, 'rb')
            if not keep_data_fp:
                self.close(verify_file=False)
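
In both constructor variants, most of the work is deriving filesystem paths from the request components; note that device is overwritten with account, so the account name doubles as the on-disk device directory. A minimal sketch of just that path handling, using hypothetical values:

    import os

    path, account, container, obj = '/mnt/gluster-object', 'vol0', 'c1', 'dir1/o1'
    device = account                  # the account name doubles as the device
    obj = obj.strip('/')
    if '/' in obj:
        obj_path, obj_name = obj.rsplit('/', 1)
    else:
        obj_path, obj_name = '', obj
    name = '/'.join((container, obj_path)) if obj_path else container
    datadir = os.path.join(path, device, name)

    print(datadir)                          # /mnt/gluster-object/vol0/c1/dir1
    print(os.path.join(datadir, obj_name))  # /mnt/gluster-object/vol0/c1/dir1/o1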