Example #1
0
 def test_url(self):
     """Verify that query-string fields are parsed out of a ZFS URL"""
     # NOTE: most of the features are tested with real objects above
     sample = ZFSURL.new('hostname', 'store', 'foo_bar_fs?send_args=-R&recv_args=-F')
     parsed = sample.query_fields()
     assert len(parsed) == 2
     assert parsed['send_args'] == '-R'
Example #2
0
    def _find_destination_fs_candidates(cls, source_fs):
        """@return a list of (url, filesystem_or_none) tuples.
        filesystem is either the existing filesystem matching the url, or None
        if the url points at a 'to-be-created' filesystem.

        The list is sorted to show the most viable candidates first: existing
        filesystems whose basename matches the one of source_fs come first,
        ordered by free space descending. Then follow urls pointing to
        non-existing filesystems on pools large enough to hold all data
        (current usage plus snapshot effort).

        Please note that the actual space requirements of the returned
        candidates are NOT verified here - callers must check them themselves.

        @param source_fs the filesystem instance for which you want to find a good location to send it to."""
        # Find matching filesystems, sorted by free space
        session = object_session(source_fs)
        candidates = list()
        urls_seen = set()       # string urls of existing candidate filesystems
        pools_seen = set()      # string urls of pools already covered by a candidate
        # avail != None presumably restricts the query to datasets that actually
        # exist on disk - TODO confirm against the ZDataset model.
        # '%%/%s' formats to the SQL LIKE pattern '%/<basename>', i.e. any
        # dataset whose last path component equals source_fs' basename.
        for fs in session.query(ZDataset).filter(ZDataset.avail != None).\
                    filter(ZDataset.name.like('%%/%s' % source_fs.url().basename())).\
                    filter(ZDataset.name != source_fs.name).\
                    order_by(ZDataset.avail.desc()):
            fs_url = fs.url()
            urls_seen.add(str(fs_url))
            pools_seen.add(str(ZFSURL.new(fs_url.host(), fs_url.pool())))
            candidates.append((fs_url, fs))
        # end for each matching filesystem

        # Find filesystems which are big enough to hold the entire filesystem + snapshots
        # traverse the hierarchy for that
        ssss = list(source_fs.snapshots())
        surl = source_fs.url()
        # NOTE: tr_size (the second value of compute_snapshot_effort) is unused here
        ssize, tr_size = compute_snapshot_effort(ssss)
        ssize += source_fs.used
        # Only consider pools on OTHER hosts with enough free space for
        # everything, largest first.
        for pool in session.query(ZPool).filter(ZPool.free > ssize).\
                    filter(ZPool.host != source_fs.host).\
                    order_by(ZPool.free.desc()):
            # prefer filesystems that have a matching subpath and see if parts exist
            if str(pool.url()) in pools_seen:
                continue
            # end make sure that we don't put it onto the same pool twice
            url = pool.url().joined(surl.filesystem())
            if str(url) not in urls_seen:
                candidates.append((url, None))
            # end handle duplicates
            # NOTE: We don't have to update the set anymore, as the pool iteration will yield unique names
        # end handle candidates

        return candidates
Example #3
0
    def test_url(self):
        """Verify our URL type can handle all the various names.

        Exercises three areas against the session fixture: pool urls,
        parent/child/snapshot traversal, and dataset urls (including
        snapshot-qualified names)."""
        pid = 0

        # TEST POOLS
        ############
        for pid, pool in enumerate(self.session.query(ZPool)):
            url = ZFSURL.new(pool.host, pool.name)
            assert url.pool() == pool.name
            assert url.parent_filesystem_url() is None
            assert url.host() == pool.host
            assert url.filesystem() == pool.name, 'each pool has an associated filesystem'
            assert url.is_pool()
            assert not url.is_snapshot()
            assert url.snapshot() is None
            assert url.name() == pool.name
            # Round-trip through the string representation
            assert ZFSURL(str(url)) == url
            assert pool.url() == url
            assert self.session.instance_by_url(url) is pool

            # Just try some additional functionality
            assert not url.joined('foo').is_pool()
        # end for each pool
        # pid is the last loop index - a truthy value means the fixture
        # contained at least two pools
        assert pid

        # TEST TRAVERSAL
        ################
        pool = self.session.instance_by_url(ZFSURL('zfs://fs5/internal'))
        assert isinstance(pool, ZPool)
        pfs = pool.as_filesystem()
        assert pfs.is_pool_filesystem()
        assert pfs.parent() is None
        assert isinstance(pfs, ZDataset)
        assert pfs.as_pool() is pool
        assert pfs.pool() is pool

        for child in pfs.children():
            assert child.parent() is pfs
            assert not child.is_pool_filesystem()
            assert child.pool() is pool
            ss = None
            for ss in child.snapshots():
                assert ss.is_snapshot()
                assert ss.parent() is child
            # end check snapshot
            if ss:
                assert child.latest_snapshot() is ss
            # end check latest snapshot
        # end verify parent/child relationships


        # TEST DATASETS
        ################
        sid = 0
        for sid, ds in enumerate(self.session.query(ZDataset)):
            # The pool is everything up to the first '/' (or the whole name
            # for a pool-filesystem); the filesystem is the name without any
            # '@snapshot' suffix.
            pool = ds.name.split('/', 1)[0]
            tokens = ds.name.split('@')
            fs = tokens[0]
            ss = None
            assert len(tokens) < 3, 'at most one @ may occur in a dataset name'
            if len(tokens) == 2:
                ss = tokens[1]
            # end handle token
            url = ds.url()
            assert url.pool() == pool
            assert url.host() == ds.host
            assert url.name() == ds.name
            assert url.filesystem() == fs
            assert url.snapshot_name() == ss
            assert not url.is_pool()
            assert url.is_snapshot() == (ss is not None)
            assert (url.parent_filesystem_url() is None) == (ds.name.count('/') == 0)
            assert url.joined('foo/bar').is_snapshot() == (ss is not None)
            if ss is None:
                assert url.snapshot() is None
            else:
                assert url.snapshot() == ds.name, 'should be fully qualified name'
            # Round-trip through string form and the alternate constructor
            assert ZFSURL(str(url)) == url
            assert ZFSURL.new_from_dataset(ds.host, ds.name) == url
        # end for each dataset
        # sid truthy means at least two datasets were seen
        assert sid
Example #4
0
    def generate(self):
        """@return a report with one record per filesystem that has an
        explicitly set (non-inherited) zfs_priority, showing the space each
        should receive when the available capacity is distributed
        proportionally to priorities within its (host, pool) group.

        @raise ValueError if the configured mode is unsupported, or if both
        'distribute_space' and 'max_cap' are set
        @raise AssertionError if debug_priorities doesn't match the setup"""
        # Create an initial query and map filesystems by basename
        rep = self.ReportType(copy.deepcopy(self.report_schema))
        now = datetime.now()
        config = self.settings_value()
        if config.mode not in self.valid_modes:
            raise ValueError("Can only support the following modes: %s" % ', '.join(self.valid_modes))
        # end handle

        # The header of column 4 depends on the mode we report for
        rep.columns[4][0] = config.mode == self.MODE_RESERVATION and 'reserved' or 'quota'

        # Only datasets that exist (avail set), carry a priority, and live
        # below the configured pool
        query = self._session.query(ZDataset).filter(ZDataset.avail != None).\
                                              filter(ZDataset.zfs_priority != None).\
                                              filter(ZDataset.name.like(config.pool_name + '%/%'))

        query = host_filter(config.hosts, ZDataset.host, query)
        fs_map = dict()
        for fs in query:
            # Inherited priorities are not explicit settings - skip those
            if fs.property_is_inherited('zfs_priority'):
                continue
            fs_map.setdefault((fs.host, fs.url().pool()), list()).append(fs)
        # end for each filesystem

        distribute_space = config.distribute_space
        if distribute_space:
            distribute_space = size_to_int(distribute_space)
        # end convert space

        if distribute_space and config.max_cap:
            raise ValueError("Please specify either 'max_cap' or 'distribute_space', or set one of them to 0")
        # end assure we don't see both

        if config.debug_priorities and len(fs_map) > 1:
            raise AssertionError("If debug_priorities are used, you must limit the amount of involved hosts to one")
        # end make sure debugging makes sense

        for (host, pool_name), fs_list in fs_map.iteritems():
            if config.debug_priorities and len(config.debug_priorities) != len(fs_list):
                raise AssertionError("Please specify exactly %i priorities, one for each filesystem, got %i" % (len(fs_list), len(config.debug_priorities)))
            # end verify priorities

            priorities = config.debug_priorities or [fs.zfs_priority for fs in fs_list]
            total_parts = sum(priorities)
            pool = self._session.instance_by_url(ZFSURL.new(host, pool_name))
            # Either distribute a fixed amount of space, or a percentage of
            # the pool's total size
            if distribute_space:
                total_alloc = distribute_space
            else:
                total_alloc = pool.size * (config.max_cap / 100.0)
            # end handle total_alloc

            for fs, prio in zip(fs_list, priorities):
                # Each filesystem's share is proportional to its priority
                reserve = (total_alloc / float(total_parts)) * prio
                rep.records.append([now - fs.updated_at,
                                    fs,
                                    prio,
                                    fs.used,
                                    reserve,
                                    reserve - fs.used,
                                    reserve - fs.avail,
                                    (fs.used / float(reserve)) * 100.0
                                    ])
            # end for each filesystem
        # end for each pool-host pair
        # An aggregate row is only meaningful with more than one record
        if len(rep.records) > 1:
            rep.records.append(rep.aggregate_record())
        # end aggregate only if it makes sense

        return rep