Code example #1
File: test_sql_reports.py Project: Byron/bit
    def test_url(self):
        """Verify url functionality"""
        # NOTE: most of the features are tested with real objects above
        url = ZFSURL.new('hostname', 'store', 'foo_bar_fs?send_args=-R&recv_args=-F')
        fields = url.query_fields()
        assert len(fields) == 2
        assert fields['send_args'] == '-R'
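
The snippet above drives ZFSURL's query-string handling. Below is a minimal sketch of the parsing it exercises, assuming plain 'key=value' pairs joined by '&' and the zfs://host/... URL form seen in code example #3; parse_query is a hypothetical stand-in for ZFSURL.query_fields(), not part of Byron/bit.

def parse_query(url):
    # Return the query portion of a URL as a dict of 'key=value' pairs.
    # Hypothetical helper; assumes no URL-encoding and no repeated keys.
    if '?' not in url:
        return {}
    fields = {}
    for pair in url.split('?', 1)[1].split('&'):
        key, _, value = pair.partition('=')
        fields[key] = value
    return fields

assert parse_query('zfs://hostname/store/foo_bar_fs?send_args=-R&recv_args=-F') == \
       {'send_args': '-R', 'recv_args': '-F'}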
Code example #2
File: snapshot.py Project: Byron/bit
    def _find_destination_fs_candidates(cls, source_fs):
        """@return a list of tuples of (url, filesystem_or_none) pairs.
        filesystem is either the filesystem matching the url, or None
        if the url is of a 'to-be-created' filesystem.

        The list is sorted to show the most viable candidates first. The weight is by existing filesystem names that 
        match the one of source_fs, ordered by free space. No free space check was made here.
        Then you will get urls pointing to non-existing filesystems on pools large enough to hold all data.

        Please note that we didn't yet verify the actual space requirements of 

        @param source_fs the filesystem instance for which you want to find a good location to send it to."""
        # Find matching filesystems, sorted by free space
        session = object_session(source_fs)
        candidates = list()
        urls_seen = set()
        pools_seen = set()
        for fs in session.query(ZDataset).filter(ZDataset.avail != None).\
                    filter(ZDataset.name.like('%%/%s' % source_fs.url().basename())).\
                    filter(ZDataset.name != source_fs.name).\
                    order_by(ZDataset.avail.desc()):
            fs_url = fs.url()
            urls_seen.add(str(fs_url))
            pools_seen.add(str(ZFSURL.new(fs_url.host(), fs_url.pool())))
            candidates.append((fs_url, fs))
        # end for each matching filesystem

        # Find filesystems which are big enough to hold the entire filesystem + snapshots
        # traverse the hierarchy for that
        snapshots = list(source_fs.snapshots())
        surl = source_fs.url()
        ssize, tr_size = compute_snapshot_effort(snapshots)  # tr_size is unused here
        ssize += source_fs.used
        for pool in session.query(ZPool).filter(ZPool.free > ssize).\
                    filter(ZPool.host != source_fs.host).\
                    order_by(ZPool.free.desc()):
            # prefer filesystems that have a matching subpath and see if parts exist
            if str(pool.url()) in pools_seen:
                continue
            # end make sure that we don't put it onto the same pool twice
            url = pool.url().joined(surl.filesystem())
            if str(url) not in urls_seen:
                candidates.append((url, None))
            # end handle duplicates
            # NOTE: We don't have to update the set anymore, as the pool iteration will yield unique names
        # end handle candidates

        return candidates
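
The docstring above promises a two-phase ranking. Here is a standalone sketch of just that ordering with plain tuples; the data shapes (fs_url, pool_url, free_bytes) are illustrative stand-ins for the ZDataset/ZPool rows, not the project's types.

def rank_candidates(matching_fs, pools, required_size, basename):
    """Phase 1: existing filesystems matching the source's basename, most
    free space first. Phase 2: to-be-created filesystems on unseen pools
    with enough room for the source's data."""
    # matching_fs: [(fs_url, pool_url, free_bytes)], pools: [(pool_url, free_bytes)]
    matching_fs = sorted(matching_fs, key=lambda c: c[2], reverse=True)
    candidates = [(fs_url, 'existing') for fs_url, _, _ in matching_fs]
    pools_seen = set(pool_url for _, pool_url, _ in matching_fs)
    for pool_url, free in sorted(pools, key=lambda c: c[1], reverse=True):
        if free > required_size and pool_url not in pools_seen:
            candidates.append((pool_url + '/' + basename, 'to-be-created'))
    return candidates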
Code example #3
File: test_sql_reports.py Project: Byron/bit
    def test_url(self):
        """Verify our URL type can handle all the various names"""
        pid = 0

        # TEST POOLS
        ############
        for pid, pool in enumerate(self.session.query(ZPool)):
            url = ZFSURL.new(pool.host, pool.name)
            assert url.pool() == pool.name
            assert url.parent_filesystem_url() is None
            assert url.host() == pool.host
            assert url.filesystem() == pool.name, 'each pool has an associated filesystem'
            assert url.is_pool()
            assert not url.is_snapshot()
            assert url.snapshot() is None
            assert url.name() == pool.name
            assert ZFSURL(str(url)) == url
            assert pool.url() == url
            assert self.session.instance_by_url(url) is pool

            # Just try some additional functionality
            assert not url.joined('foo').is_pool()
        # end for each pool
        assert pid

        # TEST TRAVERSAL
        ################
        pool = self.session.instance_by_url(ZFSURL('zfs://fs5/internal'))
        assert isinstance(pool, ZPool)
        pfs = pool.as_filesystem()
        assert pfs.is_pool_filesystem()
        assert pfs.parent() is None
        assert isinstance(pfs, ZDataset)
        assert pfs.as_pool() is pool
        assert pfs.pool() is pool

        for child in pfs.children():
            assert child.parent() is pfs
            assert not child.is_pool_filesystem()
            assert child.pool() is pool
            ss = None
            for ss in child.snapshots():
                assert ss.is_snapshot()
                assert ss.parent() is child
            # end check snapshot
            if ss:
                assert child.latest_snapshot() is ss
            # end check latest snapshot
        # end verify parent/child relationships


        # TEST DATASETS
        ################
        sid = 0
        for sid, ds in enumerate(self.session.query(ZDataset)):
            tokens = ds.name.split('/', 1)
            if len(tokens) == 1:
                pool = fs = tokens[0]
            else:
                pool, fs = tokens
            # end handle pool == filesystem
            tokens = ds.name.split('@')
            fs = tokens[0]
            ss = None
            assert len(tokens) < 3
            if len(tokens) == 2:
                ss = tokens[1]
            # end handle token
            url = ds.url()
            assert url.pool() == pool
            assert url.host() == ds.host
            assert url.name() == ds.name
            assert url.filesystem() == fs
            assert url.snapshot_name() == ss
            assert not url.is_pool()
            assert url.is_snapshot() == (ss is not None)
            assert (url.parent_filesystem_url() is None) == (ds.name.count('/') == 0)
            assert url.joined('foo/bar').is_snapshot() == (ss is not None)
            if ss is None:
                assert url.snapshot() is None
            else:
                assert url.snapshot() == ds.name, 'should be fully qualified name'
            assert ZFSURL(str(url)) == url
            assert ZFSURL.new_from_dataset(ds.host, ds.name) == url
        # end for each dataset
        assert sid
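
The dataset loop above re-derives pool, filesystem and snapshot names from ds.name by hand. The same tokenizing as a small standalone helper; split_dataset_name is my name for it, written only to mirror what the test asserts about ZFS naming.

def split_dataset_name(name):
    # 'pool[/child...][@snapshot]' -> (pool, filesystem, snapshot_or_None);
    # for a pool, the filesystem name equals the pool name.
    pool = name.split('/', 1)[0].split('@', 1)[0]
    fs, _, snapshot = name.partition('@')
    return pool, fs, snapshot or None

assert split_dataset_name('tank') == ('tank', 'tank', None)
assert split_dataset_name('tank/a/b@daily') == ('tank', 'tank/a/b', 'daily')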
Code example #4
File: reserve.py Project: Byron/bit
    def generate(self):
        # Create an initial query and map filesystems by basename
        rep = self.ReportType(copy.deepcopy(self.report_schema))
        now = datetime.now()
        config = self.settings_value()
        if config.mode not in self.valid_modes:
            raise ValueError("Can only support the following modes: %s" % ', '.join(self.valid_modes))
        # end handle

        rep.columns[4][0] = config.mode == self.MODE_RESERVATION and 'reserved' or 'quota'

        query = self._session.query(ZDataset).filter(ZDataset.avail != None).\
                                              filter(ZDataset.zfs_priority != None).\
                                              filter(ZDataset.name.like(config.pool_name + '%/%'))

        query = host_filter(config.hosts, ZDataset.host, query)
        fs_map = dict()
        for fs in query:
            if fs.property_is_inherited('zfs_priority'):
                continue
            fs_map.setdefault((fs.host, fs.url().pool()), list()).append(fs)
        # end for each filesystem

        distribute_space = config.distribute_space
        if distribute_space:
            distribute_space = size_to_int(distribute_space)
        # end convert space

        if distribute_space and config.max_cap:
            raise ValueError("Please specify either 'max_cap or 'distribute_space', or set one of them to 0")
        # end assure we don't see both

        if config.debug_priorities and len(fs_map) > 1:
            raise AssertionError("If debug_priorities are used, you muse limit the amount of involved hosts to one")
        # end make sure debugging makes sense

        for (host, pool), fs_list in fs_map.iteritems():
            if config.debug_priorities and len(config.debug_priorities) != len(fs_list):
                raise AssertionError("Please specify exactly %i priorities, one for each filesystem, got %i" % (len(fs_list), len(config.debug_priorities)))
            # end verify priorities

            priorities = config.debug_priorities or [fs.zfs_priority for fs in fs_list]
            total_parts = sum(priorities)
            pool = self._session.instance_by_url(ZFSURL.new(host, pool))
            if distribute_space:
                total_alloc = distribute_space
            else:
                total_alloc = pool.size * (config.max_cap / 100.0)
            # end handle total_alloc

            for fs, prio in zip(fs_list, priorities):
                reserve = (total_alloc / float(total_parts)) * prio
                rep.records.append([now - fs.updated_at,
                                    fs,
                                    prio,
                                    fs.used,
                                    reserve,
                                    reserve - fs.used,
                                    reserve - fs.avail,
                                    (fs.used / float(reserve)) * 100.0
                                    ])
            # end for each filesystem
        # end for each pool-host pair
        if len(rep.records) > 1:
            rep.records.append(rep.aggregate_record())
        # end aggregate only if it makes sense

        return rep
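
The allocation rule in generate() is plain proportional sharing: each filesystem receives total_alloc scaled by its priority's share of the priority sum. A small worked sketch of just that arithmetic:

def reservations(total_alloc, priorities):
    # Mirrors: reserve = (total_alloc / float(total_parts)) * prio
    total_parts = float(sum(priorities))
    return [total_alloc / total_parts * prio for prio in priorities]

# e.g. distributing 900 (GiB, say) across priorities 1, 2 and 3:
assert reservations(900, [1, 2, 3]) == [150.0, 300.0, 450.0]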
Code example #5
File: list.py Project: Byron/bit
    def execute(self, args, remaining_args):
        config = self.settings_value()
        session = ZSession.new()
        zcls = args.type == ZReportGenerator.TYPE_POOL and ZPool or ZDataset
        query = session.query(zcls)
        table = zcls.__table__
        columns = table.columns.keys()
        hosts_attribute = zcls.host
        name_attribute = zcls.name
        columns_important = getattr(config.columns, args.type)

        if args.type == ZReportGenerator.TYPE_SNAPSHOT:
            query = query.filter(ZDataset.avail == None)
            columns_important = config.columns.snapshot
        elif args.type == ZReportGenerator.TYPE_FILESYSTEM:
            query = query.filter(ZDataset.avail != None).filter(ZDataset.type == ZReportGenerator.TYPE_FILESYSTEM)
            columns_important = config.columns.filesystem

        # COLUMNS FILTER
        #################
        if args.columns:
            has_user_columns = True
            if len(args.columns) == 1 and args.columns[0] in (self.COLUMN_ALL, self.COLUMN_IMPORTANT):
                if args.columns[0] == self.COLUMN_IMPORTANT:
                    args.columns = columns_important
                else:
                    has_user_columns = False
                # end handle 'all'
            # end handle presets

            if has_user_columns:
                columns = self.verify_columns(table.columns, args.columns)
                if not columns:
                    return self.ERROR
                # end early abort
            # end handle special case: all
        # end check provided columns

        # Always use the updated_at column
        columns.insert(0, 'updated_at')

        # HOSTS FILTER
        ##############
        if args.host:
            query = query.filter(hosts_attribute == args.host)
        # end

        # Name filter
        ##############
        if args.name:
            name = '%%%s%%' % args.name
            query = query.filter(name_attribute.like(name))
        # end handle name filter

        # ORDER
        #########
        # NOTE: if there is no order, order by creation ascending !
        if not args.order_by_asc and not args.order_by_desc:
            args.order_by_asc = ['host', 'creation']
        # end auto-order

        for attr, order in (('order_by_asc', 'asc'), ('order_by_desc', 'desc')):
            order_cols = getattr(args, attr)
            if not order_cols:
                continue
            # end handle order_cols
            order_cols = self.columns_by_names(table.columns, order_cols)
            if order_cols:
                query = query.order_by(*(getattr(col, order)() for col in order_cols))
        # end for each attr, order
        
        rep = Report()
        rep.columns = self.table_schema_from_colums(table.columns, columns)
        now = datetime.now()

        # FILL RECORDS
        ##############
        col_to_attr = zcls.__mapper__.get_property_by_column
        name_to_col = table.columns.__getitem__
        for inst in query:
            rec = list()

            if isinstance(inst, ZDataset) and args.leaf and not inst.is_snapshot() and list(inst.children()):
                continue
            # end skip non-leaf datasets

            for cid, name in enumerate(columns):
                if name == self.COLUMN_URL:
                    val = str(ZFSURL.new_from_dataset(inst.host, inst.name))
                else:
                    val = getattr(inst, col_to_attr(name_to_col(name)).key)
                    if isinstance(val, datetime):
                        val = now - val
                    # end handle conversions
                # end handle special case
                rec.append(val)
            # end for each column
            rep.records.append(rec)
        # end for each row

        # AGGREGATION
        ##################
        if len(rep.records) > 1:
            agr = rep.aggregate_record()
            agr[0] = now - now
            rep.records.append(agr)

            if args.aggregate_only:
                rep.records = rep.records[-1:]
            # end remove all records but aggregate
        # end aggregate only if there is something

        # Finally, rename updated_at to 'seen' - the values are computed by now
        # and nothing downstream cares about the schema names anymore
        for col in rep.columns:
            if col[0] == 'updated_at':
                col[0] = 'seen'
        # end rename updated_at

        rep.serialize(Report.SERIALIZE_TTY, sys.stdout.write)
        return self.SUCCESS
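
Two details in execute() are easy to misread: the name filter's '%%%s%%' builds the SQL LIKE pattern '%<name>%' ('%%' is an escaped percent sign in %-formatting), and every datetime value is converted to an age before serialization. A minimal sketch of both; the helper names are mine, not the project's.

from datetime import datetime

def like_pattern(name):
    # '%%' escapes a literal '%', so 'tank' becomes '%tank%'
    return '%%%s%%' % name

def record_value(value, now):
    # datetimes are reported as an age relative to 'now'; everything else passes through
    return now - value if isinstance(value, datetime) else value

assert like_pattern('tank') == '%tank%'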