def test_url(self):
    """Verify our URL type can handle all the various names.

    Exercises ZFSURL round-tripping for every pool and dataset known to the
    session: construction, parsing, parent/child traversal, and snapshot
    handling.  Relies on the test session being pre-populated (the trailing
    ``assert pid`` / ``assert sid`` guards prove the loops actually ran).
    """
    pid = 0
    # TEST POOLS
    ############
    for pid, pool in enumerate(self.session.query(ZPool)):
        url = ZFSURL.new(pool.host, pool.name)
        assert url.pool() == pool.name
        # a pool is the root of the hierarchy - it cannot have a parent
        assert url.parent_filesystem_url() is None
        assert url.host() == pool.host
        assert url.filesystem() == pool.name, 'each pool has an associated filesystem'
        assert url.is_pool()
        assert not url.is_snapshot()
        assert url.snapshot() is None
        assert url.name() == pool.name
        # string serialization must round-trip to an equal URL
        assert ZFSURL(str(url)) == url
        assert pool.url() == url
        assert self.session.instance_by_url(url) is pool
        # Just try some additional functionality
        assert not url.joined('foo').is_pool()
    # end for each pool
    assert pid

    # TEST TRAVERSAL
    ################
    # NOTE(review): 'zfs://fs5/internal' presumably matches a fixture pool - confirm against test data
    pool = self.session.instance_by_url(ZFSURL('zfs://fs5/internal'))
    assert isinstance(pool, ZPool)
    pfs = pool.as_filesystem()
    assert pfs.is_pool_filesystem()
    assert pfs.parent() is None
    assert isinstance(pfs, ZDataset)
    # pool <-> filesystem conversions must return the very same instances
    assert pfs.as_pool() is pool
    assert pfs.pool() is pool
    for child in pfs.children():
        assert child.parent() is pfs
        assert not child.is_pool_filesystem()
        assert child.pool() is pool
        # 'ss' keeps the last snapshot seen; snapshots() appears to yield them
        # oldest-to-newest, so the last one should be latest_snapshot()
        ss = None
        for ss in child.snapshots():
            assert ss.is_snapshot()
            assert ss.parent() is child
        # end check snapshot
        if ss:
            assert child.latest_snapshot() is ss
        # end check latest snapshot
    # end verify parent/child relationships

    # TEST DATASETS
    ################
    sid = 0
    for sid, ds in enumerate(self.session.query(ZDataset)):
        # pool name is everything before the first '/' (or the whole name)
        tokens = ds.name.split('/', 1)
        if len(tokens) == 1:
            pool = fs = tokens[0]
        else:
            pool, fs = tokens
        # end handle pool == filesystem
        # filesystem is the full name minus any '@snapshot' suffix;
        # this intentionally overwrites the 'fs' computed above
        tokens = ds.name.split('@')
        fs = tokens[0]
        ss = None
        assert len(tokens) < 3
        if len(tokens) == 2:
            ss = tokens[1]
        # end handle token
        url = ds.url()
        assert url.pool() == pool
        assert url.host() == ds.host
        assert url.name() == ds.name
        assert url.filesystem() == fs
        assert url.snapshot_name() == ss
        assert not url.is_pool()
        assert url.is_snapshot() == (ss is not None)
        # only top-level datasets (no '/') lack a parent filesystem
        assert (url.parent_filesystem_url() is None) == (ds.name.count('/') == 0)
        assert url.joined('foo/bar').is_snapshot() == (ss is not None)
        if ss is None:
            assert url.snapshot() is None
        else:
            assert url.snapshot() == ds.name, 'should be fully qualified name'
        # round-trip via string and via the alternate constructor
        assert ZFSURL(str(url)) == url
        assert ZFSURL.new_from_dataset(ds.host, ds.name) == url
    # end for each dataset
    assert sid
def execute(self, args, remaining_args):
    """Query pools, filesystems or snapshots and print a tabular report.

    Builds an SQLAlchemy query for the entity selected by ``args.type``,
    applies column/host/name filters and ordering from ``args``, converts
    the rows into a :class:`Report`, optionally appends an aggregate row,
    and serializes the result to stdout.

    :param args: parsed command-line namespace (type, columns, host, name,
        order_by_asc, order_by_desc, leaf, aggregate_only)
    :param remaining_args: unparsed trailing arguments (unused here)
    :return: ``self.SUCCESS`` on success, ``self.ERROR`` if an invalid
        column was requested
    """
    config = self.settings_value()
    session = ZSession.new()
    # Pools live in their own table; filesystems and snapshots are both
    # ZDataset rows.  (Was the fragile pre-2.5 'cond and A or B' hack.)
    zcls = ZPool if args.type == ZReportGenerator.TYPE_POOL else ZDataset
    query = session.query(zcls)
    table = zcls.__table__
    columns = table.columns.keys()
    hosts_attribute = zcls.host
    name_attribute = zcls.name

    columns_important = getattr(config.columns, args.type)
    if args.type == ZReportGenerator.TYPE_SNAPSHOT:
        # NOTE: SQLAlchemy needs '== None' / '!= None' (not 'is') to emit
        # IS NULL / IS NOT NULL - snapshots have no 'avail' value.
        query = query.filter(ZDataset.avail == None)
        columns_important = config.columns.snapshot
    elif args.type == ZReportGenerator.TYPE_FILESYSTEM:
        query = query.filter(ZDataset.avail != None).filter(ZDataset.type == ZReportGenerator.TYPE_FILESYSTEM)
        columns_important = config.columns.filesystem
    # end handle type-specific filtering

    # COLUMNS FILTER
    #################
    if args.columns:
        has_user_columns = True
        if len(args.columns) == 1 and args.columns[0] in (self.COLUMN_ALL, self.COLUMN_IMPORTANT):
            if args.columns[0] == self.COLUMN_IMPORTANT:
                args.columns = columns_important
            else:
                has_user_columns = False
            # end handle 'all'
        # end handle presets
        if has_user_columns:
            columns = self.verify_columns(table.columns, args.columns)
            if not columns:
                return self.ERROR
            # end early abort
        # end handle special case: all
    # end check provided columns

    # Always use the updated_at column
    columns.insert(0, 'updated_at')

    # HOSTS FILTER
    ##############
    if args.host:
        query = query.filter(hosts_attribute == args.host)
    # end handle host filter

    # NAME FILTER
    ##############
    if args.name:
        name = '%%%s%%' % args.name
        query = query.filter(name_attribute.like(name))
    # end handle name filter

    # ORDER
    #########
    # NOTE: if there is no order, order by creation ascending !
    if not args.order_by_asc and not args.order_by_desc:
        args.order_by_asc = ['host', 'creation']
    # end auto-order
    for attr, order in (('order_by_asc', 'asc'), ('order_by_desc', 'desc')):
        order_cols = getattr(args, attr)
        if not order_cols:
            continue
        # end handle order_cols
        order_cols = self.columns_by_names(table.columns, order_cols)
        if order_cols:
            query = query.order_by(*(getattr(col, order)() for col in order_cols))
    # end for each attr, order

    rep = Report()
    rep.columns = self.table_schema_from_colums(table.columns, columns)
    now = datetime.now()

    # FILL RECORDS
    ##############
    # Hoist the per-row lookups once; both are called for every cell.
    col_to_attr = zcls.__mapper__.get_property_by_column
    name_to_col = table.columns.__getitem__
    for inst in query:
        rec = list()
        # --leaf: skip non-snapshot datasets that have children.
        # Cheapest test (the flag) goes first.
        if args.leaf and isinstance(inst, ZDataset) and not inst.is_snapshot() and list(inst.children()):
            continue
        # end skip non-leaf datasets
        for cid, name in enumerate(columns):
            if name == self.COLUMN_URL:
                val = str(ZFSURL.new_from_dataset(inst.host, inst.name))
            else:
                val = getattr(inst, col_to_attr(name_to_col(name)).key)
                if isinstance(val, datetime):
                    # show age relative to now instead of an absolute time
                    val = now - val
                # end handle conversions
            # end handle special case
            rec.append(val)
        # end for each column
        rep.records.append(rec)
    # end for each row

    # AGGREGATION
    ##################
    if len(rep.records) > 1:
        agr = rep.aggregate_record()
        agr[0] = now - now  # zero timedelta placeholder for the 'seen' column
        rep.records.append(agr)
        if args.aggregate_only:
            rep.records = rep.records[-1:]
        # end remove all records but aggregate
    # end aggregate only if there is something

    # Finally, make sure updated_at becomes seen - we now have the values and no one cares about the schema
    # names anymore
    for col in rep.columns:
        if col[0] == 'updated_at':
            col[0] = 'seen'
    # end rename updated_at

    rep.serialize(Report.SERIALIZE_TTY, sys.stdout.write)
    return self.SUCCESS