def run(self, params, args):
	"""Remove network interfaces from one or more hosts.

	Requires at least one host argument, plus one of the parameters
	'interface', 'mac', or 'all' to select which interface rows to
	remove. The actual removal is performed by the plugins, which are
	handed the matching `networks` table ids.

	Raises ArgRequired, ParamRequired, or CommandError when the host,
	selector parameter, or named interface/mac does not exist.
	"""
	if len(args) == 0:
		raise ArgRequired(self, 'host')

	hosts = self.getHostnames(args)
	if not hosts:
		raise ArgRequired(self, 'host')

	(interface, mac, all_interfaces) = self.fillParams([('interface', None), ('mac', None), ('all', 'false')])

	all_interfaces = self.str2bool(all_interfaces)

	# Need at least one way to pick which interface(s) to remove
	if not all_interfaces and not interface and not mac:
		raise ParamRequired(self, ('interface', 'mac'))

	networks = ()
	for host in hosts:
		if all_interfaces:
			# Every network row attached to this host
			networks = flatten(
				self.db.select(
					""" id from networks where node=(select id from nodes where name=%s) """,
					(host, )))
		elif interface:
			# Only the row for the named device
			networks = flatten(
				self.db.select(
					""" id from networks where node=(select id from nodes where name=%s) and device=%s """,
					(host, interface)))

			if not networks:
				raise CommandError(
					self,
					'no interface "%s" exists on %s' % (interface, host))
		else:
			# Only the row matching the given MAC address
			networks = flatten(
				self.db.select(
					""" id from networks where node=(select id from nodes where name=%s) and mac=%s """,
					(host, mac)))

			if not networks:
				raise CommandError(
					self,
					'no mac address "%s" exists on %s' % (mac, host))

		# NOTE(review): plugins are run per host with that host's
		# network ids — confirm placement against original indentation
		self.runPlugins(networks)
def run(self, params, args):
	"""Print one row per matching pallet: version, release, arch, os
	and the boxes it is assigned to. With expanded=true the pallet's
	source URL is appended as an extra column."""
	self.beginOutput()

	show_url, = self.fillParams([('expanded', 'false')])
	show_url = self.str2bool(show_url)

	for pallet in self.getPallets(args, params):
		# All boxes this pallet is stacked into, space separated
		box_names = flatten(self.db.select(
			""" boxes.name from stacks, boxes where stacks.roll=%s and stacks.box=boxes.id """,
			(pallet.id,)
		))

		row = [pallet.version, pallet.rel, pallet.arch, pallet.os, ' '.join(box_names)]
		if show_url:
			row.append(pallet.url)

		self.addOutput(pallet.name, row)

	columns = ['name', 'version', 'release', 'arch', 'os', 'boxes']
	if show_url:
		columns.append('url')

	self.endOutput(columns, trimOwner=False)
def run(self, params, args):
	"""List pallets. Columns: version, release, arch, os, boxes —
	plus url when the expanded parameter is true."""
	self.beginOutput()

	expanded = self.str2bool(self.fillParams([('expanded', 'false')])[0])

	# Header is fixed up front; url only appears in expanded mode
	header = ['name', 'version', 'release', 'arch', 'os', 'boxes'] + (['url'] if expanded else [])

	for pallet in self.getPallets(args, params):
		# Space-separated list of boxes this pallet belongs to
		boxes = ' '.join(flatten(self.db.select(
			""" boxes.name from stacks, boxes where stacks.roll=%s and stacks.box=boxes.id """,
			(pallet.id,)
		)))

		line = [pallet.version, pallet.rel, pallet.arch, pallet.os, boxes]
		if expanded:
			line.append(pallet.url)

		self.addOutput(pallet.name, line)

	self.endOutput(header, trimOwner=False)
def host_lines(self, name, zone):
	"Lists the name->IP mappings for all hosts"
	records = []

	rows = self.db.select(
		""" nodes.name, networks.ip, networks.device, networks.name from subnets, nodes, networks where subnets.zone=%s and networks.subnet=subnets.id and networks.node=nodes.id """,
		(zone, ))

	for host_name, ip, device, network_name in rows:
		# Interfaces without an address get no A record
		if ip is None:
			continue

		# Fall back to the node name when the interface is unnamed
		if not network_name:
			network_name = host_name

		records.append('%s A %s\n' % (network_name, ip))

		# Record the aliases as CNAMEs.
		# NOTE(review): this filters on device name alone, which could
		# also match same-named devices on other hosts — confirm intent.
		aliases = flatten(self.db.select(
			""" aliases.name from aliases, networks where networks.device=%s and networks.id=aliases.network """,
			(device, )))

		for alias in aliases:
			records.append('%s CNAME %s\n' % (alias, network_name))

	return ''.join(records)
def run(self, params, args):
	"""List the aliases defined on each host's interfaces.

	Hosts are positional arguments; passing host= as a parameter is
	rejected with UsageError. Without interface=, every device on the
	host is examined; otherwise only the named one. Output columns:
	host, alias, interface.
	"""
	if 'host' in params or 'hosts' in params:
		raise UsageError(self, "Incorrect usage.")

	(interface, ) = self.fillParams([ ('interface', None) ])

	self.beginOutput()

	for host in self.getHostnames(args):
		# Fixed: compare to None with `is`, not `==` (PEP 8)
		if interface is None:
			# No filter given — look at every device on the host
			devices = flatten(self.db.select("""
				device from networks where
				node = (select id from nodes where name = %s)
				""", (host,)
			))
		else:
			devices = (interface,)

		for device in devices:
			for alias, in self.db.select("""
				name from aliases where network = (
				select id from networks where
				node = (select id from nodes where name = %s)
				and device = %s
				)""", (host, device)
			):
				self.addOutput(host, (alias, device))

	self.endOutput(header=['host', 'alias', 'interface'], trimOwner=False)
def run(self, params, args):
	"""List the aliases defined on each host's interfaces.

	Hosts are positional arguments; passing host= as a parameter is
	rejected with UsageError. Without interface=, every device on the
	host is included in the IN clause; otherwise only the named one.
	Output columns: host, alias, interface.
	"""
	if 'host' in params or 'hosts' in params:
		raise UsageError(self, "Incorrect usage.")

	interface, = self.fillParams([('interface', None)])

	self.beginOutput()

	for host in self.getHostnames(args):
		if not interface:
			# No filter — every device configured on this host
			devices = flatten(
				self.db.select(
					""" networks.device FROM networks LEFT JOIN nodes ON networks.node = nodes.id WHERE nodes.name = %s """,
					(host, )))
		else:
			devices = [interface]

		query = """ networks.device, aliases.name FROM aliases LEFT JOIN networks ON aliases.network = networks.id LEFT JOIN nodes ON networks.node = nodes.id WHERE nodes.name = %s """
		values = [host]

		# Fixed: idiomatic truthiness test (was `if len(devices):`)
		if devices:
			query += " AND networks.device IN %s"
			values.append(devices)

		for device, alias in self.db.select(query, values):
			self.addOutput(host, (alias, device))

	self.endOutput(header=['host', 'alias', 'interface'], trimOwner=False)
def run(self, params, args):
	"""Remove storage controller configuration at the given scope.

	slot= is required and may be a comma-separated list of slot
	numbers, or '*' for all. adapter= and enclosure= optionally narrow
	the match ('*' or absent means any). Raises CommandError when no
	matching disk specification exists for a scope/slot combination.
	Deleting the scope_map row cascades to storage_controller.
	"""
	# Get the scope and make sure the args are valid
	scope, = self.fillParams([('scope', 'global')])
	scope_mappings = self.getScopeMappings(args, scope)

	# Now validate the params
	adapter, enclosure, slot = self.fillParams([
		('adapter', None),
		('enclosure', None),
		('slot', None, True)
	])

	# Make sure the adapter is an integer greater than 0, if it exists
	if adapter and adapter != '*':
		try:
			adapter = int(adapter)
		except ValueError:
			# Fixed: was a bare `except:` — only int() conversion
			# failures should map to a ParamType error
			raise ParamType(self, 'adapter', 'integer')

		if adapter < 0:
			raise ParamValue(self, 'adapter', '>= 0')
	else:
		adapter = None

	# Make sure the enclosure is an integer greater than 0, if it exists
	if enclosure and enclosure != '*':
		try:
			enclosure = int(enclosure)
		except ValueError:
			# Fixed: was a bare `except:`
			raise ParamType(self, 'enclosure', 'integer')

		if enclosure < 0:
			raise ParamValue(self, 'enclosure', '>= 0')
	else:
		enclosure = None

	# Parse the slots
	slots = []
	if slot:
		for s in slot.split(','):
			# Make sure the slot is valid
			if s == '*':
				# We're removing them all
				s = None
			else:
				try:
					s = int(s)
				except ValueError:
					# Fixed: was a bare `except:`
					raise ParamType(self, 'slot', 'integer')

				if s < 0:
					raise ParamValue(self, 'slot', '>= 0')

				if s in slots:
					raise ParamError(
						self, 'slot', f'"{s}" is listed twice'
					)

			# Looks good
			slots.append(s)

	scope_ids = []
	for scope_mapping in scope_mappings:
		for slot in slots:
			# Check that the controller configuration exists for the scope
			query = """
				scope_map.id FROM storage_controller,scope_map
				WHERE storage_controller.scope_map_id = scope_map.id
				AND scope_map.scope = %s
				AND scope_map.appliance_id <=> %s
				AND scope_map.os_id <=> %s
				AND scope_map.environment_id <=> %s
				AND scope_map.node_id <=> %s
			"""
			values = list(scope_mapping)

			# 0 might be valid so need to check for None
			if adapter is not None:
				query += " AND storage_controller.adapter = %s"
				values.append(adapter)

			if enclosure is not None:
				query += " AND storage_controller.enclosure = %s"
				values.append(enclosure)

			if slot is not None:
				query += " AND storage_controller.slot = %s"
				values.append(slot)

			rows = self.db.select(query, values)

			if not rows:
				if adapter is None:
					adapter = '*'
				if enclosure is None:
					enclosure = '*'
				if slot is None:
					slot = '*'

				raise CommandError(
					self,
					f'disk specification for "{adapter}/'
					f'{enclosure}/{slot}" doesn\'t exist'
				)

			scope_ids.extend(flatten(rows))

	# Controller disk specifications existed for all the scope mappings,
	# so delete them.
	# Note: We just delete the scope mapping, the ON DELETE CASCADE takes
	# care of removing the storage_controller table entries for us.
	self.db.execute('delete from scope_map where id in %s', (scope_ids,))
def run(self, params, args):
	"""Remove storage controller disk specifications for a scope.

	The required slot= parameter is a comma-separated list of slot
	numbers ('*' selects all); adapter= and enclosure= narrow the
	match when given. Missing specifications raise CommandError.
	Only the scope_map rows are deleted; ON DELETE CASCADE removes
	the storage_controller rows.
	"""
	# Get the scope and make sure the args are valid
	scope, = self.fillParams([('scope', 'global')])
	scope_mappings = self.getScopeMappings(args, scope)

	# Now validate the params
	adapter, enclosure, slot = self.fillParams([('adapter', None), ('enclosure', None), ('slot', None, True)])

	# Make sure the adapter is an integer greater than 0, if it exists
	if adapter and adapter != '*':
		try:
			adapter = int(adapter)
		except ValueError:
			# Fixed: bare `except:` replaced — only catch int() failures
			raise ParamType(self, 'adapter', 'integer')

		if adapter < 0:
			raise ParamValue(self, 'adapter', '>= 0')
	else:
		adapter = None

	# Make sure the enclosure is an integer greater than 0, if it exists
	if enclosure and enclosure != '*':
		try:
			enclosure = int(enclosure)
		except ValueError:
			# Fixed: bare `except:` replaced
			raise ParamType(self, 'enclosure', 'integer')

		if enclosure < 0:
			raise ParamValue(self, 'enclosure', '>= 0')
	else:
		enclosure = None

	# Parse the slots
	slots = []
	if slot:
		for s in slot.split(','):
			# Make sure the slot is valid
			if s == '*':
				# We're removing them all
				s = None
			else:
				try:
					s = int(s)
				except ValueError:
					# Fixed: bare `except:` replaced
					raise ParamType(self, 'slot', 'integer')

				if s < 0:
					raise ParamValue(self, 'slot', '>= 0')

				if s in slots:
					raise ParamError(self, 'slot', f'"{s}" is listed twice')

			# Looks good
			slots.append(s)

	scope_ids = []
	for scope_mapping in scope_mappings:
		for slot in slots:
			# Check that the controller configuration exists for the scope
			query = """
				scope_map.id FROM storage_controller,scope_map
				WHERE storage_controller.scope_map_id = scope_map.id
				AND scope_map.scope = %s
				AND scope_map.appliance_id <=> %s
				AND scope_map.os_id <=> %s
				AND scope_map.environment_id <=> %s
				AND scope_map.node_id <=> %s
			"""
			values = list(scope_mapping)

			# 0 might be valid so need to check for None
			if adapter is not None:
				query += " AND storage_controller.adapter = %s"
				values.append(adapter)

			if enclosure is not None:
				query += " AND storage_controller.enclosure = %s"
				values.append(enclosure)

			if slot is not None:
				query += " AND storage_controller.slot = %s"
				values.append(slot)

			rows = self.db.select(query, values)

			if not rows:
				if adapter is None:
					adapter = '*'
				if enclosure is None:
					enclosure = '*'
				if slot is None:
					slot = '*'

				raise CommandError(
					self,
					f'disk specification for "{adapter}/'
					f'{enclosure}/{slot}" doesn\'t exist')

			scope_ids.extend(flatten(rows))

	# Controller disk specifications existed for all the scope mappings,
	# so delete them.
	# Note: We just delete the scope mapping, the ON DELETE CASCADE takes
	# care of removing the storage_controller table entries for us.
	self.db.execute('delete from scope_map where id in %s', (scope_ids, ))
def run(self, params, args):
	"""Add one or more pallets to this frontend.

	Arguments may be local paths or http/https/ftp URLs; with no
	arguments, whatever is mounted at /mnt/cdrom is used. Each
	argument is exploded into a temp directory by the plugins, probed
	for pallets (a single arg may yield several from a jumbo pallet),
	copied into `dir`, recorded in the database when updatedb is set,
	and patched when installing into the default pallet directory.
	Temp directories are cleaned up via an atexit-registered ExitStack.
	"""
	clean, stacki_pallet_dir, updatedb, self.username, self.password = self.fillParams([
		('clean', False),
		('dir', '/export/stack/pallets'),
		('updatedb', True),
		('username', None),
		('password', None),
	])

	# need to provide either both or none
	# Fixed: the original test `a or b and not all(...)` parsed as
	# `a or (b and ...)`, so any username — even with a matching
	# password — raised UsageError. Parenthesize the intent.
	if (self.username or self.password) and not all((self.username, self.password)):
		raise UsageError(self, 'must supply a password along with the username')

	clean = self.str2bool(clean)
	updatedb = self.str2bool(updatedb)

	# create a contextmanager that we can append cleanup jobs to
	# add its closing to run atexit, so we know it will run
	self.deferred = ExitStack()
	atexit.register(self.deferred.close)

	# special case: no args were specified - check if a pallet is mounted at /mnt/cdrom
	if not args:
		mount_point = '/mnt/cdrom'
		result = self._exec(f'mount | grep {mount_point}', shell=True)
		if result.returncode != 0:
			raise CommandError(
				self, 'no pallets specified and /mnt/cdrom is unmounted')
		args.append(mount_point)

	# resolve args and check for existence
	bad_args = []
	for i, arg in enumerate(list(args)):
		# TODO: is this a problem?
		if arg.startswith(('https://', 'http://', 'ftp://')):
			args[i] = arg
			continue

		p = pathlib.Path(arg)
		if not p.exists():
			bad_args.append(arg)
		else:
			args[i] = str(p.resolve())

	if bad_args:
		msg = 'The following arguments appear to be local paths that do not exist: '
		raise CommandError(self, msg + ', '.join(bad_args))

	# most plugins will need a temporary directory, so allocate them here so we do cleanup
	# 'canonical_arg' is the arg provided by the user, but cleaned to be explicit (relative
	# paths resolved, etc)
	# 'exploded_path' is the directory where we will start searching for pallets
	# 'matched_pallets' is a list of pallet_info objects found at that path.
	pallet_args = {}
	for arg in args:
		tmpdir = tempfile.mkdtemp()
		self.deferred.callback(shutil.rmtree, tmpdir)
		pallet_args[arg] = {
			'canonical_arg': arg,
			'exploded_path': tmpdir,
			'matched_pallets': [],
		}

	self.runPlugins(pallet_args)

	prober = probepal.Prober()
	pallet_infos = prober.find_pallets(
		*[pallet_args[path]['exploded_path'] for path in pallet_args])

	# pallet_infos returns a dict {path: [pallet1, ...]}
	# note the list - an exploded_path can point to a jumbo pallet
	for path, pals in pallet_infos.items():
		for arg in pallet_args:
			if pallet_args[arg]['exploded_path'] == path:
				pallet_args[arg]['matched_pallets'] = pals

	# TODO what to do if we match something twice.
	bad_args = [
		arg for arg, info in pallet_args.items()
		if not info['matched_pallets']
	]
	if bad_args:
		msg = 'The following arguments do not appear to be pallets: '
		raise CommandError(self, msg + ', '.join(bad_args))

	# work off of a copy of pallet args, as we modify it as we go
	for arg, data in pallet_args.copy().items():
		if len(data['matched_pallets']) == 1:
			pallet_args[arg]['exploded_path'] = data['matched_pallets'][0].pallet_root
			continue

		# delete the arg pointing to a jumbo and replace it with N new 'dummy' args
		del pallet_args[arg]
		for pal in data['matched_pallets']:
			fake_arg_name = '-'.join(info_getter(pal))
			pallet_args[fake_arg_name] = data.copy()
			pallet_args[fake_arg_name]['exploded_path'] = pal.pallet_root
			pallet_args[fake_arg_name]['matched_pallets'] = [pal]

	# we want to be able to go tempdir to arg
	# this is because we want `canonical_arg` to be what goes in as the `URL` field in the db
	paths_to_args = {
		data['exploded_path']: data['canonical_arg']
		for data in pallet_args.values()
	}

	# we have everything we need, copy the pallet to the fs, add it to the db, and maybe patch it
	for pallet in flatten(pallet_infos.values()):
		self.copy(stacki_pallet_dir, pallet, clean)
		self.write_pallet_xml(stacki_pallet_dir, pallet)
		if updatedb:
			self.update_db(pallet, paths_to_args[pallet.pallet_root])
		if stacki_pallet_dir == '/export/stack/pallets':
			self.patch_pallet(pallet)

	# Clear the old packages
	self._exec('systemctl start ludicrous-cleaner'.split())
def run(self, args):
	"""Collect the current firmware version for each (host, make, model).

	`args` is the tuple (CommonKey, CommonResult, hosts_makes_models,
	expanded, hashit); expanded/hashit are unpacked but not used here.
	Work is grouped per firmware implementation and executed in
	parallel; versions are then validated/normalized through each
	model's version_regex when one is configured. Returns a
	CommonResult mapping each host/make/model to a one-element list
	(or [None] when no result was produced).
	"""
	# Unpack args.
	CommonKey, CommonResult, hosts_makes_models, expanded, hashit = args
	hosts = [
		host_make_model.host for host_make_model in hosts_makes_models
	]

	# get all host attrs up front
	host_attrs = self.owner.getHostAttrDict(host=hosts)

	# implementation name -> {CommonKey(host, make, model): attrs}
	mapped_by_imp_name = {}

	# don't look in the db if there are no hosts.
	if hosts:
		for row in self.owner.db.select(
			""" firmware_imp.name, nodes.Name, firmware_make.name, firmware_model.name FROM firmware_mapping INNER JOIN nodes ON firmware_mapping.node_id = nodes.ID INNER JOIN firmware ON firmware_mapping.firmware_id = firmware.id INNER JOIN firmware_model ON firmware.model_id = firmware_model.id INNER JOIN firmware_make ON firmware_model.make_id = firmware_make.id INNER JOIN firmware_imp ON firmware_model.imp_id = firmware_imp.id WHERE nodes.Name IN %s """,
			(hosts, )):
			imp, host, make, model = row
			# Group hosts under the implementation that services them
			if imp in mapped_by_imp_name:
				mapped_by_imp_name[imp].update(
					{CommonKey(host, make, model): host_attrs[host]})
			else:
				mapped_by_imp_name[imp] = {
					CommonKey(host, make, model): host_attrs[host]
				}

	# run the implementations in parallel.
	results_by_imp = self.owner.run_implementations_parallel(
		implementation_mapping=mapped_by_imp_name,
		display_progress=True,
	)

	# Check for any errors. This will raise an exception if any implementations raised an exception.
	self.check_errors(results=results_by_imp)

	# rebuild the results as (host, make, model) mapped to version
	results_by_host_make_model = {
		host_make_model: version
		for host_make_model, version in flatten(
			results.result.items() for results in results_by_imp.values()
			if results is not None and results.result is not None)
	}

	# Use the version_regex (if set) to parse out and validate the version numbers returned by the implementations
	for host_make_model, version in results_by_host_make_model.items():
		regex_obj = self.owner.try_get_version_regex(
			make=host_make_model.make,
			model=host_make_model.model,
		)
		if regex_obj:
			match = re.search(regex_obj.regex, version, re.IGNORECASE)
			if not match:
				# Keep the raw version but flag it as unvalidated
				results_by_host_make_model[
					host_make_model] = f"{version} (Doesn't validate using the version_regex named {regex_obj.name} and will be ignored by sync)"
			else:
				results_by_host_make_model[host_make_model] = match.group()

	# Do a final pass to turn the results into a list as the top level command expects.
	# Also set None for host + make + model combos that had no results.
	for host_make_model in hosts_makes_models:
		if host_make_model not in results_by_host_make_model:
			results_by_host_make_model[host_make_model] = [None]
		else:
			results_by_host_make_model[host_make_model] = [
				results_by_host_make_model[host_make_model]
			]

	return CommonResult(header=['current_firmware_version'],
			values=results_by_host_make_model)
def run(self, scope_mappings):
	"""Report the intrinsic Kickstart_* networking attributes.

	Only host- and global-scope mappings produce output targets
	(global scope uses the empty-string target). Reads the frontend's
	private and public interface/subnet rows and emits one
	[target, 'global', 'const', name, value] row per attribute, plus
	the Stacki release/version constants for every target.
	"""
	# Figure out the output targets
	node_ids = []
	targets = []
	for scope_mapping in scope_mappings:
		# We only need to run for host or global scope
		if scope_mapping.scope == 'host':
			node_ids.append(scope_mapping.node_id)
		elif scope_mapping.scope == 'global':
			targets.append('')
		else:
			continue

	# Resolve host node ids to hostnames for the target list
	if node_ids:
		targets.extend(
			flatten(
				self.db.select(
					'nodes.name FROM nodes WHERE nodes.id IN %s',
					(node_ids, ))))

	# Get various kickstart networking data
	output_rows = []
	if targets:
		for ip, hostname, zone, address, netmask, network_name in self.db.select(
			""" networks.ip, IF(networks.name IS NOT NULL, networks.name, nodes.name), subnets.zone, subnets.address, subnets.mask, subnets.name FROM networks INNER JOIN subnets ON subnets.id = networks.subnet INNER JOIN nodes ON nodes.id = networks.node INNER JOIN appliances ON appliances.id = nodes.appliance WHERE appliances.name = 'frontend' AND (subnets.name = 'public' OR subnets.name = 'private') """):
			# Derive broadcast/prefix from the subnet's address + mask
			network = IPv4Network(f"{address}/{netmask}")

			if network_name == 'private':
				for target in targets:
					output_rows.append([
						target, 'global', 'const',
						'Kickstart_PrivateKickstartHost', ip
					])
					output_rows.append([
						target, 'global', 'const',
						'Kickstart_PrivateAddress', ip
					])
					output_rows.append([
						target, 'global', 'const',
						'Kickstart_PrivateHostname', hostname
					])
					output_rows.append([
						target, 'global', 'const',
						'Kickstart_PrivateBroadcast',
						str(network.broadcast_address)
					])
					output_rows.append([
						target, 'global', 'const',
						'Kickstart_PrivateDNSDomain', zone
					])
					output_rows.append([
						target, 'global', 'const',
						'Kickstart_PrivateNetwork', address
					])
					output_rows.append([
						target, 'global', 'const',
						'Kickstart_PrivateNetmask', netmask
					])
					output_rows.append([
						target, 'global', 'const',
						'Kickstart_PrivateNetmaskCIDR',
						str(network.prefixlen)
					])
			elif network_name == 'public':
				for target in targets:
					output_rows.append([
						target, 'global', 'const',
						'Kickstart_PublicAddress', ip
					])
					# Public hostname is fully qualified with the zone
					output_rows.append([
						target, 'global', 'const',
						'Kickstart_PublicHostname', f'{hostname}.{zone}'
					])
					output_rows.append([
						target, 'global', 'const',
						'Kickstart_PublicBroadcast',
						str(network.broadcast_address)
					])
					output_rows.append([
						target, 'global', 'const',
						'Kickstart_PublicDNSDomain', zone
					])
					output_rows.append([
						target, 'global', 'const',
						'Kickstart_PublicNetwork', address
					])
					output_rows.append([
						target, 'global', 'const',
						'Kickstart_PublicNetmask', netmask
					])
					output_rows.append([
						target, 'global', 'const',
						'Kickstart_PublicNetmaskCIDR',
						str(network.prefixlen)
					])

	# Add in the Stacki version info
	for target in targets:
		output_rows.append(
			[target, 'global', 'const', 'release', stack.release])
		output_rows.append(
			[target, 'global', 'const', 'version', stack.version])

	return output_rows
def run(self, scope_mappings):
	"""Report the intrinsic host attributes for host-scoped targets.

	Emits [hostname, 'host', 'const', name, value] rows covering each
	host's identity (hostname, appliance, box, os, environment, rack,
	rank, metadata), the pallets/carts stacked in its box (plus a
	derived os.version), its primary network's domainname/hostaddr,
	and its group memberships.
	"""
	# Figure out the output targets
	node_ids = []
	for scope_mapping in scope_mappings:
		# We only need to run for host scope
		if scope_mapping.scope == 'host':
			node_ids.append(scope_mapping.node_id)
		else:
			continue

	output_rows = []
	box_map = defaultdict(list)   # box id -> hostnames using that box
	hostname_map = {}             # node id -> hostname

	# Get the data for the hosts
	if node_ids:
		for node_id, hostname, appliance, box_id, box, os, environment, rack, rank, metadata in self.db.select("""
			nodes.id, nodes.name, appliances.name, boxes.id, boxes.name, oses.name, environments.name, nodes.rack, nodes.rank, nodes.metadata FROM nodes INNER JOIN appliances ON appliances.id = nodes.appliance INNER JOIN boxes ON boxes.id = nodes.box INNER JOIN oses ON oses.id = boxes.os LEFT JOIN environments ON environments.id = nodes.environment WHERE nodes.id IN %s
		""", (node_ids,)):
			output_rows.append([hostname, 'host', 'const', 'hostname', hostname])
			output_rows.append([hostname, 'host', 'const', 'appliance', appliance])
			output_rows.append([hostname, 'host', 'const', 'box', box])
			output_rows.append([hostname, 'host', 'const', 'os', os])

			# environment is a LEFT JOIN, so may be NULL
			if environment:
				output_rows.append([hostname, 'host', 'const', 'environment', environment])

			output_rows.append([hostname, 'host', 'const', 'rack', rack])
			output_rows.append([hostname, 'host', 'const', 'rank', rank])

			if metadata:
				output_rows.append([hostname, 'host', 'const', 'metadata', metadata])

			# Add the host to the box map
			box_map[box_id].append(hostname)

			# And to the hostname map
			hostname_map[node_id] = hostname

		# Now figure out the pallets and carts for every box seen
		for box_id in box_map:
			# First the pallets
			pallets = []
			os_version = None
			for name, version, rel, pallet_os in self.db.select("""
				rolls.name, rolls.version, rolls.rel, rolls.os FROM rolls INNER JOIN stacks ON stacks.roll = rolls.id WHERE stacks.box = %s
			""", (box_id,)):
				pallets.append(f"{name}-{version}-{rel}")

				if name in ['SLES', 'CentOS', 'RHEL', 'Ubuntu', 'Ubuntu-Server', 'Fedora']:
					# the attr os.version is '{major_version}.x'
					# release is now '{OS}{major_version}'
					if pallet_os in rel:
						os_version = f'{rel.replace(pallet_os, "")}.x'
					# fedora's OS is 'redhat' ...
					elif name.lower() in rel:
						os_version = f'{rel.replace(name.lower(), "")}.x'

			for hostname in box_map[box_id]:
				output_rows.append([hostname, 'host', 'const', 'pallets', pallets])
				output_rows.append([hostname, 'host', 'const', 'os.version', os_version])

			# Then the carts
			carts = flatten(self.db.select("""
				carts.name FROM carts INNER JOIN cart_stacks ON cart_stacks.cart = carts.id WHERE cart_stacks.box = %s
			""", (box_id,)))

			for hostname in box_map[box_id]:
				output_rows.append([hostname, 'host', 'const', 'carts', carts])

		# Get some network info for the hosts
		for node_id, zone, address in self.db.select("""
			networks.node, subnets.zone, networks.ip FROM networks INNER JOIN subnets ON networks.subnet=subnets.id WHERE networks.main = true AND networks.node IN %s
		""", (node_ids,)):
			output_rows.append([hostname_map[node_id], 'host', 'const', 'domainname', zone])
			if address:
				output_rows.append([hostname_map[node_id], 'host', 'const', 'hostaddr', address])

		# And finally any groups the hosts are in
		groups = defaultdict(list)
		for node_id, group in self.db.select("""
			memberships.nodeid, groups.name FROM groups INNER JOIN memberships ON memberships.groupid=groups.id WHERE memberships.nodeid IN %s ORDER BY groups.name
		""", (node_ids,)):
			groups[node_id].append(group)
			output_rows.append([hostname_map[node_id], 'host', 'const', f'group.{group}', 'true'])

		# Every host gets a space-separated 'groups' attribute,
		# empty when it belongs to no groups
		for node_id in node_ids:
			output_rows.append([hostname_map[node_id], 'host', 'const', 'groups', ' '.join(groups[node_id])])

	return output_rows
def run(self, params, args):
	"""List attributes for the requested scope.

	Params: attr (a name or a glob pattern), shadow/resolve/var/const
	(booleans), display ('all', 'common', or 'distinct' — the latter
	two only take effect at host scope). Merges normal and shadow
	attributes across scope weights (host > environment > os >
	appliance > global, per the `weights` table below), optionally
	merges plugin-provided const attributes (honoring each host's
	const_overwrite attribute), and prints target/scope/type/attr/value
	rows.
	"""
	# Get the scope and make sure the args are valid
	scope, = self.fillParams([('scope', 'global')])
	scope_mappings = self.getScopeMappings(args, scope)

	# Now validate the params
	attr, shadow, resolve, var, const, display = self.fillParams([
		('attr', None),
		('shadow', True),
		('resolve', True),
		('var', True),
		('const', True),
		('display', 'all'),
	])

	# If there isn't any environments, scope_mappings could be
	# an empty list, in which case we are done
	if not scope_mappings:
		return

	# Make sure bool params are bools
	resolve = self.str2bool(resolve)
	shadow = self.str2bool(shadow)
	var = self.str2bool(var)
	const = self.str2bool(const)

	# attr is a glob when it contains anything beyond plain
	# identifier characters (letters, digits, '_', '.')
	is_glob = attr is not None and re.match('^[a-zA-Z_][a-zA-Z0-9_.]*$', attr) is None

	# target -> {attr name -> [scope, type, name, value] row}
	output = defaultdict(dict)

	if var:
		if resolve and scope == 'host':
			node_ids = [s.node_id for s in scope_mappings]
			hostnames = flatten(
				self.db.select(
					"nodes.name FROM nodes WHERE nodes.id IN %s",
					[node_ids]))

			# Get all the normal attributes for the host's scopes
			query, values = self._construct_host_query(
				node_ids, 'attributes', 'var', attr, is_glob)

			# The attributes come out of the DB with the higher weighted
			# scopes first. Surprisingly, there is no simple way in SQL
			# to squash these rules down by scope weight. So, we do it
			# here instead. Also, filter by attr name, if provided.
			seen = defaultdict(set)
			for host, *row in self.db.select(query, values, prepend_select=False):
				if row[2] not in seen[host]:
					if attr is None or self._fnmatchcase(row[2], attr):
						output[host][row[2]] = row
						seen[host].add(row[2])

			# Merge in any normal global attrs for each host
			query = """ 'global', 'var', attributes.name, attributes.value FROM attributes, scope_map WHERE attributes.scope_map_id = scope_map.id AND scope_map.scope = 'global' """
			values = []

			# If we aren't a glob, we can let the DB filter by case-insensitive name
			if attr and not is_glob:
				query += "AND attributes.name = %s"
				values.append(attr)

			for row in self.db.select(query, values):
				for host in hostnames:
					if row[2] not in seen[host]:
						if attr is None or self._fnmatchcase(row[2], attr):
							output[host][row[2]] = row
							seen[host].add(row[2])

			# Now get the shadow attributes, if requested
			if shadow:
				query, values = self._construct_host_query(
					node_ids, 'shadow.attributes', 'shadow', attr, is_glob)

				# Merge in the shadow attributes for the host's scopes
				weights = {
					'global': 0,
					'appliance': 1,
					'os': 2,
					'environment': 3,
					'host': 4
				}

				for host, *row in self.db.select(query, values, prepend_select=False):
					if row[2] not in seen[host]:
						# If we haven't seen it
						if attr is None or self._fnmatchcase(row[2], attr):
							output[host][row[2]] = row
							seen[host].add(row[2])
					else:
						# Maybe the shadow attr is higher scope
						if weights[row[0]] >= weights[output[host][row[2]][0]]:
							output[host][row[2]] = row

				# Merge in any shadow global attrs for each host
				query = """ 'global', 'shadow', attributes.name, attributes.value FROM shadow.attributes, scope_map WHERE attributes.scope_map_id = scope_map.id AND scope_map.scope = 'global' """
				values = []

				# If we aren't a glob, we can let the DB filter by case-insensitive name
				if attr and not is_glob:
					query += "AND attributes.name = %s"
					values.append(attr)

				for row in self.db.select(query, values):
					for host in hostnames:
						if row[2] not in seen[host]:
							if attr is None or self._fnmatchcase(row[2], attr):
								output[host][row[2]] = row
								seen[host].add(row[2])
						else:
							# A global shadow attr only beats an
							# existing global (normal) attr
							if output[host][row[2]][0] == 'global':
								output[host][row[2]] = row
		else:
			# Non-resolving path: query each scope's attributes directly
			query_data = [('attributes', 'var')]
			if shadow:
				query_data.append(('shadow.attributes', 'shadow'))

			for table, attr_type in query_data:
				if scope == 'global':
					query = f""" '', 'global', '{attr_type}', attributes.name, attributes.value FROM {table} INNER JOIN scope_map ON attributes.scope_map_id = scope_map.id WHERE scope_map.scope = 'global' """
				else:
					query = f""" target.name, scope_map.scope, '{attr_type}', attributes.name, attributes.value FROM {table} INNER JOIN scope_map ON attributes.scope_map_id = scope_map.id """

				values = []
				if scope == 'appliance':
					query += """ INNER JOIN appliances AS target ON target.id = scope_map.appliance_id WHERE scope_map.appliance_id IN %s """
					values.append([s.appliance_id for s in scope_mappings])
				elif scope == 'os':
					query += """ INNER JOIN oses AS target ON target.id = scope_map.os_id WHERE scope_map.os_id IN %s """
					values.append([s.os_id for s in scope_mappings])
				elif scope == 'environment':
					query += """ INNER JOIN environments AS target ON target.id = scope_map.environment_id WHERE scope_map.environment_id IN %s """
					values.append(
						[s.environment_id for s in scope_mappings])
				elif scope == 'host':
					query += """ INNER JOIN nodes AS target ON target.id = scope_map.node_id WHERE scope_map.node_id IN %s """
					values.append([s.node_id for s in scope_mappings])

				# If we aren't a glob, we can let the DB filter by case-insensitive name
				if attr and not is_glob:
					query += "AND attributes.name = %s"
					values.append(attr)

				# Filter by attr name, if provided.
				for target, *row in self.db.select(query, values):
					if attr is None or self._fnmatchcase(row[2], attr):
						output[target][row[2]] = row

	if const:
		# For any host targets, figure out if they have a "const_overwrite" attr
		node_ids = [s.node_id for s in scope_mappings if s.scope == 'host']
		const_overwrite = defaultdict(lambda: True)

		if node_ids:
			for target, value in self.db.select(
				""" nodes.name, attributes.value FROM attributes INNER JOIN scope_map ON attributes.scope_map_id = scope_map.id INNER JOIN nodes ON scope_map.node_id = nodes.id WHERE attributes.name = BINARY 'const_overwrite' AND scope_map.scope = 'host' AND scope_map.node_id IN %s """,
				(node_ids, )):
				const_overwrite[target] = self.str2bool(value)

		# Now run the plugins and merge in the intrinsic attrs
		results = self.runPlugins(scope_mappings)
		for result in results:
			for target, *row in result[1]:
				if attr is None or self._fnmatchcase(row[2], attr):
					if const_overwrite[target]:
						output[target][row[2]] = row
					else:
						# const_overwrite=false: existing attrs win
						if row[2] not in output[target]:
							output[target][row[2]] = row

	# Handle the display parameter if we are host scoped
	self.beginOutput()

	if scope == 'host' and display in {'common', 'distinct'}:
		# Construct a set of attr (name, value) for each target
		host_attrs = {}
		for target in output:
			host_attrs[target] = {(row[2], str(row[3]))
				for row in output[target].values()}

		common_attrs = set.intersection(*host_attrs.values())

		if display == 'common':
			for name, value in sorted(common_attrs):
				self.addOutput('_common_', [None, None, name, value])
		elif display == 'distinct':
			common_attr_names = set(v[0] for v in common_attrs)
			for target in sorted(output.keys()):
				for key in sorted(output[target].keys()):
					if key not in common_attr_names:
						self.addOutput(target, output[target][key])
	else:
		# Output our combined attributes, sorting them by target then attr
		for target in sorted(output.keys()):
			for key in sorted(output[target].keys()):
				self.addOutput(target, output[target][key])

	if scope == 'global':
		header = ''
	else:
		header = scope

	self.endOutput(header=[header, 'scope', 'type', 'attr', 'value'])