def render(self, session, logger, dbuser, sandbox, **arguments):
    """Prepare the on-disk area for checking out a sandbox.

    Validates the sandbox and the authenticated user, creates the
    per-user directory under the broker's templatesdir if it does not
    already exist, and hands ownership of that directory to the user
    via the configured "mean" helper command.

    Raises AuthorizationException if there is no authenticated user,
    and ArgumentError if the sandbox directory already exists or a
    filesystem/subprocess step fails.
    """
    sandbox = self.force_my_sandbox(session, logger, dbuser, sandbox)
    dbsandbox = Sandbox.get_unique(session, sandbox, compel=True)

    if not dbuser:
        raise AuthorizationException("Cannot get a sandbox without"
                                     " an authenticated connection.")

    userdir = os.path.join(self.config.get("broker", "templatesdir"),
                           dbuser.name)
    sandboxdir = os.path.join(userdir, dbsandbox.name)
    if os.path.exists(sandboxdir):
        raise ArgumentError("Directory '%s' already exists. Use git "
                            "fetch within the directory to update it." %
                            sandboxdir)

    if not os.path.exists(userdir):
        try:
            logger.client_info("creating %s" % userdir)
            # 0o775 spelling works on Python 2.6+ and 3.x; 0775 is 2.x-only.
            os.makedirs(userdir, mode=0o775)
        except OSError as e:
            raise ArgumentError("failed to mkdir %s: %s" % (userdir, e))

    # Hand ownership of the new directory to the requesting user.
    args = [self.config.get("broker", "mean")]
    args.append("chown")
    args.append("-owner")
    args.append("%s" % dbuser.name)
    args.append("-path")
    args.append("%s" % userdir)
    try:
        run_command(args, logger=logger)
    except ProcessException:
        # Clean up the half-initialized directory, then re-raise with a
        # bare `raise`: `raise e` would reset the original traceback on
        # Python 2.
        remove_dir(userdir)
        raise
def render(self, session, logger, building, dryrun, incremental, **arguments):
    """Refresh network definitions from a QIP subnet dump.

    Optionally restricted to one building. Holds the "network" sync
    lock for the duration, dumps subnet data into a temp directory,
    feeds it to QIPRefresh, and rolls the session back when --dryrun
    was requested.

    Raises ArgumentError if --dryrun and --incremental are combined,
    or if the dump command fails.
    """
    if building:
        dbbuilding = get_location(session, building=building)
    else:
        dbbuilding = None

    # --dryrun and --incremental do not mix well
    if dryrun and incremental:
        raise ArgumentError("--dryrun and --incremental cannot be given "
                            "simultaneously.")

    key = SyncKey(data="network", logger=logger)
    lock_queue.acquire(key)

    rundir = self.config.get("broker", "rundir")
    tempdir = mkdtemp(prefix="refresh_network_", dir=rundir)
    try:
        args = [self.config.get("broker", "qip_dump_subnetdata"),
                "--datarootdir", tempdir, "--format", "txt", "--noaudit"]
        run_command(args, logger=logger)

        # The old code used file() and never closed the handle; a
        # context manager guarantees it is released.
        with open(os.path.join(tempdir, "subnetdata.txt"), "r") as subnetdata:
            refresher = QIPRefresh(session, logger, dbbuilding, dryrun,
                                   incremental)
            refresher.refresh(subnetdata)

        session.flush()
        if dryrun:
            session.rollback()
    finally:
        lock_queue.release(key)
        remove_dir(tempdir, logger=logger)
def render(self, session, logger, hostname, **arguments):
    """Flip the PXE boot configuration for a single host via aii-installfe."""
    # --configure became the default, but it conflicts with --status:
    # drop it whenever --status was requested.
    if arguments.get("status", False):
        arguments["configure"] = None

    dbhost = hostname_to_host(session, hostname)
    if arguments.get("install", None) and (dbhost.status.name == "ready" or
                                           dbhost.status.name == "almostready"):
        raise ArgumentError("You should change the build status before "
                            "switching the PXE link to install.")

    # Locate the bootserver instance this host is bound to.
    dbservice = Service.get_unique(session, "bootserver", compel=True)
    instance = get_host_bound_service(dbhost, dbservice)
    if not instance:
        raise ArgumentError("{0} has no bootserver.".format(dbhost))

    # The servers bound to that instance will receive the update.
    bootservers = [srv.fqdn for srv in instance.server_hosts]

    command = self.config.get("broker", "installfe")
    args = [command]
    for option, mapped in self._option_map.items():
        if arguments[option]:
            args.append(mapped)
            args.append(dbhost.fqdn)
    if args[-1] == command:
        raise ArgumentError("Missing required target parameter.")

    user = self.config.get("broker", "installfe_user")
    logdir = self.config.get("broker", "logdir")
    args.extend(["--cfgfile", "/dev/null",
                 "--servers",
                 " ".join("%s@%s" % (user, srv) for srv in bootservers),
                 "--sshdir", self.config.get("broker", "installfe_sshdir"),
                 "--logfile", "%s/aii-installfe.log" % logdir])

    run_command(args, logger=logger, loglevel=CLIENT_INFO)
def render(self, session, logger, building, dryrun, incremental, **arguments):
    """Refresh network definitions from a QIP subnet dump.

    Optionally restricted to one building. Holds the "network" sync
    lock while dumping subnet data to a temp directory and feeding it
    to QIPRefresh; rolls the session back when --dryrun was requested.

    Raises ArgumentError if --dryrun and --incremental are combined,
    or if the dump command fails.
    """
    if building:
        dbbuilding = get_location(session, building=building)
    else:
        dbbuilding = None

    # --dryrun and --incremental do not mix well
    if dryrun and incremental:
        raise ArgumentError("--dryrun and --incremental cannot be given "
                            "simultaneously.")

    key = SyncKey(data="network", logger=logger)
    lock_queue.acquire(key)

    rundir = self.config.get("broker", "rundir")
    tempdir = mkdtemp(prefix="refresh_network_", dir=rundir)
    try:
        args = [
            self.config.get("broker", "qip_dump_subnetdata"),
            "--datarootdir", tempdir,
            "--format", "txt",
            "--noaudit"
        ]
        run_command(args, logger=logger)

        # file() leaked the handle; a context manager closes it reliably.
        with open(os.path.join(tempdir, "subnetdata.txt"), "r") as subnetdata:
            refresher = QIPRefresh(session, logger, dbbuilding, dryrun,
                                   incremental)
            refresher.refresh(subnetdata)

        session.flush()
        if dryrun:
            session.rollback()
    finally:
        lock_queue.release(key)
        remove_dir(tempdir, logger=logger)
def poll_vlan(self, session, logger, switch, now, ssh_args): if not switch.primary_ip: raise ArgumentError("Cannot poll VLAN info for {0:l} without " "a registered IP address.".format(switch)) session.query(ObservedVlan).filter_by(switch=switch).delete() session.flush() # Restrict operations to the internal network dbnet_env = NetworkEnvironment.get_unique_or_default(session) args = [] if ssh_args: args.extend(ssh_args) args.append(self.config.get("broker", "vlan2net")) args.append("-ip") args.append(switch.primary_ip) out = run_command(args) try: reader = DictReader(StringIO(out)) for row in reader: vlan = row.get("vlan", None) network = row.get("network", None) bitmask = row.get("bitmask", None) if vlan is None or network is None or bitmask is None or \ len(vlan) == 0 or len(network) == 0 or len(bitmask) == 0: logger.info( "Missing value for vlan, network or bitmask in " "output line #%d: %s" % (reader.line_num, row)) continue try: vlan_int = int(vlan) except ValueError, e: logger.info("Error parsing vlan number in output " "line #%d: %s error: %s" % (reader.line_num, row, e)) continue try: network = force_ipv4("network", network) except ArgumentError, e: raise InternalError(e) try: bitmask_int = int(bitmask) except ValueError, e: logger.info("Error parsing bitmask in output " "line #%d: %s error: %s" % (reader.line_num, row, e)) continue
def poll_vlan(self, session, logger, switch, now, ssh_args): if not switch.primary_ip: raise ArgumentError("Cannot poll VLAN info for {0:l} without " "a registered IP address.".format(switch)) session.query(ObservedVlan).filter_by(switch=switch).delete() session.flush() # Restrict operations to the internal network dbnet_env = NetworkEnvironment.get_unique_or_default(session) args = [] if ssh_args: args.extend(ssh_args) args.append(self.config.get("broker", "vlan2net")) args.append("-ip") args.append(switch.primary_ip) out = run_command(args) try: reader = DictReader(StringIO(out)) for row in reader: vlan = row.get("vlan", None) network = row.get("network", None) bitmask = row.get("bitmask", None) if vlan is None or network is None or bitmask is None or \ len(vlan) == 0 or len(network) == 0 or len(bitmask) == 0: logger.info("Missing value for vlan, network or bitmask in " "output line #%d: %s" % (reader.line_num, row)) continue try: vlan_int = int(vlan) except ValueError, e: logger.info("Error parsing vlan number in output " "line #%d: %s error: %s" % (reader.line_num, row, e)) continue try: network = force_ipv4("network", network) except ArgumentError, e: raise InternalError(e) try: bitmask_int = int(bitmask) except ValueError, e: logger.info("Error parsing bitmask in output " "line #%d: %s error: %s" % (reader.line_num, row, e)) continue
def poll_mac(self, session, netdev, now, ssh_args):
    """Run the camtable importer against a network device.

    Picks the best hostname for the device (primary name, stripped to
    the short name inside ms.com, or the label when no primary name
    exists), then invokes the configured get_camtable importer,
    optionally through ssh.

    Raises ArgumentError if the importer process fails.
    """
    importer = self.config.get("broker", "get_camtable")

    if not netdev.primary_name:
        hostname = netdev.label
    elif netdev.primary_name.fqdn.dns_domain.name == 'ms.com':
        hostname = netdev.primary_name.fqdn.name
    else:
        hostname = netdev.fqdn

    args = []
    if ssh_args:
        args.extend(ssh_args)
    # TODO debug options shows CheckNet fails to return data and not
    # get-camtable
    args.extend([importer, "--debug", hostname])

    try:
        # NOTE(review): `out` is unused in the visible code -- presumably
        # the surrounding (not shown) code parses it; confirm before
        # removing the assignment.
        out = run_command(args)
    except ProcessException as err:
        raise ArgumentError("Failed to run network device discovery: %s" %
                            err)
def poll_mac(self, session, switch, now, ssh_args):
    """Run the camtable importer against a switch.

    Picks the best hostname for the switch (primary name, stripped to
    the short name inside ms.com, or the label when no primary name
    exists), then invokes the configured get_camtable importer,
    optionally through ssh.

    Raises ArgumentError if the importer process fails.
    """
    importer = self.config.get("broker", "get_camtable")
    # run_checknet factored in

    if not switch.primary_name:
        hostname = switch.label
    elif switch.primary_name.fqdn.dns_domain.name == 'ms.com':
        hostname = switch.primary_name.fqdn.name
    else:
        hostname = switch.fqdn

    args = []
    if ssh_args:
        args.extend(ssh_args)
    # TODO debug options shows CheckNet fails to return data and not
    # get-camtable
    args.extend([importer, "--debug", hostname])

    try:
        # NOTE(review): `out` is unused in the visible code -- presumably
        # parsed by code outside this chunk; confirm before removing.
        out = run_command(args)
    except ProcessException as err:
        raise ArgumentError("Failed to run switch discovery: %s" % err)
def discover_switch(session, logger, config, dbswitch, dryrun): """ Perform switch discovery This function can operate in two modes: - If dryrun is False, it performs all the operations required to bring the definition of the switch in AQDB in line with the discovered status in one transaction. - If dryrun is True, it returns the list of individual "aq" commands the user should execute to get the switch into the desired state. In order to make the core logic less complex, simple actions like adding/deleting IP addresses and interfaces are implemented as helper functions, and those helper functions hide the differences between the normal and dryrun modes from the rest of the code. """ importer = config.get("broker", "switch_discover") results = [] dbnet_env = NetworkEnvironment.get_unique_or_default(session) def aqcmd(cmd, *args): """ Helper function to print an AQ command to be executed by the user """ quoted_args = [quote(str(arg)) for arg in args] results.append("aq %s --switch %s %s" % (cmd, dbswitch.primary_name, " ".join(quoted_args))) def update_switch(dbmodel, serial_no, comments): """ Helper for updating core switch attributes, honouring dryrun """ if dryrun: args = ["update_switch"] if dbmodel and dbmodel != dbswitch.model: args.extend(["--model", dbmodel.name, "--vendor", dbmodel.vendor.name]) if serial_no and serial_no != dbswitch.serial_no: args.extend(["--serial", serial_no]) if comments and comments != dbswitch.comments: args.extend(["--comments", comments]) aqcmd(*args) else: if dbmodel: dbswitch.model = dbmodel if serial_no: dbswitch.serial_no = serial_no if comments: dbswitch.comments = comments def del_address(iface, addr): """ Helper for deleting an IP address, honouring dryrun """ if dryrun: aqcmd("del_interface_address", "--interface", iface.name, "--ip", addr.ip) else: iface.assignments.remove(addr) session.flush() q = session.query(AddressAssignment.id) q = q.filter_by(network=addr.network) q = q.filter_by(ip=addr.ip) if not q.first(): q = 
session.query(ARecord) q = q.filter_by(network=addr.network) q = q.filter_by(ip=addr.ip) map(delete_dns_record, q.all()) def del_interface(iface): """ Helper for deleting an interface, honouring dryrun """ if dryrun: aqcmd("del_interface", "--interface", iface.name) else: dbswitch.interfaces.remove(iface) def do_rename_interface(iface, new_name): """ Helper for renaming an interface, honouring dryrun """ if dryrun: aqcmd("update_interface", "--interface", iface.name, "--rename_to", new_name) else: rename_interface(session, iface, new_name) def add_interface(ifname, iftype): """ Helper for adding a new interface, honouring dryrun """ if dryrun: aqcmd("add_interface", "--interface", ifname, "--type", iftype) # There's no Interface instace we could return here, but fortunately # nothing will use the returned value in dryrun mode return None else: return get_or_create_interface(session, dbswitch, name=ifname, interface_type=iftype) def add_address(iface, ifname, ip, label, relaxed): """ Helper for adding an IP address, honouring dryrun """ if label: name = "%s-%s-%s" % (dbswitch.primary_name.fqdn.name, ifname, label) else: name = "%s-%s" % (dbswitch.primary_name.fqdn.name, ifname) fqdn = "%s.%s" % (name, dbswitch.primary_name.fqdn.dns_domain) if dryrun: args = ["add_interface_address", "--interface", ifname, "--ip", ip] if label: args.extend(["--label", label]) aqcmd(*args) else: # Doing the DSDB update if the address existed before would be # tricky, so prevent that case by passing preclude=True dbdns_rec, newly_created = grab_address(session, fqdn, ip, dbnet_env, relaxed=relaxed, preclude=True) assign_address(iface, ip, dbdns_rec.network, label=label) def add_router(ip): """ Helper command for managing router IPs, honouring dryrun """ # TODO: the command should be configurable cmd = "qip-set-router %s" % ip if dryrun: results.append(cmd) else: # If we're not the authoritative source, then we can't just create # the RouterAddress directly. 
TODO: It should be configurable # whether we're authoritative for network data logger.client_info("You should run '%s'." % cmd) def warning(msg): """ Helper for sending warning messages to the client We cannot use the side channel in dryrun mode, because the "aq show_switch" command does not issue show_request. So we need to embed the warnings in the output. """ if dryrun: results.append("# Warning: " + msg) else: logger.client_info("Warning: " + msg) hostname = determine_helper_hostname(session, logger, config, dbswitch) if hostname: args = determine_helper_args(config) args.append(hostname) else: args = [] args.extend([importer, str(dbswitch.primary_name)]) try: out = run_command(args) except ProcessException, err: raise ArgumentError("Failed to run switch discovery: %s" % err)
except NotFoundException, nfe: failed.append("%s: %s" % (host, nfe)) except ArgumentError, ae: failed.append("%s: %s" % (host, ae)) if failed: raise ArgumentError("Invalid hosts in list:\n%s" % "\n".join(failed)) for (group, hostlist) in groups.items(): # create temporary file, point aii-installfe at that file. groupargs = args[:] with NamedTemporaryFile() as tmpfile: tmpfile.writelines([x.fqdn + "\n" for x in hostlist]) tmpfile.flush() for (option, mapped) in self._option_map.items(): if arguments[option]: groupargs.append(mapped) groupargs.append(tmpfile.name) if groupargs[-1] == command: raise ArgumentError("Missing required target parameter.") groupargs.append("--servers") groupargs.append(" ".join( ["%s@%s" % (user, s) for s in servers[group]])) # it would be nice to parallelize this.... run_command(groupargs, logger=logger, loglevel=CLIENT_INFO)
except NotFoundException, nfe: failed.append("%s: %s" % (host, nfe)) except ArgumentError, ae: failed.append("%s: %s" % (host, ae)) if failed: raise ArgumentError("Invalid hosts in list:\n%s" % "\n".join(failed)) for (group, hostlist) in groups.items(): # create temporary file, point aii-installfe at that file. groupargs = args[:] with NamedTemporaryFile() as tmpfile: tmpfile.writelines([x.fqdn + "\n" for x in hostlist]) tmpfile.flush() for (option, mapped) in self._option_map.items(): if arguments[option]: groupargs.append(mapped) groupargs.append(tmpfile.name) if groupargs[-1] == command: raise ArgumentError("Missing required target parameter.") groupargs.append("--servers") groupargs.append(" ".join(["%s@%s" % (user, s) for s in servers[group]])) # it would be nice to parallelize this.... run_command(groupargs, logger=logger, loglevel=CLIENT_INFO)
class TemplateDomain(object):
    """Wraps panc compile operations for a domain or sandbox branch."""

    def __init__(self, domain, author=None, logger=LOGGER):
        # domain: the branch (domain or sandbox) being compiled
        # author: required when the branch is a sandbox
        self.domain = domain
        self.author = author
        self.logger = logger

    def directories(self):
        """Return a list of directories required for compiling this domain"""
        config = Config()
        dirs = []

        if self.domain.branch_type == 'domain':
            dirs.append(os.path.join(config.get("broker", "domainsdir"),
                                     self.domain.name))

        dirs.append(os.path.join(config.get("broker", "quattordir"),
                                 "cfg", "domains", self.domain.name))

        dirs.append(os.path.join(config.get("broker", "quattordir"),
                                 "build", "xml", self.domain.name))

        return dirs

    def outputdirs(self):
        """Returns a list of directories that should exist before compiling"""
        config = Config()
        dirs = []
        dirs.append(config.get("broker", "profilesdir"))
        # The regression tests occasionally have issues with panc
        # auto-creating this directory - not sure why.
        if self.domain.clusters:
            dirs.append(os.path.join(config.get("broker", "quattordir"),
                                     "build", "xml", self.domain.name,
                                     "clusters"))
        return dirs

    def compile(self, session, only=None, locked=False,
                panc_debug_include=None, panc_debug_exclude=None,
                cleandeps=False):
        """Compile the branch's profiles.

        The build directories are checked and constructed if necessary,
        so no prior setup is required.  The compile may take some time
        (current rate is 10 hosts per second, with a couple of seconds
        of constant overhead), and the possibility of blocking on the
        compile lock.

        If the 'only' parameter is provided, then it should be a list
        or set containing the profiles that need to be compiled.

        May raise ArgumentError exception, else returns the standard
        output (as a string) of the compile
        """
        config = Config()

        if self.domain.branch_type == 'sandbox':
            if not self.author:
                raise InternalError("Missing required author to compile "
                                    "sandbox %s" % self.domain.name)
            sandboxdir = os.path.join(config.get("broker", "templatesdir"),
                                      self.author.name, self.domain.name)
            if not os.path.exists(sandboxdir):
                raise ArgumentError("Sandbox directory '%s' does not exist." %
                                    sandboxdir)
            if not self.sandbox_has_latest(config, sandboxdir):
                self.logger.warn("Sandbox %s/%s does not contain the "
                                 "latest changes from the prod domain. If "
                                 "there are failures try "
                                 "`git fetch && git merge origin/prod`" %
                                 (self.author.name, self.domain.name))

        self.logger.info("preparing domain %s for compile" %
                         self.domain.name)

        # Ensure that the compile directory is in a good state.
        outputdir = config.get("broker", "profilesdir")

        for d in self.directories() + self.outputdirs():
            if not os.path.exists(d):
                try:
                    self.logger.info("creating %s" % d)
                    os.makedirs(d)
                except OSError as e:
                    raise ArgumentError("Failed to mkdir %s: %s" % (d, e))

        nothing_to_do = True
        if only:
            nothing_to_do = False
        else:
            hostnames = session.query(Fqdn)
            hostnames = hostnames.join(DnsRecord, HardwareEntity, Machine,
                                       Host)
            hostnames = hostnames.filter_by(branch=self.domain,
                                            sandbox_author=self.author)
            clusternames = session.query(Cluster.name)
            clusternames = clusternames.filter_by(branch=self.domain,
                                                  sandbox_author=self.author)
            if self.author:
                # Need to restrict to the subset of the sandbox managed
                # by this author.
                only = [str(fqdn) for fqdn in hostnames]
                only.extend(["cluster/%s" % c.name for c in clusternames])
                nothing_to_do = not bool(only)
            else:
                nothing_to_do = (not hostnames.count() and
                                 not clusternames.count())

        if nothing_to_do:
            return 'No hosts: nothing to do.'

        # The ant wrapper is silly and it may pick up the wrong set of
        # .jars if ANT_HOME is not set
        panc_env = {"PATH": "%s/bin:%s" % (config.get("broker", "java_home"),
                                           os_environ.get("PATH", "")),
                    "ANT_HOME": config.get("broker", "ant_home"),
                    "JAVA_HOME": config.get("broker", "java_home")}
        if config.has_option("broker", "ant_options"):
            panc_env["ANT_OPTS"] = config.get("broker", "ant_options")

        args = [config.get("broker", "ant")]
        args.append("--noconfig")
        args.append("-f")
        args.append("%s/build.xml" % config.get("broker", "compiletooldir"))
        args.append("-Dbasedir=%s" % config.get("broker", "quattordir"))
        args.append("-Dpanc.jar=%s" % self.domain.compiler)
        args.append("-Dpanc.formatter=%s" % config.get("panc", "formatter"))
        args.append("-Dpanc.template_extension=%s" %
                    config.get("panc", "template_extension"))
        args.append("-Ddomain=%s" % self.domain.name)
        args.append("-Ddistributed.profiles=%s" % outputdir)
        args.append("-Dpanc.batch.size=%s" %
                    config.get("panc", "batch_size"))
        args.append("-Dant-contrib.jar=%s" %
                    config.get("broker", "ant_contrib_jar"))
        args.append("-Dgzip.output=%s" % config.get("panc", "gzip_output"))
        if self.domain.branch_type == 'sandbox':
            args.append("-Ddomain.templates=%s" % sandboxdir)
        if only:
            # Use -Dforce.build=true?
            # TODO: pass the list in a temp file
            args.append("-Dobject.profile=%s" % " ".join(only))
            args.append("compile.object.profile")
        else:
            # Technically this is the default, but being explicit
            # doesn't hurt.
            args.append("compile.domain.profiles")
        if panc_debug_include is not None:
            args.append("-Dpanc.debug.include=%s" % panc_debug_include)
        if panc_debug_exclude is not None:
            args.append("-Dpanc.debug.exclude=%s" % panc_debug_exclude)
        if cleandeps:
            # Cannot send a false value - the test in build.xml is for
            # whether or not the property is defined at all.
            args.append("-Dclean.dep.files=%s" % cleandeps)

        out = ''
        try:
            if not locked:
                if only and len(only) == 1:
                    key = CompileKey(domain=self.domain.name,
                                     profile=list(only)[0],
                                     logger=self.logger)
                else:
                    key = CompileKey(domain=self.domain.name,
                                     logger=self.logger)
                # NOTE(review): if CompileKey() itself raised, `key` would
                # be unbound in the finally block below -- confirm whether
                # that can happen in practice.
                lock_queue.acquire(key)
            self.logger.info("starting compile")
            try:
                out = run_command(args, env=panc_env, logger=self.logger,
                                  path=config.get("broker", "quattordir"),
                                  loglevel=CLIENT_INFO)
            except ProcessException as e:
                raise ArgumentError("\n%s%s" % (e.out, e.err))
        finally:
            if not locked:
                lock_queue.release(key)

        # No need for a lock here - there is only a single file written
        # and it is swapped into place atomically.
        build_index(config, session, outputdir, logger=self.logger)
        return out
def render(self, session, logger, list, **arguments):
    """Flip the PXE boot configuration for a list of hosts.

    Hosts are validated and grouped by the bootserver instance they
    are bound to; aii-installfe is then run once per instance, reading
    the target hosts from a temporary file.

    Raises ArgumentError if any host in the list is invalid (all
    problems are collected and reported together).
    """
    # `list` shadows the builtin but is part of the exposed command
    # interface, so it cannot be renamed here.
    check_hostlist_size(self.command, self.config, list)

    # The default is now --configure, but that does not play nice with
    # --status. Turn --configure off if --status is present
    if arguments.get("status", False):
        arguments["configure"] = None

    user = self.config.get("broker", "installfe_user")
    command = self.config.get("broker", "installfe")
    args = [command]
    args.append("--cfgfile")
    args.append("/dev/null")
    args.append("--sshdir")
    args.append(self.config.get("broker", "installfe_sshdir"))
    args.append("--logfile")
    logdir = self.config.get("broker", "logdir")
    args.append("%s/aii-installfe.log" % logdir)

    dbservice = Service.get_unique(session, "bootserver", compel=True)
    dbhosts = hostlist_to_hosts(session, list)

    # defaultdict(list) replaces the dated defaultdict(ListType);
    # types.ListType is just an alias for list.
    hosts_per_instance = defaultdict(list)
    failed = []
    for dbhost in dbhosts:
        if arguments.get("install", None) and (dbhost.status.name == "ready" or
                                               dbhost.status.name ==
                                               "almostready"):
            failed.append("{0}: You should change the build status "
                          "before switching the PXE link to install."
                          .format(dbhost))

        # Find what "bootserver" instance we're bound to
        si = get_host_bound_service(dbhost, dbservice)
        if not si:
            failed.append("{0} has no bootserver.".format(dbhost))
        else:
            hosts_per_instance[si].append(dbhost)

    if failed:
        raise ArgumentError("Invalid hosts in list:\n%s" % "\n".join(failed))

    for si, hostlist in hosts_per_instance.items():
        # create temporary file, point aii-installfe at that file.
        groupargs = args[:]
        with NamedTemporaryFile() as tmpfile:
            # NOTE(review): writelines() with str items assumes a Python 2
            # NamedTemporaryFile (binary mode accepts str there); under
            # Python 3 this would need mode="w+" or byte strings.
            tmpfile.writelines([x.fqdn + "\n" for x in hostlist])
            tmpfile.flush()

            for option, mapped in self._option_map.items():
                if arguments[option]:
                    groupargs.append(mapped)
                    groupargs.append(tmpfile.name)
            if groupargs[-1] == command:
                raise ArgumentError("Missing required target parameter.")

            servers = []
            for srv in si.servers:
                # The primary name is the address to be used for delivering
                # configuration to a host, so we should use that even if the
                # service itself is bound to a different IP address
                if srv.host:
                    servers.append(srv.host.fqdn)
                else:
                    servers.append(srv.fqdn)

            groupargs.append("--servers")
            groupargs.append(" ".join(["%s@%s" % (user, s) for s in servers]))

            # it would be nice to parallelize this....
            run_command(groupargs, logger=logger, loglevel=CLIENT_INFO)