def render(self, session, service, server, client, **arguments):
    instance = arguments.get("instance", None)
    dbserver = server and hostname_to_host(session, server) or None
    dbclient = client and hostname_to_host(session, client) or None
    dbservice = Service.get_unique(session, service, compel=True)
    if dbserver:
        q = session.query(ServiceInstance)
        q = q.filter_by(service=dbservice)
        q = q.join('servers')
        q = q.filter_by(host=dbserver)
        q = q.order_by(ServiceInstance.name)
        return ServiceInstanceList(q.all())
    elif dbclient:
        service_instances = dbclient.services_used
        service_instances = [si for si in service_instances
                             if si.service == dbservice]
        if instance:
            service_instances = [si for si in service_instances
                                 if si.name == instance]
        return ServiceInstanceList(service_instances)

    if not instance:
        return dbservice
    return get_service_instance(session, dbservice, instance)

def render(self, session, logger, hostname, **arguments):
    dbhost = hostname_to_host(session, hostname)
    if dbhost.status.name == 'ready':
        raise ArgumentError("{0:l} is in ready status, "
                            "advertised status can be reset only "
                            "when host is in non ready state."
                            .format(dbhost))
    dbhost.advertise_status = False
    session.flush()

    td = TemplateDomain(dbhost.branch, dbhost.sandbox_author, logger=logger)
    plenary = Plenary.get_plenary(dbhost, logger=logger)
    # Force a host lock as pan might overwrite the profile...
    with plenary.get_key():
        try:
            plenary.write(locked=True)
            td.compile(session, only=plenary.object_templates, locked=True)
        except IncompleteError:
            raise ArgumentError("Run aq make for host %s first." %
                                dbhost.fqdn)
        except:
            plenary.restore_stash()
            raise
    return

def render(self, session, logger, hostname, buildstatus, **arguments):
    dbhost = hostname_to_host(session, hostname)
    dbstatus = HostLifecycle.get_unique(session, buildstatus, compel=True)
    changed = dbhost.status.transition(dbhost, dbstatus)
    if not changed or not dbhost.archetype.is_compileable:
        return

    session.add(dbhost)
    session.flush()

    plenary = PlenaryHost(dbhost, logger=logger)
    # Force a host lock as pan might overwrite the profile...
    key = CompileKey(domain=dbhost.branch.name, profile=dbhost.fqdn,
                     logger=logger)
    try:
        lock_queue.acquire(key)
        plenary.write(locked=True)
        td = TemplateDomain(dbhost.branch, dbhost.sandbox_author,
                            logger=logger)
        td.compile(session, only=[dbhost.fqdn], locked=True)
    except IncompleteError:
        raise ArgumentError("Run aq make for host %s first." % dbhost.fqdn)
    except:
        plenary.restore_stash()
        raise
    finally:
        lock_queue.release(key)

def render(self, session, logger, hostname, service, instance, **arguments):
    dbhost = hostname_to_host(session, hostname)
    dbservice = Service.get_unique(session, service, compel=True)
    msg = "Service %s" % service
    if instance:
        dbinstances = [get_service_instance(session, dbservice, instance)]
        msg = "Service %s, instance %s" % (service, instance)
    else:
        q = session.query(ServiceInstance)
        q = q.filter_by(service=dbservice)
        q = q.join('servers')
        q = q.filter_by(host=dbhost)
        dbinstances = q.all()

    for dbinstance in dbinstances:
        if dbhost in dbinstance.server_hosts:
            if (dbinstance.client_count > 0 and
                    len(dbinstance.server_hosts) <= 1):
                logger.warning("WARNING: Server %s is the last server "
                               "bound to %s which still has clients" %
                               (hostname, msg))
            dbinstance.server_hosts.remove(dbhost)
            session.expire(dbhost, ['_services_provided'])

    session.flush()

    plenaries = PlenaryCollection(logger=logger)
    plenaries.append(Plenary.get_plenary(dbhost))
    for dbinstance in dbinstances:
        plenaries.append(Plenary.get_plenary(dbinstance))
    plenaries.write()

    return

def render(self, session, hostname, **arguments):
    dbhost = hostname_to_host(session, hostname)
    arguments['machine'] = dbhost.machine.label
    return CommandUpdateInterfaceMachine.render(self, session=session,
                                                hostname=hostname,
                                                **arguments)

def render(self, session, logger, hostname, service, instance, **arguments):
    dbhost = hostname_to_host(session, hostname)
    dbservice = Service.get_unique(session, service, compel=True)
    dbinstance = get_service_instance(session, dbservice, instance)
    if dbhost in dbinstance.server_hosts:
        # FIXME: This should just be a warning. There is currently
        # no way of returning output that would "do the right thing"
        # on the client but still show status 200 (OK).
        # The right thing would generally be writing to stderr for
        # a CLI (either raw or csv), and some sort of generic error
        # page for a web client.
        raise ArgumentError("Server %s is already bound to service %s "
                            "instance %s." % (hostname, service, instance))
    # The ordering_list will manage the position for us
    dbinstance.server_hosts.append(dbhost)
    session.flush()

    plenary_info = Plenary.get_plenary(dbinstance, logger=logger)
    plenary_info.write()

    # XXX: Need to recompile...
    return

def render(self, *args, **kwargs):
    session = kwargs['session']
    hostname = kwargs['hostname']
    dbhost = hostname_to_host(session, hostname)
    if dbhost.archetype.name != 'windows':
        raise ArgumentError("Host %s has archetype %s, expected windows." %
                            (dbhost.fqdn, dbhost.archetype.name))
    # The superclass already contains the logic to handle this case.
    return CommandDelHost.render(self, *args, **kwargs)

def lookup_target(session, plenaries, hostname, ip, cluster, resourcegroup,
                  service_address, alias):
    """
    Check the parameters of the server providing a given service.

    Look for potential conflicts, and return a dict that is suitable to be
    passed to either the constructor of ServiceInstanceServer, or to the
    find_server() function.
    """

    params = {}

    if cluster and hostname:
        raise ArgumentError("Only one of --cluster and --hostname may be "
                            "specified.")

    if alias:
        dbdns_env = DnsEnvironment.get_unique_or_default(session)
        dbdns_rec = Alias.get_unique(session, fqdn=alias,
                                     dns_environment=dbdns_env, compel=True)
        params["alias"] = dbdns_rec

    if hostname:
        params["host"] = hostname_to_host(session, hostname)
        plenaries.append(Plenary.get_plenary(params["host"]))
    if cluster:
        params["cluster"] = Cluster.get_unique(session, cluster, compel=True)
        plenaries.append(Plenary.get_plenary(params["cluster"]))

    if service_address:
        # TODO: calling get_resource_holder() means doing redundant DB lookups
        # TODO: it would be nice to also accept an FQDN for the service
        # address, to be consistent with the usage of the --service_address
        # option in add_service_address/del_service_address
        holder = get_resource_holder(session, hostname=hostname,
                                     cluster=cluster,
                                     resgroup=resourcegroup, compel=True)
        dbsrv_addr = ServiceAddress.get_unique(session,
                                               name=service_address,
                                               holder=holder, compel=True)
        params["service_address"] = dbsrv_addr
    elif ip:
        for addr in params["host"].hardware_entity.all_addresses():
            if ip != addr.ip:
                continue

            if addr.service_address:
                params["service_address"] = addr.service_address
            else:
                params["address_assignment"] = addr
            break

    return params

def render(self, session, logger, list, **arguments):
    check_hostlist_size(self.command, self.config, list)

    # The default is now --configure, but that does not play nice with
    # --status. Turn --configure off if --status is present
    if arguments.get("status", False):
        arguments["configure"] = None

    user = self.config.get("broker", "installfe_user")
    command = self.config.get("broker", "installfe")
    args = [command]
    args.append("--cfgfile")
    args.append("/dev/null")
    args.append("--sshdir")
    args.append(self.config.get("broker", "installfe_sshdir"))
    args.append("--logfile")
    logdir = self.config.get("broker", "logdir")
    args.append("%s/aii-installfe.log" % logdir)

    servers = dict()
    groups = dict()
    failed = []
    for host in list:
        try:
            dbhost = hostname_to_host(session, host)
            if arguments.get("install", None) and \
               (dbhost.status.name == "ready" or
                    dbhost.status.name == "almostready"):
                failed.append("%s: You should change the build status "
                              "before switching the PXE link to install." %
                              host)

            # Find what "bootserver" instance we're bound to
            dbservice = Service.get_unique(session, "bootserver",
                                           compel=True)
            si = get_host_bound_service(dbhost, dbservice)
            if not si:
                failed.append("%s: Host has no bootserver." % host)
            else:
                if si.name in groups:
                    groups[si.name].append(dbhost)
                else:
                    # for that instance, find what servers are bound to it.
                    servers[si.name] = [host.fqdn
                                        for host in si.server_hosts]
                    groups[si.name] = [dbhost]
        except NotFoundException, nfe:
            failed.append("%s: %s" % (host, nfe))
        except ArgumentError, ae:
            failed.append("%s: %s" % (host, ae))

def render(self, session, logger, hostname, manager, interface, mac,
           comments, **arguments):
    dbhost = hostname_to_host(session, hostname)
    dbmachine = dbhost.machine
    oldinfo = DSDBRunner.snapshot_hw(dbmachine)

    if not manager:
        manager = "%sr.%s" % (dbmachine.primary_name.fqdn.name,
                              dbmachine.primary_name.fqdn.dns_domain.name)

    dbinterface = get_or_create_interface(session, dbmachine,
                                          name=interface, mac=mac,
                                          interface_type='management')
    addrs = ", ".join(["%s [%s]" % (addr.logical_name, addr.ip)
                       for addr in dbinterface.assignments])
    if addrs:
        raise ArgumentError("{0} already has the following addresses: "
                            "{1}.".format(dbinterface, addrs))

    audit_results = []
    ip = generate_ip(session, logger, dbinterface, compel=True,
                     audit_results=audit_results, **arguments)

    dbdns_rec, newly_created = grab_address(session, manager, ip,
                                            comments=comments,
                                            preclude=True)

    assign_address(dbinterface, ip, dbdns_rec.network)
    session.flush()

    plenary_info = PlenaryMachineInfo(dbmachine, logger=logger)
    key = plenary_info.get_write_key()
    try:
        lock_queue.acquire(key)
        plenary_info.write(locked=True)

        dsdb_runner = DSDBRunner(logger=logger)
        dsdb_runner.update_host(dbmachine, oldinfo)
        dsdb_runner.commit_or_rollback("Could not add host to DSDB")
    except:
        plenary_info.restore_stash()
        raise
    finally:
        lock_queue.release(key)

    if dbmachine.host:
        # XXX: Host needs to be reconfigured.
        pass

    for name, value in audit_results:
        self.audit_result(session, name, value, **arguments)
    return

def render(self, session, server, client, **arguments):
    instance = arguments.get("instance", None)
    dbserver = server and hostname_to_host(session, server) or None
    dbclient = client and hostname_to_host(session, client) or None

    if dbserver:
        q = session.query(ServiceInstance)
        if instance:
            q = q.filter_by(name=instance)
        q = q.join(Service)
        q = q.reset_joinpoint()
        q = q.join('servers')
        q = q.filter_by(host=dbserver)
        q = q.order_by(Service.name, ServiceInstance.name)
        return ServiceInstanceList(q.all())
    elif dbclient:
        service_instances = dbclient.services_used
        if instance:
            service_instances = [si for si in service_instances
                                 if si.name == instance]
        return ServiceInstanceList(service_instances)
    else:
        # Try to load as much as we can as bulk queries since loading the
        # objects one by one is much more expensive
        q = session.query(Service)
        q = q.join(ServiceInstance)
        q = q.options(contains_eager('instances'))
        q = q.options(subqueryload('archetypes'))
        q = q.options(subqueryload('personalities'))
        q = q.options(undefer('instances._client_count'))
        q = q.options(subqueryload('instances.personality_service_map'))
        q = q.options(subqueryload('instances.servers'))
        q = q.options(joinedload('instances.servers.host'))
        q = q.options(joinedload('instances.servers.host.machine'))
        q = q.options(subqueryload('instances.service_map'))
        q = q.options(joinedload('instances.service_map.location'))
        q = q.options(subqueryload('instances.personality_service_map'))
        q = q.options(joinedload('instances.personality_service_map.location'))
        q = q.order_by(Service.name, ServiceInstance.name)
        return ServiceList(q.all())

def render(self, session, logger, hostname, cluster, personality,
           **arguments):
    dbcluster = Cluster.get_unique(session, cluster, compel=True)
    dbhost = hostname_to_host(session, hostname)
    if not dbhost.cluster:
        raise ArgumentError("{0} is not bound to a cluster.".format(dbhost))
    if dbhost.cluster != dbcluster:
        raise ArgumentError("{0} is bound to {1:l}, not {2:l}.".format(
            dbhost, dbhost.cluster, dbcluster))

    if personality:
        dbpersonality = Personality.get_unique(session, name=personality,
                                               archetype=dbhost.archetype,
                                               compel=True)
        if dbpersonality.cluster_required:
            raise ArgumentError("Cannot switch host to personality %s "
                                "because that personality requires a "
                                "cluster" % personality)
        dbhost.personality = dbpersonality
    elif dbhost.personality.cluster_required:
        raise ArgumentError("Host personality %s requires a cluster, "
                            "use --personality to change personality "
                            "when leaving the cluster." %
                            dbhost.personality.name)

    dbcluster.hosts.remove(dbhost)
    remove_service_addresses(dbcluster, dbhost)
    dbcluster.validate()

    session.flush()
    session.expire(dbhost, ['_cluster'])

    # Will need to write a cluster plenary and either write or
    # remove a host plenary. Grab the domain key since the two
    # must be in the same domain.
    host_plenary = Plenary.get_plenary(dbhost, logger=logger)
    cluster_plenary = Plenary.get_plenary(dbcluster, logger=logger)
    key = CompileKey(domain=dbcluster.branch.name, logger=logger)
    try:
        lock_queue.acquire(key)
        cluster_plenary.write(locked=True)
        try:
            host_plenary.write(locked=True)
        except IncompleteError:
            host_plenary.cleanup(domain=dbhost.branch.name, locked=True)
    except:
        cluster_plenary.restore_stash()
        host_plenary.restore_stash()
        raise
    finally:
        lock_queue.release(key)

def render(self, session, server, client, **arguments):
    instance = arguments.get("instance", None)
    dbserver = server and hostname_to_host(session, server) or None
    dbclient = client and hostname_to_host(session, client) or None

    if dbserver:
        q = session.query(ServiceInstance)
        if instance:
            q = q.filter_by(name=instance)
        q = q.join(Service)
        q = q.reset_joinpoint()
        q = q.join('servers')
        q = q.filter_by(host=dbserver)
        q = q.order_by(Service.name, ServiceInstance.name)
        return q.all()
    elif dbclient:
        service_instances = dbclient.services_used
        if instance:
            service_instances = [si for si in service_instances
                                 if si.name == instance]
        return service_instances
    else:
        # Try to load as much as we can as bulk queries since loading the
        # objects one by one is much more expensive
        q = session.query(Service)
        q = q.join(ServiceInstance)
        q = q.options(contains_eager('instances'))
        q = q.options(subqueryload('archetypes'))
        q = q.options(subqueryload('personalities'))
        q = q.options(undefer('instances._client_count'))
        q = q.options(subqueryload('instances.personality_service_map'))
        q = q.options(subqueryload('instances.servers'))
        q = q.options(joinedload('instances.servers.host'))
        q = q.options(joinedload('instances.servers.host.hardware_entity'))
        q = q.options(subqueryload('instances.service_map'))
        q = q.options(joinedload('instances.service_map.location'))
        q = q.options(subqueryload('instances.personality_service_map'))
        q = q.options(joinedload('instances.personality_service_map.location'))
        q = q.order_by(Service.name, ServiceInstance.name)
        return q.all()

def render(self, session, logger, hostname, pancinclude, pancexclude,
           pancdebug, cleandeps, **arguments):
    dbhost = hostname_to_host(session, hostname)
    if pancdebug:
        pancinclude = r'.*'
        pancexclude = r'components/spma/functions'
    dom = TemplateDomain(dbhost.branch, dbhost.sandbox_author, logger=logger)
    dom.compile(session, only=[dbhost.fqdn],
                panc_debug_include=pancinclude,
                panc_debug_exclude=pancexclude,
                cleandeps=cleandeps)
    return

def render(self, session, logger, hostname, domain, sandbox, force,
           **arguments):
    (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                 domain=domain,
                                                 sandbox=sandbox,
                                                 compel=True)

    if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
        raise ArgumentError("Managing hosts to {0:l} is not allowed."
                            .format(dbbranch))

    dbhost = hostname_to_host(session, hostname)
    dbsource = dbhost.branch
    dbsource_author = dbhost.sandbox_author

    if dbhost.cluster:
        raise ArgumentError("Cluster nodes must be managed at the "
                            "cluster level; this host is a member of "
                            "{0}.".format(dbhost.cluster))

    if not force:
        validate_branch_commits(dbsource, dbsource_author,
                                dbbranch, dbauthor, logger, self.config)

    plenary_host = Plenary.get_plenary(dbhost, logger=logger)

    dbhost.branch = dbbranch
    dbhost.sandbox_author = dbauthor

    session.flush()

    # We're crossing domains, need to lock everything.
    # XXX: There's a directory per domain. Do we need subdirectories
    # for different authors for a sandbox?
    with CompileKey.merge([CompileKey(domain=dbsource.name, logger=logger),
                           CompileKey(domain=dbbranch.name, logger=logger)]):
        plenary_host.stash()
        try:
            plenary_host.write(locked=True)
        except IncompleteError:
            # This template cannot be written, we leave it alone
            # It would be nice to flag the state in the host?
            plenary_host.remove(locked=True)
        except:
            # This will not restore the cleaned up build files. That's OK.
            # They will be recreated as needed.
            plenary_host.restore_stash()
            raise

    return

def render(self, session, logger, hostname, service, instance, force=False,
           **arguments):
    dbhost = hostname_to_host(session, hostname)
    dbservice = Service.get_unique(session, service, compel=True)
    chooser = Chooser(dbhost, logger=logger, required_only=False)
    if instance:
        dbinstance = get_service_instance(session, dbservice, instance)
        chooser.set_single(dbservice, dbinstance, force=force)
    else:
        chooser.set_single(dbservice, force=force)

    chooser.flush_changes()
    chooser.write_plenary_templates()
    return

def render(self, session, logger, hostname, data, generate, **arguments):
    dbhost = hostname_to_host(session, hostname)

    dbresource = get_resource(session, dbhost, **arguments)
    if dbresource:
        plenary_info = Plenary.get_plenary(dbresource, logger=logger)
    else:
        if data:
            plenary_info = PlenaryHostData(dbhost, logger=logger)
        else:
            plenary_info = PlenaryToplevelHost(dbhost, logger=logger)

    if generate:
        return plenary_info._generate_content()
    else:
        return plenary_info.read()

def render(self, session, logger, hostname, **arguments):
    dbhost = hostname_to_host(session, hostname)
    if dbhost.status.name == 'ready':
        raise ArgumentError("{0:l} is in ready status, "
                            "advertised status can be reset only "
                            "when host is in non ready state"
                            .format(dbhost))
    dbhost.advertise_status = False
    session.add(dbhost)
    session.flush()

    if dbhost.archetype.is_compileable:
        return self.compile(session, logger, dbhost)

    return

def render(self, session, logger, hostname, pancinclude, pancexclude,
           pancdebug, cleandeps, **arguments):
    dbhost = hostname_to_host(session, hostname)
    if pancdebug:
        pancinclude = r'.*'
        pancexclude = r'components/spma/functions'
    dom = TemplateDomain(dbhost.branch, dbhost.sandbox_author, logger=logger)

    plenary = Plenary.get_plenary(dbhost, logger=logger)
    with plenary.get_key():
        dom.compile(session, only=plenary.object_templates,
                    panc_debug_include=pancinclude,
                    panc_debug_exclude=pancexclude,
                    cleandeps=cleandeps, locked=True)
    return

def render(self, session, logger, hostname, cluster, personality,
           **arguments):
    dbcluster = Cluster.get_unique(session, cluster, compel=True)
    dbhost = hostname_to_host(session, hostname)
    if not dbhost.cluster:
        raise ArgumentError("{0} is not bound to a cluster.".format(dbhost))
    if dbhost.cluster != dbcluster:
        raise ArgumentError("{0} is bound to {1:l}, not {2:l}.".format(
            dbhost, dbhost.cluster, dbcluster))

    if personality:
        dbpersonality = Personality.get_unique(session, name=personality,
                                               archetype=dbhost.archetype,
                                               compel=True)
        if dbpersonality.cluster_required:
            raise ArgumentError("Cannot switch host to personality %s "
                                "because that personality requires a "
                                "cluster" % personality)
        dbhost.personality = dbpersonality
    elif dbhost.personality.cluster_required:
        raise ArgumentError("Host personality %s requires a cluster, "
                            "use --personality to change personality "
                            "when leaving the cluster." %
                            dbhost.personality.name)

    dbcluster.hosts.remove(dbhost)
    remove_service_addresses(dbcluster, dbhost)
    dbcluster.validate()

    session.flush()
    session.expire(dbhost, ['_cluster'])

    host_plenary = Plenary.get_plenary(dbhost, logger=logger)
    cluster_plenary = Plenary.get_plenary(dbcluster, logger=logger)
    with CompileKey.merge([host_plenary.get_key(),
                           cluster_plenary.get_key()]):
        try:
            cluster_plenary.write(locked=True)
            try:
                host_plenary.write(locked=True)
            except IncompleteError:
                host_plenary.remove(locked=True)
        except:
            cluster_plenary.restore_stash()
            host_plenary.restore_stash()
            raise

def get_resource_holder(session, hostname=None, cluster=None, resgroup=None,
                        compel=True):
    who = None
    if hostname is not None:
        dbhost = hostname_to_host(session, hostname)
        who = dbhost.resholder
        if who is None:
            if compel:
                raise NotFoundException("{0} has no resources."
                                        .format(dbhost))
            dbhost.resholder = HostResource(host=dbhost)
            session.add(dbhost.resholder)
            session.flush()
            who = dbhost.resholder

    if cluster is not None:
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        who = dbcluster.resholder
        if who is None:
            if compel:
                raise NotFoundException("{0} has no resources."
                                        .format(dbcluster))
            dbcluster.resholder = ClusterResource(cluster=dbcluster)
            session.add(dbcluster.resholder)
            session.flush()
            who = dbcluster.resholder

    if resgroup is not None:
        dbrg = ResourceGroup.get_unique(session, name=resgroup, holder=who,
                                        compel=True)
        who = dbrg.resholder
        if who is None:
            if compel:
                raise NotFoundException("{0} has no resources."
                                        .format(dbrg))
            dbrg.resholder = BundleResource(resourcegroup=dbrg)
            session.add(dbrg.resholder)
            session.flush()
            who = dbrg.resholder

    return who

def render(self, session, logger, hostname, **arguments):
    # The default is now --configure, but that does not play nice with
    # --status. Turn --configure off if --status is present
    if arguments.get("status", False):
        arguments["configure"] = None

    dbhost = hostname_to_host(session, hostname)

    if arguments.get("install", None) and \
       (dbhost.status.name == "ready" or
            dbhost.status.name == "almostready"):
        raise ArgumentError("You should change the build status before "
                            "switching the PXE link to install.")

    # Find what "bootserver" instance we're bound to
    dbservice = Service.get_unique(session, "bootserver", compel=True)
    si = get_host_bound_service(dbhost, dbservice)
    if not si:
        raise ArgumentError("{0} has no bootserver.".format(dbhost))
    # for that instance, find what servers are bound to it.
    servers = [host.fqdn for host in si.server_hosts]

    command = self.config.get("broker", "installfe")
    args = [command]
    for (option, mapped) in self._option_map.items():
        if arguments[option]:
            args.append(mapped)
            args.append(dbhost.fqdn)
    if args[-1] == command:
        raise ArgumentError("Missing required target parameter.")

    args.append("--cfgfile")
    args.append("/dev/null")
    args.append("--servers")
    user = self.config.get("broker", "installfe_user")
    args.append(" ".join(["%s@%s" % (user, s) for s in servers]))
    args.append("--sshdir")
    args.append(self.config.get("broker", "installfe_sshdir"))
    args.append("--logfile")
    logdir = self.config.get("broker", "logdir")
    args.append("%s/aii-installfe.log" % logdir)

    run_command(args, logger=logger, loglevel=CLIENT_INFO)

def render(self, session, logger, target, grn, eon_id, hostname, list,
           personality, archetype, **arguments):
    dbgrn = lookup_grn(session, grn, eon_id, logger=logger,
                       config=self.config)

    plenaries = PlenaryCollection(logger=logger)
    if hostname:
        objs = [hostname_to_host(session, hostname)]
        config_key = "host_grn_targets"
    elif list:
        check_hostlist_size(self.command, self.config, list)
        objs = hostlist_to_hosts(session, list)
        config_key = "host_grn_targets"
    elif personality:
        objs = [Personality.get_unique(session, name=personality,
                                       archetype=archetype, compel=True)]
        config_key = "personality_grn_targets"

    for obj in objs:
        section = "archetype_" + obj.archetype.name
        if self.config.has_option(section, config_key):
            valid_targets = [s.strip() for s in
                             self.config.get(section, config_key).split(",")]
        else:
            raise ArgumentError("{0} has no valid GRN targets configured."
                                .format(obj.archetype))

        if target not in valid_targets:
            raise ArgumentError("Invalid target %s for archetype %s, please "
                                "choose from: %s." %
                                (target, obj.archetype.name,
                                 ", ".join(valid_targets)))

        plenaries.append(Plenary.get_plenary(obj))
        self._update_dbobj(obj, target, dbgrn)

    session.flush()

    plenaries.write()

    return

def render(self, session, logger, hostname, service, **arguments):
    dbhost = hostname_to_host(session, hostname)

    for srv in (dbhost.archetype.services + dbhost.personality.services):
        if srv.name == service:
            raise ArgumentError("Cannot unbind a required service. "
                                "Perhaps you want to rebind?")

    dbservice = Service.get_unique(session, service, compel=True)
    si = get_host_bound_service(dbhost, dbservice)
    if si:
        logger.info("Removing client binding")
        dbhost.services_used.remove(si)
        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(PlenaryHost(dbhost, logger=logger))
        plenaries.append(PlenaryServiceInstanceServer(si, logger=logger))
        plenaries.write()

    return

def render(self, session, logger, target, grn, eon_id, hostname, list,
           personality, archetype, **arguments):
    dbgrn = lookup_grn(session, grn, eon_id, logger=logger,
                       config=self.config)

    target_type = "personality" if personality else "host"

    if hostname:
        objs = [hostname_to_host(session, hostname)]
    elif list:
        check_hostlist_size(self.command, self.config, list)
        objs = hostlist_to_hosts(session, list)
    elif personality:
        objs = [Personality.get_unique(session, name=personality,
                                       archetype=archetype, compel=True)]

    for obj in objs:
        # INFO: Fails for archetypes other than 'aquilon', 'vmhost'
        valid_targets = self.config.get("archetype_" + obj.archetype.name,
                                        target_type + "_grn_targets")

        if target not in map(lambda s: s.strip(), valid_targets.split(",")):
            raise ArgumentError("Invalid %s target %s for archetype %s, "
                                "please choose from %s" %
                                (target_type, target, obj.archetype.name,
                                 valid_targets))

        self._update_dbobj(obj, target, dbgrn)

    session.flush()

    if personality:
        plenary = PlenaryPersonality(objs[0], logger=logger)
        plenary.write()

    return

def render(self, session, logger, target, hostname, list, personality,
           archetype, **arguments):
    target_type = "personality" if personality else "host"

    if hostname:
        objs = [hostname_to_host(session, hostname)]
    elif list:
        check_hostlist_size(self.command, self.config, list)
        objs = hostlist_to_hosts(session, list)
    elif personality:
        objs = [Personality.get_unique(session, name=personality,
                                       archetype=archetype, compel=True)]

    plenaries = PlenaryCollection(logger=logger)

    for obj in objs:
        # INFO: Fails for archetypes other than 'aquilon', 'vmhost'
        valid_targets = self.config.get("archetype_" + obj.archetype.name,
                                        target_type + "_grn_targets")
        if target not in [s.strip() for s in valid_targets.split(",")]:
            raise ArgumentError("Invalid %s target %s for archetype %s, "
                                "please choose from %s" %
                                (target_type, target, obj.archetype.name,
                                 valid_targets))

        for grn_rec in obj._grns[:]:
            if target == grn_rec.target:
                obj._grns.remove(grn_rec)

        plenaries.append(Plenary.get_plenary(obj))

    session.flush()

    plenaries.write()

    return

def render(self, session, logger, hostname, buildstatus, **arguments):
    dbhost = hostname_to_host(session, hostname)
    dbstatus = HostLifecycle.get_instance(session, buildstatus)
    changed = dbhost.status.transition(dbhost, dbstatus)
    if not changed or not dbhost.archetype.is_compileable:
        return

    session.flush()

    td = TemplateDomain(dbhost.branch, dbhost.sandbox_author, logger=logger)
    plenary = Plenary.get_plenary(dbhost, logger=logger)
    # Force a host lock as pan might overwrite the profile...
    with plenary.get_key():
        plenary.stash()
        try:
            plenary.write(locked=True)
            td.compile(session, only=plenary.object_templates, locked=True)
        except IncompleteError:
            raise ArgumentError("Run aq make for host %s first." %
                                dbhost.fqdn)
        except:
            plenary.restore_stash()
            raise
    return

def render(self, session, logger, hostname, cluster, personality, **arguments): dbhost = hostname_to_host(session, hostname) dbcluster = Cluster.get_unique(session, cluster, compel=True) if dbcluster.status.name == 'decommissioned': raise ArgumentError("Cannot add hosts to decommissioned clusters.") # We only support changing personality within the same # archetype. The archetype decides things like which OS, how # it builds (dhcp, etc), whether it's compilable, and # switching all of that by side-effect seems wrong # somehow. And besides, it would make the user-interface and # implementation for this command ugly in order to support # changing all of those options. personality_change = False if personality is not None: dbpersonality = Personality.get_unique(session, name=personality, archetype=dbhost.archetype, compel=True) if dbhost.personality != dbpersonality: dbhost.personality = dbpersonality personality_change = True # Allow for non-restricted clusters (the default?) if (len(dbcluster.allowed_personalities) > 0 and dbhost.personality not in dbcluster.allowed_personalities): raise ArgumentError("The personality %s for %s is not allowed " "by the cluster. Specify --personality " "and provide one of %s" % (dbhost.personality, dbhost.fqdn, ", ".join([x.name for x in dbcluster.allowed_personalities]))) # Now that we've changed the personality, we can check # if this is a valid membership change dbcluster.validate_membership(dbhost) plenaries = PlenaryCollection(logger=logger) plenaries.append(Plenary.get_plenary(dbcluster)) if dbhost.cluster and dbhost.cluster != dbcluster: logger.client_info("Removing {0:l} from {1:l}.".format(dbhost, dbhost.cluster)) old_cluster = dbhost.cluster old_cluster.hosts.remove(dbhost) remove_service_addresses(old_cluster, dbhost) old_cluster.validate() session.expire(dbhost, ['_cluster']) plenaries.append(Plenary.get_plenary(old_cluster)) # Apply the service addresses to the new member for res in walk_resources(dbcluster): if not isinstance(res, ServiceAddress): continue apply_service_address(dbhost, res.interfaces, res, logger) if dbhost.cluster: if personality_change: raise ArgumentError("{0:l} already in {1:l}, use " "aq reconfigure to change personality." .format(dbhost, dbhost.cluster)) # the cluster has not changed, therefore there's nothing # to do here. return # Calculate the node index: build a map of all possible values, remove # the used ones, and pick the smallest remaining one node_index_map = set(xrange(len(dbcluster._hosts) + 1)) for link in dbcluster._hosts: # The cluster may have been bigger in the past, so node indexes may # be larger than the current cluster size try: node_index_map.remove(link.node_index) except KeyError: pass dbcluster.hosts.append((dbhost, min(node_index_map))) dbcluster.validate() # demote a host when switching clusters # promote a host when switching clusters if dbhost.status.name == 'ready': if dbcluster.status.name != 'ready': dbalmost = HostAlmostready.get_instance(session) dbhost.status.transition(dbhost, dbalmost) plenaries.append(Plenary.get_plenary(dbhost)) elif dbhost.status.name == 'almostready': if dbcluster.status.name == 'ready': dbready = HostReady.get_instance(session) dbhost.status.transition(dbhost, dbready) plenaries.append(Plenary.get_plenary(dbhost)) session.flush() # Enforce that service instances are set correctly for the # new cluster association. 
chooser = Chooser(dbhost, logger=logger) chooser.set_required() chooser.flush_changes() # the chooser will include the host plenary with CompileKey.merge([chooser.get_key(), plenaries.get_key()]): plenaries.stash() try: chooser.write_plenary_templates(locked=True) plenaries.write(locked=True) except: chooser.restore_stash() plenaries.restore_stash() raise return
def render(self, session, logger, hostname, **arguments): # removing the plenary host requires a compile lock, however # we want to avoid deadlock by the fact that we're messing # with two locks here, so we want to be careful. We grab the # plenaryhost early on (in order to get the filenames filled # in from the db info before we delete it from the db. We then # hold onto those references until we've completed the db # cleanup and if all of that is successful, then we delete the # plenary file (which doesn't require re-evaluating any stale # db information) after we've released the delhost lock. delplenary = False # Any service bindings that we need to clean up afterwards bindings = PlenaryCollection(logger=logger) resources = PlenaryCollection(logger=logger) with DeleteKey("system", logger=logger) as key: # Check dependencies, translate into user-friendly message dbhost = hostname_to_host(session, hostname) host_plenary = Plenary.get_plenary(dbhost, logger=logger) domain = dbhost.branch.name deps = get_host_dependencies(session, dbhost) if (len(deps) != 0): deptext = "\n".join([" %s" % d for d in deps]) raise ArgumentError("Cannot delete host %s due to the " "following dependencies:\n%s." % (hostname, deptext)) archetype = dbhost.archetype.name dbmachine = dbhost.machine oldinfo = DSDBRunner.snapshot_hw(dbmachine) ip = dbmachine.primary_ip fqdn = dbmachine.fqdn for si in dbhost.services_used: plenary = PlenaryServiceInstanceServer(si) bindings.append(plenary) logger.info( "Before deleting host '%s', removing binding '%s'" % (fqdn, si.cfg_path)) del dbhost.services_used[:] if dbhost.resholder: for res in dbhost.resholder.resources: resources.append(Plenary.get_plenary(res)) # In case of Zebra, the IP may be configured on multiple interfaces for iface in dbmachine.interfaces: if ip in iface.addresses: iface.addresses.remove(ip) if dbhost.cluster: dbcluster = dbhost.cluster dbcluster.hosts.remove(dbhost) set_committed_value(dbhost, '_cluster', None) dbcluster.validate() dbdns_rec = dbmachine.primary_name dbmachine.primary_name = None dbmachine.host = None session.delete(dbhost) delete_dns_record(dbdns_rec) session.flush() delplenary = True if dbmachine.vm_container: bindings.append(Plenary.get_plenary(dbmachine.vm_container)) if archetype != 'aurora' and ip is not None: dsdb_runner = DSDBRunner(logger=logger) dsdb_runner.update_host(dbmachine, oldinfo) dsdb_runner.commit_or_rollback("Could not remove host %s from " "DSDB" % hostname) if archetype == 'aurora': logger.client_info("WARNING: removing host %s from AQDB and " "*not* changing DSDB." % hostname) # Past the point of no return... commit the transaction so # that we can free the delete lock. session.commit() # Only if we got here with no exceptions do we clean the template # Trying to clean up after any errors here is really difficult # since the changes to dsdb have already been made. if (delplenary): key = host_plenary.get_remove_key() with CompileKey.merge( [key, bindings.get_write_key(), resources.get_remove_key()]) as key: host_plenary.cleanup(domain, locked=True) # And we also want to remove the profile itself profiles = self.config.get("broker", "profilesdir") # Only one of these should exist, but it doesn't hurt # to try to clean up both. 
xmlfile = os.path.join(profiles, fqdn + ".xml") remove_file(xmlfile, logger=logger) xmlgzfile = xmlfile + ".gz" remove_file(xmlgzfile, logger=logger) # And the cached template created by ant remove_file(os.path.join( self.config.get("broker", "quattordir"), "objects", fqdn + TEMPLATE_EXTENSION), logger=logger) bindings.write(locked=True) resources.remove(locked=True) build_index(self.config, session, profiles, logger=logger) return
def get_or_create_user_principal(session, principal, createuser=True,
                                 createrealm=True, commitoncreate=False,
                                 comments=None, query_options=None):
    if principal is None:
        return None

    m = principal_re.match(principal)
    if not m:
        raise ArgumentError("User principal '%s' is not valid." % principal)
    realm = m.group(2)
    user = m.group(1)

    m = host_re.match(user)
    if m:
        user = '******'
        # Verify that the host exists in AQDB
        hostname_to_host(session, m.group(1))

    # Short circuit the common case, and optimize it to eager load in
    # a single query since this happens on every command:
    q = session.query(UserPrincipal)
    q = q.filter_by(name=user)
    q = q.join(Realm)
    q = q.filter_by(name=realm)
    q = q.reset_joinpoint()
    q = q.options(contains_eager('realm'),
                  joinedload('role'))
    if query_options:
        q = q.options(*query_options)
    dbuser = q.first()
    if dbuser:
        return dbuser

    # If here, need more complicated behavior...
    dbnobody = Role.get_unique(session, 'nobody', compel=True)
    try:
        dbrealm = Realm.get_unique(session, realm, compel=True)
    except NotFoundException:
        if not createrealm:
            raise ArgumentError("Could not find realm %s to create principal "
                                "%s, use --createrealm to create a new record "
                                "for the realm." % (realm, principal))
        LOGGER.info("Realm %s did not exist, creating..." % realm)
        dbrealm = Realm(name=realm)
        session.add(dbrealm)

        LOGGER.info("Creating user %s@%s..." % (user, realm))
        dbuser = UserPrincipal(name=user, realm=dbrealm, role=dbnobody,
                               comments=comments)
        session.add(dbuser)
        if commitoncreate:
            session.commit()
        return dbuser

    q = session.query(UserPrincipal).filter_by(name=user, realm=dbrealm)
    dbuser = q.first()
    if not dbuser:
        if not createuser:
            raise ArgumentError("Could not find principal %s to permission, "
                                "use --createuser to create a new record for "
                                "the principal." % principal)
        LOGGER.info("User %s did not exist in realm %s, creating..." %
                    (user, realm))
        dbuser = UserPrincipal(name=user, realm=dbrealm, role=dbnobody,
                               comments=comments)
        session.add(dbuser)
        if commitoncreate:
            session.commit()
    return dbuser

def render(self, session, logger, hostname, domain, sandbox, force,
           **arguments):
    (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                 domain=domain,
                                                 sandbox=sandbox,
                                                 compel=True)

    if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
        raise ArgumentError("Managing hosts to {0:l} is not allowed."
                            .format(dbbranch))

    dbhost = hostname_to_host(session, hostname)
    dbsource = dbhost.branch
    dbsource_author = dbhost.sandbox_author
    old_branch = dbhost.branch.name

    if dbhost.cluster:
        raise ArgumentError("Cluster nodes must be managed at the "
                            "cluster level; this host is a member of "
                            "{0}.".format(dbhost.cluster))

    if not force:
        validate_branch_commits(dbsource, dbsource_author,
                                dbbranch, dbauthor, logger, self.config)

    dbhost.branch = dbbranch
    dbhost.sandbox_author = dbauthor
    session.add(dbhost)
    session.flush()

    plenary_host = PlenaryHost(dbhost, logger=logger)

    # We're crossing domains, need to lock everything.
    # XXX: There's a directory per domain. Do we need subdirectories
    # for different authors for a sandbox?
    key = CompileKey(logger=logger)
    try:
        lock_queue.acquire(key)

        plenary_host.stash()
        plenary_host.cleanup(old_branch, locked=True)

        # Now we recreate the plenary to ensure that the domain is ready
        # to compile, however (esp. if there was no existing template), we
        # have to be aware that there might not be enough information yet
        # with which we can create a template
        try:
            plenary_host.write(locked=True)
        except IncompleteError:
            # This template cannot be written, we leave it alone
            # It would be nice to flag the state in the host?
            pass
        except:
            # This will not restore the cleaned up files. That's OK.
            # They will be recreated as needed.
            plenary_host.restore_stash()
            raise
    finally:
        lock_queue.release(key)

    return

def render(self, session, logger, hostname, machine, auxiliary, interface,
           mac, comments, **arguments):
    if machine:
        dbmachine = Machine.get_unique(session, machine, compel=True)
    if hostname:
        dbhost = hostname_to_host(session, hostname)
        if machine and dbhost.machine != dbmachine:
            raise ArgumentError("Use either --hostname or --machine to "
                                "uniquely identify a system.")
        dbmachine = dbhost.machine

    oldinfo = DSDBRunner.snapshot_hw(dbmachine)

    dbinterface = get_or_create_interface(session, dbmachine,
                                          name=interface, mac=mac,
                                          interface_type='public',
                                          bootable=False)
    # Multiple addresses will only be allowed with the "add interface
    # address" command
    addrs = ", ".join(["%s [%s]" % (addr.logical_name, addr.ip)
                       for addr in dbinterface.assignments])
    if addrs:
        raise ArgumentError("{0} already has the following addresses: "
                            "{1}.".format(dbinterface, addrs))

    audit_results = []
    ip = generate_ip(session, logger, dbinterface, compel=True,
                     audit_results=audit_results, **arguments)

    dbdns_rec, newly_created = grab_address(session, auxiliary, ip,
                                            comments=comments,
                                            preclude=True)
    if dbmachine.primary_name:
        # This command cannot use a non-default DNS environment, so no
        # extra checks are necessary
        dbdns_rec.reverse_ptr = dbmachine.primary_name.fqdn

    assign_address(dbinterface, ip, dbdns_rec.network)
    session.flush()

    plenary_info = PlenaryMachineInfo(dbmachine, logger=logger)
    key = plenary_info.get_write_key()
    try:
        lock_queue.acquire(key)
        plenary_info.write(locked=True)

        dsdb_runner = DSDBRunner(logger=logger)
        dsdb_runner.update_host(dbmachine, oldinfo)
        dsdb_runner.commit_or_rollback("Could not add host to DSDB")
    except:
        plenary_info.restore_stash()
        raise
    finally:
        lock_queue.release(key)

    if dbmachine.host:
        # XXX: Host needs to be reconfigured.
        pass

    for name, value in audit_results:
        self.audit_result(session, name, value, **arguments)
    return

def render(self, session, hostname, **kwargs):
    return hostname_to_host(session, hostname)

def render(self, session, hostname, **kwargs):
    host = hostname_to_host(session, hostname)
    return GrnHostList([host])

def render(self, session, logger, hostname, osname, osversion, archetype,
           personality, buildstatus, keepbindings, grn, eon_id, **arguments):
    dbhost = hostname_to_host(session, hostname)

    # Currently, for the Host to be created it *must* be associated with
    # a Machine already. If that ever changes, need to check here and
    # bail if dbhost.machine does not exist.

    if archetype and archetype != dbhost.archetype.name:
        if not personality:
            raise ArgumentError("Changing archetype also requires "
                                "specifying --personality.")
    if personality:
        if archetype:
            dbarchetype = Archetype.get_unique(session, archetype,
                                               compel=True)
            if dbarchetype.cluster_type is not None:
                raise ArgumentError("Archetype %s is a cluster archetype" %
                                    dbarchetype.name)
        else:
            dbarchetype = dbhost.archetype

        if not osname and not osversion and \
           dbhost.operating_system.archetype != dbarchetype:
            raise ArgumentError("{0} belongs to {1:l}, not {2:l}. Please "
                                "specify --osname/--osversion."
                                .format(dbhost.operating_system,
                                        dbhost.operating_system.archetype,
                                        dbarchetype))
        dbpersonality = Personality.get_unique(session, name=personality,
                                               archetype=dbarchetype,
                                               compel=True)
        if dbhost.cluster and dbhost.cluster.allowed_personalities and \
           dbpersonality not in dbhost.cluster.allowed_personalities:
            allowed = ["%s/%s" % (p.archetype.name, p.name) for p in
                       dbhost.cluster.allowed_personalities]
            raise ArgumentError("The {0:l} is not allowed by {1}. "
                                "Specify one of {2}."
                                .format(dbpersonality, dbhost.cluster,
                                        allowed))
        dbhost.personality = dbpersonality

    if not osname:
        osname = dbhost.operating_system.name
    if osname and osversion:
        dbos = OperatingSystem.get_unique(session, name=osname,
                                          version=osversion,
                                          archetype=dbhost.archetype,
                                          compel=True)
        # Hmm... no cluster constraint here...
        dbhost.operating_system = dbos
    elif osname != dbhost.operating_system.name:
        raise ArgumentError("Please specify a version to use for OS %s." %
                            osname)

    if buildstatus:
        dbstatus = HostLifecycle.get_unique(session, buildstatus,
                                            compel=True)
        dbhost.status.transition(dbhost, dbstatus)

    if grn or eon_id:
        dbgrn = lookup_grn(session, grn, eon_id, logger=logger,
                           config=self.config)
        dbhost.owner_grn = dbgrn

    session.flush()

    if dbhost.archetype.is_compileable:
        self.compile(session, dbhost, logger, keepbindings)

    return

def render(self, session, logger, hostname, cluster, personality, **arguments): dbhost = hostname_to_host(session, hostname) dbcluster = Cluster.get_unique(session, cluster, compel=True) if dbcluster.status.name == 'decommissioned': raise ArgumentError("Cannot add hosts to decommissioned clusters.") # We only support changing personality within the same # archetype. The archetype decides things like which OS, how # it builds (dhcp, etc), whether it's compilable, and # switching all of that by side-effect seems wrong # somehow. And besides, it would make the user-interface and # implementation for this command ugly in order to support # changing all of those options. personality_change = False if personality is not None: dbpersonality = Personality.get_unique(session, name=personality, archetype=dbhost.archetype, compel=True) if dbhost.personality != dbpersonality: dbhost.personality = dbpersonality personality_change = True # Allow for non-restricted clusters (the default?) if (len(dbcluster.allowed_personalities) > 0 and dbhost.personality not in dbcluster.allowed_personalities): raise ArgumentError( "The personality %s for %s is not allowed " "by the cluster. Specify --personality " "and provide one of %s" % (dbhost.personality, dbhost.fqdn, ", ".join( [x.name for x in dbcluster.allowed_personalities]))) # Now that we've changed the personality, we can check # if this is a valid membership change dbcluster.validate_membership(dbhost) plenaries = PlenaryCollection(logger=logger) plenaries.append(Plenary.get_plenary(dbcluster)) if dbhost.cluster and dbhost.cluster != dbcluster: logger.client_info("Removing {0:l} from {1:l}.".format( dbhost, dbhost.cluster)) old_cluster = dbhost.cluster old_cluster.hosts.remove(dbhost) remove_service_addresses(old_cluster, dbhost) old_cluster.validate() session.expire(dbhost, ['_cluster']) plenaries.append(Plenary.get_plenary(old_cluster)) # Apply the service addresses to the new member for res in walk_resources(dbcluster): if not isinstance(res, ServiceAddress): continue apply_service_address(dbhost, res.interfaces, res) if dbhost.cluster: if personality_change: raise ArgumentError( "{0:l} already in {1:l}, use " "aq reconfigure to change personality.".format( dbhost, dbhost.cluster)) # the cluster has not changed, therefore there's nothing # to do here. return # Calculate the node index: build a map of all possible values, remove # the used ones, and pick the smallest remaining one node_index_map = set(xrange(len(dbcluster._hosts) + 1)) for link in dbcluster._hosts: # The cluster may have been bigger in the past, so node indexes may # be larger than the current cluster size try: node_index_map.remove(link.node_index) except KeyError: pass dbcluster.hosts.append((dbhost, min(node_index_map))) dbcluster.validate() # demote a host when switching clusters # promote a host when switching clusters if dbhost.status.name == 'ready': if dbcluster.status.name != 'ready': dbalmost = HostLifecycle.get_unique(session, 'almostready', compel=True) dbhost.status.transition(dbhost, dbalmost) plenaries.append(Plenary.get_plenary(dbhost)) elif dbhost.status.name == 'almostready': if dbcluster.status.name == 'ready': dbready = HostLifecycle.get_unique(session, 'ready', compel=True) dbhost.status.transition(dbhost, dbready) plenaries.append(Plenary.get_plenary(dbhost)) session.flush() # Enforce that service instances are set correctly for the # new cluster association. 
chooser = Chooser(dbhost, logger=logger) chooser.set_required() chooser.flush_changes() # the chooser will include the host plenary key = CompileKey.merge( [chooser.get_write_key(), plenaries.get_write_key()]) try: lock_queue.acquire(key) chooser.write_plenary_templates(locked=True) plenaries.write(locked=True) except: chooser.restore_stash() plenaries.restore_stash() raise finally: lock_queue.release(key) return
def render( self, session, logger, # search_cluster archetype, cluster_type, personality, domain, sandbox, branch, buildstatus, allowed_archetype, allowed_personality, down_hosts_threshold, down_maint_threshold, max_members, member_archetype, member_hostname, member_personality, capacity_override, cluster, esx_guest, instance, esx_metacluster, service, share, esx_share, esx_switch, esx_virtual_machine, fullinfo, style, **arguments): if esx_share: self.deprecated_option("esx_share", "Please use --share instead.", logger=logger, **arguments) share = esx_share if cluster_type == 'esx': cls = EsxCluster else: cls = Cluster # Don't load full objects if we only want to show their name if fullinfo or style != 'raw': q = session.query(cls) else: q = session.query(cls.name) # The ORM automatically de-duplicates the result if we query full # objects, but not when we query just the names. Tell the DB to do so. q = q.distinct() (dbbranch, dbauthor) = get_branch_and_author(session, logger, domain=domain, sandbox=sandbox, branch=branch) if dbbranch: q = q.filter_by(branch=dbbranch) if dbauthor: q = q.filter_by(sandbox_author=dbauthor) if archetype: # Added to the searches as appropriate below. dbarchetype = Archetype.get_unique(session, archetype, compel=True) if personality and archetype: dbpersonality = Personality.get_unique(session, archetype=dbarchetype, name=personality, compel=True) q = q.filter_by(personality=dbpersonality) elif personality: PersAlias = aliased(Personality) q = q.join(PersAlias).filter_by(name=personality) q = q.reset_joinpoint() elif archetype: PersAlias = aliased(Personality) q = q.join(PersAlias).filter_by(archetype=dbarchetype) q = q.reset_joinpoint() if buildstatus: dbbuildstatus = ClusterLifecycle.get_unique(session, buildstatus, compel=True) q = q.filter_by(status=dbbuildstatus) if cluster_type: q = q.filter_by(cluster_type=cluster_type) # Go through the arguments and make special dicts for each # specific set of location arguments that are stripped of the # given prefix. location_args = {'cluster_': {}, 'member_': {}} for prefix in location_args.keys(): for (k, v) in arguments.items(): if k.startswith(prefix): # arguments['cluster_building'] = 'dd' # becomes # location_args['cluster_']['building'] = 'dd' location_args[prefix][k.replace(prefix, '')] = v dblocation = get_location(session, **location_args['cluster_']) if dblocation: if location_args['cluster_']['exact_location']: q = q.filter_by(location_constraint=dblocation) else: childids = dblocation.offspring_ids() q = q.filter(Cluster.location_constraint_id.in_(childids)) dblocation = get_location(session, **location_args['member_']) if dblocation: q = q.join('_hosts', 'host', 'machine') if location_args['member_']['exact_location']: q = q.filter_by(location=dblocation) else: childids = dblocation.offspring_ids() q = q.filter(Machine.location_id.in_(childids)) q = q.reset_joinpoint() # esx stuff if cluster: q = q.filter_by(name=cluster) if esx_metacluster: dbmetacluster = MetaCluster.get_unique(session, esx_metacluster, compel=True) q = q.join('_metacluster') q = q.filter_by(metacluster=dbmetacluster) q = q.reset_joinpoint() if esx_virtual_machine: dbvm = Machine.get_unique(session, esx_virtual_machine, compel=True) # TODO: support VMs inside resource groups? q = q.join(ClusterResource, VirtualMachine) q = q.filter_by(machine=dbvm) q = q.reset_joinpoint() if esx_guest: dbguest = hostname_to_host(session, esx_guest) # TODO: support VMs inside resource groups? 
q = q.join(ClusterResource, VirtualMachine, Machine) q = q.filter_by(host=dbguest) q = q.reset_joinpoint() if capacity_override: q = q.filter(EsxCluster.memory_capacity != None) if esx_switch: dbswitch = Switch.get_unique(session, esx_switch, compel=True) q = q.filter_by(switch=dbswitch) if service: dbservice = Service.get_unique(session, name=service, compel=True) if instance: dbsi = ServiceInstance.get_unique(session, name=instance, service=dbservice, compel=True) q = q.filter(Cluster.service_bindings.contains(dbsi)) else: q = q.join('service_bindings') q = q.filter_by(service=dbservice) q = q.reset_joinpoint() elif instance: q = q.join('service_bindings') q = q.filter_by(name=instance) q = q.reset_joinpoint() if share: # Perform sanity check on the share name q2 = session.query(Share) q2 = q2.filter_by(name=share) if not q2.first(): raise NotFoundException("Share %s not found." % share) CR = aliased(ClusterResource) S1 = aliased(Share) S2 = aliased(Share) RG = aliased(ResourceGroup) BR = aliased(BundleResource) q = q.join(CR) q = q.outerjoin((S1, S1.holder_id == CR.id)) q = q.outerjoin((RG, RG.holder_id == CR.id), (BR, BR.resourcegroup_id == RG.id), (S2, S2.holder_id == BR.id)) q = q.filter(or_(S1.name == share, S2.name == share)) q = q.reset_joinpoint() if max_members: q = q.filter_by(max_hosts=max_members) if down_hosts_threshold: (pct, dht) = Cluster.parse_threshold(down_hosts_threshold) q = q.filter_by(down_hosts_percent=pct) q = q.filter_by(down_hosts_threshold=dht) if down_maint_threshold: (pct, dmt) = Cluster.parse_threshold(down_maint_threshold) q = q.filter_by(down_maint_percent=pct) q = q.filter_by(down_maint_threshold=dmt) if allowed_archetype: # Added to the searches as appropriate below. dbaa = Archetype.get_unique(session, allowed_archetype, compel=True) if allowed_personality and allowed_archetype: dbap = Personality.get_unique(session, archetype=dbaa, name=allowed_personality, compel=True) q = q.filter(Cluster.allowed_personalities.contains(dbap)) elif allowed_personality: q = q.join('allowed_personalities') q = q.filter_by(name=allowed_personality) q = q.reset_joinpoint() elif allowed_archetype: q = q.join('allowed_personalities') q = q.filter_by(archetype=dbaa) q = q.reset_joinpoint() if member_hostname: dbhost = hostname_to_host(session, member_hostname) q = q.join('_hosts') q = q.filter_by(host=dbhost) q = q.reset_joinpoint() if member_archetype: # Added to the searches as appropriate below. dbma = Archetype.get_unique(session, member_archetype, compel=True) if member_personality and member_archetype: q = q.join('_hosts', 'host') dbmp = Personality.get_unique(session, archetype=dbma, name=member_personality, compel=True) q = q.filter_by(personality=dbmp) q = q.reset_joinpoint() elif member_personality: q = q.join('_hosts', 'host', 'personality') q = q.filter_by(name=member_personality) q = q.reset_joinpoint() elif member_archetype: q = q.join('_hosts', 'host', 'personality') q = q.filter_by(archetype=dbma) q = q.reset_joinpoint() if cluster_type == 'esx': q = q.order_by(EsxCluster.name) else: q = q.order_by(Cluster.name) if fullinfo: return q.all() return SimpleClusterList(q.all())