def render(self, session, logger, hostname, buildstatus, **arguments):
    dbhost = hostname_to_host(session, hostname)
    dbstatus = HostLifecycle.get_unique(session, buildstatus, compel=True)
    changed = dbhost.status.transition(dbhost, dbstatus)

    if not changed or not dbhost.archetype.is_compileable:
        return

    session.add(dbhost)
    session.flush()

    plenary = PlenaryHost(dbhost, logger=logger)
    # Force a host lock as pan might overwrite the profile...
    key = CompileKey(domain=dbhost.branch.name, profile=dbhost.fqdn,
                     logger=logger)
    try:
        lock_queue.acquire(key)
        plenary.write(locked=True)
        td = TemplateDomain(dbhost.branch, dbhost.sandbox_author,
                            logger=logger)
        td.compile(session, only=[dbhost.fqdn], locked=True)
    except IncompleteError:
        raise ArgumentError("Run aq make for host %s first." % dbhost.fqdn)
    except:
        plenary.restore_stash()
        raise
    finally:
        lock_queue.release(key)
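# Note on the early return above: status.transition() reports whether the
# status actually changed, so no plenary is written and no compile is run
# for no-op transitions or for hosts whose archetype is not compileable.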
def onEnter(self, dbcluster):
    dbdecommissioned = HostLifecycle.get_unique(object_session(dbcluster),
                                                "decommissioned",
                                                compel=True)

    config = Config()
    archetype = dbcluster.personality.archetype
    section = "archetype_" + archetype.name
    opt = "allow_cascaded_deco"

    if dbcluster.hosts and (not config.has_option(section, opt) or
                            not config.getboolean(section, opt)):
        raise ArgumentError("Cannot change state to {0}, as {1}'s "
                            "archetype is {2}."
                            .format(dbdecommissioned.name, dbcluster,
                                    archetype.name))

    if dbcluster.virtual_machines:
        raise ArgumentError("Cannot change state to {0}, as {1} has "
                            "{2} VM(s)."
                            .format(dbdecommissioned.name, dbcluster,
                                    len(dbcluster.virtual_machines)))

    for dbhost in dbcluster.hosts:
        dbhost.status.transition(dbhost, dbdecommissioned)
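# A hedged sketch of the broker config fragment this gate consults; the
# archetype name below is illustrative only. With the option missing or
# false, a cluster that still has member hosts cannot enter
# "decommissioned"; with it true, the member hosts are transitioned along
# with the cluster.
#
#   [archetype_esx_cluster]
#   allow_cascaded_deco = True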
def onLeave(self, dbcluster):
    dbalmostready = HostLifecycle.get_unique(object_session(dbcluster),
                                             "almostready", compel=True)
    for dbhost in dbcluster.hosts:
        if dbhost.status.name == 'ready':
            dbhost.status.transition(dbhost, dbalmostready)
def onEnter(self, dbcluster):
    dbdecommissioned = HostLifecycle.get_unique(object_session(dbcluster),
                                                "decommissioned",
                                                compel=True)

    config = Config()
    archetype = dbcluster.personality.archetype
    section = "archetype_" + archetype.name
    opt = "allow_cascaded_deco"

    if dbcluster.hosts and (not config.has_option(section, opt) or
                            not config.getboolean(section, opt)):
        raise ArgumentError("Cannot change state to {0}, as {1}'s "
                            "archetype is {2}."
                            .format(dbdecommissioned.name, dbcluster,
                                    archetype.name))

    if dbcluster.machines:
        raise ArgumentError("Cannot change state to {0}, as {1} has "
                            "{2} VM(s)."
                            .format(dbdecommissioned.name, dbcluster,
                                    len(dbcluster.machines)))

    for dbhost in dbcluster.hosts:
        dbhost.status.transition(dbhost, dbdecommissioned)
def validate_rebuild_required(session, path, param_holder):
    """ check if this parameter requires hosts to be in non-ready state """
    q = session.query(Host)
    dbready = HostLifecycle.get_unique(session, "ready", compel=True)
    dbalmostready = HostLifecycle.get_unique(session, "almostready",
                                             compel=True)
    q = q.filter(or_(Host.status == dbready,
                     Host.status == dbalmostready))
    personality = param_holder.personality
    if isinstance(param_holder, PersonalityParameter):
        q = q.filter_by(personality=personality)
    if q.count():
        raise ArgumentError("Modifying the value of parameter %s requires "
                            "a host rebuild. There are hosts associated "
                            "with the personality that are still in the "
                            "ready or almostready state. Please set these "
                            "hosts to status rebuild to continue. "
                            "Run 'aq search host --personality %s "
                            "--buildstatus ready' and 'aq search host "
                            "--personality %s --buildstatus almostready' "
                            "to get the list of affected hosts." %
                            (path, personality, personality))
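# Hypothetical call-site sketch (the parameter path below is illustrative):
# callers pass the path being modified plus the holder whose personality
# scopes the check; the ArgumentError propagates to the client when bound
# hosts are still in the "ready" or "almostready" state.
#
#   validate_rebuild_required(session, "espinfo/function", param_holder)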
def render(self, session, logger, hostname, machine, archetype, domain,
           sandbox, osname, osversion, buildstatus, personality, comments,
           zebra_interfaces, grn, eon_id, skip_dsdb_check=False,
           **arguments):
    dbarchetype = Archetype.get_unique(session, archetype, compel=True)
    section = "archetype_" + dbarchetype.name

    # This is for the various add_*_host commands
    if not domain and not sandbox:
        domain = self.config.get(section, "host_domain")

    (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                 domain=domain,
                                                 sandbox=sandbox,
                                                 compel=True)

    if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
        raise ArgumentError("Adding hosts to {0:l} is not allowed."
                            .format(dbbranch))

    if not buildstatus:
        buildstatus = 'build'
    dbstatus = HostLifecycle.get_unique(session, buildstatus, compel=True)

    dbmachine = Machine.get_unique(session, machine, compel=True)
    oldinfo = DSDBRunner.snapshot_hw(dbmachine)

    if not personality:
        if self.config.has_option(section, "default_personality"):
            personality = self.config.get(section, "default_personality")
        else:
            personality = 'generic'
    dbpersonality = Personality.get_unique(session, name=personality,
                                           archetype=dbarchetype,
                                           compel=True)

    if not osname:
        if self.config.has_option(section, "default_osname"):
            osname = self.config.get(section, "default_osname")
    if not osversion:
        if self.config.has_option(section, "default_osversion"):
            osversion = self.config.get(section, "default_osversion")

    if not osname or not osversion:
        raise ArgumentError("Cannot determine a sensible default OS "
                            "for archetype %s. Please use the "
                            "--osname and --osversion parameters." %
                            (dbarchetype.name))
    dbos = OperatingSystem.get_unique(session, name=osname,
                                      version=osversion,
                                      archetype=dbarchetype, compel=True)

    if (dbmachine.model.machine_type == 'aurora_node' and
            dbpersonality.archetype.name != 'aurora'):
        raise ArgumentError("Machines of type aurora_node can only be "
                            "added with archetype aurora.")

    if dbmachine.host:
        raise ArgumentError("{0:c} {0.label} is already allocated to "
                            "{1:l}.".format(dbmachine, dbmachine.host))

    if grn or eon_id:
        dbgrn = lookup_grn(session, grn, eon_id, logger=logger,
                           config=self.config)
    else:
        dbgrn = dbpersonality.owner_grn

    dbhost = Host(machine=dbmachine, branch=dbbranch, owner_grn=dbgrn,
                  sandbox_author=dbauthor, personality=dbpersonality,
                  status=dbstatus, operating_system=dbos,
                  comments=comments)
    session.add(dbhost)

    if self.config.has_option("archetype_" + archetype,
                              "default_grn_target"):
        dbhost.grns.append((dbhost, dbgrn,
                            self.config.get("archetype_" + archetype,
                                            "default_grn_target")))

    if zebra_interfaces:
        # --autoip does not make sense for Zebra (at least not the way
        # it's implemented currently)
        dbinterface = None
    else:
        dbinterface = get_boot_interface(dbmachine)

    # This method is allowed to return None. This can only happen
    # (currently) using add_aurora_host, add_windows_host, or possibly by
    # bypassing the aq client and posting a request directly.
    audit_results = []
    ip = generate_ip(session, logger, dbinterface,
                     audit_results=audit_results, **arguments)

    dbdns_rec, newly_created = grab_address(session, hostname, ip,
                                            allow_restricted_domain=True,
                                            allow_reserved=True,
                                            preclude=True)
    dbmachine.primary_name = dbdns_rec

    # Fix up auxiliary addresses to point to the primary name by default
    if ip:
        dns_env = dbdns_rec.fqdn.dns_environment

        for addr in dbmachine.all_addresses():
            if addr.interface.interface_type == "management":
                continue
            if addr.service_address_id:  # pragma: no cover
                continue
            for rec in addr.dns_records:
                if rec.fqdn.dns_environment == dns_env:
                    rec.reverse_ptr = dbdns_rec.fqdn

    if zebra_interfaces:
        if not ip:
            raise ArgumentError("Zebra configuration requires an IP "
                                "address.")
        dbsrv_addr = self.assign_zebra_address(session, dbmachine,
                                               dbdns_rec, zebra_interfaces)
    else:
        if ip:
            if not dbinterface:
                raise ArgumentError("You have specified an IP address for "
                                    "the host, but {0:l} does not have a "
                                    "bootable interface."
                                    .format(dbmachine))
            assign_address(dbinterface, ip, dbdns_rec.network)
        dbsrv_addr = None

    session.flush()

    plenaries = PlenaryCollection(logger=logger)
    plenaries.append(Plenary.get_plenary(dbmachine))
    if dbmachine.vm_container:
        plenaries.append(Plenary.get_plenary(dbmachine.vm_container))
    if dbsrv_addr:
        plenaries.append(Plenary.get_plenary(dbsrv_addr))

    key = plenaries.get_write_key()
    try:
        lock_queue.acquire(key)
        plenaries.write(locked=True)

        # XXX: This (and some of the code above) is horrible. There
        # should be a generic/configurable hook here that could kick
        # in based on archetype and/or domain.
        dsdb_runner = DSDBRunner(logger=logger)
        if dbhost.archetype.name == 'aurora':
            # For aurora, check that DSDB has a record of the host.
            if not skip_dsdb_check:
                try:
                    dsdb_runner.show_host(hostname)
                except ProcessException, e:
                    raise ArgumentError("Could not find host in DSDB: %s" %
                                        e)
        elif not dbmachine.primary_ip:
            logger.info("No IP for %s, not adding to DSDB." %
                        dbmachine.fqdn)
def render(self, session, logger, hostname, cluster, personality,
           **arguments):
    dbhost = hostname_to_host(session, hostname)
    dbcluster = Cluster.get_unique(session, cluster, compel=True)

    if dbcluster.status.name == 'decommissioned':
        raise ArgumentError("Cannot add hosts to decommissioned clusters.")

    # We only support changing personality within the same
    # archetype. The archetype decides things like which OS, how
    # it builds (dhcp, etc), whether it's compilable, and
    # switching all of that by side-effect seems wrong
    # somehow. And besides, it would make the user-interface and
    # implementation for this command ugly in order to support
    # changing all of those options.
    personality_change = False
    if personality is not None:
        dbpersonality = Personality.get_unique(session, name=personality,
                                               archetype=dbhost.archetype,
                                               compel=True)
        if dbhost.personality != dbpersonality:
            dbhost.personality = dbpersonality
            personality_change = True

    # Allow for non-restricted clusters (the default?)
    if (len(dbcluster.allowed_personalities) > 0 and
            dbhost.personality not in dbcluster.allowed_personalities):
        raise ArgumentError("The personality %s for %s is not allowed "
                            "by the cluster. Specify --personality "
                            "and provide one of %s" %
                            (dbhost.personality, dbhost.fqdn,
                             ", ".join([x.name for x in
                                        dbcluster.allowed_personalities])))

    # Now that we've changed the personality, we can check
    # if this is a valid membership change
    dbcluster.validate_membership(dbhost)

    plenaries = PlenaryCollection(logger=logger)
    plenaries.append(Plenary.get_plenary(dbcluster))

    if dbhost.cluster and dbhost.cluster != dbcluster:
        logger.client_info("Removing {0:l} from {1:l}."
                           .format(dbhost, dbhost.cluster))
        old_cluster = dbhost.cluster
        old_cluster.hosts.remove(dbhost)
        remove_service_addresses(old_cluster, dbhost)
        old_cluster.validate()
        session.expire(dbhost, ['_cluster'])
        plenaries.append(Plenary.get_plenary(old_cluster))

    # Apply the service addresses to the new member
    for res in walk_resources(dbcluster):
        if not isinstance(res, ServiceAddress):
            continue
        apply_service_address(dbhost, res.interfaces, res)

    if dbhost.cluster:
        if personality_change:
            raise ArgumentError("{0:l} already in {1:l}, use "
                                "aq reconfigure to change personality."
                                .format(dbhost, dbhost.cluster))
        # the cluster has not changed, therefore there's nothing
        # to do here.
        return

    # Calculate the node index: build a map of all possible values, remove
    # the used ones, and pick the smallest remaining one
    node_index_map = set(xrange(len(dbcluster._hosts) + 1))
    for link in dbcluster._hosts:
        # The cluster may have been bigger in the past, so node indexes
        # may be larger than the current cluster size
        try:
            node_index_map.remove(link.node_index)
        except KeyError:
            pass

    dbcluster.hosts.append((dbhost, min(node_index_map)))
    dbcluster.validate()

    # demote a host when switching clusters
    # promote a host when switching clusters
    if dbhost.status.name == 'ready':
        if dbcluster.status.name != 'ready':
            dbalmost = HostLifecycle.get_unique(session, 'almostready',
                                                compel=True)
            dbhost.status.transition(dbhost, dbalmost)
            plenaries.append(Plenary.get_plenary(dbhost))
    elif dbhost.status.name == 'almostready':
        if dbcluster.status.name == 'ready':
            dbready = HostLifecycle.get_unique(session, 'ready',
                                               compel=True)
            dbhost.status.transition(dbhost, dbready)
            plenaries.append(Plenary.get_plenary(dbhost))

    session.flush()

    # Enforce that service instances are set correctly for the
    # new cluster association.
    chooser = Chooser(dbhost, logger=logger)
    chooser.set_required()
    chooser.flush_changes()

    # the chooser will include the host plenary
    key = CompileKey.merge([chooser.get_write_key(),
                            plenaries.get_write_key()])
    try:
        lock_queue.acquire(key)
        chooser.write_plenary_templates(locked=True)
        plenaries.write(locked=True)
    except:
        chooser.restore_stash()
        plenaries.restore_stash()
        raise
    finally:
        lock_queue.release(key)

    return
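# A minimal, self-contained sketch (illustrative name) of the node-index
# allocation above: with N used indexes, xrange(N + 1) always contains at
# least one free value, so min() of the remaining candidates is well
# defined even when old indexes exceed the current cluster size.
def smallest_free_node_index(used_indexes):
    candidates = set(xrange(len(used_indexes) + 1))
    for idx in used_indexes:
        # discard() ignores values outside the candidate set, mirroring
        # the try/except KeyError in the command above.
        candidates.discard(idx)
    return min(candidates)

# smallest_free_node_index([0, 1, 3]) == 2
# smallest_free_node_index([5, 6, 7]) == 0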
def render(self, session, logger, hostname, osname, osversion, archetype,
           personality, buildstatus, keepbindings, grn, eon_id,
           **arguments):
    dbhost = hostname_to_host(session, hostname)

    # Currently, for the Host to be created it *must* be associated with
    # a Machine already. If that ever changes, need to check here and
    # bail if dbhost.machine does not exist.

    if archetype and archetype != dbhost.archetype.name:
        if not personality:
            raise ArgumentError("Changing archetype also requires "
                                "specifying --personality.")
    if personality:
        if archetype:
            dbarchetype = Archetype.get_unique(session, archetype,
                                               compel=True)
            if dbarchetype.cluster_type is not None:
                raise ArgumentError("Archetype %s is a cluster archetype" %
                                    dbarchetype.name)
        else:
            dbarchetype = dbhost.archetype

        if not osname and not osversion and \
           dbhost.operating_system.archetype != dbarchetype:
            raise ArgumentError("{0} belongs to {1:l}, not {2:l}. Please "
                                "specify --osname/--osversion."
                                .format(dbhost.operating_system,
                                        dbhost.operating_system.archetype,
                                        dbarchetype))
        dbpersonality = Personality.get_unique(session, name=personality,
                                               archetype=dbarchetype,
                                               compel=True)
        if dbhost.cluster and dbhost.cluster.allowed_personalities and \
           dbpersonality not in dbhost.cluster.allowed_personalities:
            allowed = ["%s/%s" % (p.archetype.name, p.name) for p in
                       dbhost.cluster.allowed_personalities]
            raise ArgumentError("The {0:l} is not allowed by {1}. "
                                "Specify one of {2}."
                                .format(dbpersonality, dbhost.cluster,
                                        allowed))
        dbhost.personality = dbpersonality

    if not osname:
        osname = dbhost.operating_system.name
    if osname and osversion:
        dbos = OperatingSystem.get_unique(session, name=osname,
                                          version=osversion,
                                          archetype=dbhost.archetype,
                                          compel=True)
        # Hmm... no cluster constraint here...
        dbhost.operating_system = dbos
    elif osname != dbhost.operating_system.name:
        raise ArgumentError("Please specify a version to use for OS %s." %
                            osname)

    if buildstatus:
        dbstatus = HostLifecycle.get_unique(session, buildstatus,
                                            compel=True)
        dbhost.status.transition(dbhost, dbstatus)

    if grn or eon_id:
        dbgrn = lookup_grn(session, grn, eon_id, logger=logger,
                           config=self.config)
        dbhost.owner_grn = dbgrn

    session.flush()

    if dbhost.archetype.is_compileable:
        self.compile(session, dbhost, logger, keepbindings)

    return
def reconfigure_list(self, session, logger, dbhosts, archetype,
                     personality, buildstatus, osname, osversion,
                     **arguments):
    failed = []
    # Check all the parameters up front.
    # Some of these could be more intelligent about defaults
    # (either by checking for unique entries or relying on the list)
    # - starting simple.
    if archetype:
        dbarchetype = Archetype.get_unique(session, archetype, compel=True)
        if dbarchetype.cluster_type is not None:
            raise ArgumentError("Archetype %s is a cluster archetype" %
                                dbarchetype.name)
        # TODO: Once OS is a first class object this block needs
        # to check that either OS is also being reset or that the
        # OS is valid for the new archetype.
    else:
        dbarchetype = None
    if personality:
        dbpersonality = Personality.get_unique(session, name=personality,
                                               archetype=dbarchetype,
                                               compel=True)
    if osname and not osversion:
        raise ArgumentError("Please specify --osversion for OS %s." %
                            osname)
    if osversion:
        if not osname:
            raise ArgumentError("Please specify --osname to use with "
                                "OS version %s." % osversion)
        # Linux model names are the same under aurora and aquilon, so
        # allowing to omit --archetype would not be useful
        if not archetype:
            raise ArgumentError("Please specify --archetype for OS "
                                "%s, version %s." % (osname, osversion))
        dbos = OperatingSystem.get_unique(session, name=osname,
                                          version=osversion,
                                          archetype=dbarchetype,
                                          compel=True)
    else:
        dbos = None

    if buildstatus:
        dbstatus = HostLifecycle.get_unique(session, buildstatus,
                                            compel=True)

    # Take a shortcut if there's nothing to do, but only after all the
    # other parameters have been checked
    if not dbhosts:
        return

    personalities = {}
    branches = {}
    authors = {}
    # Do any final cross-list or dependency checks before entering
    # the Chooser loop.
    for dbhost in dbhosts:
        if dbhost.branch in branches:
            branches[dbhost.branch].append(dbhost)
        else:
            branches[dbhost.branch] = [dbhost]
        if dbhost.sandbox_author in authors:
            authors[dbhost.sandbox_author].append(dbhost)
        else:
            authors[dbhost.sandbox_author] = [dbhost]

        if dbos and not dbarchetype and dbhost.archetype != dbos.archetype:
            failed.append("{0}: Cannot change operating system because it "
                          "needs {1:l} instead of {2:l}."
                          .format(dbhost.fqdn, dbhost.archetype,
                                  dbos.archetype))
        if dbarchetype and not dbos and \
           dbhost.operating_system.archetype != dbarchetype:
            failed.append("{0}: Cannot change archetype because {1:l} "
                          "needs {2:l}."
                          .format(dbhost.fqdn, dbhost.operating_system,
                                  dbhost.operating_system.archetype))
        if (personality and dbhost.cluster and
                len(dbhost.cluster.allowed_personalities) > 0 and
                dbpersonality not in
                dbhost.cluster.allowed_personalities):
            allowed = ["%s/%s" % (p.archetype.name, p.name) for p in
                       dbhost.cluster.allowed_personalities]
            failed.append("{0}: The {1:l} is not allowed by {2}. "
                          "Specify one of {3}."
                          .format(dbhost.fqdn, dbpersonality,
                                  dbhost.cluster, allowed))
        if personality:
            personalities[dbhost.fqdn] = dbpersonality
        elif archetype:
            personalities[dbhost.fqdn] = \
                Personality.get_unique(session,
                                       name=dbhost.personality.name,
                                       archetype=dbarchetype)
            if not personalities[dbhost.fqdn]:
                failed.append("%s: No personality %s found for archetype "
                              "%s." %
                              (dbhost.fqdn, dbhost.personality.name,
                               dbarchetype.name))

    if failed:
        raise ArgumentError("Cannot modify the following hosts:\n%s" %
                            "\n".join(failed))
    if len(branches) > 1:
        keys = branches.keys()
        branch_sort = lambda x, y: cmp(len(branches[x]), len(branches[y]))
        keys.sort(cmp=branch_sort)
        stats = ["{0:d} hosts in {1:l}".format(len(branches[branch]),
                                               branch)
                 for branch in keys]
        raise ArgumentError("All hosts must be in the same domain or "
                            "sandbox:\n%s" % "\n".join(stats))
    dbbranch = branches.keys()[0]
    if len(authors) > 1:
        keys = authors.keys()
        author_sort = lambda x, y: cmp(len(authors[x]), len(authors[y]))
        keys.sort(cmp=author_sort)
        stats = ["%s hosts with sandbox author %s" %
                 (len(authors[author]), author.name) for author in keys]
        raise ArgumentError("All hosts must be managed by the same "
                            "sandbox author:\n%s" % "\n".join(stats))
    dbauthor = authors.keys()[0]

    failed = []
    choosers = []
    for dbhost in dbhosts:
        if dbhost.fqdn in personalities:
            dbhost.personality = personalities[dbhost.fqdn]
            session.add(dbhost)
        if osversion:
            dbhost.operating_system = dbos
            session.add(dbhost)
        if buildstatus:
            dbhost.status.transition(dbhost, dbstatus)
            session.add(dbhost)
    session.flush()

    logger.client_info("Verifying service bindings.")
    for dbhost in dbhosts:
        if dbhost.archetype.is_compileable:
            if arguments.get("keepbindings", None):
                chooser = Chooser(dbhost, logger=logger,
                                  required_only=False)
            else:
                chooser = Chooser(dbhost, logger=logger,
                                  required_only=True)
            choosers.append(chooser)
            try:
                chooser.set_required()
            except ArgumentError, e:
                failed.append(str(e))
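# Note on the cmp-based sorts above (a Python 2 idiom): the branch and
# author buckets are ordered by how many hosts landed in each, so the
# error report lists the smallest groups first. A key-based equivalent
# would be keys.sort(key=lambda b: len(branches[b])).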
def render(self, session, logger, hostname, machine, archetype,
           buildstatus, personality, osname, osversion, service, instance,
           model, machine_type, vendor, serial, cluster, guest_on_cluster,
           guest_on_share, member_cluster_share, domain, sandbox, branch,
           sandbox_owner, dns_domain, shortname, mac, ip, networkip,
           network_environment, exact_location, server_of_service,
           server_of_instance, grn, eon_id, fullinfo, **arguments):
    dbnet_env = NetworkEnvironment.get_unique_or_default(
        session, network_environment)

    q = session.query(Host)

    if machine:
        dbmachine = Machine.get_unique(session, machine, compel=True)
        q = q.filter_by(machine=dbmachine)

    # Add the machine definition and the primary name. Use aliases to make
    # sure the end result will be ordered by primary name.
    PriDns = aliased(DnsRecord)
    PriFqdn = aliased(Fqdn)
    PriDomain = aliased(DnsDomain)
    q = q.join(Machine,
               (PriDns, PriDns.id == Machine.primary_name_id),
               (PriFqdn, PriDns.fqdn_id == PriFqdn.id),
               (PriDomain, PriFqdn.dns_domain_id == PriDomain.id))
    q = q.order_by(PriFqdn.name, PriDomain.name)
    q = q.options(contains_eager('machine'),
                  contains_eager('machine.primary_name', alias=PriDns),
                  contains_eager('machine.primary_name.fqdn',
                                 alias=PriFqdn),
                  contains_eager('machine.primary_name.fqdn.dns_domain',
                                 alias=PriDomain))
    q = q.reset_joinpoint()

    # Hardware-specific filters
    dblocation = get_location(session, **arguments)
    if dblocation:
        if exact_location:
            q = q.filter(Machine.location == dblocation)
        else:
            childids = dblocation.offspring_ids()
            q = q.filter(Machine.location_id.in_(childids))

    if model or vendor or machine_type:
        subq = Model.get_matching_query(session, name=model, vendor=vendor,
                                        machine_type=machine_type,
                                        compel=True)
        q = q.filter(Machine.model_id.in_(subq))

    if serial:
        self.deprecated_option("serial", "Please use search machine "
                               "--serial instead.", logger=logger,
                               **arguments)
        q = q.filter(Machine.serial_no == serial)

    # DNS IP address related filters
    if mac or ip or networkip or hostname or dns_domain or shortname:
        # Inner joins are cheaper than outer joins, so make some effort to
        # use inner joins when possible
        if mac or ip or networkip:
            q = q.join(Interface)
        else:
            q = q.outerjoin(Interface)
        if ip or networkip:
            q = q.join(AddressAssignment, Network, from_joinpoint=True)
        else:
            q = q.outerjoin(AddressAssignment, Network,
                            from_joinpoint=True)

        if mac:
            self.deprecated_option("mac", "Please use search machine "
                                   "--mac instead.", logger=logger,
                                   **arguments)
            q = q.filter(Interface.mac == mac)
        if ip:
            q = q.filter(AddressAssignment.ip == ip)
            q = q.filter(Network.network_environment == dbnet_env)
        if networkip:
            dbnetwork = get_network_byip(session, networkip, dbnet_env)
            q = q.filter(AddressAssignment.network == dbnetwork)

        dbdns_domain = None
        if hostname:
            (shortname, dbdns_domain) = parse_fqdn(session, hostname)
        if dns_domain:
            dbdns_domain = DnsDomain.get_unique(session, dns_domain,
                                                compel=True)

        if shortname or dbdns_domain:
            ARecAlias = aliased(ARecord)
            ARecFqdn = aliased(Fqdn)
            q = q.outerjoin((ARecAlias,
                             and_(ARecAlias.ip == AddressAssignment.ip,
                                  ARecAlias.network_id ==
                                  AddressAssignment.network_id)),
                            (ARecFqdn, ARecAlias.fqdn_id == ARecFqdn.id))
            if shortname:
                q = q.filter(or_(ARecFqdn.name == shortname,
                                 PriFqdn.name == shortname))
            if dbdns_domain:
                q = q.filter(or_(ARecFqdn.dns_domain == dbdns_domain,
                                 PriFqdn.dns_domain == dbdns_domain))
        q = q.reset_joinpoint()

    (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                 domain=domain,
                                                 sandbox=sandbox,
                                                 branch=branch)
    if sandbox_owner:
        dbauthor = get_user_principal(session, sandbox_owner)

    if dbbranch:
        q = q.filter_by(branch=dbbranch)
    if dbauthor:
        q = q.filter_by(sandbox_author=dbauthor)

    if archetype:
        # Added to the searches as appropriate below.
        dbarchetype = Archetype.get_unique(session, archetype, compel=True)
    if personality and archetype:
        dbpersonality = Personality.get_unique(session,
                                               archetype=dbarchetype,
                                               name=personality,
                                               compel=True)
        q = q.filter_by(personality=dbpersonality)
    elif personality:
        PersAlias = aliased(Personality)
        q = q.join(PersAlias).filter_by(name=personality)
        q = q.reset_joinpoint()
    elif archetype:
        PersAlias = aliased(Personality)
        q = q.join(PersAlias).filter_by(archetype=dbarchetype)
        q = q.reset_joinpoint()

    if buildstatus:
        dbbuildstatus = HostLifecycle.get_unique(session, buildstatus,
                                                 compel=True)
        q = q.filter_by(status=dbbuildstatus)

    if osname and osversion and archetype:
        # archetype was already resolved above
        dbos = OperatingSystem.get_unique(session, name=osname,
                                          version=osversion,
                                          archetype=dbarchetype,
                                          compel=True)
        q = q.filter_by(operating_system=dbos)
    elif osname or osversion:
        q = q.join('operating_system')
        if osname:
            q = q.filter_by(name=osname)
        if osversion:
            q = q.filter_by(version=osversion)
        q = q.reset_joinpoint()

    if service:
        dbservice = Service.get_unique(session, service, compel=True)
        if instance:
            dbsi = get_service_instance(session, dbservice, instance)
            q = q.filter(Host.services_used.contains(dbsi))
        else:
            q = q.join('services_used')
            q = q.filter_by(service=dbservice)
            q = q.reset_joinpoint()
    elif instance:
        q = q.join('services_used')
        q = q.filter_by(name=instance)
        q = q.reset_joinpoint()

    if server_of_service:
        dbserver_service = Service.get_unique(session, server_of_service,
                                              compel=True)
        if server_of_instance:
            dbssi = get_service_instance(session, dbserver_service,
                                         server_of_instance)
            q = q.join('_services_provided')
            q = q.filter_by(service_instance=dbssi)
            q = q.reset_joinpoint()
        else:
            q = q.join('_services_provided', 'service_instance')
            q = q.filter_by(service=dbserver_service)
            q = q.reset_joinpoint()
    elif server_of_instance:
        q = q.join('_services_provided', 'service_instance')
        q = q.filter_by(name=server_of_instance)
        q = q.reset_joinpoint()

    if cluster:
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        if isinstance(dbcluster, MetaCluster):
            q = q.join('_cluster', 'cluster', '_metacluster')
            q = q.filter_by(metacluster=dbcluster)
        else:
            q = q.filter_by(cluster=dbcluster)
        q = q.reset_joinpoint()
    if guest_on_cluster:
        # TODO: this does not handle metaclusters according to Wes
        dbcluster = Cluster.get_unique(session, guest_on_cluster,
                                       compel=True)
        q = q.join('machine', VirtualMachine, ClusterResource)
        q = q.filter_by(cluster=dbcluster)
        q = q.reset_joinpoint()
    if guest_on_share:
        # v2
        v2shares = session.query(Share.id).filter_by(
            name=guest_on_share).all()
        if not v2shares:
            raise NotFoundException("No shares found with name {0}."
                                    .format(guest_on_share))

        NasAlias = aliased(VirtualDisk)
        q = q.join('machine', 'disks', (NasAlias, NasAlias.id == Disk.id))
        q = q.filter(NasAlias.share_id.in_(map(lambda s: s[0], v2shares)))
        q = q.reset_joinpoint()

    if member_cluster_share:
        # v2
        v2shares = session.query(Share.id).filter_by(
            name=member_cluster_share).all()
        if not v2shares:
            raise NotFoundException("No shares found with name {0}."
                                    .format(member_cluster_share))

        NasAlias = aliased(VirtualDisk)
        q = q.join('_cluster', 'cluster', 'resholder', VirtualMachine,
                   'machine', 'disks', (NasAlias, NasAlias.id == Disk.id))
        q = q.filter(NasAlias.share_id.in_(map(lambda s: s[0], v2shares)))
        q = q.reset_joinpoint()

    if grn or eon_id:
        dbgrn = lookup_grn(session, grn, eon_id, autoupdate=False)

        persq = session.query(Personality.id)
        persq = persq.outerjoin(PersonalityGrnMap)
        persq = persq.filter(or_(Personality.owner_eon_id == dbgrn.eon_id,
                                 PersonalityGrnMap.eon_id == dbgrn.eon_id))
        q = q.outerjoin(HostGrnMap)
        q = q.filter(or_(Host.owner_eon_id == dbgrn.eon_id,
                         HostGrnMap.eon_id == dbgrn.eon_id,
                         Host.personality_id.in_(persq.subquery())))
        q = q.reset_joinpoint()

    if fullinfo:
        return q.all()
    return SimpleHostList(q.all())
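# The GRN filter above matches a host when any of three links exists:
#   1. the host owns the GRN directly (Host.owner_eon_id),
#   2. a HostGrnMap row maps the GRN onto the host, or
#   3. the host's personality owns or maps the GRN (the persq subquery).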
def refresh_windows_hosts(self, session, logger, containers):
    conn = sqlite3.connect(self.config.get("broker", "windows_host_info"))
    # Enable dictionary-style access to the rows.
    conn.row_factory = sqlite3.Row

    windows_hosts = {}
    interfaces = {}
    cur = conn.cursor()
    # There are more fields in the dataset like machine and
    # aqhostname that might be useful for error messages but these
    # are sufficient.
    cur.execute("select ether, windowshostname from machines")
    for row in cur:
        host = row["windowshostname"]
        if host:
            host = host.strip().lower()
        else:
            continue
        mac = row["ether"]
        if mac:
            mac = mac.strip().lower()
        windows_hosts[host] = mac
        interfaces[mac] = host

    success = []
    failed = []

    q = session.query(Host)
    q = q.filter_by(comments='Created by refresh_windows_host')
    for dbhost in q.all():
        mac_addresses = [iface.mac for iface in dbhost.machine.interfaces]
        if dbhost.fqdn in windows_hosts and \
           windows_hosts[dbhost.fqdn] in mac_addresses:
            # All is well
            continue
        deps = get_host_dependencies(session, dbhost)
        if deps:
            msg = "Skipping removal of host %s with dependencies: %s" % \
                (dbhost.fqdn, ", ".join(deps))
            failed.append(msg)
            logger.info(msg)
            continue
        dbmachine = dbhost.machine
        success.append("Removed host entry for %s (%s)" %
                       (dbmachine.label, dbmachine.fqdn))
        if dbmachine.vm_container:
            containers.add(dbmachine.vm_container)
        session.delete(dbhost)
        dbdns_rec = dbmachine.primary_name
        dbmachine.primary_name = None
        delete_dns_record(dbdns_rec)
    session.flush()

    # The Host() creations below fail when autoflush is enabled.
    session.autoflush = False

    dbdomain = Domain.get_unique(session,
                                 self.config.get("archetype_windows",
                                                 "host_domain"),
                                 compel=InternalError)
    dbarchetype = Archetype.get_unique(session, "windows",
                                       compel=InternalError)
    dbpersonality = Personality.get_unique(session, archetype=dbarchetype,
                                           name="generic",
                                           compel=InternalError)
    dbstatus = HostLifecycle.get_unique(session, "ready",
                                        compel=InternalError)
    dbos = OperatingSystem.get_unique(session, name="windows",
                                      version="generic",
                                      archetype=dbarchetype,
                                      compel=InternalError)

    for (host, mac) in windows_hosts.items():
        try:
            (short, dbdns_domain) = parse_fqdn(session, host)
        except AquilonError, err:
            msg = "Skipping host %s: %s" % (host, err)
            failed.append(msg)
            logger.info(msg)
            continue
        existing = DnsRecord.get_unique(session, name=short,
                                        dns_domain=dbdns_domain)
        if existing:
            if not existing.hardware_entity:
                msg = "Skipping host %s: It is not a primary name." % host
                failed.append(msg)
                logger.info(msg)
                continue
            # If these are invalid there should have been a deletion
            # attempt above.
            if not existing.hardware_entity.interfaces:
                msg = "Skipping host %s: Host already exists but has " \
                    "no interface attached." % host
                failed.append(msg)
                logger.info(msg)
            elif existing.hardware_entity.interfaces[0].mac != mac:
                msg = "Skipping host %s: Host already exists but with " \
                    "MAC address %s and not %s." % \
                    (host, existing.hardware_entity.interfaces[0].mac,
                     mac)
                failed.append(msg)
                logger.info(msg)
            continue
        dbinterface = session.query(Interface).filter_by(mac=mac).first()
        if not dbinterface:
            msg = "Skipping host %s: MAC address %s is not present in " \
                "AQDB." % (host, mac)
            failed.append(msg)
            logger.info(msg)
            continue
        q = session.query(Machine)
        q = q.filter_by(id=dbinterface.hardware_entity.id)
        dbmachine = q.first()
        if not dbmachine:
            msg = "Skipping host %s: The AQDB interface with MAC " \
                "address %s is tied to hardware %s instead of a " \
                "virtual machine." % \
                (host, mac, dbinterface.hardware_entity.label)
            failed.append(msg)
            logger.info(msg)
            continue
        if dbinterface.assignments:
            msg = "Skipping host %s: The AQDB interface with MAC " \
                "address %s is already tied to %s." % \
                (host, mac, dbinterface.assignments[0].fqdns[0])
            failed.append(msg)
            logger.info(msg)
            continue
        if dbmachine.host:
            msg = "Skipping host %s: The AQDB interface with MAC " \
                "address %s is already tied to %s." % \
                (host, mac, dbmachine.fqdn)
            failed.append(msg)
            logger.info(msg)
            continue
        dbhost = Host(machine=dbmachine, branch=dbdomain, status=dbstatus,
                      owner_grn=dbpersonality.owner_grn,
                      personality=dbpersonality, operating_system=dbos,
                      comments="Created by refresh_windows_host")
        session.add(dbhost)

        if self.config.has_option("archetype_windows",
                                  "default_grn_target"):
            # Assumption: the mapped GRN mirrors the owner GRN the host
            # was created with above.
            dbhost.grns.append((dbhost, dbpersonality.owner_grn,
                                self.config.get("archetype_windows",
                                                "default_grn_target")))

        dbfqdn = Fqdn.get_or_create(session, name=short,
                                    dns_domain=dbdns_domain, preclude=True)
        dbdns_rec = ReservedName(fqdn=dbfqdn)
        session.add(dbdns_rec)
        dbmachine.primary_name = dbdns_rec
        success.append("Added host entry for %s (%s)." %
                       (dbmachine.label, dbdns_rec.fqdn))
        if dbmachine.vm_container:
            containers.add(dbmachine.vm_container)
        session.flush()
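# A minimal, standalone sketch of the sqlite3 access pattern used at the
# top of refresh_windows_hosts; the database path here is illustrative:
import sqlite3

conn = sqlite3.connect("windows_host_info.db")
conn.row_factory = sqlite3.Row  # rows now support row["column"] access
for row in conn.execute("select ether, windowshostname from machines"):
    print row["windowshostname"], row["ether"]
conn.close()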