def remove_service_addresses(dbcluster, dbhost):
    """Remove the cluster's service addresses from the host's interfaces."""
    for res in walk_resources(dbcluster):
        if not isinstance(res, ServiceAddress):
            continue

        # The interface names are stored implicitly in the AddressAssignment
        # objects, so we can't allow a cluster with no hosts
        if not dbcluster.hosts:
            raise ArgumentError("{0} still has {1:l} assigned, removing the "
                                "last cluster member is not allowed."
                                .format(dbcluster, res))

        for iface in dbhost.hardware_entity.interfaces:
            # Snapshot the matching assignments first, then remove them
            addrs = [addr for addr in iface.assignments
                     if addr.service_address == res]
            for addr in addrs:
                iface.assignments.remove(addr)
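# Aside: remove_service_addresses() deliberately snapshots the matching
# assignments into a list before mutating iface.assignments, because removing
# items from a collection while iterating over it skips elements. The helper
# below is an illustrative sketch of that pattern only; it is hypothetical
# and not used by the broker.

def _remove_matching(collection, predicate):
    """Remove every item matching predicate from a mutable collection.

    >>> items = [1, 2, 2, 3, 2]
    >>> _remove_matching(items, lambda x: x == 2)
    >>> items
    [1, 3]
    """
    # Snapshot first so the removal loop never iterates over a list it is
    # mutating at the same time
    doomed = [item for item in collection if predicate(item)]
    for item in doomed:
        collection.remove(item)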
def render(self, session, logger, hostname, cluster, personality,
           **arguments):
    dbhost = hostname_to_host(session, hostname)
    dbcluster = Cluster.get_unique(session, cluster, compel=True)

    if dbcluster.status.name == 'decommissioned':
        raise ArgumentError("Cannot add hosts to decommissioned clusters.")

    # We only support changing the personality within the same archetype.
    # The archetype decides things like which OS is used, how the host
    # builds (dhcp, etc.), and whether it's compilable, and switching all
    # of that by side-effect seems wrong. Besides, supporting it would make
    # both the user interface and the implementation of this command ugly.
    personality_change = False
    if personality is not None:
        dbpersonality = Personality.get_unique(session, name=personality,
                                               archetype=dbhost.archetype,
                                               compel=True)
        if dbhost.personality != dbpersonality:
            dbhost.personality = dbpersonality
            personality_change = True

    # An empty allowed_personalities list means the cluster is not
    # restricted, which is the default
    if (len(dbcluster.allowed_personalities) > 0 and
            dbhost.personality not in dbcluster.allowed_personalities):
        raise ArgumentError("The personality %s for %s is not allowed "
                            "by the cluster. Specify --personality "
                            "and provide one of %s" %
                            (dbhost.personality, dbhost.fqdn,
                             ", ".join([x.name for x in
                                        dbcluster.allowed_personalities])))

    # Now that the personality has been changed, we can check whether this
    # is a valid membership change
    dbcluster.validate_membership(dbhost)

    plenaries = PlenaryCollection(logger=logger)
    plenaries.append(Plenary.get_plenary(dbcluster))

    if dbhost.cluster and dbhost.cluster != dbcluster:
        logger.client_info("Removing {0:l} from {1:l}."
                           .format(dbhost, dbhost.cluster))
        old_cluster = dbhost.cluster
        old_cluster.hosts.remove(dbhost)
        remove_service_addresses(old_cluster, dbhost)
        old_cluster.validate()
        session.expire(dbhost, ['_cluster'])
        plenaries.append(Plenary.get_plenary(old_cluster))

    # Apply the cluster's service addresses to the new member
    for res in walk_resources(dbcluster):
        if not isinstance(res, ServiceAddress):
            continue
        apply_service_address(dbhost, res.interfaces, res, logger)

    if dbhost.cluster:
        if personality_change:
            raise ArgumentError("{0:l} is already in {1:l}, use "
                                "aq reconfigure to change the personality."
                                .format(dbhost, dbhost.cluster))
        # The cluster has not changed, so there is nothing to do here.
        return

    # Calculate the node index: build the set of all possible values,
    # remove the ones already in use, and pick the smallest remaining one
    node_index_map = set(xrange(len(dbcluster._hosts) + 1))
    for link in dbcluster._hosts:
        # The cluster may have been bigger in the past, so node indexes may
        # be larger than the current cluster size
        try:
            node_index_map.remove(link.node_index)
        except KeyError:
            pass

    dbcluster.hosts.append((dbhost, min(node_index_map)))
    dbcluster.validate()

    # Demote the host if it is ready but the new cluster is not, or promote
    # it if the cluster is ready and the host is only almostready
    if dbhost.status.name == 'ready':
        if dbcluster.status.name != 'ready':
            dbalmost = HostAlmostready.get_instance(session)
            dbhost.status.transition(dbhost, dbalmost)
            plenaries.append(Plenary.get_plenary(dbhost))
    elif dbhost.status.name == 'almostready':
        if dbcluster.status.name == 'ready':
            dbready = HostReady.get_instance(session)
            dbhost.status.transition(dbhost, dbready)
            plenaries.append(Plenary.get_plenary(dbhost))

    session.flush()

    # Enforce that service instances are set correctly for the new cluster
    # association.
    chooser = Chooser(dbhost, logger=logger)
    chooser.set_required()
    chooser.flush_changes()

    # The chooser will include the host plenary
    with CompileKey.merge([chooser.get_key(), plenaries.get_key()]):
        plenaries.stash()
        try:
            chooser.write_plenary_templates(locked=True)
            plenaries.write(locked=True)
        except:
            chooser.restore_stash()
            plenaries.restore_stash()
            raise

    return
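# Aside: the node index calculation in render() above is a "smallest free
# index" allocator: with N used indexes, at least one value in 0..N must be
# free, so the candidate set never comes up empty. The helper below is a
# minimal standalone sketch of the same idea; it is hypothetical and not
# used by the broker.

def _smallest_free_node_index(used_indexes):
    """Return the smallest non-negative index not in used_indexes.

    >>> _smallest_free_node_index([0, 1, 3])
    2
    >>> _smallest_free_node_index([0, 1, 2])
    3
    """
    # One more candidate than there are used indexes guarantees a free slot
    candidates = set(xrange(len(used_indexes) + 1))
    return min(candidates - set(used_indexes))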
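# Aside: the stash/write/restore sequence at the end of render() is a simple
# transaction for on-disk templates: snapshot the current files, write the
# new ones while holding the compile lock, and roll back to the snapshot if
# anything fails. The helper below sketches the same shape; "writer" is a
# hypothetical stand-in for any object with the stash()/write()/
# restore_stash() interface of a PlenaryCollection, and the helper is not
# used by the broker.

def _write_with_rollback(writer):
    """Write templates, rolling back to the stashed state on failure."""
    writer.stash()                   # remember the current on-disk state
    try:
        writer.write(locked=True)    # attempt to write the new content
    except:
        writer.restore_stash()       # roll back to the snapshot
        raise                        # re-raise so the caller sees the error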