def validate_rebuild_required(session, path, param_holder):
    """Check whether modifying this parameter requires the affected hosts
    to be in a non-ready (rebuild) state."""
    q = session.query(Host)
    dbready = Ready.get_instance(session)
    dbalmostready = Almostready.get_instance(session)
    q = q.filter(or_(Host.status == dbready, Host.status == dbalmostready))
    personality = param_holder.personality
    if isinstance(param_holder, PersonalityParameter):
        q = q.filter_by(personality=personality)
    if q.count():
        raise ArgumentError("Modifying parameter %s value needs a host "
                            "rebuild. There are hosts associated with the "
                            "personality in ready or almostready state. "
                            "Please set these hosts to the rebuild status "
                            "to continue. Run 'aq search host --personality "
                            "%s --buildstatus ready' and 'aq search host "
                            "--personality %s --buildstatus almostready' to "
                            "get the list of affected hosts." %
                            (path, personality, personality))
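
# Illustration (not part of the broker): a minimal, self-contained sketch of
# the guard rule above, using plain status strings instead of the real ORM
# models. The _rebuild_guard_sketch name and the 'some/path' argument are
# hypothetical; only the rule mirrors the function above: refuse the
# parameter change while any affected host is still 'ready' or 'almostready'.
def _rebuild_guard_sketch(host_statuses, path):
    blocking = [s for s in host_statuses if s in ('ready', 'almostready')]
    if blocking:
        raise ValueError("Modifying parameter %s needs a host rebuild; "
                         "%d host(s) are still ready/almostready." %
                         (path, len(blocking)))

# _rebuild_guard_sketch(['rebuild', 'ready'], 'some/path')  # raises
# _rebuild_guard_sketch(['rebuild', 'build'], 'some/path')  # passes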
def onLeave(self, dbcluster):
    # When the cluster leaves the ready state, demote its ready member
    # hosts to almostready; hosts in other statuses are left alone.
    dbalmostready = HostAlmostready.get_instance(object_session(dbcluster))
    for dbhost in dbcluster.hosts:
        if dbhost.status.name == 'ready':
            dbhost.status.transition(dbhost, dbalmostready)
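
# Illustration (not part of the broker): the rule onLeave() applies, written
# as a pure function over status names. Everything except the 'ready' and
# 'almostready' lifecycle names is a hypothetical stand-in.
def _statuses_after_cluster_leaves_ready(member_statuses):
    return ['almostready' if s == 'ready' else s for s in member_statuses]

assert (_statuses_after_cluster_leaves_ready(['ready', 'build', 'rebuild'])
        == ['almostready', 'build', 'rebuild'])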
def render(self, session, logger, hostname, cluster, personality,
           **arguments):
    dbhost = hostname_to_host(session, hostname)
    dbcluster = Cluster.get_unique(session, cluster, compel=True)

    if dbcluster.status.name == 'decommissioned':
        raise ArgumentError("Cannot add hosts to decommissioned clusters.")

    # We only support changing personality within the same
    # archetype. The archetype decides things like which OS, how
    # it builds (dhcp, etc), whether it's compilable, and
    # switching all of that by side-effect seems wrong
    # somehow. And besides, it would make the user-interface and
    # implementation for this command ugly in order to support
    # changing all of those options.
    personality_change = False
    if personality is not None:
        dbpersonality = Personality.get_unique(session, name=personality,
                                               archetype=dbhost.archetype,
                                               compel=True)
        if dbhost.personality != dbpersonality:
            dbhost.personality = dbpersonality
            personality_change = True

    # Allow for non-restricted clusters (the default?)
    if (len(dbcluster.allowed_personalities) > 0 and
            dbhost.personality not in dbcluster.allowed_personalities):
        raise ArgumentError("The personality %s for %s is not allowed "
                            "by the cluster. Specify --personality "
                            "and provide one of %s" %
                            (dbhost.personality, dbhost.fqdn,
                             ", ".join([x.name for x in
                                        dbcluster.allowed_personalities])))

    # Now that we've changed the personality, we can check
    # if this is a valid membership change
    dbcluster.validate_membership(dbhost)

    plenaries = PlenaryCollection(logger=logger)
    plenaries.append(Plenary.get_plenary(dbcluster))

    if dbhost.cluster and dbhost.cluster != dbcluster:
        logger.client_info("Removing {0:l} from {1:l}.".format(dbhost,
                                                               dbhost.cluster))
        old_cluster = dbhost.cluster
        old_cluster.hosts.remove(dbhost)
        remove_service_addresses(old_cluster, dbhost)
        old_cluster.validate()
        session.expire(dbhost, ['_cluster'])
        plenaries.append(Plenary.get_plenary(old_cluster))

    # Apply the service addresses to the new member
    for res in walk_resources(dbcluster):
        if not isinstance(res, ServiceAddress):
            continue
        apply_service_address(dbhost, res.interfaces, res, logger)

    if dbhost.cluster:
        if personality_change:
            raise ArgumentError("{0:l} already in {1:l}, use "
                                "aq reconfigure to change personality."
                                .format(dbhost, dbhost.cluster))
        # The cluster has not changed, therefore there's nothing
        # to do here.
        return

    # Calculate the node index: build a map of all possible values,
    # remove the used ones, and pick the smallest remaining one
    node_index_map = set(xrange(len(dbcluster._hosts) + 1))
    for link in dbcluster._hosts:
        # The cluster may have been bigger in the past, so node indexes
        # may be larger than the current cluster size
        try:
            node_index_map.remove(link.node_index)
        except KeyError:
            pass

    dbcluster.hosts.append((dbhost, min(node_index_map)))
    dbcluster.validate()

    # Demote a host joining a cluster that is not ready yet, and
    # promote a host joining a cluster that is already ready
    if dbhost.status.name == 'ready':
        if dbcluster.status.name != 'ready':
            dbalmost = HostAlmostready.get_instance(session)
            dbhost.status.transition(dbhost, dbalmost)
            plenaries.append(Plenary.get_plenary(dbhost))
    elif dbhost.status.name == 'almostready':
        if dbcluster.status.name == 'ready':
            dbready = HostReady.get_instance(session)
            dbhost.status.transition(dbhost, dbready)
            plenaries.append(Plenary.get_plenary(dbhost))

    session.flush()

    # Enforce that service instances are set correctly for the
    # new cluster association.
    chooser = Chooser(dbhost, logger=logger)
    chooser.set_required()
    chooser.flush_changes()

    # The chooser will include the host plenary
    with CompileKey.merge([chooser.get_key(), plenaries.get_key()]):
        plenaries.stash()
        try:
            chooser.write_plenary_templates(locked=True)
            plenaries.write(locked=True)
        except:
            chooser.restore_stash()
            plenaries.restore_stash()
            raise

    return
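
# Illustration (not part of the broker): the node-index allocation from
# render() above, in isolation. With N members, at least one value in 0..N
# must be free (pigeonhole), which is why the candidate set is one element
# larger than the membership list; indexes freed by past removals are reused
# first. The _next_node_index name is hypothetical, and this standalone
# sketch uses range() in place of the broker's Python 2 xrange().
def _next_node_index(used_indexes):
    candidates = set(range(len(used_indexes) + 1))
    candidates.difference_update(used_indexes)
    return min(candidates)

assert _next_node_index([]) == 0
assert _next_node_index([0, 1, 3]) == 2  # the gap left by a removed member
assert _next_node_index([0, 1, 2]) == 3  # no gaps, append at the end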