def render(self, session, logger, list, domain, sandbox, force, **arguments):
    """Move a list of hosts into another domain or sandbox.

    ``list`` shadows the builtin; the name is part of the broker command
    interface and cannot be changed here.

    Raises ArgumentError if the target branch does not allow management,
    or if any host in the list is a cluster member (those must be managed
    at the cluster level).
    """
    # Resolve exactly one of --domain/--sandbox into a branch and (for
    # sandboxes) an author; compel=True makes the helper fail loudly if
    # neither was supplied.
    dbbranch, dbauthor = get_branch_and_author(session, logger,
                                               domain=domain,
                                               sandbox=sandbox,
                                               compel=True)
    # Some branch types (presumably domains) can be marked unmanageable.
    if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
        raise ArgumentError("Managing hosts to {0:l} is not allowed."
                            .format(dbbranch))
    check_hostlist_size(self.command, self.config, list)
    dbhosts = hostlist_to_hosts(session, list)
    failed = []
    # NOTE(review): validate_branch_author appears to enforce that all
    # hosts share a single source branch/author and returns that pair —
    # confirm against its definition.
    dbsource, dbsource_author = validate_branch_author(dbhosts)
    for dbhost in dbhosts:
        # check if any host in the list is a cluster node
        if dbhost.cluster:
            failed.append("Cluster nodes must be managed at the "
                          "cluster level; {0} is a member of {1:l}."
                          .format(dbhost.fqdn, dbhost.cluster))
    if failed:
        raise ArgumentError("Cannot modify the following hosts:\n%s" %
                            "\n".join(failed))
    # Unless --force was given, make sure the source branch's commits
    # are already present in the target branch.
    if not force:
        validate_branch_commits(dbsource, dbsource_author,
                                dbbranch, dbauthor, logger, self.config)
    plenaries = PlenaryCollection(logger=logger)
    for dbhost in dbhosts:
        # The plenary object is looked up *before* the branch is
        # reassigned — NOTE(review): ordering looks deliberate (the
        # plenary may capture the old location); confirm against
        # Plenary.get_plenary before reordering.
        plenaries.append(Plenary.get_plenary(dbhost))
        dbhost.branch = dbbranch
        dbhost.sandbox_author = dbauthor
    session.flush()
    # We're crossing domains, need to lock everything.
    with CompileKey.merge([CompileKey(domain=dbsource.name, logger=logger),
                           CompileKey(domain=dbbranch.name, logger=logger)]):
        plenaries.stash()
        try:
            plenaries.write(locked=True)
        except:
            # Bare except is intentional here: restore the stashed
            # plenaries on *any* failure, then re-raise unchanged.
            plenaries.restore_stash()
            raise
    return
def render(self, session, logger, domain, sandbox, cluster, force, **arguments):
    """Move a cluster (or metacluster) and its hosts to another branch.

    Raises ArgumentError if the target branch refuses management or if
    the cluster belongs to a metacluster (it must then be managed at the
    metacluster level).
    """
    # Resolve exactly one of --domain/--sandbox into a branch + author.
    (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                 domain=domain,
                                                 sandbox=sandbox,
                                                 compel=True)
    if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
        raise ArgumentError("Managing clusters to {0:l} is not allowed."
                            .format(dbbranch))
    dbcluster = Cluster.get_unique(session, cluster, compel=True)
    dbsource = dbcluster.branch
    dbsource_author = dbcluster.sandbox_author
    # Remember the name of the branch being left behind so the old
    # plenary files can be cleaned up after the move.  (Fix: the
    # original assigned old_branch twice; the duplicate was dead code.)
    old_branch = dbcluster.branch.name
    # Unless --force was given, verify the source branch's commits are
    # already in the target branch.
    if not force:
        validate_branch_commits(dbsource, dbsource_author,
                                dbbranch, dbauthor, logger, self.config)
    if dbcluster.metacluster:
        raise ArgumentError("{0.name} is member of metacluster {1.name}, "
                            "it must be managed at metacluster level.".
                            format(dbcluster, dbcluster.metacluster))
    plenaries = PlenaryCollection(logger=logger)
    # manage at metacluster level
    if dbcluster.cluster_type == 'meta':
        clusters = dbcluster.members
        dbcluster.branch = dbbranch
        dbcluster.sandbox_author = dbauthor
        session.add(dbcluster)
        plenaries.append(Plenary.get_plenary(dbcluster))
    else:
        clusters = [dbcluster]
    for cluster in clusters:
        # manage at cluster level
        # Need to set the new branch *before* creating the plenary objects.
        cluster.branch = dbbranch
        cluster.sandbox_author = dbauthor
        session.add(cluster)
        plenaries.append(Plenary.get_plenary(cluster))
        for dbhost in cluster.hosts:
            dbhost.branch = dbbranch
            dbhost.sandbox_author = dbauthor
            session.add(dbhost)
            plenaries.append(Plenary.get_plenary(dbhost))
    session.flush()
    # We're crossing domains, need to lock everything.
    key = CompileKey(logger=logger)
    try:
        lock_queue.acquire(key)
        plenaries.stash()
        plenaries.cleanup(old_branch, locked=True)
        plenaries.write(locked=True)
    except:
        # Restore the stashed plenaries on any failure, then re-raise.
        plenaries.restore_stash()
        raise
    finally:
        lock_queue.release(key)
    return
def render(self, session, logger, domain, sandbox, cluster, force, **arguments):
    """Move a cluster (or metacluster) and its hosts to another branch.

    Raises ArgumentError if the target branch refuses management or if
    the cluster belongs to a metacluster (it must then be managed at the
    metacluster level).
    """
    # Resolve exactly one of --domain/--sandbox into a branch + author;
    # compel=True makes the helper raise if neither was supplied.
    (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                 domain=domain,
                                                 sandbox=sandbox,
                                                 compel=True)
    if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
        raise ArgumentError("Managing clusters to {0:l} is not allowed."
                            .format(dbbranch))
    dbcluster = Cluster.get_unique(session, cluster, compel=True)
    dbsource = dbcluster.branch
    dbsource_author = dbcluster.sandbox_author
    # Unless --force was given, verify the source branch's commits are
    # already present in the target branch.
    if not force:
        validate_branch_commits(dbsource, dbsource_author,
                                dbbranch, dbauthor, logger, self.config)
    if dbcluster.metacluster:
        raise ArgumentError("{0.name} is member of metacluster {1.name}, "
                            "it must be managed at metacluster level.".
                            format(dbcluster, dbcluster.metacluster))
    plenaries = PlenaryCollection(logger=logger)
    # manage at metacluster level
    if isinstance(dbcluster, MetaCluster):
        # NOTE(review): here the plenary is looked up *before* the
        # branch is reassigned; the ordering looks deliberate — confirm
        # against Plenary.get_plenary before reordering.
        plenaries.append(Plenary.get_plenary(dbcluster))
        clusters = dbcluster.members
        dbcluster.branch = dbbranch
        dbcluster.sandbox_author = dbauthor
    else:
        clusters = [dbcluster]
    for cluster in clusters:
        # Move each member cluster and every host it contains.
        plenaries.append(Plenary.get_plenary(cluster))
        cluster.branch = dbbranch
        cluster.sandbox_author = dbauthor
        for dbhost in cluster.hosts:
            plenaries.append(Plenary.get_plenary(dbhost))
            dbhost.branch = dbbranch
            dbhost.sandbox_author = dbauthor
    session.flush()
    # We're crossing domains, need to lock everything.
    with CompileKey.merge([CompileKey(domain=dbsource.name, logger=logger),
                           CompileKey(domain=dbbranch.name, logger=logger)]):
        plenaries.stash()
        try:
            plenaries.write(locked=True)
        except:
            # Bare except is intentional: restore the stashed plenaries
            # on *any* failure, then re-raise unchanged.
            plenaries.restore_stash()
            raise
    return
def render(self, session, logger, domain, sandbox, cluster, force, **arguments):
    """Move a cluster (or metacluster) and its hosts to another branch.

    Raises ArgumentError if the target branch refuses management or if
    the cluster belongs to a metacluster (it must then be managed at the
    metacluster level).
    """
    # Resolve exactly one of --domain/--sandbox into a branch + author.
    (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                 domain=domain,
                                                 sandbox=sandbox,
                                                 compel=True)
    if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
        raise ArgumentError(
            "Managing clusters to {0:l} is not allowed.".format(dbbranch))
    dbcluster = Cluster.get_unique(session, cluster, compel=True)
    dbsource = dbcluster.branch
    dbsource_author = dbcluster.sandbox_author
    # Remember the name of the branch being left behind so the old
    # plenary files can be cleaned up after the move.  (Fix: the
    # original assigned old_branch twice; the duplicate was dead code.)
    old_branch = dbcluster.branch.name
    # Unless --force was given, verify the source branch's commits are
    # already in the target branch.
    if not force:
        validate_branch_commits(dbsource, dbsource_author,
                                dbbranch, dbauthor, logger, self.config)
    if dbcluster.metacluster:
        raise ArgumentError(
            "{0.name} is member of metacluster {1.name}, "
            "it must be managed at metacluster level.".format(
                dbcluster, dbcluster.metacluster))
    plenaries = PlenaryCollection(logger=logger)
    # manage at metacluster level
    if dbcluster.cluster_type == 'meta':
        clusters = dbcluster.members
        dbcluster.branch = dbbranch
        dbcluster.sandbox_author = dbauthor
        session.add(dbcluster)
        plenaries.append(Plenary.get_plenary(dbcluster))
    else:
        clusters = [dbcluster]
    for cluster in clusters:
        # manage at cluster level
        # Need to set the new branch *before* creating the plenary objects.
        cluster.branch = dbbranch
        cluster.sandbox_author = dbauthor
        session.add(cluster)
        plenaries.append(Plenary.get_plenary(cluster))
        for dbhost in cluster.hosts:
            dbhost.branch = dbbranch
            dbhost.sandbox_author = dbauthor
            session.add(dbhost)
            plenaries.append(Plenary.get_plenary(dbhost))
    session.flush()
    # We're crossing domains, need to lock everything.
    key = CompileKey(logger=logger)
    try:
        lock_queue.acquire(key)
        plenaries.stash()
        plenaries.cleanup(old_branch, locked=True)
        plenaries.write(locked=True)
    except:
        # Restore the stashed plenaries on any failure, then re-raise.
        plenaries.restore_stash()
        raise
    finally:
        lock_queue.release(key)
    return
def render(self, session, logger, list, domain, sandbox, force, **arguments):
    """Move a list of hosts into another domain or sandbox.

    ``list`` shadows the builtin; the name is part of the broker command
    interface and cannot be changed here.

    Raises ArgumentError if the target branch refuses management, if any
    host is a cluster member, or if the hosts do not all share the same
    source branch and sandbox author.
    """
    # Resolve exactly one of --domain/--sandbox into a branch + author.
    (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                 domain=domain,
                                                 sandbox=sandbox,
                                                 compel=True)
    if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
        raise ArgumentError("Managing hosts to {0:l} is not allowed."
                            .format(dbbranch))
    check_hostlist_size(self.command, self.config, list)
    dbhosts = hostlist_to_hosts(session, list)
    failed = []
    # Group the hosts by source branch and by sandbox author so mixed
    # input can be rejected with a useful breakdown.
    branches = defaultdict(ListType)
    authors = defaultdict(ListType)
    for dbhost in dbhosts:
        branches[dbhost.branch].append(dbhost)
        authors[dbhost.sandbox_author].append(dbhost)
        # check if any host in the list is a cluster node
        if dbhost.cluster:
            failed.append("Cluster nodes must be managed at the "
                          "cluster level; {0} is a member of {1:l}."
                          .format(dbhost.fqdn, dbhost.cluster))
    if failed:
        raise ArgumentError("Cannot modify the following hosts:\n%s" %
                            "\n".join(failed))
    if len(branches) > 1:
        keys = branches.keys()
        branch_sort = lambda x, y: cmp(len(branches[x]), len(branches[y]))
        keys.sort(cmp=branch_sort)
        stats = ["{0:d} hosts in {1:l}".format(len(branches[branch]), branch)
                 for branch in keys]
        raise ArgumentError("All hosts must be in the same domain or "
                            "sandbox:\n%s" % "\n".join(stats))
    # check if all hosts are from the same sandbox author
    if len(authors) > 1:
        keys = authors.keys()
        author_sort = lambda x, y: cmp(len(authors[x]), len(authors[y]))
        keys.sort(cmp=author_sort)
        # Fix: the original formatted author.name (a plain string) with
        # the "{1:l}" spec, which only AQDB objects implement, so
        # building this message raised ValueError; author may also be
        # None (hosts in a domain), making author.name raise
        # AttributeError.
        stats = ["{0:d} hosts with sandbox author {1}"
                 .format(len(authors[author]),
                         author.name if author else "(none)")
                 for author in keys]
        raise ArgumentError("All hosts must be managed by the same "
                            "sandbox author:\n%s" % "\n".join(stats))
    # since we have already checked if all hosts in list are within the
    # same branch, we only need one dbsource to validate the branch
    dbhost = dbhosts[0]
    dbsource = dbhost.branch
    dbsource_author = dbhost.sandbox_author
    if not force:
        validate_branch_commits(dbsource, dbsource_author,
                                dbbranch, dbauthor, logger, self.config)
    # All hosts share one branch at this point; remember its name so the
    # old plenary files can be cleaned up after the move.
    old_branch = branches.keys()[0].name
    plenaries = PlenaryCollection(logger=logger)
    for dbhost in dbhosts:
        dbhost.branch = dbbranch
        dbhost.sandbox_author = dbauthor
        plenaries.append(Plenary.get_plenary(dbhost))
    session.flush()
    # We're crossing domains, need to lock everything.
    key = CompileKey(logger=logger)
    try:
        lock_queue.acquire(key)
        plenaries.stash()
        plenaries.cleanup(old_branch, locked=True)
        plenaries.write(locked=True)
    except:
        # Restore the stashed plenaries on any failure, then re-raise.
        plenaries.restore_stash()
        raise
    finally:
        lock_queue.release(key)
    return
def render(self, session, logger, list, domain, sandbox, force, **arguments):
    """Move a list of hosts into another domain or sandbox.

    ``list`` shadows the builtin; the name is part of the broker command
    interface and cannot be changed here.

    Raises ArgumentError if the target branch refuses management, if any
    host is a cluster member, or if the hosts do not all share the same
    source branch and sandbox author.
    """
    # Resolve exactly one of --domain/--sandbox into a branch + author.
    (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                 domain=domain,
                                                 sandbox=sandbox,
                                                 compel=True)
    if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
        raise ArgumentError(
            "Managing hosts to {0:l} is not allowed.".format(dbbranch))
    check_hostlist_size(self.command, self.config, list)
    dbhosts = hostlist_to_hosts(session, list)
    failed = []
    # Group the hosts by source branch and by sandbox author so mixed
    # input can be rejected with a useful breakdown.
    branches = defaultdict(ListType)
    authors = defaultdict(ListType)
    for dbhost in dbhosts:
        branches[dbhost.branch].append(dbhost)
        authors[dbhost.sandbox_author].append(dbhost)
        # check if any host in the list is a cluster node
        if dbhost.cluster:
            failed.append(
                "Cluster nodes must be managed at the "
                "cluster level; {0} is a member of {1:l}.".format(
                    dbhost.fqdn, dbhost.cluster))
    if failed:
        raise ArgumentError("Cannot modify the following hosts:\n%s" %
                            "\n".join(failed))
    if len(branches) > 1:
        keys = branches.keys()
        branch_sort = lambda x, y: cmp(len(branches[x]), len(branches[y]))
        keys.sort(cmp=branch_sort)
        stats = [
            "{0:d} hosts in {1:l}".format(len(branches[branch]), branch)
            for branch in keys
        ]
        raise ArgumentError("All hosts must be in the same domain or "
                            "sandbox:\n%s" % "\n".join(stats))
    # check if all hosts are from the same sandbox author
    if len(authors) > 1:
        keys = authors.keys()
        author_sort = lambda x, y: cmp(len(authors[x]), len(authors[y]))
        keys.sort(cmp=author_sort)
        # Fix: the original formatted author.name (a plain string) with
        # the "{1:l}" spec, which only AQDB objects implement, so
        # building this message raised ValueError; author may also be
        # None (hosts in a domain), making author.name raise
        # AttributeError.
        stats = [
            "{0:d} hosts with sandbox author {1}".format(
                len(authors[author]),
                author.name if author else "(none)")
            for author in keys
        ]
        raise ArgumentError("All hosts must be managed by the same "
                            "sandbox author:\n%s" % "\n".join(stats))
    # since we have already checked if all hosts in list are within the
    # same branch, we only need one dbsource to validate the branch
    dbhost = dbhosts[0]
    dbsource = dbhost.branch
    dbsource_author = dbhost.sandbox_author
    if not force:
        validate_branch_commits(dbsource, dbsource_author,
                                dbbranch, dbauthor, logger, self.config)
    # All hosts share one branch at this point; remember its name so the
    # old plenary files can be cleaned up after the move.
    old_branch = branches.keys()[0].name
    plenaries = PlenaryCollection(logger=logger)
    for dbhost in dbhosts:
        dbhost.branch = dbbranch
        dbhost.sandbox_author = dbauthor
        plenaries.append(Plenary.get_plenary(dbhost))
    session.flush()
    # We're crossing domains, need to lock everything.
    key = CompileKey(logger=logger)
    try:
        lock_queue.acquire(key)
        plenaries.stash()
        plenaries.cleanup(old_branch, locked=True)
        plenaries.write(locked=True)
    except:
        # Restore the stashed plenaries on any failure, then re-raise.
        plenaries.restore_stash()
        raise
    finally:
        lock_queue.release(key)
    return