def compile(self, session, dbhost, logger, keepbindings):
    """Re-evaluate service bindings for a host and recompile it.

    Writes the plenary templates produced by the Chooser and compiles the
    host's profile together with any server profiles whose bindings
    changed.  On failure the written templates are restored so the DB
    transaction can be rolled back to a matching state.
    """
    chooser = Chooser(dbhost, logger=logger, required_only=not keepbindings)
    chooser.set_required()
    chooser.flush_changes()

    # Compile the host itself plus every server whose profile was touched.
    hosts = chooser.changed_server_fqdns()
    hosts.add(dbhost.fqdn)

    # Force a host lock as pan might overwrite the profile...
    key = chooser.get_write_key()
    for fqdn in hosts:
        key = CompileKey.merge([key,
                                CompileKey(domain=dbhost.branch.name,
                                           profile=fqdn, logger=logger)])
    try:
        lock_queue.acquire(key)
        chooser.write_plenary_templates(locked=True)
        td = TemplateDomain(dbhost.branch, dbhost.sandbox_author,
                            logger=logger)
        td.compile(session, only=hosts, locked=True)
    except:
        # BUGFIX: dropped the dead "if chooser:" guard -- chooser is always
        # bound before the try block, so the check could never be false.
        chooser.restore_stash()
        # Okay, cleaned up templates, make sure the caller knows
        # we've aborted so that DB can be appropriately rollback'd.
        raise
    finally:
        lock_queue.release(key)
    return
def render(self, session, logger, hostname, buildstatus, **arguments):
    """Transition a host to a new lifecycle status and recompile it."""
    dbhost = hostname_to_host(session, hostname)
    dbstatus = HostLifecycle.get_unique(session, buildstatus, compel=True)

    # Nothing further to do when the transition was a no-op or the
    # archetype has no compilable profile.
    if not dbhost.status.transition(dbhost, dbstatus):
        return
    if not dbhost.archetype.is_compileable:
        return

    session.add(dbhost)
    session.flush()

    host_plenary = PlenaryHost(dbhost, logger=logger)
    # Force a host lock as pan might overwrite the profile...
    compile_key = CompileKey(domain=dbhost.branch.name,
                             profile=dbhost.fqdn, logger=logger)
    try:
        lock_queue.acquire(compile_key)
        host_plenary.write(locked=True)
        domain = TemplateDomain(dbhost.branch, dbhost.sandbox_author,
                                logger=logger)
        domain.compile(session, only=[dbhost.fqdn], locked=True)
    except IncompleteError:
        raise ArgumentError("Run aq make for host %s first." % dbhost.fqdn)
    except:
        # Restore the plenary so on-disk state matches the rolled-back DB.
        host_plenary.restore_stash()
        raise
    finally:
        lock_queue.release(compile_key)
def render(self, session, logger, cluster, keepbindings, **arguments):
    """Re-evaluate service bindings for a cluster and recompile it."""
    dbcluster = Cluster.get_unique(session, cluster, compel=True)
    archetype = dbcluster.personality.archetype
    if not archetype.is_compileable:
        raise ArgumentError("{0} is not a compilable archetype "
                            "({1!s}).".format(dbcluster, archetype))

    chooser = Chooser(dbcluster, logger=logger,
                      required_only=not keepbindings)
    chooser.set_required()
    chooser.flush_changes()

    td = TemplateDomain(dbcluster.branch, dbcluster.sandbox_author,
                        logger=logger)
    # Force a domain lock as pan might overwrite any of the profiles...
    with chooser.get_key():
        try:
            chooser.write_plenary_templates(locked=True)
            td.compile(session, only=chooser.plenaries.object_templates,
                       locked=True)
        except:
            # Okay, cleaned up templates, make sure the caller knows
            # we've aborted so that DB can be appropriately rollback'd.
            chooser.restore_stash()
            raise
    return
def render(self, session, logger, cluster, pancinclude, pancexclude, pancdebug, cleandeps, **arguments):
    """Compile a cluster (and, for a metacluster, its member clusters)."""
    dbcluster = Cluster.get_unique(session, cluster, compel=True)
    if pancdebug:
        # --pancdebug is shorthand for this include/exclude pattern pair.
        pancinclude = r".*"
        pancexclude = r"components/spma/functions"
    dom = TemplateDomain(dbcluster.branch, dbcluster.sandbox_author,
                         logger=logger)

    # Gather the cluster plenary plus one per member host -- for the
    # cluster itself and, if it is a metacluster, for every member.
    plenaries = PlenaryCollection(logger=logger)
    clusters = [dbcluster]
    if isinstance(dbcluster, MetaCluster):
        clusters.extend(dbcluster.members)
    for dbclus in clusters:
        plenaries.append(Plenary.get_plenary(dbclus))
        for dbhost in dbclus.hosts:
            plenaries.append(Plenary.get_plenary(dbhost))

    with plenaries.get_key():
        dom.compile(session,
                    only=plenaries.object_templates,
                    panc_debug_include=pancinclude,
                    panc_debug_exclude=pancexclude,
                    cleandeps=cleandeps,
                    locked=True)
    return
def render(self, session, logger, cluster, buildstatus, **arguments):
    """Transition a cluster to a new lifecycle status and recompile it."""
    dbcluster = Cluster.get_unique(session, cluster, compel=True)
    dbstatus = ClusterLifecycle.get_unique(session, buildstatus,
                                           compel=True)
    if not dbcluster.status.transition(dbcluster, dbstatus):
        return
    if not dbcluster.personality.archetype.is_compileable:
        return

    session.flush()

    plenaries = PlenaryCollection(logger=logger)
    plenaries.append(Plenary.get_plenary(dbcluster))
    for dbhost in dbcluster.hosts:
        plenaries.append(Plenary.get_plenary(dbhost))

    # Force a host lock as pan might overwrite the profile...
    key = CompileKey(domain=dbcluster.branch.name, logger=logger)
    try:
        lock_queue.acquire(key)
        plenaries.write(locked=True)
        td = TemplateDomain(dbcluster.branch, dbcluster.sandbox_author,
                            logger=logger)
        # CONSISTENCY FIX: pass the profile list via the "only" keyword,
        # as every other TemplateDomain.compile() call in this file does,
        # instead of relying on positional order.
        td.compile(session, only=plenaries.object_templates, locked=True)
    except:
        # Restore plenaries so on-disk state matches the rolled-back DB.
        plenaries.restore_stash()
        raise
    finally:
        lock_queue.release(key)
    return
def render(self, session, logger, domain, sandbox, pancinclude, pancexclude, pancdebug, cleandeps, **arguments):
    """Compile every object template in a domain or sandbox."""
    (dbdomain, dbauthor) = get_branch_and_author(session, logger,
                                                 domain=domain,
                                                 sandbox=sandbox,
                                                 compel=True)

    # Grab a shared lock on personalities and services used by the domain.
    # Object templates (hosts, clusters) are protected by the domain lock.
    plenaries = PlenaryCollection(logger=logger)

    host_pers_q = session.query(Personality)
    host_pers_q = host_pers_q.join(Host)
    host_pers_q = host_pers_q.filter(and_(Host.branch == dbdomain,
                                          Host.sandbox_author == dbauthor))
    host_pers_q = host_pers_q.reset_joinpoint()
    host_pers_q = host_pers_q.options(joinedload('paramholder'),
                                      subqueryload('paramholder.parameters'))

    clus_pers_q = session.query(Personality)
    clus_pers_q = clus_pers_q.join(Cluster)
    clus_pers_q = clus_pers_q.filter(and_(Cluster.branch == dbdomain,
                                          Cluster.sandbox_author == dbauthor))
    clus_pers_q = clus_pers_q.reset_joinpoint()
    clus_pers_q = clus_pers_q.options(joinedload('paramholder'),
                                      subqueryload('paramholder.parameters'))

    for dbpers in host_pers_q.union(clus_pers_q):
        plenaries.append(Plenary.get_plenary(dbpers))

    host_si_q = session.query(ServiceInstance)
    host_si_q = host_si_q.join(ServiceInstance.clients)
    host_si_q = host_si_q.filter(and_(Host.branch == dbdomain,
                                      Host.sandbox_author == dbauthor))

    clus_si_q = session.query(ServiceInstance)
    clus_si_q = clus_si_q.join(ServiceInstance.cluster_clients)
    clus_si_q = clus_si_q.filter(and_(Cluster.branch == dbdomain,
                                      Cluster.sandbox_author == dbauthor))

    for si in host_si_q.union(clus_si_q):
        plenaries.append(Plenary.get_plenary(si))

    if pancdebug:
        pancinclude = r'.*'
        pancexclude = r'components/spma/functions'

    dom = TemplateDomain(dbdomain, dbauthor, logger=logger)
    with CompileKey.merge([CompileKey(domain=dbdomain.name, logger=logger),
                           plenaries.get_key(exclusive=False)]):
        dom.compile(session,
                    panc_debug_include=pancinclude,
                    panc_debug_exclude=pancexclude,
                    cleandeps=cleandeps,
                    locked=True)
    return
def render(self, session, logger, hostname, pancinclude, pancexclude, pancdebug, cleandeps, **arguments):
    """Compile a single host's profile."""
    dbhost = hostname_to_host(session, hostname)
    if pancdebug:
        # --pancdebug is shorthand for this include/exclude pattern pair.
        pancinclude = r'.*'
        pancexclude = r'components/spma/functions'
    dom = TemplateDomain(dbhost.branch, dbhost.sandbox_author,
                         logger=logger)
    dom.compile(session,
                only=[dbhost.fqdn],
                panc_debug_include=pancinclude,
                panc_debug_exclude=pancexclude,
                cleandeps=cleandeps)
    return
def render(self, session, logger, cluster, pancinclude, pancexclude, pancdebug, cleandeps, **arguments):
    """Compile the profiles belonging to a cluster."""
    dbclus = Cluster.get_unique(session, cluster, compel=True)
    if pancdebug:
        # --pancdebug is shorthand for this include/exclude pattern pair.
        pancinclude = r'.*'
        pancexclude = r'components/spma/functions'
    profile_list = add_cluster_data(dbclus)
    dom = TemplateDomain(dbclus.branch, dbclus.sandbox_author,
                         logger=logger)
    dom.compile(session,
                only=profile_list,
                panc_debug_include=pancinclude,
                panc_debug_exclude=pancexclude,
                cleandeps=cleandeps)
    return
def render(self, session, logger, domain, sandbox, pancinclude, pancexclude, pancdebug, cleandeps, **arguments):
    """Compile an entire domain or sandbox."""
    dbdomain, dbauthor = get_branch_and_author(session, logger,
                                               domain=domain,
                                               sandbox=sandbox,
                                               compel=True)
    if pancdebug:
        # --pancdebug is shorthand for this include/exclude pattern pair.
        pancinclude = r'.*'
        pancexclude = r'components/spma/functions'
    dom = TemplateDomain(dbdomain, dbauthor, logger=logger)
    dom.compile(session,
                panc_debug_include=pancinclude,
                panc_debug_exclude=pancexclude,
                cleandeps=cleandeps)
    return
def compile(self, session, logger, dbhost):
    """Write the host's plenary template and compile its profile.

    Raises ArgumentError when the profile is incomplete, i.e. "aq make"
    has not been run for the host yet.
    """
    host_plenary = PlenaryHost(dbhost, logger=logger)
    # Force a host lock as pan might overwrite the profile...
    key = CompileKey(domain=dbhost.branch.name, profile=dbhost.fqdn,
                     logger=logger)
    try:
        lock_queue.acquire(key)
        host_plenary.write(locked=True)
        domain = TemplateDomain(dbhost.branch, dbhost.sandbox_author,
                                logger=logger)
        domain.compile(session, only=[dbhost.fqdn], locked=True)
    except IncompleteError:
        raise ArgumentError("Run aq make for host %s first." % dbhost.fqdn)
    except:
        # Restore the plenary so on-disk state matches the rolled-back DB.
        host_plenary.restore_stash()
        raise
    finally:
        lock_queue.release(key)
def render(self, session, logger, cluster, keepbindings, **arguments):
    """Re-evaluate service bindings for a cluster and recompile it."""
    dbcluster = Cluster.get_unique(session, cluster, compel=True)
    archetype = dbcluster.personality.archetype
    if not archetype.is_compileable:
        raise ArgumentError("{0} is not a compilable archetype "
                            "({1!s}).".format(dbcluster, archetype))

    chooser = Chooser(dbcluster, logger=logger,
                      required_only=not keepbindings)
    chooser.set_required()
    chooser.flush_changes()

    # Force a domain lock as pan might overwrite any of the profiles...
    key = CompileKey.merge([chooser.get_write_key(),
                            CompileKey(domain=dbcluster.branch.name,
                                       logger=logger)])
    try:
        lock_queue.acquire(key)
        chooser.write_plenary_templates(locked=True)
        profile_list = add_cluster_data(dbcluster)
        profile_list.extend(chooser.changed_server_fqdns())
        td = TemplateDomain(dbcluster.branch, dbcluster.sandbox_author,
                            logger=logger)
        td.compile(session, only=profile_list, locked=True)
    except:
        # Okay, cleaned up templates, make sure the caller knows
        # we've aborted so that DB can be appropriately rollback'd.
        chooser.restore_stash()
        raise
    finally:
        lock_queue.release(key)
    return
def remove_branch(config, logger, dbbranch):
    """Delete a branch from the database, disk and template-king.

    Raises ArgumentError when other objects still depend on the branch.
    A failure to delete the git branch is logged but not fatal.
    """
    session = object_session(dbbranch)
    deps = get_branch_dependencies(dbbranch)
    if deps:
        raise ArgumentError("\n".join(deps))

    session.delete(dbbranch)

    domain = TemplateDomain(dbbranch, logger=logger)
    # Can this fail?  Is recovery needed?
    with CompileKey(domain=dbbranch.name, logger=logger):
        # Renamed the loop variable from "dir" to avoid shadowing the
        # builtin of the same name.
        for directory in domain.directories():
            remove_dir(directory, logger=logger)

    kingdir = config.get("broker", "kingdir")
    try:
        run_git(["branch", "-D", dbbranch.name], path=kingdir,
                logger=logger)
    except ProcessException as e:
        # BUGFIX: was "except ProcessException, e" -- comma syntax was
        # removed in Python 3; "as" works on Python 2.6+ as well.
        logger.warning("Error removing branch %s from template-king, "
                       "proceeding anyway: %s", dbbranch.name, e)
def remove_branch(config, logger, dbbranch):
    """Delete a branch from the database, disk and template-king.

    Raises ArgumentError when other objects still depend on the branch.
    A failure to delete the git branch is logged but not fatal.
    """
    session = object_session(dbbranch)
    deps = get_branch_dependencies(dbbranch)
    if deps:
        raise ArgumentError("\n".join(deps))

    session.delete(dbbranch)

    domain = TemplateDomain(dbbranch, logger=logger)
    # Can this fail?  Is recovery needed?
    with CompileKey(domain=dbbranch.name, logger=logger):
        # Renamed the loop variable from "dir" to avoid shadowing the
        # builtin of the same name.
        for directory in domain.directories():
            remove_dir(directory, logger=logger)

    kingdir = config.get("broker", "kingdir")
    try:
        run_git(["branch", "-D", dbbranch.name], path=kingdir,
                logger=logger)
    except ProcessException as e:
        # BUGFIX: was "except ProcessException, e" -- comma syntax was
        # removed in Python 3; "as" works on Python 2.6+ as well.
        logger.warning("Error removing branch %s from template-king, "
                       "proceeding anyway: %s", dbbranch.name, e)
def compile(self, session, dbhost, logger, keepbindings):
    """Re-evaluate service bindings for a host and recompile it.

    Writes the plenary templates produced by the Chooser and compiles the
    host's profile together with any server profiles whose bindings
    changed.  On failure the written templates are restored so the DB
    transaction can be rolled back to a matching state.
    """
    chooser = Chooser(dbhost, logger=logger, required_only=not keepbindings)
    chooser.set_required()
    chooser.flush_changes()

    # Compile the host itself plus every server whose profile was touched.
    hosts = chooser.changed_server_fqdns()
    hosts.add(dbhost.fqdn)

    # Force a host lock as pan might overwrite the profile...
    key = chooser.get_write_key()
    for fqdn in hosts:
        key = CompileKey.merge([key,
                                CompileKey(domain=dbhost.branch.name,
                                           profile=fqdn, logger=logger)])
    try:
        lock_queue.acquire(key)
        chooser.write_plenary_templates(locked=True)
        td = TemplateDomain(dbhost.branch, dbhost.sandbox_author,
                            logger=logger)
        td.compile(session, only=hosts, locked=True)
    except:
        # BUGFIX: dropped the dead "if chooser:" guard -- chooser is always
        # bound before the try block, so the check could never be false.
        chooser.restore_stash()
        # Okay, cleaned up templates, make sure the caller knows
        # we've aborted so that DB can be appropriately rollback'd.
        raise
    finally:
        lock_queue.release(key)
    return
def compile(self, session, dbhost, logger, keepbindings):
    """Re-evaluate service bindings for a host and recompile it."""
    chooser = Chooser(dbhost, logger=logger,
                      required_only=not keepbindings)
    chooser.set_required()
    chooser.flush_changes()

    domain = TemplateDomain(dbhost.branch, dbhost.sandbox_author,
                            logger=logger)
    with chooser.get_key():
        try:
            chooser.write_plenary_templates(locked=True)
            domain.compile(session,
                           only=chooser.plenaries.object_templates,
                           locked=True)
        except:
            # Okay, cleaned up templates, make sure the caller knows
            # we've aborted so that DB can be appropriately rollback'd.
            chooser.restore_stash()
            raise
    return
def render(self, session, logger, cluster, keepbindings, **arguments):
    """Re-evaluate service bindings for a cluster and recompile it."""
    dbcluster = Cluster.get_unique(session, cluster, compel=True)
    if not dbcluster.personality.archetype.is_compileable:
        raise ArgumentError("{0} is not a compilable archetype "
                            "({1!s}).".format(
                                dbcluster, dbcluster.personality.archetype))

    chooser = Chooser(dbcluster, logger=logger,
                      required_only=not keepbindings)
    chooser.set_required()
    chooser.flush_changes()

    # Force a domain lock as pan might overwrite any of the profiles...
    lock_key = CompileKey.merge([chooser.get_write_key(),
                                 CompileKey(domain=dbcluster.branch.name,
                                            logger=logger)])
    try:
        lock_queue.acquire(lock_key)
        chooser.write_plenary_templates(locked=True)
        profiles = add_cluster_data(dbcluster)
        profiles.extend(chooser.changed_server_fqdns())
        td = TemplateDomain(dbcluster.branch, dbcluster.sandbox_author,
                            logger=logger)
        td.compile(session, only=profiles, locked=True)
    except:
        # Okay, cleaned up templates, make sure the caller knows
        # we've aborted so that DB can be appropriately rollback'd.
        chooser.restore_stash()
        raise
    finally:
        lock_queue.release(lock_key)
    return
def render(self, session, logger, cluster, buildstatus, **arguments):
    """Transition a cluster to a new lifecycle status and recompile it."""
    dbcluster = Cluster.get_unique(session, cluster, compel=True)
    dbstatus = ClusterLifecycle.get_unique(session, buildstatus,
                                           compel=True)
    if not dbcluster.status.transition(dbcluster, dbstatus):
        return
    if not dbcluster.personality.archetype.is_compileable:
        return

    session.flush()

    plenaries = PlenaryCollection(logger=logger)
    plenaries.append(Plenary.get_plenary(dbcluster))
    for dbhost in dbcluster.hosts:
        plenaries.append(Plenary.get_plenary(dbhost))

    # Force a host lock as pan might overwrite the profile...
    key = CompileKey(domain=dbcluster.branch.name, logger=logger)
    try:
        lock_queue.acquire(key)
        plenaries.write(locked=True)
        td = TemplateDomain(dbcluster.branch, dbcluster.sandbox_author,
                            logger=logger)
        # CONSISTENCY FIX: pass the profile list via the "only" keyword,
        # as every other TemplateDomain.compile() call in this file does,
        # instead of relying on positional order.
        td.compile(session, only=plenaries.object_templates, locked=True)
    except:
        # Restore plenaries so on-disk state matches the rolled-back DB.
        plenaries.restore_stash()
        raise
    finally:
        lock_queue.release(key)
    return
class CommandReconfigureList(BrokerCommand):
    """Reconfigure a list of hosts in one transaction and one compile."""

    required_parameters = ["list"]

    def render(self, session, logger, list, archetype, personality, buildstatus, osname, osversion, **arguments):
        # NOTE: the parameter is named "list" (shadowing the builtin)
        # because the broker maps command-line options to keyword
        # arguments by name; renaming it would break the interface.
        check_hostlist_size(self.command, self.config, list)
        dbhosts = hostlist_to_hosts(session, list)
        self.reconfigure_list(session, logger, dbhosts, archetype,
                              personality, buildstatus, osname, osversion,
                              **arguments)

    def reconfigure_list(self, session, logger, dbhosts, archetype, personality, buildstatus, osname, osversion, **arguments):
        """Validate, update and recompile the given hosts.

        All parameters are checked up front; all hosts must share one
        branch and one sandbox author.  Service bindings are re-chosen
        per host, then every touched plenary is written and the whole
        branch compiled under a single lock.
        """
        failed = []
        # Check all the parameters up front.
        # Some of these could be more intelligent about defaults
        # (either by checking for unique entries or relying on the list)
        # - starting simple.
        if archetype:
            dbarchetype = Archetype.get_unique(session, archetype,
                                               compel=True)
            if dbarchetype.cluster_type is not None:
                raise ArgumentError("Archetype %s is a cluster archetype" %
                                    dbarchetype.name)
            # TODO: Once OS is a first class object this block needs
            # to check that either OS is also being reset or that the
            # OS is valid for the new archetype.
        else:
            dbarchetype = None
        if personality:
            dbpersonality = Personality.get_unique(session,
                                                   name=personality,
                                                   archetype=dbarchetype,
                                                   compel=True)
        if osname and not osversion:
            raise ArgumentError("Please specify --osversion for OS %s." %
                                osname)
        if osversion:
            if not osname:
                raise ArgumentError("Please specify --osname to use with "
                                    "OS version %s." % osversion)
            # Linux model names are the same under aurora and aquilon, so
            # allowing to omit --archetype would not be useful
            if not archetype:
                raise ArgumentError("Please specify --archetype for OS "
                                    "%s, version %s." %
                                    (osname, osversion))
            dbos = OperatingSystem.get_unique(session, name=osname,
                                              version=osversion,
                                              archetype=dbarchetype,
                                              compel=True)
        else:
            dbos = None
        if buildstatus:
            dbstatus = HostLifecycle.get_unique(session, buildstatus,
                                                compel=True)
        # Take a shortcut if there's nothing to do, but only after all the
        # other parameters have been checked
        if not dbhosts:
            return
        personalities = {}
        branches = {}
        authors = {}
        # Do any final cross-list or dependency checks before entering
        # the Chooser loop.
        for dbhost in dbhosts:
            if dbhost.branch in branches:
                branches[dbhost.branch].append(dbhost)
            else:
                branches[dbhost.branch] = [dbhost]
            if dbhost.sandbox_author in authors:
                authors[dbhost.sandbox_author].append(dbhost)
            else:
                authors[dbhost.sandbox_author] = [dbhost]
            if dbos and not dbarchetype and \
               dbhost.archetype != dbos.archetype:
                failed.append("{0}: Cannot change operating system because "
                              "it needs {1:l} instead of "
                              "{2:l}.".format(dbhost.fqdn, dbhost.archetype,
                                              dbos.archetype))
            if dbarchetype and not dbos and \
               dbhost.operating_system.archetype != dbarchetype:
                failed.append("{0}: Cannot change archetype because {1:l} "
                              "needs {2:l}.".format(
                                  dbhost.fqdn, dbhost.operating_system,
                                  dbhost.operating_system.archetype))
            if (personality and dbhost.cluster and
                    len(dbhost.cluster.allowed_personalities) > 0 and
                    dbpersonality not in
                    dbhost.cluster.allowed_personalities):
                allowed = ["%s/%s" % (p.archetype.name, p.name)
                           for p in dbhost.cluster.allowed_personalities]
                failed.append("{0}: The {1:l} is not allowed by {2}. "
                              "Specify one of {3}.".format(
                                  dbhost.fqdn, dbpersonality,
                                  dbhost.cluster, allowed))
            if personality:
                personalities[dbhost.fqdn] = dbpersonality
            elif archetype:
                personalities[dbhost.fqdn] = Personality.get_unique(
                    session, name=dbhost.personality.name,
                    archetype=dbarchetype)
                if not personalities[dbhost.fqdn]:
                    failed.append("%s: No personality %s found for "
                                  "archetype %s." %
                                  (dbhost.fqdn, dbhost.personality.name,
                                   dbarchetype.name))
        if failed:
            raise ArgumentError("Cannot modify the following hosts:\n%s" %
                                "\n".join(failed))
        if len(branches) > 1:
            keys = branches.keys()
            branch_sort = lambda x, y: cmp(len(branches[x]),
                                           len(branches[y]))
            keys.sort(cmp=branch_sort)
            stats = ["{0:d} hosts in {1:l}".format(len(branches[branch]),
                                                   branch)
                     for branch in keys]
            raise ArgumentError("All hosts must be in the same domain or "
                                "sandbox:\n%s" % "\n".join(stats))
        dbbranch = branches.keys()[0]
        if len(authors) > 1:
            keys = authors.keys()
            author_sort = lambda x, y: cmp(len(authors[x]),
                                           len(authors[y]))
            keys.sort(cmp=author_sort)
            stats = ["%s hosts with sandbox author %s" %
                     (len(authors[author]), author.name)
                     for author in keys]
            raise ArgumentError("All hosts must be managed by the same "
                                "sandbox author:\n%s" % "\n".join(stats))
        dbauthor = authors.keys()[0]

        failed = []
        choosers = []
        for dbhost in dbhosts:
            if dbhost.fqdn in personalities:
                dbhost.personality = personalities[dbhost.fqdn]
                session.add(dbhost)
            if osversion:
                dbhost.operating_system = dbos
                session.add(dbhost)
            if buildstatus:
                dbhost.status.transition(dbhost, dbstatus)
                session.add(dbhost)
        session.flush()

        logger.client_info("Verifying service bindings.")
        for dbhost in dbhosts:
            if dbhost.archetype.is_compileable:
                if arguments.get("keepbindings", None):
                    chooser = Chooser(dbhost, logger=logger,
                                      required_only=False)
                else:
                    chooser = Chooser(dbhost, logger=logger,
                                      required_only=True)
                choosers.append(chooser)
                try:
                    chooser.set_required()
                except ArgumentError as e:
                    # BUGFIX: was "except ArgumentError, e" -- comma syntax
                    # was removed in Python 3; "as" works on 2.6+ as well.
                    failed.append(str(e))
        if failed:
            raise ArgumentError("The following hosts failed service "
                                "binding:\n%s" % "\n".join(failed))
        session.flush()
        logger.info("reconfigure_hostlist processing: %s" %
                    ",".join([str(dbhost.fqdn) for dbhost in dbhosts]))

        if not choosers:
            return
        # Optimize so that duplicate service plenaries are not re-written
        templates = set()
        for chooser in choosers:
            # chooser.plenaries is a PlenaryCollection - this flattens
            # that top level.
            templates.update(chooser.plenaries.plenaries)
        # Don't bother locking until every possible check before the
        # actual writing and compile is done.  This will allow for fast
        # turnaround on errors (no need to wait for a lock if there's
        # a missing service map entry or something).
        # The lock must be over at least the domain, but could be over
        # all if (for example) service plenaries need to change.
        key = CompileKey.merge([p.get_write_key() for p in templates] +
                               [CompileKey(domain=dbbranch.name,
                                           logger=logger)])
        try:
            lock_queue.acquire(key)
            logger.client_info("Writing %s plenary templates.",
                               len(templates))
            # FIXME: if one of the templates raises IncompleteError (e.g.
            # a host should be in a cluster, but it is not), then we return
            # an InternalError to the client, which is not nice
            for template in templates:
                logger.debug("Writing %s", template)
                template.write(locked=True)
            td = TemplateDomain(dbbranch, dbauthor, logger=logger)
            td.compile(session, locked=True)
        except:
            logger.client_info("Restoring plenary templates.")
            for template in templates:
                logger.debug("Restoring %s", template)
                template.restore_stash()
            # Okay, cleaned up templates, make sure the caller knows
            # we've aborted so that DB can be appropriately rollback'd.
            raise
        finally:
            lock_queue.release(key)
        return
def resetadvertisedstatus_list(self, session, logger, dbhosts):
    """Clear advertise_status on every host and recompile them."""
    branches = {}
    authors = {}
    failed = []
    compileable = []

    # Do any cross-list or dependency checks
    for dbhost in dbhosts:
        ## if archetype is compileable only then
        ## validate for branches and domains
        # NOTE(review): per the comment above, the branch/author grouping
        # is scoped to compileable hosts -- confirm against history.
        if dbhost.archetype.is_compileable:
            compileable.append(dbhost.fqdn)
            branches.setdefault(dbhost.branch, []).append(dbhost)
            authors.setdefault(dbhost.sandbox_author, []).append(dbhost)
        if dbhost.status.name == 'ready':
            failed.append("{0:l} is in ready status, "
                          "advertised status can be reset only "
                          "when host is in non ready state".format(dbhost))

    if failed:
        raise ArgumentError("Cannot modify the following hosts:\n%s" %
                            "\n".join(failed))
    if len(branches) > 1:
        keys = branches.keys()
        keys.sort(cmp=lambda x, y: cmp(len(branches[x]),
                                       len(branches[y])))
        stats = ["{0:d} hosts in {1:l}".format(len(branches[branch]),
                                               branch)
                 for branch in keys]
        raise ArgumentError("All hosts must be in the same domain or "
                            "sandbox:\n%s" % "\n".join(stats))
    if len(authors) > 1:
        keys = authors.keys()
        keys.sort(cmp=lambda x, y: cmp(len(authors[x]),
                                       len(authors[y])))
        stats = ["%s hosts with sandbox author %s" %
                 (len(authors[author]), author.name)
                 for author in keys]
        raise ArgumentError("All hosts must be managed by the same "
                            "sandbox author:\n%s" % "\n".join(stats))

    plenaries = PlenaryCollection(logger=logger)
    for dbhost in dbhosts:
        dbhost.advertise_status = False
        session.add(dbhost)
        plenaries.append(PlenaryHost(dbhost, logger=logger))
    session.flush()

    dbbranch = branches.keys()[0]
    dbauthor = authors.keys()[0]
    key = CompileKey.merge([plenaries.get_write_key()])
    try:
        lock_queue.acquire(key)
        plenaries.stash()
        plenaries.write(locked=True)
        td = TemplateDomain(dbbranch, dbauthor, logger=logger)
        td.compile(session, only=compileable, locked=True)
    except:
        # Restore plenaries so on-disk state matches the rolled-back DB.
        plenaries.restore_stash()
        raise
    finally:
        lock_queue.release(key)
    return
def resetadvertisedstatus_list(self, session, logger, dbhosts):
    """Clear advertise_status on every host and recompile them."""
    branches = {}
    authors = {}
    failed = []
    compileable = []

    # Do any cross-list or dependency checks
    for dbhost in dbhosts:
        ## if archetype is compileable only then
        ## validate for branches and domains
        # NOTE(review): per the comment above, the branch/author grouping
        # is scoped to compileable hosts -- confirm against history.
        if dbhost.archetype.is_compileable:
            compileable.append(dbhost.fqdn)
            if dbhost.branch in branches:
                branches[dbhost.branch].append(dbhost)
            else:
                branches[dbhost.branch] = [dbhost]
            if dbhost.sandbox_author in authors:
                authors[dbhost.sandbox_author].append(dbhost)
            else:
                authors[dbhost.sandbox_author] = [dbhost]
        if dbhost.status.name == 'ready':
            failed.append("{0:l} is in ready status, "
                          "advertised status can be reset only "
                          "when host is in non ready state".format(dbhost))

    if failed:
        raise ArgumentError("Cannot modify the following hosts:\n%s" %
                            "\n".join(failed))
    if len(branches) > 1:
        branch_keys = branches.keys()
        branch_keys.sort(cmp=lambda x, y: cmp(len(branches[x]),
                                              len(branches[y])))
        stats = ["{0:d} hosts in {1:l}".format(len(branches[branch]),
                                               branch)
                 for branch in branch_keys]
        raise ArgumentError("All hosts must be in the same domain or "
                            "sandbox:\n%s" % "\n".join(stats))
    if len(authors) > 1:
        author_keys = authors.keys()
        author_keys.sort(cmp=lambda x, y: cmp(len(authors[x]),
                                              len(authors[y])))
        stats = ["%s hosts with sandbox author %s" %
                 (len(authors[author]), author.name)
                 for author in author_keys]
        raise ArgumentError("All hosts must be managed by the same "
                            "sandbox author:\n%s" % "\n".join(stats))

    plenaries = PlenaryCollection(logger=logger)
    for dbhost in dbhosts:
        dbhost.advertise_status = False
        session.add(dbhost)
        plenaries.append(PlenaryHost(dbhost, logger=logger))
    session.flush()

    dbbranch = branches.keys()[0]
    dbauthor = authors.keys()[0]
    key = CompileKey.merge([plenaries.get_write_key()])
    try:
        lock_queue.acquire(key)
        plenaries.stash()
        plenaries.write(locked=True)
        td = TemplateDomain(dbbranch, dbauthor, logger=logger)
        td.compile(session, only=compileable, locked=True)
    except:
        # Restore plenaries so on-disk state matches the rolled-back DB.
        plenaries.restore_stash()
        raise
    finally:
        lock_queue.release(key)
    return