def del_resource(session, logger, dbresource, dsdb_callback=None, **arguments):
    """Detach a resource from its holder and update the templates on disk.

    Removes *dbresource* from its holder, rewrites the holder's plenary,
    deletes the resource's plenary and optionally notifies DSDB via
    *dsdb_callback*.  All template operations happen under a merged compile
    lock so readers never see a half-updated profile.
    """
    holder = dbresource.holder
    holder_plenary = Plenary.get_plenary(holder.holder_object, logger=logger)
    remove_plenary = Plenary.get_plenary(dbresource, logger=logger)
    domain = holder.holder_object.branch.name

    holder.resources.remove(dbresource)
    session.flush()

    # Lock both templates together: one is removed, the other rewritten.
    key = CompileKey.merge([remove_plenary.get_remove_key(),
                            holder_plenary.get_write_key()])
    try:
        lock_queue.acquire(key)
        remove_plenary.stash()
        try:
            holder_plenary.write(locked=True)
        except IncompleteError:
            # The holder can no longer be compiled; drop its built profile.
            holder_plenary.cleanup(domain, locked=True)
        remove_plenary.remove(locked=True)
        if dsdb_callback:
            dsdb_callback(session, logger, holder, dbresource, **arguments)
    except:
        # NOTE(review): holder_plenary is never stash()ed explicitly here --
        # presumably write() stashes on demand; confirm before relying on it.
        holder_plenary.restore_stash()
        remove_plenary.restore_stash()
        raise
    finally:
        lock_queue.release(key)

    return
def stash_services(self):
    """Queue plenaries for service instances and the servers they touch.

    Service instances that publish a client list get their server-side
    plenary queued; any host or cluster serving such an instance is queued
    for a rewrite as well, provided it has a compileable profile and lives
    in the same domain/sandbox as the object being processed.
    """
    affected_servers = set()

    for si in self.instances_bound.union(self.instances_unbound):
        if not si.service.need_client_list:
            continue
        for backend in si.servers:
            if backend.host:
                affected_servers.add(backend.host)
            if backend.cluster:
                affected_servers.add(backend.cluster)
        self.plenaries.append(PlenaryServiceInstanceServer.get_plenary(si))

    for server in affected_servers:
        # Servers without a profile have nothing to rewrite.
        if not server.personality.archetype.is_compileable:
            continue
        # Servers living in another domain/sandbox are out of scope here.
        same_branch = (server.branch == self.dbobj.branch and
                       server.sandbox_author_id == self.dbobj.sandbox_author_id)
        if not same_branch:
            continue
        self.plenaries.append(Plenary.get_plenary(server))
        if isinstance(server, Cluster):
            for member in server.hosts:
                self.plenaries.append(Plenary.get_plenary(member))
def del_cluster(session, logger, dbcluster, config):
    """Delete a cluster after verifying nothing still depends on it.

    Refuses to delete while member clusters or hosts remain.  On success the
    cluster row goes away, the cluster's (and its resources') templates are
    removed from disk, and client notifications are triggered.
    """
    check_no_provided_service(dbcluster)

    if hasattr(dbcluster, 'members') and dbcluster.members:
        members = ", ".join([c.name for c in dbcluster.members])
        raise ArgumentError("%s is still in use by clusters: %s." %
                            (format(dbcluster), members))
    elif dbcluster.hosts:
        hosts = ", ".join([h.fqdn for h in dbcluster.hosts])
        raise ArgumentError("%s is still in use by hosts: %s." %
                            (format(dbcluster), hosts))

    plenaries = PlenaryCollection(logger=logger)
    plenaries.append(Plenary.get_plenary(dbcluster))
    if dbcluster.resholder:
        for dbresource in dbcluster.resholder.resources:
            plenaries.append(Plenary.get_plenary(dbresource))

    session.delete(dbcluster)
    session.flush()

    plenaries.remove(remove_profile=True)
    trigger_notifications(config, logger, CLIENT_INFO)

    return
def add_resource(session, logger, holder, dbresource, dsdb_callback=None,
                 **arguments):
    """Attach a resource to a holder and write out both plenaries.

    Idempotent with respect to the attachment itself: the resource is only
    appended when not already present.  Template writes happen under a merged
    compile lock; on failure both templates are rolled back.

    NOTE(review): dsdb_callback is invoked without *holder*, unlike the
    del_resource counterpart -- confirm the expected callback signature.
    """
    if dbresource not in holder.resources:
        holder.resources.append(dbresource)

    holder_plenary = Plenary.get_plenary(holder.holder_object, logger=logger)
    res_plenary = Plenary.get_plenary(dbresource, logger=logger)
    domain = holder.holder_object.branch.name

    session.flush()

    key = CompileKey.merge([res_plenary.get_write_key(),
                            holder_plenary.get_write_key()])
    try:
        lock_queue.acquire(key)
        res_plenary.write(locked=True)
        try:
            holder_plenary.write(locked=True)
        except IncompleteError:
            # The holder cannot be compiled yet; drop its built profile.
            holder_plenary.cleanup(domain, locked=True)
        if dsdb_callback:
            dsdb_callback(session, logger, dbresource, **arguments)
    except:
        res_plenary.restore_stash()
        holder_plenary.restore_stash()
        raise
    finally:
        lock_queue.release(key)

    return
def render(self, session, logger, cluster, buildstatus, **arguments):
    """Change a cluster's lifecycle status and recompile affected profiles.

    A no-op when the status transition is rejected or when the cluster's
    archetype is not compileable.
    """
    dbcluster = Cluster.get_unique(session, cluster, compel=True)
    dbstatus = ClusterLifecycle.get_instance(session, buildstatus)
    if not dbcluster.status.transition(dbcluster, dbstatus):
        return

    if not dbcluster.personality.archetype.is_compileable:
        return

    session.flush()

    # The status shows up in the cluster's and every member host's profile.
    plenaries = PlenaryCollection(logger=logger)
    plenaries.append(Plenary.get_plenary(dbcluster))
    for dbhost in dbcluster.hosts:
        plenaries.append(Plenary.get_plenary(dbhost))

    td = TemplateDomain(dbcluster.branch, dbcluster.sandbox_author,
                        logger=logger)
    # Force a host lock as pan might overwrite the profile...
    with plenaries.get_key():
        plenaries.stash()
        try:
            plenaries.write(locked=True)
            td.compile(session, only=plenaries.object_templates, locked=True)
        except:
            # Roll the on-disk templates back to their stashed content.
            plenaries.restore_stash()
            raise
    return
def render(self, session, logger, domain, sandbox, pancinclude, pancexclude, pancdebug, cleandeps, **arguments): (dbdomain, dbauthor) = get_branch_and_author(session, logger, domain=domain, sandbox=sandbox, compel=True) # Grab a shared lock on personalities and services used by the domain. # Object templates (hosts, clusters) are protected by the domain lock. plenaries = PlenaryCollection(logger=logger) q1 = session.query(Personality) q1 = q1.join(Host) q1 = q1.filter(and_(Host.branch == dbdomain, Host.sandbox_author == dbauthor)) q1 = q1.reset_joinpoint() q1 = q1.options(joinedload('paramholder'), subqueryload('paramholder.parameters')) q2 = session.query(Personality) q2 = q2.join(Cluster) q2 = q2.filter(and_(Cluster.branch == dbdomain, Cluster.sandbox_author == dbauthor)) q2 = q2.reset_joinpoint() q2 = q2.options(joinedload('paramholder'), subqueryload('paramholder.parameters')) for dbpers in q1.union(q2): plenaries.append(Plenary.get_plenary(dbpers)) q1 = session.query(ServiceInstance) q1 = q1.join(ServiceInstance.clients) q1 = q1.filter(and_(Host.branch == dbdomain, Host.sandbox_author == dbauthor)) q2 = session.query(ServiceInstance) q2 = q2.join(ServiceInstance.cluster_clients) q2 = q2.filter(and_(Cluster.branch == dbdomain, Cluster.sandbox_author == dbauthor)) for si in q1.union(q2): plenaries.append(Plenary.get_plenary(si)) if pancdebug: pancinclude = r'.*' pancexclude = r'components/spma/functions' dom = TemplateDomain(dbdomain, dbauthor, logger=logger) with CompileKey.merge([CompileKey(domain=dbdomain.name, logger=logger), plenaries.get_key(exclusive=False)]): dom.compile(session, panc_debug_include=pancinclude, panc_debug_exclude=pancexclude, cleandeps=cleandeps, locked=True) return
def prestash_primary(self):
    """Queue plenaries for this object plus everything its profile pulls in."""
    queue = self.plenaries.append
    queue(Plenary.get_plenary(self.dbobj))
    # Rewriting the object's plenary may invalidate what the hardware
    # template contains, so refresh that known dependency as well.
    queue(Plenary.get_plenary(self.dbobj.hardware_entity))
    if self.dbobj.resholder:
        for resource in self.dbobj.resholder.resources:
            queue(Plenary.get_plenary(resource))
def add_cluster_dependencies(cluster):
    """Queue the plenaries of a cluster and of everything it drags along."""
    plenaries = self.plenaries
    plenaries.append(Plenary.get_plenary(cluster))
    for member in cluster.hosts:
        plenaries.append(Plenary.get_plenary(member))
    if cluster.resholder:
        for resource in cluster.resholder.resources:
            plenaries.append(Plenary.get_plenary(resource))
    # ESX clusters also embed their network device in the profile.
    if isinstance(cluster, EsxCluster) and cluster.network_device:
        plenaries.append(Plenary.get_plenary(cluster.network_device))
def prestash_primary(self):
    """Stash and queue every plenary this host operation may rewrite."""
    def _track(plenary):
        # Remember the on-disk content so a failure can roll it back.
        plenary.stash()
        self.plenaries.append(plenary)

    _track(Plenary.get_plenary(self.dbhost, logger=self.logger))
    # Rewriting the host plenary may affect the machine template too,
    # so refresh that known dependency as well.
    _track(Plenary.get_plenary(self.dbhost.machine, logger=self.logger))
    if self.dbhost.resholder:
        for dbres in self.dbhost.resholder.resources:
            _track(Plenary.get_plenary(dbres, logger=self.logger))
def render(self, session, logger, hostname, **arguments):
    """Reset a host's advertised status flag and recompile its profile.

    Raises:
        ArgumentError: when the host is in 'ready' status (resetting only
            makes sense for non-ready hosts), or when the profile cannot be
            written yet ("aq make" has not been run).
    """
    dbhost = hostname_to_host(session, hostname)
    if dbhost.status.name == 'ready':
        raise ArgumentError("{0:l} is in ready status, "
                            "advertised status can be reset only "
                            "when host is in non ready state."
                            .format(dbhost))
    dbhost.advertise_status = False
    session.flush()

    td = TemplateDomain(dbhost.branch, dbhost.sandbox_author, logger=logger)
    plenary = Plenary.get_plenary(dbhost, logger=logger)
    # Force a host lock as pan might overwrite the profile...
    with plenary.get_key():
        try:
            plenary.write(locked=True)
            td.compile(session, only=plenary.object_templates, locked=True)
        except IncompleteError:
            raise ArgumentError("Run aq make for host %s first." %
                                dbhost.fqdn)
        except:
            # Put the previous template content back on disk.
            plenary.restore_stash()
            raise
    return
def add_cluster_data(cluster, logger):
    """Stash and queue plenaries for a cluster's hosts, resources and switch.

    NOTE(review): the *logger* parameter is never used -- the body reads
    ``self.logger`` from the enclosing scope instead; confirm whether the
    parameter can be dropped or should be honoured.
    """
    for dbhost in cluster.hosts:
        host_plenary = Plenary.get_plenary(dbhost, logger=self.logger)
        host_plenary.stash()
        self.plenaries.append(host_plenary)
    if cluster.resholder:
        for dbres in cluster.resholder.resources:
            resource_plenary = Plenary.get_plenary(dbres, logger=self.logger)
            resource_plenary.stash()
            self.plenaries.append(resource_plenary)
    # ESX clusters embed their switch in the profile as well.
    if isinstance(cluster, EsxCluster) and cluster.switch:
        sw_plenary = Plenary.get_plenary(cluster.switch, logger=self.logger)
        sw_plenary.stash()
        self.plenaries.append(sw_plenary)
def render(self, session, logger, service, **arguments):
    """Delete a service once nothing requires it and it has no instances."""
    dbservice = Service.get_unique(session, service, compel=True)

    if dbservice.archetypes:
        required_by = ", ".join(archetype.name
                                for archetype in dbservice.archetypes)
        raise ArgumentError("Service %s is still required by the following "
                            "archetypes: %s." % (dbservice.name, required_by))
    if dbservice.personalities:
        required_by = ", ".join("%s (%s)" % (pers.name, pers.archetype.name)
                                for pers in dbservice.personalities)
        raise ArgumentError("Service %s is still required by the following "
                            "personalities: %s." % (dbservice.name,
                                                    required_by))
    if dbservice.instances:
        raise ArgumentError("Service %s still has instances defined and "
                            "cannot be deleted." % dbservice.name)

    session.delete(dbservice)
    session.flush()

    # Drop the on-disk template last, after the DB change went through.
    plenary_info = Plenary.get_plenary(dbservice, logger=logger)
    plenary_info.remove()

    return
def render(self, session, logger, share, latency_threshold, comments,
           **arguments):
    """Update every resource incarnation of a named share.

    A share may back several resources; the same attribute update is applied
    to each Share row carrying the given name and all affected plenaries are
    rewritten.

    Raises:
        ArgumentError: if no resource uses the share.
    """
    validate_nlist_key("share", share)

    # Fetch the rows once instead of issuing a COUNT query followed by a
    # second query for the same rows.
    dbshares = session.query(Share).filter_by(name=share).all()
    if not dbshares:
        raise ArgumentError("Share %s is not used on any resource and "
                            "cannot be modified" % share)

    plenaries = PlenaryCollection(logger=logger)
    for dbshare in dbshares:
        # Compare against None so falsy-but-valid values (e.g. a latency
        # threshold of 0) are not silently ignored.
        if latency_threshold is not None:
            dbshare.latency_threshold = latency_threshold
        if comments is not None:
            dbshare.comments = comments
        plenaries.append(Plenary.get_plenary(dbshare))

    session.flush()
    plenaries.write()

    return
def render(self, session, logger, city, country, fullname, comments,
           timezone, campus, **arguments):
    """Add a city under a country or a campus and register it in DSDB.

    The plenary is written under its compile lock first; if the DSDB update
    fails, the template on disk is rolled back and the DB transaction is
    aborted by the re-raised exception.
    """
    # The parent is either a country or a campus, depending on the options.
    if country:
        dbparent = Country.get_unique(session, country, compel=True)
    else:
        dbparent = Campus.get_unique(session, campus, compel=True)

    dbcity = add_location(session, City, city, dbparent, fullname=fullname,
                          comments=comments, timezone=timezone)
    session.flush()

    plenary = Plenary.get_plenary(dbcity, logger=logger)
    with plenary.get_key():
        try:
            plenary.write(locked=True)

            dsdb_runner = DSDBRunner(logger=logger)
            dsdb_runner.add_city(city, dbcity.country.name, fullname)
            dsdb_runner.commit_or_rollback()
        except:
            plenary.restore_stash()
            raise
    return
def render(self, session, logger, archetype, personality, feature, model,
           interface, path, value=None, comments=None, **arguments):
    """Add a parameter value under the given path for a personality.

    Rejects personalities whose archetype is not compileable, then stores
    the parameter and rewrites the personality plenary.
    """
    param_holder = get_parameter_holder(session, archetype, personality,
                                        auto_include=True)

    holder_obj = param_holder.holder_object
    if isinstance(holder_obj, Personality) and \
            not holder_obj.archetype.is_compileable:
        raise ArgumentError("{0} is not compileable.".format(
            holder_obj.archetype))

    dbparameter = self.process_parameter(session, param_holder, feature,
                                         model, interface, path, value,
                                         comments)
    session.add(dbparameter)
    session.flush()

    Plenary.get_plenary(param_holder.personality).write()
def render(self, session, logger, archetype, personality, feature, model,
           interface, path, value=None, comments=None, **arguments):
    """Add a parameter value under the given path for a personality.

    Personalities with a non-compileable archetype are rejected, since the
    parameter would never show up in a compiled profile.
    """
    param_holder = get_parameter_holder(session, archetype, personality,
                                        auto_include=True)

    owner = param_holder.holder_object
    not_compileable = (isinstance(owner, Personality)
                       and not owner.archetype.is_compileable)
    if not_compileable:
        raise ArgumentError("{0} is not compileable.".format(owner.archetype))

    dbparameter = self.process_parameter(session, param_holder, feature,
                                         model, interface, path, value,
                                         comments)
    session.add(dbparameter)
    session.flush()

    plenary = Plenary.get_plenary(param_holder.personality)
    plenary.write()
def render(self, generate, session, logger, network_device, **kwargs):
    """Show the plenary template of a network device.

    With *generate* the content is rebuilt from the database; otherwise the
    file currently on disk is returned.
    """
    dbnetdev = NetworkDevice.get_unique(session, network_device, compel=True)
    plenary_info = Plenary.get_plenary(dbnetdev, logger=logger)
    if generate:
        return plenary_info._generate_content()
    return plenary_info.read()
def render(self, generate, session, logger, city, **kwargs):
    """Show the plenary template of a city.

    Returns freshly generated content when *generate* is set, otherwise the
    template currently on disk.
    """
    dbcity = City.get_unique(session, city, compel=True)
    plenary_info = Plenary.get_plenary(dbcity, logger=logger)
    return (plenary_info._generate_content() if generate
            else plenary_info.read())
def render(self, session, logger, machine, generate, **kwargs):
    """Show the plenary template of a machine.

    Returns freshly generated content when *generate* is set, otherwise the
    template currently on disk.
    """
    dbmachine = Machine.get_unique(session, machine, compel=True)
    plenary_info = Plenary.get_plenary(dbmachine, logger=logger)
    return (plenary_info._generate_content() if generate
            else plenary_info.read())
def render(self, session, logger, network_device, label, model, type, ip,
           interface, iftype, mac, vendor, serial, comments, **arguments):
    """Add a network device with its primary name, interface and address.

    Creates the hardware entity, grabs/creates the primary DNS record,
    assigns the IP to the named interface, then writes the plenary and
    registers the device in DSDB under the device's compile lock.
    """
    dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                               compel=True)

    if not dbmodel.model_type.isNetworkDeviceType():
        raise ArgumentError("This command can only be used to "
                            "add network devices.")

    dblocation = get_location(session, compel=True, **arguments)

    dbdns_rec, newly_created = grab_address(session, network_device, ip,
                                            allow_restricted_domain=True,
                                            allow_reserved=True,
                                            preclude=True)
    if not label:
        # Fall back to the short name of the primary name.
        label = dbdns_rec.fqdn.name
        try:
            NetworkDevice.check_label(label)
        except ArgumentError:
            raise ArgumentError("Could not deduce a valid hardware label "
                                "from the network device name. Please specify "
                                "--label.")

    # FIXME: What do the error messages for an invalid enum (switch_type)
    # look like?
    dbnetdev = NetworkDevice(label=label, switch_type=type,
                             location=dblocation, model=dbmodel,
                             serial_no=serial, comments=comments)
    session.add(dbnetdev)
    dbnetdev.primary_name = dbdns_rec

    check_netdev_iftype(iftype)

    dbinterface = get_or_create_interface(session, dbnetdev,
                                          name=interface, mac=mac,
                                          interface_type=iftype)
    dbnetwork = get_net_id_from_ip(session, ip)
    # TODO: should we call check_ip_restrictions() here?
    assign_address(dbinterface, ip, dbnetwork, logger=logger)

    session.flush()

    plenary = Plenary.get_plenary(dbnetdev, logger=logger)
    with plenary.get_key():
        plenary.stash()
        try:
            plenary.write(locked=True)

            dsdb_runner = DSDBRunner(logger=logger)
            dsdb_runner.update_host(dbnetdev, None)
            dsdb_runner.commit_or_rollback("Could not add network device to DSDB")
        except:
            # Roll the template back; the DB transaction aborts via raise.
            plenary.restore_stash()
            raise

    return
def get_key(self, exclusive=True):
    """Return the compile key protecting this machine plenary.

    A real compile key is only needed when the machine shows up inside an
    object profile: either a host is attached, or the machine is a virtual
    machine living in a container.  Otherwise a no-op lock suffices.
    """
    if not exclusive:
        # CompileKey() does not support shared mode
        raise InternalError("Shared locks are not implemented for machine "
                            "plenaries.")

    keylist = [NoLockKey(logger=self.logger)]
    if not inspect(self.dbobj).deleted:
        # Collect the keys of whichever profiles embed this machine.
        for owner in (self.dbobj.host, self.dbobj.vm_container):
            if owner:
                owner_plenary = Plenary.get_plenary(owner, logger=self.logger)
                keylist.append(owner_plenary.get_key())
    return CompileKey.merge(keylist)
def prestash_primary(self):
    """Stash and queue every plenary a cluster status change may rewrite.

    Covers the cluster itself and its resources; for metaclusters, every
    member cluster is covered too, along with each member's hosts, resources
    and (for ESX clusters) switch.
    """
    def add_cluster_data(cluster, logger):
        # NOTE(review): *logger* is unused -- the body reads self.logger
        # from the closure instead; confirm before dropping the parameter.
        for dbhost in cluster.hosts:
            host_plenary = Plenary.get_plenary(dbhost, logger=self.logger)
            host_plenary.stash()
            self.plenaries.append(host_plenary)
        if cluster.resholder:
            for dbres in cluster.resholder.resources:
                resource_plenary = Plenary.get_plenary(dbres,
                                                       logger=self.logger)
                resource_plenary.stash()
                self.plenaries.append(resource_plenary)
        # ESX clusters embed their switch in the profile as well.
        if isinstance(cluster, EsxCluster) and cluster.switch:
            sw_plenary = Plenary.get_plenary(cluster.switch,
                                             logger=self.logger)
            sw_plenary.stash()
            self.plenaries.append(sw_plenary)

    plenary_cluster = Plenary.get_plenary(self.dbcluster, logger=self.logger)
    plenary_cluster.stash()
    self.plenaries.append(plenary_cluster)

    if self.dbcluster.resholder:
        for dbres in self.dbcluster.resholder.resources:
            resource_plenary = Plenary.get_plenary(dbres, logger=self.logger)
            resource_plenary.stash()
            self.plenaries.append(resource_plenary)

    if isinstance(self.dbcluster, MetaCluster):
        # Metaclusters pull in every member cluster and its dependencies.
        for c in self.dbcluster.members:
            plenary_cluster = Plenary.get_plenary(c, logger=self.logger)
            plenary_cluster.stash()
            self.plenaries.append(plenary_cluster)
            add_cluster_data(c, self.logger)
    else:
        add_cluster_data(self.dbcluster, self.logger)
def render(self, session, logger, hostname, manager, interface, mac,
           comments, **arguments):
    """Add a management interface and its address to a host's machine.

    The manager name defaults to "<shortname>r.<dns domain>" of the host's
    primary name.  The machine plenary and DSDB are updated under the
    plenary's compile lock, rolling back the template on failure.

    Raises:
        ArgumentError: if the management interface already has addresses.
    """
    dbhost = hostname_to_host(session, hostname)
    dbmachine = dbhost.hardware_entity
    oldinfo = DSDBRunner.snapshot_hw(dbmachine)

    if not manager:
        manager = "%sr.%s" % (dbmachine.primary_name.fqdn.name,
                              dbmachine.primary_name.fqdn.dns_domain.name)

    dbinterface = get_or_create_interface(session, dbmachine,
                                          name=interface, mac=mac,
                                          interface_type='management')
    addrs = ", ".join(["%s [%s]" % (addr.logical_name, addr.ip)
                       for addr in dbinterface.assignments])
    if addrs:
        raise ArgumentError("{0} already has the following addresses: "
                            "{1}.".format(dbinterface, addrs))

    audit_results = []
    ip = generate_ip(session, logger, dbinterface, compel=True,
                     audit_results=audit_results, **arguments)

    dbdns_rec, newly_created = grab_address(session, manager, ip,
                                            comments=comments,
                                            preclude=True)

    assign_address(dbinterface, ip, dbdns_rec.network, logger=logger)
    session.flush()

    plenary_info = Plenary.get_plenary(dbmachine, logger=logger)
    with plenary_info.get_key():
        try:
            plenary_info.write(locked=True)

            dsdb_runner = DSDBRunner(logger=logger)
            dsdb_runner.update_host(dbmachine, oldinfo)
            dsdb_runner.commit_or_rollback("Could not add host to DSDB")
        except:
            plenary_info.restore_stash()
            raise

    if dbmachine.host:
        # XXX: Host needs to be reconfigured.
        pass

    for name, value in audit_results:
        self.audit_result(session, name, value, **arguments)
    return
def render(self, session, logger, hostname, domain, sandbox, force,
           **arguments):
    """Move a host to another domain or sandbox.

    Refuses cluster members (they are managed at the cluster level) and,
    unless *force* is given, validates that the source branch's commits are
    present in the target branch before moving.
    """
    (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                 domain=domain,
                                                 sandbox=sandbox,
                                                 compel=True)

    if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
        raise ArgumentError("Managing hosts to {0:l} is not allowed."
                            .format(dbbranch))

    dbhost = hostname_to_host(session, hostname)
    dbsource = dbhost.branch
    dbsource_author = dbhost.sandbox_author

    if dbhost.cluster:
        raise ArgumentError("Cluster nodes must be managed at the "
                            "cluster level; this host is a member of "
                            "{0}.".format(dbhost.cluster))

    if not force:
        validate_branch_commits(dbsource, dbsource_author,
                                dbbranch, dbauthor, logger, self.config)

    plenary_host = Plenary.get_plenary(dbhost, logger=logger)

    dbhost.branch = dbbranch
    dbhost.sandbox_author = dbauthor
    session.flush()

    # We're crossing domains, need to lock everything.
    # XXX: There's a directory per domain. Do we need subdirectories
    # for different authors for a sandbox?
    with CompileKey.merge([CompileKey(domain=dbsource.name, logger=logger),
                           CompileKey(domain=dbbranch.name, logger=logger)]):
        plenary_host.stash()
        try:
            plenary_host.write(locked=True)
        except IncompleteError:
            # This template cannot be written, we leave it alone.
            # It would be nice to flag the state in the host?
            plenary_host.remove(locked=True)
        except:
            # This will not restore the cleaned up build files. That's OK.
            # They will be recreated as needed.
            plenary_host.restore_stash()
            raise
    return
def render(self, session, logger, personality, archetype, **arguments):
    """Delete a personality that no host or cluster references any more."""
    dbpersona = Personality.get_unique(session, name=personality,
                                       archetype=archetype, compel=True)

    used_by_host = session.query(Host).filter_by(personality=dbpersona).first()
    used_by_cluster = session.query(Cluster).filter_by(
        personality=dbpersona).first()
    if used_by_host or used_by_cluster:
        raise ArgumentError("{0} is still in use and cannot be deleted."
                            .format(dbpersona))

    # Grab the plenary before the row goes away, drop the file afterwards.
    plenary = Plenary.get_plenary(dbpersona, logger=logger)
    session.delete(dbpersona)
    session.flush()
    plenary.remove()

    return
def render(self, session, logger, hostname, pancinclude, pancexclude,
           pancdebug, cleandeps, **arguments):
    """Compile a single host's profile under its compile lock."""
    dbhost = hostname_to_host(session, hostname)

    # --pancdebug is shorthand for tracing everything except the spma
    # functions template.
    if pancdebug:
        pancinclude = r'.*'
        pancexclude = r'components/spma/functions'

    plenary = Plenary.get_plenary(dbhost, logger=logger)
    dom = TemplateDomain(dbhost.branch, dbhost.sandbox_author, logger=logger)
    with plenary.get_key():
        dom.compile(session, only=plenary.object_templates,
                    panc_debug_include=pancinclude,
                    panc_debug_exclude=pancexclude,
                    cleandeps=cleandeps, locked=True)
    return
def render(self, session, logger, hostname, data, generate, **arguments):
    """Show a host plenary, or the plenary of one of its resources.

    When a resource selector is given, its template is shown; otherwise the
    host's data or top-level template is chosen based on *data*.  With
    *generate* the content is rebuilt instead of read from disk.
    """
    dbhost = hostname_to_host(session, hostname)
    dbresource = get_resource(session, dbhost, **arguments)

    if dbresource:
        plenary_info = Plenary.get_plenary(dbresource, logger=logger)
    else:
        cls = PlenaryHostData if data else PlenaryToplevelHost
        plenary_info = cls.get_plenary(dbhost, logger=logger)

    return (plenary_info._generate_content() if generate
            else plenary_info.read())
def render(self, session, logger, target, grn, eon_id, hostname, list,
           personality, archetype, **arguments):
    """Map a GRN to a target on one host, a host list, or a personality.

    NOTE(review): if none of --hostname/--list/--personality is given,
    ``objs`` and ``config_key`` are unbound and this raises NameError --
    presumably upstream input validation guarantees one is set; confirm.
    """
    dbgrn = lookup_grn(session, grn, eon_id, logger=logger,
                       config=self.config)

    plenaries = PlenaryCollection(logger=logger)
    if hostname:
        objs = [hostname_to_host(session, hostname)]
        config_key = "host_grn_targets"
    elif list:
        check_hostlist_size(self.command, self.config, list)
        objs = hostlist_to_hosts(session, list)
        config_key = "host_grn_targets"
    elif personality:
        objs = [Personality.get_unique(session, name=personality,
                                       archetype=archetype, compel=True)]
        config_key = "personality_grn_targets"

    for obj in objs:
        # Valid targets are configured per archetype.
        section = "archetype_" + obj.archetype.name
        if self.config.has_option(section, config_key):
            valid_targets = [s.strip() for s in
                             self.config.get(section, config_key).split(",")]
        else:
            raise ArgumentError("{0} has no valid GRN targets configured."
                                .format(obj.archetype))

        if target not in valid_targets:
            raise ArgumentError("Invalid target %s for archetype %s, please "
                                "choose from: %s." %
                                (target, obj.archetype.name,
                                 ", ".join(valid_targets)))

        plenaries.append(Plenary.get_plenary(obj))
        self._update_dbobj(obj, target, dbgrn)

    session.flush()
    plenaries.write()

    return
def render(self, session, logger, domain, sandbox, archetype, personality,
           pancinclude, pancexclude, pancdebug, cleandeps, **arguments):
    """Recompile all hosts of a personality, optionally limited to a branch.

    A no-op if no matching hosts exist.
    """
    dbdomain = None
    dbauthor = None
    if domain or sandbox:
        (dbdomain, dbauthor) = get_branch_and_author(session, logger,
                                                     domain=domain,
                                                     sandbox=sandbox,
                                                     compel=True)

    dbpersonality = Personality.get_unique(session, name=personality,
                                           archetype=archetype, compel=True)

    # --pancdebug: trace everything except the spma functions template.
    if pancdebug:
        pancinclude = r'.*'
        pancexclude = r'components/spma/functions'

    q = session.query(Host)
    q = q.filter_by(personality=dbpersonality)
    if dbdomain:
        q = q.filter_by(branch=dbdomain)
    if dbauthor:
        q = q.filter_by(sandbox_author=dbauthor)

    host_list = q.all()
    if not host_list:
        return

    # If the domain was not specified, set it to the domain of first host
    dbdomain, dbauthor = validate_branch_author(host_list)

    plenaries = PlenaryCollection(logger=logger)
    for host in host_list:
        plenaries.append(Plenary.get_plenary(host))

    dom = TemplateDomain(dbdomain, dbauthor, logger=logger)
    with plenaries.get_key():
        dom.compile(session, only=plenaries.object_templates,
                    panc_debug_include=pancinclude,
                    panc_debug_exclude=pancexclude,
                    cleandeps=cleandeps, locked=True)
    return
def render(self, session, logger, hostname, service, **arguments):
    """Unbind a host from a service instance it is a client of.

    Services required by the host's archetype or personality cannot be
    unbound.  Silently succeeds if the host is not bound to the service.
    """
    dbhost = hostname_to_host(session, hostname)

    for srv in dbhost.archetype.services + dbhost.personality.services:
        if srv.name == service:
            raise ArgumentError("Cannot unbind a required service. "
                                "Perhaps you want to rebind?")

    dbservice = Service.get_unique(session, service, compel=True)
    si = get_host_bound_service(dbhost, dbservice)
    if si:
        logger.info("Removing client binding")
        dbhost.services_used.remove(si)
        session.flush()

        plenaries = PlenaryCollection(logger=logger)
        plenaries.append(Plenary.get_plenary(dbhost))
        # The server-side template lists its clients, so refresh it too.
        plenaries.append(PlenaryServiceInstanceServer.get_plenary(si))
        plenaries.write()

    return
def render(self, session, logger, metacluster, cluster, **arguments):
    """Bind a cluster to a metacluster, detaching it from any old one first.

    Moving a cluster between metaclusters is refused while virtual machines
    are attached to it.
    """
    dbcluster = Cluster.get_unique(session, cluster, compel=True)
    dbmetacluster = MetaCluster.get_unique(session, metacluster, compel=True)

    old_metacluster = None
    if dbcluster.metacluster and dbcluster.metacluster != dbmetacluster:
        if dbcluster.virtual_machines:
            raise ArgumentError("Cannot move cluster to a new metacluster "
                                "while virtual machines are attached.")
        old_metacluster = dbcluster.metacluster
        old_metacluster.members.remove(dbcluster)
        # Force the relationship to be reloaded after the removal.
        session.expire(dbcluster, ['_metacluster'])

    if not dbcluster.metacluster:
        dbmetacluster.members.append(dbcluster)

    session.flush()

    Plenary.get_plenary(dbcluster, logger=logger).write()

    return
def render(self, session, logger, city, **arguments):
    """Delete a city from the database, the templates and DSDB.

    Location details are captured before deletion so DSDB can be told which
    city disappeared; the plenary removal and DSDB update run under the
    plenary's compile lock, rolling back the template on failure.
    """
    dbcity = get_location(session, city=city)

    # Capture the attributes DSDB needs before the row is deleted.
    name = dbcity.name
    country = dbcity.country.name
    fullname = dbcity.fullname
    plenary = Plenary.get_plenary(dbcity, logger=logger)

    CommandDelLocation.render(self, session=session, name=city,
                              type='city', **arguments)
    session.flush()

    with plenary.get_key():
        try:
            plenary.remove(locked=True)

            dsdb_runner = DSDBRunner(logger=logger)
            dsdb_runner.del_city(name, country, fullname)
            dsdb_runner.commit_or_rollback()
        except:
            plenary.restore_stash()
            raise
    return
grn, eon_id, logger=logger, config=self.config) old_grn = dbpersona.owner_grn dbpersona.owner_grn = dbgrn if not leave_existing: # If this is a public personality, then there may be hosts with # various GRNs inside the personality, so make sure we preserve # those GRNs by filtering on the original GRN of the personality q = session.query(Host) q = q.filter_by(personality=dbpersona, owner_grn=old_grn) for dbhost in q.all(): dbhost.owner_grn = dbgrn plenaries.append(Plenary.get_plenary(dbhost)) if config_override is not None and \ dbpersona.config_override != config_override: dbpersona.config_override = config_override plenaries.append(Plenary.get_plenary(dbpersona)) session.flush() q = session.query(Cluster) q = q.with_polymorphic("*") # The validation will touch all member hosts/machines, so it's better to # pre-load everything q = q.options(subqueryload('_hosts'), joinedload('_hosts.host'), joinedload('_hosts.host.machine'), joinedload('resholder'),