def __init__(self, dbmetacluster, logger=LOGGER):
    Plenary.__init__(self, dbmetacluster, logger=logger)
    self.name = dbmetacluster.name
    self.loadpath = self.dbobj.personality.archetype.name
    self.plenary_core = "clusters"
    self.plenary_template = dbmetacluster.name

def render(self, session, logger, hostname, service, instance, **arguments):
    dbhost = hostname_to_host(session, hostname)
    dbservice = Service.get_unique(session, service, compel=True)

    msg = "Service %s" % service
    if instance:
        dbinstances = [get_service_instance(session, dbservice, instance)]
        msg = "Service %s, instance %s" % (service, instance)
    else:
        q = session.query(ServiceInstance)
        q = q.filter_by(service=dbservice)
        q = q.join('servers')
        q = q.filter_by(host=dbhost)
        dbinstances = q.all()

    for dbinstance in dbinstances:
        if dbhost in dbinstance.server_hosts:
            if (dbinstance.client_count > 0 and
                    len(dbinstance.server_hosts) <= 1):
                logger.warning("WARNING: Server %s, is the last server "
                               "bound to %s which still has clients" %
                               (hostname, msg))
            dbinstance.server_hosts.remove(dbhost)
            session.expire(dbhost, ['_services_provided'])

    session.flush()

    plenaries = PlenaryCollection(logger=logger)
    plenaries.append(Plenary.get_plenary(dbhost))
    for dbinstance in dbinstances:
        plenaries.append(Plenary.get_plenary(dbinstance))
    plenaries.write()

    return

def __init__(self, dbmetacluster, logger=LOGGER):
    Plenary.__init__(self, dbmetacluster, logger=logger)
    self.name = dbmetacluster.name

    # TODO maybe metaclusterdata
    self.plenary_core = "clusterdata"
    self.plenary_template = dbmetacluster.name

def render(self, session, logger, cluster, buildstatus, **arguments):
    dbcluster = Cluster.get_unique(session, cluster, compel=True)
    dbstatus = ClusterLifecycle.get_unique(session, buildstatus,
                                           compel=True)

    if not dbcluster.status.transition(dbcluster, dbstatus):
        return

    if not dbcluster.personality.archetype.is_compileable:
        return

    session.flush()

    plenaries = PlenaryCollection(logger=logger)
    plenaries.append(Plenary.get_plenary(dbcluster))
    for dbhost in dbcluster.hosts:
        plenaries.append(Plenary.get_plenary(dbhost))

    # Force a host lock as pan might overwrite the profile...
    key = CompileKey(domain=dbcluster.branch.name, logger=logger)
    try:
        lock_queue.acquire(key)
        plenaries.write(locked=True)
        td = TemplateDomain(dbcluster.branch, dbcluster.sandbox_author,
                            logger=logger)
        td.compile(session, plenaries.object_templates, locked=True)
    except:
        plenaries.restore_stash()
        raise
    finally:
        lock_queue.release(key)

    return

def render(self, session, logger, service, instance, comments, **arguments):
    dbservice = session.query(Service).filter_by(name=service).first()
    if dbservice and instance is None:
        raise ArgumentError("Service %s already exists." % dbservice.name)

    if not dbservice:
        # "add_service --service foo --comments blah" should add the
        # comments to Service,
        # "add_service --service foo --instance bar --comments blah" should
        # add the comments to ServiceInstance
        if instance:
            srvcomments = None
        else:
            srvcomments = comments
        dbservice = Service(name=service, comments=srvcomments)
        session.add(dbservice)

    plenaries = PlenaryCollection(logger=logger)
    plenaries.append(Plenary.get_plenary(dbservice))

    if instance:
        ServiceInstance.get_unique(session, service=dbservice,
                                   name=instance, preclude=True)
        dbsi = ServiceInstance(service=dbservice, name=instance,
                               comments=comments)
        session.add(dbsi)
        plenaries.append(Plenary.get_plenary(dbsi))

    session.flush()

    plenaries.write()

    return

def render(self, session, logger, machine, disk, controller, size, all,
           dbuser, **arguments):
    # Handle deprecated arguments
    if arguments.get("type", None):
        self.deprecated_option("type", "Please use --controller instead.",
                               logger=logger, **arguments)
        controller = arguments["type"]
    if arguments.get("capacity", None):
        self.deprecated_option("capacity", "Please use --size instead.",
                               logger=logger, **arguments)
        size = arguments["capacity"]

    dbmachine = Machine.get_unique(session, machine, compel=True)
    q = session.query(Disk).filter_by(machine=dbmachine)
    if disk:
        q = q.filter_by(device_name=disk)
    if controller:
        if controller not in controller_types:
            raise ArgumentError("%s is not a valid controller type, use "
                                "one of: %s." %
                                (controller, ", ".join(controller_types)))
        q = q.filter_by(controller_type=controller)
    if size is not None:
        q = q.filter_by(capacity=size)
    results = q.all()

    if len(results) == 0:
        raise NotFoundException("No disks found.")
    elif len(results) > 1 and not all:
        raise ArgumentError("More than one matching disks found. "
                            "Use --all to delete them all.")
    for result in results:
        session.delete(result)

    session.flush()
    session.expire(dbmachine, ['disks'])

    plenary_machine = Plenary.get_plenary(dbmachine, logger=logger)
    key = plenary_machine.get_write_key()
    dbcontainer = dbmachine.vm_container
    if dbcontainer:
        plenary_container = Plenary.get_plenary(dbcontainer, logger=logger)
        key = CompileKey.merge([key, plenary_container.get_write_key()])
    try:
        lock_queue.acquire(key)
        if dbcontainer:
            plenary_container.write(locked=True)
        plenary_machine.write(locked=True)
    except:
        plenary_machine.restore_stash()
        if dbcontainer:
            plenary_container.restore_stash()
        raise
    finally:
        lock_queue.release(key)

def render(self, session, logger, service, instance, position, hostname,
           cluster, ip, resourcegroup, service_address, alias, **arguments):
    dbservice = Service.get_unique(session, service, compel=True)

    if instance:
        dbsi = ServiceInstance.get_unique(session, service=dbservice,
                                          name=instance, compel=True)
        dbinstances = [dbsi]
    else:
        # --position for multiple service instances sounds dangerous, so
        # disallow it until a real usecase emerges
        if position:
            raise ArgumentError("The --position option can only be "
                                "specified for one service instance.")
        q = session.query(ServiceInstance)
        q = q.filter_by(service=dbservice)
        dbinstances = q.all()

    plenaries = PlenaryCollection(logger=logger)

    if position is not None:
        params = None
    else:
        params = lookup_target(session, plenaries, hostname, ip, cluster,
                               resourcegroup, service_address, alias)

    for dbinstance in dbinstances:
        if position is not None:
            if position < 0 or position >= len(dbinstance.servers):
                raise ArgumentError("Invalid server position.")
            dbsrv = dbinstance.servers[position]
            if dbsrv.host:
                plenaries.append(Plenary.get_plenary(dbsrv.host))
            if dbsrv.cluster:
                plenaries.append(Plenary.get_plenary(dbsrv.cluster))
        else:
            dbsrv = find_server(dbinstance, params)
            if not dbsrv:
                if instance:
                    raise NotFoundException("No such server binding.")
                continue

        plenaries.append(Plenary.get_plenary(dbinstance))

        if dbsrv.host:
            session.expire(dbsrv.host, ['services_provided'])
        if dbsrv.cluster:
            session.expire(dbsrv.cluster, ['services_provided'])

        dbinstance.servers.remove(dbsrv)

        if dbinstance.client_count > 0 and not dbinstance.servers:
            logger.warning("Warning: {0} was left without servers, "
                           "but it still has clients.".format(dbinstance))

    session.flush()

    plenaries.write()

    return

def render(self, session, logger, city, timezone, campus,
           default_dns_domain, comments, **arguments):
    dbcity = get_location(session, city=city)

    # Updating machine templates is expensive, so only do that if needed
    update_machines = False

    if timezone is not None:
        dbcity.timezone = timezone
    if comments is not None:
        dbcity.comments = comments
    if default_dns_domain is not None:
        if default_dns_domain:
            dbdns_domain = DnsDomain.get_unique(session, default_dns_domain,
                                                compel=True)
            dbcity.default_dns_domain = dbdns_domain
        else:
            dbcity.default_dns_domain = None

    prev_campus = None
    dsdb_runner = DSDBRunner(logger=logger)
    if campus is not None:
        dbcampus = get_location(session, campus=campus)
        # This one would change the template's locations hence forbidden
        if dbcampus.hub != dbcity.hub:
            # Doing this both to reduce user error and to limit
            # testing required.
            raise ArgumentError("Cannot change campus. {0} is in {1:l}, "
                                "while {2:l} is in {3:l}.".format(
                                    dbcampus, dbcampus.hub,
                                    dbcity, dbcity.hub))

        if dbcity.campus:
            prev_campus = dbcity.campus
        dbcity.update_parent(parent=dbcampus)
        update_machines = True

    session.flush()

    if campus is not None:
        if prev_campus:
            prev_name = prev_campus.name
        else:
            prev_name = None
        dsdb_runner.update_city(city, dbcampus.name, prev_name)

    plenaries = PlenaryCollection(logger=logger)
    plenaries.append(Plenary.get_plenary(dbcity))

    if update_machines:
        q = session.query(Machine)
        q = q.filter(Machine.location_id.in_(dbcity.offspring_ids()))
        logger.client_info("Updating %d machines..." % q.count())
        for dbmachine in q:
            plenaries.append(Plenary.get_plenary(dbmachine))

    count = plenaries.write()

    dsdb_runner.commit_or_rollback()

    logger.client_info("Flushed %d templates." % count)

def __init__(self, dbhost, logger=LOGGER):
    Plenary.__init__(self, dbhost, logger=logger)
    # Store the branch separately so get_key() works even after the dbhost
    # object has been deleted
    self.branch = dbhost.branch
    self.name = dbhost.fqdn
    self.plenary_core = "hostdata"
    self.plenary_template = self.name

def render(self, session, logger, switch, **arguments):
    dbswitch = Switch.get_unique(session, switch, compel=True)

    # Check and complain if the switch has any other addresses than its
    # primary address
    addrs = []
    for addr in dbswitch.all_addresses():
        if addr.ip == dbswitch.primary_ip:
            continue
        addrs.append(str(addr.ip))
    if addrs:
        raise ArgumentError("{0} still provides the following addresses, "
                            "delete them first: {1}.".format(
                                dbswitch, ", ".join(addrs)))

    dbdns_rec = dbswitch.primary_name
    ip = dbswitch.primary_ip
    old_fqdn = str(dbswitch.primary_name.fqdn)
    old_comments = dbswitch.comments
    session.delete(dbswitch)
    if dbdns_rec:
        delete_dns_record(dbdns_rec)

    session.flush()

    # Any switch ports hanging off this switch should be deleted with
    # the cascade delete of the switch.

    switch_plenary = Plenary.get_plenary(dbswitch, logger=logger)

    # clusters connected to this switch
    plenaries = PlenaryCollection(logger=logger)
    for dbcluster in dbswitch.esx_clusters:
        plenaries.append(Plenary.get_plenary(dbcluster))

    key = CompileKey.merge([switch_plenary.get_remove_key(),
                            plenaries.get_write_key()])

    try:
        lock_queue.acquire(key)
        switch_plenary.stash()
        plenaries.write(locked=True)
        switch_plenary.remove(locked=True)

        if ip:
            dsdb_runner = DSDBRunner(logger=logger)
            # FIXME: restore interface name/MAC on rollback
            dsdb_runner.delete_host_details(old_fqdn, ip,
                                            comments=old_comments)
            dsdb_runner.commit_or_rollback("Could not remove switch from DSDB")
        return
    except:
        plenaries.restore_stash()
        switch_plenary.restore_stash()
        raise
    finally:
        lock_queue.release(key)

def __init__(self, dbhost, logger=LOGGER):
    Plenary.__init__(self, dbhost, logger=logger)
    # Store the branch separately so get_key() works even after the dbhost
    # object has been deleted
    self.branch = dbhost.branch
    self.name = dbhost.fqdn
    self.loadpath = dbhost.personality.archetype.name
    self.plenary_core = ""
    self.plenary_template = self.name

def __init__(self, dbcluster, logger=LOGGER):
    Plenary.__init__(self, dbcluster, logger=logger)
    self.name = dbcluster.name
    if dbcluster.metacluster:
        self.metacluster = dbcluster.metacluster.name
    else:
        self.metacluster = None
    self.plenary_core = "clusterdata"
    self.plenary_template = dbcluster.name

def lookup_target(session, plenaries, hostname, ip, cluster, resourcegroup,
                  service_address, alias):
    """ Check the parameters of the server providing a given service

    Look for potential conflicts, and return a dict that is suitable to be
    passed to either the constructor of ServiceInstanceServer, or to the
    find_server() function.
    """

    params = {}

    if cluster and hostname:
        raise ArgumentError("Only one of --cluster and --hostname may be "
                            "specified.")

    if alias:
        dbdns_env = DnsEnvironment.get_unique_or_default(session)
        dbdns_rec = Alias.get_unique(session, fqdn=alias,
                                     dns_environment=dbdns_env, compel=True)
        params["alias"] = dbdns_rec

    if hostname:
        params["host"] = hostname_to_host(session, hostname)
        plenaries.append(Plenary.get_plenary(params["host"]))
    if cluster:
        params["cluster"] = Cluster.get_unique(session, cluster, compel=True)
        plenaries.append(Plenary.get_plenary(params["cluster"]))

    if service_address:
        # TODO: calling get_resource_holder() means doing redundant DB
        # lookups
        # TODO: it would be nice to also accept an FQDN for the service
        # address, to be consistent with the usage of the --service_address
        # option in add_service_address/del_service_address
        holder = get_resource_holder(session, hostname=hostname,
                                     cluster=cluster,
                                     resgroup=resourcegroup, compel=True)

        dbsrv_addr = ServiceAddress.get_unique(session,
                                               name=service_address,
                                               holder=holder, compel=True)
        params["service_address"] = dbsrv_addr
    elif ip:
        for addr in params["host"].hardware_entity.all_addresses():
            if ip != addr.ip:
                continue
            if addr.service_address:
                params["service_address"] = addr.service_address
            else:
                params["address_assignment"] = addr
            break

    return params

def render(self, session, logger, network_device, **arguments):
    dbnetdev = NetworkDevice.get_unique(session, network_device, compel=True)

    # Check and complain if the network device has any other addresses
    # than its primary address
    addrs = []
    for addr in dbnetdev.all_addresses():
        if addr.ip == dbnetdev.primary_ip:
            continue
        addrs.append(str(addr.ip))
    if addrs:
        raise ArgumentError("{0} still provides the following addresses, "
                            "delete them first: {1}.".format(
                                dbnetdev, ", ".join(addrs)))

    dbdns_rec = dbnetdev.primary_name
    ip = dbnetdev.primary_ip
    old_fqdn = str(dbnetdev.primary_name.fqdn)
    old_comments = dbnetdev.comments
    session.delete(dbnetdev)
    if dbdns_rec:
        delete_dns_record(dbdns_rec)

    session.flush()

    # Any network device ports hanging off this network device should be
    # deleted with the cascade delete of the network device.

    netdev_plenary = Plenary.get_plenary(dbnetdev, logger=logger)

    # clusters connected to this network device
    plenaries = PlenaryCollection(logger=logger)
    for dbcluster in dbnetdev.esx_clusters:
        plenaries.append(Plenary.get_plenary(dbcluster))

    with CompileKey.merge([netdev_plenary.get_key(), plenaries.get_key()]):
        netdev_plenary.stash()
        try:
            plenaries.write(locked=True)
            netdev_plenary.remove(locked=True)

            if ip:
                dsdb_runner = DSDBRunner(logger=logger)
                # FIXME: restore interface name/MAC on rollback
                dsdb_runner.delete_host_details(old_fqdn, ip,
                                                comments=old_comments)
                dsdb_runner.commit_or_rollback("Could not remove network "
                                               "device from DSDB")
        except:
            plenaries.restore_stash()
            netdev_plenary.restore_stash()
            raise

    return

def render(self, session, logger, hostname, cluster, personality,
           **arguments):
    dbcluster = Cluster.get_unique(session, cluster, compel=True)
    dbhost = hostname_to_host(session, hostname)
    if not dbhost.cluster:
        raise ArgumentError("{0} is not bound to a cluster.".format(dbhost))
    if dbhost.cluster != dbcluster:
        raise ArgumentError("{0} is bound to {1:l}, not {2:l}.".format(
            dbhost, dbhost.cluster, dbcluster))

    if personality:
        dbpersonality = Personality.get_unique(session, name=personality,
                                               archetype=dbhost.archetype,
                                               compel=True)
        if dbpersonality.cluster_required:
            raise ArgumentError("Cannot switch host to personality %s "
                                "because that personality requires a "
                                "cluster" % personality)
        dbhost.personality = dbpersonality
    elif dbhost.personality.cluster_required:
        raise ArgumentError("Host personality %s requires a cluster, "
                            "use --personality to change personality "
                            "when leaving the cluster." %
                            dbhost.personality.name)

    dbcluster.hosts.remove(dbhost)
    remove_service_addresses(dbcluster, dbhost)
    dbcluster.validate()

    session.flush()
    session.expire(dbhost, ['_cluster'])

    # Will need to write a cluster plenary and either write or
    # remove a host plenary.  Grab the domain key since the two
    # must be in the same domain.
    host_plenary = Plenary.get_plenary(dbhost, logger=logger)
    cluster_plenary = Plenary.get_plenary(dbcluster, logger=logger)
    key = CompileKey(domain=dbcluster.branch.name, logger=logger)
    try:
        lock_queue.acquire(key)
        cluster_plenary.write(locked=True)
        try:
            host_plenary.write(locked=True)
        except IncompleteError:
            host_plenary.cleanup(domain=dbhost.branch.name, locked=True)
    except:
        cluster_plenary.restore_stash()
        host_plenary.restore_stash()
        raise
    finally:
        lock_queue.release(key)

def update_cluster_location(session, logger, dbcluster, fix_location,
                            plenaries, remove_plenaries, **arguments):
    location_updated = False
    dblocation = get_location(session, **arguments)
    if fix_location:
        dblocation = dbcluster.minimum_location
        if not dblocation:
            raise ArgumentError("Cannot infer the cluster location from "
                                "the host locations.")
    if dblocation:
        errors = []
        if not dblocation.campus:
            errors.append("{0} is not within a campus.".format(dblocation))

        if dbcluster.cluster_type != 'meta':
            for host in dbcluster.hosts:
                if host.machine.location != dblocation and \
                   dblocation not in host.machine.location.parents:
                    errors.append("{0} has location {1}.".format(
                        host, host.machine.location))
        else:
            for cluster in dbcluster.members:
                if cluster.location_constraint != dblocation and \
                   dblocation not in cluster.location_constraint.parents:
                    errors.append("{0} has location {1}.".format(
                        cluster, cluster.location_constraint))

        if errors:
            raise ArgumentError("Cannot set {0} location constraint to "
                                "{1}:\n{2}".format(dbcluster, dblocation,
                                                   "\n".join(errors)))

        if dbcluster.location_constraint != dblocation:
            if machine_plenary_will_move(old=dbcluster.location_constraint,
                                         new=dblocation):
                for dbmachine in dbcluster.machines:
                    # This plenary will have a path to the old location.
                    plenary = Plenary.get_plenary(dbmachine, logger=logger)
                    remove_plenaries.append(plenary)
                    dbmachine.location = dblocation
                    session.add(dbmachine)
                    # This plenary will have a path to the new location.
                    plenaries.append(Plenary.get_plenary(dbmachine))
                    # Update the path to the machine plenary in the
                    # container resource
                    plenaries.append(
                        Plenary.get_plenary(dbmachine.vm_container))
            dbcluster.location_constraint = dblocation
            location_updated = True

    return location_updated

def __init__(self, dbmachine, logger=LOGGER):
    Plenary.__init__(self, dbmachine, logger=logger)
    self.machine = dbmachine.label

    loc = dbmachine.location
    self.hub = loc.hub.fullname.lower()
    self.building = loc.building.name
    self.city = loc.city.name
    self.continent = loc.continent.name

    if loc.rack:
        self.rack = loc.rack.name
        self.rackrow = loc.rack.rack_row
        self.rackcol = loc.rack.rack_column
    else:
        self.rack = None
    if loc.room:
        self.room = loc.room.name
    else:
        self.room = None
    if loc.bunker:
        self.bunker = loc.bunker.name
    else:
        self.bunker = None
    if loc.campus:
        self.campus = loc.campus.name
    else:
        self.campus = None

    self.dns_search_domains = []
    parents = loc.parents[:]
    parents.append(loc)
    parents.reverse()
    for parent in parents:
        # Filter out duplicates
        extra_domains = [map.dns_domain.name
                         for map in parent.dns_maps
                         if map.dns_domain.name not in self.dns_search_domains]
        self.dns_search_domains.extend(extra_domains)

    self.sysloc = loc.sysloc()

    # If this changes need to update machine_plenary_will_move() to match.
    self.plenary_core = "machine/%(hub)s/%(building)s/%(rack)s" % \
        self.__dict__
    self.plenary_template = self.machine

def body_virtual_machine(self, lines):
    pan_assign(lines, "name", self.dbobj.name)

    machine = self.dbobj.machine
    pmac = Plenary.get_plenary(machine)
    pan_assign(lines, "hardware",
               StructureTemplate(pmac.plenary_template_name))

    # One day we may get to the point where this will be required.
    # FIXME: read the data from the host data template
    if (machine.host):
        # we fill this in manually instead of just assigning
        # 'system' = value("hostname:/system")
        # because the target host might not actually have a profile.
        arch = machine.host.archetype
        os = machine.host.operating_system
        pn = machine.primary_name.fqdn
        system = {'archetype': {'name': arch.name,
                                'os': os.name,
                                'osversion': os.version},
                  'build': machine.host.status.name,
                  'network': {'hostname': pn.name,
                              'domainname': pn.dns_domain}}
        pan_assign(lines, "system", system)

class CommandRefreshWindowsHosts(BrokerCommand):

    required_parameters = []

    def render(self, session, logger, dryrun, **arguments):
        containers = set()
        partial_error = None
        with SyncKey(data="windows", logger=logger):
            try:
                self.refresh_windows_hosts(session, logger, containers)
                if dryrun:
                    session.rollback()
                    return
                session.commit()
            except PartialError, e:
                if dryrun:
                    raise
                partial_error = e
                # All errors were caught before hitting the session, so
                # keep going with whatever was successful.
                session.commit()

        if containers:
            plenaries = PlenaryCollection(logger=logger)
            for container in containers:
                plenaries.append(Plenary.get_plenary(container))
            plenaries.write()
        if partial_error:
            raise partial_error
        return

def render(self, session, logger, hostname, service, instance, **arguments):
    dbhost = hostname_to_host(session, hostname)
    dbservice = Service.get_unique(session, service, compel=True)
    dbinstance = get_service_instance(session, dbservice, instance)
    if dbhost in dbinstance.server_hosts:
        # FIXME: This should just be a warning.  There is currently
        # no way of returning output that would "do the right thing"
        # on the client but still show status 200 (OK).
        # The right thing would generally be writing to stderr for
        # a CLI (either raw or csv), and some sort of generic error
        # page for a web client.
        raise ArgumentError("Server %s is already bound to service %s "
                            "instance %s." % (hostname, service, instance))
    # The ordering_list will manage the position for us
    dbinstance.server_hosts.append(dbhost)
    session.flush()

    plenary_info = Plenary.get_plenary(dbinstance, logger=logger)
    plenary_info.write()

    # XXX: Need to recompile...
    return

def render(self, session, logger, metacluster, max_members, fix_location,
           high_availability, comments, **arguments):
    dbmetacluster = MetaCluster.get_unique(session, metacluster,
                                           compel=True)
    cluster_updated = False

    if max_members is not None:
        current_members = len(dbmetacluster.members)
        if max_members < current_members:
            raise ArgumentError("%s has %d clusters bound, which exceeds "
                                "the requested limit %d." %
                                (format(dbmetacluster), current_members,
                                 max_members))
        dbmetacluster.max_clusters = max_members
        cluster_updated = True

    if comments is not None:
        dbmetacluster.comments = comments
        cluster_updated = True

    if high_availability is not None:
        dbmetacluster.high_availability = high_availability
        cluster_updated = True

    # TODO update_cluster_location would update VMs.  Metaclusters
    # will contain VMs in Vulcan2 model.
    plenaries = PlenaryCollection(logger=logger)
    remove_plenaries = PlenaryCollection(logger=logger)

    location_updated = update_cluster_location(session, logger,
                                               dbmetacluster, fix_location,
                                               plenaries, remove_plenaries,
                                               **arguments)

    if location_updated:
        cluster_updated = True

    if not cluster_updated:
        return

    session.add(dbmetacluster)
    session.flush()

    dbmetacluster.validate()

    plenary_info = Plenary.get_plenary(dbmetacluster, logger=logger)
    key = plenary_info.get_write_key()
    try:
        lock_queue.acquire(key)
        plenary_info.write(locked=True)
    except:
        plenary_info.restore_stash()
        raise
    finally:
        lock_queue.release(key)

    return

def render(self, session, logger, hostname, cluster, personality,
           **arguments):
    dbcluster = Cluster.get_unique(session, cluster, compel=True)
    dbhost = hostname_to_host(session, hostname)
    if not dbhost.cluster:
        raise ArgumentError("{0} is not bound to a cluster.".format(dbhost))
    if dbhost.cluster != dbcluster:
        raise ArgumentError("{0} is bound to {1:l}, not {2:l}.".format(
            dbhost, dbhost.cluster, dbcluster))

    if personality:
        dbpersonality = Personality.get_unique(session, name=personality,
                                               archetype=dbhost.archetype,
                                               compel=True)
        if dbpersonality.cluster_required:
            raise ArgumentError("Cannot switch host to personality %s "
                                "because that personality requires a "
                                "cluster" % personality)
        dbhost.personality = dbpersonality
    elif dbhost.personality.cluster_required:
        raise ArgumentError("Host personality %s requires a cluster, "
                            "use --personality to change personality "
                            "when leaving the cluster." %
                            dbhost.personality.name)

    dbcluster.hosts.remove(dbhost)
    remove_service_addresses(dbcluster, dbhost)
    dbcluster.validate()

    session.flush()
    session.expire(dbhost, ['_cluster'])

    host_plenary = Plenary.get_plenary(dbhost, logger=logger)
    cluster_plenary = Plenary.get_plenary(dbcluster, logger=logger)

    with CompileKey.merge([host_plenary.get_key(),
                           cluster_plenary.get_key()]):
        try:
            cluster_plenary.write(locked=True)
            try:
                host_plenary.write(locked=True)
            except IncompleteError:
                host_plenary.remove(locked=True)
        except:
            cluster_plenary.restore_stash()
            host_plenary.restore_stash()
            raise

def render(self, session, logger, machine, dbuser, **arguments):
    dbmachine = Machine.get_unique(session, machine, compel=True)

    remove_plenaries = PlenaryCollection(logger=logger)
    remove_plenaries.append(Plenary.get_plenary(dbmachine))
    if dbmachine.vm_container:
        remove_plenaries.append(Plenary.get_plenary(dbmachine.vm_container))
        dbcontainer = dbmachine.vm_container.holder.holder_object
    else:
        dbcontainer = None

    if dbmachine.host:
        raise ArgumentError("{0} is still in use by {1:l} and cannot be "
                            "deleted.".format(dbmachine, dbmachine.host))
    addrs = []
    for addr in dbmachine.all_addresses():
        addrs.append("%s: %s" % (addr.logical_name, addr.ip))
    if addrs:
        addrmsg = ", ".join(addrs)
        raise ArgumentError("{0} still provides the following addresses, "
                            "delete them first: {1}.".format(dbmachine,
                                                             addrmsg))

    session.delete(dbmachine)
    session.flush()

    key = remove_plenaries.get_remove_key()
    if dbcontainer:
        plenary_container = Plenary.get_plenary(dbcontainer, logger=logger)
        key = CompileKey.merge([key, plenary_container.get_write_key()])
    try:
        lock_queue.acquire(key)
        remove_plenaries.stash()
        if dbcontainer:
            plenary_container.write(locked=True)
        remove_plenaries.remove(locked=True)
    except:
        remove_plenaries.restore_stash()
        if dbcontainer:
            plenary_container.restore_stash()
        raise
    finally:
        lock_queue.release(key)

    return

def update_cluster_location(session, logger, dbcluster, fix_location,
                            plenaries, **arguments):
    dblocation = get_location(session, **arguments)
    if fix_location:
        dblocation = dbcluster.minimum_location
        if not dblocation:
            raise ArgumentError("Cannot infer the cluster location from "
                                "the host locations.")
    if dblocation:
        errors = []
        if not dblocation.campus:
            errors.append("{0} is not within a campus.".format(dblocation))

        if not isinstance(dbcluster, MetaCluster):
            for host in dbcluster.hosts:
                if host.hardware_entity.location != dblocation and \
                   dblocation not in host.hardware_entity.location.parents:
                    errors.append("{0} has location {1}."
                                  .format(host,
                                          host.hardware_entity.location))
        else:
            for cluster in dbcluster.members:
                if cluster.location_constraint != dblocation and \
                   dblocation not in cluster.location_constraint.parents:
                    errors.append("{0} has location {1}."
                                  .format(cluster,
                                          cluster.location_constraint))

        if errors:
            raise ArgumentError("Cannot set {0} location constraint to "
                                "{1}:\n{2}".format(dbcluster, dblocation,
                                                   "\n".join(errors)))

        if dbcluster.location_constraint != dblocation:
            for dbmachine in dbcluster.virtual_machines:
                # The plenary objects should be created before changing the
                # location, so they can track the change
                plenaries.append(Plenary.get_plenary(dbmachine,
                                                     logger=logger))
                # Update the path to the machine plenary in the container
                # resource
                plenaries.append(Plenary.get_plenary(dbmachine.vm_container))
                dbmachine.location = dblocation

            dbcluster.location_constraint = dblocation

    return

def del_cluster(session, logger, dbcluster, config):
    cluster = str(dbcluster.name)

    if hasattr(dbcluster, 'members') and dbcluster.members:
        raise ArgumentError("%s is still in use by clusters: %s." %
                            (format(dbcluster),
                             ", ".join([c.name
                                        for c in dbcluster.members])))
    elif dbcluster.hosts:
        hosts = ", ".join([h.fqdn for h in dbcluster.hosts])
        raise ArgumentError("%s is still in use by hosts: %s." %
                            (format(dbcluster), hosts))
    cluster_plenary = Plenary.get_plenary(dbcluster, logger=logger)
    resources = PlenaryCollection(logger=logger)
    if dbcluster.resholder:
        for res in dbcluster.resholder.resources:
            resources.append(Plenary.get_plenary(res))
    domain = dbcluster.branch.name
    session.delete(dbcluster)

    session.flush()

    key = cluster_plenary.get_remove_key()
    with CompileKey.merge([key, resources.get_remove_key()]):
        cluster_plenary.cleanup(domain, locked=True)
        # And we also want to remove the profile itself
        profiles = config.get("broker", "profilesdir")
        # Only one of these should exist, but it doesn't hurt
        # to try to clean up both.
        xmlfile = os.path.join(profiles, "clusters", cluster + ".xml")
        remove_file(xmlfile, logger=logger)
        xmlgzfile = xmlfile + ".gz"
        remove_file(xmlgzfile, logger=logger)
        # And the cached template created by ant
        remove_file(os.path.join(config.get("broker", "quattordir"),
                                 "objects", "clusters",
                                 cluster + TEMPLATE_EXTENSION),
                    logger=logger)
        resources.remove(locked=True)

    build_index(config, session, profiles, logger=logger)

    return

def render(self, session, logger, service, instance, position, hostname,
           cluster, ip, resourcegroup, service_address, alias, **arguments):
    # Check for invalid combinations.  We allow binding as a server:
    # - a host, in which case the primary IP address will be used
    # - an auxiliary IP address of a host
    # - a service address of a host
    # - a service address of a cluster
    if ip:
        if cluster or not hostname:
            raise ArgumentError("Using the --ip option requires --hostname "
                                "to be specified.")
    if cluster and not service_address:
        raise ArgumentError("Binding a cluster requires --service_address "
                            "to be specified.")

    dbservice = Service.get_unique(session, service, compel=True)
    dbinstance = get_service_instance(session, dbservice, instance)

    plenaries = PlenaryCollection(logger=logger)
    plenaries.append(Plenary.get_plenary(dbinstance))

    params = lookup_target(session, plenaries, hostname, ip, cluster,
                           resourcegroup, service_address, alias)

    # TODO: someday we should verify that the target really points to the
    # host/cluster specified by the other options
    if "alias" in params and ("host" in params or "cluster" in params):
        logger.client_info("Warning: when using --alias, it is your "
                           "responsibility to ensure it really points to "
                           "the host/cluster you've specified - the broker "
                           "does not verify that.")

    with session.no_autoflush:
        dbsrv = find_server(dbinstance, params)
        if dbsrv:
            raise ArgumentError("The server binding already exists.")

        dbsrv = ServiceInstanceServer(**params)

        # The ordering_list will manage the position for us
        if position is not None:
            dbinstance.servers.insert(position, dbsrv)
        else:
            dbinstance.servers.append(dbsrv)

        if dbsrv.host:
            session.expire(dbsrv.host, ['services_provided'])
        if dbsrv.cluster:
            session.expire(dbsrv.cluster, ['services_provided'])

    session.flush()

    plenaries.write()

    return

def render(self, session, logger, service, max_clients, default,
           **arguments):
    dbservice = Service.get_unique(session, name=service, compel=True)

    if default:
        dbservice.max_clients = None
    elif max_clients is not None:
        dbservice.max_clients = max_clients
    else:
        raise ArgumentError("Missing --max_clients or --default argument "
                            "to update service %s." % dbservice.name)

    session.add(dbservice)
    session.flush()

    plenaries = PlenaryCollection()
    plenaries.append(Plenary.get_plenary(dbservice))
    for dbinstance in dbservice.instances:
        plenaries.append(Plenary.get_plenary(dbinstance))

    plenaries.write()

    return

def render(self, session, logger, service, need_client_list, comments,
           **arguments):
    Service.get_unique(session, service, preclude=True)

    dbservice = Service(name=service, comments=comments,
                        need_client_list=need_client_list)
    session.add(dbservice)

    plenaries = PlenaryCollection(logger=logger)
    plenaries.append(Plenary.get_plenary(dbservice))

    session.flush()

    plenaries.write()

    return

def get_key(self):
    host = self.dbobj.host
    container = self.dbobj.vm_container
    # Need a compile key if:
    # - There is a host attached.
    # - This is a virtual machine in a container.
    if not host and not container:
        return None

    # We have at least host or container, maybe both...
    if host:
        # PlenaryHost is actually a PlenaryCollection... can't call
        # get_key() directly, so using get_remove_key().
        ph = Plenary.get_plenary(host, logger=self.logger)
        host_key = ph.get_remove_key()
    if container:
        pc = Plenary.get_plenary(container, self.logger)
        container_key = pc.get_key()

    if not container:
        return host_key
    if not host:
        return container_key

    return CompileKey.merge([host_key, container_key])

def render(self, session, logger, list, domain, sandbox, force,
           **arguments):
    dbbranch, dbauthor = get_branch_and_author(session, logger,
                                               domain=domain,
                                               sandbox=sandbox, compel=True)

    if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
        raise ArgumentError("Managing hosts to {0:l} is not allowed."
                            .format(dbbranch))

    check_hostlist_size(self.command, self.config, list)

    dbhosts = hostlist_to_hosts(session, list)

    failed = []

    dbsource, dbsource_author = validate_branch_author(dbhosts)
    for dbhost in dbhosts:
        # check if any host in the list is a cluster node
        if dbhost.cluster:
            failed.append("Cluster nodes must be managed at the "
                          "cluster level; {0} is a member of {1:l}."
                          .format(dbhost.fqdn, dbhost.cluster))

    if failed:
        raise ArgumentError("Cannot modify the following hosts:\n%s" %
                            "\n".join(failed))

    if not force:
        validate_branch_commits(dbsource, dbsource_author,
                                dbbranch, dbauthor, logger, self.config)

    plenaries = PlenaryCollection(logger=logger)

    for dbhost in dbhosts:
        plenaries.append(Plenary.get_plenary(dbhost))
        dbhost.branch = dbbranch
        dbhost.sandbox_author = dbauthor

    session.flush()

    # We're crossing domains, need to lock everything.
    with CompileKey.merge([CompileKey(domain=dbsource.name, logger=logger),
                           CompileKey(domain=dbbranch.name, logger=logger)]):
        plenaries.stash()
        try:
            plenaries.write(locked=True)
        except:
            plenaries.restore_stash()
            raise

    return

def remove_prev(self, session, logger, prev, pending_removals):
    """Remove the interface 'prev' and its host and machine."""
    # This should probably be re-factored to call code used elsewhere.
    # The below seems too simple to warrant that, though...
    logger.info("Removing blind host '%s', machine '%s', "
                "and interface '%s'" %
                (prev.hardware_entity.fqdn, prev.hardware_entity.label,
                 prev.name))
    host_plenary_info = Plenary.get_plenary(prev.hardware_entity.host,
                                            logger=logger)
    # FIXME: Should really do everything that del_host.py does, not
    # just remove the host plenary but adjust all the service
    # plenarys and dependency files.
    pending_removals.append(host_plenary_info)
    dbmachine = prev.hardware_entity
    machine_plenary_info = Plenary.get_plenary(dbmachine, logger=logger)
    pending_removals.append(machine_plenary_info)
    # This will cascade to prev & the host
    if dbmachine.primary_name:
        dbdns_rec = dbmachine.primary_name
        dbmachine.primary_name = None
        delete_dns_record(dbdns_rec)
    session.delete(dbmachine)
    session.flush()

def render(self, session, logger, machine, disk, controller, size, all,
           **arguments):
    dbmachine = Machine.get_unique(session, machine, compel=True)
    q = session.query(Disk).filter_by(machine=dbmachine)
    if disk:
        q = q.filter_by(device_name=disk)
    if controller:
        if controller not in controller_types:
            raise ArgumentError("%s is not a valid controller type, use "
                                "one of: %s." %
                                (controller, ", ".join(controller_types)))
        q = q.filter_by(controller_type=controller)
    if size is not None:
        q = q.filter_by(capacity=size)
    results = q.all()

    if len(results) == 0:
        raise NotFoundException("No disks found.")
    elif len(results) > 1 and not all:
        raise ArgumentError("More than one matching disks found. "
                            "Use --all to delete them all.")
    for result in results:
        session.delete(result)

    session.flush()
    session.expire(dbmachine, ['disks'])

    plenaries = PlenaryCollection(logger=logger)
    plenaries.append(Plenary.get_plenary(dbmachine))
    dbcontainer = dbmachine.vm_container
    if dbcontainer:
        plenaries.append(Plenary.get_plenary(dbcontainer, logger=logger))
    plenaries.write()

    return

def render(self, session, logger, cluster, service, instance, **arguments):
    dbservice = Service.get_unique(session, service, compel=True)
    dbinstance = get_service_instance(session, dbservice, instance)
    dbcluster = Cluster.get_unique(session, cluster, compel=True)
    if dbinstance not in dbcluster.service_bindings:
        raise NotFoundException("{0} is not bound to {1:l}."
                                .format(dbinstance, dbcluster))
    if dbservice in dbcluster.required_services:
        raise ArgumentError("Cannot remove cluster service instance "
                            "binding for %s cluster aligned service %s." %
                            (dbcluster.cluster_type, dbservice.name))
    dbcluster.service_bindings.remove(dbinstance)

    session.flush()

    plenary = Plenary.get_plenary(dbcluster, logger=logger)
    plenary.write()

    return

def render(self, session, logger, rack, row, column, room, building,
           bunker, fullname, default_dns_domain, comments, **arguments):
    dbrack = get_location(session, rack=rack)

    if row is not None:
        dbrack.rack_row = row
    if column is not None:
        dbrack.rack_column = column
    if fullname is not None:
        dbrack.fullname = fullname
    if comments is not None:
        dbrack.comments = comments
    if default_dns_domain is not None:
        if default_dns_domain:
            dbdns_domain = DnsDomain.get_unique(session,
                                                default_dns_domain,
                                                compel=True)
            dbrack.default_dns_domain = dbdns_domain
        else:
            dbrack.default_dns_domain = None

    if bunker or room or building:
        dbparent = get_location(session, bunker=bunker, room=room,
                                building=building)
        # This one would change the template's locations hence forbidden
        if dbparent.building != dbrack.building:
            # Doing this both to reduce user error and to limit
            # testing required.
            raise ArgumentError("Cannot change buildings. {0} is in {1} "
                                "while {2} is in {3}.".format(
                                    dbparent, dbparent.building,
                                    dbrack, dbrack.building))
        dbrack.update_parent(parent=dbparent)

    session.flush()

    plenaries = PlenaryCollection(logger=logger)
    q = session.query(Machine)
    q = q.filter(Machine.location_id.in_(dbrack.offspring_ids()))
    for dbmachine in q:
        plenaries.append(Plenary.get_plenary(dbmachine))

    plenaries.write()

def render(self, session, logger, service, instance, **arguments):
    dbservice = Service.get_unique(session, service, compel=True)
    dbsi = get_service_instance(session, dbservice, instance)
    if dbsi.client_count > 0:
        raise ArgumentError("Service %s, instance %s still has clients and "
                            "cannot be deleted." %
                            (dbservice.name, dbsi.name))
    if dbsi.server_hosts:
        msg = ", ".join([host.fqdn for host in dbsi.server_hosts])
        raise ArgumentError("Service %s, instance %s is still being "
                            "provided by servers: %s." %
                            (dbservice.name, dbsi.name, msg))

    # Depend on cascading to remove any mappings
    session.delete(dbsi)
    session.flush()

    plenary_info = Plenary.get_plenary(dbsi, logger=logger)
    plenary_info.remove()

    return

def render(self, session, logger, cluster, data, generate, **arguments):
    dbcluster = Cluster.get_unique(session, cluster, compel=True)

    dbresource = get_resource(session, dbcluster, **arguments)
    if dbresource:
        plenary_info = Plenary.get_plenary(dbresource, logger=logger)
    else:
        if isinstance(dbcluster, MetaCluster):
            if data:
                plenary_info = PlenaryMetaClusterData(dbcluster,
                                                      logger=logger)
            else:
                plenary_info = PlenaryMetaClusterObject(dbcluster,
                                                        logger=logger)
        else:
            if data:
                plenary_info = PlenaryClusterData(dbcluster, logger=logger)
            else:
                plenary_info = PlenaryClusterObject(dbcluster, logger=logger)

    if generate:
        return plenary_info._generate_content()
    else:
        return plenary_info.read()

def render(self, session, logger, service, instance, max_clients, default,
           **arguments):
    dbservice = Service.get_unique(session, name=service, compel=True)
    dbsi = ServiceInstance.get_unique(session, service=dbservice,
                                      name=instance, compel=True)
    if default:
        dbsi.max_clients = None
    elif max_clients is not None:
        dbsi.max_clients = max_clients
    else:
        raise ArgumentError("Missing --max_clients or --default argument "
                            "to update service %s instance %s." %
                            (dbservice.name, dbsi.name))
    session.add(dbsi)
    session.flush()

    plenary = Plenary.get_plenary(dbsi, logger=logger)
    plenary.write()

    return

def consolidate_names(self, session, logger, dbmachine, dummy_machine_name,
                      pending_removals):
    short = dbmachine.label[:-1]
    if short != dummy_machine_name[:-1]:
        logger.client_info("Not altering name of machine %s, name of "
                           "machine being removed %s is too different." %
                           (dbmachine.label, dummy_machine_name))
        return
    if not dbmachine.label[-1].isalpha():
        logger.client_info("Not altering name of machine %s, name does "
                           "not end with a letter." % dbmachine.label)
        return
    if session.query(Machine).filter_by(label=short).first():
        logger.client_info("Not altering name of machine %s, target "
                           "name %s is already in use." %
                           (dbmachine.label, short))
        return
    logger.client_info("Renaming machine %s to %s." %
                       (dbmachine.label, short))
    pending_removals.append(Plenary.get_plenary(dbmachine))
    dbmachine.label = short
    session.add(dbmachine)
    session.flush()

def __init__(self, dbservice, logger=LOGGER):
    Plenary.__init__(self, dbservice, logger=logger)
    self.plenary_core = "service/%s/server" % dbservice.name
    self.plenary_template = "config"

def __init__(self, dbinstance, logger=LOGGER):
    Plenary.__init__(self, dbinstance, logger=logger)
    self.service = dbinstance.service.name
    self.name = dbinstance.name
    self.plenary_core = "service/%(service)s/%(name)s/server" % self.__dict__
    self.plenary_template = "config"