def render(self, session, network_environment, dns_environment, comments,
           **arguments):
    validate_basic("network environment", network_environment)
    NetworkEnvironment.get_unique(session, network_environment, preclude=True)
    dbdns_env = DnsEnvironment.get_unique(session, dns_environment,
                                          compel=True)

    # Currently input.xml lists --building only, but that may change
    location = get_location(session, **arguments)

    dbnet_env = NetworkEnvironment(name=network_environment,
                                   dns_environment=dbdns_env,
                                   location=location, comments=comments)

    if dbdns_env.is_default != dbnet_env.is_default:
        raise ArgumentError("Only the default network environment may be "
                            "associated with the default DNS environment.")

    session.add(dbnet_env)
    session.flush()
    return

def render(self, session, logger, filesystem, type, mountpoint, blockdevice,
           bootmount, dumpfreq, fsckpass, options, hostname, cluster,
           resourcegroup, comments, **arguments):
    validate_basic("filesystem", filesystem)
    holder = get_resource_holder(session, hostname, cluster, resourcegroup,
                                 compel=False)

    Filesystem.get_unique(session, name=filesystem, holder=holder,
                          preclude=True)

    if dumpfreq is None:
        dumpfreq = 0
    if fsckpass is None:
        # This is already set by defaults in input.xml, but
        # we're being extra paranoid...
        fsckpass = 2  # pragma: no cover

    dbfs = Filesystem(name=filesystem, mountpoint=mountpoint,
                      mountoptions=options, mount=bool(bootmount),
                      blockdev=blockdevice, fstype=type, passno=fsckpass,
                      dumpfreq=dumpfreq, comments=comments)

    return add_resource(session, logger, holder, dbfs)

def render(self, session, dns_environment, comments, **arguments):
    validate_basic("DNS environment", dns_environment)
    DnsEnvironment.get_unique(session, dns_environment, preclude=True)
    db_dnsenv = DnsEnvironment(name=dns_environment, comments=comments)
    session.add(db_dnsenv)
    session.flush()
    return

def render(self, session, logger, hostname, cluster, application,
           **arguments):
    validate_basic("application", application)
    holder = get_resource_holder(session, hostname, cluster)
    dbapp = Application.get_unique(session, name=application, holder=holder,
                                   compel=True)
    del_resource(session, logger, dbapp)
    return

def render(self, session, logger, hostname, cluster, resourcegroup,
           hostlink, **arguments):
    validate_basic("hostlink", hostlink)
    holder = get_resource_holder(session, hostname, cluster)
    dbhl = Hostlink.get_unique(session, name=hostlink, holder=holder,
                               compel=True)
    del_resource(session, logger, dbhl)
    return

def render(self, session, logger, hostname, cluster, intervention,
           **arguments):
    validate_basic("intervention", intervention)
    holder = get_resource_holder(session, hostname, cluster)
    dbiv = Intervention.get_unique(session, name=intervention,
                                   holder=holder, compel=True)
    del_resource(session, logger, dbiv)
    return

def render(self, session, logger, filesystem, hostname, cluster,
           resourcegroup, **arguments):
    validate_basic("filesystem", filesystem)
    holder = get_resource_holder(session, hostname, cluster, resourcegroup)
    dbfs = Filesystem.get_unique(session, name=filesystem, holder=holder,
                                 compel=True)
    del_resource(session, logger, dbfs)
    return

def render(self, session, logger, share, hostname, resourcegroup, cluster,
           **arguments):
    validate_basic("share", share)
    holder = get_resource_holder(session, hostname, cluster, resourcegroup)
    dbshare = Share.get_unique(session, name=share, holder=holder,
                               compel=True)
    del_resource(session, logger, dbshare)
    return

def render(self, session, logger, vlan, name, vlan_type, **kwargs):
    validate_basic("name", name)
    VlanInfo.get_unique(session, vlan_id=vlan, preclude=True)
    dbvlan = VlanInfo(vlan_id=vlan, port_group=name, vlan_type=vlan_type)
    session.add(dbvlan)
    session.flush()
    return

def render(self, session, logger, application, eonid, hostname, cluster,
           resourcegroup, comments, **arguments):
    validate_basic("application", application)
    holder = get_resource_holder(session, hostname, cluster, resourcegroup,
                                 compel=False)
    Application.get_unique(session, name=application, holder=holder,
                           preclude=True)
    dbapp = Application(name=application, comments=comments, eonid=eonid)
    return add_resource(session, logger, holder, dbapp)

def render(self, session, logger, name, hostname, cluster, resourcegroup,
           keep_dns, **arguments):
    validate_basic("name", name)

    if name == "hostname":
        raise ArgumentError("The primary address of the host cannot "
                            "be deleted.")

    holder = get_resource_holder(session, hostname, cluster, resourcegroup,
                                 compel=False)
    dbsrv = ServiceAddress.get_unique(session, name=name, holder=holder,
                                      compel=True)

    if isinstance(holder.holder_object, Host):
        oldinfo = DSDBRunner.snapshot_hw(holder.holder_object.machine)
    else:
        oldinfo = None

    dbdns_rec = dbsrv.dns_record

    for addr in dbsrv.assignments:
        addr.interface.assignments.remove(addr)
    session.expire(dbsrv, ['assignments'])
    session.flush()

    # Check if the address was assigned to multiple interfaces, and remove
    # the DNS entries if this was the last use
    q = session.query(AddressAssignment)
    q = q.filter_by(network=dbdns_rec.network)
    q = q.filter_by(ip=dbdns_rec.ip)
    other_uses = q.all()

    del_resource(session, logger, dbsrv,
                 dsdb_callback=del_srv_dsdb_callback, oldinfo=oldinfo,
                 keep_dns=other_uses or keep_dns)

    if not other_uses and not keep_dns:
        delete_dns_record(dbdns_rec)

    return

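# The DNS cleanup above follows a simple rule: the DNS record is deleted
# only when no other AddressAssignment still uses the same IP and the
# caller did not pass --keep_dns. A minimal standalone sketch of that
# rule, for illustration only:
def should_delete_dns_record(other_uses, keep_dns):
    # other_uses: remaining AddressAssignment rows for the same network/IP
    # keep_dns: the --keep_dns command-line flag
    return not other_uses and not keep_dns

assert should_delete_dns_record([], False)
assert not should_delete_dns_record([], True)
assert not should_delete_dns_record(["another-interface"], False)
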
def render(self, session, logger, hostlink, target, owner, group, hostname,
           cluster, resourcegroup, comments, **arguments):
    validate_basic("hostlink", hostlink)
    holder = get_resource_holder(session, hostname, cluster, resourcegroup,
                                 compel=False)
    Hostlink.get_unique(session, name=hostlink, holder=holder,
                        preclude=True)
    dbhl = Hostlink(name=hostlink, comments=comments, target=target,
                    owner_user=owner, owner_group=group)
    return add_resource(session, logger, holder, dbhl)

def render(self, session, logger, share, comments, hostname, resourcegroup,
           cluster, **arguments):
    validate_basic("share", share)
    holder = get_resource_holder(session, hostname, cluster, resourcegroup,
                                 compel=False)
    Share.get_unique(session, name=share, holder=holder, preclude=True)
    dbshare = Share(name=share, comments=comments)
    add_resource(session, logger, holder, dbshare)
    return

def render(self, session, logger, resourcegroup, hostname, cluster,
           **arguments):
    validate_basic("resourcegroup", resourcegroup)
    holder = get_resource_holder(session, hostname, cluster, compel=True)
    dbrg = ResourceGroup.get_unique(session, name=resourcegroup,
                                    holder=holder, compel=True)

    # Deleting service addresses can't be done with just cascading
    if dbrg.resholder:
        for res in dbrg.resholder.resources:
            if isinstance(res, ServiceAddress):
                raise ArgumentError("{0} contains {1:l}, please delete "
                                    "it first.".format(dbrg, res))

    del_resource(session, logger, dbrg)
    return

def render(self, session, logger, **arguments):
    reboot_schedule = "reboot_schedule"
    validate_basic("reboot_schedule", reboot_schedule)

    arguments = self._validate_args(logger, **arguments)

    time = arguments["time"]
    week = arguments["week"].capitalize()
    day = arguments["day"].capitalize()
    hostname = arguments["hostname"]
    cluster = arguments["cluster"]
    comments = arguments["comments"]

    if time is not None:
        try:
            parse(time)
        except ValueError as e:
            raise ArgumentError("the preferred time '%s' could not be "
                                "interpreted: %s" % (time, e))

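# "parse" above is presumably dateutil.parser.parse (the imports are not
# part of this excerpt). A minimal sketch of the validation it performs,
# under that assumption:
from dateutil.parser import parse

def preferred_time_is_valid(time):
    try:
        parse(time)
        return True
    except ValueError:
        return False

assert preferred_time_is_valid("08:00")
assert not preferred_time_is_valid("not-a-time")
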
def render(self, session, logger, resourcegroup, required_type, hostname,
           cluster, **arguments):
    validate_basic("resourcegroup", resourcegroup)

    if required_type is not None:
        Resource.polymorphic_subclass(required_type,
                                      "Unknown resource type")
        if required_type == "resourcegroup":
            raise ArgumentError("A resourcegroup can't hold other "
                                "resourcegroups.")

    holder = get_resource_holder(session, hostname, cluster, compel=False)

    ResourceGroup.get_unique(session, name=resourcegroup, holder=holder,
                             preclude=True)

    dbrg = ResourceGroup(name=resourcegroup, required_type=required_type)
    return add_resource(session, logger, holder, dbrg)

def render(self, session, logger, machine, chassis, switch, fqdn, interface,
           label, network_environment, map_to_primary, **kwargs):
    # The option definitions are expected to enforce that exactly one of
    # --machine, --chassis or --switch is present.
    if machine:
        hwtype = 'machine'
        hwname = machine
    elif chassis:
        hwtype = 'chassis'
        hwname = chassis
    elif switch:
        hwtype = 'switch'
        hwname = switch

    dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                         network_environment)
    dbhw_ent = HardwareEntity.get_unique(session, hwname,
                                         hardware_type=hwtype, compel=True)

    dbinterface = Interface.get_unique(session, hardware_entity=dbhw_ent,
                                       name=interface, compel=True)
    oldinfo = DSDBRunner.snapshot_hw(dbhw_ent)

    audit_results = []
    ip = generate_ip(session, logger, dbinterface,
                     network_environment=dbnet_env,
                     audit_results=audit_results, **kwargs)

    if dbinterface.interface_type == "loopback":
        # Switch loopback interfaces may use e.g. the network address as an
        # IP address
        relaxed = True
    else:
        relaxed = False

    if not fqdn:
        if not dbhw_ent.primary_name:
            raise ArgumentError("{0} has no primary name, can not "
                                "auto-generate the DNS record. "
                                "Please specify --fqdn.".format(dbhw_ent))
        if label:
            name = "%s-%s-%s" % (dbhw_ent.primary_name.fqdn.name,
                                 interface, label)
        else:
            name = "%s-%s" % (dbhw_ent.primary_name.fqdn.name, interface)
        fqdn = "%s.%s" % (name, dbhw_ent.primary_name.fqdn.dns_domain)

    if label is None:
        label = ""
    elif label == "hostname":
        # When add_host sets up Zebra, it always uses the label 'hostname'.
        # Due to the primary IP being special, add_interface_address cannot
        # really emulate what add_host does, so tell the user where to look.
        raise ArgumentError("The 'hostname' label can only be managed "
                            "by add_host/del_host.")

    # The label will be used as an nlist key
    if label:
        validate_basic("label", label)

    # TODO: add allow_multi=True
    dbdns_rec, newly_created = grab_address(session, fqdn, ip, dbnet_env,
                                            relaxed=relaxed)
    ip = dbdns_rec.ip
    dbnetwork = dbdns_rec.network

    delete_old_dsdb_entry = not newly_created and not dbdns_rec.assignments

    # Reverse PTR control. Auxiliary addresses should point to the primary
    # name by default, with some exceptions.
    if (map_to_primary is None and dbhw_ent.primary_name and
            dbinterface.interface_type != "management" and
            dbdns_rec.fqdn.dns_environment ==
            dbhw_ent.primary_name.fqdn.dns_environment):
        map_to_primary = True

    if map_to_primary:
        if not dbhw_ent.primary_name:
            raise ArgumentError("{0} does not have a primary name, cannot "
                                "set the reverse DNS mapping."
                                .format(dbhw_ent))
        if (dbhw_ent.primary_name.fqdn.dns_environment !=
                dbdns_rec.fqdn.dns_environment):
            raise ArgumentError("{0} lives in {1:l}, not {2:l}."
                                .format(dbhw_ent,
                                        dbhw_ent.primary_name.fqdn.dns_environment,
                                        dbdns_rec.fqdn.dns_environment))
        if dbinterface.interface_type == "management":
            raise ArgumentError("The reverse PTR for management addresses "
                                "should not point to the primary name.")
        dbdns_rec.reverse_ptr = dbhw_ent.primary_name.fqdn

    # Check that the network ranges assigned to different interfaces
    # do not overlap even if the network environments are different, because
    # that would confuse routing on the host. E.g. if eth0 is an internal
    # and eth1 is an external interface, then using 192.168.1.10/24 on eth0
    # and using 192.168.1.20/26 on eth1 won't work.
    for addr in dbhw_ent.all_addresses():
        if addr.network != dbnetwork and \
           addr.network.network.overlaps(dbnetwork.network):
            raise ArgumentError("{0} in {1:l} used on {2:l} overlaps "
                                "requested {3:l} in "
                                "{4:l}.".format(addr.network,
                                                addr.network.network_environment,
                                                addr.interface,
                                                dbnetwork,
                                                dbnetwork.network_environment))

    assign_address(dbinterface, ip, dbnetwork, label=label)
    session.flush()

    dbhost = getattr(dbhw_ent, "host", None)
    if dbhost:
        plenary_info = PlenaryHost(dbhost, logger=logger)
        key = plenary_info.get_write_key()
        try:
            lock_queue.acquire(key)
            try:
                plenary_info.write(locked=True)
            except IncompleteError:
                # FIXME: if this command is used after "add host" but before
                # "make", then writing out the template will fail due to
                # required services not being assigned. Ignore this error
                # for now.
                plenary_info.restore_stash()

            dsdb_runner = DSDBRunner(logger=logger)
            if delete_old_dsdb_entry:
                dsdb_runner.delete_host_details(dbdns_rec.fqdn, ip)
            dsdb_runner.update_host(dbhw_ent, oldinfo)
            dsdb_runner.commit_or_rollback("Could not add host to DSDB")
        except:
            plenary_info.restore_stash()
            raise
        finally:
            lock_queue.release(key)
    else:
        dsdb_runner = DSDBRunner(logger=logger)
        if delete_old_dsdb_entry:
            dsdb_runner.delete_host_details(dbdns_rec.fqdn, ip)
        dsdb_runner.update_host(dbhw_ent, oldinfo)
        dsdb_runner.commit_or_rollback("Could not add host to DSDB")

    for name, value in audit_results:
        self.audit_result(session, name, value, **kwargs)
    return

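# The overlap check above rejects exactly the situation described in the
# comment: the /24 around 192.168.1.10 and the /26 around 192.168.1.20
# cover overlapping address ranges. A minimal standalone illustration
# using the stdlib ipaddress module (the broker uses its own network
# objects, so this is only an approximation of the same test):
from ipaddress import ip_network

eth0_net = ip_network(u"192.168.1.0/24")  # internal interface
eth1_net = ip_network(u"192.168.1.0/26")  # external interface
assert eth0_net.overlaps(eth1_net)  # would trigger the ArgumentError above
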
def render(self, session, logger, service_address, ip, name, interfaces,
           hostname, cluster, resourcegroup, network_environment,
           map_to_primary, comments, **arguments):
    validate_basic("name", name)

    # TODO: generalize the error message - Layer-3 failover may be
    # implemented by other software, not just Zebra.
    if name == "hostname":
        raise ArgumentError("The hostname service address is reserved for "
                            "Zebra. Please specify the --zebra_interfaces "
                            "option when calling add_host if you want the "
                            "primary name of the host to be managed by "
                            "Zebra.")

    # Filter out empty entries so that whitespace-only input cannot slip
    # past the emptiness check below
    ifnames = [ifname.strip().lower() for ifname in interfaces.split(",")
               if ifname.strip()]
    if not ifnames:
        raise ArgumentError("Please specify at least one interface name.")

    holder = get_resource_holder(session, hostname, cluster, resourcegroup,
                                 compel=False)

    # Address assignments should be added based on the host/cluster, so we
    # have to resolve resource groups first
    if isinstance(holder.holder_object, ResourceGroup):
        real_holder = holder.holder_object.holder.holder_object
    else:
        real_holder = holder.holder_object

    ServiceAddress.get_unique(session, name=name, holder=holder,
                              preclude=True)

    # TODO: add allow_multi=True
    dbdns_rec, newly_created = grab_address(session, service_address, ip,
                                            network_environment)
    ip = dbdns_rec.ip
    dbnetwork = dbdns_rec.network

    if map_to_primary:
        if not isinstance(real_holder, Host):
            raise ArgumentError("The --map_to_primary option works only "
                                "for host-based service addresses.")
        dbdns_rec.reverse_ptr = real_holder.machine.primary_name.fqdn

    # Disable autoflush, since the ServiceAddress object won't be complete
    # until add_resource() is called
    with session.no_autoflush:
        dbsrv = ServiceAddress(name=name, dns_record=dbdns_rec,
                               comments=comments)
        holder.resources.append(dbsrv)

        oldinfo = None
        if isinstance(real_holder, Cluster):
            if not real_holder.hosts:
                # The interface names are only stored in the
                # AddressAssignment objects, so we can't handle a cluster
                # with no hosts and thus no interfaces
                raise ArgumentError("Cannot assign a service address to a "
                                    "cluster that has no members.")
            for host in real_holder.hosts:
                apply_service_address(host, ifnames, dbsrv)
        elif isinstance(real_holder, Host):
            oldinfo = DSDBRunner.snapshot_hw(real_holder.machine)
            apply_service_address(real_holder, ifnames, dbsrv)
        else:  # pragma: no cover
            raise UnimplementedError("{0} as a resource holder is not "
                                     "implemented.".format(real_holder))

    add_resource(session, logger, holder, dbsrv,
                 dsdb_callback=add_srv_dsdb_callback,
                 real_holder=real_holder, oldinfo=oldinfo,
                 newly_created=newly_created, comments=comments)

    return

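# A quick illustration of the interface-name normalization above, showing
# why empty entries are filtered out (a bare comma or whitespace-only
# input would otherwise defeat the "at least one interface" check):
def parse_ifnames(interfaces):
    return [ifname.strip().lower() for ifname in interfaces.split(",")
            if ifname.strip()]

assert parse_ifnames("eth0, ETH1") == ["eth0", "eth1"]
assert parse_ifnames(" , ") == []
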
def render(self, session, logger, cluster, archetype, personality, domain,
           sandbox, max_members, down_hosts_threshold, maint_threshold,
           buildstatus, comments, vm_to_host_ratio, switch, metacluster,
           **arguments):
    validate_basic("cluster", cluster)
    dbpersonality = Personality.get_unique(session, name=personality,
                                           archetype=archetype, compel=True)
    if not dbpersonality.is_cluster:
        raise ArgumentError("%s is not a cluster personality." %
                            personality)

    ctype = dbpersonality.archetype.cluster_type
    section = "archetype_" + dbpersonality.archetype.name

    if not buildstatus:
        buildstatus = "build"
    dbstatus = ClusterLifecycle.get_unique(session, buildstatus,
                                           compel=True)

    (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                 domain=domain,
                                                 sandbox=sandbox,
                                                 compel=True)

    if hasattr(dbbranch, "allow_manage") and not dbbranch.allow_manage:
        raise ArgumentError("Adding clusters to {0:l} is not allowed."
                            .format(dbbranch))

    dbloc = get_location(session, **arguments)
    if not dbloc:
        raise ArgumentError("Adding a cluster requires a location "
                            "constraint.")
    if not dbloc.campus:
        raise ArgumentError("{0} is not within a campus.".format(dbloc))

    if max_members is None:
        if self.config.has_option(section, "max_members_default"):
            max_members = self.config.getint(section, "max_members_default")

    Cluster.get_unique(session, cluster, preclude=True)
    # Not finding the cluster type is an internal consistency issue, so make
    # that show up in the logs by using AquilonError
    clus_type = Cluster.polymorphic_subclass(ctype, "Unknown cluster type",
                                             error=AquilonError)

    (down_hosts_pct, dht) = Cluster.parse_threshold(down_hosts_threshold)

    kw = {'name': cluster,
          'location_constraint': dbloc,
          'personality': dbpersonality,
          'max_hosts': max_members,
          'branch': dbbranch,
          'sandbox_author': dbauthor,
          'down_hosts_threshold': dht,
          'down_hosts_percent': down_hosts_pct,
          'status': dbstatus,
          'comments': comments}

    if ctype == 'esx':
        if vm_to_host_ratio is None:
            if self.config.has_option(section, "vm_to_host_ratio"):
                vm_to_host_ratio = self.config.get(section,
                                                   "vm_to_host_ratio")
            else:
                vm_to_host_ratio = "1:1"
        (vm_count, host_count) = force_ratio("vm_to_host_ratio",
                                             vm_to_host_ratio)
        kw["vm_count"] = vm_count
        kw["host_count"] = host_count

    if switch and hasattr(clus_type, 'switch'):
        kw['switch'] = Switch.get_unique(session, switch, compel=True)

    if maint_threshold is not None:
        (down_hosts_pct, dht) = Cluster.parse_threshold(maint_threshold)
        kw['down_maint_threshold'] = dht
        kw['down_maint_percent'] = down_hosts_pct

    dbcluster = clus_type(**kw)

    plenaries = PlenaryCollection(logger=logger)

    if metacluster:
        dbmetacluster = MetaCluster.get_unique(session, metacluster,
                                               compel=True)
        dbmetacluster.validate_membership(dbcluster)
        dbmetacluster.members.append(dbcluster)
        plenaries.append(Plenary.get_plenary(dbmetacluster))

    session.add(dbcluster)
    session.flush()
    session.refresh(dbcluster)

    plenaries.append(Plenary.get_plenary(dbcluster))

    key = plenaries.get_write_key()
    try:
        lock_queue.acquire(key)
        plenaries.write(locked=True)
    except:
        plenaries.restore_stash()
        raise
    finally:
        lock_queue.release(key)

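# Judging from the unpacking above, Cluster.parse_threshold returns a
# (percent_flag, value) pair. A hypothetical sketch of such a parser,
# assuming thresholds are given either as a plain count ("2") or as a
# percentage ("50%"); this is an assumption about the input format, not
# the broker's actual implementation:
def parse_threshold(threshold):
    if threshold.endswith("%"):
        return (True, int(threshold[:-1]))  # percentage of members
    return (False, int(threshold))          # absolute host count

assert parse_threshold("2") == (False, 2)
assert parse_threshold("50%") == (True, 50)
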
def render(self, session, logger, metacluster, archetype, personality,
           domain, sandbox, max_members, buildstatus, comments,
           **arguments):
    validate_basic("metacluster", metacluster)

    # this should be reverted when virtbuild supports these options
    if not archetype:
        archetype = "metacluster"
    if not personality:
        personality = "metacluster"

    dbpersonality = Personality.get_unique(session, name=personality,
                                           archetype=archetype, compel=True)
    if not dbpersonality.is_cluster:
        raise ArgumentError("%s is not a cluster personality." %
                            personality)

    ctype = "meta"  # dbpersonality.archetype.cluster_type

    if not buildstatus:
        buildstatus = "build"
    dbstatus = ClusterLifecycle.get_unique(session, buildstatus,
                                           compel=True)

    # this should be reverted when virtbuild supports these options
    if not domain and not sandbox:
        domain = self.config.get("archetype_metacluster", "host_domain")

    (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                 domain=domain,
                                                 sandbox=sandbox,
                                                 compel=False)

    dbloc = get_location(session, **arguments)

    # this should be reverted when virtbuild supports this option
    if not dbloc:
        dbloc = Location.get_unique(session,
                                    name=self.config.get("archetype_metacluster",
                                                         "location_name"),
                                    location_type=self.config.get("archetype_metacluster",
                                                                  "location_type"))
    elif not dbloc.campus:
        raise ArgumentError("{0} is not within a campus.".format(dbloc))

    if max_members is None:
        max_members = self.config.getint("archetype_metacluster",
                                         "max_members_default")

    if metacluster.strip().lower() == 'global':
        raise ArgumentError("Metacluster name global is reserved.")

    MetaCluster.get_unique(session, metacluster, preclude=True)

    clus_type = MetaCluster  # Cluster.__mapper__.polymorphic_map[ctype].class_

    dbcluster = MetaCluster(name=metacluster,
                            location_constraint=dbloc,
                            personality=dbpersonality,
                            max_clusters=max_members,
                            branch=dbbranch,
                            sandbox_author=dbauthor,
                            status=dbstatus,
                            comments=comments)

    session.add(dbcluster)
    session.flush()

    plenary = PlenaryMetaCluster(dbcluster, logger=logger)
    plenary.write()

    return
