def hostname_to_host(session, hostname):
    """Look up a Host object by FQDN.

    The DNS domain is validated first, so an unknown domain still produces
    the specific "dns domain not found" error; a missing machine, however,
    is reported as a missing *host*, which is what the caller asked about.
    """
    # Raises early if the DNS domain part is invalid or unknown.
    parse_fqdn(session, hostname)
    try:
        dbmachine = Machine.get_unique(session, hostname, compel=True)
    except NotFoundException:
        # "Machine not found" would be technically correct but confusing
        # when the user asked about a host.
        raise NotFoundException("Host %s not found." % hostname)
    if dbmachine.host:
        return dbmachine.host
    raise NotFoundException("{0} does not have a host "
                            "assigned.".format(dbmachine))
def __init__(self, session=None, name=None, dns_domain=None, fqdn=None,
             dns_environment=None, ignore_name_check=False, **kwargs):
    """Create an Fqdn from either an FQDN string or a name + DNS domain.

    The two addressing forms are mutually exclusive.  The DNS environment
    may be given as an object, a name, or None (meaning the default); the
    non-object forms are resolved here, which requires a live session.
    """
    if fqdn:
        # The two addressing styles must not be combined.
        if name or dns_domain:  # pragma: no cover
            raise TypeError("fqdn and name/dns_domain should not be mixed")
        self._check_session(session)
        name, dns_domain = parse_fqdn(session, fqdn)

    self.check_name(name, dns_domain, ignore_name_check)

    if not isinstance(dns_environment, DnsEnvironment):
        # Resolving the environment by name needs database access.
        self._check_session(session)
        dns_environment = DnsEnvironment.get_unique_or_default(
            session, dns_environment)

    super(Fqdn, self).__init__(name=name, dns_domain=dns_domain,
                               dns_environment=dns_environment, **kwargs)
def create_target_if_needed(session, logger, target, dbdns_env):
    """ Create FQDNs in restricted domains.

        This is used to allow pointing CNAME and PTR records to DNS domains
        we otherwise don't manage.
    """
    name, dbtarget_domain = parse_fqdn(session, target)
    # Serialize against concurrent record creation in this domain.
    dbtarget_domain.lock_row()

    query = (session.query(Fqdn)
             .filter_by(dns_environment=dbdns_env)
             .filter_by(dns_domain=dbtarget_domain)
             .filter_by(name=name))
    try:
        dbtarget = query.one()
    except NoResultFound:
        # Auto-creation is only allowed in restricted domains; elsewhere a
        # missing target is a user error.
        if not dbtarget_domain.restricted:
            raise NotFoundException("Target FQDN {0} does not exist in {1:l}."
                                    .format(target, dbdns_env))

        dbtarget = Fqdn(name=name, dns_domain=dbtarget_domain,
                        dns_environment=dbdns_env)
        try:
            # Best effort sanity check only: a resolution failure is
            # logged but does not abort the operation.
            socket.gethostbyname(dbtarget.fqdn)
        except socket.gaierror as e:
            logger.warning("WARNING: Will create a reference to {0.fqdn!s}, "
                           "but trying to resolve it resulted in an error: "
                           "{1.strerror}.".format(dbtarget, e))

        session.add(dbtarget)
        session.add(ReservedName(fqdn=dbtarget))
def rename_hardware(session, dbhw_ent, rename_to):
    """Rename a hardware entity, optionally moving it to a new DNS domain.

    If rename_to contains a dot it is parsed as an FQDN and both the label
    and the DNS domain change; otherwise only the label changes.  All FQDNs
    derived from the old label ("label" or "label-<suffix>") in the old
    domain are renamed along with the entity.

    Raises ArgumentError if an FQDN was given but the hardware has no
    primary name (there would be no DNS environment to resolve it in).
    """
    if "." in rename_to:
        if not dbhw_ent.primary_name:
            raise ArgumentError("{0} does not have a primary name, renaming "
                                "using an FQDN is not possible."
                                .format(dbhw_ent))
        old_domain = dbhw_ent.primary_name.fqdn.dns_domain
        dns_env = dbhw_ent.primary_name.fqdn.dns_environment
        new_label, new_domain = parse_fqdn(session, rename_to)
    else:
        new_label = rename_to
        if dbhw_ent.primary_name:
            old_domain = new_domain = dbhw_ent.primary_name.fqdn.dns_domain
            dns_env = dbhw_ent.primary_name.fqdn.dns_environment
        else:
            # No primary name means there are no DNS records to rename.
            # old_domain used to be left unbound on this path, making the
            # unconditional lock_row() call below raise NameError.
            old_domain = new_domain = None
            dns_env = None

    # Lock the affected DNS domain(s) to serialize concurrent renames.
    if old_domain:
        old_domain.lock_row()
    if new_domain and new_domain != old_domain:
        new_domain.lock_row()

    dbhw_ent.check_label(new_label)
    # Refuse to rename onto a label that is already taken.
    HardwareEntity.get_unique(session, new_label, preclude=True)

    old_label = dbhw_ent.label

    fqdns = []
    for addr in dbhw_ent.all_addresses():
        fqdns.extend([dns_rec.fqdn for dns_rec in addr.dns_records])
    # This case handles reserved names
    if dbhw_ent.primary_name and dbhw_ent.primary_name.fqdn not in fqdns:
        fqdns.append(dbhw_ent.primary_name.fqdn)
    # Filter out unrelated FQDNs: keep only names derived from the old
    # label in the old DNS domain.
    fqdns = [fqdn for fqdn in fqdns
             if fqdn.dns_domain == old_domain and
             (fqdn.name == old_label or fqdn.name.startswith(old_label + "-"))]

    # Update all state in one go, so disable autoflush for now.
    with session.no_autoflush:
        dbhw_ent.label = new_label
        for dbfqdn in fqdns:
            new_name = new_label + dbfqdn.name[len(old_label):]
            # Make sure the target name is free before renaming.
            Fqdn.get_unique(session, name=new_name, dns_domain=new_domain,
                            dns_environment=dns_env, preclude=True)
            dbfqdn.dns_domain = new_domain
            dbfqdn.name = new_name
def render(self, session, dbuser, fqdn, building, ip, network_environment,
           comments, **arguments):
    """Register an IP address as a router on its network.

    The FQDN is created if necessary in the DNS environment belonging to
    the network environment; the IP is either given explicitly or taken
    from an existing A record for the FQDN.
    """
    dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                         network_environment)
    self.az.check_network_environment(dbuser, dbnet_env)

    dbbuilding = None
    if building:
        dbbuilding = Building.get_unique(session, building, compel=True)

    short, dbdns_domain = parse_fqdn(session, fqdn)
    dbfqdn = Fqdn.get_or_create(session, name=short,
                                dns_domain=dbdns_domain,
                                dns_environment=dbnet_env.dns_environment)

    if ip:
        dbnetwork = get_net_id_from_ip(session, ip, dbnet_env)
        dbdns_rec = ARecord.get_or_create(session, fqdn=dbfqdn, ip=ip,
                                          network=dbnetwork)
    else:
        # No IP given: reuse the address of the existing A record.
        dbdns_rec = ARecord.get_unique(session, dbfqdn, compel=True)
        ip = dbdns_rec.ip
        dbnetwork = dbdns_rec.network

    assert ip in dbnetwork.network, "IP %s is outside network %s" % (
        ip, dbnetwork.ip)

    if ip in dbnetwork.router_ips:
        raise ArgumentError("IP address {0} is already present as a router "
                            "for {1:l}.".format(ip, dbnetwork))

    # Policy checks are valid only for internal networks
    if dbnetwork.is_internal:
        # The address must lie before the first usable host and must not
        # fall on a reserved offset.
        if ip >= dbnetwork.first_usable_host or \
           int(ip) - int(dbnetwork.ip) in dbnetwork.reserved_offsets:
            raise ArgumentError("IP address {0} is not a valid router address "
                                "on {1:l}.".format(ip, dbnetwork))

    dbnetwork.routers.append(
        RouterAddress(ip=ip, location=dbbuilding,
                      dns_environment=dbdns_rec.fqdn.dns_environment,
                      comments=comments))

    session.flush()
    # TODO: update the templates of Zebra hosts on the network
    return
def get_unique(cls, session, fqdn=None, dns_environment=None, name=None,
               dns_domain=None, **kwargs):
    """Look up an Fqdn either by FQDN string or by name + DNS domain.

    The two addressing forms are mutually exclusive.  The DNS environment
    may be an object, a name, or None for the default environment.
    """
    if fqdn:
        # Mixing the two lookup styles is a programming error.
        if name or dns_domain:  # pragma: no cover
            raise TypeError("fqdn and name/dns_domain should not be mixed")
        name, dns_domain = parse_fqdn(session, fqdn)

    if isinstance(dns_environment, DnsEnvironment):
        dbdns_env = dns_environment
    else:
        dbdns_env = DnsEnvironment.get_unique_or_default(session,
                                                         dns_environment)

    return super(Fqdn, cls).get_unique(session, name=name,
                                       dns_domain=dns_domain,
                                       dns_environment=dbdns_env,
                                       **kwargs)
def get_unique(cls, session, fqdn=None, name=None, dns_domain=None,
               dns_environment=None, compel=False, preclude=False, **kwargs):
    """Look up a DNS record by Fqdn object, FQDN string, or name + domain.

    String and name/domain forms are resolved through Fqdn.get_unique()
    first; when compel is set and the FQDN is missing, the Fqdn-level
    error is replaced with a class-specific "not found" message.
    """
    # Proxy FQDN lookup to the Fqdn class
    if not fqdn or not isinstance(fqdn, Fqdn):
        if not isinstance(dns_environment, DnsEnvironment):
            dns_environment = DnsEnvironment.get_unique_or_default(
                session, dns_environment)
        if fqdn:
            if name or dns_domain:  # pragma: no cover
                raise TypeError("fqdn and name/dns_domain cannot be mixed")
            (name, dns_domain) = parse_fqdn(session, fqdn)
        try:
            # Do not pass preclude=True to Fqdn
            fqdn = Fqdn.get_unique(session, name=name, dns_domain=dns_domain,
                                   dns_environment=dns_environment,
                                   compel=compel)
        except NotFoundException:
            # Replace the "Fqdn ... not found" message with a more user
            # friendly one
            msg = "%s %s.%s, %s not found." % (
                cls._get_class_label(), name, dns_domain,
                format(dns_environment, "l"))
            raise NotFoundException(msg)
        if not fqdn:
            return None
    # We already have the FQDN, no need to load it again
    if "query_options" not in kwargs:
        kwargs["query_options"] = [lazyload("fqdn")]
    result = super(DnsRecord, cls).get_unique(session, fqdn=fqdn,
                                              compel=compel,
                                              preclude=preclude, **kwargs)
    if result:
        # Make sure not to load the relation again if we already know its
        # value
        set_committed_value(result, 'fqdn', fqdn)
    return result
def rename_hardware(session, dbhw_ent, rename_to):
    # Rename a hardware entity and every FQDN derived from its label
    # ("label" or "label-<suffix>"), optionally moving them to a new DNS
    # domain when rename_to is a full FQDN.
    if "." in rename_to:
        if not dbhw_ent.primary_name:
            raise ArgumentError("{0} does not have a primary name, renaming "
                                "using an FQDN is not possible."
                                .format(dbhw_ent))
        old_domain = dbhw_ent.primary_name.fqdn.dns_domain
        dns_env = dbhw_ent.primary_name.fqdn.dns_environment
        new_label, new_domain = parse_fqdn(session, rename_to)
    else:
        new_label = rename_to
        if dbhw_ent.primary_name:
            old_domain = new_domain = dbhw_ent.primary_name.fqdn.dns_domain
            dns_env = dbhw_ent.primary_name.fqdn.dns_environment
        else:
            # NOTE(review): old_domain is left unbound on this path, so the
            # unconditional lock_row() below raises NameError when the
            # hardware has no primary name -- confirm whether that case can
            # actually occur for callers of this function.
            new_domain = None
            dns_env = None
    # Lock the affected DNS domain(s) to serialize concurrent renames.
    old_domain.lock_row()
    if new_domain != old_domain:
        new_domain.lock_row()
    dbhw_ent.check_label(new_label)
    # Refuse to rename onto a label that is already taken.
    HardwareEntity.get_unique(session, new_label, preclude=True)
    old_label = dbhw_ent.label
    fqdns = []
    for addr in dbhw_ent.all_addresses():
        fqdns.extend([dns_rec.fqdn for dns_rec in addr.dns_records])
    # This case handles reserved names
    if dbhw_ent.primary_name and dbhw_ent.primary_name.fqdn not in fqdns:
        fqdns.append(dbhw_ent.primary_name.fqdn)
    # Filter out unrelated FQDNs: keep only names derived from the old
    # label in the old DNS domain.
    fqdns = [fqdn for fqdn in fqdns
             if fqdn.dns_domain == old_domain and
             (fqdn.name == old_label or fqdn.name.startswith(old_label + "-"))]
    # Update all state in one go, so disable autoflush for now.
    with session.no_autoflush:
        dbhw_ent.label = new_label
        for dbfqdn in fqdns:
            new_name = new_label + dbfqdn.name[len(old_label):]
            # Make sure the target name is free before renaming.
            Fqdn.get_unique(session, name=new_name, dns_domain=new_domain,
                            dns_environment=dns_env, preclude=True)
            dbfqdn.dns_domain = new_domain
            dbfqdn.name = new_name
def render(self, session, dbuser, fqdn, building, ip, network_environment,
           comments, **arguments):
    """Register an IP address as a router on its network.

    The FQDN is created if necessary in the DNS environment belonging to
    the network environment; the IP is either given explicitly or taken
    from an existing A record for the FQDN.
    """
    dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                         network_environment)
    self.az.check_network_environment(dbuser, dbnet_env)
    if building:
        dbbuilding = Building.get_unique(session, building, compel=True)
    else:
        dbbuilding = None
    (short, dbdns_domain) = parse_fqdn(session, fqdn)
    dbfqdn = Fqdn.get_or_create(session, name=short, dns_domain=dbdns_domain,
                                dns_environment=dbnet_env.dns_environment)
    if ip:
        dbnetwork = get_net_id_from_ip(session, ip, dbnet_env)
        dbdns_rec = ARecord.get_or_create(session, fqdn=dbfqdn, ip=ip,
                                          network=dbnetwork)
    else:
        # No IP given: reuse the address of the existing A record.
        dbdns_rec = ARecord.get_unique(session, dbfqdn, compel=True)
        ip = dbdns_rec.ip
        dbnetwork = dbdns_rec.network
    assert ip in dbnetwork.network, "IP %s is outside network %s" % (
        ip, dbnetwork.ip)
    if ip in dbnetwork.router_ips:
        raise ArgumentError("IP address {0} is already present as a router "
                            "for {1:l}.".format(ip, dbnetwork))
    # Policy checks are valid only for internal networks
    if dbnetwork.is_internal:
        # The address must lie before the first usable host and must not
        # fall on a reserved offset.
        if ip >= dbnetwork.first_usable_host or \
           int(ip) - int(dbnetwork.ip) in dbnetwork.reserved_offsets:
            raise ArgumentError("IP address {0} is not a valid router address "
                                "on {1:l}.".format(ip, dbnetwork))
    dbnetwork.routers.append(
        RouterAddress(ip=ip, location=dbbuilding,
                      dns_environment=dbdns_rec.fqdn.dns_environment,
                      comments=comments))
    session.flush()
    # TODO: update the templates of Zebra hosts on the network
    return
def __init__(self, session=None, name=None, dns_domain=None, fqdn=None,
             dns_environment=None, ignore_name_check=False, **kwargs):
    """Initialize an Fqdn from either an FQDN string or name + dns_domain.

    The two addressing forms are mutually exclusive.  dns_environment may
    be a DnsEnvironment object, a name, or None (the default environment);
    the non-object forms are resolved here, which requires a session.
    """
    if fqdn:
        if name or dns_domain:  # pragma: no cover
            raise TypeError("fqdn and name/dns_domain should not be mixed")
        # Parsing the FQDN needs database access, so a session is required.
        self._check_session(session)
        (name, dns_domain) = parse_fqdn(session, fqdn)
    self.check_name(name, dns_domain, ignore_name_check)
    if not isinstance(dns_environment, DnsEnvironment):
        # Resolving the environment by name also needs the session.
        self._check_session(session)
        dns_environment = DnsEnvironment.get_unique_or_default(
            session, dns_environment)
    super(Fqdn, self).__init__(name=name, dns_domain=dns_domain,
                               dns_environment=dns_environment, **kwargs)
def get_unique(cls, session, fqdn=None, dns_environment=None, name=None,
               dns_domain=None, **kwargs):
    """Look up an Fqdn either by FQDN string or by name + DNS domain.

    The two addressing forms are mutually exclusive.  The DNS environment
    may be an object, a name, or None for the default environment.
    """
    if fqdn:
        if name or dns_domain:  # pragma: no cover
            raise TypeError("fqdn and name/dns_domain should not be mixed")
        (name, dns_domain) = parse_fqdn(session, fqdn)
    if not isinstance(dns_environment, DnsEnvironment):
        dns_environment = DnsEnvironment.get_unique_or_default(
            session, dns_environment)
    return super(Fqdn, cls).get_unique(session, name=name,
                                       dns_domain=dns_domain,
                                       dns_environment=dns_environment,
                                       **kwargs)
def get_unique(cls, session, fqdn=None, name=None, dns_domain=None,
               dns_environment=None, compel=False, preclude=False, **kwargs):
    """Look up a DNS record by Fqdn object, FQDN string, or name + domain.

    String and name/domain forms are first resolved through
    Fqdn.get_unique(); when compel is set and the FQDN is missing, the
    Fqdn-level error is replaced with a class-specific message.
    """
    # Proxy FQDN lookup to the Fqdn class
    if not fqdn or not isinstance(fqdn, Fqdn):
        if not isinstance(dns_environment, DnsEnvironment):
            dns_environment = DnsEnvironment.get_unique_or_default(
                session, dns_environment)

        if fqdn:
            if name or dns_domain:  # pragma: no cover
                raise TypeError("fqdn and name/dns_domain cannot be mixed")
            name, dns_domain = parse_fqdn(session, fqdn)

        try:
            # Do not pass preclude=True to Fqdn
            fqdn = Fqdn.get_unique(session, name=name, dns_domain=dns_domain,
                                   dns_environment=dns_environment,
                                   compel=compel)
        except NotFoundException:
            # Replace the "Fqdn ... not found" message with a more user
            # friendly one
            raise NotFoundException("%s %s.%s, %s not found."
                                    % (cls._get_class_label(), name,
                                       dns_domain,
                                       format(dns_environment, "l")))

        if not fqdn:
            return None

    # We already have the FQDN, no need to load it again
    kwargs.setdefault("query_options", [lazyload("fqdn")])

    result = super(DnsRecord, cls).get_unique(session, fqdn=fqdn,
                                              compel=compel,
                                              preclude=preclude, **kwargs)
    if result:
        # Make sure not to load the relation again if we already know its
        # value
        set_committed_value(result, 'fqdn', fqdn)
    return result
def search_system_query(session, dns_record_type=DnsRecord, **kwargs):
    """Build a query over DNS records from the given search arguments."""
    q = session.query(dns_record_type)
    # Outer-join in all the subclasses so that each access of
    # system doesn't (necessarily) issue another query.
    if dns_record_type is DnsRecord:
        q = q.with_polymorphic('*')

    dbdns_env = DnsEnvironment.get_unique_or_default(
        session, kwargs.get("dns_environment", None))
    q = q.join((Fqdn, DnsRecord.fqdn_id == Fqdn.id))
    q = q.filter_by(dns_environment=dbdns_env)
    q = q.options(contains_eager('fqdn'))

    if kwargs.get('fqdn', None):
        short, dbdns_domain = parse_fqdn(session, kwargs['fqdn'])
        q = q.filter_by(name=short, dns_domain=dbdns_domain)
    if kwargs.get('dns_domain', None):
        dbdns_domain = DnsDomain.get_unique(session, kwargs['dns_domain'],
                                            compel=True)
        q = q.filter_by(dns_domain=dbdns_domain)
    if kwargs.get('shortname', None):
        q = q.filter_by(name=kwargs['shortname'])
    q = q.reset_joinpoint()

    if kwargs.get('ip', None):
        q = q.filter(ARecord.ip == kwargs['ip'])
    if kwargs.get('networkip', None):
        dbnet_env = NetworkEnvironment.get_unique_or_default(
            session, kwargs.get('network_environment', None))
        dbnetwork = get_network_byip(session, kwargs['networkip'], dbnet_env)
        q = q.filter(ARecord.network == dbnetwork)
    if kwargs.get('mac', None):
        raise UnimplementedError("search_system --mac is no longer supported, "
                                 "try search_hardware.")
    if kwargs.get('type', None):
        # Deprecated... remove if it becomes a problem.
        type_arg = kwargs['type'].strip().lower()
        q = q.filter_by(dns_record_type=type_arg)
    return q
def create_target_if_needed(session, logger, target, dbdns_env):
    """ Create FQDNs in restricted domains.

        This is used to allow pointing CNAME and PTR records to DNS domains
        we otherwise don't manage.
    """
    name, dbtarget_domain = parse_fqdn(session, target)
    # Serialize against concurrent record creation in this domain.
    dbtarget_domain.lock_row()
    q = session.query(Fqdn)
    q = q.filter_by(dns_environment=dbdns_env)
    q = q.filter_by(dns_domain=dbtarget_domain)
    q = q.filter_by(name=name)
    try:
        dbtarget = q.one()
    except NoResultFound:
        # Auto-creation is only allowed in restricted domains; elsewhere a
        # missing target is a user error.
        if not dbtarget_domain.restricted:
            raise NotFoundException(
                "Target FQDN {0} does not exist in {1:l}.".format(
                    target, dbdns_env))
        dbtarget = Fqdn(name=name, dns_domain=dbtarget_domain,
                        dns_environment=dbdns_env)
        try:
            # Best effort only: a DNS resolution failure is logged but does
            # not abort the operation.
            socket.gethostbyname(dbtarget.fqdn)
        except socket.gaierror, e:
            logger.warning("WARNING: Will create a reference to {0.fqdn!s}, "
                           "but trying to resolve it resulted in an error: "
                           "{1.strerror}.".format(dbtarget, e))
        session.add(dbtarget)
        dbtarget_rec = ReservedName(fqdn=dbtarget)
        session.add(dbtarget_rec)
def grab_address(session, fqdn, ip, network_environment=None,
                 dns_environment=None, comments=None,
                 allow_restricted_domain=False, allow_multi=False,
                 allow_reserved=False, relaxed=False, preclude=False):
    """ Take ownership of an address.

        This is a bit complicated because due to DNS propagation delays, we
        want to allow users to pre-define a DNS address and then assign the
        address to a host later.

        Parameters:
            session: SQLA session handle
            fqdn: the name to allocate/take over
            ip: the IP address to allocate/take over
            network_environment: where the IP address lives
            dns_enviromnent: where the FQDN lives
            comments: any comments to attach to the DNS record if it is
                created as new
            allow_restricted_domain: if True, adding entries to restricted
                DNS domains is allowed, otherwise it is denied. Default is
                False.
            allow_multi: if True, allow the same FQDN to be added multiple
                times with different IP addresses. Deault is False.
            allow_reserved: if True, allow creating a ReservedName instead
                of an ARecord if no IP address was specified. Default is
                False.
            preclude: if True, forbid taking over an existing DNS record,
                even if it is not referenced by any AddressAssignment
                records. Default is False.
    """
    if not isinstance(network_environment, NetworkEnvironment):
        network_environment = NetworkEnvironment.get_unique_or_default(
            session, network_environment)
    if not dns_environment:
        # The DNS environment defaults to the one tied to the network
        # environment.
        dns_environment = network_environment.dns_environment
    elif not isinstance(dns_environment, DnsEnvironment):
        dns_environment = DnsEnvironment.get_unique(session, dns_environment,
                                                    compel=True)

    # Non-default DNS environments may contain anything, but we want to keep
    # the internal environment clean
    if dns_environment.is_default and not network_environment.is_default:
        raise ArgumentError("Entering external IP addresses to the "
                            "internal DNS environment is not allowed.")

    short, dbdns_domain = parse_fqdn(session, fqdn)

    # Lock the domain to prevent adding/deleting records while we're checking
    # FQDN etc. availability
    dbdns_domain.lock_row()

    if dbdns_domain.restricted and not allow_restricted_domain:
        raise ArgumentError("{0} is restricted, adding extra addresses "
                            "is not allowed.".format(dbdns_domain))

    dbfqdn = Fqdn.get_or_create(session, dns_environment=dns_environment,
                                name=short, dns_domain=dbdns_domain,
                                query_options=[joinedload('dns_records')])

    existing_record = None
    newly_created = False

    if ip:
        dbnetwork = get_net_id_from_ip(session, ip, network_environment)
        check_ip_restrictions(dbnetwork, ip, relaxed=relaxed)

        dbnetwork.lock_row()

        # No filtering on DNS environment. If an address is dynamic in one
        # environment, it should not be considered static in a different
        # environment.
        q = session.query(DynamicStub)
        q = q.filter_by(network=dbnetwork)
        q = q.filter_by(ip=ip)
        dbdns_rec = q.first()
        _forbid_dyndns(dbdns_rec)

        # Verify that no other record uses the same IP address, this time
        # taking the DNS environment into consideration.
        # While the DNS would allow different A records to point to the same
        # IP address, the current user expectation is that creating a DNS
        # entry also counts as a reservation, so we can not allow this use
        # case. If we want to implement such a feature later, the best way
        # would be to subclass Alias and let that subclass emit an A record
        # instead of a CNAME when the dump_dns command is called.
        q = session.query(ARecord)
        q = q.filter_by(network=dbnetwork)
        q = q.filter_by(ip=ip)
        q = q.join(ARecord.fqdn)
        q = q.filter_by(dns_environment=dns_environment)
        dbrecords = q.all()
        if dbrecords and len(dbrecords) > 1:  # pragma: no cover
            # We're just trying to make sure this never happens
            raise AquilonError("IP address %s is referenced by multiple "
                               "DNS records: %s" %
                               (ip, ", ".join([format(rec, "a")
                                               for rec in dbrecords])))
        if dbrecords and dbrecords[0].fqdn != dbfqdn:
            raise ArgumentError("IP address {0} is already in use by "
                                "{1:l}.".format(ip, dbrecords[0]))

        # Check if the name is used already
        for dbdns_rec in dbfqdn.dns_records:
            if isinstance(dbdns_rec, ARecord):
                _forbid_dyndns(dbdns_rec)
                _check_netenv_compat(dbdns_rec, network_environment)
                if dbdns_rec.ip == ip and dbdns_rec.network == dbnetwork:
                    # Same name, same address: take over this record.
                    existing_record = dbdns_rec
                elif not allow_multi:
                    raise ArgumentError("{0} points to a different IP "
                                        "address.".format(dbdns_rec))
            elif isinstance(dbdns_rec, ReservedName):
                # Pre-reserved name: upgrade it to a real A record.
                existing_record = convert_reserved_to_arecord(
                    session, dbdns_rec, dbnetwork, ip)
                newly_created = True
            else:
                # Exclude aliases etc.
                raise ArgumentError("{0} cannot be used for address "
                                    "assignment.".format(dbdns_rec))

        if not existing_record:
            existing_record = ARecord(fqdn=dbfqdn, ip=ip, network=dbnetwork,
                                      comments=comments)
            session.add(existing_record)
            newly_created = True
    else:
        if not dbfqdn.dns_records:
            # There's no IP, and the name did not exist before. Create a
            # reservation, but only if the caller allowed that use case.
            if not allow_reserved:
                raise ArgumentError("DNS Record %s does not exist." % dbfqdn)

            existing_record = ReservedName(fqdn=dbfqdn, comments=comments)
            newly_created = True
        else:
            # There's no IP, but the name is already in use. We need a
            # single IP address.
            if len(dbfqdn.dns_records) > 1:
                raise ArgumentError("{0} does not resolve to a single IP "
                                    "address.".format(dbfqdn))

            existing_record = dbfqdn.dns_records[0]
            _forbid_dyndns(existing_record)
            if not isinstance(existing_record, ARecord):
                # Exclude aliases etc.
                raise ArgumentError("{0} cannot be used for address "
                                    "assignment.".format(existing_record))

            # Verify that the existing record is in the network environment
            # the caller expects
            _check_netenv_compat(existing_record, network_environment)

            ip = existing_record.ip
            dbnetwork = existing_record.network

            dbnetwork.lock_row()

    if existing_record.hardware_entity:
        raise ArgumentError("{0} is already used as the primary name of "
                            "{1:cl} {1.label}.".format(
                                existing_record,
                                existing_record.hardware_entity))

    if preclude and not newly_created:
        raise ArgumentError("{0} already exists.".format(existing_record))

    if ip:
        # Make sure the address is not already assigned to an interface.
        q = session.query(AddressAssignment)
        q = q.filter_by(network=dbnetwork)
        q = q.filter_by(ip=ip)
        addr = q.first()
        if addr:
            raise ArgumentError("IP address {0} is already in use by "
                                "{1:l}.".format(ip, addr.interface))

    return (existing_record, newly_created)
def render(self, session, logger, fqdn, ip, dns_environment,
           network_environment, **arguments):
    """Delete an A record, identified by FQDN and/or IP address.

    Refuses to delete records that are primary names, service addresses,
    or the last DNS record for an IP address that is still assigned to an
    interface.  When the record lived in the default DNS environment, the
    deletion is also propagated to DSDB.
    """
    if network_environment:
        if not isinstance(network_environment, NetworkEnvironment):
            network_environment = NetworkEnvironment.get_unique_or_default(
                session, network_environment)
        if not dns_environment:
            # Fall back to the DNS environment tied to the network
            # environment.
            dns_environment = network_environment.dns_environment

    dbdns_env = DnsEnvironment.get_unique(session, dns_environment,
                                          compel=True)

    with DeleteKey("system", logger=logger):
        # We can't use get_unique() here, since we always want to filter by
        # DNS environment, even if no FQDN was given
        q = session.query(ARecord)
        if ip:
            q = q.filter_by(ip=ip)
        q = q.join(ARecord.fqdn)
        q = q.options(contains_eager('fqdn'))
        q = q.filter_by(dns_environment=dbdns_env)
        if fqdn:
            (name, dbdns_domain) = parse_fqdn(session, fqdn)
            q = q.filter_by(name=name)
            q = q.filter_by(dns_domain=dbdns_domain)
        try:
            dbaddress = q.one()
        except NoResultFound:
            parts = []
            if fqdn:
                parts.append(fqdn)
            if ip:
                parts.append("ip %s" % ip)
            raise NotFoundException("DNS Record %s not found." %
                                    ", ".join(parts))
        except MultipleResultsFound:
            parts = []
            if fqdn:
                parts.append(fqdn)
            if ip:
                parts.append("ip %s" % ip)
            raise NotFoundException("DNS Record %s is not unique." %
                                    ", ".join(parts))

        if dbaddress.hardware_entity:
            raise ArgumentError("DNS Record {0:a} is the primary name of "
                                "{1:l}, therefore it cannot be "
                                "deleted.".format(dbaddress,
                                                  dbaddress.hardware_entity))

        if dbaddress.service_address:
            # TODO: print the holder object
            raise ArgumentError("DNS Record {0:a} is used as a service "
                                "address, therefore it cannot be deleted."
                                .format(dbaddress))

        # Do not allow deleting the DNS record if the IP address is still in
        # use - except if there are other DNS records having the same
        # address
        if dbaddress.assignments:
            last_use = []
            # FIXME: race condition here, we should use
            # SELECT ... FOR UPDATE
            for addr in dbaddress.assignments:
                if len(addr.dns_records) == 1:
                    last_use.append(addr)
            if last_use:
                # Fixed: the separator used to be " ," (comma in the wrong
                # place), and the message used the ip *argument*, which is
                # None when the record was looked up by FQDN only.
                users = ", ".join([format(addr.interface, "l")
                                   for addr in last_use])
                raise ArgumentError("IP address %s is still in use by %s." %
                                    (dbaddress.ip, users))

        ip = dbaddress.ip
        old_fqdn = str(dbaddress.fqdn)
        old_comments = dbaddress.comments
        delete_dns_record(dbaddress)
        session.flush()

        if dbdns_env.is_default:
            # Keep DSDB in sync for records in the default environment.
            dsdb_runner = DSDBRunner(logger=logger)
            dsdb_runner.delete_host_details(old_fqdn, ip,
                                            comments=old_comments)
            dsdb_runner.commit_or_rollback()

    return
self.config.get("archetype_windows", "host_domain"), compel=InternalError) dbarchetype = Archetype.get_unique(session, "windows", compel=InternalError) dbpersonality = Personality.get_unique(session, archetype=dbarchetype, name="generic", compel=InternalError) dbstatus = Ready.get_instance(session) dbos = OperatingSystem.get_unique(session, name="windows", version="generic", archetype=dbarchetype, compel=InternalError) for (host, mac) in windows_hosts.items(): try: (short, dbdns_domain) = parse_fqdn(session, host) except AquilonError, err: msg = "Skipping host %s: %s" % (host, err) failed.append(msg) logger.info(msg) continue existing = DnsRecord.get_unique(session, name=short, dns_domain=dbdns_domain) if existing: if not existing.hardware_entity: msg = "Skipping host %s: It is not a primary name." % host failed.append(msg) logger.info(msg) continue # If these are invalid there should have been a deletion # attempt above.
def refresh_windows_hosts(self, session, logger, containers): conn = sqlite3.connect(self.config.get("broker", "windows_host_info")) # Enable dictionary-style access to the rows. conn.row_factory = sqlite3.Row windows_hosts = {} interfaces = {} cur = conn.cursor() # There are more fields in the dataset like machine and # aqhostname that might be useful for error messages but these # are sufficient. cur.execute("select ether, windowshostname from machines") for row in cur: host = row["windowshostname"] if host: host = host.strip().lower() else: continue mac = row["ether"] if mac: mac = mac.strip().lower() windows_hosts[host] = mac interfaces[mac] = host success = [] failed = [] q = session.query(Host) q = q.filter_by(comments='Created by refresh_windows_host') for dbhost in q.all(): mac_addresses = [iface.mac for iface in dbhost.machine.interfaces] if dbhost.fqdn in windows_hosts and \ windows_hosts[dbhost.fqdn] in mac_addresses: # All is well continue deps = get_host_dependencies(session, dbhost) if deps: msg = "Skipping removal of host %s with dependencies: %s" % \ (dbhost.fqdn, ", ".join(deps)) failed.append(msg) logger.info(msg) continue dbmachine = dbhost.machine success.append("Removed host entry for %s (%s)" % (dbmachine.label, dbmachine.fqdn)) if dbmachine.vm_container: containers.add(dbmachine.vm_container) session.delete(dbhost) dbdns_rec = dbmachine.primary_name dbmachine.primary_name = None delete_dns_record(dbdns_rec) session.flush() # The Host() creations below fail when autoflush is enabled. 
session.autoflush = False dbdomain = Domain.get_unique(session, self.config.get("archetype_windows", "host_domain"), compel=InternalError) dbarchetype = Archetype.get_unique(session, "windows", compel=InternalError) dbpersonality = Personality.get_unique(session, archetype=dbarchetype, name="generic", compel=InternalError) dbstatus = HostLifecycle.get_unique(session, "ready", compel=InternalError) dbos = OperatingSystem.get_unique(session, name="windows", version="generic", archetype=dbarchetype, compel=InternalError) for (host, mac) in windows_hosts.items(): try: (short, dbdns_domain) = parse_fqdn(session, host) except AquilonError, err: msg = "Skipping host %s: %s" % (host, err) failed.append(msg) logger.info(msg) continue existing = DnsRecord.get_unique(session, name=short, dns_domain=dbdns_domain) if existing: if not existing.hardware_entity: msg = "Skipping host %s: It is not a primary name." % host failed.append(msg) logger.info(msg) continue # If these are invalid there should have been a deletion # attempt above. if not existing.hardware_entity.interfaces: msg = "Skipping host %s: Host already exists but has " \ "no interface attached." % host failed.append(msg) logger.info(msg) elif existing.hardware_entity.interfaces[0].mac != mac: msg = "Skipping host %s: Host already exists but with " \ "MAC address %s and not %s." % \ (host, existing.hardware_entity.interfaces[0].mac, mac) failed.append(msg) logger.info(msg) continue dbinterface = session.query(Interface).filter_by(mac=mac).first() if not dbinterface: msg = "Skipping host %s: MAC address %s is not present in " \ "AQDB." % (host, mac) failed.append(msg) logger.info(msg) continue q = session.query(Machine) q = q.filter_by(id=dbinterface.hardware_entity.id) dbmachine = q.first() if not dbmachine: msg = "Skipping host %s: The AQDB interface with MAC address " \ "%s is tied to hardware %s instead of a virtual " \ "machine." 
% \ (host, mac, dbinterface.hardware_entity.label) failed.append(msg) logger.info(msg) continue if dbinterface.assignments: msg = "Skipping host %s: The AQDB interface with MAC address " \ "%s is already tied to %s." % \ (host, mac, dbinterface.assignments[0].fqdns[0]) failed.append(msg) logger.info(msg) continue if dbmachine.host: msg = "Skipping host %s: The AQDB interface with MAC address " \ "%s is already tied to %s." % \ (host, mac, dbmachine.fqdn) failed.append(msg) logger.info(msg) continue dbhost = Host(machine=dbmachine, branch=dbdomain, status=dbstatus, owner_grn=dbpersonality.owner_grn, personality=dbpersonality, operating_system=dbos, comments="Created by refresh_windows_host") session.add(dbhost) if self.config.has_option("archetype_windows", "default_grn_target"): dbhost.grns.append((dbhost, dbgrn, self.config.get("archetype_", "default_grn_target"))) dbfqdn = Fqdn.get_or_create(session, name=short, dns_domain=dbdns_domain, preclude=True) dbdns_rec = ReservedName(fqdn=dbfqdn) session.add(dbdns_rec) dbmachine.primary_name = dbdns_rec success.append("Added host entry for %s (%s)." % (dbmachine.label, dbdns_rec.fqdn)) if dbmachine.vm_container: containers.add(dbmachine.vm_container) session.flush()
def render(self, session, network, network_environment, ip, type, side,
           machine, fqdn, cluster, pg, has_dynamic_ranges, exact_location,
           fullinfo, style, **arguments):
    """Return a network matching the parameters.

    Some of the search terms can only return a unique network.  For
    those (like ip and fqdn) we proceed with the query anyway.  This
    allows for quick scripted tests like "is the network for X.X.X.X a
    tor_net2?".
    """
    dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                         network_environment)
    q = session.query(Network)
    q = q.filter_by(network_environment=dbnet_env)
    if network:
        # Note: the network name is not unique (neither in QIP)
        q = q.filter_by(name=network)
    if ip:
        dbnetwork = get_net_id_from_ip(session, ip, dbnet_env)
        q = q.filter_by(id=dbnetwork.id)
    if type:
        q = q.filter_by(network_type=type)
    if side:
        q = q.filter_by(side=side)
    if machine:
        dbmachine = Machine.get_unique(session, machine, compel=True)
        vlans = []
        if dbmachine.cluster and dbmachine.cluster.network_device:
            # If this is a VM on a cluster, consult the VLANs. There
            # could be functionality here for real hardware to consult
            # interface port groups... there's no real use case yet.
            vlans = [VlanInfo.get_vlan_id(session, i.port_group)
                     for i in dbmachine.interfaces if i.port_group]
            if vlans:
                q = q.join('observed_vlans')
                q = q.filter_by(network_device=dbmachine.cluster.network_device)
                q = q.filter(ObservedVlan.vlan_id.in_(vlans))
                q = q.reset_joinpoint()
        if not vlans:
            # Fall back to the networks of the machine's addresses.
            networks = [addr.network.id
                        for addr in dbmachine.all_addresses()]
            if not networks:
                msg = "Machine %s has no interfaces " % dbmachine.label
                if dbmachine.cluster:
                    msg += "with a portgroup or "
                msg += "assigned to a network."
                raise ArgumentError(msg)
            q = q.filter(Network.id.in_(networks))
    if fqdn:
        (short, dbdns_domain) = parse_fqdn(session, fqdn)
        dnsq = session.query(ARecord.ip)
        dnsq = dnsq.join(ARecord.fqdn)
        dnsq = dnsq.filter_by(name=short)
        dnsq = dnsq.filter_by(dns_domain=dbdns_domain)
        networks = [get_net_id_from_ip(session, addr.ip, dbnet_env).id
                    for addr in dnsq.all()]
        q = q.filter(Network.id.in_(networks))
    if cluster:
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        if dbcluster.network_device:
            q = q.join('observed_vlans')
            q = q.filter_by(network_device=dbcluster.network_device)
            q = q.reset_joinpoint()
        else:
            # NOTE(review): getattr() without a default behaves exactly
            # like plain attribute access -- presumably a None default was
            # intended; confirm before changing.
            net_ids = [h.hardware_entity.primary_name.network.id
                       for h in dbcluster.hosts
                       if getattr(h.hardware_entity.primary_name, "network")]
            q = q.filter(Network.id.in_(net_ids))
    if pg:
        vlan = VlanInfo.get_vlan_id(session, pg, compel=ArgumentError)
        q = q.join('observed_vlans')
        q = q.filter_by(vlan_id=vlan)
        q = q.reset_joinpoint()
    dblocation = get_location(session, **arguments)
    if dblocation:
        if exact_location:
            q = q.filter_by(location=dblocation)
        else:
            # Include networks in all child locations as well.
            childids = dblocation.offspring_ids()
            q = q.filter(Network.location_id.in_(childids))
    if has_dynamic_ranges:
        q = q.filter(exists([DynamicStub.dns_record_id],
                            from_obj=DynamicStub.__table__.join(
                                ARecord.__table__))
                     .where(Network.id == DynamicStub.network_id))
    q = q.order_by(Network.ip)
    if fullinfo or style != 'raw':
        q = q.options(undefer('comments'))
        return q.all()
    return StringAttributeList(q.all(), lambda n: "%s/%s" % (n.ip, n.cidr))
def render(self, session, network, network_environment, ip, type, side,
           machine, fqdn, cluster, pg, has_dynamic_ranges, fullinfo,
           **arguments):
    """Return a network matching the parameters.

    Some of the search terms can only return a unique network. For
    those (like ip and fqdn) we proceed with the query anyway.  This
    allows for quick scripted tests like "is the network for X.X.X.X a
    tor_net2?".

    Returns the raw list of Network objects when --fullinfo was given,
    otherwise a ShortNetworkList for brief display.
    """
    dbnet_env = NetworkEnvironment.get_unique_or_default(
        session, network_environment)
    q = session.query(Network)
    q = q.filter_by(network_environment=dbnet_env)
    if network:
        # Note: the network name is not unique (neither in QIP)
        q = q.filter_by(name=network)
    if ip:
        dbnetwork = get_net_id_from_ip(session, ip, dbnet_env)
        q = q.filter_by(id=dbnetwork.id)
    if type:
        q = q.filter_by(network_type=type)
    if side:
        q = q.filter_by(side=side)
    if machine:
        dbmachine = Machine.get_unique(session, machine, compel=True)
        vlans = []
        if dbmachine.cluster and dbmachine.cluster.switch:
            # If this is a VM on a cluster, consult the VLANs.  There
            # could be functionality here for real hardware to consult
            # interface port groups... there's no real use case yet.
            vlans = [VlanInfo.get_vlan_id(session, i.port_group)
                     for i in dbmachine.interfaces if i.port_group]
            if vlans:
                q = q.join('observed_vlans')
                q = q.filter_by(switch=dbmachine.cluster.switch)
                q = q.filter(ObservedVlan.vlan_id.in_(vlans))
                q = q.reset_joinpoint()
        if not vlans:
            # Fall back to the networks of the machine's own addresses.
            networks = [addr.network.id
                        for addr in dbmachine.all_addresses()]
            if not networks:
                msg = "Machine %s has no interfaces " % dbmachine.label
                if dbmachine.cluster:
                    msg += "with a portgroup or "
                msg += "assigned to a network."
                raise ArgumentError(msg)
            q = q.filter(Network.id.in_(networks))
    if fqdn:
        (short, dbdns_domain) = parse_fqdn(session, fqdn)
        dnsq = session.query(ARecord.ip)
        dnsq = dnsq.join(ARecord.fqdn)
        dnsq = dnsq.filter_by(name=short)
        dnsq = dnsq.filter_by(dns_domain=dbdns_domain)
        networks = [get_net_id_from_ip(session, addr.ip, dbnet_env).id
                    for addr in dnsq.all()]
        q = q.filter(Network.id.in_(networks))
    if cluster:
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        if dbcluster.switch:
            q = q.join('observed_vlans')
            q = q.filter_by(switch=dbcluster.switch)
            q = q.reset_joinpoint()
        else:
            # Bug fix: getattr() needs an explicit default here.  The
            # two-argument form behaves exactly like plain attribute
            # access and raised AttributeError for hosts whose primary
            # name has no "network" attribute (or no primary name at
            # all), instead of filtering those hosts out as the guard
            # intends.
            net_ids = [h.machine.primary_name.network.id
                       for h in dbcluster.hosts
                       if getattr(h.machine.primary_name, "network", None)]
            q = q.filter(Network.id.in_(net_ids))
    if pg:
        vlan = VlanInfo.get_vlan_id(session, pg, compel=ArgumentError)
        q = q.join('observed_vlans')
        q = q.filter_by(vlan_id=vlan)
        q = q.reset_joinpoint()
    dblocation = get_location(session, **arguments)
    if dblocation:
        if arguments.get('exact_location'):
            q = q.filter_by(location=dblocation)
        else:
            # Include networks mapped to any child location as well.
            childids = dblocation.offspring_ids()
            q = q.filter(Network.location_id.in_(childids))
    if has_dynamic_ranges:
        q = q.filter(
            exists([DynamicStub.dns_record_id],
                   from_obj=DynamicStub.__table__.join(
                       ARecord.__table__)).where(
                           Network.id == DynamicStub.network_id))
    q = q.order_by(Network.ip)
    if fullinfo:
        q = q.options(undefer('comments'))
        return q.all()
    return ShortNetworkList(q.all())
def render(self, session, logger, hostname, machine, archetype,
           buildstatus, personality, osname, osversion, service, instance,
           model, machine_type, vendor, serial, cluster, guest_on_cluster,
           guest_on_share, member_cluster_share, domain, sandbox, branch,
           sandbox_owner, dns_domain, shortname, mac, ip, networkip,
           network_environment, exact_location, server_of_service,
           server_of_instance, grn, eon_id, fullinfo, **arguments):
    """Search for hosts matching the given criteria.

    Builds a single SQLAlchemy query over Host, adding one filter per
    supplied search option (hardware, DNS/IP, branch, personality, OS,
    services, cluster/share membership, GRN ownership).

    Returns the raw list of Host objects when --fullinfo was given,
    otherwise a SimpleHostList for brief display.
    """
    dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                         network_environment)

    q = session.query(Host)

    if machine:
        dbmachine = Machine.get_unique(session, machine, compel=True)
        q = q.filter_by(machine=dbmachine)

    # Add the machine definition and the primary name. Use aliases to make
    # sure the end result will be ordered by primary name.
    PriDns = aliased(DnsRecord)
    PriFqdn = aliased(Fqdn)
    PriDomain = aliased(DnsDomain)
    q = q.join(Machine,
               (PriDns, PriDns.id == Machine.primary_name_id),
               (PriFqdn, PriDns.fqdn_id == PriFqdn.id),
               (PriDomain, PriFqdn.dns_domain_id == PriDomain.id))
    q = q.order_by(PriFqdn.name, PriDomain.name)
    q = q.options(contains_eager('machine'),
                  contains_eager('machine.primary_name', alias=PriDns),
                  contains_eager('machine.primary_name.fqdn',
                                 alias=PriFqdn),
                  contains_eager('machine.primary_name.fqdn.dns_domain',
                                 alias=PriDomain))
    q = q.reset_joinpoint()

    # Hardware-specific filters
    dblocation = get_location(session, **arguments)
    if dblocation:
        if exact_location:
            q = q.filter(Machine.location == dblocation)
        else:
            # Include machines in any child location as well.
            childids = dblocation.offspring_ids()
            q = q.filter(Machine.location_id.in_(childids))

    if model or vendor or machine_type:
        subq = Model.get_matching_query(session, name=model, vendor=vendor,
                                        machine_type=machine_type,
                                        compel=True)
        q = q.filter(Machine.model_id.in_(subq))

    if serial:
        self.deprecated_option("serial",
                               "Please use search machine --serial instead.",
                               logger=logger, **arguments)
        q = q.filter(Machine.serial_no == serial)

    # DNS IP address related filters
    if mac or ip or networkip or hostname or dns_domain or shortname:
        # Inner joins are cheaper than outer joins, so make some effort to
        # use inner joins when possible
        if mac or ip or networkip:
            q = q.join(Interface)
        else:
            q = q.outerjoin(Interface)
        if ip or networkip:
            q = q.join(AddressAssignment, Network, from_joinpoint=True)
        else:
            q = q.outerjoin(AddressAssignment, Network,
                            from_joinpoint=True)

        if mac:
            self.deprecated_option("mac", "Please use search machine "
                                   "--mac instead.", logger=logger,
                                   **arguments)
            q = q.filter(Interface.mac == mac)
        if ip:
            q = q.filter(AddressAssignment.ip == ip)
            q = q.filter(Network.network_environment == dbnet_env)
        if networkip:
            dbnetwork = get_network_byip(session, networkip, dbnet_env)
            q = q.filter(AddressAssignment.network == dbnetwork)

        dbdns_domain = None
        if hostname:
            (shortname, dbdns_domain) = parse_fqdn(session, hostname)
        if dns_domain:
            dbdns_domain = DnsDomain.get_unique(session, dns_domain,
                                                compel=True)

        if shortname or dbdns_domain:
            # Match either an address record on any assigned IP or the
            # host's primary name.
            ARecAlias = aliased(ARecord)
            ARecFqdn = aliased(Fqdn)
            q = q.outerjoin((ARecAlias,
                             and_(ARecAlias.ip == AddressAssignment.ip,
                                  ARecAlias.network_id ==
                                  AddressAssignment.network_id)),
                            (ARecFqdn, ARecAlias.fqdn_id == ARecFqdn.id))
            if shortname:
                q = q.filter(or_(ARecFqdn.name == shortname,
                                 PriFqdn.name == shortname))
            if dbdns_domain:
                q = q.filter(or_(ARecFqdn.dns_domain == dbdns_domain,
                                 PriFqdn.dns_domain == dbdns_domain))
        q = q.reset_joinpoint()

    (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                 domain=domain,
                                                 sandbox=sandbox,
                                                 branch=branch)
    if sandbox_owner:
        dbauthor = get_user_principal(session, sandbox_owner)

    if dbbranch:
        q = q.filter_by(branch=dbbranch)
    if dbauthor:
        q = q.filter_by(sandbox_author=dbauthor)

    if archetype:
        # Added to the searches as appropriate below.
        dbarchetype = Archetype.get_unique(session, archetype, compel=True)
    if personality and archetype:
        dbpersonality = Personality.get_unique(session,
                                               archetype=dbarchetype,
                                               name=personality,
                                               compel=True)
        q = q.filter_by(personality=dbpersonality)
    elif personality:
        PersAlias = aliased(Personality)
        q = q.join(PersAlias).filter_by(name=personality)
        q = q.reset_joinpoint()
    elif archetype:
        PersAlias = aliased(Personality)
        q = q.join(PersAlias).filter_by(archetype=dbarchetype)
        q = q.reset_joinpoint()

    if buildstatus:
        dbbuildstatus = HostLifecycle.get_unique(session, buildstatus,
                                                 compel=True)
        q = q.filter_by(status=dbbuildstatus)

    if osname and osversion and archetype:
        # archetype was already resolved above
        dbos = OperatingSystem.get_unique(session, name=osname,
                                          version=osversion,
                                          archetype=dbarchetype,
                                          compel=True)
        q = q.filter_by(operating_system=dbos)
    elif osname or osversion:
        q = q.join('operating_system')
        if osname:
            q = q.filter_by(name=osname)
        if osversion:
            q = q.filter_by(version=osversion)
        q = q.reset_joinpoint()

    if service:
        dbservice = Service.get_unique(session, service, compel=True)
        if instance:
            dbsi = get_service_instance(session, dbservice, instance)
            q = q.filter(Host.services_used.contains(dbsi))
        else:
            q = q.join('services_used')
            q = q.filter_by(service=dbservice)
            q = q.reset_joinpoint()
    elif instance:
        q = q.join('services_used')
        q = q.filter_by(name=instance)
        q = q.reset_joinpoint()

    if server_of_service:
        dbserver_service = Service.get_unique(session, server_of_service,
                                              compel=True)
        if server_of_instance:
            dbssi = get_service_instance(session, dbserver_service,
                                         server_of_instance)
            q = q.join('_services_provided')
            q = q.filter_by(service_instance=dbssi)
            q = q.reset_joinpoint()
        else:
            q = q.join('_services_provided', 'service_instance')
            q = q.filter_by(service=dbserver_service)
            q = q.reset_joinpoint()
    elif server_of_instance:
        q = q.join('_services_provided', 'service_instance')
        q = q.filter_by(name=server_of_instance)
        q = q.reset_joinpoint()

    if cluster:
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        if isinstance(dbcluster, MetaCluster):
            q = q.join('_cluster', 'cluster', '_metacluster')
            q = q.filter_by(metacluster=dbcluster)
        else:
            q = q.filter_by(cluster=dbcluster)
        q = q.reset_joinpoint()
    if guest_on_cluster:
        # TODO: this does not handle metaclusters according to Wes
        dbcluster = Cluster.get_unique(session, guest_on_cluster,
                                       compel=True)
        q = q.join('machine', VirtualMachine, ClusterResource)
        q = q.filter_by(cluster=dbcluster)
        q = q.reset_joinpoint()
    if guest_on_share:
        #v2
        v2shares = session.query(Share.id).filter_by(
            name=guest_on_share).all()
        if not v2shares:
            raise NotFoundException("No shares found with name {0}."
                                    .format(guest_on_share))

        NasAlias = aliased(VirtualDisk)
        q = q.join('machine', 'disks', (NasAlias, NasAlias.id == Disk.id))
        q = q.filter(NasAlias.share_id.in_(map(lambda s: s[0], v2shares)))
        q = q.reset_joinpoint()

    if member_cluster_share:
        #v2
        v2shares = session.query(Share.id).filter_by(
            name=member_cluster_share).all()
        if not v2shares:
            # Bug fix: the original formatted guest_on_share into this
            # message, which is the wrong option (and may be None) in the
            # member_cluster_share branch.
            raise NotFoundException("No shares found with name {0}."
                                    .format(member_cluster_share))

        NasAlias = aliased(VirtualDisk)
        q = q.join('_cluster', 'cluster', 'resholder', VirtualMachine,
                   'machine', 'disks', (NasAlias, NasAlias.id == Disk.id))
        q = q.filter(NasAlias.share_id.in_(map(lambda s: s[0], v2shares)))
        q = q.reset_joinpoint()

    if grn or eon_id:
        dbgrn = lookup_grn(session, grn, eon_id, autoupdate=False)

        # Match hosts owned by the GRN directly, or via their personality.
        persq = session.query(Personality.id)
        persq = persq.outerjoin(PersonalityGrnMap)
        persq = persq.filter(or_(Personality.owner_eon_id == dbgrn.eon_id,
                                 PersonalityGrnMap.eon_id == dbgrn.eon_id))
        q = q.outerjoin(HostGrnMap)
        q = q.filter(or_(Host.owner_eon_id == dbgrn.eon_id,
                         HostGrnMap.eon_id == dbgrn.eon_id,
                         Host.personality_id.in_(persq.subquery())))
        q = q.reset_joinpoint()

    if fullinfo:
        return q.all()
    return SimpleHostList(q.all())
def grab_address(session, fqdn, ip, network_environment=None,
                 dns_environment=None, comments=None,
                 allow_restricted_domain=False, allow_multi=False,
                 allow_reserved=False, relaxed=False, preclude=False):
    """
    Take ownership of an address.

    This is a bit complicated because due to DNS propagation delays, we want
    to allow users to pre-define a DNS address and then assign the address
    to a host later.

    Parameters:
        session: SQLA session handle
        fqdn: the name to allocate/take over
        ip: the IP address to allocate/take over
        network_environment: where the IP address lives
        dns_environment: where the FQDN lives
        comments: any comments to attach to the DNS record if it is created
            as new
        allow_restricted_domain: if True, adding entries to restricted DNS
            domains is allowed, otherwise it is denied. Default is False.
        allow_multi: if True, allow the same FQDN to be added multiple times
            with different IP addresses. Default is False.
        allow_reserved: if True, allow creating a ReservedName instead of an
            ARecord if no IP address was specified. Default is False.
        relaxed: passed through to check_ip_restrictions() to loosen the IP
            address checks. Default is False.
        preclude: if True, forbid taking over an existing DNS record, even
            if it is not referenced by any AddressAssignment records.
            Default is False.

    Returns a tuple of (the DNS record now owning the address, whether that
    record was newly created by this call).
    """
    # Resolve string/None environment arguments into ORM objects; callers
    # may pass either the object or its name.
    if not isinstance(network_environment, NetworkEnvironment):
        network_environment = NetworkEnvironment.get_unique_or_default(session,
                                                                       network_environment)
    if not dns_environment:
        dns_environment = network_environment.dns_environment
    elif not isinstance(dns_environment, DnsEnvironment):
        dns_environment = DnsEnvironment.get_unique(session, dns_environment,
                                                    compel=True)

    # Non-default DNS environments may contain anything, but we want to keep
    # the internal environment clean
    if dns_environment.is_default and not network_environment.is_default:
        raise ArgumentError("Entering external IP addresses to the "
                            "internal DNS environment is not allowed.")

    short, dbdns_domain = parse_fqdn(session, fqdn)

    # Lock the domain to prevent adding/deleting records while we're checking
    # FQDN etc. availability
    dbdns_domain.lock_row()

    if dbdns_domain.restricted and not allow_restricted_domain:
        raise ArgumentError("{0} is restricted, adding extra addresses "
                            "is not allowed.".format(dbdns_domain))

    dbfqdn = Fqdn.get_or_create(session, dns_environment=dns_environment,
                                name=short, dns_domain=dbdns_domain,
                                query_options=[joinedload('dns_records')])

    existing_record = None
    newly_created = False

    if ip:
        dbnetwork = get_net_id_from_ip(session, ip, network_environment)
        check_ip_restrictions(dbnetwork, ip, relaxed=relaxed)

        dbnetwork.lock_row()

        # No filtering on DNS environment. If an address is dynamic in one
        # environment, it should not be considered static in a different
        # environment.
        q = session.query(DynamicStub)
        q = q.filter_by(network=dbnetwork)
        q = q.filter_by(ip=ip)
        dbdns_rec = q.first()
        _forbid_dyndns(dbdns_rec)

        # Verify that no other record uses the same IP address, this time
        # taking the DNS environment into consideration.
        # While the DNS would allow different A records to point to the same
        # IP address, the current user expectation is that creating a DNS
        # entry also counts as a reservation, so we can not allow this use
        # case. If we want to implement such a feature later, the best way
        # would be to subclass Alias and let that subclass emit an A record
        # instead of a CNAME when the dump_dns command is called.
        q = session.query(ARecord)
        q = q.filter_by(network=dbnetwork)
        q = q.filter_by(ip=ip)
        q = q.join(ARecord.fqdn)
        q = q.filter_by(dns_environment=dns_environment)
        dbrecords = q.all()
        if dbrecords and len(dbrecords) > 1:  # pragma: no cover
            # We're just trying to make sure this never happens
            raise AquilonError("IP address %s is referenced by multiple "
                               "DNS records: %s" %
                               (ip, ", ".join([format(rec, "a")
                                               for rec in dbrecords])))
        if dbrecords and dbrecords[0].fqdn != dbfqdn:
            raise ArgumentError("IP address {0} is already in use by {1:l}."
                                .format(ip, dbrecords[0]))

        # Check if the name is used already
        for dbdns_rec in dbfqdn.dns_records:
            if isinstance(dbdns_rec, ARecord):
                _forbid_dyndns(dbdns_rec)
                _check_netenv_compat(dbdns_rec, network_environment)
                if dbdns_rec.ip == ip and dbdns_rec.network == dbnetwork:
                    # The exact record we were asked for already exists.
                    existing_record = dbdns_rec
                elif not allow_multi:
                    raise ArgumentError("{0} points to a different IP "
                                        "address.".format(dbdns_rec))
            elif isinstance(dbdns_rec, ReservedName):
                # A pre-created reservation can now be upgraded to a real
                # A record since we have an IP address.
                existing_record = convert_reserved_to_arecord(session,
                                                              dbdns_rec,
                                                              dbnetwork, ip)
                newly_created = True
            else:
                # Exclude aliases etc.
                raise ArgumentError("{0} cannot be used for address "
                                    "assignment.".format(dbdns_rec))

        if not existing_record:
            existing_record = ARecord(fqdn=dbfqdn, ip=ip, network=dbnetwork,
                                      comments=comments)
            session.add(existing_record)
            newly_created = True
    else:
        if not dbfqdn.dns_records:
            # There's no IP, and the name did not exist before. Create a
            # reservation, but only if the caller allowed that use case.
            if not allow_reserved:
                raise ArgumentError("DNS Record %s does not exist." % dbfqdn)

            existing_record = ReservedName(fqdn=dbfqdn, comments=comments)
            newly_created = True
        else:
            # There's no IP, but the name is already in use. We need a
            # single IP address.
            if len(dbfqdn.dns_records) > 1:
                raise ArgumentError("{0} does not resolve to a single IP "
                                    "address.".format(dbfqdn))

            existing_record = dbfqdn.dns_records[0]
            _forbid_dyndns(existing_record)
            if not isinstance(existing_record, ARecord):
                # Exclude aliases etc.
                raise ArgumentError("{0} cannot be used for address "
                                    "assignment.".format(existing_record))

            # Verify that the existing record is in the network environment
            # the caller expects
            _check_netenv_compat(existing_record, network_environment)

            # Take the IP/network from the pre-existing record.
            ip = existing_record.ip
            dbnetwork = existing_record.network

            dbnetwork.lock_row()

    if existing_record.hardware_entity:
        raise ArgumentError("{0} is already used as the primary name of "
                            "{1:cl} {1.label}."
                            .format(existing_record,
                                    existing_record.hardware_entity))

    if preclude and not newly_created:
        raise ArgumentError("{0} already exists.".format(existing_record))

    if ip:
        # The DNS record may exist, but make sure no interface has the
        # address assigned yet.
        q = session.query(AddressAssignment)
        q = q.filter_by(network=dbnetwork)
        q = q.filter_by(ip=ip)
        addr = q.first()
        if addr:
            raise ArgumentError("IP address {0} is already in use by "
                                "{1:l}.".format(ip, addr.interface))

    return (existing_record, newly_created)
def render(self, session, logger, hostname, machine, archetype,
           buildstatus, personality, osname, osversion, service, instance,
           model, machine_type, vendor, serial, cluster, guest_on_cluster,
           guest_on_share, member_cluster_share, domain, sandbox, branch,
           sandbox_owner, dns_domain, shortname, mac, ip, networkip,
           network_environment, exact_location, server_of_service,
           server_of_instance, grn, eon_id, fullinfo, **arguments):
    """Search for hosts matching the given criteria.

    Builds a single SQLAlchemy query over Host, adding one filter per
    supplied search option (hardware, DNS/IP, branch, personality, OS,
    services, cluster/share membership, GRN ownership).

    Returns the raw list of Host objects when --fullinfo was given,
    otherwise a SimpleHostList for brief display.
    """
    dbnet_env = NetworkEnvironment.get_unique_or_default(
        session, network_environment)

    q = session.query(Host)

    if machine:
        dbmachine = Machine.get_unique(session, machine, compel=True)
        q = q.filter_by(machine=dbmachine)

    # Add the machine definition and the primary name. Use aliases to make
    # sure the end result will be ordered by primary name.
    PriDns = aliased(DnsRecord)
    PriFqdn = aliased(Fqdn)
    PriDomain = aliased(DnsDomain)
    q = q.join(Machine,
               (PriDns, PriDns.id == Machine.primary_name_id),
               (PriFqdn, PriDns.fqdn_id == PriFqdn.id),
               (PriDomain, PriFqdn.dns_domain_id == PriDomain.id))
    q = q.order_by(PriFqdn.name, PriDomain.name)
    q = q.options(
        contains_eager('machine'),
        contains_eager('machine.primary_name', alias=PriDns),
        contains_eager('machine.primary_name.fqdn', alias=PriFqdn),
        contains_eager('machine.primary_name.fqdn.dns_domain',
                       alias=PriDomain))
    q = q.reset_joinpoint()

    # Hardware-specific filters
    dblocation = get_location(session, **arguments)
    if dblocation:
        if exact_location:
            q = q.filter(Machine.location == dblocation)
        else:
            # Include machines in any child location as well.
            childids = dblocation.offspring_ids()
            q = q.filter(Machine.location_id.in_(childids))

    if model or vendor or machine_type:
        subq = Model.get_matching_query(session, name=model, vendor=vendor,
                                        machine_type=machine_type,
                                        compel=True)
        q = q.filter(Machine.model_id.in_(subq))

    if serial:
        self.deprecated_option(
            "serial", "Please use search machine --serial instead.",
            logger=logger, **arguments)
        q = q.filter(Machine.serial_no == serial)

    # DNS IP address related filters
    if mac or ip or networkip or hostname or dns_domain or shortname:
        # Inner joins are cheaper than outer joins, so make some effort to
        # use inner joins when possible
        if mac or ip or networkip:
            q = q.join(Interface)
        else:
            q = q.outerjoin(Interface)
        if ip or networkip:
            q = q.join(AddressAssignment, Network, from_joinpoint=True)
        else:
            q = q.outerjoin(AddressAssignment, Network,
                            from_joinpoint=True)

        if mac:
            self.deprecated_option("mac", "Please use search machine "
                                   "--mac instead.", logger=logger,
                                   **arguments)
            q = q.filter(Interface.mac == mac)
        if ip:
            q = q.filter(AddressAssignment.ip == ip)
            q = q.filter(Network.network_environment == dbnet_env)
        if networkip:
            dbnetwork = get_network_byip(session, networkip, dbnet_env)
            q = q.filter(AddressAssignment.network == dbnetwork)

        dbdns_domain = None
        if hostname:
            (shortname, dbdns_domain) = parse_fqdn(session, hostname)
        if dns_domain:
            dbdns_domain = DnsDomain.get_unique(session, dns_domain,
                                                compel=True)

        if shortname or dbdns_domain:
            # Match either an address record on any assigned IP or the
            # host's primary name.
            ARecAlias = aliased(ARecord)
            ARecFqdn = aliased(Fqdn)
            q = q.outerjoin(
                (ARecAlias,
                 and_(ARecAlias.ip == AddressAssignment.ip,
                      ARecAlias.network_id ==
                      AddressAssignment.network_id)),
                (ARecFqdn, ARecAlias.fqdn_id == ARecFqdn.id))
            if shortname:
                q = q.filter(
                    or_(ARecFqdn.name == shortname,
                        PriFqdn.name == shortname))
            if dbdns_domain:
                q = q.filter(
                    or_(ARecFqdn.dns_domain == dbdns_domain,
                        PriFqdn.dns_domain == dbdns_domain))
        q = q.reset_joinpoint()

    (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                 domain=domain,
                                                 sandbox=sandbox,
                                                 branch=branch)
    if sandbox_owner:
        dbauthor = get_user_principal(session, sandbox_owner)

    if dbbranch:
        q = q.filter_by(branch=dbbranch)
    if dbauthor:
        q = q.filter_by(sandbox_author=dbauthor)

    if archetype:
        # Added to the searches as appropriate below.
        dbarchetype = Archetype.get_unique(session, archetype, compel=True)
    if personality and archetype:
        dbpersonality = Personality.get_unique(session,
                                               archetype=dbarchetype,
                                               name=personality,
                                               compel=True)
        q = q.filter_by(personality=dbpersonality)
    elif personality:
        PersAlias = aliased(Personality)
        q = q.join(PersAlias).filter_by(name=personality)
        q = q.reset_joinpoint()
    elif archetype:
        PersAlias = aliased(Personality)
        q = q.join(PersAlias).filter_by(archetype=dbarchetype)
        q = q.reset_joinpoint()

    if buildstatus:
        dbbuildstatus = HostLifecycle.get_unique(session, buildstatus,
                                                 compel=True)
        q = q.filter_by(status=dbbuildstatus)

    if osname and osversion and archetype:
        # archetype was already resolved above
        dbos = OperatingSystem.get_unique(session, name=osname,
                                          version=osversion,
                                          archetype=dbarchetype,
                                          compel=True)
        q = q.filter_by(operating_system=dbos)
    elif osname or osversion:
        q = q.join('operating_system')
        if osname:
            q = q.filter_by(name=osname)
        if osversion:
            q = q.filter_by(version=osversion)
        q = q.reset_joinpoint()

    if service:
        dbservice = Service.get_unique(session, service, compel=True)
        if instance:
            dbsi = get_service_instance(session, dbservice, instance)
            q = q.filter(Host.services_used.contains(dbsi))
        else:
            q = q.join('services_used')
            q = q.filter_by(service=dbservice)
            q = q.reset_joinpoint()
    elif instance:
        q = q.join('services_used')
        q = q.filter_by(name=instance)
        q = q.reset_joinpoint()

    if server_of_service:
        dbserver_service = Service.get_unique(session, server_of_service,
                                              compel=True)
        if server_of_instance:
            dbssi = get_service_instance(session, dbserver_service,
                                         server_of_instance)
            q = q.join('_services_provided')
            q = q.filter_by(service_instance=dbssi)
            q = q.reset_joinpoint()
        else:
            q = q.join('_services_provided', 'service_instance')
            q = q.filter_by(service=dbserver_service)
            q = q.reset_joinpoint()
    elif server_of_instance:
        q = q.join('_services_provided', 'service_instance')
        q = q.filter_by(name=server_of_instance)
        q = q.reset_joinpoint()

    if cluster:
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        if isinstance(dbcluster, MetaCluster):
            q = q.join('_cluster', 'cluster', '_metacluster')
            q = q.filter_by(metacluster=dbcluster)
        else:
            q = q.filter_by(cluster=dbcluster)
        q = q.reset_joinpoint()
    if guest_on_cluster:
        # TODO: this does not handle metaclusters according to Wes
        dbcluster = Cluster.get_unique(session, guest_on_cluster,
                                       compel=True)
        q = q.join('machine', VirtualMachine, ClusterResource)
        q = q.filter_by(cluster=dbcluster)
        q = q.reset_joinpoint()
    if guest_on_share:
        #v2
        v2shares = session.query(
            Share.id).filter_by(name=guest_on_share).all()
        if not v2shares:
            raise NotFoundException(
                "No shares found with name {0}.".format(guest_on_share))

        NasAlias = aliased(VirtualDisk)
        q = q.join('machine', 'disks', (NasAlias, NasAlias.id == Disk.id))
        q = q.filter(NasAlias.share_id.in_(map(lambda s: s[0], v2shares)))
        q = q.reset_joinpoint()

    if member_cluster_share:
        #v2
        v2shares = session.query(
            Share.id).filter_by(name=member_cluster_share).all()
        if not v2shares:
            # Bug fix: the original formatted guest_on_share into this
            # message, which is the wrong option (and may be None) in the
            # member_cluster_share branch.
            raise NotFoundException(
                "No shares found with name {0}.".format(
                    member_cluster_share))

        NasAlias = aliased(VirtualDisk)
        q = q.join('_cluster', 'cluster', 'resholder', VirtualMachine,
                   'machine', 'disks', (NasAlias, NasAlias.id == Disk.id))
        q = q.filter(NasAlias.share_id.in_(map(lambda s: s[0], v2shares)))
        q = q.reset_joinpoint()

    if grn or eon_id:
        dbgrn = lookup_grn(session, grn, eon_id, autoupdate=False)

        # Match hosts owned by the GRN directly, or via their personality.
        persq = session.query(Personality.id)
        persq = persq.outerjoin(PersonalityGrnMap)
        persq = persq.filter(
            or_(Personality.owner_eon_id == dbgrn.eon_id,
                PersonalityGrnMap.eon_id == dbgrn.eon_id))
        q = q.outerjoin(HostGrnMap)
        q = q.filter(
            or_(Host.owner_eon_id == dbgrn.eon_id,
                HostGrnMap.eon_id == dbgrn.eon_id,
                Host.personality_id.in_(persq.subquery())))
        q = q.reset_joinpoint()

    if fullinfo:
        return q.all()
    return SimpleHostList(q.all())