def render(self, session, logger, dns_domain, restricted, comments, **arguments):
    """Create a new DNS domain and mirror it into DSDB."""
    # Refuse to proceed if the name is already taken.
    DnsDomain.get_unique(session, dns_domain, preclude=True)

    dbdomain = DnsDomain(name=dns_domain, comments=comments)
    if restricted:
        dbdomain.restricted = True
    session.add(dbdomain)
    session.flush()

    # Propagate the new domain to DSDB; roll back on failure.
    dsdb_runner = DSDBRunner(logger=logger)
    dsdb_runner.add_dns_domain(dbdomain.name, comments)
    dsdb_runner.commit_or_rollback()
    return
def render(self, session, service, protocol, dns_domain, priority, weight,
           target, port, dns_environment, comments, **kwargs):
    """Create an SRV record pointing at an existing FQDN."""
    dbdns_env = DnsEnvironment.get_unique_or_default(session, dns_environment)
    dbdomain = DnsDomain.get_unique(session, dns_domain, compel=True)

    # Restricted domains only allow a limited set of record types.
    if dbdomain.restricted:
        raise ArgumentError("{0} is restricted, SRV records are not allowed."
                            .format(dbdomain))

    # TODO: we could try looking up the port based on the service, but there
    # are some caveats:
    # - the protocol name used in SRV record may not match the name used in
    #   /etc/services
    # - socket.getservent() may return bogus information (like it does for
    #   e.g. 'kerberos')

    # Normalize user-supplied names before the lookups.
    service = service.strip().lower()
    target = target.strip().lower()
    dbtarget = Fqdn.get_unique(session, target, compel=True)

    dbsrv_rec = SrvRecord(service=service, protocol=protocol,
                          priority=priority, weight=weight, target=dbtarget,
                          port=port, dns_domain=dbdomain,
                          dns_environment=dbdns_env, comments=comments)
    session.add(dbsrv_rec)
    session.flush()
    return
def render(self, session, logger, prefix, dns_domain, hostname, machine, **args):
    """Add a host whose short name is auto-generated from *prefix*.

    Returns the fully qualified host name that was selected.
    """
    if dns_domain:
        dbdns_domain = DnsDomain.get_unique(session, dns_domain, compel=True)
    else:
        # No domain given: walk up the machine's location tree until a
        # default DNS domain is configured.
        dbmachine = Machine.get_unique(session, machine, compel=True)
        dbdns_domain = None
        loc = dbmachine.location
        while loc and not dbdns_domain:
            dbdns_domain = loc.default_dns_domain
            loc = loc.parent
        if not dbdns_domain:
            raise ArgumentError("There is no default DNS domain configured "
                                "for the machine's location. Please "
                                "specify --dns_domain.")

    # Lock the DNS domain to prevent the same name generated for
    # simultaneous requests
    dbdns_domain.lock_row()

    prefix = AqStr.normalize(prefix)
    next_index = search_next(session=session, cls=Fqdn, attr=Fqdn.name,
                             value=prefix, dns_domain=dbdns_domain,
                             start=None, pack=None)
    hostname = "%s%d.%s" % (prefix, next_index, dbdns_domain)

    # Delegate the actual host creation to the regular add_host command.
    CommandAddHost.render(self, session, logger, hostname=hostname,
                          machine=machine, **args)
    logger.info("Selected host name %s" % hostname)
    self.audit_result(session, 'hostname', hostname, **args)
    return hostname
def render(self, session, service, protocol, dns_domain, priority, weight,
           target, port, dns_environment, comments, **kwargs):
    """Add an SRV record for a service/protocol within a DNS domain."""
    dbdns_env = DnsEnvironment.get_unique_or_default(session, dns_environment)
    dbdns_domain = DnsDomain.get_unique(session, dns_domain, compel=True)
    if dbdns_domain.restricted:
        # SRV records may not be created in restricted domains.
        raise ArgumentError("{0} is restricted, SRV records are not allowed."
                            .format(dbdns_domain))

    # TODO: we could try looking up the port based on the service, but there
    # are some caveats:
    # - the protocol name used in SRV record may not match the name used in
    #   /etc/services
    # - socket.getservent() may return bogus information (like it does for
    #   e.g. 'kerberos')

    service = service.strip().lower()
    target = target.strip().lower()
    dbtarget = Fqdn.get_unique(session, target, compel=True)

    session.add(SrvRecord(service=service, protocol=protocol,
                          priority=priority, weight=weight,
                          target=dbtarget, port=port,
                          dns_domain=dbdns_domain,
                          dns_environment=dbdns_env, comments=comments))
    session.flush()
    return
def test_create_hosts(): br = Branch.get_unique(sess, 'ny-prod', compel=True) dns_dmn = DnsDomain.get_unique(sess, 'one-nyp.ms.com', compel=True) stat = Status.get_unique(sess, 'build', compel=True) os = sess.query(OperatingSystem).filter(Archetype.name == 'vmhost').first() assert os, 'No OS in %s' % func_name() pers = sess.query(Personality).select_from( join(Personality, Archetype)).filter( and_(Archetype.name=='vmhost', Personality.name=='generic')).one() sess.autoflush=False for i in xrange(NUM_HOSTS): machine = m_factory.next() vm_host = Host(machine=machine, name='%s%s' % (HOST_NAME, i), dns_domain=dns_dmn, branch=br, personality=pers, status=stat, operating_system=os) add(sess, vm_host) sess.autoflush=True commit(sess) hosts = sess.query(Host).filter( Host.name.like(HOST_NAME+'%')).all() assert len(hosts) is NUM_HOSTS print 'created %s hosts'% len(hosts)
def hostlist_to_hosts(session, hostlist):
    # Resolve a list of FQDN strings to Host objects.
    # Lookup errors are collected in `failed` rather than aborting the loop.
    # NOTE(review): the visible code neither returns `dbhosts` nor raises on
    # `failed` here -- presumably the function continues beyond this chunk;
    # verify against the full file.
    dbdns_env = DnsEnvironment.get_unique_or_default(session)
    failed = []
    dbhosts = []
    dns_domains = {}
    for host in hostlist:
        if "." not in host:
            failed.append("%s: Not an FQDN." % host)
            continue
        short, dns_domain = host.split(".", 1)
        try:
            # Cache DnsDomain lookups -- many hosts share the same domain.
            if dns_domain not in dns_domains:
                dbdns_domain = DnsDomain.get_unique(session, dns_domain,
                                                    compel=True)
                dns_domains[dns_domain] = dbdns_domain
            dbdns_rec = DnsRecord.get_unique(
                session, name=short, dns_domain=dns_domains[dns_domain],
                dns_environment=dbdns_env,
                query_options=[joinedload('hardware_entity')], compel=True)
            # The DNS record must resolve to hardware that carries a host.
            if not dbdns_rec.hardware_entity or \
                    not dbdns_rec.hardware_entity.host:
                raise NotFoundException("Host %s not found." % host)
            dbhosts.append(dbdns_rec.hardware_entity.host)
        except NotFoundException, err:
            failed.append("%s: %s" % (host, err))
            continue
        except ArgumentError, err:
            failed.append("%s: %s" % (host, err))
            continue
def render(self, session, logger, city, timezone, campus, default_dns_domain, comments, **arguments): dbcity = get_location(session, city=city) # Updating machine templates is expensive, so only do that if needed update_machines = False if timezone is not None: dbcity.timezone = timezone if comments is not None: dbcity.comments = comments if default_dns_domain is not None: if default_dns_domain: dbdns_domain = DnsDomain.get_unique(session, default_dns_domain, compel=True) dbcity.default_dns_domain = dbdns_domain else: dbcity.default_dns_domain = None prev_campus = None dsdb_runner = None dsdb_runner = DSDBRunner(logger=logger) if campus is not None: dbcampus = get_location(session, campus=campus) # This one would change the template's locations hence forbidden if dbcampus.hub != dbcity.hub: # Doing this both to reduce user error and to limit # testing required. raise ArgumentError("Cannot change campus. {0} is in {1:l}, " "while {2:l} is in {3:l}.".format( dbcampus, dbcampus.hub, dbcity, dbcity.hub)) if dbcity.campus: prev_campus = dbcity.campus dbcity.update_parent(parent=dbcampus) update_machines = True session.flush() if campus is not None: if prev_campus: prev_name = prev_campus.name else: prev_name = None dsdb_runner.update_city(city, dbcampus.name, prev_name) plenaries = PlenaryCollection(logger=logger) plenaries.append(Plenary.get_plenary(dbcity)) if update_machines: q = session.query(Machine) q = q.filter(Machine.location_id.in_(dbcity.offspring_ids())) logger.client_info("Updating %d machines..." % q.count()) for dbmachine in q: plenaries.append(Plenary.get_plenary(dbmachine)) count = plenaries.write() dsdb_runner.commit_or_rollback() logger.client_info("Flushed %d templates." % count)
def render(self, session, dns_domain, restricted, comments, **arguments):
    """Update the restricted flag and/or comments of a DNS domain."""
    dbdomain = DnsDomain.get_unique(session, dns_domain, compel=True)
    # Only touch the attributes that were explicitly supplied.
    if restricted is not None:
        dbdomain.restricted = restricted
    if comments is not None:
        dbdomain.comments = comments
    session.flush()
def test_create_hosts(): br = Branch.get_unique(sess, 'ny-prod', compel=True) dns_dmn = DnsDomain.get_unique(sess, 'one-nyp.ms.com', compel=True) stat = Status.get_unique(sess, 'build', compel=True) os = sess.query(OperatingSystem).filter(Archetype.name == 'vmhost').first() assert os, 'No OS in %s' % func_name() pers = sess.query(Personality).select_from(join( Personality, Archetype)).filter( and_(Archetype.name == 'vmhost', Personality.name == 'generic')).one() sess.autoflush = False for i in xrange(NUM_HOSTS): machine = m_factory.next() vm_host = Host(machine=machine, name='%s%s' % (HOST_NAME, i), dns_domain=dns_dmn, branch=br, personality=pers, status=stat, operating_system=os) add(sess, vm_host) sess.autoflush = True commit(sess) hosts = sess.query(Host).filter(Host.name.like(HOST_NAME + '%')).all() assert len(hosts) is NUM_HOSTS print 'created %s hosts' % len(hosts)
def check_name(cls, name, dns_domain, ignore_name_check=False):
    """ Validate the name parameter """
    if not isinstance(name, basestring):  # pragma: no cover
        raise TypeError("%s: name must be a string." % cls.name)
    if not isinstance(dns_domain, DnsDomain):  # pragma: no cover
        raise TypeError("%s: dns_domain must be a DnsDomain." % cls.name)

    # Allow SRV records to opt out from this test
    if not ignore_name_check:
        DnsDomain.check_label(name)

    # The limit for DNS name length is 255, assuming wire format. This
    # translates to 253 for simple ASCII text; see:
    # http://www.ops.ietf.org/lists/namedroppers/namedroppers.2003/msg00964.html
    fqdn_length = len(name) + 1 + len(dns_domain.name)
    if fqdn_length > 253:
        raise ArgumentError('The fully qualified domain name is too long.')
def render(self, session, short, dns_domain, start, number, pack, **arguments):
    """Find the next free short name with the given prefix in a domain."""
    dbdomain = DnsDomain.get_unique(session, dns_domain, compel=True)
    next_index = search_next(session=session, cls=Fqdn, attr=Fqdn.name,
                             value=short, dns_domain=dbdomain,
                             start=start, pack=pack)
    # With --number, only the numeric suffix is returned.
    if number:
        return str(next_index)
    return "%s%d.%s" % (short, next_index, dbdomain.name)
def render(self, session, fqdn, dns_domain, **arguments):
    """Delete the NS record linking an A record to a DNS domain."""
    dbdomain = DnsDomain.get_unique(session, dns_domain, compel=True)
    dbarec = ARecord.get_unique(session, fqdn=fqdn, compel=True)
    dbns = NsRecord.get_unique(session, dns_domain=dbdomain,
                               a_record=dbarec, compel=True)
    session.delete(dbns)
    return
def render(self, session, dns_domain, **kw):
    """Show NS records of a DNS domain for a given A record."""
    dbdns = DnsDomain.get_unique(session, dns_domain, compel=True)
    dba_record = ARecord.get_unique(session, fqdn=kw['fqdn'], compel=True)

    records = session.query(NsRecord).filter_by(dns_domain=dbdns,
                                                a_record=dba_record).all()
    if not records:
        raise NotFoundException(
            "Could not find a dns_record for domain '%s'." % dns_domain)
    return records
def render(self, session, fqdn, dns_domain, comments, **arguments):
    """Designate an existing A record as a name server for a domain."""
    dbdomain = DnsDomain.get_unique(session, dns_domain, compel=True)
    dbarec = ARecord.get_unique(session, fqdn=fqdn, compel=True)
    # Refuse duplicates before creating the link.
    NsRecord.get_unique(session, a_record=dbarec, dns_domain=dbdomain,
                        preclude=True)
    session.add(NsRecord(a_record=dbarec, dns_domain=dbdomain,
                         comments=comments))
    return
def test_ns_record(): """ test creating a valid ns record """ tgt = ARecord.get_unique(sess, fqdn='%s.%s' % (AREC_NAME, DNS_DOMAIN_NAME), compel=True) dmn = DnsDomain.get_unique(sess, name=DNS_DOMAIN_NAME, compel=True) ns = NsRecord(a_record=tgt, dns_domain=dmn) create(sess, ns) assert ns, 'No NS Record created in test_ns_record' print 'created %s' % ns assert dmn.servers, 'No name server association proxy in test_ns_record'
def setup():
    """Create the DNS domain, network and A record the tests rely on."""
    dmn = DnsDomain(name=DNS_DOMAIN_NAME)
    create(sess, dmn)
    assert dmn, 'no dns domain in %s' % func_name()

    pi = Building.get_unique(sess, name='pi', compel=True)
    net = Network(name=TEST_NET_NAME, network=IPv4Network(TEST_NET),
                  location=pi)
    create(sess, net)
    assert net, 'no network created by %s' % func_name()

    arec = ARecord(name=AREC_NAME, dns_domain=dmn, ip=IPv4Address(TEST_IP),
                   network=net)
    create(sess, arec)
    assert arec, 'no ARecord created by %s' % func_name()
def render(self, session, logger, rack, row, column, room, building, bunker,
           fullname, default_dns_domain, comments, **arguments):
    """Update attributes of a rack, optionally moving it within a building."""
    dbrack = get_location(session, rack=rack)
    # Only touch the attributes that were explicitly supplied.
    if row is not None:
        dbrack.rack_row = row
    if column is not None:
        dbrack.rack_column = column
    if fullname is not None:
        dbrack.fullname = fullname
    if comments is not None:
        dbrack.comments = comments
    if default_dns_domain is not None:
        # An empty string clears the default DNS domain.
        if default_dns_domain:
            dbdns_domain = DnsDomain.get_unique(session, default_dns_domain,
                                                compel=True)
            dbrack.default_dns_domain = dbdns_domain
        else:
            dbrack.default_dns_domain = None
    if bunker or room or building:
        dbparent = get_location(session, bunker=bunker, room=room,
                                building=building)
        # This one would change the template's locations hence forbidden
        if dbparent.building != dbrack.building:
            # Doing this both to reduce user error and to limit
            # testing required.
            raise ArgumentError("Cannot change buildings. {0} is in {1} "
                                "while {2} is in {3}.".format(
                                    dbparent, dbparent.building,
                                    dbrack, dbrack.building))
        dbrack.update_parent(parent=dbparent)
    session.flush()

    # Machine templates embed the rack location, so rewrite them all.
    plenaries = PlenaryCollection(logger=logger)
    q = session.query(Machine)
    q = q.filter(Machine.location_id.in_(dbrack.offspring_ids()))
    for dbmachine in q:
        plenaries.append(Plenary.get_plenary(dbmachine))
    plenaries.write()
def teardown():
    """Remove the objects created in setup(), NS record first.

    Fix: dropped the unused local ``ip = IPv4Address(TEST_IP)`` -- it was
    computed but never referenced.
    """
    arec = ARecord.get_unique(sess,
                              fqdn='%s.%s' % (AREC_NAME, DNS_DOMAIN_NAME),
                              compel=True)
    dmn = DnsDomain.get_unique(sess, DNS_DOMAIN_NAME, compel=True)

    # Test deletion of NSRecord doesn't affect the ARecord or DNS Domain
    # by deleting it first.
    ns = NsRecord.get_unique(sess, a_record=arec, dns_domain=dmn)
    sess.delete(ns)
    commit(sess)

    sess.delete(arec)
    commit(sess)
    sess.delete(dmn)
    commit(sess)

    sess.query(Network).filter_by(name=TEST_NET_NAME).delete()
    commit(sess)
def render(self, session, service, protocol, dns_domain, dns_environment,
           **kwargs):
    """Show SRV records for a service/protocol in a DNS domain."""
    dbdns_env = DnsEnvironment.get_unique_or_default(session, dns_environment)
    dbdomain = DnsDomain.get_unique(session, dns_domain, compel=True)

    # SRV record FQDNs are stored as "_service._protocol".
    name = "_%s._%s" % (service.strip().lower(), protocol.strip().lower())

    q = session.query(SrvRecord)
    q = q.join((Fqdn, SrvRecord.fqdn_id == Fqdn.id))
    q = q.options(contains_eager('fqdn'))
    q = q.filter_by(dns_domain=dbdomain, name=name,
                    dns_environment=dbdns_env)

    result = q.all()
    if not result:
        raise NotFoundException("%s for service %s, protocol %s in DNS "
                                "domain %s not found." %
                                (SrvRecord._get_class_label(), service,
                                 protocol, dns_domain))
    return result
def render(self, session, dns_domain, **kw):
    """Remove the mapping of a DNS domain to a location."""
    dbdomain = DnsDomain.get_unique(session, name=dns_domain, compel=True)
    dblocation = get_location(session,
                              query_options=[subqueryload('dns_maps')], **kw)
    if not dblocation:
        raise ArgumentError("Please specify a location.")

    # Locate the map entry pointing at this domain, if there is one.
    matches = [entry for entry in dblocation.dns_maps
               if entry.dns_domain == dbdomain]
    if matches:
        dblocation.dns_maps.remove(matches[0])

    session.flush()
    return
def render(self, session, dns_domain, position, comments, **kw):
    """Map a DNS domain to a location, optionally at a given position."""
    dbdomain = DnsDomain.get_unique(session, name=dns_domain, compel=True)
    dblocation = get_location(session,
                              query_options=[subqueryload('dns_maps')], **kw)
    if not dblocation:
        raise ArgumentError("Please specify a location.")

    # Only one mapping per (domain, location) pair is allowed.
    DnsMap.get_unique(session, dns_domain=dbdomain, location=dblocation,
                      preclude=True)

    dbmap = DnsMap(dns_domain=dbdomain, comments=comments)
    if position is not None:
        dblocation.dns_maps.insert(position, dbmap)
    else:
        dblocation.dns_maps.append(dbmap)
    session.flush()
    return
def main(): q = session.query(Campus) for campus in q.all(): print "Processing {0:l}".format(campus) name = campus.fullname.lower().strip().replace(" ", "-") + ".ms.com" dbdns_domain = DnsDomain.get_unique(session, name) if not dbdns_domain: print " - DNS Domain %s does not exist, skipping" % name continue map = DnsMap.get_unique(session, location=campus, dns_domain=dbdns_domain) if map: print " - {0} is already mapped, skipping".format(dbdns_domain) continue campus.dns_maps.append(DnsMap(dns_domain=dbdns_domain)) print " * Mapping {0:l} to {1:l}".format(dbdns_domain, campus) session.commit()
def render(self, session, service, protocol, dns_domain, dns_environment,
           **kwargs):
    """Look up SRV records for the given service and protocol."""
    dbdns_env = DnsEnvironment.get_unique_or_default(session, dns_environment)
    dbdns_domain = DnsDomain.get_unique(session, dns_domain, compel=True)

    # SRV records are named "_service._protocol" in the DNS tree.
    record_name = "_%s._%s" % (service.strip().lower(),
                               protocol.strip().lower())

    query = session.query(SrvRecord)
    query = query.join((Fqdn, SrvRecord.fqdn_id == Fqdn.id))
    query = query.options(contains_eager('fqdn'))
    query = query.filter_by(dns_domain=dbdns_domain)
    query = query.filter_by(name=record_name)
    query = query.filter_by(dns_environment=dbdns_env)

    records = query.all()
    if not records:
        raise NotFoundException("%s for service %s, protocol %s in DNS "
                                "domain %s not found." %
                                (SrvRecord._get_class_label(), service,
                                 protocol, dns_domain))
    return records
def render(self, session, logger, dns_domain, **arguments):
    """Delete a DNS domain if nothing references it, and update DSDB."""
    dbdomain = DnsDomain.get_unique(session, dns_domain, compel=True)

    # Any remaining FQDN in the domain blocks deletion.
    if session.query(Fqdn).filter_by(dns_domain=dbdomain).first():
        raise ArgumentError("DNS domain %s cannot be deleted while still "
                            "in use." % dns_domain)
    if dbdomain.dns_maps:
        raise ArgumentError("{0} is still mapped to locations and cannot "
                            "be deleted.".format(dbdomain))

    # Remember the comments before the ORM object goes away.
    comments = dbdomain.comments
    session.delete(dbdomain)
    session.flush()

    dsdb_runner = DSDBRunner(logger=logger)
    dsdb_runner.delete_dns_domain(dns_domain, comments)
    dsdb_runner.commit_or_rollback()
    return
def search_system_query(session, dns_record_type=DnsRecord, **kwargs): q = session.query(dns_record_type) # Outer-join in all the subclasses so that each access of # system doesn't (necessarily) issue another query. if dns_record_type is DnsRecord: q = q.with_polymorphic('*') dbdns_env = DnsEnvironment.get_unique_or_default(session, kwargs.get("dns_environment", None)) q = q.join((Fqdn, DnsRecord.fqdn_id == Fqdn.id)) q = q.filter_by(dns_environment=dbdns_env) q = q.options(contains_eager('fqdn')) if kwargs.get('fqdn', None): (short, dbdns_domain) = parse_fqdn(session, kwargs['fqdn']) q = q.filter_by(name=short, dns_domain=dbdns_domain) if kwargs.get('dns_domain', None): dbdns_domain = DnsDomain.get_unique(session, kwargs['dns_domain'], compel=True) q = q.filter_by(dns_domain=dbdns_domain) if kwargs.get('shortname', None): q = q.filter_by(name=kwargs['shortname']) q = q.reset_joinpoint() if kwargs.get('ip', None): q = q.filter(ARecord.ip == kwargs['ip']) if kwargs.get('networkip', None): net_env = kwargs.get('network_environment', None) dbnet_env = NetworkEnvironment.get_unique_or_default(session, net_env) dbnetwork = get_network_byip(session, kwargs['networkip'], dbnet_env) q = q.filter(ARecord.network == dbnetwork) if kwargs.get('mac', None): raise UnimplementedError("search_system --mac is no longer supported, " "try search_hardware.") if kwargs.get('type', None): # Deprecated... remove if it becomes a problem. type_arg = kwargs['type'].strip().lower() q = q.filter_by(dns_record_type=type_arg) return q
def render(self, session, dns_domain, include_parents, **kwargs):
    """Show DNS domain/location mappings matching the given filters."""
    dblocation = get_location(session, **kwargs)

    q = session.query(DnsMap)
    q = q.options(undefer('comments'))

    if dblocation:
        if include_parents:
            # Mappings are inherited, so include every parent location too.
            ids = [parent.id for parent in dblocation.parents]
            ids.append(dblocation.id)
            q = q.filter(DnsMap.location_id.in_(ids))
        else:
            q = q.filter_by(location=dblocation)

    if dns_domain:
        dbdomain = DnsDomain.get_unique(session, dns_domain, compel=True)
        q = q.filter_by(dns_domain=dbdomain)

    q = q.join(DnsDomain)
    q = q.options(contains_eager('dns_domain'))
    q = q.join((Location, DnsMap.location_id == Location.id))
    q = q.options(contains_eager('location'))
    q = q.order_by(Location.location_type, Location.name, DnsMap.position)
    return q.all()
def render(self, session, dns_domain, dns_environment, **arguments):
    """Dump all DNS records, optionally restricted to one domain."""
    dbdns_env = DnsEnvironment.get_unique_or_default(session, dns_environment)
    if dns_domain:
        dbdomain = DnsDomain.get_unique(session, dns_domain, compel=True)
    else:
        dbdomain = None

    q = session.query(DnsRecord)
    q = q.with_polymorphic('*')
    q = q.join((Fqdn, DnsRecord.fqdn_id == Fqdn.id))
    q = q.options(contains_eager('fqdn'))
    q = q.filter_by(dns_environment=dbdns_env)

    if dbdomain:
        q = q.filter_by(dns_domain=dbdomain)
        dns_domains = [dbdomain]
    else:
        # Preload DNS domains, and keep a reference to prevent them being
        # evicted from the session's cache
        dns_domains = session.query(DnsDomain).all()

    return DnsDump(q.all(), dns_domains)
def render(self, session, dns_domain, dns_environment, **arguments):
    """Produce a dump of DNS records in an environment.

    When a domain is given only that domain is dumped, otherwise every
    domain is included.
    """
    dbdns_env = DnsEnvironment.get_unique_or_default(session, dns_environment)
    dbdomain = None
    if dns_domain:
        dbdomain = DnsDomain.get_unique(session, dns_domain, compel=True)

    query = session.query(DnsRecord)
    query = query.with_polymorphic('*')
    query = query.join((Fqdn, DnsRecord.fqdn_id == Fqdn.id))
    query = query.options(contains_eager('fqdn'))
    query = query.filter_by(dns_environment=dbdns_env)

    if dbdomain:
        query = query.filter_by(dns_domain=dbdomain)
        domains = [dbdomain]
    else:
        # Preload DNS domains, and keep a reference to prevent them being
        # evicted from the session's cache
        domains = session.query(DnsDomain).all()

    return DnsDump(query.all(), domains)
def render(self, session, logger, startip, endip, dns_domain, prefix,
           **arguments):
    """Allocate a range of dynamic DHCP stub addresses and register them."""
    if not prefix:
        prefix = 'dynamic'
    dbnet_env = NetworkEnvironment.get_unique_or_default(session)
    dbdns_env = DnsEnvironment.get_unique_or_default(session)
    startnet = get_net_id_from_ip(session, startip, dbnet_env)
    endnet = get_net_id_from_ip(session, endip, dbnet_env)
    if startnet != endnet:
        raise ArgumentError("IP addresses %s (%s) and %s (%s) must be on "
                            "the same subnet." % (startip, startnet.ip,
                                                  endip, endnet.ip))
    dbdns_domain = DnsDomain.get_unique(session, dns_domain, compel=True)

    # Lock against concurrent allocations in the same domain/network.
    dbdns_domain.lock_row()
    startnet.lock_row()

    # Reject the range if any address is already statically assigned.
    q = session.query(AddressAssignment.ip)
    q = q.filter_by(network=startnet)
    q = q.filter(AddressAssignment.ip >= startip)
    q = q.filter(AddressAssignment.ip <= endip)
    q = q.order_by(AddressAssignment.ip)
    conflicts = q.all()
    if conflicts:
        raise ArgumentError("Cannot allocate the address range because the "
                            "following IP addresses are already in use:\n" +
                            ", ".join([str(c.ip) for c in conflicts]))

    # No filtering on DNS environment. If an address is dynamic in one
    # environment, it should not be considered static in a different
    # environment.
    q = session.query(ARecord)
    q = q.filter_by(network=startnet)
    q = q.filter(ARecord.ip >= startip)
    q = q.filter(ARecord.ip <= endip)
    q = q.order_by(ARecord.ip)
    conflicts = q.all()
    if conflicts:
        raise ArgumentError("Cannot allocate the address range because the "
                            "following DNS records already exist:\n" +
                            "\n".join([format(c, "a") for c in conflicts]))

    dsdb_runner = DSDBRunner(logger=logger)
    with session.no_autoflush:
        # Create one DynamicStub per address in the inclusive range.
        for ipint in range(int(startip), int(endip) + 1):
            ip = IPv4Address(ipint)
            check_ip_restrictions(startnet, ip)
            # e.g. "dynamic-10-0-0-1"
            name = "%s-%s" % (prefix, str(ip).replace('.', '-'))
            dbfqdn = Fqdn.get_or_create(session, name=name,
                                        dns_domain=dbdns_domain,
                                        dns_environment=dbdns_env,
                                        preclude=True)
            dbdynamic_stub = DynamicStub(fqdn=dbfqdn, ip=ip, network=startnet)
            session.add(dbdynamic_stub)
            dsdb_runner.add_host_details(dbfqdn, ip)
    session.flush()

    # This may take some time if the range is big, so be verbose
    dsdb_runner.commit_or_rollback("Could not add addresses to DSDB",
                                   verbose=True)
    return
# Module-level test fixtures: shared objects looked up once at import time.
ARCH = Archetype.get_unique(sess, 'aquilon')
assert isinstance(ARCH, Archetype), 'No archetype @ %s' % func_name()

OS = OperatingSystem.get_unique(sess, name='linux', version='5.0.1-x86_64',
                                archetype=ARCH)
assert isinstance(OS, OperatingSystem), 'No os @ %s' % func_name()

PRSNLTY = Personality.get_unique(sess, name='generic', archetype=ARCH)
assert isinstance(PRSNLTY, Personality), 'no personality @ %s' % func_name()

# Any network wider than a /31 will do for the address tests.
NETWORK = sess.query(Network).filter(Network.cidr < 31).first()
assert isinstance(NETWORK, Network), 'no network in %s' % func_name()

DNS_DOMAIN = DnsDomain.get_unique(sess, DNAME)
assert isinstance(DNS_DOMAIN, DnsDomain), 'no dns domain @ %s' % func_name()

BRANCH = sess.query(Branch).first()
if not BRANCH:
    # Create a minimal 'ny-prod' domain if the database has none yet.
    BRANCH = Branch(branch_type='domain', name='ny-prod', is_sync_valid=1,
                    compiler='/ms/dist/elfms/PROJ/panc/prod/lib/panc.jar',
                    autosync=1, owner_id=1)
    add(sess, BRANCH)
    commit(sess)
    print BRANCH
def render(self, session, dns_domain, **arguments):
    """Show a single DNS domain."""
    dbdomain = DnsDomain.get_unique(session, dns_domain, compel=True,
                                    query_options=[undefer('comments')])
    return DNSDomainList([dbdomain])
def render(self, session, fqdn, dns_environment, dns_domain, shortname,
           record_type, ip, network, network_environment, target,
           target_domain, primary_name, used, reverse_override, reverse_ptr,
           fullinfo, style, **kwargs):
    """Search DNS records by a combination of optional filters."""
    if record_type:
        # Restrict the query to a single record type up front.
        record_type = record_type.strip().lower()
        if record_type in DNS_RRTYPE_MAP:
            cls = DNS_RRTYPE_MAP[record_type]
        else:
            cls = DnsRecord.polymorphic_subclass(record_type,
                                                 "Unknown DNS record type")
        q = session.query(cls)
    else:
        q = session.query(DnsRecord)
        q = q.with_polymorphic('*')

    dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                         network_environment)
    if network_environment:
        # The network environment determines the DNS environment
        dbdns_env = dbnet_env.dns_environment
    else:
        dbdns_env = DnsEnvironment.get_unique_or_default(session,
                                                         dns_environment)

    if fqdn:
        dbfqdn = Fqdn.get_unique(session, fqdn=fqdn,
                                 dns_environment=dbdns_env, compel=True)
        q = q.filter_by(fqdn=dbfqdn)

    q = q.join((Fqdn, DnsRecord.fqdn_id == Fqdn.id))
    q = q.filter_by(dns_environment=dbdns_env)
    q = q.options(contains_eager('fqdn'))
    if dns_domain:
        dbdns_domain = DnsDomain.get_unique(session, dns_domain, compel=True)
        q = q.filter_by(dns_domain=dbdns_domain)
    if shortname:
        q = q.filter_by(name=shortname)

    q = q.join(DnsDomain)
    q = q.options(contains_eager('fqdn.dns_domain'))
    q = q.order_by(Fqdn.name, DnsDomain.name)
    q = q.reset_joinpoint()

    if ip:
        # Filter on the network environment as well so the same address in
        # a different environment does not match.
        q = q.join(Network)
        q = q.filter_by(network_environment=dbnet_env)
        q = q.reset_joinpoint()
        q = q.filter(ARecord.ip == ip)
    if network:
        dbnetwork = Network.get_unique(session, network,
                                       network_environment=dbnet_env,
                                       compel=True)
        q = q.filter(ARecord.network == dbnetwork)
    if target:
        dbtarget = Fqdn.get_unique(session, fqdn=target,
                                   dns_environment=dbdns_env, compel=True)
        q = q.filter(or_(Alias.target == dbtarget,
                         SrvRecord.target == dbtarget))
    if target_domain:
        dbdns_domain = DnsDomain.get_unique(session, target_domain,
                                            compel=True)
        TargetFqdn = aliased(Fqdn)
        q = q.join((TargetFqdn, or_(Alias.target_id == TargetFqdn.id,
                                    SrvRecord.target_id == TargetFqdn.id)))
        q = q.filter(TargetFqdn.dns_domain == dbdns_domain)
    if primary_name is not None:
        if primary_name:
            q = q.filter(DnsRecord.hardware_entity.has())
        else:
            q = q.filter(~DnsRecord.hardware_entity.has())
    if used is not None:
        # "used": the A record's address is actually assigned to an
        # interface; an outer join + NULL test finds the unused ones.
        if used:
            q = q.join(AddressAssignment,
                       and_(ARecord.network_id ==
                            AddressAssignment.network_id,
                            ARecord.ip == AddressAssignment.ip))
        else:
            q = q.outerjoin(AddressAssignment,
                            and_(ARecord.network_id ==
                                 AddressAssignment.network_id,
                                 ARecord.ip == AddressAssignment.ip))
            q = q.filter(AddressAssignment.id == None)
        q = q.reset_joinpoint()
    if reverse_override is not None:
        if reverse_override:
            q = q.filter(ARecord.reverse_ptr.has())
        else:
            q = q.filter(~ARecord.reverse_ptr.has())
    if reverse_ptr:
        dbtarget = Fqdn.get_unique(session, fqdn=reverse_ptr,
                                   dns_environment=dbdns_env, compel=True)
        q = q.filter(ARecord.reverse_ptr == dbtarget)

    if fullinfo or style != "raw":
        q = q.options(undefer('comments'),
                      subqueryload('hardware_entity'),
                      lazyload('hardware_entity.primary_name'),
                      undefer('alias_cnt'))
        return q.all()
    else:
        return StringAttributeList(q.all(), 'fqdn')
def render(self, session, logger, building, city, address, fullname,
           default_dns_domain, comments, **arguments):
    """Update attributes of a building, optionally moving it to a new city."""
    dbbuilding = get_location(session, building=building)

    # Remember the original city for the campus bookkeeping below.
    old_city = dbbuilding.city

    dsdb_runner = DSDBRunner(logger=logger)
    if address is not None:
        old_address = dbbuilding.address
        dbbuilding.address = address
        dsdb_runner.update_building(dbbuilding.name, dbbuilding.address,
                                    old_address)
    if fullname is not None:
        dbbuilding.fullname = fullname
    if comments is not None:
        dbbuilding.comments = comments
    if default_dns_domain is not None:
        # An empty string clears the default DNS domain.
        if default_dns_domain:
            dbdns_domain = DnsDomain.get_unique(session, default_dns_domain,
                                                compel=True)
            dbbuilding.default_dns_domain = dbdns_domain
        else:
            dbbuilding.default_dns_domain = None

    plenaries = PlenaryCollection(logger=logger)
    if city:
        dbcity = get_location(session, city=city)

        # This one would change the template's locations hence forbidden
        if dbcity.hub != dbbuilding.hub:
            # Doing this both to reduce user error and to limit
            # testing required.
            raise ArgumentError("Cannot change hubs. {0} is in {1} "
                                "while {2} is in {3}.".format(
                                    dbcity, dbcity.hub,
                                    dbbuilding, dbbuilding.hub))

        # issue svcmap warnings
        maps = 0
        for map_type in [ServiceMap, PersonalityServiceMap]:
            maps = maps + session.query(map_type).\
                filter_by(location=old_city).count()

        if maps > 0:
            logger.client_info("There are {0} service(s) mapped to the "
                               "old location of the ({1:l}), please "
                               "review and manually update mappings for "
                               "the new location as needed.".format(
                                   maps, dbbuilding.city))

        dbbuilding.update_parent(parent=dbcity)

        # Keep the campus/building association in DSDB in sync.
        if old_city.campus and (old_city.campus != dbcity.campus):
            dsdb_runner.del_campus_building(old_city.campus, building)
        if dbcity.campus and (old_city.campus != dbcity.campus):
            dsdb_runner.add_campus_building(dbcity.campus, building)

        # Machine templates embed the location, so rewrite them all.
        query = session.query(Machine)
        query = query.filter(Machine.location_id.in_(dbcity.offspring_ids()))
        for dbmachine in query:
            plenaries.append(PlenaryMachineInfo(dbmachine, logger=logger))

    session.flush()

    if plenaries.plenaries:
        with plenaries.get_write_key() as key:
            plenaries.stash()
            try:
                plenaries.write(locked=True)
                dsdb_runner.commit_or_rollback()
            # NOTE(review): bare except restores the stash but does not
            # re-raise, so failures here are silently swallowed -- confirm
            # this is intentional.
            except:
                plenaries.restore_stash()
    else:
        dsdb_runner.commit_or_rollback()

    return
def setup():
    """Verify the fixtures this test module depends on exist.

    Fix: the second assert re-checked ``dmn`` (the DNS domain) instead of
    ``intrnl`` (the DNS environment just looked up) -- the message
    'No internal env' shows the intent.
    """
    dmn = DnsDomain.get_unique(sess, DNS_DOMAIN_NAME, compel=True)
    assert isinstance(dmn, DnsDomain), 'No ms.com domain in %s' % func_name()

    intrnl = DnsEnvironment.get_unique(sess, DNS_ENV, compel=True)
    assert isinstance(intrnl, DnsEnvironment), \
        'No internal env in %s' % func_name()
def render(self, session, logger, hostname, machine, archetype,
           buildstatus, personality, osname, osversion, service, instance,
           model, machine_type, vendor, serial, cluster, guest_on_cluster,
           guest_on_share, member_cluster_share, domain, sandbox, branch,
           sandbox_owner, dns_domain, shortname, mac, ip, networkip,
           network_environment, exact_location, server_of_service,
           server_of_instance, grn, eon_id, fullinfo, **arguments):
    """Search for hosts matching any combination of the given filters.

    Builds a single SQLAlchemy query over Host, progressively adding
    joins/filters for hardware, DNS, branch, personality, OS, services,
    clusters, shares and GRN ownership. Returns the full ORM objects when
    fullinfo is set, otherwise a SimpleHostList of the results.
    """
    dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                         network_environment)
    q = session.query(Host)

    if machine:
        dbmachine = Machine.get_unique(session, machine, compel=True)
        q = q.filter_by(machine=dbmachine)

    # Add the machine definition and the primary name. Use aliases to make
    # sure the end result will be ordered by primary name.
    PriDns = aliased(DnsRecord)
    PriFqdn = aliased(Fqdn)
    PriDomain = aliased(DnsDomain)
    q = q.join(Machine,
               (PriDns, PriDns.id == Machine.primary_name_id),
               (PriFqdn, PriDns.fqdn_id == PriFqdn.id),
               (PriDomain, PriFqdn.dns_domain_id == PriDomain.id))
    q = q.order_by(PriFqdn.name, PriDomain.name)
    q = q.options(contains_eager('machine'),
                  contains_eager('machine.primary_name', alias=PriDns),
                  contains_eager('machine.primary_name.fqdn',
                                 alias=PriFqdn),
                  contains_eager('machine.primary_name.fqdn.dns_domain',
                                 alias=PriDomain))
    q = q.reset_joinpoint()

    # Hardware-specific filters
    dblocation = get_location(session, **arguments)
    if dblocation:
        if exact_location:
            q = q.filter(Machine.location == dblocation)
        else:
            childids = dblocation.offspring_ids()
            q = q.filter(Machine.location_id.in_(childids))

    if model or vendor or machine_type:
        subq = Model.get_matching_query(session, name=model, vendor=vendor,
                                        machine_type=machine_type,
                                        compel=True)
        q = q.filter(Machine.model_id.in_(subq))

    if serial:
        self.deprecated_option("serial",
                               "Please use search machine --serial instead.",
                               logger=logger, **arguments)
        q = q.filter(Machine.serial_no == serial)

    # DNS IP address related filters
    if mac or ip or networkip or hostname or dns_domain or shortname:
        # Inner joins are cheaper than outer joins, so make some effort to
        # use inner joins when possible
        if mac or ip or networkip:
            q = q.join(Interface)
        else:
            q = q.outerjoin(Interface)
        if ip or networkip:
            q = q.join(AddressAssignment, Network, from_joinpoint=True)
        else:
            q = q.outerjoin(AddressAssignment, Network,
                            from_joinpoint=True)

        if mac:
            self.deprecated_option("mac", "Please use search machine "
                                   "--mac instead.",
                                   logger=logger, **arguments)
            q = q.filter(Interface.mac == mac)
        if ip:
            q = q.filter(AddressAssignment.ip == ip)
            q = q.filter(Network.network_environment == dbnet_env)
        if networkip:
            dbnetwork = get_network_byip(session, networkip, dbnet_env)
            q = q.filter(AddressAssignment.network == dbnetwork)

        dbdns_domain = None
        if hostname:
            (shortname, dbdns_domain) = parse_fqdn(session, hostname)
        if dns_domain:
            dbdns_domain = DnsDomain.get_unique(session, dns_domain,
                                                compel=True)

        if shortname or dbdns_domain:
            # Match either an address record on an interface, or the
            # primary name joined in above.
            ARecAlias = aliased(ARecord)
            ARecFqdn = aliased(Fqdn)
            q = q.outerjoin(
                (ARecAlias,
                 and_(ARecAlias.ip == AddressAssignment.ip,
                      ARecAlias.network_id == AddressAssignment.network_id)),
                (ARecFqdn, ARecAlias.fqdn_id == ARecFqdn.id))
            if shortname:
                q = q.filter(or_(ARecFqdn.name == shortname,
                                 PriFqdn.name == shortname))
            if dbdns_domain:
                q = q.filter(or_(ARecFqdn.dns_domain == dbdns_domain,
                                 PriFqdn.dns_domain == dbdns_domain))
        q = q.reset_joinpoint()

    (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                 domain=domain,
                                                 sandbox=sandbox,
                                                 branch=branch)
    if sandbox_owner:
        dbauthor = get_user_principal(session, sandbox_owner)
    if dbbranch:
        q = q.filter_by(branch=dbbranch)
    if dbauthor:
        q = q.filter_by(sandbox_author=dbauthor)

    if archetype:
        # Added to the searches as appropriate below.
        dbarchetype = Archetype.get_unique(session, archetype, compel=True)
    if personality and archetype:
        dbpersonality = Personality.get_unique(session,
                                               archetype=dbarchetype,
                                               name=personality,
                                               compel=True)
        q = q.filter_by(personality=dbpersonality)
    elif personality:
        PersAlias = aliased(Personality)
        q = q.join(PersAlias).filter_by(name=personality)
        q = q.reset_joinpoint()
    elif archetype:
        PersAlias = aliased(Personality)
        q = q.join(PersAlias).filter_by(archetype=dbarchetype)
        q = q.reset_joinpoint()

    if buildstatus:
        dbbuildstatus = HostLifecycle.get_unique(session, buildstatus,
                                                 compel=True)
        q = q.filter_by(status=dbbuildstatus)

    if osname and osversion and archetype:
        # archetype was already resolved above
        dbos = OperatingSystem.get_unique(session, name=osname,
                                          version=osversion,
                                          archetype=dbarchetype,
                                          compel=True)
        q = q.filter_by(operating_system=dbos)
    elif osname or osversion:
        q = q.join('operating_system')
        if osname:
            q = q.filter_by(name=osname)
        if osversion:
            q = q.filter_by(version=osversion)
        q = q.reset_joinpoint()

    if service:
        dbservice = Service.get_unique(session, service, compel=True)
        if instance:
            dbsi = get_service_instance(session, dbservice, instance)
            q = q.filter(Host.services_used.contains(dbsi))
        else:
            q = q.join('services_used')
            q = q.filter_by(service=dbservice)
            q = q.reset_joinpoint()
    elif instance:
        q = q.join('services_used')
        q = q.filter_by(name=instance)
        q = q.reset_joinpoint()

    if server_of_service:
        dbserver_service = Service.get_unique(session, server_of_service,
                                              compel=True)
        if server_of_instance:
            dbssi = get_service_instance(session, dbserver_service,
                                         server_of_instance)
            q = q.join('_services_provided')
            q = q.filter_by(service_instance=dbssi)
            q = q.reset_joinpoint()
        else:
            q = q.join('_services_provided', 'service_instance')
            q = q.filter_by(service=dbserver_service)
            q = q.reset_joinpoint()
    elif server_of_instance:
        q = q.join('_services_provided', 'service_instance')
        q = q.filter_by(name=server_of_instance)
        q = q.reset_joinpoint()

    if cluster:
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        if isinstance(dbcluster, MetaCluster):
            q = q.join('_cluster', 'cluster', '_metacluster')
            q = q.filter_by(metacluster=dbcluster)
        else:
            q = q.filter_by(cluster=dbcluster)
        q = q.reset_joinpoint()
    if guest_on_cluster:
        # TODO: this does not handle metaclusters according to Wes
        dbcluster = Cluster.get_unique(session, guest_on_cluster,
                                       compel=True)
        q = q.join('machine', VirtualMachine, ClusterResource)
        q = q.filter_by(cluster=dbcluster)
        q = q.reset_joinpoint()
    if guest_on_share:
        #v2
        v2shares = session.query(Share.id).filter_by(
            name=guest_on_share).all()
        if not v2shares:
            raise NotFoundException("No shares found with name {0}."
                                    .format(guest_on_share))

        NasAlias = aliased(VirtualDisk)
        q = q.join('machine', 'disks', (NasAlias, NasAlias.id == Disk.id))
        q = q.filter(NasAlias.share_id.in_(map(lambda s: s[0], v2shares)))
        q = q.reset_joinpoint()

    if member_cluster_share:
        #v2
        v2shares = session.query(Share.id).filter_by(
            name=member_cluster_share).all()
        if not v2shares:
            # Bug fix: the message previously interpolated guest_on_share,
            # which is unrelated (and typically None) in this branch.
            raise NotFoundException("No shares found with name {0}."
                                    .format(member_cluster_share))

        NasAlias = aliased(VirtualDisk)
        q = q.join('_cluster', 'cluster', 'resholder', VirtualMachine,
                   'machine', 'disks', (NasAlias, NasAlias.id == Disk.id))
        q = q.filter(NasAlias.share_id.in_(map(lambda s: s[0], v2shares)))
        q = q.reset_joinpoint()

    if grn or eon_id:
        dbgrn = lookup_grn(session, grn, eon_id, autoupdate=False)

        # Match hosts owned by the GRN directly, via a host-level GRN map
        # entry, or via their personality's ownership/GRN map.
        persq = session.query(Personality.id)
        persq = persq.outerjoin(PersonalityGrnMap)
        persq = persq.filter(or_(Personality.owner_eon_id == dbgrn.eon_id,
                                 PersonalityGrnMap.eon_id == dbgrn.eon_id))
        q = q.outerjoin(HostGrnMap)
        q = q.filter(or_(Host.owner_eon_id == dbgrn.eon_id,
                         HostGrnMap.eon_id == dbgrn.eon_id,
                         Host.personality_id.in_(persq.subquery())))
        q = q.reset_joinpoint()

    if fullinfo:
        return q.all()
    return SimpleHostList(q.all())
def render(self, session, logger, building, city, address, fullname,
           default_dns_domain, comments, **arguments):
    """Update a building's address/metadata and optionally move it to a new city.

    Address changes are mirrored to DSDB. A city change additionally keeps
    DSDB campus membership in sync, warns about service maps still pointing
    at the old city, and refreshes the plenaries of all machines under the
    new city. DSDB is only committed after the plenaries were written; on
    any failure the plenaries are restored and the error re-raised.

    Raises ArgumentError if the target city is in a different hub.
    """
    dbbuilding = get_location(session, building=building)

    # Remember the original city before any re-parenting happens below.
    old_city = dbbuilding.city

    dsdb_runner = DSDBRunner(logger=logger)
    if address is not None:
        old_address = dbbuilding.address
        dbbuilding.address = address
        # Queued in the runner; committed via commit_or_rollback() below.
        dsdb_runner.update_building(dbbuilding.name, dbbuilding.address,
                                    old_address)
    if fullname is not None:
        dbbuilding.fullname = fullname
    if comments is not None:
        dbbuilding.comments = comments
    if default_dns_domain is not None:
        if default_dns_domain:
            dbdns_domain = DnsDomain.get_unique(session, default_dns_domain,
                                                compel=True)
            dbbuilding.default_dns_domain = dbdns_domain
        else:
            # An empty value clears the default DNS domain.
            dbbuilding.default_dns_domain = None

    plenaries = PlenaryCollection(logger=logger)
    if city:
        dbcity = get_location(session, city=city)

        # This one would change the template's locations hence forbidden
        if dbcity.hub != dbbuilding.hub:
            # Doing this both to reduce user error and to limit
            # testing required.
            raise ArgumentError("Cannot change hubs. {0} is in {1} "
                                "while {2} is in {3}.".format(
                                    dbcity, dbcity.hub,
                                    dbbuilding, dbbuilding.hub))

        # issue svcmap warnings
        maps = 0
        for map_type in [ServiceMap, PersonalityServiceMap]:
            maps = maps + session.query(map_type).\
                filter_by(location=old_city).count()

        if maps > 0:
            logger.client_info("There are {0} service(s) mapped to the "
                               "old location of the ({1:l}), please "
                               "review and manually update mappings for "
                               "the new location as needed.".format(
                                   maps, dbbuilding.city))

        dbbuilding.update_parent(parent=dbcity)

        # Keep DSDB campus membership consistent with the move.
        if old_city.campus and (old_city.campus != dbcity.campus):
            dsdb_runner.del_campus_building(old_city.campus, building)
        if dbcity.campus and (old_city.campus != dbcity.campus):
            dsdb_runner.add_campus_building(dbcity.campus, building)

        # Every machine anywhere under the new city needs its plenary
        # regenerated to pick up the new location.
        query = session.query(Machine)
        query = query.filter(Machine.location_id.in_(dbcity.offspring_ids()))
        for dbmachine in query:
            plenaries.append(Plenary.get_plenary(dbmachine))

    session.flush()

    if plenaries.plenaries:
        with plenaries.get_key():
            plenaries.stash()
            try:
                plenaries.write(locked=True)
                dsdb_runner.commit_or_rollback()
            except:
                # Undo the template changes before propagating the error.
                plenaries.restore_stash()
                raise
    else:
        dsdb_runner.commit_or_rollback()

    return
def render(self, session, dns_domain, **arguments):
    """Return the named DNS domain wrapped in a DNSDomainList formatter."""
    query_options = [undefer("comments")]
    dbdns_domain = DnsDomain.get_unique(session, dns_domain, compel=True,
                                        query_options=query_options)
    return DNSDomainList([dbdns_domain])
def render(self, session, logger, startip, endip, dns_domain, prefix,
           **arguments):
    """Allocate a contiguous range of dynamic DHCP address stubs.

    Creates a DynamicStub record named "<prefix>-a-b-c-d.<dns_domain>" for
    every IP between startip and endip (inclusive), after verifying that
    both ends are on the same subnet and that no address assignment or DNS
    record already exists in the range. The new hosts are also registered
    in DSDB.

    Raises ArgumentError on subnet mismatch or on any conflict.
    """
    if not prefix:
        prefix = 'dynamic'
    dbnet_env = NetworkEnvironment.get_unique_or_default(session)
    dbdns_env = DnsEnvironment.get_unique_or_default(session)
    startnet = get_net_id_from_ip(session, startip, dbnet_env)
    endnet = get_net_id_from_ip(session, endip, dbnet_env)
    if startnet != endnet:
        raise ArgumentError("IP addresses %s (%s) and %s (%s) must be on "
                            "the same subnet." % (startip, startnet.ip,
                                                  endip, endnet.ip))

    dbdns_domain = DnsDomain.get_unique(session, dns_domain, compel=True)

    # Serialize concurrent allocations against the same domain/network so
    # the conflict checks below stay valid until commit.
    dbdns_domain.lock_row()
    startnet.lock_row()

    # Reject the range if any IP in it is already assigned.
    q = session.query(AddressAssignment.ip)
    q = q.filter_by(network=startnet)
    q = q.filter(AddressAssignment.ip >= startip)
    q = q.filter(AddressAssignment.ip <= endip)
    q = q.order_by(AddressAssignment.ip)
    conflicts = q.all()
    if conflicts:
        raise ArgumentError("Cannot allocate the address range because the "
                            "following IP addresses are already in use:\n" +
                            ", ".join([str(c.ip) for c in conflicts]))

    # No filtering on DNS environment. If an address is dynamic in one
    # environment, it should not be considered static in a different
    # environment.
    q = session.query(ARecord)
    q = q.filter_by(network=startnet)
    q = q.filter(ARecord.ip >= startip)
    q = q.filter(ARecord.ip <= endip)
    q = q.order_by(ARecord.ip)
    conflicts = q.all()
    if conflicts:
        raise ArgumentError("Cannot allocate the address range because the "
                            "following DNS records already exist:\n" +
                            "\n".join([format(c, "a") for c in conflicts]))

    dsdb_runner = DSDBRunner(logger=logger)
    with session.no_autoflush:
        for ipint in range(int(startip), int(endip) + 1):
            ip = IPv4Address(ipint)
            check_ip_restrictions(startnet, ip)
            # e.g. prefix "dynamic" + 10.0.0.1 -> "dynamic-10-0-0-1"
            name = "%s-%s" % (prefix, str(ip).replace('.', '-'))
            dbfqdn = Fqdn.get_or_create(session, name=name,
                                        dns_domain=dbdns_domain,
                                        dns_environment=dbdns_env,
                                        preclude=True)
            dbdynamic_stub = DynamicStub(fqdn=dbfqdn, ip=ip,
                                         network=startnet)
            session.add(dbdynamic_stub)
            dsdb_runner.add_host_details(dbfqdn, ip)
    session.flush()

    # This may take some time if the range is big, so be verbose
    dsdb_runner.commit_or_rollback("Could not add addresses to DSDB",
                                   verbose=True)

    return
def render(self, session, fqdn, dns_environment, dns_domain, shortname,
           record_type, ip, network, network_environment, target,
           target_domain, primary_name, used, reverse_override,
           reverse_ptr, fullinfo, style, **kwargs):
    """Search DNS records by any combination of the given filters.

    Builds a polymorphic query over DnsRecord (or a specific record class
    when record_type is given) and applies filters for name, domain, DNS
    environment, address, alias/SRV target, primary-name status, address
    usage and reverse PTR override. The result shape depends on fullinfo
    and the output style.
    """
    if record_type:
        record_type = record_type.strip().lower()
        if record_type in DNS_RRTYPE_MAP:
            cls = DNS_RRTYPE_MAP[record_type]
        else:
            cls = DnsRecord.polymorphic_subclass(record_type,
                                                 "Unknown DNS record type")
        q = session.query(cls)
    else:
        q = session.query(DnsRecord)
        q = q.with_polymorphic('*')

    dbnet_env = NetworkEnvironment.get_unique_or_default(
        session, network_environment)
    if network_environment:
        # The network environment determines the DNS environment
        dbdns_env = dbnet_env.dns_environment
    else:
        dbdns_env = DnsEnvironment.get_unique_or_default(session,
                                                         dns_environment)

    if fqdn:
        dbfqdn = Fqdn.get_unique(session, fqdn=fqdn,
                                 dns_environment=dbdns_env, compel=True)
        q = q.filter_by(fqdn=dbfqdn)

    # Always join the FQDN so results can be restricted to the chosen DNS
    # environment and ordered by name.
    q = q.join((Fqdn, DnsRecord.fqdn_id == Fqdn.id))
    q = q.filter_by(dns_environment=dbdns_env)
    q = q.options(contains_eager('fqdn'))
    if dns_domain:
        dbdns_domain = DnsDomain.get_unique(session, dns_domain,
                                            compel=True)
        q = q.filter_by(dns_domain=dbdns_domain)
    if shortname:
        q = q.filter_by(name=shortname)
    q = q.join(DnsDomain)
    q = q.options(contains_eager('fqdn.dns_domain'))
    q = q.order_by(Fqdn.name, DnsDomain.name)
    q = q.reset_joinpoint()

    if ip:
        q = q.join(Network)
        q = q.filter_by(network_environment=dbnet_env)
        q = q.reset_joinpoint()
        q = q.filter(ARecord.ip == ip)
    if network:
        dbnetwork = Network.get_unique(session, network,
                                       network_environment=dbnet_env,
                                       compel=True)
        q = q.filter(ARecord.network == dbnetwork)
    if target:
        dbtarget = Fqdn.get_unique(session, fqdn=target,
                                   dns_environment=dbdns_env, compel=True)
        # A target may be pointed at by aliases or SRV records.
        q = q.filter(or_(Alias.target == dbtarget,
                         SrvRecord.target == dbtarget))
    if target_domain:
        dbdns_domain = DnsDomain.get_unique(session, target_domain,
                                            compel=True)
        TargetFqdn = aliased(Fqdn)
        q = q.join((TargetFqdn, or_(Alias.target_id == TargetFqdn.id,
                                    SrvRecord.target_id == TargetFqdn.id)))
        q = q.filter(TargetFqdn.dns_domain == dbdns_domain)
    if primary_name is not None:
        # True: only records that are a hardware entity's primary name;
        # False: only records that are not.
        if primary_name:
            q = q.filter(DnsRecord.hardware_entity.has())
        else:
            q = q.filter(~DnsRecord.hardware_entity.has())
    if used is not None:
        # "Used" means the address record's IP is actually assigned to an
        # interface on the same network.
        if used:
            q = q.join(AddressAssignment,
                       and_(ARecord.network_id ==
                            AddressAssignment.network_id,
                            ARecord.ip == AddressAssignment.ip))
        else:
            q = q.outerjoin(AddressAssignment,
                            and_(ARecord.network_id ==
                                 AddressAssignment.network_id,
                                 ARecord.ip == AddressAssignment.ip))
            q = q.filter(AddressAssignment.id == None)
        q = q.reset_joinpoint()
    if reverse_override is not None:
        if reverse_override:
            q = q.filter(ARecord.reverse_ptr.has())
        else:
            q = q.filter(~ARecord.reverse_ptr.has())
    if reverse_ptr:
        dbtarget = Fqdn.get_unique(session, fqdn=reverse_ptr,
                                   dns_environment=dbdns_env, compel=True)
        q = q.filter(ARecord.reverse_ptr == dbtarget)

    if fullinfo:
        q = q.options(undefer('comments'))
        q = q.options(subqueryload('hardware_entity'))
        q = q.options(undefer('alias_cnt'))
        return q.all()
    elif style == "raw":
        return StringAttributeList(q.all(), 'fqdn')
    else:
        # This is for the CSV formatter
        return q.all()
def render(self, session, dns_domain, **arguments):
    """Look up and return a single DNS domain, with comments eagerly loaded."""
    opts = [undefer('comments')]
    return DnsDomain.get_unique(session, dns_domain, compel=True,
                                query_options=opts)
def render(self, session, logger, hostname, machine, archetype,
           buildstatus, personality, osname, osversion, service, instance,
           model, machine_type, vendor, serial, cluster, guest_on_cluster,
           guest_on_share, member_cluster_share, domain, sandbox, branch,
           sandbox_owner, dns_domain, shortname, mac, ip, networkip,
           network_environment, exact_location, server_of_service,
           server_of_instance, grn, eon_id, fullinfo, **arguments):
    """Search for hosts matching any combination of the given filters.

    Builds a single SQLAlchemy query over Host, progressively adding
    joins/filters for hardware, DNS, branch, personality, OS, services,
    clusters, shares and GRN ownership. Returns the full ORM objects when
    fullinfo is set, otherwise a SimpleHostList of the results.
    """
    dbnet_env = NetworkEnvironment.get_unique_or_default(
        session, network_environment)
    q = session.query(Host)

    if machine:
        dbmachine = Machine.get_unique(session, machine, compel=True)
        q = q.filter_by(machine=dbmachine)

    # Add the machine definition and the primary name. Use aliases to make
    # sure the end result will be ordered by primary name.
    PriDns = aliased(DnsRecord)
    PriFqdn = aliased(Fqdn)
    PriDomain = aliased(DnsDomain)
    q = q.join(Machine,
               (PriDns, PriDns.id == Machine.primary_name_id),
               (PriFqdn, PriDns.fqdn_id == PriFqdn.id),
               (PriDomain, PriFqdn.dns_domain_id == PriDomain.id))
    q = q.order_by(PriFqdn.name, PriDomain.name)
    q = q.options(
        contains_eager('machine'),
        contains_eager('machine.primary_name', alias=PriDns),
        contains_eager('machine.primary_name.fqdn', alias=PriFqdn),
        contains_eager('machine.primary_name.fqdn.dns_domain',
                       alias=PriDomain))
    q = q.reset_joinpoint()

    # Hardware-specific filters
    dblocation = get_location(session, **arguments)
    if dblocation:
        if exact_location:
            q = q.filter(Machine.location == dblocation)
        else:
            childids = dblocation.offspring_ids()
            q = q.filter(Machine.location_id.in_(childids))

    if model or vendor or machine_type:
        subq = Model.get_matching_query(session, name=model, vendor=vendor,
                                        machine_type=machine_type,
                                        compel=True)
        q = q.filter(Machine.model_id.in_(subq))

    if serial:
        self.deprecated_option(
            "serial", "Please use search machine --serial instead.",
            logger=logger, **arguments)
        q = q.filter(Machine.serial_no == serial)

    # DNS IP address related filters
    if mac or ip or networkip or hostname or dns_domain or shortname:
        # Inner joins are cheaper than outer joins, so make some effort to
        # use inner joins when possible
        if mac or ip or networkip:
            q = q.join(Interface)
        else:
            q = q.outerjoin(Interface)
        if ip or networkip:
            q = q.join(AddressAssignment, Network, from_joinpoint=True)
        else:
            q = q.outerjoin(AddressAssignment, Network,
                            from_joinpoint=True)

        if mac:
            self.deprecated_option("mac", "Please use search machine "
                                   "--mac instead.",
                                   logger=logger, **arguments)
            q = q.filter(Interface.mac == mac)
        if ip:
            q = q.filter(AddressAssignment.ip == ip)
            q = q.filter(Network.network_environment == dbnet_env)
        if networkip:
            dbnetwork = get_network_byip(session, networkip, dbnet_env)
            q = q.filter(AddressAssignment.network == dbnetwork)

        dbdns_domain = None
        if hostname:
            (shortname, dbdns_domain) = parse_fqdn(session, hostname)
        if dns_domain:
            dbdns_domain = DnsDomain.get_unique(session, dns_domain,
                                                compel=True)

        if shortname or dbdns_domain:
            # Match either an address record on an interface, or the
            # primary name joined in above.
            ARecAlias = aliased(ARecord)
            ARecFqdn = aliased(Fqdn)
            q = q.outerjoin(
                (ARecAlias,
                 and_(ARecAlias.ip == AddressAssignment.ip,
                      ARecAlias.network_id ==
                      AddressAssignment.network_id)),
                (ARecFqdn, ARecAlias.fqdn_id == ARecFqdn.id))
            if shortname:
                q = q.filter(
                    or_(ARecFqdn.name == shortname,
                        PriFqdn.name == shortname))
            if dbdns_domain:
                q = q.filter(
                    or_(ARecFqdn.dns_domain == dbdns_domain,
                        PriFqdn.dns_domain == dbdns_domain))
        q = q.reset_joinpoint()

    (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                 domain=domain,
                                                 sandbox=sandbox,
                                                 branch=branch)
    if sandbox_owner:
        dbauthor = get_user_principal(session, sandbox_owner)
    if dbbranch:
        q = q.filter_by(branch=dbbranch)
    if dbauthor:
        q = q.filter_by(sandbox_author=dbauthor)

    if archetype:
        # Added to the searches as appropriate below.
        dbarchetype = Archetype.get_unique(session, archetype, compel=True)
    if personality and archetype:
        dbpersonality = Personality.get_unique(session,
                                               archetype=dbarchetype,
                                               name=personality,
                                               compel=True)
        q = q.filter_by(personality=dbpersonality)
    elif personality:
        PersAlias = aliased(Personality)
        q = q.join(PersAlias).filter_by(name=personality)
        q = q.reset_joinpoint()
    elif archetype:
        PersAlias = aliased(Personality)
        q = q.join(PersAlias).filter_by(archetype=dbarchetype)
        q = q.reset_joinpoint()

    if buildstatus:
        dbbuildstatus = HostLifecycle.get_unique(session, buildstatus,
                                                 compel=True)
        q = q.filter_by(status=dbbuildstatus)

    if osname and osversion and archetype:
        # archetype was already resolved above
        dbos = OperatingSystem.get_unique(session, name=osname,
                                          version=osversion,
                                          archetype=dbarchetype,
                                          compel=True)
        q = q.filter_by(operating_system=dbos)
    elif osname or osversion:
        q = q.join('operating_system')
        if osname:
            q = q.filter_by(name=osname)
        if osversion:
            q = q.filter_by(version=osversion)
        q = q.reset_joinpoint()

    if service:
        dbservice = Service.get_unique(session, service, compel=True)
        if instance:
            dbsi = get_service_instance(session, dbservice, instance)
            q = q.filter(Host.services_used.contains(dbsi))
        else:
            q = q.join('services_used')
            q = q.filter_by(service=dbservice)
            q = q.reset_joinpoint()
    elif instance:
        q = q.join('services_used')
        q = q.filter_by(name=instance)
        q = q.reset_joinpoint()

    if server_of_service:
        dbserver_service = Service.get_unique(session, server_of_service,
                                              compel=True)
        if server_of_instance:
            dbssi = get_service_instance(session, dbserver_service,
                                         server_of_instance)
            q = q.join('_services_provided')
            q = q.filter_by(service_instance=dbssi)
            q = q.reset_joinpoint()
        else:
            q = q.join('_services_provided', 'service_instance')
            q = q.filter_by(service=dbserver_service)
            q = q.reset_joinpoint()
    elif server_of_instance:
        q = q.join('_services_provided', 'service_instance')
        q = q.filter_by(name=server_of_instance)
        q = q.reset_joinpoint()

    if cluster:
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        if isinstance(dbcluster, MetaCluster):
            q = q.join('_cluster', 'cluster', '_metacluster')
            q = q.filter_by(metacluster=dbcluster)
        else:
            q = q.filter_by(cluster=dbcluster)
        q = q.reset_joinpoint()
    if guest_on_cluster:
        # TODO: this does not handle metaclusters according to Wes
        dbcluster = Cluster.get_unique(session, guest_on_cluster,
                                       compel=True)
        q = q.join('machine', VirtualMachine, ClusterResource)
        q = q.filter_by(cluster=dbcluster)
        q = q.reset_joinpoint()
    if guest_on_share:
        #v2
        v2shares = session.query(
            Share.id).filter_by(name=guest_on_share).all()
        if not v2shares:
            raise NotFoundException(
                "No shares found with name {0}.".format(guest_on_share))

        NasAlias = aliased(VirtualDisk)
        q = q.join('machine', 'disks', (NasAlias, NasAlias.id == Disk.id))
        q = q.filter(NasAlias.share_id.in_(map(lambda s: s[0], v2shares)))
        q = q.reset_joinpoint()

    if member_cluster_share:
        #v2
        v2shares = session.query(
            Share.id).filter_by(name=member_cluster_share).all()
        if not v2shares:
            # Bug fix: the message previously interpolated guest_on_share,
            # which is unrelated (and typically None) in this branch.
            raise NotFoundException(
                "No shares found with name {0}.".format(
                    member_cluster_share))

        NasAlias = aliased(VirtualDisk)
        q = q.join('_cluster', 'cluster', 'resholder', VirtualMachine,
                   'machine', 'disks', (NasAlias, NasAlias.id == Disk.id))
        q = q.filter(NasAlias.share_id.in_(map(lambda s: s[0], v2shares)))
        q = q.reset_joinpoint()

    if grn or eon_id:
        dbgrn = lookup_grn(session, grn, eon_id, autoupdate=False)

        # Match hosts owned by the GRN directly, via a host-level GRN map
        # entry, or via their personality's ownership/GRN map.
        persq = session.query(Personality.id)
        persq = persq.outerjoin(PersonalityGrnMap)
        persq = persq.filter(
            or_(Personality.owner_eon_id == dbgrn.eon_id,
                PersonalityGrnMap.eon_id == dbgrn.eon_id))
        q = q.outerjoin(HostGrnMap)
        q = q.filter(
            or_(Host.owner_eon_id == dbgrn.eon_id,
                HostGrnMap.eon_id == dbgrn.eon_id,
                Host.personality_id.in_(persq.subquery())))
        q = q.reset_joinpoint()

    if fullinfo:
        return q.all()
    return SimpleHostList(q.all())