def render(self, session, network_environment, dns_environment, comments,
           **arguments):
    """Create a new network environment.

    Validates the requested name, refuses to create a duplicate, and
    binds the new environment to an existing DNS environment and an
    optional location.
    """
    validate_basic("network environment", network_environment)
    # Fail early if an environment with this name already exists.
    NetworkEnvironment.get_unique(session, network_environment, preclude=True)
    dns_env = DnsEnvironment.get_unique(session, dns_environment, compel=True)
    # Currently input.xml lists --building only, but that may change
    dblocation = get_location(session, **arguments)
    net_env = NetworkEnvironment(name=network_environment,
                                 dns_environment=dns_env,
                                 location=dblocation,
                                 comments=comments)
    # Either both are the defaults or neither is.
    if net_env.is_default != dns_env.is_default:
        raise ArgumentError("Only the default network environment may be "
                            "associated with the default DNS environment.")
    session.add(net_env)
    session.flush()
    return
def render(self, session, dbuser, ip, fqdn, network_environment, **arguments):
    """Remove a router address from its network.

    The router may be identified either by --ip or by --fqdn (in the
    latter case the address is looked up in the environment's DNS
    environment).  The associated DNS records are deleted as well.

    Raises ArgumentError if neither --ip nor --fqdn was given, and
    NotFoundException if the address is not a router on its network.
    """
    dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                         network_environment)
    self.az.check_network_environment(dbuser, dbnet_env)
    if fqdn:
        dbdns_rec = ARecord.get_unique(session, fqdn=fqdn,
                                       dns_environment=dbnet_env.dns_environment,
                                       compel=True)
        ip = dbdns_rec.ip
    elif not ip:
        raise ArgumentError("Please specify either --ip or --fqdn.")
    dbnetwork = get_net_id_from_ip(session, ip, dbnet_env)
    dbrouter = None
    for rtaddr in dbnetwork.routers:
        if rtaddr.ip == ip:
            dbrouter = rtaddr
            break
    if not dbrouter:
        raise NotFoundException("IP address {0} is not a router on "
                                "{1:l}.".format(ip, dbnetwork))
    # Bug fix: the original used map() purely for its side effect.  Under
    # Python 3 map() is lazy and would delete nothing; also iterate over a
    # copy, since deleting a record may mutate the underlying collection.
    for dbdns_rec in list(dbrouter.dns_records):
        delete_dns_record(dbdns_rec)
    dbnetwork.routers.remove(dbrouter)
    session.flush()
    # TODO: update the templates of Zebra hosts on the network
    return
def render(self, session, gateway, ip, netmask, prefixlen,
           network_environment, **arguments):
    """Delete a static route.

    The route is identified by its gateway, destination address and
    destination mask (given either as --netmask or --prefixlen).
    """
    dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                         network_environment)
    dbnetwork = get_net_id_from_ip(session, gateway, dbnet_env)
    # The destination mask may be given either as a netmask or as a
    # prefix length; both go through the same string form.
    destination = IPv4Network("%s/%s" % (ip,
                                         netmask if netmask else prefixlen))
    query = session.query(StaticRoute).filter_by(network=dbnetwork,
                                                 gateway_ip=gateway,
                                                 dest_ip=destination.ip,
                                                 dest_cidr=destination.prefixlen)
    try:
        dbroute = query.one()
    except NoResultFound:
        raise NotFoundException("Static Route to {0} using gateway {1} "
                                "not found.".format(destination, gateway))
    session.delete(dbroute)
    session.flush()
    # TODO: refresh affected host templates
    return
def render(self, session, ip, fqdn, all, network_environment, **arguments):
    """Show router addresses in a network environment.

    With --all, every router in the environment is returned; otherwise
    a single router is selected by --ip or --fqdn.
    """
    dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                         network_environment)
    query = (session.query(RouterAddress)
             .join(Network)
             .filter_by(network_environment=dbnet_env)
             .options(contains_eager('network'))
             .reset_joinpoint()
             .options(undefer(RouterAddress.comments),
                      joinedload('location'),
                      joinedload('dns_records')))
    if all:
        return query.all()
    if fqdn:
        dbdns_rec = ARecord.get_unique(session, fqdn=fqdn, compel=True)
        ip = dbdns_rec.ip
        errmsg = "named %s" % fqdn
    elif ip:
        errmsg = "with IP address %s" % ip
    else:
        raise ArgumentError("Please specify either --ip or --fqdn.")
    query = query.filter(RouterAddress.ip == ip)
    try:
        return query.one()
    except NoResultFound:
        raise NotFoundException("Router %s not found." % errmsg)
def render(self, session, network, ip, network_environment, all, style,
           type=False, hosts=False, **arguments):
    """Show networks, optionally with their hosts.

    A single network may be selected by --network or --ip; otherwise
    the result is filtered by type and location.
    """
    load_options = [undefer('comments'), joinedload('location')]
    # Host details are only needed for host listings and protobuf output.
    if hosts or style == "proto":
        load_options.extend([subqueryload("assignments"),
                             joinedload("assignments.interface"),
                             joinedload("assignments.dns_records"),
                             subqueryload("dynamic_stubs")])
    dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                         network_environment)
    dbnetwork = None
    if network:
        dbnetwork = get_network_byname(session, network, dbnet_env,
                                       query_options=load_options) or None
    if ip:
        dbnetwork = get_network_byip(session, ip, dbnet_env,
                                     query_options=load_options) or dbnetwork
    query = session.query(Network)
    query = query.filter_by(network_environment=dbnet_env)
    query = query.options(*load_options)
    if dbnetwork:
        if hosts:
            return NetworkHostList([dbnetwork])
        else:
            return dbnetwork
    if type:
        query = query.filter_by(network_type=type)
    dblocation = get_location(session, **arguments)
    if dblocation:
        # Include networks mapped to any child of the given location.
        childids = dblocation.offspring_ids()
        query = query.filter(Network.location_id.in_(childids))
    query = query.order_by(Network.ip)
    query = query.options(*load_options)
    if hosts:
        return NetworkHostList(query.all())
    else:
        return SimpleNetworkList(query.all())
def render(self, session, ip, fqdn, all, network_environment, **arguments):
    """Show router addresses in a network environment.

    With --all, return every router in the environment; otherwise
    select one router by --ip or --fqdn.

    Raises ArgumentError if neither selector was given, and
    NotFoundException if no matching router exists.
    """
    dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                        network_environment)
    q = session.query(RouterAddress)
    q = q.join(Network)
    q = q.filter_by(network_environment=dbnet_env)
    # Eager-load related data so formatting does not issue extra queries.
    q = q.options(contains_eager('network'))
    q = q.reset_joinpoint()
    q = q.options(undefer(RouterAddress.comments))
    q = q.options(joinedload('location'))
    q = q.options(joinedload('dns_records'))
    if all:
        return q.all()
    if fqdn:
        # Resolve the FQDN to an IP first, then look the router up by IP.
        dbdns_rec = ARecord.get_unique(session, fqdn=fqdn, compel=True)
        ip = dbdns_rec.ip
        errmsg = "named %s" % fqdn
    elif ip:
        errmsg = "with IP address %s" % ip
    else:
        raise ArgumentError("Please specify either --ip or --fqdn.")
    q = q.filter(RouterAddress.ip == ip)
    try:
        return q.one()
    except NoResultFound:
        raise NotFoundException("Router %s not found." % errmsg)
def render(self, session, gateway, ip, netmask, prefixlen,
           network_environment, **arguments):
    """Delete a static route identified by gateway and destination.

    The destination mask may be given as either --netmask or
    --prefixlen.  Raises NotFoundException if no matching route exists.
    """
    dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                        network_environment)
    # The route lives on the network that contains the gateway address.
    dbnetwork = get_net_id_from_ip(session, gateway, dbnet_env)
    if netmask:
        dest = IPv4Network("%s/%s" % (ip, netmask))
    else:
        dest = IPv4Network("%s/%s" % (ip, prefixlen))
    q = session.query(StaticRoute)
    q = q.filter_by(network=dbnetwork)
    q = q.filter_by(gateway_ip=gateway)
    q = q.filter_by(dest_ip=dest.ip)
    q = q.filter_by(dest_cidr=dest.prefixlen)
    try:
        dbroute = q.one()
    except NoResultFound:
        raise NotFoundException("Static Route to {0} using gateway {1} "
                                "not found.".format(dest, gateway))
    session.delete(dbroute)
    session.flush()
    # TODO: refresh affected host templates
    return
def get_net_id_from_ip(session, ip, network_environment=None):
    """Return the Network containing the given IP address.

    :param session: an open database session
    :param ip: the address to look up; None yields None
    :param network_environment: either a NetworkEnvironment object or a
        name/None to be resolved with get_unique_or_default()
    :raises NotFoundException: if no network in the environment contains
        the address
    """
    if ip is None:
        return None
    if isinstance(network_environment, NetworkEnvironment):
        dbnet_env = network_environment
    else:
        dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                             network_environment)
    # Query the last network having an address smaller than the given ip. There
    # is no guarantee that the returned network does in fact contain the given
    # ip, so this must be checked separately.
    subq = session.query(Network.ip)
    subq = subq.filter_by(network_environment=dbnet_env)
    subq = subq.filter(Network.ip <= ip)
    subq = subq.order_by(desc(Network.ip)).limit(1)
    q = session.query(Network)
    q = q.filter_by(network_environment=dbnet_env)
    q = q.filter(Network.ip == subq.as_scalar())
    net = q.first()
    # Idiom fix: "ip not in" instead of "not ip in".  The containment test
    # is required because the candidate network may still end below ip.
    if not net or ip not in net.network:
        raise NotFoundException("Could not determine network containing IP "
                                "address %s." % ip)
    return net
def del_dynamic_range(self, session, logger, startip, endip):
    """Delete the dynamic DHCP stubs between startip and endip, inclusive.

    Both endpoints must exist, must lie on the same network, and every
    record in between must be a dynamic stub; otherwise ArgumentError
    is raised and nothing is deleted.
    """
    dbnet_env = NetworkEnvironment.get_unique_or_default(session)
    startnet = get_net_id_from_ip(session, startip, dbnet_env)
    endnet = get_net_id_from_ip(session, endip, dbnet_env)
    if startnet != endnet:
        raise ArgumentError("IP addresses %s (%s) and %s (%s) must be "
                            "on the same subnet." % (startip, startnet.ip,
                                                     endip, endnet.ip))
    q = session.query(ARecord)
    q = q.filter_by(network=startnet)
    q = q.filter(ARecord.ip >= startip)
    q = q.filter(ARecord.ip <= endip)
    # Ordering by IP lets the endpoint checks below use existing[0]/[-1].
    q = q.order_by(ARecord.ip)
    # Eager-load the DNS data the deletion helper will touch.
    q = q.options(joinedload('fqdn'),
                  joinedload('fqdn.aliases'),
                  joinedload('fqdn.srv_records'),
                  joinedload('reverse_ptr'))
    existing = q.all()
    if not existing:
        raise ArgumentError("Nothing found in range.")
    # The exact endpoints must be present, not merely addresses inside
    # the range.
    if existing[0].ip != startip:
        raise ArgumentError("No system found with IP address %s." % startip)
    if existing[-1].ip != endip:
        raise ArgumentError("No system found with IP address %s." % endip)
    invalid = [s for s in existing if s.dns_record_type != 'dynamic_stub']
    if invalid:
        raise ArgumentError("The range contains non-dynamic systems:\n" +
                            "\n".join([format(i, "a") for i in invalid]))
    self.del_dynamic_stubs(session, logger, existing)
def render(self, session, gateway, ip, netmask, prefixlen,
           network_environment, comments, **arguments):
    """Add a static route to the network containing the gateway.

    The destination mask may be given as --netmask or --prefixlen.
    Overlapping routes through the same network are rejected.
    """
    dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                         network_environment)
    dbnetwork = get_net_id_from_ip(session, gateway, dbnet_env)
    destination = IPv4Network("%s/%s" % (ip,
                                         netmask if netmask else prefixlen))
    # TODO: this will have to be changed if we want equal cost multipath
    # etc.
    for existing in dbnetwork.static_routes:
        if not destination.overlaps(existing.destination):
            continue
        raise ArgumentError("{0} already has an overlapping route to "
                            "{1} using gateway {2}.".format(
                                dbnetwork, existing.destination,
                                existing.gateway_ip))
    session.add(StaticRoute(network=dbnetwork,
                            dest_ip=destination.ip,
                            dest_cidr=destination.prefixlen,
                            gateway_ip=gateway,
                            comments=comments))
    session.flush()
    # TODO: refresh affected host templates
    return
def render(self, session, gateway, ip, netmask, prefixlen,
           network_environment, comments, **arguments):
    """Add a static route to the network containing the gateway.

    The destination mask may be given as either --netmask or
    --prefixlen.  Raises ArgumentError if the destination overlaps an
    existing route on the same network.
    """
    dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                        network_environment)
    dbnetwork = get_net_id_from_ip(session, gateway, dbnet_env)
    if netmask:
        dest = IPv4Network("%s/%s" % (ip, netmask))
    else:
        dest = IPv4Network("%s/%s" % (ip, prefixlen))
    # TODO: this will have to be changed if we want equal cost multipath
    # etc.
    for route in dbnetwork.static_routes:
        if dest.overlaps(route.destination):
            raise ArgumentError(
                "{0} already has an overlapping route to "
                "{1} using gateway {2}.".format(dbnetwork,
                                                route.destination,
                                                route.gateway_ip)
            )
    route = StaticRoute(
        network=dbnetwork, dest_ip=dest.ip, dest_cidr=dest.prefixlen,
        gateway_ip=gateway, comments=comments
    )
    session.add(route)
    session.flush()
    # TODO: refresh affected host templates
    return
def render(self, session, dbuser, ip, netmask, prefixlen,
           network_environment, **arguments):
    """Merge the network containing ip with its siblings into a supernet.

    The new prefix length (given as --prefixlen or derived from
    --netmask) must be shorter than the current one.  All networks
    falling inside the resulting supernet are folded into it: their
    routers are removed, foreign references are repointed, and the old
    rows are deleted.
    """
    if netmask:
        # There must be a faster way, but this is the easy one
        net = IPv4Network("127.0.0.0/%s" % netmask)
        prefixlen = net.prefixlen
    if prefixlen is None or prefixlen < 8 or prefixlen > 31:
        raise ArgumentError("The prefix length must be between 8 and 31.")
    dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                         network_environment)
    self.az.check_network_environment(dbuser, dbnet_env)
    dbnetwork = get_net_id_from_ip(session, ip,
                                   network_environment=dbnet_env)
    if prefixlen >= dbnetwork.cidr:
        raise ArgumentError("The specified --prefixlen must be smaller "
                            "than the current value.")
    # IPv4Network has a supernet() object, but that does not normalize the
    # IP address, i.e. IPv4Network('1.2.3.0/24').supernet() will return
    # IPv4Network('1.2.3.0/23'). Do the normalization manually.
    supernet = dbnetwork.network.supernet(new_prefix=prefixlen)
    supernet = IPv4Network("%s/%d" % (supernet.network, supernet.prefixlen))
    q = session.query(Network)
    q = q.filter_by(network_environment=dbnet_env)
    q = q.filter(and_(Network.ip >= supernet.ip,
                      Network.ip < supernet.broadcast))
    q = q.order_by(Network.ip)
    dbnets = q.all()
    if dbnets[0].ip == supernet.ip:
        # A network already starts at the supernet address: widen it.
        dbsuper = dbnets.pop(0)
        dbsuper.cidr = prefixlen
    else:
        # Create a new network, copying the parameters from the one
        # specified on the command line
        dbsuper = Network(name=dbnetwork.name, network=supernet,
                          network_environment=dbnet_env,
                          location=dbnetwork.location,
                          side=dbnetwork.side,
                          comments=dbnetwork.comments)
        session.add(dbsuper)
    for oldnet in dbnets:
        # Delete routers of the old subnets.
        for dbrouter in oldnet.routers:
            # Bug fix: the original used map() purely for its side effect,
            # which is lazy (a no-op) under Python 3; iterate a copy since
            # deletion may mutate the collection.
            for dbdns_rec in list(dbrouter.dns_records):
                delete_dns_record(dbdns_rec)
        oldnet.routers = []
        # Repoint anything referencing the old subnet to the supernet.
        fix_foreign_links(session, oldnet, dbsuper)
        session.delete(oldnet)
    session.flush()
def render(self, session, network_environment, **arguments):
    """Show a single network environment.

    Comments and the linked DNS environment are loaded eagerly so
    formatting needs no further queries.
    """
    query_options = [undefer("comments"),
                     joinedload("dns_environment"),
                     undefer("dns_environment.comments")]
    return NetworkEnvironment.get_unique(session, network_environment,
                                         compel=True,
                                         query_options=query_options)
def render(self, session, dbuser, ip, netmask, prefixlen,
           network_environment, **arguments):
    """Merge the network containing ip with its siblings into a supernet.

    The new prefix length (from --prefixlen or derived from --netmask)
    must be shorter than the current one.  Networks falling inside the
    supernet are folded into it and deleted.
    """
    if netmask:
        # There must me a faster way, but this is the easy one
        net = IPv4Network("127.0.0.0/%s" % netmask)
        prefixlen = net.prefixlen
    if prefixlen is None or prefixlen < 8 or prefixlen > 31:
        raise ArgumentError("The prefix length must be between 8 and 31.")
    dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                        network_environment)
    self.az.check_network_environment(dbuser, dbnet_env)
    dbnetwork = get_net_id_from_ip(session, ip,
                                   network_environment=dbnet_env)
    if prefixlen >= dbnetwork.cidr:
        raise ArgumentError("The specified --prefixlen must be smaller "
                            "than the current value.")
    # IPv4Network has a supernet() object, but that does not normalize the
    # IP address, i.e. IPv4Network('1.2.3.0/24').supernet() will return
    # IPv4Network('1.2.3.0/23'). Do the normalization manually.
    supernet = dbnetwork.network.supernet(new_prefix=prefixlen)
    supernet = IPv4Network("%s/%d" % (supernet.network, supernet.prefixlen))
    q = session.query(Network)
    q = q.filter_by(network_environment=dbnet_env)
    q = q.filter(and_(Network.ip >= supernet.ip,
                      Network.ip < supernet.broadcast))
    q = q.order_by(Network.ip)
    dbnets = q.all()
    if dbnets[0].ip == supernet.ip:
        # A network already starts at the supernet address: just widen it.
        dbsuper = dbnets.pop(0)
        dbsuper.cidr = prefixlen
    else:
        # Create a new network, copying the parameters from the one
        # specified on the command line
        dbsuper = Network(name=dbnetwork.name, network=supernet,
                          network_environment=dbnet_env,
                          location=dbnetwork.location,
                          side=dbnetwork.side,
                          comments=dbnetwork.comments)
        session.add(dbsuper)
    for oldnet in dbnets:
        # Delete routers of the old subnets
        # NOTE(review): map() is used here only for its side effect; under
        # Python 3 map() is lazy and this would delete nothing.  Fine under
        # Python 2, but worth fixing before any py3 migration.
        for dbrouter in oldnet.routers:
            map(delete_dns_record, dbrouter.dns_records)
        oldnet.routers = []
        fix_foreign_links(session, oldnet, dbsuper)
        session.delete(oldnet)
    session.flush()
def render(self, session, logger, fillnetwork, **arguments):
    """Fill an entire network with dynamic range entries.

    Expands the network into an explicit start/end address pair and
    delegates to the add_dynamic_range command.
    """
    net_env = NetworkEnvironment.get_unique_or_default(session)
    dbnetwork = Network.get_unique(session, fillnetwork,
                                   network_environment=net_env,
                                   compel=True)
    arguments['startip'] = dbnetwork.first_usable_host
    # The last usable address is the one just below the broadcast address.
    arguments['endip'] = dbnetwork.broadcast - 1
    return CommandAddDynamicRange.render(self, session, logger, **arguments)
def render(self, session, network_environment, **arguments):
    """Show a single network environment.

    Comments and the linked DNS environment are eagerly loaded so that
    formatting the result needs no further queries.
    """
    options = [
        undefer("comments"),
        joinedload("dns_environment"),
        undefer("dns_environment.comments")
    ]
    dbnet_env = NetworkEnvironment.get_unique(session, network_environment,
                                              compel=True,
                                              query_options=options)
    return dbnet_env
def render(self, session, dbuser, fqdn, building, ip, network_environment,
           comments, **arguments):
    """Register a router address on a network.

    The router is named by fqdn; the address comes either from --ip (a
    DNS record is created if needed) or from an existing A record for
    the FQDN.  Policy checks reject addresses that are not valid router
    addresses on internal networks.
    """
    dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                        network_environment)
    self.az.check_network_environment(dbuser, dbnet_env)
    if building:
        dbbuilding = Building.get_unique(session, building, compel=True)
    else:
        dbbuilding = None
    (short, dbdns_domain) = parse_fqdn(session, fqdn)
    dbfqdn = Fqdn.get_or_create(session, name=short,
                                dns_domain=dbdns_domain,
                                dns_environment=dbnet_env.dns_environment)
    if ip:
        # Explicit IP: create the A record if it does not exist yet.
        dbnetwork = get_net_id_from_ip(session, ip, dbnet_env)
        dbdns_rec = ARecord.get_or_create(session, fqdn=dbfqdn, ip=ip,
                                          network=dbnetwork)
    else:
        # No IP given: take it from the existing A record of the FQDN.
        dbdns_rec = ARecord.get_unique(session, dbfqdn, compel=True)
        ip = dbdns_rec.ip
        dbnetwork = dbdns_rec.network
    assert ip in dbnetwork.network, "IP %s is outside network %s" % (
        ip, dbnetwork.ip)
    if ip in dbnetwork.router_ips:
        raise ArgumentError(
            "IP address {0} is already present as a router "
            "for {1:l}.".format(ip, dbnetwork))
    # Policy checks are valid only for internal networks
    if dbnetwork.is_internal:
        if ip >= dbnetwork.first_usable_host or \
           int(ip) - int(dbnetwork.ip) in dbnetwork.reserved_offsets:
            raise ArgumentError(
                "IP address {0} is not a valid router address "
                "on {1:l}.".format(ip, dbnetwork))
    dbnetwork.routers.append(
        RouterAddress(ip=ip, location=dbbuilding,
                      dns_environment=dbdns_rec.fqdn.dns_environment,
                      comments=comments))
    session.flush()
    # TODO: update the templates of Zebra hosts on the network
    return
def del_dynamic_network(self, session, logger, network):
    """Delete every dynamic DHCP stub defined on the given network."""
    net_env = NetworkEnvironment.get_unique_or_default(session)
    dbnetwork = Network.get_unique(session, network,
                                   network_environment=net_env,
                                   compel=True)
    stubs = (session.query(DynamicStub)
             .filter_by(network=dbnetwork)
             .order_by(asc(DynamicStub.ip))
             .all())
    if not stubs:
        raise ArgumentError("No dynamic stubs found on network.")
    self.del_dynamic_stubs(session, logger, stubs)
def render(self, session, network_environment, dns_environment, comments,
           **arguments):
    """Create a new network environment.

    Validates the requested name, refuses duplicates, and ties the new
    environment to an existing DNS environment and an optional
    location.
    """
    validate_nlist_key("network environment", network_environment)
    # Fail early if an environment with this name already exists.
    NetworkEnvironment.get_unique(session, network_environment,
                                  preclude=True)
    dbdns_env = DnsEnvironment.get_unique(session, dns_environment,
                                          compel=True)
    # Currently input.xml lists --building only, but that may change
    location = get_location(session, **arguments)
    dbnet_env = NetworkEnvironment(name=network_environment,
                                   dns_environment=dbdns_env,
                                   location=location,
                                   comments=comments)
    # Either both are the defaults, or neither is.
    if dbdns_env.is_default != dbnet_env.is_default:
        raise ArgumentError("Only the default network environment may be "
                            "associated with the default DNS environment.")
    session.add(dbnet_env)
    session.flush()
    return
def render(self, session, network_environment, **arguments):
    """Delete a network environment.

    The default environment cannot be deleted, nor can one that still
    has networks defined.
    """
    dbnet_env = NetworkEnvironment.get_unique(session, network_environment,
                                              compel=True)
    if dbnet_env.is_default:
        raise ArgumentError("{0} is the default network environment, "
                            "therefore it cannot be deleted."
                            .format(dbnet_env))
    q = session.query(Network).filter_by(network_environment=dbnet_env)
    if q.first():
        raise ArgumentError("{0} still has networks defined, delete them "
                            "first.".format(dbnet_env))
    session.delete(dbnet_env)
    session.flush()
    return
def render(self, session, network_environment, clear_location, comments,
           **arguments):
    """Update the location and/or comments of a network environment."""
    dbnet_env = NetworkEnvironment.get_unique(session, network_environment,
                                              compel=True)
    # Currently input.xml lists --building only, but that may change
    dblocation = get_location(session, **arguments)
    if dblocation:
        dbnet_env.location = dblocation
    # Runs after the assignment above, so --clear_location wins if both
    # a location and the clear flag were passed.
    if clear_location:
        dbnet_env.location = None
    if comments is not None:
        dbnet_env.comments = comments
    session.flush()
    return
def poll_vlan(self, session, logger, switch, now, ssh_args):
    """Refresh the observed VLANs of a switch.

    Runs the external vlan2net helper against the switch's primary IP
    and parses its CSV output (vlan, network, bitmask per row).

    NOTE(review): this block appears truncated in the source -- the
    try: below has no matching except/finally clause visible here.
    """
    if not switch.primary_ip:
        raise ArgumentError("Cannot poll VLAN info for {0:l} without "
                            "a registered IP address.".format(switch))
    # Drop previous observations before re-polling.
    session.query(ObservedVlan).filter_by(switch=switch).delete()
    session.flush()
    # Restrict operations to the internal network
    dbnet_env = NetworkEnvironment.get_unique_or_default(session)
    args = []
    if ssh_args:
        args.extend(ssh_args)
    args.append(self.config.get("broker", "vlan2net"))
    args.append("-ip")
    args.append(switch.primary_ip)
    out = run_command(args)
    try:
        reader = DictReader(StringIO(out))
        for row in reader:
            vlan = row.get("vlan", None)
            network = row.get("network", None)
            bitmask = row.get("bitmask", None)
            # Skip rows with missing or empty fields; log for diagnostics.
            if vlan is None or network is None or bitmask is None or \
               len(vlan) == 0 or len(network) == 0 or len(bitmask) == 0:
                logger.info(
                    "Missing value for vlan, network or bitmask in "
                    "output line #%d: %s" % (reader.line_num, row))
                continue
            try:
                vlan_int = int(vlan)
            except ValueError, e:
                logger.info("Error parsing vlan number in output "
                            "line #%d: %s error: %s" %
                            (reader.line_num, row, e))
                continue
            try:
                network = force_ipv4("network", network)
            except ArgumentError, e:
                # A malformed network address indicates a helper bug,
                # not bad user input.
                raise InternalError(e)
            try:
                bitmask_int = int(bitmask)
            except ValueError, e:
                logger.info("Error parsing bitmask in output "
                            "line #%d: %s error: %s" %
                            (reader.line_num, row, e))
                continue
def render(self, session, network_environment, **arguments):
    """Delete a network environment.

    The default environment cannot be deleted, nor can an environment
    that still has networks defined.
    """
    dbnet_env = NetworkEnvironment.get_unique(session, network_environment,
                                              compel=True)
    if dbnet_env.is_default:
        raise ArgumentError(
            "{0} is the default network environment, "
            "therefore it cannot be deleted.".format(dbnet_env))
    # Refuse deletion while any network still references this environment.
    if session.query(Network).filter_by(
            network_environment=dbnet_env).first():
        raise ArgumentError("{0} still has networks defined, delete them "
                            "first.".format(dbnet_env))
    session.delete(dbnet_env)
    session.flush()
    return
def render(self, session, service, instance, archetype, personality,
           networkip, **kwargs):
    """Map a service instance to a location (optionally per personality).

    If a matching map entry already exists, nothing new is created.
    """
    dbservice = Service.get_unique(session, service, compel=True)
    dblocation = get_location(session, **kwargs)
    dbinstance = get_service_instance(session, dbservice, instance)
    if networkip:
        dbnet_env = NetworkEnvironment.get_unique_or_default(session)
        dbnetwork = get_network_byip(session, networkip, dbnet_env)
    else:
        dbnetwork = None
    if archetype is None and personality:
        # Can't get here with the standard aq client.
        raise ArgumentError("Specifying --personality requires you to "
                            "also specify --archetype.")
    # Extra constructor arguments for the personality-specific map.
    map_args = {}
    if archetype and personality:
        dbpersona = Personality.get_unique(session, name=personality,
                                           archetype=archetype, compel=True)
        map_class = PersonalityServiceMap
        query = session.query(map_class).filter_by(personality=dbpersona)
        map_args["personality"] = dbpersona
    else:
        map_class = ServiceMap
        query = session.query(map_class)
    dbmap = query.filter_by(location=dblocation,
                            service_instance=dbinstance,
                            network=dbnetwork).first()
    if not dbmap:
        dbmap = map_class(service_instance=dbinstance,
                          location=dblocation,
                          network=dbnetwork, **map_args)
        session.add(dbmap)
    session.flush()
    return
def poll_vlan(self, session, logger, switch, now, ssh_args):
    """Refresh the observed VLANs of a switch.

    Runs the external vlan2net helper against the switch's primary IP
    and parses its CSV output (vlan, network, bitmask per row).

    NOTE(review): this block appears truncated in the source -- the
    try: below has no matching except/finally clause visible here.
    """
    if not switch.primary_ip:
        raise ArgumentError("Cannot poll VLAN info for {0:l} without "
                            "a registered IP address.".format(switch))
    # Drop previous observations before re-polling.
    session.query(ObservedVlan).filter_by(switch=switch).delete()
    session.flush()
    # Restrict operations to the internal network
    dbnet_env = NetworkEnvironment.get_unique_or_default(session)
    args = []
    if ssh_args:
        args.extend(ssh_args)
    args.append(self.config.get("broker", "vlan2net"))
    args.append("-ip")
    args.append(switch.primary_ip)
    out = run_command(args)
    try:
        reader = DictReader(StringIO(out))
        for row in reader:
            vlan = row.get("vlan", None)
            network = row.get("network", None)
            bitmask = row.get("bitmask", None)
            # Skip rows with missing or empty fields; log for diagnostics.
            if vlan is None or network is None or bitmask is None or \
               len(vlan) == 0 or len(network) == 0 or len(bitmask) == 0:
                logger.info("Missing value for vlan, network or bitmask in "
                            "output line #%d: %s" % (reader.line_num, row))
                continue
            try:
                vlan_int = int(vlan)
            except ValueError, e:
                logger.info("Error parsing vlan number in output "
                            "line #%d: %s error: %s" %
                            (reader.line_num, row, e))
                continue
            try:
                network = force_ipv4("network", network)
            except ArgumentError, e:
                # Malformed addresses indicate a helper bug, not user error.
                raise InternalError(e)
            try:
                bitmask_int = int(bitmask)
            except ValueError, e:
                logger.info("Error parsing bitmask in output "
                            "line #%d: %s error: %s" %
                            (reader.line_num, row, e))
                continue
def render(self, session, dbuser, fqdn, building, ip, network_environment,
           comments, **arguments):
    """Register a router address on a network.

    The router is named by fqdn; the address comes either from --ip (a
    DNS record is created if needed) or from an existing A record for
    the FQDN.  Policy checks reject addresses that are not valid router
    addresses on internal networks.
    """
    dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                        network_environment)
    self.az.check_network_environment(dbuser, dbnet_env)
    if building:
        dbbuilding = Building.get_unique(session, building, compel=True)
    else:
        dbbuilding = None
    (short, dbdns_domain) = parse_fqdn(session, fqdn)
    dbfqdn = Fqdn.get_or_create(session, name=short,
                                dns_domain=dbdns_domain,
                                dns_environment=dbnet_env.dns_environment)
    if ip:
        # Explicit IP: create the A record if it does not exist yet.
        dbnetwork = get_net_id_from_ip(session, ip, dbnet_env)
        dbdns_rec = ARecord.get_or_create(session, fqdn=dbfqdn, ip=ip,
                                          network=dbnetwork)
    else:
        # No IP given: take it from the existing A record of the FQDN.
        dbdns_rec = ARecord.get_unique(session, dbfqdn, compel=True)
        ip = dbdns_rec.ip
        dbnetwork = dbdns_rec.network
    assert ip in dbnetwork.network, "IP %s is outside network %s" % (
        ip, dbnetwork.ip)
    if ip in dbnetwork.router_ips:
        raise ArgumentError("IP address {0} is already present as a router "
                            "for {1:l}.".format(ip, dbnetwork))
    # Policy checks are valid only for internal networks
    if dbnetwork.is_internal:
        if ip >= dbnetwork.first_usable_host or \
           int(ip) - int(dbnetwork.ip) in dbnetwork.reserved_offsets:
            raise ArgumentError("IP address {0} is not a valid router address "
                                "on {1:l}.".format(ip, dbnetwork))
    dbnetwork.routers.append(
        RouterAddress(ip=ip, location=dbbuilding,
                      dns_environment=dbdns_rec.fqdn.dns_environment,
                      comments=comments))
    session.flush()
    # TODO: update the templates of Zebra hosts on the network
    return
def render(self, session, network, ip, network_environment, all, style,
           type=False, hosts=False, **arguments):
    """Show networks, optionally with their hosts.

    A single network may be selected by --network or --ip; otherwise
    the result set may be narrowed by type and location.
    """
    options = [undefer('comments'), joinedload('location')]
    # Host details are needed only for host listings and protobuf output.
    if hosts or style == "proto":
        options.extend([
            subqueryload("assignments"),
            joinedload("assignments.interface"),
            joinedload("assignments.dns_records"),
            subqueryload("dynamic_stubs")
        ])
    dbnet_env = NetworkEnvironment.get_unique_or_default(
        session, network_environment)
    # --ip takes precedence over --network if both are given.
    dbnetwork = network and get_network_byname(
        session, network, dbnet_env, query_options=options) or None
    dbnetwork = ip and get_network_byip(
        session, ip, dbnet_env, query_options=options) or dbnetwork
    q = session.query(Network)
    q = q.filter_by(network_environment=dbnet_env)
    q = q.options(*options)
    if dbnetwork:
        if hosts:
            return NetworkHostList([dbnetwork])
        else:
            return dbnetwork
    if type:
        q = q.filter_by(network_type=type)
    dblocation = get_location(session, **arguments)
    if dblocation:
        # Include networks mapped to any child of the given location.
        childids = dblocation.offspring_ids()
        q = q.filter(Network.location_id.in_(childids))
    q = q.order_by(Network.ip)
    q = q.options(*options)
    if hosts:
        return NetworkHostList(q.all())
    else:
        return SimpleNetworkList(q.all())
def render(self, session, fqdn, record_type, dns_environment,
           network_environment=None, **arguments):
    """Show the DNS records of an FQDN, optionally filtered by type.

    If a network environment is given and no DNS environment is, the
    network environment's DNS environment is used.
    """
    if network_environment:
        if not isinstance(network_environment, NetworkEnvironment):
            network_environment = NetworkEnvironment.get_unique_or_default(
                session, network_environment)
        if not dns_environment:
            dns_environment = network_environment.dns_environment
    dbdns_env = DnsEnvironment.get_unique_or_default(session,
                                                     dns_environment)
    # No compel here. query(DnsRecord).filter_by(fqdn=None) will fail if the
    # FQDN is invalid, and that will give a better error message.
    dbfqdn = Fqdn.get_unique(session, fqdn=fqdn, dns_environment=dbdns_env)
    if not record_type:
        cls = DnsRecord
    elif record_type in DNS_RRTYPE_MAP:
        cls = DNS_RRTYPE_MAP[record_type]
    else:
        cls = DnsRecord.polymorphic_subclass(record_type,
                                             "Unknown DNS record type")
    # We want to query(ARecord) instead of
    # query(DnsRecord).filter_by(record_type='a_record'), because the former
    # works for DynamicStub as well
    q = session.query(cls)
    if cls == DnsRecord:
        q = q.with_polymorphic('*')
    result = q.filter_by(fqdn=dbfqdn).all()
    if not result:
        raise NotFoundException("%s %s not found." %
                                (cls._get_class_label(), fqdn))
    return result
def render(self, session, service, instance, archetype, personality,
           networkip, **arguments):
    """Remove a service map entry (optionally personality-specific).

    Deletes the first map entry matching the given location, service
    instance and network; doing nothing if no entry matches.
    """
    dbservice = Service.get_unique(session, service, compel=True)
    dbinstance = ServiceInstance.get_unique(session, service=dbservice,
                                            name=instance, compel=True)
    dblocation = get_location(session, **arguments)
    if networkip:
        dbnet_env = NetworkEnvironment.get_unique_or_default(session)
        dbnetwork = get_network_byip(session, networkip, dbnet_env)
    else:
        dbnetwork = None
    if personality:
        if not archetype:
            # Can't get here with the standard aq client.
            raise ArgumentError("Specifying --personality requires you to "
                                "also specify --archetype.")
        dbarchetype = Archetype.get_unique(session, archetype, compel=True)
        dbpersonality = Personality.get_unique(session,
                                               archetype=dbarchetype,
                                               name=personality,
                                               compel=True)
        # Personality-specific maps live in a separate table.
        q = session.query(PersonalityServiceMap)
        q = q.filter_by(personality=dbpersonality)
    else:
        q = session.query(ServiceMap)
    q = q.filter_by(location=dblocation, service_instance=dbinstance,
                    network=dbnetwork)
    dbmap = q.first()
    if dbmap:
        session.delete(dbmap)
    session.flush()
    return
def search_system_query(session, dns_record_type=DnsRecord, **kwargs): q = session.query(dns_record_type) # Outer-join in all the subclasses so that each access of # system doesn't (necessarily) issue another query. if dns_record_type is DnsRecord: q = q.with_polymorphic('*') dbdns_env = DnsEnvironment.get_unique_or_default(session, kwargs.get("dns_environment", None)) q = q.join((Fqdn, DnsRecord.fqdn_id == Fqdn.id)) q = q.filter_by(dns_environment=dbdns_env) q = q.options(contains_eager('fqdn')) if kwargs.get('fqdn', None): (short, dbdns_domain) = parse_fqdn(session, kwargs['fqdn']) q = q.filter_by(name=short, dns_domain=dbdns_domain) if kwargs.get('dns_domain', None): dbdns_domain = DnsDomain.get_unique(session, kwargs['dns_domain'], compel=True) q = q.filter_by(dns_domain=dbdns_domain) if kwargs.get('shortname', None): q = q.filter_by(name=kwargs['shortname']) q = q.reset_joinpoint() if kwargs.get('ip', None): q = q.filter(ARecord.ip == kwargs['ip']) if kwargs.get('networkip', None): net_env = kwargs.get('network_environment', None) dbnet_env = NetworkEnvironment.get_unique_or_default(session, net_env) dbnetwork = get_network_byip(session, kwargs['networkip'], dbnet_env) q = q.filter(ARecord.network == dbnetwork) if kwargs.get('mac', None): raise UnimplementedError("search_system --mac is no longer supported, " "try search_hardware.") if kwargs.get('type', None): # Deprecated... remove if it becomes a problem. type_arg = kwargs['type'].strip().lower() q = q.filter_by(dns_record_type=type_arg) return q
def render(self, session, dbuser, network, ip, network_environment, type,
           side, comments, **arguments):
    """Update attributes of the networks selected by --network/--ip.

    Any of type, side, location (from generic location arguments) and
    comments may be updated; an empty --comments clears the field.
    """
    dbnet_env = NetworkEnvironment.get_unique_or_default(
        session, network_environment)
    self.az.check_network_environment(dbuser, dbnet_env)
    if not network and not ip:
        raise ArgumentError("Please specify either --network or --ip.")
    q = session.query(Network)
    q = q.filter_by(network_environment=dbnet_env)
    if network:
        q = q.filter_by(name=network)
    if ip:
        q = q.filter_by(ip=ip)
    networks = q.all()
    if not networks:
        raise NotFoundException("No matching network was found.")
    dblocation = get_location(session, **arguments)
    # Bug fix: the original iterated over a second q.all() call,
    # re-executing the query; reuse the already-fetched result instead.
    for dbnetwork in networks:
        if type:
            dbnetwork.network_type = type
        if side:
            dbnetwork.side = side
        if dblocation:
            dbnetwork.location = dblocation
        if comments is not None:
            # An empty or whitespace-only string clears the comments.
            if comments.strip() == "":
                dbnetwork.comments = None
            else:
                dbnetwork.comments = comments
    session.flush()
    return
def render(self, session, fqdn, record_type, dns_environment,
           network_environment=None, **arguments):
    """Show the DNS records of an FQDN, optionally filtered by type.

    If a network environment is given and no DNS environment is, the
    network environment's DNS environment is used.  Raises
    NotFoundException if no matching record exists.
    """
    if network_environment:
        if not isinstance(network_environment, NetworkEnvironment):
            network_environment = NetworkEnvironment.get_unique_or_default(
                session, network_environment)
        if not dns_environment:
            dns_environment = network_environment.dns_environment
    dbdns_env = DnsEnvironment.get_unique_or_default(session,
                                                     dns_environment)
    # No compel here. query(DnsRecord).filter_by(fqdn=None) will fail if the
    # FQDN is invalid, and that will give a better error message.
    dbfqdn = Fqdn.get_unique(session, fqdn=fqdn, dns_environment=dbdns_env)
    if record_type:
        if record_type in DNS_RRTYPE_MAP:
            cls = DNS_RRTYPE_MAP[record_type]
        else:
            cls = DnsRecord.polymorphic_subclass(record_type,
                                                 "Unknown DNS record type")
    else:
        cls = DnsRecord
    # We want to query(ARecord) instead of
    # query(DnsRecord).filter_by(record_type='a_record'), because the former
    # works for DynamicStub as well
    q = session.query(cls)
    if cls == DnsRecord:
        q = q.with_polymorphic('*')
    q = q.filter_by(fqdn=dbfqdn)
    result = q.all()
    if not result:
        raise NotFoundException("%s %s not found." %
                                (cls._get_class_label(), fqdn))
    return result
def render(self, session, dbuser, network, ip, network_environment, type,
           side, comments, **arguments):
    """Update attributes of the networks selected by --network/--ip.

    Any of type, side, location (from generic location arguments) and
    comments may be updated; an empty --comments clears the field.
    """
    dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                         network_environment)
    self.az.check_network_environment(dbuser, dbnet_env)
    if not network and not ip:
        raise ArgumentError("Please specify either --network or --ip.")
    q = session.query(Network)
    q = q.filter_by(network_environment=dbnet_env)
    if network:
        q = q.filter_by(name=network)
    if ip:
        q = q.filter_by(ip=ip)
    networks = q.all()
    if not networks:
        raise NotFoundException("No matching network was found.")
    dblocation = get_location(session, **arguments)
    # Bug fix: iterate the list fetched above instead of calling q.all()
    # again, which re-executed the same query a second time.
    for dbnetwork in networks:
        if type:
            dbnetwork.network_type = type
        if side:
            dbnetwork.side = side
        if dblocation:
            dbnetwork.location = dblocation
        if comments is not None:
            # An empty or whitespace-only string clears the comments.
            if comments.strip() == "":
                dbnetwork.comments = None
            else:
                dbnetwork.comments = comments
    session.flush()
    return
def __init__(self, session, logger, dbbuilding, dryrun, incremental):
    """Initialize the synchronizer state and pre-load location/network caches."""
    self.session = session
    self.logger = logger
    self.building = dbbuilding
    self.dryrun = dryrun
    self.incremental = incremental
    # Synchronize the internal environment only
    self.net_env = NetworkEnvironment.get_unique_or_default(session)
    self.errors = []
    self.dns_env = DnsEnvironment.get_unique_or_default(session)
    # Cache building and bunker information.  Every building is loaded even
    # when only one is of interest, so subnetdata.txt can be verified.
    self.buildings = dict((bldg.name, bldg)
                          for bldg in session.query(Building))
    self.bunkers = dict((bnk.name, bnk)
                        for bnk in session.query(Bunker))
    # Used to limit the number of warnings
    self.unknown_syslocs = set()
    # Load all existing networks -- otherwise networks with a wrong location
    # could not be fixed
    net_q = session.query(Network)
    net_q = net_q.filter_by(network_environment=self.net_env)
    net_q = net_q.options(subqueryload("routers"))
    self.aqnetworks = dict((net.ip, net) for net in net_q)
    # Remember how many networks existed before the refresh
    self.networks_before = len(self.aqnetworks)
except AddressValueError, e: raise ArgumentError("Failed to parse the network address: %s" % e) except NetmaskValueError, e: raise ArgumentError("Failed to parse the netmask: %s" % e) if ip != address.network: raise ArgumentError("IP address %s is not a network address. " "Maybe you meant %s?" % (ip, address.network)) location = get_location(session, **arguments) if not type: type = 'unknown' if not side: side = 'a' dbnet_env = NetworkEnvironment.get_unique_or_default(session, network_environment) self.az.check_network_environment(dbuser, dbnet_env) # Check if the name is free. Network names are not unique in QIP and # there is no uniqueness constraint in AQDB, so only warn if the name is # already in use. q = session.query(Network).filter_by(name=network) dbnetwork = q.first() if dbnetwork: logger.client_info("WARNING: Network name %s is already used for " "address %s." % (network, str(dbnetwork.network))) # Check if the address is free try: dbnetwork = get_net_id_from_ip(session, address.ip, network_environment=dbnet_env)
def render(self, session, logger, machine, chassis, switch, interface,
           fqdn, ip, label, keep_dns, network_environment, **kwargs):
    """Remove an address assignment from a hardware entity's interface.

    The address may be selected by --ip, --label, or --fqdn.  The matching
    DNS records are deleted too, unless --keep_dns was given or the address
    is still assigned elsewhere.  The host plenary and DSDB are updated.

    Raises ArgumentError on bad selectors, on environment mismatch, or when
    the address is the hardware entity's primary IP.
    """
    # Exactly one of machine/chassis/switch is expected -- presumably
    # enforced by the option parser; TODO confirm
    if machine:
        hwtype = 'machine'
        hwname = machine
    elif chassis:
        hwtype = 'chassis'
        hwname = chassis
    elif switch:
        hwtype = 'switch'
        hwname = switch
    dbhw_ent = HardwareEntity.get_unique(session, hwname,
                                         hardware_type=hwtype, compel=True)
    dbinterface = Interface.get_unique(session, hardware_entity=dbhw_ent,
                                       name=interface, compel=True)
    dbnet_env = NetworkEnvironment.get_unique_or_default(
        session, network_environment)
    # Snapshot taken before any change, for the DSDB diff at the end
    oldinfo = DSDBRunner.snapshot_hw(dbhw_ent)
    if fqdn:
        # Resolve the FQDN to an IP; the IP lookup below then finds the
        # assignment
        dbdns_rec = ARecord.get_unique(
            session, fqdn=fqdn, dns_environment=dbnet_env.dns_environment,
            compel=True)
        ip = dbdns_rec.ip
    addr = None
    if ip:
        addr = first_of(dbinterface.assignments, lambda x: x.ip == ip)
        if not addr:
            raise ArgumentError("{0} does not have IP address {1} assigned to "
                                "it.".format(dbinterface, ip))
    elif label is not None:
        addr = first_of(dbinterface.assignments, lambda x: x.label == label)
        if not addr:
            raise ArgumentError("{0} does not have an address with label "
                                "{1}.".format(dbinterface, label))
    if not addr:
        raise ArgumentError("Please specify the address to be removed "
                            "using either --ip, --label, or --fqdn.")
    dbnetwork = addr.network
    ip = addr.ip
    if dbnetwork.network_environment != dbnet_env:
        raise ArgumentError("The specified address lives in {0:l}, not in "
                            "{1:l}. Use the --network_environment option "
                            "to select the correct environment.".format(
                                dbnetwork.network_environment, dbnet_env))
    # Forbid removing the primary name
    if ip == dbhw_ent.primary_ip:
        raise ArgumentError("The primary IP address of a hardware entity "
                            "cannot be removed.")
    dbinterface.assignments.remove(addr)
    # Check if the address was assigned to multiple interfaces, and remove
    # the DNS entries if this was the last use
    q = session.query(AddressAssignment)
    q = q.filter_by(network=dbnetwork)
    q = q.filter_by(ip=ip)
    other_uses = q.all()
    if not other_uses and not keep_dns:
        q = session.query(ARecord)
        q = q.filter_by(network=dbnetwork)
        q = q.filter_by(ip=ip)
        q = q.join(ARecord.fqdn)
        q = q.filter_by(dns_environment=dbnet_env.dns_environment)
        # Python 2 map() used for its side effect: delete every record
        map(delete_dns_record, q.all())
    session.flush()
    dbhost = getattr(dbhw_ent, "host", None)
    if dbhost:
        plenary_info = PlenaryHost(dbhost, logger=logger)
        key = plenary_info.get_write_key()
        try:
            lock_queue.acquire(key)
            try:
                plenary_info.write(locked=True)
            except IncompleteError:
                # FIXME: if this command is used after "add host" but before
                # "make", then writing out the template will fail due to
                # required services not being assigned.  Ignore this error
                # for now.
                plenary_info.restore_stash()
            dsdb_runner = DSDBRunner(logger=logger)
            dsdb_runner.update_host(dbhw_ent, oldinfo)
            if not other_uses and keep_dns:
                # The DNS record survives; re-register it in DSDB on its own
                # since it no longer belongs to the host
                q = session.query(ARecord)
                q = q.filter_by(network=dbnetwork)
                q = q.filter_by(ip=ip)
                dbdns_rec = q.first()
                dsdb_runner.add_host_details(dbdns_rec.fqdn, ip)
            dsdb_runner.commit_or_rollback("Could not add host to DSDB")
        except:
            # Any failure (including DSDB) rolls the plenary back
            plenary_info.restore_stash()
            raise
        finally:
            lock_queue.release(key)
    else:
        # No host object -- only DSDB needs updating
        dsdb_runner = DSDBRunner(logger=logger)
        dsdb_runner.update_host(dbhw_ent, oldinfo)
        dsdb_runner.commit_or_rollback("Could not add host to DSDB")
    return
def render(self, session, fqdn, dns_environment, dns_domain, shortname,
           record_type, ip, network, network_environment, target,
           target_domain, primary_name, used, reverse_override, reverse_ptr,
           fullinfo, style, **kwargs):
    """Search DNS records by a combination of optional filters.

    Returns the matching records (full objects, or just FQDNs for raw style
    without --fullinfo).  Join/filter order matters: filters relying on a
    joinpoint must follow the corresponding join().
    """
    if record_type:
        record_type = record_type.strip().lower()
        if record_type in DNS_RRTYPE_MAP:
            cls = DNS_RRTYPE_MAP[record_type]
        else:
            cls = DnsRecord.polymorphic_subclass(
                record_type, "Unknown DNS record type")
        q = session.query(cls)
    else:
        # No type restriction: query the base class polymorphically
        q = session.query(DnsRecord)
        q = q.with_polymorphic('*')
    dbnet_env = NetworkEnvironment.get_unique_or_default(
        session, network_environment)
    if network_environment:
        # The network environment determines the DNS environment
        dbdns_env = dbnet_env.dns_environment
    else:
        dbdns_env = DnsEnvironment.get_unique_or_default(
            session, dns_environment)
    if fqdn:
        dbfqdn = Fqdn.get_unique(session, fqdn=fqdn,
                                 dns_environment=dbdns_env, compel=True)
        q = q.filter_by(fqdn=dbfqdn)
    # Always join Fqdn so the environment filter and ordering can use it
    q = q.join((Fqdn, DnsRecord.fqdn_id == Fqdn.id))
    q = q.filter_by(dns_environment=dbdns_env)
    q = q.options(contains_eager('fqdn'))
    if dns_domain:
        dbdns_domain = DnsDomain.get_unique(session, dns_domain, compel=True)
        q = q.filter_by(dns_domain=dbdns_domain)
    if shortname:
        q = q.filter_by(name=shortname)
    q = q.join(DnsDomain)
    q = q.options(contains_eager('fqdn.dns_domain'))
    q = q.order_by(Fqdn.name, DnsDomain.name)
    q = q.reset_joinpoint()
    if ip:
        q = q.join(Network)
        q = q.filter_by(network_environment=dbnet_env)
        q = q.reset_joinpoint()
        q = q.filter(ARecord.ip == ip)
    if network:
        dbnetwork = Network.get_unique(session, network,
                                       network_environment=dbnet_env,
                                       compel=True)
        q = q.filter(ARecord.network == dbnetwork)
    if target:
        dbtarget = Fqdn.get_unique(session, fqdn=target,
                                   dns_environment=dbdns_env, compel=True)
        # Aliases and SRV records are the record types that have targets
        q = q.filter(
            or_(Alias.target == dbtarget, SrvRecord.target == dbtarget))
    if target_domain:
        dbdns_domain = DnsDomain.get_unique(session, target_domain,
                                            compel=True)
        # Alias the Fqdn table a second time for the target's FQDN
        TargetFqdn = aliased(Fqdn)
        q = q.join((TargetFqdn, or_(Alias.target_id == TargetFqdn.id,
                                    SrvRecord.target_id == TargetFqdn.id)))
        q = q.filter(TargetFqdn.dns_domain == dbdns_domain)
    if primary_name is not None:
        # True: only records that are a hardware entity's primary name
        if primary_name:
            q = q.filter(DnsRecord.hardware_entity.has())
        else:
            q = q.filter(~DnsRecord.hardware_entity.has())
    if used is not None:
        # "Used" means there is an address assignment for the same
        # (network, ip) pair
        if used:
            q = q.join(
                AddressAssignment,
                and_(ARecord.network_id == AddressAssignment.network_id,
                     ARecord.ip == AddressAssignment.ip))
        else:
            q = q.outerjoin(
                AddressAssignment,
                and_(ARecord.network_id == AddressAssignment.network_id,
                     ARecord.ip == AddressAssignment.ip))
            q = q.filter(AddressAssignment.id == None)
        q = q.reset_joinpoint()
    if reverse_override is not None:
        if reverse_override:
            q = q.filter(ARecord.reverse_ptr.has())
        else:
            q = q.filter(~ARecord.reverse_ptr.has())
    if reverse_ptr:
        dbtarget = Fqdn.get_unique(session, fqdn=reverse_ptr,
                                   dns_environment=dbdns_env, compel=True)
        q = q.filter(ARecord.reverse_ptr == dbtarget)
    if fullinfo:
        # Pre-load what the detailed formatter will touch
        q = q.options(undefer('comments'))
        q = q.options(subqueryload('hardware_entity'))
        q = q.options(undefer('alias_cnt'))
        return q.all()
    elif style == "raw":
        return StringAttributeList(q.all(), 'fqdn')
    else:
        # This is for the CSV formatter
        return q.all()
def render(self, session, **arguments):
    """Audit rack/bunker membership against the networks used in each rack.

    Builds a report of racks whose bunker assignment disagrees with the
    bunker "buckets" of the networks used inside them, plus suggested
    "aq update rack" commands to fix single-bucket mismatches.
    """
    bunker_bucket = {None: None}
    rack_bucket = defaultdict(dict)
    # Cache information for faster access later
    for bunker in session.query(Bunker).options(subqueryload('parents')):
        # Bunker names are expected to look like "<bucket>.<building>";
        # others are ignored
        if "." not in bunker.name:
            continue
        bucket, building = bunker.name.split(".", 1)  # pylint: disable=W0612
        bunker_bucket[bunker] = bucket.upper()
    # Pre-load all buildings into the session cache (result unused directly)
    q = session.query(Building).options(subqueryload('parents'))
    buildings = q.all()  # pylint: disable=W0612
    def_env = NetworkEnvironment.get_unique_or_default(session)
    HwRack = aliased(Rack)
    NetLoc = aliased(Location)
    # Query pairs of (rack, network location used inside the rack)
    q = session.query(HwRack, NetLoc)
    q = q.filter(HardwareEntity.location_id == HwRack.id)
    q = q.filter(HardwareEntity.model_id == Model.id)
    # Virtual hardware has no physical rack placement of its own
    q = q.filter(Model.model_type != VirtualMachineType.VirtualMachine)
    q = q.filter(Model.model_type != VirtualMachineType.VirtualAppliance)
    q = q.filter(Interface.hardware_entity_id == HardwareEntity.id)
    q = q.filter(AddressAssignment.interface_id == Interface.id)
    q = q.filter(AddressAssignment.network_id == Network.id)
    q = q.filter(Network.network_environment == def_env)
    q = q.filter(Network.location_id == NetLoc.id)
    # Defer columns the report never reads, to keep the query light
    q = q.options(defer(HwRack.comments),
                  defer(HwRack.fullname),
                  defer(HwRack.default_dns_domain_id),
                  defer(HwRack.rack_row),
                  defer(HwRack.rack_column),
                  joinedload(HwRack.parents),
                  defer(NetLoc.comments),
                  defer(NetLoc.fullname),
                  defer(NetLoc.default_dns_domain_id))
    q = q.distinct()
    # Map each rack to the set of bunker buckets its networks come from
    rack_bucket = defaultdict(set)
    for rack, net_loc in q:
        bucket = bunker_bucket[net_loc.bunker]
        rack_bucket[rack].add(bucket)
    violation_ids = []
    updates = []
    for rack in sorted(rack_bucket.keys(), key=attrgetter("name")):
        buckets = rack_bucket[rack]
        if len(buckets) > 1:
            # Networks from multiple buckets -- report in detail below
            violation_ids.append(rack.id)
            continue
        bucket = buckets.pop()
        if bucket:
            # The rack should live in this bunker
            bunker = bucket.lower() + "." + rack.building.name
            if not rack.bunker or rack.bunker.name != bunker:
                updates.append("aq update rack --rack %s --bunker %s"
                               % (rack, bunker))
        elif rack.bunker:
            # No bunkerized networks -- the rack should not be in a bunker
            if rack.room:
                new_parent = "--room %s" % rack.room.name
            else:
                new_parent = "--building %s" % rack.building.name
            updates.append("aq update rack --rack %s %s" % (rack, new_parent))
    # Take a closer look at racks using networks from multiple buckets.
    # Load all the address assignments so we can give a detailed report.
    q = session.query(AddressAssignment)
    q = q.join(Network)
    q = q.filter_by(network_environment=def_env)
    q = q.reset_joinpoint()
    q = q.join(Interface, HardwareEntity, Model)
    q = q.filter(Model.model_type != VirtualMachineType.VirtualMachine)
    q = q.options(defer('service_address_id'),
                  contains_eager('network'),
                  defer('network.cidr'),
                  defer('network.name'),
                  defer('network.ip'),
                  defer('network.side'),
                  contains_eager('interface'),
                  defer('interface.mac'),
                  defer('interface.port_group'),
                  defer('interface.model_id'),
                  defer('interface.bootable'),
                  defer('interface.default_route'),
                  defer('interface.master_id'),
                  defer('interface.comments'),
                  contains_eager('interface.hardware_entity'),
                  defer('interface.hardware_entity.comments'),
                  defer('interface.hardware_entity.model_id'),
                  defer('interface.hardware_entity.serial_no'))
    q = q.filter(HardwareEntity.location_id.in_(violation_ids))
    # rack -> bucket -> list of offending address assignments
    addr_by_rack = defaultdict(dict)
    for addr in q:
        hw_rack = addr.interface.hardware_entity.location
        net_bucket = bunker_bucket[addr.network.location.bunker]
        if net_bucket not in addr_by_rack[hw_rack]:
            addr_by_rack[hw_rack][net_bucket] = []
        addr_by_rack[hw_rack][net_bucket].append(addr)
    errors = []
    for rack_id in violation_ids:
        rack = session.query(Rack).get((rack_id,))
        # NOTE: rack_bucket is rebound here from the earlier dict to the
        # bucket of this rack's own bunker (or None)
        rack_bucket = bunker_bucket[rack.bunker]
        buckets = addr_by_rack[rack]
        if rack_bucket:
            errors.append("Warning: {0} is part of {1:l}, but also "
                          "has networks from:".format(rack, rack.bunker))
        else:
            errors.append("Warning: {0} is not part of a bunker, but "
                          "it uses bunkerized networks:".format(rack))
        for bucket in sorted(buckets.keys()):
            if bucket == rack_bucket:
                continue
            hws = ["%s/%s" % (addr.interface.hardware_entity.printable_name,
                              addr.interface.name)
                   for addr in buckets[bucket]]
            # De-duplicate and sort for stable output
            hws = list(set(hws))
            hws.sort()
            names = ", ".join(hws)
            if bucket is None:
                bucket = "(No bucket)"
            errors.append(" {0}: {1}".format(bucket, names))
        errors.append("")
    result = "\n".join(errors)
    if updates:
        result += "\n\nRacks having incorrect bunker membership:\n\n"
        result += "\n".join(updates)
    return result
def render(self, session, logger, machine, chassis, switch, fqdn,
           interface, label, network_environment, map_to_primary, **kwargs):
    """Assign an additional IP address (with DNS record) to an interface.

    The FQDN defaults to "<primary>-<interface>[-<label>].<domain>".  The
    host plenary and DSDB are updated; IP allocation details are audited.

    Raises ArgumentError for the reserved 'hostname' label, reverse-PTR
    conflicts, or overlapping network ranges across interfaces.
    """
    # Exactly one of machine/chassis/switch is expected -- presumably
    # enforced by the option parser; TODO confirm
    if machine:
        hwtype = 'machine'
        hwname = machine
    elif chassis:
        hwtype = 'chassis'
        hwname = chassis
    elif switch:
        hwtype = 'switch'
        hwname = switch
    dbnet_env = NetworkEnvironment.get_unique_or_default(
        session, network_environment)
    dbhw_ent = HardwareEntity.get_unique(session, hwname,
                                         hardware_type=hwtype, compel=True)
    dbinterface = Interface.get_unique(session, hardware_entity=dbhw_ent,
                                       name=interface, compel=True)
    # Snapshot taken before any change, for the DSDB diff at the end
    oldinfo = DSDBRunner.snapshot_hw(dbhw_ent)
    audit_results = []
    ip = generate_ip(session, logger, dbinterface,
                     network_environment=dbnet_env,
                     audit_results=audit_results, **kwargs)
    if dbinterface.interface_type == "loopback":
        # Switch loopback interfaces may use e.g. the network address as an
        # IP address
        relaxed = True
    else:
        relaxed = False
    if not fqdn:
        if not dbhw_ent.primary_name:
            raise ArgumentError("{0} has no primary name, can not "
                                "auto-generate the DNS record. "
                                "Please specify --fqdn.".format(dbhw_ent))
        if label:
            name = "%s-%s-%s" % (dbhw_ent.primary_name.fqdn.name,
                                 interface, label)
        else:
            name = "%s-%s" % (dbhw_ent.primary_name.fqdn.name, interface)
        fqdn = "%s.%s" % (name, dbhw_ent.primary_name.fqdn.dns_domain)
    if label is None:
        label = ""
    elif label == "hostname":
        # When add_host sets up Zebra, it always uses the label 'hostname'.
        # Due to the primary IP being special, add_interface_address cannot
        # really emulate what add_host does, so tell the user where to look.
        raise ArgumentError("The 'hostname' label can only be managed "
                            "by add_host/del_host.")
    # The label will be used as an nlist key
    if label:
        validate_basic("label", label)
    # TODO: add allow_multi=True
    dbdns_rec, newly_created = grab_address(session, fqdn, ip, dbnet_env,
                                            relaxed=relaxed)
    ip = dbdns_rec.ip
    dbnetwork = dbdns_rec.network
    # A pre-existing, unassigned record was registered stand-alone in DSDB;
    # that entry must be removed before re-adding it as part of the host
    delete_old_dsdb_entry = not newly_created and not dbdns_rec.assignments
    # Reverse PTR control. Auxiliary addresses should point to the primary
    # name by default, with some exceptions.
    if (map_to_primary is None and dbhw_ent.primary_name and
            dbinterface.interface_type != "management" and
            dbdns_rec.fqdn.dns_environment ==
            dbhw_ent.primary_name.fqdn.dns_environment):
        map_to_primary = True
    if map_to_primary:
        if not dbhw_ent.primary_name:
            raise ArgumentError(
                "{0} does not have a primary name, cannot "
                "set the reverse DNS mapping.".format(dbhw_ent))
        if (dbhw_ent.primary_name.fqdn.dns_environment !=
                dbdns_rec.fqdn.dns_environment):
            raise ArgumentError("{0} lives in {1:l}, not {2:l}.".format(
                dbhw_ent, dbhw_ent.primary_name.fqdn.dns_environment,
                dbdns_rec.fqdn.dns_environment))
        if dbinterface.interface_type == "management":
            raise ArgumentError("The reverse PTR for management addresses "
                                "should not point to the primary name.")
        dbdns_rec.reverse_ptr = dbhw_ent.primary_name.fqdn
    # Check that the network ranges assigned to different interfaces
    # do not overlap even if the network environments are different, because
    # that would confuse routing on the host. E.g. if eth0 is an internal
    # and eth1 is an external interface, then using 192.168.1.10/24 on eth0
    # and using 192.168.1.20/26 on eth1 won't work.
    for addr in dbhw_ent.all_addresses():
        if addr.network != dbnetwork and \
                addr.network.network.overlaps(dbnetwork.network):
            raise ArgumentError("{0} in {1:l} used on {2:l} overlaps "
                                "requested {3:l} in "
                                "{4:l}.".format(
                                    addr.network,
                                    addr.network.network_environment,
                                    addr.interface, dbnetwork,
                                    dbnetwork.network_environment))
    assign_address(dbinterface, ip, dbnetwork, label=label)
    session.flush()
    dbhost = getattr(dbhw_ent, "host", None)
    if dbhost:
        plenary_info = PlenaryHost(dbhost, logger=logger)
        key = plenary_info.get_write_key()
        try:
            lock_queue.acquire(key)
            try:
                plenary_info.write(locked=True)
            except IncompleteError:
                # FIXME: if this command is used after "add host" but before
                # "make", then writing out the template will fail due to
                # required services not being assigned.  Ignore this error
                # for now.
                plenary_info.restore_stash()
            dsdb_runner = DSDBRunner(logger=logger)
            if delete_old_dsdb_entry:
                dsdb_runner.delete_host_details(dbdns_rec.fqdn, ip)
            dsdb_runner.update_host(dbhw_ent, oldinfo)
            dsdb_runner.commit_or_rollback("Could not add host to DSDB")
        except:
            # Any failure (including DSDB) rolls the plenary back
            plenary_info.restore_stash()
            raise
        finally:
            lock_queue.release(key)
    else:
        # No host object -- only DSDB needs updating
        dsdb_runner = DSDBRunner(logger=logger)
        if delete_old_dsdb_entry:
            dsdb_runner.delete_host_details(dbdns_rec.fqdn, ip)
        dsdb_runner.update_host(dbhw_ent, oldinfo)
        dsdb_runner.commit_or_rollback("Could not add host to DSDB")
    # Record IP-allocation decisions made by generate_ip()
    for name, value in audit_results:
        self.audit_result(session, name, value, **kwargs)
    return
def render(self, session, logger, startip, endip, dns_domain, prefix,
           **arguments):
    """Register a range of dynamic DHCP addresses with generated DNS names.

    Both endpoints must be on the same network, and every address in the
    range must be free of assignments and DNS records.  Names look like
    "<prefix>-<ip-with-dashes>.<dns_domain>".  New stubs are also pushed to
    DSDB.
    """
    if not prefix:
        prefix = 'dynamic'
    dbnet_env = NetworkEnvironment.get_unique_or_default(session)
    dbdns_env = DnsEnvironment.get_unique_or_default(session)
    startnet = get_net_id_from_ip(session, startip, dbnet_env)
    endnet = get_net_id_from_ip(session, endip, dbnet_env)
    if startnet != endnet:
        raise ArgumentError("IP addresses %s (%s) and %s (%s) must be on "
                            "the same subnet." %
                            (startip, startnet.ip, endip, endnet.ip))
    dbdns_domain = DnsDomain.get_unique(session, dns_domain, compel=True)
    # Row locks serialize concurrent allocations against the same domain
    # and network
    dbdns_domain.lock_row()
    startnet.lock_row()
    # Reject the range if any address in it is already assigned
    q = session.query(AddressAssignment.ip)
    q = q.filter_by(network=startnet)
    q = q.filter(AddressAssignment.ip >= startip)
    q = q.filter(AddressAssignment.ip <= endip)
    q = q.order_by(AddressAssignment.ip)
    conflicts = q.all()
    if conflicts:
        raise ArgumentError("Cannot allocate the address range because the "
                            "following IP addresses are already in use:\n" +
                            ", ".join([str(c.ip) for c in conflicts]))
    # No filtering on DNS environment. If an address is dynamic in one
    # environment, it should not be considered static in a different
    # environment.
    q = session.query(ARecord)
    q = q.filter_by(network=startnet)
    q = q.filter(ARecord.ip >= startip)
    q = q.filter(ARecord.ip <= endip)
    q = q.order_by(ARecord.ip)
    conflicts = q.all()
    if conflicts:
        raise ArgumentError("Cannot allocate the address range because the "
                            "following DNS records already exist:\n" +
                            "\n".join([format(c, "a") for c in conflicts]))
    dsdb_runner = DSDBRunner(logger=logger)
    with session.no_autoflush:
        # Iterate the range numerically, inclusive of both endpoints
        for ipint in range(int(startip), int(endip) + 1):
            ip = IPv4Address(ipint)
            check_ip_restrictions(startnet, ip)
            name = "%s-%s" % (prefix, str(ip).replace('.', '-'))
            dbfqdn = Fqdn.get_or_create(session, name=name,
                                        dns_domain=dbdns_domain,
                                        dns_environment=dbdns_env,
                                        preclude=True)
            dbdynamic_stub = DynamicStub(fqdn=dbfqdn, ip=ip, network=startnet)
            session.add(dbdynamic_stub)
            dsdb_runner.add_host_details(dbfqdn, ip)
    session.flush()
    # This may take some time if the range is big, so be verbose
    dsdb_runner.commit_or_rollback("Could not add addresses to DSDB",
                                   verbose=True)
    return
except AddressValueError, e: raise ArgumentError("Failed to parse the network address: %s" % e) except NetmaskValueError, e: raise ArgumentError("Failed to parse the netmask: %s" % e) if ip != address.network: raise ArgumentError("IP address %s is not a network address. " "Maybe you meant %s?" % (ip, address.network)) location = get_location(session, **arguments) if not type: type = 'unknown' if not side: side = 'a' dbnet_env = NetworkEnvironment.get_unique_or_default( session, network_environment) self.az.check_network_environment(dbuser, dbnet_env) # Check if the name is free. Network names are not unique in QIP and # there is no uniqueness constraint in AQDB, so only warn if the name is # already in use. q = session.query(Network).filter_by(name=network) dbnetwork = q.first() if dbnetwork: logger.client_info("WARNING: Network name %s is already used for " "address %s." % (network, str(dbnetwork.network))) # Check if the address is free try: dbnetwork = get_net_id_from_ip(session,
def render(self, session, dbuser, ip, netmask, prefixlen,
           network_environment, **arguments):
    """Split a network into smaller subnets with the given prefix length.

    The original network keeps its name and shrinks to the first subnet;
    the new subnets are named "<name>_2", "<name>_3", ... and inherit
    location and side.  Fails if any would-be subnet network/broadcast
    address is already assigned or in DNS.
    """
    if netmask:
        # There must me a faster way, but this is the easy one
        net = IPv4Network("127.0.0.0/%s" % netmask)
        prefixlen = net.prefixlen
    if prefixlen < 8 or prefixlen > 32:
        raise ArgumentError("The prefix length must be between 8 and 32.")
    dbnet_env = NetworkEnvironment.get_unique_or_default(
        session, network_environment)
    self.az.check_network_environment(dbuser, dbnet_env)
    dbnetwork = get_net_id_from_ip(session, ip,
                                   network_environment=dbnet_env)
    if prefixlen <= dbnetwork.cidr:
        raise ArgumentError("The specified --prefixlen must be bigger "
                            "than the current value.")
    subnets = dbnetwork.network.subnet(new_prefix=prefixlen)
    # Collect IP addresses that will become network/broadcast addresses
    # after the split
    bad_ips = []
    for subnet in subnets:
        bad_ips.append(subnet.ip)
        bad_ips.append(subnet.broadcast)
    q = session.query(AddressAssignment.ip)
    q = q.filter_by(network=dbnetwork)
    q = q.filter(AddressAssignment.ip.in_(bad_ips))
    used_addrs = q.all()
    if used_addrs:
        raise ArgumentError("Network split failed, because the following "
                            "subnet IP and/or broadcast addresses are "
                            "assigned to hosts: %s" %
                            ", ".join([str(addr.ip) for addr in used_addrs]))
    q = session.query(ARecord.ip)
    q = q.filter_by(network=dbnetwork)
    q = q.filter(ARecord.ip.in_(bad_ips))
    used_addrs = q.all()
    if used_addrs:
        raise ArgumentError("Network split failed, because the following "
                            "subnet IP and/or broadcast addresses are "
                            "registered in the DNS: %s" %
                            ", ".join([str(addr.ip) for addr in used_addrs]))
    # Reason of the initial value: we keep the name of the first segment
    # (e.g. "foo"), and the next segment will be called "foo_2"
    name_idx = 2
    dbnets = []
    for subnet in dbnetwork.network.subnet(new_prefix=prefixlen):
        # Skip the original
        if subnet.ip == dbnetwork.ip:
            continue
        # Generate a new name. Make it unique, even if the DB does not
        # enforce that currently
        while True:
            # TODO: check if the new name is too long
            name = "%s_%d" % (dbnetwork.name, name_idx)
            name_idx += 1
            q = session.query(Network)
            q = q.filter_by(network_environment=dbnet_env)
            q = q.filter_by(name=name)
            if q.count() == 0:
                break
            # Should not happen...
            if name_idx > 1000:  # pragma: no cover
                raise AquilonError(
                    "Could not generate a unique network "
                    "name in a reasonable time, bailing out")
        # Inherit location & side from the supernet
        newnet = Network(
            name=name,
            network=subnet,
            network_environment=dbnet_env,
            location=dbnetwork.location,
            side=dbnetwork.side,
            comments="Created by splitting {0:a}".format(dbnetwork))
        session.add(newnet)
        dbnets.append(newnet)
    # Shrink the original network to the first subnet
    dbnetwork.cidr = prefixlen
    session.flush()
    # Re-home addresses/records that now belong to the new subnets
    for newnet in dbnets:
        fix_foreign_links(session, dbnetwork, newnet)
    session.flush()
def render(self, session, logger, fqdn, ip, dns_environment,
           network_environment, **arguments):
    """Delete an A record selected by FQDN and/or IP in a DNS environment.

    Refuses to delete primary names, service addresses, and records whose
    IP would otherwise be left in use without any DNS entry.  Removes the
    entry from DSDB when operating on the default DNS environment.
    """
    if network_environment:
        if not isinstance(network_environment, NetworkEnvironment):
            network_environment = NetworkEnvironment.get_unique_or_default(
                session, network_environment)
        # The network environment implies the DNS environment unless the
        # caller gave one explicitly
        if not dns_environment:
            dns_environment = network_environment.dns_environment
    dbdns_env = DnsEnvironment.get_unique(session, dns_environment,
                                          compel=True)
    with DeleteKey("system", logger=logger):
        # We can't use get_unique() here, since we always want to filter by
        # DNS environment, even if no FQDN was given
        q = session.query(ARecord)
        if ip:
            q = q.filter_by(ip=ip)
        q = q.join(ARecord.fqdn)
        q = q.options(contains_eager('fqdn'))
        q = q.filter_by(dns_environment=dbdns_env)
        if fqdn:
            (name, dbdns_domain) = parse_fqdn(session, fqdn)
            q = q.filter_by(name=name)
            q = q.filter_by(dns_domain=dbdns_domain)
        try:
            dbaddress = q.one()
        except NoResultFound:
            parts = []
            if fqdn:
                parts.append(fqdn)
            if ip:
                parts.append("ip %s" % ip)
            raise NotFoundException("DNS Record %s not found." %
                                    ", ".join(parts))
        except MultipleResultsFound:
            parts = []
            if fqdn:
                parts.append(fqdn)
            if ip:
                parts.append("ip %s" % ip)
            raise NotFoundException("DNS Record %s is not unique." %
                                    ", ".join(parts))
        if dbaddress.hardware_entity:
            raise ArgumentError("DNS Record {0:a} is the primary name of "
                                "{1:l}, therefore it cannot be "
                                "deleted.".format(dbaddress,
                                                  dbaddress.hardware_entity))
        if dbaddress.service_address:
            # TODO: print the holder object
            raise ArgumentError("DNS Record {0:a} is used as a service "
                                "address, therefore it cannot be deleted."
                                .format(dbaddress))
        # Do not allow deleting the DNS record if the IP address is still in
        # use - except if there are other DNS records having the same
        # address
        if dbaddress.assignments:
            last_use = []
            # FIXME: race condition here, we should use
            # SELECT ... FOR UPDATE
            for addr in dbaddress.assignments:
                if len(addr.dns_records) == 1:
                    last_use.append(addr)
            if last_use:
                # BUGFIX: separator was " ," (space before comma), and the
                # message used the ip *parameter*, which is None when the
                # record was selected by --fqdn only -- use the record's IP
                users = ", ".join([format(addr.interface, "l")
                                   for addr in last_use])
                raise ArgumentError("IP address %s is still in use by %s." %
                                    (dbaddress.ip, users))
        ip = dbaddress.ip
        old_fqdn = str(dbaddress.fqdn)
        old_comments = dbaddress.comments
        delete_dns_record(dbaddress)
        session.flush()
        # Only the default DNS environment is mirrored in DSDB
        if dbdns_env.is_default:
            dsdb_runner = DSDBRunner(logger=logger)
            dsdb_runner.delete_host_details(old_fqdn, ip,
                                            comments=old_comments)
            dsdb_runner.commit_or_rollback()
    return
def render(self, session, logger, machine, chassis, switch, interface, fqdn,
           ip, label, keep_dns, network_environment, **kwargs):
    """Remove an address assignment from a hardware entity's interface.

    The address may be selected by --ip, --label, or --fqdn.  The matching
    DNS records are deleted too, unless --keep_dns was given or the address
    is still assigned elsewhere.  The host plenary and DSDB are updated.

    Raises ArgumentError on bad selectors, on environment mismatch, or when
    the address is the hardware entity's primary IP.
    """
    # Exactly one of machine/chassis/switch is expected -- presumably
    # enforced by the option parser; TODO confirm
    if machine:
        hwtype = "machine"
        hwname = machine
    elif chassis:
        hwtype = "chassis"
        hwname = chassis
    elif switch:
        hwtype = "switch"
        hwname = switch
    dbhw_ent = HardwareEntity.get_unique(session, hwname,
                                         hardware_type=hwtype, compel=True)
    dbinterface = Interface.get_unique(session, hardware_entity=dbhw_ent,
                                       name=interface, compel=True)
    dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                         network_environment)
    # Snapshot taken before any change, for the DSDB diff at the end
    oldinfo = DSDBRunner.snapshot_hw(dbhw_ent)
    if fqdn:
        # Resolve the FQDN to an IP; the IP lookup below then finds the
        # assignment
        dbdns_rec = ARecord.get_unique(session, fqdn=fqdn,
                                       dns_environment=dbnet_env.dns_environment,
                                       compel=True)
        ip = dbdns_rec.ip
    addr = None
    if ip:
        addr = first_of(dbinterface.assignments, lambda x: x.ip == ip)
        if not addr:
            raise ArgumentError("{0} does not have IP address {1} assigned to "
                                "it.".format(dbinterface, ip))
    elif label is not None:
        addr = first_of(dbinterface.assignments, lambda x: x.label == label)
        if not addr:
            raise ArgumentError("{0} does not have an address with label "
                                "{1}.".format(dbinterface, label))
    if not addr:
        raise ArgumentError("Please specify the address to be removed "
                            "using either --ip, --label, or --fqdn.")
    dbnetwork = addr.network
    ip = addr.ip
    if dbnetwork.network_environment != dbnet_env:
        raise ArgumentError(
            "The specified address lives in {0:l}, not in "
            "{1:l}. Use the --network_environment option "
            "to select the correct environment.".format(
                dbnetwork.network_environment, dbnet_env)
        )
    # Forbid removing the primary name
    if ip == dbhw_ent.primary_ip:
        raise ArgumentError("The primary IP address of a hardware entity "
                            "cannot be removed.")
    dbinterface.assignments.remove(addr)
    # Check if the address was assigned to multiple interfaces, and remove
    # the DNS entries if this was the last use
    q = session.query(AddressAssignment)
    q = q.filter_by(network=dbnetwork)
    q = q.filter_by(ip=ip)
    other_uses = q.all()
    if not other_uses and not keep_dns:
        q = session.query(ARecord)
        q = q.filter_by(network=dbnetwork)
        q = q.filter_by(ip=ip)
        q = q.join(ARecord.fqdn)
        q = q.filter_by(dns_environment=dbnet_env.dns_environment)
        # Python 2 map() used for its side effect: delete every record
        map(delete_dns_record, q.all())
    session.flush()
    dbhost = getattr(dbhw_ent, "host", None)
    if dbhost:
        plenary_info = PlenaryHost(dbhost, logger=logger)
        key = plenary_info.get_write_key()
        try:
            lock_queue.acquire(key)
            try:
                plenary_info.write(locked=True)
            except IncompleteError:
                # FIXME: if this command is used after "add host" but before
                # "make", then writing out the template will fail due to
                # required services not being assigned.  Ignore this error
                # for now.
                plenary_info.restore_stash()
            dsdb_runner = DSDBRunner(logger=logger)
            dsdb_runner.update_host(dbhw_ent, oldinfo)
            if not other_uses and keep_dns:
                # The DNS record survives; re-register it in DSDB on its own
                # since it no longer belongs to the host
                q = session.query(ARecord)
                q = q.filter_by(network=dbnetwork)
                q = q.filter_by(ip=ip)
                dbdns_rec = q.first()
                dsdb_runner.add_host_details(dbdns_rec.fqdn, ip)
            dsdb_runner.commit_or_rollback("Could not add host to DSDB")
        except:
            # Any failure (including DSDB) rolls the plenary back
            plenary_info.restore_stash()
            raise
        finally:
            lock_queue.release(key)
    else:
        # No host object -- only DSDB needs updating
        dsdb_runner = DSDBRunner(logger=logger)
        dsdb_runner.update_host(dbhw_ent, oldinfo)
        dsdb_runner.commit_or_rollback("Could not add host to DSDB")
    return
def render(self, session, fqdn, dns_environment, dns_domain, shortname,
           record_type, ip, network, network_environment, target,
           target_domain, primary_name, used, reverse_override, reverse_ptr,
           fullinfo, style, **kwargs):
    """Search DNS records by a combination of optional filters.

    Returns the matching records (full objects, or just FQDNs for raw style
    without --fullinfo).  Join/filter order matters: filters relying on a
    joinpoint must follow the corresponding join().
    """
    if record_type:
        record_type = record_type.strip().lower()
        if record_type in DNS_RRTYPE_MAP:
            cls = DNS_RRTYPE_MAP[record_type]
        else:
            cls = DnsRecord.polymorphic_subclass(record_type,
                                                 "Unknown DNS record type")
        q = session.query(cls)
    else:
        # No type restriction: query the base class polymorphically
        q = session.query(DnsRecord)
        q = q.with_polymorphic('*')
    dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                         network_environment)
    if network_environment:
        # The network environment determines the DNS environment
        dbdns_env = dbnet_env.dns_environment
    else:
        dbdns_env = DnsEnvironment.get_unique_or_default(session,
                                                         dns_environment)
    if fqdn:
        dbfqdn = Fqdn.get_unique(session, fqdn=fqdn,
                                 dns_environment=dbdns_env, compel=True)
        q = q.filter_by(fqdn=dbfqdn)
    # Always join Fqdn so the environment filter and ordering can use it
    q = q.join((Fqdn, DnsRecord.fqdn_id == Fqdn.id))
    q = q.filter_by(dns_environment=dbdns_env)
    q = q.options(contains_eager('fqdn'))
    if dns_domain:
        dbdns_domain = DnsDomain.get_unique(session, dns_domain, compel=True)
        q = q.filter_by(dns_domain=dbdns_domain)
    if shortname:
        q = q.filter_by(name=shortname)
    q = q.join(DnsDomain)
    q = q.options(contains_eager('fqdn.dns_domain'))
    q = q.order_by(Fqdn.name, DnsDomain.name)
    q = q.reset_joinpoint()
    if ip:
        q = q.join(Network)
        q = q.filter_by(network_environment=dbnet_env)
        q = q.reset_joinpoint()
        q = q.filter(ARecord.ip == ip)
    if network:
        dbnetwork = Network.get_unique(session, network,
                                       network_environment=dbnet_env,
                                       compel=True)
        q = q.filter(ARecord.network == dbnetwork)
    if target:
        dbtarget = Fqdn.get_unique(session, fqdn=target,
                                   dns_environment=dbdns_env, compel=True)
        # Aliases and SRV records are the record types that have targets
        q = q.filter(or_(Alias.target == dbtarget,
                         SrvRecord.target == dbtarget))
    if target_domain:
        dbdns_domain = DnsDomain.get_unique(session, target_domain,
                                            compel=True)
        # Alias the Fqdn table a second time for the target's FQDN
        TargetFqdn = aliased(Fqdn)
        q = q.join((TargetFqdn, or_(Alias.target_id == TargetFqdn.id,
                                    SrvRecord.target_id == TargetFqdn.id)))
        q = q.filter(TargetFqdn.dns_domain == dbdns_domain)
    if primary_name is not None:
        # True: only records that are a hardware entity's primary name
        if primary_name:
            q = q.filter(DnsRecord.hardware_entity.has())
        else:
            q = q.filter(~DnsRecord.hardware_entity.has())
    if used is not None:
        # "Used" means there is an address assignment for the same
        # (network, ip) pair
        if used:
            q = q.join(AddressAssignment,
                       and_(ARecord.network_id == AddressAssignment.network_id,
                            ARecord.ip == AddressAssignment.ip))
        else:
            q = q.outerjoin(AddressAssignment,
                            and_(ARecord.network_id ==
                                 AddressAssignment.network_id,
                                 ARecord.ip == AddressAssignment.ip))
            q = q.filter(AddressAssignment.id == None)
        q = q.reset_joinpoint()
    if reverse_override is not None:
        if reverse_override:
            q = q.filter(ARecord.reverse_ptr.has())
        else:
            q = q.filter(~ARecord.reverse_ptr.has())
    if reverse_ptr:
        dbtarget = Fqdn.get_unique(session, fqdn=reverse_ptr,
                                   dns_environment=dbdns_env, compel=True)
        q = q.filter(ARecord.reverse_ptr == dbtarget)
    if fullinfo or style != "raw":
        # Pre-load what the detailed/CSV formatters will touch
        q = q.options(undefer('comments'),
                      subqueryload('hardware_entity'),
                      lazyload('hardware_entity.primary_name'),
                      undefer('alias_cnt'))
        return q.all()
    else:
        return StringAttributeList(q.all(), 'fqdn')
def render(self, session, logger, hostname, machine, archetype, buildstatus,
           personality, osname, osversion, service, instance, model,
           machine_type, vendor, serial, cluster, guest_on_cluster,
           guest_on_share, member_cluster_share, domain, sandbox, branch,
           sandbox_owner, dns_domain, shortname, mac, ip, networkip,
           network_environment, exact_location, server_of_service,
           server_of_instance, grn, eon_id, fullinfo, **arguments):
    """Search for hosts matching any combination of the given filters.

    Builds one Host query, adding joins and filters for each option that
    was supplied.  Returns full Host objects if --fullinfo was given,
    otherwise a SimpleHostList of primary names.

    Bug fix: the "no shares found" error in the member_cluster_share
    branch previously reported guest_on_share (copy-paste from the
    branch above), which is None/wrong in that code path.
    """
    dbnet_env = NetworkEnvironment.get_unique_or_default(
        session, network_environment)
    q = session.query(Host)
    if machine:
        dbmachine = Machine.get_unique(session, machine, compel=True)
        q = q.filter_by(machine=dbmachine)

    # Add the machine definition and the primary name. Use aliases to make
    # sure the end result will be ordered by primary name.
    PriDns = aliased(DnsRecord)
    PriFqdn = aliased(Fqdn)
    PriDomain = aliased(DnsDomain)
    q = q.join(Machine,
               (PriDns, PriDns.id == Machine.primary_name_id),
               (PriFqdn, PriDns.fqdn_id == PriFqdn.id),
               (PriDomain, PriFqdn.dns_domain_id == PriDomain.id))
    q = q.order_by(PriFqdn.name, PriDomain.name)
    q = q.options(contains_eager('machine'),
                  contains_eager('machine.primary_name', alias=PriDns),
                  contains_eager('machine.primary_name.fqdn', alias=PriFqdn),
                  contains_eager('machine.primary_name.fqdn.dns_domain',
                                 alias=PriDomain))
    q = q.reset_joinpoint()

    # Hardware-specific filters
    dblocation = get_location(session, **arguments)
    if dblocation:
        if exact_location:
            q = q.filter(Machine.location == dblocation)
        else:
            childids = dblocation.offspring_ids()
            q = q.filter(Machine.location_id.in_(childids))
    if model or vendor or machine_type:
        subq = Model.get_matching_query(session, name=model, vendor=vendor,
                                        machine_type=machine_type,
                                        compel=True)
        q = q.filter(Machine.model_id.in_(subq))
    if serial:
        self.deprecated_option("serial",
                               "Please use search machine --serial instead.",
                               logger=logger, **arguments)
        q = q.filter(Machine.serial_no == serial)

    # DNS IP address related filters
    if mac or ip or networkip or hostname or dns_domain or shortname:
        # Inner joins are cheaper than outer joins, so make some effort to
        # use inner joins when possible
        if mac or ip or networkip:
            q = q.join(Interface)
        else:
            q = q.outerjoin(Interface)
        if ip or networkip:
            q = q.join(AddressAssignment, Network, from_joinpoint=True)
        else:
            q = q.outerjoin(AddressAssignment, Network, from_joinpoint=True)

        if mac:
            self.deprecated_option("mac", "Please use search machine "
                                   "--mac instead.", logger=logger,
                                   **arguments)
            q = q.filter(Interface.mac == mac)
        if ip:
            q = q.filter(AddressAssignment.ip == ip)
            q = q.filter(Network.network_environment == dbnet_env)
        if networkip:
            dbnetwork = get_network_byip(session, networkip, dbnet_env)
            q = q.filter(AddressAssignment.network == dbnetwork)

        dbdns_domain = None
        if hostname:
            (shortname, dbdns_domain) = parse_fqdn(session, hostname)
        if dns_domain:
            dbdns_domain = DnsDomain.get_unique(session, dns_domain,
                                                compel=True)

        if shortname or dbdns_domain:
            # A name may match either via the primary name (already joined
            # above) or via any interface address's A record.
            ARecAlias = aliased(ARecord)
            ARecFqdn = aliased(Fqdn)
            q = q.outerjoin(
                (ARecAlias, and_(ARecAlias.ip == AddressAssignment.ip,
                                 ARecAlias.network_id ==
                                 AddressAssignment.network_id)),
                (ARecFqdn, ARecAlias.fqdn_id == ARecFqdn.id))
            if shortname:
                q = q.filter(or_(ARecFqdn.name == shortname,
                                 PriFqdn.name == shortname))
            if dbdns_domain:
                q = q.filter(or_(ARecFqdn.dns_domain == dbdns_domain,
                                 PriFqdn.dns_domain == dbdns_domain))
        q = q.reset_joinpoint()

    (dbbranch, dbauthor) = get_branch_and_author(session, logger,
                                                 domain=domain,
                                                 sandbox=sandbox,
                                                 branch=branch)
    if sandbox_owner:
        dbauthor = get_user_principal(session, sandbox_owner)
    if dbbranch:
        q = q.filter_by(branch=dbbranch)
    if dbauthor:
        q = q.filter_by(sandbox_author=dbauthor)

    if archetype:
        # Added to the searches as appropriate below.
        dbarchetype = Archetype.get_unique(session, archetype, compel=True)
    if personality and archetype:
        dbpersonality = Personality.get_unique(session,
                                               archetype=dbarchetype,
                                               name=personality,
                                               compel=True)
        q = q.filter_by(personality=dbpersonality)
    elif personality:
        PersAlias = aliased(Personality)
        q = q.join(PersAlias).filter_by(name=personality)
        q = q.reset_joinpoint()
    elif archetype:
        PersAlias = aliased(Personality)
        q = q.join(PersAlias).filter_by(archetype=dbarchetype)
        q = q.reset_joinpoint()

    if buildstatus:
        dbbuildstatus = HostLifecycle.get_unique(session, buildstatus,
                                                 compel=True)
        q = q.filter_by(status=dbbuildstatus)

    if osname and osversion and archetype:
        # archetype was already resolved above
        dbos = OperatingSystem.get_unique(session, name=osname,
                                          version=osversion,
                                          archetype=dbarchetype,
                                          compel=True)
        q = q.filter_by(operating_system=dbos)
    elif osname or osversion:
        q = q.join('operating_system')
        if osname:
            q = q.filter_by(name=osname)
        if osversion:
            q = q.filter_by(version=osversion)
        q = q.reset_joinpoint()

    if service:
        dbservice = Service.get_unique(session, service, compel=True)
        if instance:
            dbsi = get_service_instance(session, dbservice, instance)
            q = q.filter(Host.services_used.contains(dbsi))
        else:
            q = q.join('services_used')
            q = q.filter_by(service=dbservice)
            q = q.reset_joinpoint()
    elif instance:
        q = q.join('services_used')
        q = q.filter_by(name=instance)
        q = q.reset_joinpoint()

    if server_of_service:
        dbserver_service = Service.get_unique(session, server_of_service,
                                              compel=True)
        if server_of_instance:
            dbssi = get_service_instance(session, dbserver_service,
                                         server_of_instance)
            q = q.join('_services_provided')
            q = q.filter_by(service_instance=dbssi)
            q = q.reset_joinpoint()
        else:
            q = q.join('_services_provided', 'service_instance')
            q = q.filter_by(service=dbserver_service)
            q = q.reset_joinpoint()
    elif server_of_instance:
        q = q.join('_services_provided', 'service_instance')
        q = q.filter_by(name=server_of_instance)
        q = q.reset_joinpoint()

    if cluster:
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        if isinstance(dbcluster, MetaCluster):
            q = q.join('_cluster', 'cluster', '_metacluster')
            q = q.filter_by(metacluster=dbcluster)
        else:
            q = q.filter_by(cluster=dbcluster)
        q = q.reset_joinpoint()
    if guest_on_cluster:
        # TODO: this does not handle metaclusters according to Wes
        dbcluster = Cluster.get_unique(session, guest_on_cluster,
                                       compel=True)
        q = q.join('machine', VirtualMachine, ClusterResource)
        q = q.filter_by(cluster=dbcluster)
        q = q.reset_joinpoint()
    if guest_on_share:
        #v2
        v2shares = session.query(Share.id).filter_by(
            name=guest_on_share).all()
        if not v2shares:
            raise NotFoundException("No shares found with name "
                                    "{0}.".format(guest_on_share))
        NasAlias = aliased(VirtualDisk)
        q = q.join('machine', 'disks', (NasAlias, NasAlias.id == Disk.id))
        q = q.filter(NasAlias.share_id.in_(map(lambda s: s[0], v2shares)))
        q = q.reset_joinpoint()
    if member_cluster_share:
        #v2
        v2shares = session.query(Share.id).filter_by(
            name=member_cluster_share).all()
        if not v2shares:
            # Fixed: report member_cluster_share, not guest_on_share (the
            # original message used the wrong variable for this branch).
            raise NotFoundException("No shares found with name "
                                    "{0}.".format(member_cluster_share))
        NasAlias = aliased(VirtualDisk)
        q = q.join('_cluster', 'cluster', 'resholder', VirtualMachine,
                   'machine', 'disks', (NasAlias, NasAlias.id == Disk.id))
        q = q.filter(NasAlias.share_id.in_(map(lambda s: s[0], v2shares)))
        q = q.reset_joinpoint()

    if grn or eon_id:
        dbgrn = lookup_grn(session, grn, eon_id, autoupdate=False)
        # A host matches if it, or its personality, is owned by or mapped
        # to the GRN.
        persq = session.query(Personality.id)
        persq = persq.outerjoin(PersonalityGrnMap)
        persq = persq.filter(or_(Personality.owner_eon_id == dbgrn.eon_id,
                                 PersonalityGrnMap.eon_id == dbgrn.eon_id))
        q = q.outerjoin(HostGrnMap)
        q = q.filter(or_(Host.owner_eon_id == dbgrn.eon_id,
                         HostGrnMap.eon_id == dbgrn.eon_id,
                         Host.personality_id.in_(persq.subquery())))
        q = q.reset_joinpoint()

    if fullinfo:
        return q.all()
    return SimpleHostList(q.all())
def render(self, session, service, instance, archetype, personality,
           networkip, include_parents, **arguments):
    """Show service map entries matching the given filters.

    Depending on whether archetype/personality were given, one or two
    queries are built (plain ServiceMap and/or PersonalityServiceMap) and
    the same filters are applied to each; the combined results are
    returned as a ServiceMapList.
    """
    dbservice = service and Service.get_unique(session, service,
                                               compel=True) or None
    dblocation = get_location(session, **arguments)
    queries = []
    # The current logic basically shoots for exact match when given
    # (like exact personality maps only or exact archetype maps
    # only), or "any" if an exact spec isn't given.
    if archetype and personality:
        dbpersona = Personality.get_unique(session, name=personality,
                                           archetype=archetype, compel=True)
        q = session.query(PersonalityServiceMap)
        q = q.filter_by(personality=dbpersona)
        queries.append(q)
    elif personality:
        # Alternately, this could throw an error and ask for archetype.
        q = session.query(PersonalityServiceMap)
        q = q.join('personality').filter_by(name=personality)
        q = q.reset_joinpoint()
        queries.append(q)
    elif archetype:
        # Alternately, this could throw an error and ask for personality.
        q = session.query(PersonalityServiceMap)
        q = q.join('personality', 'archetype').filter_by(name=archetype)
        q = q.reset_joinpoint()
        queries.append(q)
    else:
        # No archetype/personality restriction: search both map types.
        queries.append(session.query(ServiceMap))
        queries.append(session.query(PersonalityServiceMap))

    if dbservice:
        for i in range(len(queries)):
            queries[i] = queries[i].join('service_instance')
            queries[i] = queries[i].filter_by(service=dbservice)
            queries[i] = queries[i].reset_joinpoint()
    if instance:
        for i in range(len(queries)):
            queries[i] = queries[i].join('service_instance')
            queries[i] = queries[i].filter_by(name=instance)
            queries[i] = queries[i].reset_joinpoint()
    # Nothing fancy for now - just show any relevant explicit bindings.
    if dblocation:
        for i in range(len(queries)):
            if include_parents:
                # Match the location itself and all its ancestors; pull
                # the mapped class out of the query to get its column.
                base_cls = queries[i].column_descriptions[0]["expr"]
                col = base_cls.location_id
                queries[i] = queries[i].filter(
                    col.in_(dblocation.parent_ids()))
            else:
                queries[i] = queries[i].filter_by(location=dblocation)
    if networkip:
        dbnet_env = NetworkEnvironment.get_unique_or_default(session)
        dbnetwork = get_network_byip(session, networkip, dbnet_env)
        for i in range(len(queries)):
            queries[i] = queries[i].filter_by(network=dbnetwork)

    results = ServiceMapList()
    for q in queries:
        results.extend(q.all())

    if service and instance and dblocation:
        # This should be an exact match.  (Personality doesn't
        # matter... either it was given and it should be an
        # exact match for PersonalityServiceMap or it wasn't
        # and this should be an exact match for ServiceMap.)
        if not results:
            raise NotFoundException("No matching map found.")
    return results
def render(self, session, logger, fillnetwork, **arguments):
    """Create a dynamic range covering an entire network.

    Resolves --fillnetwork to a network in the default network
    environment, fills in the start/end IPs spanning all usable host
    addresses, and delegates to the regular add_dynamic_range command.
    """
    default_env = NetworkEnvironment.get_unique_or_default(session)
    target_net = Network.get_unique(session, fillnetwork,
                                    network_environment=default_env,
                                    compel=True)
    # Cover everything from the first usable host up to (but excluding)
    # the broadcast address.
    arguments.update(startip=target_net.first_usable_host,
                     endip=target_net.broadcast - 1)
    return CommandAddDynamicRange.render(self, session, logger, **arguments)
def render(self, session, network, network_environment, ip, type, side,
           machine, fqdn, cluster, pg, has_dynamic_ranges, fullinfo,
           **arguments):
    """Return a network matching the parameters.

    Some of the search terms can only return a unique network. For
    those (like ip and fqdn) we proceed with the query anyway.  This
    allows for quick scripted tests like "is the network for X.X.X.X
    a tor_net2?".
    """
    dbnet_env = NetworkEnvironment.get_unique_or_default(
        session, network_environment)
    q = session.query(Network)
    q = q.filter_by(network_environment=dbnet_env)
    if network:
        # Note: the network name is not unique (neither in QIP)
        q = q.filter_by(name=network)
    if ip:
        dbnetwork = get_net_id_from_ip(session, ip, dbnet_env)
        q = q.filter_by(id=dbnetwork.id)
    if type:
        q = q.filter_by(network_type=type)
    if side:
        q = q.filter_by(side=side)
    if machine:
        dbmachine = Machine.get_unique(session, machine, compel=True)
        vlans = []
        if dbmachine.cluster and dbmachine.cluster.switch:
            # If this is a VM on a cluster, consult the VLANs.  There
            # could be functionality here for real hardware to consult
            # interface port groups... there's no real use case yet.
            vlans = [VlanInfo.get_vlan_id(session, i.port_group)
                     for i in dbmachine.interfaces if i.port_group]
            if vlans:
                q = q.join('observed_vlans')
                q = q.filter_by(switch=dbmachine.cluster.switch)
                q = q.filter(ObservedVlan.vlan_id.in_(vlans))
                q = q.reset_joinpoint()
        if not vlans:
            # Fall back to the networks the machine's addresses live on.
            networks = [addr.network.id
                        for addr in dbmachine.all_addresses()]
            if not networks:
                msg = "Machine %s has no interfaces " % dbmachine.label
                if dbmachine.cluster:
                    msg += "with a portgroup or "
                msg += "assigned to a network."
                raise ArgumentError(msg)
            q = q.filter(Network.id.in_(networks))
    if fqdn:
        # Resolve the name to A records, then to the networks the
        # resolved IPs belong to.
        (short, dbdns_domain) = parse_fqdn(session, fqdn)
        dnsq = session.query(ARecord.ip)
        dnsq = dnsq.join(ARecord.fqdn)
        dnsq = dnsq.filter_by(name=short)
        dnsq = dnsq.filter_by(dns_domain=dbdns_domain)
        networks = [get_net_id_from_ip(session, addr.ip, dbnet_env).id
                    for addr in dnsq.all()]
        q = q.filter(Network.id.in_(networks))
    if cluster:
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        if dbcluster.switch:
            q = q.join('observed_vlans')
            q = q.filter_by(switch=dbcluster.switch)
            q = q.reset_joinpoint()
        else:
            # NOTE(review): getattr() here has no default, so a host whose
            # machine has no primary_name (None) would raise
            # AttributeError rather than be skipped — possibly intended
            # getattr(..., "network", None); confirm against callers.
            net_ids = [h.machine.primary_name.network.id
                       for h in dbcluster.hosts
                       if getattr(h.machine.primary_name, "network")]
            q = q.filter(Network.id.in_(net_ids))
    if pg:
        vlan = VlanInfo.get_vlan_id(session, pg, compel=ArgumentError)
        q = q.join('observed_vlans')
        q = q.filter_by(vlan_id=vlan)
        q = q.reset_joinpoint()
    dblocation = get_location(session, **arguments)
    if dblocation:
        if arguments.get('exact_location'):
            q = q.filter_by(location=dblocation)
        else:
            childids = dblocation.offspring_ids()
            q = q.filter(Network.location_id.in_(childids))
    if has_dynamic_ranges:
        # Keep only networks that have at least one DynamicStub record.
        q = q.filter(exists([DynamicStub.dns_record_id],
                            from_obj=DynamicStub.__table__.join(
                                ARecord.__table__)).where(
                                    Network.id == DynamicStub.network_id))
    q = q.order_by(Network.ip)
    if fullinfo:
        q = q.options(undefer('comments'))
        return q.all()
    return ShortNetworkList(q.all())
def discover_switch(session, logger, config, dbswitch, dryrun):
    """ Perform switch discovery

    This function can operate in two modes:

    - If dryrun is False, it performs all the operations required to bring
      the definition of the switch in AQDB in line with the discovered status
      in one transaction.

    - If dryrun is True, it returns the list of individual "aq" commands the
      user should execute to get the switch into the desired state.

    In order to make the core logic less complex, simple actions like
    adding/deleting IP addresses and interfaces are implemented as helper
    functions, and those helper functions hide the differences between the
    normal and dryrun modes from the rest of the code.
    """
    importer = config.get("broker", "switch_discover")

    # In dryrun mode the commands/warnings to show the user are collected
    # here; the helpers below append to it.
    results = []

    dbnet_env = NetworkEnvironment.get_unique_or_default(session)

    def aqcmd(cmd, *args):
        """ Helper function to print an AQ command to be executed by the user """
        quoted_args = [quote(str(arg)) for arg in args]
        results.append("aq %s --switch %s %s" % (cmd, dbswitch.primary_name,
                                                 " ".join(quoted_args)))

    def update_switch(dbmodel, serial_no, comments):
        """ Helper for updating core switch attributes, honouring dryrun """
        if dryrun:
            args = ["update_switch"]
            if dbmodel and dbmodel != dbswitch.model:
                args.extend(["--model", dbmodel.name,
                             "--vendor", dbmodel.vendor.name])
            if serial_no and serial_no != dbswitch.serial_no:
                args.extend(["--serial", serial_no])
            if comments and comments != dbswitch.comments:
                args.extend(["--comments", comments])
            aqcmd(*args)
        else:
            if dbmodel:
                dbswitch.model = dbmodel
            if serial_no:
                dbswitch.serial_no = serial_no
            if comments:
                dbswitch.comments = comments

    def del_address(iface, addr):
        """ Helper for deleting an IP address, honouring dryrun """
        if dryrun:
            aqcmd("del_interface_address", "--interface", iface.name,
                  "--ip", addr.ip)
        else:
            iface.assignments.remove(addr)
            session.flush()
            # If no other assignment uses this (network, ip), clean up the
            # now-orphaned DNS records too.
            q = session.query(AddressAssignment.id)
            q = q.filter_by(network=addr.network)
            q = q.filter_by(ip=addr.ip)
            if not q.first():
                q = session.query(ARecord)
                q = q.filter_by(network=addr.network)
                q = q.filter_by(ip=addr.ip)
                map(delete_dns_record, q.all())

    def del_interface(iface):
        """ Helper for deleting an interface, honouring dryrun """
        if dryrun:
            aqcmd("del_interface", "--interface", iface.name)
        else:
            dbswitch.interfaces.remove(iface)

    def do_rename_interface(iface, new_name):
        """ Helper for renaming an interface, honouring dryrun """
        if dryrun:
            aqcmd("update_interface", "--interface", iface.name,
                  "--rename_to", new_name)
        else:
            rename_interface(session, iface, new_name)

    def add_interface(ifname, iftype):
        """ Helper for adding a new interface, honouring dryrun """
        if dryrun:
            aqcmd("add_interface", "--interface", ifname, "--type", iftype)
            # There's no Interface instance we could return here, but
            # fortunately nothing will use the returned value in dryrun mode
            return None
        else:
            return get_or_create_interface(session, dbswitch, name=ifname,
                                           interface_type=iftype)

    def add_address(iface, ifname, ip, label, relaxed):
        """ Helper for adding an IP address, honouring dryrun """
        # Synthesize the DNS name as <switch>-<iface>[-<label>].<domain>.
        if label:
            name = "%s-%s-%s" % (dbswitch.primary_name.fqdn.name, ifname,
                                 label)
        else:
            name = "%s-%s" % (dbswitch.primary_name.fqdn.name, ifname)
        fqdn = "%s.%s" % (name, dbswitch.primary_name.fqdn.dns_domain)
        if dryrun:
            args = ["add_interface_address", "--interface", ifname,
                    "--ip", ip]
            if label:
                args.extend(["--label", label])
            aqcmd(*args)
        else:
            # Doing the DSDB update if the address existed before would be
            # tricky, so prevent that case by passing preclude=True
            dbdns_rec, newly_created = grab_address(session, fqdn, ip,
                                                    dbnet_env,
                                                    relaxed=relaxed,
                                                    preclude=True)
            assign_address(iface, ip, dbdns_rec.network, label=label)

    def add_router(ip):
        """ Helper command for managing router IPs, honouring dryrun """
        # TODO: the command should be configurable
        cmd = "qip-set-router %s" % ip
        if dryrun:
            results.append(cmd)
        else:
            # If we're not the authoritative source, then we can't just create
            # the RouterAddress directly. TODO: It should be configurable
            # whether we're authoritative for network data
            logger.client_info("You should run '%s'." % cmd)

    def warning(msg):
        """ Helper for sending warning messages to the client

        We cannot use the side channel in dryrun mode, because the
        "aq show_switch" command does not issue show_request. So we need to
        embed the warnings in the output.
        """
        if dryrun:
            results.append("# Warning: " + msg)
        else:
            logger.client_info("Warning: " + msg)

    # Run the external discovery helper, possibly via a remote-execution
    # wrapper if one is configured for this switch.
    hostname = determine_helper_hostname(session, logger, config, dbswitch)
    if hostname:
        args = determine_helper_args(config)
        args.append(hostname)
    else:
        args = []
    args.extend([importer, str(dbswitch.primary_name)])
    try:
        # NOTE(review): 'out' is not used within this chunk — the function
        # presumably continues (parsing the output) beyond this view.
        out = run_command(args)
    except ProcessException, err:
        raise ArgumentError("Failed to run switch discovery: %s" % err)
def grab_address(session, fqdn, ip, network_environment=None,
                 dns_environment=None, comments=None,
                 allow_restricted_domain=False, allow_multi=False,
                 allow_reserved=False, relaxed=False, preclude=False):
    """
    Take ownership of an address.

    This is a bit complicated because due to DNS propagation delays, we want
    to allow users to pre-define a DNS address and then assign the address to
    a host later.

    Parameters:
        session: SQLA session handle
        fqdn: the name to allocate/take over
        ip: the IP address to allocate/take over
        network_environment: where the IP address lives
        dns_environment: where the FQDN lives
        comments: any comments to attach to the DNS record if it is created
            as new
        allow_restricted_domain: if True, adding entries to restricted DNS
            domains is allowed, otherwise it is denied. Default is False.
        allow_multi: if True, allow the same FQDN to be added multiple times
            with different IP addresses. Default is False.
        allow_reserved: if True, allow creating a ReservedName instead of an
            ARecord if no IP address was specified. Default is False.
        relaxed: passed through to check_ip_restrictions() when an IP was
            given.
        preclude: if True, forbid taking over an existing DNS record, even
            if it is not referenced by any AddressAssignment records.
            Default is False.

    Returns:
        (dns_record, newly_created) tuple, where newly_created tells whether
        the record was created by this call.
    """
    if not isinstance(network_environment, NetworkEnvironment):
        network_environment = NetworkEnvironment.get_unique_or_default(
            session, network_environment)
    if not dns_environment:
        # By default the DNS environment follows the network environment.
        dns_environment = network_environment.dns_environment
    elif not isinstance(dns_environment, DnsEnvironment):
        dns_environment = DnsEnvironment.get_unique(session,
                                                    dns_environment,
                                                    compel=True)

    # Non-default DNS environments may contain anything, but we want to keep
    # the internal environment clean
    if dns_environment.is_default and not network_environment.is_default:
        raise ArgumentError("Entering external IP addresses to the "
                            "internal DNS environment is not allowed.")

    short, dbdns_domain = parse_fqdn(session, fqdn)

    # Lock the domain to prevent adding/deleting records while we're checking
    # FQDN etc. availability
    dbdns_domain.lock_row()

    if dbdns_domain.restricted and not allow_restricted_domain:
        raise ArgumentError("{0} is restricted, adding extra addresses "
                            "is not allowed.".format(dbdns_domain))

    dbfqdn = Fqdn.get_or_create(session, dns_environment=dns_environment,
                                name=short, dns_domain=dbdns_domain,
                                query_options=[joinedload('dns_records')])

    existing_record = None
    newly_created = False

    if ip:
        dbnetwork = get_net_id_from_ip(session, ip, network_environment)
        check_ip_restrictions(dbnetwork, ip, relaxed=relaxed)

        dbnetwork.lock_row()

        # No filtering on DNS environment. If an address is dynamic in one
        # environment, it should not be considered static in a different
        # environment.
        q = session.query(DynamicStub)
        q = q.filter_by(network=dbnetwork)
        q = q.filter_by(ip=ip)
        dbdns_rec = q.first()
        _forbid_dyndns(dbdns_rec)

        # Verify that no other record uses the same IP address, this time
        # taking the DNS environment into consideration.

        # While the DNS would allow different A records to point to the same
        # IP address, the current user expectation is that creating a DNS
        # entry also counts as a reservation, so we can not allow this use
        # case. If we want to implement such a feature later, the best way
        # would be to subclass Alias and let that subclass emit an A record
        # instead of a CNAME when the dump_dns command is called.
        q = session.query(ARecord)
        q = q.filter_by(network=dbnetwork)
        q = q.filter_by(ip=ip)
        q = q.join(ARecord.fqdn)
        q = q.filter_by(dns_environment=dns_environment)
        dbrecords = q.all()
        if dbrecords and len(dbrecords) > 1:  # pragma: no cover
            # We're just trying to make sure this never happens
            raise AquilonError("IP address %s is referenced by multiple "
                               "DNS records: %s" %
                               (ip, ", ".join([format(rec, "a")
                                               for rec in dbrecords])))
        if dbrecords and dbrecords[0].fqdn != dbfqdn:
            raise ArgumentError("IP address {0} is already in use by "
                                "{1:l}.".format(ip, dbrecords[0]))

        # Check if the name is used already
        for dbdns_rec in dbfqdn.dns_records:
            if isinstance(dbdns_rec, ARecord):
                _forbid_dyndns(dbdns_rec)
                _check_netenv_compat(dbdns_rec, network_environment)
                if dbdns_rec.ip == ip and dbdns_rec.network == dbnetwork:
                    existing_record = dbdns_rec
                elif not allow_multi:
                    raise ArgumentError("{0} points to a different IP "
                                        "address.".format(dbdns_rec))
            elif isinstance(dbdns_rec, ReservedName):
                # Upgrade a pre-created reservation to a real A record.
                existing_record = convert_reserved_to_arecord(
                    session, dbdns_rec, dbnetwork, ip)
                newly_created = True
            else:
                # Exclude aliases etc.
                raise ArgumentError("{0} cannot be used for address "
                                    "assignment.".format(dbdns_rec))
        if not existing_record:
            existing_record = ARecord(fqdn=dbfqdn, ip=ip, network=dbnetwork,
                                      comments=comments)
            session.add(existing_record)
            newly_created = True
    else:
        if not dbfqdn.dns_records:
            # There's no IP, and the name did not exist before. Create a
            # reservation, but only if the caller allowed that use case.
            if not allow_reserved:
                raise ArgumentError("DNS Record %s does not exist." % dbfqdn)

            existing_record = ReservedName(fqdn=dbfqdn, comments=comments)
            newly_created = True
        else:
            # There's no IP, but the name is already in use. We need a single
            # IP address.
            if len(dbfqdn.dns_records) > 1:
                raise ArgumentError("{0} does not resolve to a single IP "
                                    "address.".format(dbfqdn))

            existing_record = dbfqdn.dns_records[0]
            _forbid_dyndns(existing_record)
            if not isinstance(existing_record, ARecord):
                # Exclude aliases etc.
                raise ArgumentError("{0} cannot be used for address "
                                    "assignment.".format(existing_record))

            # Verify that the existing record is in the network environment
            # the caller expects
            _check_netenv_compat(existing_record, network_environment)

            ip = existing_record.ip
            dbnetwork = existing_record.network

            dbnetwork.lock_row()

    if existing_record.hardware_entity:
        raise ArgumentError("{0} is already used as the primary name of "
                            "{1:cl} {1.label}.".format(
                                existing_record,
                                existing_record.hardware_entity))

    if preclude and not newly_created:
        raise ArgumentError("{0} already exists.".format(existing_record))

    if ip:
        # A DNS entry also counts as a reservation: make sure no interface
        # already holds this address.
        q = session.query(AddressAssignment)
        q = q.filter_by(network=dbnetwork)
        q = q.filter_by(ip=ip)
        addr = q.first()
        if addr:
            raise ArgumentError("IP address {0} is already in use by "
                                "{1:l}.".format(ip, addr.interface))

    return (existing_record, newly_created)