def del_dynamic_range(self, session, logger, startip, endip):
    """Delete all dynamic DHCP stub records between startip and endip.

    Both endpoints must resolve to records on the same network, both must
    actually exist, and every record in the range must be a dynamic stub;
    otherwise ArgumentError is raised and nothing is deleted.
    """
    dbnet_env = NetworkEnvironment.get_unique_or_default(session)
    startnet = get_net_id_from_ip(session, startip, dbnet_env)
    endnet = get_net_id_from_ip(session, endip, dbnet_env)
    if startnet != endnet:
        raise ArgumentError("IP addresses %s (%s) and %s (%s) must be "
                            "on the same subnet." %
                            (startip, startnet.ip, endip, endnet.ip))
    q = session.query(ARecord)
    q = q.filter_by(network=startnet)
    q = q.filter(ARecord.ip >= startip)
    q = q.filter(ARecord.ip <= endip)
    q = q.order_by(ARecord.ip)
    # Eager-load related DNS objects so the deletion below does not issue
    # one extra query per record
    q = q.options(joinedload('fqdn'),
                  joinedload('fqdn.aliases'),
                  joinedload('fqdn.srv_records'),
                  joinedload('reverse_ptr'))
    existing = q.all()
    if not existing:
        raise ArgumentError("Nothing found in range.")
    # The query is ordered by IP, so the first/last rows must be exactly
    # the requested endpoints
    if existing[0].ip != startip:
        raise ArgumentError("No system found with IP address %s." % startip)
    if existing[-1].ip != endip:
        raise ArgumentError("No system found with IP address %s." % endip)
    invalid = [s for s in existing if s.dns_record_type != 'dynamic_stub']
    if invalid:
        raise ArgumentError("The range contains non-dynamic systems:\n" +
                            "\n".join([format(i, "a") for i in invalid]))
    self.del_dynamic_stubs(session, logger, existing)
def del_dynamic_range(self, session, logger, startip, endip):
    """Remove the dynamic stub range [startip, endip] (inclusive).

    Validation: the endpoints must lie on the same network, both endpoint
    addresses must exist as records, and no non-dynamic record may fall
    inside the range; any violation raises ArgumentError.
    """
    dbnet_env = NetworkEnvironment.get_unique_or_default(session)
    startnet = get_net_id_from_ip(session, startip, dbnet_env)
    endnet = get_net_id_from_ip(session, endip, dbnet_env)
    if startnet != endnet:
        raise ArgumentError("IP addresses %s (%s) and %s (%s) must be "
                            "on the same subnet." %
                            (startip, startnet.ip, endip, endnet.ip))
    q = session.query(ARecord)
    q = q.filter_by(network=startnet)
    q = q.filter(ARecord.ip >= startip)
    q = q.filter(ARecord.ip <= endip)
    q = q.order_by(ARecord.ip)
    # Pre-fetch related objects to avoid per-record lazy loads later
    q = q.options(joinedload('fqdn'),
                  joinedload('fqdn.aliases'),
                  joinedload('fqdn.srv_records'),
                  joinedload('reverse_ptr'))
    existing = q.all()
    if not existing:
        raise ArgumentError("Nothing found in range.")
    # Results are IP-ordered, so compare the boundary rows directly
    if existing[0].ip != startip:
        raise ArgumentError("No system found with IP address %s." % startip)
    if existing[-1].ip != endip:
        raise ArgumentError("No system found with IP address %s." % endip)
    invalid = [s for s in existing if s.dns_record_type != 'dynamic_stub']
    if invalid:
        raise ArgumentError("The range contains non-dynamic systems:\n" +
                            "\n".join([format(i, "a") for i in invalid]))
    self.del_dynamic_stubs(session, logger, existing)
def render(self, session, gateway, ip, netmask, prefixlen,
           network_environment, **arguments):
    """Delete a single static route identified by gateway and destination."""
    dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                         network_environment)
    dbnetwork = get_net_id_from_ip(session, gateway, dbnet_env)
    # The destination may be given as either ip/netmask or ip/prefixlen
    if netmask:
        dest = IPv4Network("%s/%s" % (ip, netmask))
    else:
        dest = IPv4Network("%s/%s" % (ip, prefixlen))
    q = session.query(StaticRoute)
    q = q.filter_by(network=dbnetwork)
    q = q.filter_by(gateway_ip=gateway)
    q = q.filter_by(dest_ip=dest.ip)
    q = q.filter_by(dest_cidr=dest.prefixlen)
    try:
        dbroute = q.one()
    except NoResultFound:
        raise NotFoundException("Static Route to {0} using gateway {1} "
                                "not found.".format(dest, gateway))
    session.delete(dbroute)
    session.flush()
    # TODO: refresh affected host templates
    return
def render(self, session, logger, switch, label, model, rack, type, ip,
           interface, mac, vendor, serial, comments, **arguments):
    """Create a new Switch with its primary name, interface and address.

    The plenary template is written and DSDB updated inside a write lock;
    on any failure the stashed plenary is restored before re-raising.
    """
    dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                               machine_type='switch', compel=True)
    dblocation = get_location(session, rack=rack)
    dbdns_rec, newly_created = grab_address(session, switch, ip,
                                            allow_restricted_domain=True,
                                            allow_reserved=True,
                                            preclude=True)
    if not label:
        # Fall back to the short host name as the hardware label
        label = dbdns_rec.fqdn.name
        try:
            Switch.check_label(label)
        except ArgumentError:
            raise ArgumentError("Could not deduce a valid hardware label "
                                "from the switch name. Please specify "
                                "--label.")
    # FIXME: What do the error messages for an invalid enum (switch_type)
    # look like?
    dbswitch = Switch(label=label, switch_type=type, location=dblocation,
                      model=dbmodel, serial_no=serial, comments=comments)
    session.add(dbswitch)
    dbswitch.primary_name = dbdns_rec
    # FIXME: get default name from the model
    iftype = "oa"
    if not interface:
        interface = "xge"
        ifcomments = "Created automatically by add_switch"
    else:
        ifcomments = None
    if interface.lower().startswith("lo"):
        iftype = "loopback"
    dbinterface = get_or_create_interface(session, dbswitch,
                                          name=interface, mac=mac,
                                          interface_type=iftype,
                                          comments=ifcomments)
    dbnetwork = get_net_id_from_ip(session, ip)
    # TODO: should we call check_ip_restrictions() here?
    assign_address(dbinterface, ip, dbnetwork)
    session.flush()
    plenary = PlenarySwitch(dbswitch, logger=logger)
    with plenary.get_write_key() as key:
        plenary.stash()
        try:
            plenary.write(locked=True)
            dsdb_runner = DSDBRunner(logger=logger)
            dsdb_runner.update_host(dbswitch, None)
            dsdb_runner.commit_or_rollback("Could not add switch to DSDB")
        except:
            # Roll the plenary back on any error so disk and DB stay in sync
            plenary.restore_stash()
            raise
    return
def render(self, session, fqdn, ip, dns_environment, **arguments):
    """Return the contiguous range of dynamic stubs around an address.

    The anchor may be given as either --fqdn or --ip.  The range is
    expanded in both directions while the neighbouring addresses are also
    dynamic stubs, staying inside the network boundaries.

    :raise ArgumentError: if neither fqdn nor ip was specified
    """
    dbdns_env = DnsEnvironment.get_unique_or_default(session,
                                                     dns_environment)
    if fqdn:
        dbdns_rec = DynamicStub.get_unique(session, fqdn=fqdn,
                                           dns_environment=dbdns_env,
                                           compel=True)
        dbnetwork = dbdns_rec.network
        ip = dbdns_rec.ip
    if ip:
        dbnetwork = get_net_id_from_ip(session, ip)
    else:
        # Bug fix: previously, calling this with neither --fqdn nor --ip
        # left dbnetwork unassigned and crashed with a NameError below.
        # Report a clean error instead, mirroring the sibling commands.
        raise ArgumentError("Please specify either --ip or --fqdn.")
    # Index all dynamic stubs of the network by integer IP for O(1) lookup
    all_stubs = {}
    q = session.query(DynamicStub.ip)
    q = q.filter_by(network=dbnetwork)
    for stub in q:
        all_stubs[int(stub.ip)] = True
    # Grow the range downwards/upwards while neighbours are also stubs
    start = int(ip)
    while start > int(dbnetwork.ip) and start - 1 in all_stubs:
        start = start - 1
    end = int(ip)
    while end < int(dbnetwork.broadcast) and end + 1 in all_stubs:
        end = end + 1
    return DynamicRange(dbnetwork, IPv4Address(start), IPv4Address(end))
def render(self, session, dbuser, ip, fqdn, network_environment, **arguments):
    """Remove a router address (and its DNS records) from its network.

    The router may be identified by --ip or --fqdn; exactly one is needed.
    """
    dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                         network_environment)
    self.az.check_network_environment(dbuser, dbnet_env)
    if fqdn:
        dbdns_rec = ARecord.get_unique(session, fqdn=fqdn,
                                       dns_environment=dbnet_env.dns_environment,
                                       compel=True)
        ip = dbdns_rec.ip
    elif not ip:
        raise ArgumentError("Please specify either --ip or --fqdn.")
    dbnetwork = get_net_id_from_ip(session, ip, dbnet_env)
    dbrouter = None
    for rtaddr in dbnetwork.routers:
        if rtaddr.ip == ip:
            dbrouter = rtaddr
            break
    if not dbrouter:
        raise NotFoundException("IP address {0} is not a router on "
                                "{1:l}.".format(ip, dbnetwork))
    # Bug fix: the old code used map() purely for its side effect; under
    # Python 3 map() is lazy, so delete_dns_record() was never called.
    # Iterate over a copy since deletion may mutate the collection.
    for dns_rec in list(dbrouter.dns_records):
        delete_dns_record(dns_rec)
    dbnetwork.routers.remove(dbrouter)
    session.flush()
    # TODO: update the templates of Zebra hosts on the network
    return
def render(self, session, dbuser, ip, fqdn, network_environment, **arguments):
    """Unregister a router address from a network, deleting its DNS records.

    Exactly one of --ip or --fqdn must identify the router address.
    """
    dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                         network_environment)
    self.az.check_network_environment(dbuser, dbnet_env)
    if fqdn:
        dbdns_rec = ARecord.get_unique(session, fqdn=fqdn,
                                       dns_environment=dbnet_env.dns_environment,
                                       compel=True)
        ip = dbdns_rec.ip
    elif not ip:
        raise ArgumentError("Please specify either --ip or --fqdn.")
    dbnetwork = get_net_id_from_ip(session, ip, dbnet_env)
    dbrouter = None
    for rtaddr in dbnetwork.routers:
        if rtaddr.ip == ip:
            dbrouter = rtaddr
            break
    if not dbrouter:
        raise NotFoundException("IP address {0} is not a router on "
                                "{1:l}.".format(ip, dbnetwork))
    # Bug fix: map() was used for its side effect only; it is lazy on
    # Python 3, so the DNS records were silently left in place.  Use an
    # explicit loop over a copy of the collection instead.
    for dns_rec in list(dbrouter.dns_records):
        delete_dns_record(dns_rec)
    dbnetwork.routers.remove(dbrouter)
    session.flush()
    # TODO: update the templates of Zebra hosts on the network
    return
def render(self, session, gateway, ip, netmask, prefixlen,
           network_environment, comments, **arguments):
    """Add a static route through the given gateway to a destination.

    Overlapping routes on the same network are rejected.
    """
    dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                         network_environment)
    dbnetwork = get_net_id_from_ip(session, gateway, dbnet_env)
    # The destination may be given as either ip/netmask or ip/prefixlen
    if netmask:
        dest = IPv4Network("%s/%s" % (ip, netmask))
    else:
        dest = IPv4Network("%s/%s" % (ip, prefixlen))
    # TODO: this will have to be changed if we want equal cost multipath
    # etc.
    for route in dbnetwork.static_routes:
        if dest.overlaps(route.destination):
            raise ArgumentError(
                "{0} already has an overlapping route to "
                "{1} using gateway {2}.".format(dbnetwork,
                                                route.destination,
                                                route.gateway_ip)
            )
    route = StaticRoute(
        network=dbnetwork, dest_ip=dest.ip, dest_cidr=dest.prefixlen,
        gateway_ip=gateway, comments=comments
    )
    session.add(route)
    session.flush()
    # TODO: refresh affected host templates
    return
def render(self, session, gateway, ip, netmask, prefixlen,
           network_environment, comments, **arguments):
    """Create a static route entry; refuses destinations overlapping an
    existing route on the gateway's network."""
    dbnet_env = NetworkEnvironment.get_unique_or_default(
        session, network_environment)
    dbnetwork = get_net_id_from_ip(session, gateway, dbnet_env)
    # Destination accepted as ip/netmask or ip/prefixlen
    if netmask:
        dest = IPv4Network("%s/%s" % (ip, netmask))
    else:
        dest = IPv4Network("%s/%s" % (ip, prefixlen))
    # TODO: this will have to be changed if we want equal cost multipath
    # etc.
    for route in dbnetwork.static_routes:
        if dest.overlaps(route.destination):
            raise ArgumentError("{0} already has an overlapping route to "
                                "{1} using gateway {2}.".format(
                                    dbnetwork, route.destination,
                                    route.gateway_ip))
    route = StaticRoute(network=dbnetwork, dest_ip=dest.ip,
                        dest_cidr=dest.prefixlen, gateway_ip=gateway,
                        comments=comments)
    session.add(route)
    session.flush()
    # TODO: refresh affected host templates
    return
def render(self, session, gateway, ip, netmask, prefixlen,
           network_environment, **arguments):
    """Delete the static route matching the given gateway and destination."""
    dbnet_env = NetworkEnvironment.get_unique_or_default(
        session, network_environment)
    dbnetwork = get_net_id_from_ip(session, gateway, dbnet_env)
    # Destination accepted as ip/netmask or ip/prefixlen
    if netmask:
        dest = IPv4Network("%s/%s" % (ip, netmask))
    else:
        dest = IPv4Network("%s/%s" % (ip, prefixlen))
    q = session.query(StaticRoute)
    q = q.filter_by(network=dbnetwork)
    q = q.filter_by(gateway_ip=gateway)
    q = q.filter_by(dest_ip=dest.ip)
    q = q.filter_by(dest_cidr=dest.prefixlen)
    try:
        dbroute = q.one()
    except NoResultFound:
        raise NotFoundException("Static Route to {0} using gateway {1} "
                                "not found.".format(dest, gateway))
    session.delete(dbroute)
    session.flush()
    # TODO: refresh affected host templates
    return
def render(self, session, fqdn, ip, dns_environment, **arguments):
    """Compute the contiguous dynamic-stub range containing an address.

    Anchored at --fqdn or --ip; expands while adjacent IPs are also
    dynamic stubs on the same network.

    :raise ArgumentError: if neither fqdn nor ip was specified
    """
    dbdns_env = DnsEnvironment.get_unique_or_default(
        session, dns_environment)
    if fqdn:
        dbdns_rec = DynamicStub.get_unique(session, fqdn=fqdn,
                                           dns_environment=dbdns_env,
                                           compel=True)
        dbnetwork = dbdns_rec.network
        ip = dbdns_rec.ip
    if ip:
        dbnetwork = get_net_id_from_ip(session, ip)
    else:
        # Bug fix: without this branch, dbnetwork stayed unbound when both
        # arguments were omitted and the query below raised NameError.
        raise ArgumentError("Please specify either --ip or --fqdn.")
    # Map integer IP -> True for constant-time neighbourhood tests
    all_stubs = {}
    q = session.query(DynamicStub.ip)
    q = q.filter_by(network=dbnetwork)
    for stub in q:
        all_stubs[int(stub.ip)] = True
    # Walk outwards from the anchor while the neighbour is also a stub
    start = int(ip)
    while start > int(dbnetwork.ip) and start - 1 in all_stubs:
        start = start - 1
    end = int(ip)
    while end < int(dbnetwork.broadcast) and end + 1 in all_stubs:
        end = end + 1
    return DynamicRange(dbnetwork, IPv4Address(start), IPv4Address(end))
def render(self, session, dbuser, ip, netmask, prefixlen,
           network_environment, **arguments):
    """Merge a network into a larger supernet of the given prefix length.

    Existing subnets inside the new supernet are deleted after their
    router addresses are removed and foreign references are re-pointed at
    the merged network.
    """
    if netmask:
        # There must me a faster way, but this is the easy one
        net = IPv4Network("127.0.0.0/%s" % netmask)
        prefixlen = net.prefixlen
    if prefixlen is None or prefixlen < 8 or prefixlen > 31:
        raise ArgumentError("The prefix length must be between 8 and 31.")
    dbnet_env = NetworkEnvironment.get_unique_or_default(
        session, network_environment)
    self.az.check_network_environment(dbuser, dbnet_env)
    dbnetwork = get_net_id_from_ip(session, ip,
                                   network_environment=dbnet_env)
    if prefixlen >= dbnetwork.cidr:
        raise ArgumentError("The specified --prefixlen must be smaller "
                            "than the current value.")
    # IPv4Network has a supernet() object, but that does not normalize the
    # IP address, i.e. IPv4Network('1.2.3.0/24').supernet() will return
    # IPv4Network('1.2.3.0/23'). Do the normalization manually.
    supernet = dbnetwork.network.supernet(new_prefix=prefixlen)
    supernet = IPv4Network("%s/%d" % (supernet.network, supernet.prefixlen))
    q = session.query(Network)
    q = q.filter_by(network_environment=dbnet_env)
    q = q.filter(and_(Network.ip >= supernet.ip,
                      Network.ip < supernet.broadcast))
    q = q.order_by(Network.ip)
    dbnets = q.all()
    if dbnets[0].ip == supernet.ip:
        # A network already starts at the supernet address: widen it
        dbsuper = dbnets.pop(0)
        dbsuper.cidr = prefixlen
    else:
        # Create a new network, copying the parameters from the one
        # specified on the command line
        dbsuper = Network(name=dbnetwork.name, network=supernet,
                          network_environment=dbnet_env,
                          location=dbnetwork.location, side=dbnetwork.side,
                          comments=dbnetwork.comments)
        session.add(dbsuper)
    for oldnet in dbnets:
        # Delete routers of the old subnets.  Bug fix: the old code used
        # map() for its side effect; map() is lazy on Python 3, so
        # delete_dns_record() was never actually invoked.
        for dbrouter in oldnet.routers:
            for dns_rec in list(dbrouter.dns_records):
                delete_dns_record(dns_rec)
        oldnet.routers = []
        fix_foreign_links(session, oldnet, dbsuper)
        session.delete(oldnet)
    session.flush()
def render(self, session, dbuser, ip, netmask, prefixlen,
           network_environment, **arguments):
    """Widen a network to the given supernet prefix, absorbing subnets.

    Subnets falling inside the supernet lose their router addresses, have
    foreign links re-pointed, and are then deleted.
    """
    if netmask:
        # There must me a faster way, but this is the easy one
        net = IPv4Network("127.0.0.0/%s" % netmask)
        prefixlen = net.prefixlen
    if prefixlen is None or prefixlen < 8 or prefixlen > 31:
        raise ArgumentError("The prefix length must be between 8 and 31.")
    dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                         network_environment)
    self.az.check_network_environment(dbuser, dbnet_env)
    dbnetwork = get_net_id_from_ip(session, ip,
                                   network_environment=dbnet_env)
    if prefixlen >= dbnetwork.cidr:
        raise ArgumentError("The specified --prefixlen must be smaller "
                            "than the current value.")
    # IPv4Network has a supernet() object, but that does not normalize the
    # IP address, i.e. IPv4Network('1.2.3.0/24').supernet() will return
    # IPv4Network('1.2.3.0/23'). Do the normalization manually.
    supernet = dbnetwork.network.supernet(new_prefix=prefixlen)
    supernet = IPv4Network("%s/%d" % (supernet.network, supernet.prefixlen))
    q = session.query(Network)
    q = q.filter_by(network_environment=dbnet_env)
    q = q.filter(and_(Network.ip >= supernet.ip,
                      Network.ip < supernet.broadcast))
    q = q.order_by(Network.ip)
    dbnets = q.all()
    if dbnets[0].ip == supernet.ip:
        # An existing network already sits at the supernet base address
        dbsuper = dbnets.pop(0)
        dbsuper.cidr = prefixlen
    else:
        # Create a new network, copying the parameters from the one
        # specified on the command line
        dbsuper = Network(name=dbnetwork.name, network=supernet,
                          network_environment=dbnet_env,
                          location=dbnetwork.location, side=dbnetwork.side,
                          comments=dbnetwork.comments)
        session.add(dbsuper)
    for oldnet in dbnets:
        # Delete routers of the old subnets.  Bug fix: map() used purely
        # for side effects never runs under Python 3 (it is lazy), so the
        # router DNS records were leaked; use an explicit loop.
        for dbrouter in oldnet.routers:
            for dns_rec in list(dbrouter.dns_records):
                delete_dns_record(dns_rec)
        oldnet.routers = []
        fix_foreign_links(session, oldnet, dbsuper)
        session.delete(oldnet)
    session.flush()
def render(self, session, logger, network_device, label, model, type, ip,
           interface, iftype, mac, vendor, serial, comments, **arguments):
    """Create a new NetworkDevice together with its primary interface.

    The plenary is written and DSDB updated under the plenary key; any
    failure restores the stashed plenary before re-raising.
    """
    dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                               compel=True)
    if not dbmodel.model_type.isNetworkDeviceType():
        raise ArgumentError("This command can only be used to "
                            "add network devices.")
    dblocation = get_location(session, compel=True, **arguments)
    dbdns_rec, newly_created = grab_address(session, network_device, ip,
                                            allow_restricted_domain=True,
                                            allow_reserved=True,
                                            preclude=True)
    if not label:
        # Deduce the hardware label from the short host name
        label = dbdns_rec.fqdn.name
        try:
            NetworkDevice.check_label(label)
        except ArgumentError:
            raise ArgumentError("Could not deduce a valid hardware label "
                                "from the network device name. Please specify "
                                "--label.")
    # FIXME: What do the error messages for an invalid enum (switch_type)
    # look like?
    dbnetdev = NetworkDevice(label=label, switch_type=type,
                             location=dblocation, model=dbmodel,
                             serial_no=serial, comments=comments)
    session.add(dbnetdev)
    dbnetdev.primary_name = dbdns_rec
    check_netdev_iftype(iftype)
    dbinterface = get_or_create_interface(session, dbnetdev,
                                          name=interface, mac=mac,
                                          interface_type=iftype)
    dbnetwork = get_net_id_from_ip(session, ip)
    # TODO: should we call check_ip_restrictions() here?
    assign_address(dbinterface, ip, dbnetwork, logger=logger)
    session.flush()
    plenary = Plenary.get_plenary(dbnetdev, logger=logger)
    with plenary.get_key():
        plenary.stash()
        try:
            plenary.write(locked=True)
            dsdb_runner = DSDBRunner(logger=logger)
            dsdb_runner.update_host(dbnetdev, None)
            dsdb_runner.commit_or_rollback("Could not add network device to DSDB")
        except:
            # Keep disk and database consistent on any failure
            plenary.restore_stash()
            raise
    return
def render(self, session, logger, chassis, label, rack, model, vendor, ip,
           interface, mac, serial, comments, **arguments):
    """Create a new Chassis, optionally with a management IP address.

    DSDB is only updated when an IP address was assigned.
    """
    dbdns_rec, newly_created = grab_address(session, chassis, ip,
                                            allow_restricted_domain=True,
                                            allow_reserved=True,
                                            preclude=True)
    if not label:
        # Deduce the hardware label from the short host name
        label = dbdns_rec.fqdn.name
        try:
            Chassis.check_label(label)
        except ArgumentError:
            raise ArgumentError("Could not deduce a valid hardware label "
                                "from the chassis name. Please specify "
                                "--label.")
    dblocation = get_location(session, rack=rack)
    dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                               machine_type='chassis', compel=True)
    # FIXME: Precreate chassis slots?
    dbchassis = Chassis(label=label, location=dblocation, model=dbmodel,
                        serial_no=serial, comments=comments)
    session.add(dbchassis)
    dbchassis.primary_name = dbdns_rec
    # FIXME: get default name from the model
    if not interface:
        interface = "oa"
        ifcomments = "Created automatically by add_chassis"
    else:
        ifcomments = None
    dbinterface = get_or_create_interface(session, dbchassis,
                                          name=interface, mac=mac,
                                          interface_type="oa",
                                          comments=ifcomments)
    if ip:
        dbnetwork = get_net_id_from_ip(session, ip)
        check_ip_restrictions(dbnetwork, ip)
        assign_address(dbinterface, ip, dbnetwork)
    session.flush()
    if ip:
        dsdb_runner = DSDBRunner(logger=logger)
        dsdb_runner.update_host(dbchassis, None)
        dsdb_runner.commit_or_rollback("Could not add chassis to DSDB")
    return
def render(self, session, dbuser, fqdn, building, ip, network_environment,
           comments, **arguments):
    """Register an IP address as a router on its network.

    The router may be named by an existing A record (fqdn only) or by an
    fqdn/ip pair, in which case the A record is created on demand.
    """
    dbnet_env = NetworkEnvironment.get_unique_or_default(
        session, network_environment)
    self.az.check_network_environment(dbuser, dbnet_env)
    if building:
        dbbuilding = Building.get_unique(session, building, compel=True)
    else:
        dbbuilding = None
    (short, dbdns_domain) = parse_fqdn(session, fqdn)
    dbfqdn = Fqdn.get_or_create(session, name=short,
                                dns_domain=dbdns_domain,
                                dns_environment=dbnet_env.dns_environment)
    if ip:
        dbnetwork = get_net_id_from_ip(session, ip, dbnet_env)
        dbdns_rec = ARecord.get_or_create(session, fqdn=dbfqdn, ip=ip,
                                          network=dbnetwork)
    else:
        dbdns_rec = ARecord.get_unique(session, dbfqdn, compel=True)
        ip = dbdns_rec.ip
        dbnetwork = dbdns_rec.network
    assert ip in dbnetwork.network, "IP %s is outside network %s" % (
        ip, dbnetwork.ip)
    if ip in dbnetwork.router_ips:
        raise ArgumentError(
            "IP address {0} is already present as a router "
            "for {1:l}.".format(ip, dbnetwork))
    # Policy checks are valid only for internal networks
    if dbnetwork.is_internal:
        if ip >= dbnetwork.first_usable_host or \
           int(ip) - int(dbnetwork.ip) in dbnetwork.reserved_offsets:
            raise ArgumentError(
                "IP address {0} is not a valid router address "
                "on {1:l}.".format(ip, dbnetwork))
    dbnetwork.routers.append(
        RouterAddress(ip=ip, location=dbbuilding,
                      dns_environment=dbdns_rec.fqdn.dns_environment,
                      comments=comments))
    session.flush()
    # TODO: update the templates of Zebra hosts on the network
    return
def update_primary_ip(session, dbhw_ent, ip):
    """Change the primary IP address of a hardware entity.

    The new address must be unused.  If the primary name is still a
    ReservedName it is converted to an ARecord and bound to a suitable
    interface; otherwise the existing A record and all address
    assignments pointing at the old IP are updated in place.
    """
    if not dbhw_ent.primary_name:
        raise ArgumentError(
            "{0} does not have a primary name.".format(dbhw_ent))
    dbnetwork = get_net_id_from_ip(session, ip)
    check_ip_restrictions(dbnetwork, ip)
    # The primary address must be unique
    q = session.query(AddressAssignment)
    q = q.filter_by(network=dbnetwork)
    q = q.filter_by(ip=ip)
    addr = q.first()
    if addr:
        raise ArgumentError(
            "IP address {0} is already in use by {1:l}.".format(
                ip, addr.interface))
    # Convert ReservedName to ARecord if needed
    if isinstance(dbhw_ent.primary_name, ReservedName):
        convert_reserved_to_arecord(session, dbhw_ent.primary_name,
                                    dbnetwork, ip)
        # When converting a ReservedName to an ARecord, we have to bind the
        # primary address to an interface. Try to pick one.
        dbinterface = first_of(dbhw_ent.interfaces, lambda x: x.bootable)
        if not dbinterface:
            dbinterface = first_of(dbhw_ent.interfaces,
                                   lambda x: x.interface_type != "management")
        if not dbinterface:  # pragma: no cover
            raise AquilonError("Cannot update the primary IP address of {0:l} "
                               "because it does not have any interfaces "
                               "defined.".format(dbhw_ent))
        assign_address(dbinterface, ip, dbnetwork)
    else:
        dns_rec = dbhw_ent.primary_name
        q = session.query(AddressAssignment)
        q = q.filter_by(network=dns_rec.network)
        q = q.filter_by(ip=dns_rec.ip)
        q = q.join(Interface)
        q = q.filter_by(hardware_entity=dbhw_ent)
        # In case of Zebra, the address may be assigned to multiple interfaces
        addrs = q.all()
        dns_rec.ip = ip
        dns_rec.network = dbnetwork
        for addr in addrs:
            addr.ip = ip
            addr.network = dbnetwork
def update_primary_ip(session, logger, dbhw_ent, ip):
    """Point the primary name of a hardware entity at a new IP address.

    Enforces uniqueness of the new address.  A ReservedName primary name
    is upgraded to an ARecord bound to a bootable (or first
    non-management) interface; an existing ARecord has its IP/network
    rewritten together with every matching address assignment (Zebra may
    have the address on several interfaces).
    """
    if not dbhw_ent.primary_name:
        raise ArgumentError("{0} does not have a primary name."
                            .format(dbhw_ent))
    dbnetwork = get_net_id_from_ip(session, ip)
    check_ip_restrictions(dbnetwork, ip)
    # The primary address must be unique
    q = session.query(AddressAssignment)
    q = q.filter_by(network=dbnetwork)
    q = q.filter_by(ip=ip)
    addr = q.first()
    if addr:
        raise ArgumentError("IP address {0} is already in use by {1:l}."
                            .format(ip, addr.interface))
    # Convert ReservedName to ARecord if needed
    if isinstance(dbhw_ent.primary_name, ReservedName):
        convert_reserved_to_arecord(session, dbhw_ent.primary_name,
                                    dbnetwork, ip)
        # When converting a ReservedName to an ARecord, we have to bind the
        # primary address to an interface. Try to pick one.
        dbinterface = first_of(dbhw_ent.interfaces, lambda x: x.bootable)
        if not dbinterface:
            dbinterface = first_of(dbhw_ent.interfaces,
                                   lambda x: x.interface_type != "management")
        if not dbinterface:  # pragma: no cover
            raise AquilonError("Cannot update the primary IP address of {0:l} "
                               "because it does not have any interfaces "
                               "defined.".format(dbhw_ent))
        assign_address(dbinterface, ip, dbnetwork, logger=logger)
    else:
        dns_rec = dbhw_ent.primary_name
        q = session.query(AddressAssignment)
        q = q.filter_by(network=dns_rec.network)
        q = q.filter_by(ip=dns_rec.ip)
        q = q.join(Interface)
        q = q.filter_by(hardware_entity=dbhw_ent)
        # In case of Zebra, the address may be assigned to multiple interfaces
        addrs = q.all()
        dns_rec.ip = ip
        dns_rec.network = dbnetwork
        for addr in addrs:
            addr.ip = ip
            addr.network = dbnetwork
def render(self, session, dbuser, fqdn, building, ip, network_environment,
           comments, **arguments):
    """Add a router address to a network, creating the A record if needed.

    With --ip, the A record is created (or matched) for the fqdn/ip pair;
    without it, an existing unique A record supplies both ip and network.
    """
    dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                         network_environment)
    self.az.check_network_environment(dbuser, dbnet_env)
    if building:
        dbbuilding = Building.get_unique(session, building, compel=True)
    else:
        dbbuilding = None
    (short, dbdns_domain) = parse_fqdn(session, fqdn)
    dbfqdn = Fqdn.get_or_create(session, name=short,
                                dns_domain=dbdns_domain,
                                dns_environment=dbnet_env.dns_environment)
    if ip:
        dbnetwork = get_net_id_from_ip(session, ip, dbnet_env)
        dbdns_rec = ARecord.get_or_create(session, fqdn=dbfqdn, ip=ip,
                                          network=dbnetwork)
    else:
        dbdns_rec = ARecord.get_unique(session, dbfqdn, compel=True)
        ip = dbdns_rec.ip
        dbnetwork = dbdns_rec.network
    assert ip in dbnetwork.network, "IP %s is outside network %s" % (ip,
                                                                     dbnetwork.ip)
    if ip in dbnetwork.router_ips:
        raise ArgumentError("IP address {0} is already present as a router "
                            "for {1:l}.".format(ip, dbnetwork))
    # Policy checks are valid only for internal networks
    if dbnetwork.is_internal:
        if ip >= dbnetwork.first_usable_host or \
           int(ip) - int(dbnetwork.ip) in dbnetwork.reserved_offsets:
            raise ArgumentError("IP address {0} is not a valid router address "
                                "on {1:l}.".format(ip, dbnetwork))
    dbnetwork.routers.append(RouterAddress(ip=ip, location=dbbuilding,
                                           dns_environment=dbdns_rec.fqdn.dns_environment,
                                           comments=comments))
    session.flush()
    # TODO: update the templates of Zebra hosts on the network
    return
def render(self, session, logger, chassis, label, rack, model, vendor, ip,
           interface, mac, serial, comments, **arguments):
    """Add a Chassis; an IP address is optional and also drives the DSDB
    update at the end."""
    dbdns_rec, newly_created = grab_address(session, chassis, ip,
                                            allow_restricted_domain=True,
                                            allow_reserved=True,
                                            preclude=True)
    if not label:
        # Use the short host name as the label when none was given
        label = dbdns_rec.fqdn.name
        try:
            Chassis.check_label(label)
        except ArgumentError:
            raise ArgumentError("Could not deduce a valid hardware label "
                                "from the chassis name. Please specify "
                                "--label.")
    dblocation = get_location(session, rack=rack)
    dbmodel = Model.get_unique(session, name=model, vendor=vendor,
                               machine_type='chassis', compel=True)
    # FIXME: Precreate chassis slots?
    dbchassis = Chassis(label=label, location=dblocation, model=dbmodel,
                        serial_no=serial, comments=comments)
    session.add(dbchassis)
    dbchassis.primary_name = dbdns_rec
    # FIXME: get default name from the model
    if not interface:
        interface = "oa"
        ifcomments = "Created automatically by add_chassis"
    else:
        ifcomments = None
    dbinterface = get_or_create_interface(session, dbchassis,
                                          name=interface, mac=mac,
                                          interface_type="oa",
                                          comments=ifcomments)
    if ip:
        dbnetwork = get_net_id_from_ip(session, ip)
        check_ip_restrictions(dbnetwork, ip)
        assign_address(dbinterface, ip, dbnetwork)
    session.flush()
    if ip:
        dsdb_runner = DSDBRunner(logger=logger)
        dsdb_runner.update_host(dbchassis, None)
        dsdb_runner.commit_or_rollback("Could not add chassis to DSDB")
    return
def grab_address(session, fqdn, ip, network_environment=None,
                 dns_environment=None, comments=None,
                 allow_restricted_domain=False, allow_multi=False,
                 allow_reserved=False, relaxed=False, preclude=False):
    """
    Take ownership of an address.

    This is a bit complicated because due to DNS propagation delays, we want
    to allow users to pre-define a DNS address and then assign the address to
    a host later.

    Parameters:
        session: SQLA session handle
        fqdn: the name to allocate/take over
        ip: the IP address to allocate/take over
        network_environment: where the IP address lives
        dns_enviromnent: where the FQDN lives
        comments: any comments to attach to the DNS record if it is created as new
        allow_restricted_domain: if True, adding entries to restricted DNS
            domains is allowed, otherwise it is denied. Default is False.
        allow_multi: if True, allow the same FQDN to be added multiple times with
            different IP addresses. Deault is False.
        allow_reserved: if True, allow creating a ReservedName instead of an
            ARecord if no IP address was specified. Default is False.
        preclude: if True, forbid taking over an existing DNS record, even if it
            is not referenced by any AddressAssignment records. Default is False.

    Returns:
        (dns_record, newly_created) tuple, where newly_created tells whether
        the record was created (or converted) by this call.
    """
    if not isinstance(network_environment, NetworkEnvironment):
        network_environment = NetworkEnvironment.get_unique_or_default(session,
                                                                       network_environment)
    if not dns_environment:
        dns_environment = network_environment.dns_environment
    elif not isinstance(dns_environment, DnsEnvironment):
        dns_environment = DnsEnvironment.get_unique(session, dns_environment,
                                                    compel=True)
    # Non-default DNS environments may contain anything, but we want to keep
    # the internal environment clean
    if dns_environment.is_default and not network_environment.is_default:
        raise ArgumentError("Entering external IP addresses to the "
                            "internal DNS environment is not allowed.")
    short, dbdns_domain = parse_fqdn(session, fqdn)
    # Lock the domain to prevent adding/deleting records while we're checking
    # FQDN etc. availability
    dbdns_domain.lock_row()
    if dbdns_domain.restricted and not allow_restricted_domain:
        raise ArgumentError("{0} is restricted, adding extra addresses "
                            "is not allowed.".format(dbdns_domain))
    dbfqdn = Fqdn.get_or_create(session, dns_environment=dns_environment,
                                name=short, dns_domain=dbdns_domain,
                                query_options=[joinedload('dns_records')])
    existing_record = None
    newly_created = False
    if ip:
        dbnetwork = get_net_id_from_ip(session, ip, network_environment)
        check_ip_restrictions(dbnetwork, ip, relaxed=relaxed)
        dbnetwork.lock_row()
        # No filtering on DNS environment. If an address is dynamic in one
        # environment, it should not be considered static in a different
        # environment.
        q = session.query(DynamicStub)
        q = q.filter_by(network=dbnetwork)
        q = q.filter_by(ip=ip)
        dbdns_rec = q.first()
        _forbid_dyndns(dbdns_rec)
        # Verify that no other record uses the same IP address, this time
        # taking the DNS environment into consideration.
        # While the DNS would allow different A records to point to the same
        # IP address, the current user expectation is that creating a DNS
        # entry also counts as a reservation, so we can not allow this use
        # case. If we want to implement such a feature later, the best way
        # would be to subclass Alias and let that subclass emit an A record
        # instead of a CNAME when the dump_dns command is called.
        q = session.query(ARecord)
        q = q.filter_by(network=dbnetwork)
        q = q.filter_by(ip=ip)
        q = q.join(ARecord.fqdn)
        q = q.filter_by(dns_environment=dns_environment)
        dbrecords = q.all()
        if dbrecords and len(dbrecords) > 1:  # pragma: no cover
            # We're just trying to make sure this never happens
            raise AquilonError("IP address %s is referenced by multiple "
                               "DNS records: %s" %
                               (ip, ", ".join([format(rec, "a")
                                               for rec in dbrecords])))
        if dbrecords and dbrecords[0].fqdn != dbfqdn:
            raise ArgumentError("IP address {0} is already in use by {1:l}."
                                .format(ip, dbrecords[0]))
        # Check if the name is used already
        for dbdns_rec in dbfqdn.dns_records:
            if isinstance(dbdns_rec, ARecord):
                _forbid_dyndns(dbdns_rec)
                _check_netenv_compat(dbdns_rec, network_environment)
                if dbdns_rec.ip == ip and dbdns_rec.network == dbnetwork:
                    existing_record = dbdns_rec
                elif not allow_multi:
                    raise ArgumentError("{0} points to a different IP address."
                                        .format(dbdns_rec))
            elif isinstance(dbdns_rec, ReservedName):
                existing_record = convert_reserved_to_arecord(session,
                                                              dbdns_rec,
                                                              dbnetwork, ip)
                newly_created = True
            else:
                # Exclude aliases etc.
                raise ArgumentError("{0} cannot be used for address assignment."
                                    .format(dbdns_rec))
        if not existing_record:
            existing_record = ARecord(fqdn=dbfqdn, ip=ip, network=dbnetwork,
                                      comments=comments)
            session.add(existing_record)
            newly_created = True
    else:
        if not dbfqdn.dns_records:
            # There's no IP, and the name did not exist before. Create a
            # reservation, but only if the caller allowed that use case.
            if not allow_reserved:
                raise ArgumentError("DNS Record %s does not exist." % dbfqdn)
            existing_record = ReservedName(fqdn=dbfqdn, comments=comments)
            newly_created = True
        else:
            # There's no IP, but the name is already in use. We need a single IP
            # address.
            if len(dbfqdn.dns_records) > 1:
                raise ArgumentError("{0} does not resolve to a single IP address."
                                    .format(dbfqdn))
            existing_record = dbfqdn.dns_records[0]
            _forbid_dyndns(existing_record)
            if not isinstance(existing_record, ARecord):
                # Exclude aliases etc.
                raise ArgumentError("{0} cannot be used for address assignment."
                                    .format(existing_record))
            # Verify that the existing record is in the network environment the
            # caller expects
            _check_netenv_compat(existing_record, network_environment)
            ip = existing_record.ip
            dbnetwork = existing_record.network
            dbnetwork.lock_row()
    if existing_record.hardware_entity:
        raise ArgumentError("{0} is already used as the primary name of {1:cl} "
                            "{1.label}.".format(existing_record,
                                                existing_record.hardware_entity))
    if preclude and not newly_created:
        raise ArgumentError("{0} already exists.".format(existing_record))
    if ip:
        # Final uniqueness check against live address assignments
        q = session.query(AddressAssignment)
        q = q.filter_by(network=dbnetwork)
        q = q.filter_by(ip=ip)
        addr = q.first()
        if addr:
            raise ArgumentError("IP address {0} is already in use by "
                                "{1:l}.".format(ip, addr.interface))
    return (existing_record, newly_created)
def render(self, session, logger, fqdn, ip, reverse_ptr, dns_environment,
           network_environment, comments, **arguments):
    """Update an existing A record's IP address, reverse PTR or comments.

    The IP may not change while the record is a primary name or bound to
    interfaces.  DSDB is notified only in the default DNS environment and
    only when the IP or the comments actually changed.
    """
    dbnet_env, dbdns_env = get_net_dns_env(session, network_environment,
                                           dns_environment)
    dbdns_rec = ARecord.get_unique(session, fqdn=fqdn,
                                   dns_environment=dbdns_env, compel=True)
    old_ip = dbdns_rec.ip
    old_comments = dbdns_rec.comments
    if ip:
        if dbdns_rec.hardware_entity:
            raise ArgumentError("{0} is a primary name, and its IP address "
                                "cannot be changed.".format(dbdns_rec))
        if dbdns_rec.assignments:
            ifaces = ", ".join(["%s/%s" % (addr.interface.hardware_entity,
                                           addr.interface)
                                for addr in dbdns_rec.assignments])
            raise ArgumentError("{0} is already used by the following "
                                "interfaces, and its IP address cannot be "
                                "changed: {1!s}."
                                .format(dbdns_rec, ifaces))
        dbnetwork = get_net_id_from_ip(session, ip, dbnet_env)
        # The target address must not be taken by another record in the
        # same DNS environment
        q = session.query(ARecord)
        q = q.filter_by(network=dbnetwork)
        q = q.filter_by(ip=ip)
        q = q.join(ARecord.fqdn)
        q = q.filter_by(dns_environment=dbdns_env)
        existing = q.first()
        if existing:
            raise ArgumentError("IP address {0!s} is already used by "
                                "{1:l}.".format(ip, existing))
        dbdns_rec.network = dbnetwork
        old_ip = dbdns_rec.ip
        dbdns_rec.ip = ip
    if reverse_ptr:
        old_reverse = dbdns_rec.reverse_ptr
        set_reverse_ptr(session, logger, dbdns_rec, reverse_ptr)
        if old_reverse and old_reverse != dbdns_rec.reverse_ptr:
            delete_target_if_needed(session, old_reverse)
    if comments:
        dbdns_rec.comments = comments
    session.flush()
    if dbdns_env.is_default and (dbdns_rec.ip != old_ip or
                                 dbdns_rec.comments != old_comments):
        dsdb_runner = DSDBRunner(logger=logger)
        dsdb_runner.update_host_details(dbdns_rec.fqdn, new_ip=dbdns_rec.ip,
                                        old_ip=old_ip,
                                        new_comments=dbdns_rec.comments,
                                        old_comments=old_comments)
        dsdb_runner.commit_or_rollback()
    return
def render(self, session, dbuser, ip, netmask, prefixlen,
           network_environment, **arguments):
    """Split an existing network into smaller subnets with the given prefix.

    The first subnet keeps the original network row (its cidr is updated);
    subsequent subnets become new Network rows named "<name>_2", "<name>_3"
    etc., inheriting location and side from the supernet.
    """
    if netmask:
        # There must be a faster way, but this is the easy one: let
        # IPv4Network convert the dotted netmask into a prefix length
        net = IPv4Network("127.0.0.0/%s" % netmask)
        prefixlen = net.prefixlen
    if prefixlen < 8 or prefixlen > 32:
        raise ArgumentError("The prefix length must be between 8 and 32.")
    dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                         network_environment)
    self.az.check_network_environment(dbuser, dbnet_env)
    dbnetwork = get_net_id_from_ip(session, ip,
                                   network_environment=dbnet_env)
    # Splitting only makes sense into strictly smaller subnets
    if prefixlen <= dbnetwork.cidr:
        raise ArgumentError("The specified --prefixlen must be bigger "
                            "than the current value.")
    subnets = dbnetwork.network.subnet(new_prefix=prefixlen)
    # Collect IP addresses that will become network/broadcast addresses
    # after the split
    bad_ips = []
    for subnet in subnets:
        bad_ips.append(subnet.ip)
        bad_ips.append(subnet.broadcast)
    # Refuse to split if any future network/broadcast address is assigned
    # to a host...
    q = session.query(AddressAssignment.ip)
    q = q.filter_by(network=dbnetwork)
    q = q.filter(AddressAssignment.ip.in_(bad_ips))
    used_addrs = q.all()
    if used_addrs:
        raise ArgumentError("Network split failed, because the following "
                            "subnet IP and/or broadcast addresses are "
                            "assigned to hosts: %s" %
                            ", ".join([str(addr.ip) for addr in used_addrs]))
    # ...or registered in the DNS
    q = session.query(ARecord.ip)
    q = q.filter_by(network=dbnetwork)
    q = q.filter(ARecord.ip.in_(bad_ips))
    used_addrs = q.all()
    if used_addrs:
        raise ArgumentError("Network split failed, because the following "
                            "subnet IP and/or broadcast addresses are "
                            "registered in the DNS: %s" %
                            ", ".join([str(addr.ip) for addr in used_addrs]))
    # Reason of the initial value: we keep the name of the first segment
    # (e.g. "foo"), and the next segment will be called "foo_2"
    name_idx = 2
    dbnets = []
    for subnet in dbnetwork.network.subnet(new_prefix=prefixlen):
        # Skip the original
        if subnet.ip == dbnetwork.ip:
            continue
        # Generate a new name. Make it unique, even if the DB does not
        # enforce that currently
        while True:
            # TODO: check if the new name is too long
            name = "%s_%d" % (dbnetwork.name, name_idx)
            name_idx += 1
            q = session.query(Network)
            q = q.filter_by(network_environment=dbnet_env)
            q = q.filter_by(name=name)
            if q.count() == 0:
                break
            # Should not happen...
            if name_idx > 1000:  # pragma: no cover
                raise AquilonError("Could not generate a unique network "
                                   "name in a reasonable time, bailing out")
        # Inherit location & side from the supernet
        newnet = Network(name=name, network=subnet,
                         network_environment=dbnet_env,
                         location=dbnetwork.location,
                         side=dbnetwork.side,
                         comments="Created by splitting {0:a}".format(dbnetwork))
        session.add(newnet)
        dbnets.append(newnet)
    # Shrink the original network to the first subnet
    dbnetwork.cidr = prefixlen
    session.flush()
    # Re-home addresses/records that now belong to the new subnets
    for newnet in dbnets:
        fix_foreign_links(session, dbnetwork, newnet)
    session.flush()
def render(self, session, network, network_environment, ip, type, side,
           machine, fqdn, cluster, pg, has_dynamic_ranges, exact_location,
           fullinfo, style, **arguments):
    """Return a network matching the parameters.

    Some of the search terms can only return a unique network.  For
    those (like ip and fqdn) we proceed with the query anyway.  This
    allows for quick scripted tests like "is the network for X.X.X.X
    a tor_net2?".
    """
    dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                         network_environment)
    q = session.query(Network)
    q = q.filter_by(network_environment=dbnet_env)
    if network:
        # Note: the network name is not unique (neither in QIP)
        q = q.filter_by(name=network)
    if ip:
        dbnetwork = get_net_id_from_ip(session, ip, dbnet_env)
        q = q.filter_by(id=dbnetwork.id)
    if type:
        q = q.filter_by(network_type=type)
    if side:
        q = q.filter_by(side=side)
    if machine:
        dbmachine = Machine.get_unique(session, machine, compel=True)
        vlans = []
        if dbmachine.cluster and dbmachine.cluster.network_device:
            # If this is a VM on a cluster, consult the VLANs.  There
            # could be functionality here for real hardware to consult
            # interface port groups... there's no real use case yet.
            vlans = [VlanInfo.get_vlan_id(session, i.port_group)
                     for i in dbmachine.interfaces if i.port_group]
            if vlans:
                q = q.join('observed_vlans')
                q = q.filter_by(network_device=dbmachine.cluster.network_device)
                q = q.filter(ObservedVlan.vlan_id.in_(vlans))
                q = q.reset_joinpoint()
        if not vlans:
            # Fall back to the networks the machine's addresses live on
            networks = [addr.network.id for addr in
                        dbmachine.all_addresses()]
            if not networks:
                msg = "Machine %s has no interfaces " % dbmachine.label
                if dbmachine.cluster:
                    msg += "with a portgroup or "
                msg += "assigned to a network."
                raise ArgumentError(msg)
            q = q.filter(Network.id.in_(networks))
    if fqdn:
        # Restrict to the networks the FQDN's A records resolve into
        (short, dbdns_domain) = parse_fqdn(session, fqdn)
        dnsq = session.query(ARecord.ip)
        dnsq = dnsq.join(ARecord.fqdn)
        dnsq = dnsq.filter_by(name=short)
        dnsq = dnsq.filter_by(dns_domain=dbdns_domain)
        networks = [get_net_id_from_ip(session, addr.ip, dbnet_env).id
                    for addr in dnsq.all()]
        q = q.filter(Network.id.in_(networks))
    if cluster:
        dbcluster = Cluster.get_unique(session, cluster, compel=True)
        if dbcluster.network_device:
            # ESX-style cluster: use the VLANs seen on its network device
            q = q.join('observed_vlans')
            q = q.filter_by(network_device=dbcluster.network_device)
            q = q.reset_joinpoint()
        else:
            # Otherwise use the networks of the member hosts' primary names
            net_ids = [h.hardware_entity.primary_name.network.id for h in
                       dbcluster.hosts if
                       getattr(h.hardware_entity.primary_name, "network")]
            q = q.filter(Network.id.in_(net_ids))
    if pg:
        vlan = VlanInfo.get_vlan_id(session, pg, compel=ArgumentError)
        q = q.join('observed_vlans')
        q = q.filter_by(vlan_id=vlan)
        q = q.reset_joinpoint()
    dblocation = get_location(session, **arguments)
    if dblocation:
        if exact_location:
            q = q.filter_by(location=dblocation)
        else:
            # Include networks mapped to any child location as well
            childids = dblocation.offspring_ids()
            q = q.filter(Network.location_id.in_(childids))
    if has_dynamic_ranges:
        # Networks that contain at least one DynamicStub record
        q = q.filter(exists([DynamicStub.dns_record_id],
                            from_obj=DynamicStub.__table__.join(ARecord.__table__))
                     .where(Network.id == DynamicStub.network_id))
    q = q.order_by(Network.ip)
    if fullinfo or style != 'raw':
        q = q.options(undefer('comments'))
        return q.all()
    # Raw style without fullinfo: just "ip/cidr" per network
    return StringAttributeList(q.all(), lambda n: "%s/%s" % (n.ip, n.cidr))
def render(self, session, logger, interface, machine, mac, automac,
           model, vendor, pg, autopg, type, comments, **arguments):
    """Add an interface to a machine (legacy --type variant).

    Guesses the interface type from the name if not given, optionally
    auto-generates the MAC and/or port group, may consume a conflicting
    'blind' host's identity/IP, then writes plenaries and updates DSDB
    under a single write key.
    """
    dbmachine = Machine.get_unique(session, machine, compel=True)
    # Snapshot taken before any changes so DSDB sees old vs. new state
    oldinfo = DSDBRunner.snapshot_hw(dbmachine)
    audit_results = []
    q = session.query(Interface)
    q = q.filter_by(name=interface, hardware_entity=dbmachine)
    if q.first():
        raise ArgumentError("Machine %s already has an interface named %s."
                            % (machine, interface))
    if not type:
        # Guess the interface type from its name
        type = 'public'
        management_types = ['bmc', 'ilo', 'ipmi']
        for mtype in management_types:
            if interface.startswith(mtype):
                type = 'management'
                break
        if interface.startswith("bond"):
            type = 'bonding'
        elif interface.startswith("br"):
            type = 'bridge'
        # Test it last, VLANs can be added on top of almost anything
        if '.' in interface:
            type = 'vlan'
    if type == "oa" or type == "loopback":
        raise ArgumentError("Interface type '%s' is not valid for "
                            "machines." % type)
    bootable = None
    if type == 'public':
        # Convention: eth0 is the boot interface
        if interface == 'eth0':
            bootable = True
        else:
            bootable = False
    dbmanager = None
    pending_removals = PlenaryCollection()
    dsdb_runner = DSDBRunner(logger=logger)
    if mac:
        prev = session.query(Interface).filter_by(mac=mac).first()
        if prev and prev.hardware_entity == dbmachine:
            raise ArgumentError("{0} already has an interface with MAC "
                                "address {1}.".format(dbmachine, mac))
        # Is the conflicting interface something that can be
        # removed?  It is if:
        # - we are currently attempting to add a management interface
        # - the old interface belongs to a machine
        # - the old interface is associated with a host
        # - that host was blindly created, and thus can be removed safely
        if prev and type == 'management' and \
           prev.hardware_entity.hardware_type == 'machine' and \
           prev.hardware_entity.host and \
           prev.hardware_entity.host.status.name == 'blind':
            # FIXME: Is this just always allowed?  Maybe restrict
            # to only aqd-admin and the host itself?
            dummy_machine = prev.hardware_entity
            dummy_ip = dummy_machine.primary_ip
            old_fqdn = str(dummy_machine.primary_name)
            old_iface = prev.name
            old_mac = prev.mac
            old_network = get_net_id_from_ip(session, dummy_ip)
            self.remove_prev(session, logger, prev, pending_removals)
            session.flush()
            dsdb_runner.delete_host_details(old_fqdn, dummy_ip, old_iface,
                                            old_mac)
            self.consolidate_names(session, logger, dbmachine,
                                   dummy_machine.label, pending_removals)
            # It seems like a shame to throw away the IP address that
            # had been allocated for the blind host.  Try to use it
            # as it should be used...
            dbmanager = self.add_manager(session, logger, dbmachine,
                                         dummy_ip, old_network)
        elif prev:
            msg = describe_interface(session, prev)
            raise ArgumentError("MAC address %s is already in use: %s." %
                                (mac, msg))
    elif automac:
        mac = self.generate_mac(session, dbmachine)
        audit_results.append(('mac', mac))
    else:
        # Ignore now that Mac Address can be null
        pass
    if pg is not None:
        port_group = verify_port_group(dbmachine, pg)
    elif autopg:
        port_group = choose_port_group(session, logger, dbmachine)
        audit_results.append(('pg', port_group))
    else:
        port_group = None
    dbinterface = get_or_create_interface(session, dbmachine,
                                          name=interface,
                                          vendor=vendor, model=model,
                                          interface_type=type, mac=mac,
                                          bootable=bootable,
                                          port_group=port_group,
                                          comments=comments,
                                          preclude=True)
    # So far, we're *only* creating a manager if we happen to be
    # removing a blind entry and we can steal its IP address.
    if dbmanager:
        assign_address(dbinterface, dbmanager.ip, dbmanager.network)
    session.add(dbinterface)
    session.flush()
    plenaries = PlenaryCollection(logger=logger)
    plenaries.append(Plenary.get_plenary(dbmachine))
    if pending_removals and dbmachine.host:
        # Not an exact test, but the file won't be re-written
        # if the contents are the same so calling too often is
        # not a major expense.
        plenaries.append(Plenary.get_plenary(dbmachine.host))
    # Even though there may be removals going on the write key
    # should be sufficient here.
    key = plenaries.get_write_key()
    try:
        lock_queue.acquire(key)
        pending_removals.stash()
        plenaries.write(locked=True)
        pending_removals.remove(locked=True)
        dsdb_runner.update_host(dbmachine, oldinfo)
        dsdb_runner.commit_or_rollback("Could not update host in DSDB")
    except:
        # Roll back the on-disk state before re-raising
        plenaries.restore_stash()
        pending_removals.restore_stash()
        raise
    finally:
        lock_queue.release(key)
    if dbmachine.host:
        # FIXME: reconfigure host
        pass
    for name, value in audit_results:
        self.audit_result(session, name, value, **arguments)
    return
def generate_ip(session, logger, dbinterface, ip=None, ipfromip=None,
                ipfromsystem=None, autoip=None, ipalgorithm=None,
                compel=False, network_environment=None, audit_results=None,
                **kwargs):
    """Return an IP address selected according to the given options.

    Exactly one of ip, ipfromip, ipfromsystem and autoip may be given:
      - ip: returned as-is.
      - ipfromip: pick a free address from that IP's network.
      - ipfromsystem: pick a free address from the network of the named
        A record.
      - autoip: derive the network from dbinterface's port group (VM/ESX
        cluster) or from switch discovery data for its MAC address.

    ipalgorithm selects which free address to use: 'lowest' (default),
    'highest', or 'max' (highest used address + 1).

    Returns None if no option was given and compel is False; raises
    ArgumentError for conflicting/missing options or when no address
    is available.
    """
    ip_options = [ip, ipfromip, ipfromsystem, autoip]
    numopts = sum([1 if opt else 0 for opt in ip_options])
    if numopts > 1:
        raise ArgumentError("Only one of --ip, --ipfromip, --ipfromsystem "
                            "and --autoip can be specified.")
    elif numopts == 0:
        if compel:
            raise ArgumentError("Please specify one of the --ip, --ipfromip, "
                                "--ipfromsystem, and --autoip parameters.")
        return None

    if ip:
        # An explicit address needs no generation
        return ip

    dbnetwork = None
    if autoip:
        if not dbinterface:
            raise ArgumentError("No interface available to automatically "
                                "generate an IP address.")
        if dbinterface.port_group:
            # This could either be an interface from a virtual machine
            # or an interface on an ESX vmhost.
            dbcluster = None
            if getattr(dbinterface.hardware_entity, "cluster", None):
                # VM
                dbcluster = dbinterface.hardware_entity.cluster
            elif getattr(dbinterface.hardware_entity, "host", None):
                dbcluster = dbinterface.hardware_entity.host.cluster
            if not dbcluster:
                raise ArgumentError("Can only automatically assign an IP "
                                    "address to an interface with a port "
                                    "group on virtual machines or ESX hosts.")
            if not dbcluster.switch:
                raise ArgumentError("Cannot automatically assign an IP "
                                    "address to an interface with a port group "
                                    "since {0} is not associated with a "
                                    "switch.".format(dbcluster))
            vlan_id = VlanInfo.get_vlan_id(session, dbinterface.port_group)
            dbnetwork = ObservedVlan.get_network(session, vlan_id=vlan_id,
                                                 switch=dbcluster.switch,
                                                 compel=ArgumentError)
        elif dbinterface.mac:
            # Use switch discovery data to find where the MAC was last seen
            q = session.query(ObservedMac)
            q = q.filter_by(mac_address=dbinterface.mac)
            q = q.order_by(desc(ObservedMac.last_seen))
            dbom = q.first()
            if not dbom:
                raise ArgumentError("No switch found in the discovery table "
                                    "for MAC address %s." % dbinterface.mac)
            if not dbom.switch.primary_ip:
                raise ArgumentError("{0} does not have a primary IP address "
                                    "to use for network "
                                    "selection.".format(dbom.switch))
            dbnetwork = get_net_id_from_ip(session, dbom.switch.primary_ip)
        else:
            raise ArgumentError("{0} has neither a MAC address nor port group "
                                "information, it is not possible to generate "
                                "an IP address automatically."
                                .format(dbinterface))

    if ipfromsystem:
        # Assumes one system entry, not necessarily correct.
        dbdns_rec = ARecord.get_unique(session, fqdn=ipfromsystem,
                                       compel=True)
        dbnetwork = dbdns_rec.network

    if ipfromip:
        # determine network
        dbnetwork = get_net_id_from_ip(session, ipfromip,
                                       network_environment)

    if not dbnetwork:
        # Defensive: every option branch above either sets dbnetwork or
        # raises.  (Previously this formatted "dbsystem.fqdn" where
        # dbsystem was always None, turning the intended ArgumentError
        # into an AttributeError.)
        raise ArgumentError("Could not determine network to use for %s." %
                            dbinterface)

    # When there are e.g. multiple "add manager --autoip" operations going on
    # in parallel, we must ensure that they won't try to use the same IP
    # address. This query places a database lock on the network, which means
    # IP address generation within a network will be serialized, while
    # operations on different networks can still run in parallel. The lock
    # will be released by COMMIT or ROLLBACK.
    dbnetwork.lock_row()

    startip = dbnetwork.first_usable_host

    used_ips = session.query(ARecord.ip)
    used_ips = used_ips.filter_by(network=dbnetwork)
    used_ips = used_ips.filter(ARecord.ip >= startip)

    full_set = set(range(int(startip), int(dbnetwork.broadcast)))
    used_set = set([int(item.ip) for item in used_ips])
    free_set = full_set - used_set

    if not free_set:
        raise ArgumentError("No available IP addresses found on "
                            "network %s." % str(dbnetwork.network))

    if ipalgorithm is None or ipalgorithm == 'lowest':
        # Select the lowest available address
        ip = IPv4Address(min(free_set))
    elif ipalgorithm == 'highest':
        # Select the highest available address
        ip = IPv4Address(max(free_set))
    elif ipalgorithm == 'max':
        # Return the max. used address + 1
        if not used_set:
            # Avoids ValueError being thrown when used_set is empty
            ip = IPv4Address(min(free_set))
        else:
            # "highest_used" rather than "next" to avoid shadowing the builtin
            highest_used = max(used_set)
            if not highest_used + 1 in free_set:
                raise ArgumentError("Failed to find an IP that is suitable "
                                    "for --ipalgorithm=max.  Try an other "
                                    "algorithm as there are still some free "
                                    "addresses.")
            ip = IPv4Address(highest_used + 1)
    else:
        raise ArgumentError("Unknown algorithm %s." % ipalgorithm)

    if audit_results is not None:
        if dbinterface:
            logger.info("Selected IP address {0!s} for {1:l}"
                        .format(ip, dbinterface))
        else:
            logger.info("Selected IP address %s" % ip)
        audit_results.append(('ip', ip))
    return ip
def grab_address(session, fqdn, ip, network_environment=None,
                 dns_environment=None, comments=None,
                 allow_restricted_domain=False, allow_multi=False,
                 allow_reserved=False, relaxed=False, preclude=False):
    """
    Take ownership of an address.

    This is a bit complicated because due to DNS propagation delays, we want
    to allow users to pre-define a DNS address and then assign the address to
    a host later.

    Parameters:
        session: SQLA session handle
        fqdn: the name to allocate/take over
        ip: the IP address to allocate/take over
        network_environment: where the IP address lives
        dns_environment: where the FQDN lives
        comments: any comments to attach to the DNS record if it is created
            as new
        allow_restricted_domain: if True, adding entries to restricted DNS
            domains is allowed, otherwise it is denied. Default is False.
        allow_multi: if True, allow the same FQDN to be added multiple times
            with different IP addresses. Default is False.
        allow_reserved: if True, allow creating a ReservedName instead of an
            ARecord if no IP address was specified. Default is False.
        preclude: if True, forbid taking over an existing DNS record, even if
            it is not referenced by any AddressAssignment records. Default is
            False.

    Returns a tuple of (the DNS record, whether it was newly created).
    """
    if not isinstance(network_environment, NetworkEnvironment):
        network_environment = NetworkEnvironment.get_unique_or_default(
            session, network_environment)
    if not dns_environment:
        dns_environment = network_environment.dns_environment
    elif not isinstance(dns_environment, DnsEnvironment):
        dns_environment = DnsEnvironment.get_unique(session, dns_environment,
                                                    compel=True)
    # Non-default DNS environments may contain anything, but we want to keep
    # the internal environment clean
    if dns_environment.is_default and not network_environment.is_default:
        raise ArgumentError("Entering external IP addresses to the "
                            "internal DNS environment is not allowed.")
    short, dbdns_domain = parse_fqdn(session, fqdn)
    # Lock the domain to prevent adding/deleting records while we're checking
    # FQDN etc. availability
    dbdns_domain.lock_row()
    if dbdns_domain.restricted and not allow_restricted_domain:
        raise ArgumentError("{0} is restricted, adding extra addresses "
                            "is not allowed.".format(dbdns_domain))
    dbfqdn = Fqdn.get_or_create(session,
                                dns_environment=dns_environment, name=short,
                                dns_domain=dbdns_domain,
                                query_options=[joinedload('dns_records')])
    existing_record = None
    newly_created = False
    if ip:
        dbnetwork = get_net_id_from_ip(session, ip, network_environment)
        check_ip_restrictions(dbnetwork, ip, relaxed=relaxed)
        # Serialize address allocation within the network
        dbnetwork.lock_row()
        # No filtering on DNS environment. If an address is dynamic in one
        # environment, it should not be considered static in a different
        # environment.
        q = session.query(DynamicStub)
        q = q.filter_by(network=dbnetwork)
        q = q.filter_by(ip=ip)
        dbdns_rec = q.first()
        _forbid_dyndns(dbdns_rec)
        # Verify that no other record uses the same IP address, this time
        # taking the DNS environemt into consideration.
        # While the DNS would allow different A records to point to the same
        # IP address, the current user expectation is that creating a DNS
        # entry also counts as a reservation, so we can not allow this use
        # case. If we want to implement such a feature later, the best way
        # would be to subclass Alias and let that subclass emit an A record
        # instead of a CNAME when the dump_dns command is called.
        q = session.query(ARecord)
        q = q.filter_by(network=dbnetwork)
        q = q.filter_by(ip=ip)
        q = q.join(ARecord.fqdn)
        q = q.filter_by(dns_environment=dns_environment)
        dbrecords = q.all()
        if dbrecords and len(dbrecords) > 1:  # pragma: no cover
            # We're just trying to make sure this never happens
            raise AquilonError("IP address %s is referenced by multiple "
                               "DNS records: %s" %
                               (ip, ", ".join([format(rec, "a")
                                               for rec in dbrecords])))
        if dbrecords and dbrecords[0].fqdn != dbfqdn:
            raise ArgumentError("IP address {0} is already in use by "
                                "{1:l}.".format(ip, dbrecords[0]))
        # Check if the name is used already
        for dbdns_rec in dbfqdn.dns_records:
            if isinstance(dbdns_rec, ARecord):
                _forbid_dyndns(dbdns_rec)
                _check_netenv_compat(dbdns_rec, network_environment)
                if dbdns_rec.ip == ip and dbdns_rec.network == dbnetwork:
                    existing_record = dbdns_rec
                elif not allow_multi:
                    raise ArgumentError("{0} points to a different IP "
                                        "address.".format(dbdns_rec))
            elif isinstance(dbdns_rec, ReservedName):
                # Upgrade an existing reservation to a real A record
                existing_record = convert_reserved_to_arecord(session,
                                                              dbdns_rec,
                                                              dbnetwork, ip)
                newly_created = True
            else:
                # Exclude aliases etc.
                raise ArgumentError("{0} cannot be used for address "
                                    "assignment.".format(dbdns_rec))
        if not existing_record:
            existing_record = ARecord(fqdn=dbfqdn, ip=ip, network=dbnetwork,
                                      comments=comments)
            session.add(existing_record)
            newly_created = True
    else:
        if not dbfqdn.dns_records:
            # There's no IP, and the name did not exist before. Create a
            # reservation, but only if the caller allowed that use case.
            if not allow_reserved:
                raise ArgumentError("DNS Record %s does not exist." % dbfqdn)
            existing_record = ReservedName(fqdn=dbfqdn, comments=comments)
            newly_created = True
        else:
            # There's no IP, but the name is already in use. We need a single
            # IP address.
            if len(dbfqdn.dns_records) > 1:
                raise ArgumentError("{0} does not resolve to a single IP "
                                    "address.".format(dbfqdn))
            existing_record = dbfqdn.dns_records[0]
            _forbid_dyndns(existing_record)
            if not isinstance(existing_record, ARecord):
                # Exclude aliases etc.
                raise ArgumentError("{0} cannot be used for address "
                                    "assignment.".format(existing_record))
            # Verify that the existing record is in the network environment
            # the caller expects
            _check_netenv_compat(existing_record, network_environment)
            ip = existing_record.ip
            dbnetwork = existing_record.network
            dbnetwork.lock_row()
    if existing_record.hardware_entity:
        raise ArgumentError("{0} is already used as the primary name of {1:cl} "
                            "{1.label}.".format(existing_record,
                                                existing_record.hardware_entity))
    if preclude and not newly_created:
        raise ArgumentError("{0} already exists.".format(existing_record))
    if ip:
        # The address must not already be assigned to an interface
        q = session.query(AddressAssignment)
        q = q.filter_by(network=dbnetwork)
        q = q.filter_by(ip=ip)
        addr = q.first()
        if addr:
            raise ArgumentError("IP address {0} is already in use by "
                                "{1:l}.".format(ip, addr.interface))
    return (existing_record, newly_created)
def render(self, session, logger, startip, endip, dns_domain, prefix,
           **arguments):
    """Reserve an IP range for dynamic DHCP use.

    Creates one DynamicStub DNS record per address in [startip, endip]
    (named "<prefix>-a-b-c-d.<dns_domain>") and registers each in DSDB.
    Both endpoints must be on the same network, and the range must be
    free of address assignments and DNS records.
    """
    if not prefix:
        prefix = 'dynamic'
    dbnet_env = NetworkEnvironment.get_unique_or_default(session)
    dbdns_env = DnsEnvironment.get_unique_or_default(session)
    startnet = get_net_id_from_ip(session, startip, dbnet_env)
    endnet = get_net_id_from_ip(session, endip, dbnet_env)
    if startnet != endnet:
        raise ArgumentError("IP addresses %s (%s) and %s (%s) must be on "
                            "the same subnet." % (startip, startnet.ip,
                                                  endip, endnet.ip))
    dbdns_domain = DnsDomain.get_unique(session, dns_domain, compel=True)
    # Lock both the DNS domain and the network against concurrent changes
    dbdns_domain.lock_row()
    startnet.lock_row()
    # The range must not contain any assigned addresses...
    q = session.query(AddressAssignment.ip)
    q = q.filter_by(network=startnet)
    q = q.filter(AddressAssignment.ip >= startip)
    q = q.filter(AddressAssignment.ip <= endip)
    q = q.order_by(AddressAssignment.ip)
    conflicts = q.all()
    if conflicts:
        raise ArgumentError("Cannot allocate the address range because the "
                            "following IP addresses are already in use:\n" +
                            ", ".join([str(c.ip) for c in conflicts]))
    # No filtering on DNS environment. If an address is dynamic in one
    # environment, it should not be considered static in a different
    # environment.
    q = session.query(ARecord)
    q = q.filter_by(network=startnet)
    q = q.filter(ARecord.ip >= startip)
    q = q.filter(ARecord.ip <= endip)
    q = q.order_by(ARecord.ip)
    conflicts = q.all()
    if conflicts:
        raise ArgumentError("Cannot allocate the address range because the "
                            "following DNS records already exist:\n" +
                            "\n".join([format(c, "a") for c in conflicts]))
    dsdb_runner = DSDBRunner(logger=logger)
    with session.no_autoflush:
        for ipint in range(int(startip), int(endip) + 1):
            ip = IPv4Address(ipint)
            check_ip_restrictions(startnet, ip)
            # e.g. "dynamic-10-0-0-1"
            name = "%s-%s" % (prefix, str(ip).replace('.', '-'))
            dbfqdn = Fqdn.get_or_create(session, name=name,
                                        dns_domain=dbdns_domain,
                                        dns_environment=dbdns_env,
                                        preclude=True)
            dbdynamic_stub = DynamicStub(fqdn=dbfqdn, ip=ip, network=startnet)
            session.add(dbdynamic_stub)
            dsdb_runner.add_host_details(dbfqdn, ip)
    session.flush()
    # This may take some time if the range is big, so be verbose
    dsdb_runner.commit_or_rollback("Could not add addresses to DSDB",
                                   verbose=True)
    return
self.az.check_network_environment(dbuser, dbnet_env) # Check if the name is free. Network names are not unique in QIP and # there is no uniqueness constraint in AQDB, so only warn if the name is # already in use. q = session.query(Network).filter_by(name=network) dbnetwork = q.first() if dbnetwork: logger.client_info("WARNING: Network name %s is already used for " "address %s." % (network, str(dbnetwork.network))) # Check if the address is free try: dbnetwork = get_net_id_from_ip(session, address.ip, network_environment=dbnet_env) raise ArgumentError( "IP address %s is part of existing network " "named %s with address %s." % (str(address.ip), dbnetwork.name, str(dbnetwork.network))) except NotFoundException: pass # Okay, all looks good, let's create the network net = Network(name=network, network=address, network_environment=dbnet_env, network_type=type, side=side, location=location,
def render(self, session, network, network_environment, ip, type, side, machine, fqdn, cluster, pg, has_dynamic_ranges, fullinfo, **arguments): """Return a network matching the parameters. Some of the search terms can only return a unique network. For those (like ip and fqdn) we proceed with the query anyway. This allows for quick scripted tests like "is the network for X.X.X.X a tor_net2?". """ dbnet_env = NetworkEnvironment.get_unique_or_default( session, network_environment) q = session.query(Network) q = q.filter_by(network_environment=dbnet_env) if network: # Note: the network name is not unique (neither in QIP) q = q.filter_by(name=network) if ip: dbnetwork = get_net_id_from_ip(session, ip, dbnet_env) q = q.filter_by(id=dbnetwork.id) if type: q = q.filter_by(network_type=type) if side: q = q.filter_by(side=side) if machine: dbmachine = Machine.get_unique(session, machine, compel=True) vlans = [] if dbmachine.cluster and dbmachine.cluster.switch: # If this is a VM on a cluster, consult the VLANs. There # could be functionality here for real hardware to consult # interface port groups... there's no real use case yet. vlans = [ VlanInfo.get_vlan_id(session, i.port_group) for i in dbmachine.interfaces if i.port_group ] if vlans: q = q.join('observed_vlans') q = q.filter_by(switch=dbmachine.cluster.switch) q = q.filter(ObservedVlan.vlan_id.in_(vlans)) q = q.reset_joinpoint() if not vlans: networks = [ addr.network.id for addr in dbmachine.all_addresses() ] if not networks: msg = "Machine %s has no interfaces " % dbmachine.label if dbmachine.cluster: msg += "with a portgroup or " msg += "assigned to a network." 
raise ArgumentError(msg) q = q.filter(Network.id.in_(networks)) if fqdn: (short, dbdns_domain) = parse_fqdn(session, fqdn) dnsq = session.query(ARecord.ip) dnsq = dnsq.join(ARecord.fqdn) dnsq = dnsq.filter_by(name=short) dnsq = dnsq.filter_by(dns_domain=dbdns_domain) networks = [ get_net_id_from_ip(session, addr.ip, dbnet_env).id for addr in dnsq.all() ] q = q.filter(Network.id.in_(networks)) if cluster: dbcluster = Cluster.get_unique(session, cluster, compel=True) if dbcluster.switch: q = q.join('observed_vlans') q = q.filter_by(switch=dbcluster.switch) q = q.reset_joinpoint() else: net_ids = [ h.machine.primary_name.network.id for h in dbcluster.hosts if getattr(h.machine.primary_name, "network") ] q = q.filter(Network.id.in_(net_ids)) if pg: vlan = VlanInfo.get_vlan_id(session, pg, compel=ArgumentError) q = q.join('observed_vlans') q = q.filter_by(vlan_id=vlan) q = q.reset_joinpoint() dblocation = get_location(session, **arguments) if dblocation: if arguments.get('exact_location'): q = q.filter_by(location=dblocation) else: childids = dblocation.offspring_ids() q = q.filter(Network.location_id.in_(childids)) if has_dynamic_ranges: q = q.filter( exists([DynamicStub.dns_record_id], from_obj=DynamicStub.__table__.join( ARecord.__table__)).where( Network.id == DynamicStub.network_id)) q = q.order_by(Network.ip) if fullinfo: q = q.options(undefer('comments')) return q.all() return ShortNetworkList(q.all())
def render(self, session, logger, interface, machine, mac, automac,
           model, vendor, pg, autopg, iftype, type, comments, **arguments):
    """Add an interface to a machine.

    Guesses the interface type from the name if not given (--type is
    deprecated in favour of --iftype), optionally auto-generates the MAC
    and/or port group, may consume a conflicting 'blind' host's identity
    and IP, then writes plenaries and updates DSDB under the plenary key.
    """
    dbmachine = Machine.get_unique(session, machine, compel=True)
    # Snapshot taken before any changes so DSDB sees old vs. new state
    oldinfo = DSDBRunner.snapshot_hw(dbmachine)
    audit_results = []

    if type:
        # Fixed: the two literals previously concatenated to
        # "Please use --iftypeinstead." (missing space).
        self.deprecated_option("type", "Please use --iftype "
                               "instead.", logger=logger, **arguments)
        if not iftype:
            iftype = type

    if not iftype:
        # Guess the interface type from its name
        iftype = "public"
        management_types = ["bmc", "ilo", "ipmi"]
        for mtype in management_types:
            if interface.startswith(mtype):
                iftype = "management"
                break
        if interface.startswith("bond"):
            iftype = "bonding"
        elif interface.startswith("br"):
            iftype = "bridge"
        # Test it last, VLANs can be added on top of almost anything
        if "." in interface:
            iftype = "vlan"

    if iftype == "oa" or iftype == "loopback":
        raise ArgumentError("Interface type '%s' is not valid for "
                            "machines." % iftype)

    bootable = None
    if iftype == "public":
        # Convention: eth0 is the boot interface
        if interface == "eth0":
            bootable = True
        else:
            bootable = False

    dbmanager = None
    pending_removals = PlenaryCollection()
    dsdb_runner = DSDBRunner(logger=logger)
    if mac:
        prev = session.query(Interface).filter_by(mac=mac).first()
        if prev and prev.hardware_entity == dbmachine:
            raise ArgumentError("{0} already has an interface with MAC "
                                "address {1}.".format(dbmachine, mac))
        # Is the conflicting interface something that can be
        # removed?  It is if:
        # - we are currently attempting to add a management interface
        # - the old interface belongs to a machine
        # - the old interface is associated with a host
        # - that host was blindly created, and thus can be removed safely
        if (prev and iftype == "management"
                and prev.hardware_entity.hardware_type == "machine"
                and prev.hardware_entity.host
                and prev.hardware_entity.host.status.name == "blind"):
            # FIXME: Is this just always allowed?  Maybe restrict
            # to only aqd-admin and the host itself?
            dummy_machine = prev.hardware_entity
            dummy_ip = dummy_machine.primary_ip
            old_fqdn = str(dummy_machine.primary_name)
            old_iface = prev.name
            old_mac = prev.mac
            old_network = get_net_id_from_ip(session, dummy_ip)
            self.remove_prev(session, logger, prev, pending_removals)
            session.flush()
            dsdb_runner.delete_host_details(old_fqdn, dummy_ip, old_iface,
                                            old_mac)
            self.consolidate_names(session, logger, dbmachine,
                                   dummy_machine.label, pending_removals)
            # It seems like a shame to throw away the IP address that
            # had been allocated for the blind host.  Try to use it
            # as it should be used...
            dbmanager = self.add_manager(session, logger, dbmachine,
                                         dummy_ip, old_network)
        elif prev:
            msg = describe_interface(session, prev)
            raise ArgumentError("MAC address %s is already in use: %s." %
                                (mac, msg))
    elif automac:
        mac = self.generate_mac(session, dbmachine)
        audit_results.append(("mac", mac))
    else:
        # Ignore now that Mac Address can be null
        pass

    if pg is not None:
        port_group = verify_port_group(dbmachine, pg)
    elif autopg:
        port_group = choose_port_group(session, logger, dbmachine)
        audit_results.append(("pg", port_group))
    else:
        port_group = None

    dbinterface = get_or_create_interface(session, dbmachine,
                                          name=interface, vendor=vendor,
                                          model=model,
                                          interface_type=iftype, mac=mac,
                                          bootable=bootable,
                                          port_group=port_group,
                                          comments=comments, preclude=True)

    # So far, we're *only* creating a manager if we happen to be
    # removing a blind entry and we can steal its IP address.
    if dbmanager:
        assign_address(dbinterface, dbmanager.ip, dbmanager.network,
                       logger=logger)

    session.add(dbinterface)
    session.flush()

    plenaries = PlenaryCollection(logger=logger)
    plenaries.append(Plenary.get_plenary(dbmachine))
    if pending_removals and dbmachine.host:
        # Not an exact test, but the file won't be re-written
        # if the contents are the same so calling too often is
        # not a major expense.
        plenaries.append(Plenary.get_plenary(dbmachine.host))
    # Even though there may be removals going on the write key
    # should be sufficient here.
    with plenaries.get_key():
        pending_removals.stash()
        try:
            plenaries.write(locked=True)
            pending_removals.remove(locked=True)
            dsdb_runner.update_host(dbmachine, oldinfo)
            dsdb_runner.commit_or_rollback("Could not update host in DSDB")
        except:
            # Roll back the on-disk state before re-raising
            plenaries.restore_stash()
            pending_removals.restore_stash()
            raise

    if dbmachine.host:
        # FIXME: reconfigure host
        pass

    for name, value in audit_results:
        self.audit_result(session, name, value, **arguments)
    return
def render(self, session, logger, gateway, networkip, ip, netmask,
           prefixlen, network_environment, comments, personality,
           archetype, **arguments):
    """Add a static route to a network.

    The gateway is either given explicitly (--gateway) or derived from
    the network (--networkip): if the network has exactly one registered
    router its address is used, otherwise the network's default gateway
    offset is taken as an educated guess.

    Raises ArgumentError if the gateway cannot be determined, if the
    destination is not a proper network address, or if an overlapping
    route already exists for the same personality.
    """
    dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                         network_environment)
    if gateway and networkip:
        raise ArgumentError("Exactly one of --gateway and --networkip "
                            "should be specified.")
    elif gateway:
        dbnetwork = get_net_id_from_ip(session, gateway, dbnet_env)
    elif networkip:
        dbnetwork = get_network_byip(session, networkip, dbnet_env)
        if dbnetwork.routers:
            if len(dbnetwork.routers) > 1:
                raise ArgumentError("More than one router exists. "
                                    "Please specify one with --gateway")
            else:
                gateway = dbnetwork.routers[0].ip
                logger.client_info("Gateway %s taken from router address "
                                   "of network %s." %
                                   (gateway, dbnetwork.network))
        else:
            # No routers are defined, so take an educated guess
            gateway = dbnetwork.network[dbnetwork.default_gateway_offset]
            logger.client_info("Gateway %s taken from default offset %d "
                               "for network %s." %
                               (gateway, dbnetwork.default_gateway_offset,
                                dbnetwork.network))
    else:
        # Bug fix: the original message lacked the verb ("Please either
        # --gateway or --networkip").
        raise ArgumentError("Please specify either --gateway or "
                            "--networkip.")

    # The destination may be given either as --ip/--netmask or as
    # --ip/--prefixlen.
    if netmask:
        dest = IPv4Network("%s/%s" % (ip, netmask))
    else:
        dest = IPv4Network("%s/%s" % (ip, prefixlen))
    if dest.network != ip:
        raise ArgumentError("%s is not a network address; "
                            "did you mean %s." % (ip, dest.network))

    if personality:
        dbpersonality = Personality.get_unique(session, name=personality,
                                               archetype=archetype,
                                               compel=True)
    else:
        dbpersonality = None

    # TODO: this will have to be changed if we want equal cost multipath
    # etc.
    for route in dbnetwork.static_routes:
        if dest.overlaps(route.destination):
            # A personality-bound route only conflicts with routes of
            # the same personality.
            if route.personality and route.personality != dbpersonality:
                continue
            raise ArgumentError("{0} already has an overlapping route to "
                                "{1} using gateway {2}."
                                .format(dbnetwork, route.destination,
                                        route.gateway_ip))

    route = StaticRoute(network=dbnetwork, dest_ip=dest.ip,
                        dest_cidr=dest.prefixlen, gateway_ip=gateway,
                        personality=dbpersonality, comments=comments)
    session.add(route)
    session.flush()

    # TODO: refresh affected host templates
    return
def render(self, session, dbuser, ip, netmask, prefixlen,
           network_environment, **arguments):
    """Split an existing network into equally sized smaller subnets."""
    if netmask:
        # There must be a faster way, but this is the easy one
        dummy_net = IPv4Network("127.0.0.0/%s" % netmask)
        prefixlen = dummy_net.prefixlen
    if not 8 <= prefixlen <= 32:
        raise ArgumentError("The prefix length must be between 8 and 32.")

    dbnet_env = NetworkEnvironment.get_unique_or_default(
        session, network_environment)
    self.az.check_network_environment(dbuser, dbnet_env)
    dbnetwork = get_net_id_from_ip(session, ip,
                                   network_environment=dbnet_env)

    # Splitting only makes sense towards longer prefixes.
    if prefixlen <= dbnetwork.cidr:
        raise ArgumentError("The specified --prefixlen must be bigger "
                            "than the current value.")

    subnets = dbnetwork.network.subnet(new_prefix=prefixlen)

    # Addresses that will become subnet network/broadcast addresses after
    # the split must not be in use.
    bad_ips = []
    for piece in subnets:
        bad_ips.extend((piece.ip, piece.broadcast))

    query = session.query(AddressAssignment.ip)
    query = query.filter_by(network=dbnetwork)
    query = query.filter(AddressAssignment.ip.in_(bad_ips))
    blockers = query.all()
    if blockers:
        raise ArgumentError("Network split failed, because the following "
                            "subnet IP and/or broadcast addresses are "
                            "assigned to hosts: %s" %
                            ", ".join([str(addr.ip) for addr in blockers]))

    query = session.query(ARecord.ip)
    query = query.filter_by(network=dbnetwork)
    query = query.filter(ARecord.ip.in_(bad_ips))
    blockers = query.all()
    if blockers:
        raise ArgumentError("Network split failed, because the following "
                            "subnet IP and/or broadcast addresses are "
                            "registered in the DNS: %s" %
                            ", ".join([str(addr.ip) for addr in blockers]))

    # The first segment keeps the original name (e.g. "foo"); subsequent
    # segments are named "foo_2", "foo_3", ... hence the starting index.
    name_idx = 2
    dbnets = []
    for subnet in dbnetwork.network.subnet(new_prefix=prefixlen):
        # The original network record is kept for the first segment
        if subnet.ip == dbnetwork.ip:
            continue

        # Generate a new name.  Make it unique, even if the DB does not
        # enforce that currently
        while True:
            # TODO: check if the new name is too long
            name = "%s_%d" % (dbnetwork.name, name_idx)
            name_idx += 1

            query = session.query(Network)
            query = query.filter_by(network_environment=dbnet_env)
            query = query.filter_by(name=name)
            if query.count() == 0:
                break

            # Should not happen...
            if name_idx > 1000:  # pragma: no cover
                raise AquilonError("Could not generate a unique network "
                                   "name in a reasonable time, bailing out")

        # Inherit location & side from the supernet
        newnet = Network(
            name=name, network=subnet, network_environment=dbnet_env,
            location=dbnetwork.location, side=dbnetwork.side,
            comments="Created by splitting {0:a}".format(dbnetwork))
        session.add(newnet)
        dbnets.append(newnet)

    # Shrink the original network record down to the first segment.
    dbnetwork.cidr = prefixlen
    session.flush()

    for newnet in dbnets:
        fix_foreign_links(session, dbnetwork, newnet)

    session.flush()
def generate_ip(session, logger, dbinterface, ip=None, ipfromip=None,
                ipfromsystem=None, autoip=None, ipalgorithm=None,
                compel=False, network_environment=None, audit_results=None,
                **kwargs):
    """Determine the IP address to use for an interface.

    At most one of ip/ipfromip/ipfromsystem/autoip may be given.  An
    explicit ip is returned as-is; the other options select a network
    and then a free address on it is picked according to ipalgorithm
    ('lowest' [default], 'highest' or 'max').

    Returns None when no option was given and compel is False.
    Raises ArgumentError on conflicting options or allocation failure.
    """
    ip_options = [ip, ipfromip, ipfromsystem, autoip]
    numopts = sum([1 if opt else 0 for opt in ip_options])
    if numopts > 1:
        raise ArgumentError("Only one of --ip, --ipfromip, --ipfromsystem "
                            "and --autoip can be specified.")
    elif numopts == 0:
        if compel:
            raise ArgumentError("Please specify one of the --ip, --ipfromip, "
                                "--ipfromsystem, and --autoip parameters.")
        return None

    if ip:
        # Explicit address: nothing to compute.
        return ip

    dbnetwork = None

    if autoip:
        if not dbinterface:
            raise ArgumentError("No interface available to automatically "
                                "generate an IP address.")
        if dbinterface.port_group:
            # This could either be an interface from a virtual machine
            # or an interface on an ESX vmhost.
            dbcluster = None
            if getattr(dbinterface.hardware_entity, "cluster", None):
                # VM
                dbcluster = dbinterface.hardware_entity.cluster
            elif getattr(dbinterface.hardware_entity, "host", None):
                dbcluster = dbinterface.hardware_entity.host.cluster
            if not dbcluster:
                raise ArgumentError("Can only automatically assign an IP "
                                    "address to an interface with a port "
                                    "group on virtual machines or ESX hosts.")
            if not dbcluster.switch:
                raise ArgumentError("Cannot automatically assign an IP "
                                    "address to an interface with a port group "
                                    "since {0} is not associated with a "
                                    "switch.".format(dbcluster))
            # The network is selected via the observed VLAN of the
            # cluster's switch.
            vlan_id = VlanInfo.get_vlan_id(session, dbinterface.port_group)
            dbnetwork = ObservedVlan.get_network(session, vlan_id=vlan_id,
                                                 switch=dbcluster.switch,
                                                 compel=ArgumentError)
        elif dbinterface.mac:
            # Use the most recently seen switch that observed this MAC.
            q = session.query(ObservedMac)
            q = q.filter_by(mac_address=dbinterface.mac)
            q = q.order_by(desc(ObservedMac.last_seen))
            dbom = q.first()
            if not dbom:
                raise ArgumentError("No switch found in the discovery table "
                                    "for MAC address %s." % dbinterface.mac)
            if not dbom.switch.primary_ip:
                raise ArgumentError("{0} does not have a primary IP address "
                                    "to use for network "
                                    "selection.".format(dbom.switch))
            dbnetwork = get_net_id_from_ip(session, dbom.switch.primary_ip)
        else:
            raise ArgumentError("{0} has neither a MAC address nor port group "
                                "information, it is not possible to generate "
                                "an IP address automatically."
                                .format(dbinterface))

    if ipfromsystem:
        # Assumes one system entry, not necessarily correct.
        dbdns_rec = ARecord.get_unique(session, fqdn=ipfromsystem,
                                       compel=True)
        dbnetwork = dbdns_rec.network

    if ipfromip:
        # determine network
        dbnetwork = get_net_id_from_ip(session, ipfromip,
                                       network_environment)

    if not dbnetwork:
        # Bug fix: the original formatted this message with dbsystem.fqdn,
        # but dbsystem was always None at this point, so the error report
        # itself would have raised an AttributeError.
        raise ArgumentError("Could not determine network to use for %s." %
                            (ipfromsystem or ipfromip or autoip))

    # When there are e.g. multiple "add manager --autoip" operations going
    # on in parallel, we must ensure that they won't try to use the same IP
    # address.  This query places a database lock on the network, which
    # means IP address generation within a network will be serialized,
    # while operations on different networks can still run in parallel.
    # The lock will be released by COMMIT or ROLLBACK.
    dbnetwork.lock_row()

    startip = dbnetwork.first_usable_host

    used_ips = session.query(ARecord.ip)
    used_ips = used_ips.filter_by(network=dbnetwork)
    used_ips = used_ips.filter(ARecord.ip >= startip)

    # Work on integer representations so set arithmetic can find the gaps.
    full_set = set(range(int(startip), int(dbnetwork.broadcast)))
    used_set = set([int(item.ip) for item in used_ips])
    free_set = full_set - used_set

    if not free_set:
        raise ArgumentError("No available IP addresses found on "
                            "network %s." % str(dbnetwork.network))

    if ipalgorithm is None or ipalgorithm == 'lowest':
        # Select the lowest available address
        ip = IPv4Address(min(free_set))
    elif ipalgorithm == 'highest':
        # Select the highest available address
        ip = IPv4Address(max(free_set))
    elif ipalgorithm == 'max':
        # Return the max. used address + 1
        if not used_set:
            # Avoids ValueError being thrown when used_set is empty
            ip = IPv4Address(min(free_set))
        else:
            # Renamed from "next" to avoid shadowing the builtin.
            highest_used = max(used_set)
            if highest_used + 1 not in free_set:
                raise ArgumentError("Failed to find an IP that is suitable "
                                    "for --ipalgorithm=max. Try an other "
                                    "algorithm as there are still some free "
                                    "addresses.")
            ip = IPv4Address(highest_used + 1)
    else:
        raise ArgumentError("Unknown algorithm %s." % ipalgorithm)

    if audit_results is not None:
        if dbinterface:
            logger.info("Selected IP address {0!s} for {1:l}".format(
                ip, dbinterface))
        else:
            logger.info("Selected IP address %s" % ip)
        audit_results.append(('ip', ip))
    return ip
# NOTE(review): this is the body of an "add network" style command; the
# enclosing method header (with parameters network, address, type, side,
# location, comments, ...) lies outside this chunk — confirm against the
# full file.
dbnet_env = NetworkEnvironment.get_unique_or_default(session,
                                                     network_environment)
self.az.check_network_environment(dbuser, dbnet_env)

# Check if the name is free. Network names are not unique in QIP and
# there is no uniqueness constraint in AQDB, so only warn if the name is
# already in use.
q = session.query(Network).filter_by(name=network)
dbnetwork = q.first()
if dbnetwork:
    logger.client_info("WARNING: Network name %s is already used for "
                       "address %s." % (network, str(dbnetwork.network)))

# Check if the address is free
try:
    dbnetwork = get_net_id_from_ip(session, address.ip,
                                   network_environment=dbnet_env)
    # Reaching this point means the lookup succeeded, i.e. the address
    # already belongs to an existing network -- reject the request.
    raise ArgumentError("IP address %s is part of existing network "
                        "named %s with address %s." %
                        (str(address.ip), dbnetwork.name,
                         str(dbnetwork.network)))
except NotFoundException:
    # The address is not covered by any known network -- good.
    pass

# Okay, all looks good, let's create the network
net = Network(name=network, network=address,
              network_environment=dbnet_env, network_type=type,
              side=side, location=location, comments=comments)
session.add(net)
session.flush()
return
class CommandAddAuroraHost(CommandAddHost):
    # Only the hostname is mandatory; everything else is looked up in DSDB.
    required_parameters = ["hostname"]

    # Look for node name like <building><rack_id>c<chassis_id>n<node_num>
    nodename_re = re.compile(r'^\s*([a-zA-Z]+)(\d+)c(\d+)n(\d+)\s*$')

    def render(self, session, logger, hostname, osname, osversion, **kwargs):
        """Register an Aurora host, creating machine/rack/chassis records
        from DSDB data as needed.

        NOTE(review): this method is truncated at the end of this chunk;
        it continues past the last visible statement.
        """
        # Pull relevant info out of dsdb...
        dsdb_runner = DSDBRunner(logger=logger)
        try:
            fields = dsdb_runner.show_host(hostname)
        except ProcessException, e:
            raise ArgumentError("Could not find %s in DSDB: %s" %
                                (hostname, e))

        fqdn = fields["fqdn"]
        dsdb_lookup = fields["dsdb_lookup"]

        # Pick the machine identifier with decreasing preference:
        # node name, then primary name, then the raw DSDB lookup key.
        if fields["node"]:
            machine = fields["node"]
        elif fields["primary_name"]:
            machine = fields["primary_name"]
        else:
            machine = dsdb_lookup

        # Create a machine
        dbmodel = Model.get_unique(session, name="aurora_model",
                                   vendor="aurora_vendor", compel=True)
        dbmachine = Machine.get_unique(session, machine)
        dbslot = None
        if not dbmachine:
            m = self.nodename_re.search(machine)
            if m:
                # The node name encodes building, rack, chassis and slot.
                (building, rid, cid, nodenum) = m.groups()
                dbbuilding = session.query(Building).filter_by(
                    name=building).first()
                if not dbbuilding:
                    raise ArgumentError("Failed to find building %s for "
                                        "node %s, please add an Aurora "
                                        "machine manually and follow with "
                                        "add_host." % (building, machine))

                rack = building + rid
                dbrack = session.query(Rack).filter_by(name=rack).first()
                if not dbrack:
                    # Try to auto-create the rack from DSDB data;
                    # best-effort only.
                    try:
                        rack_fields = dsdb_runner.show_rack(rack)
                        dbrack = Rack(name=rack, fullname=rack,
                                      parent=dbbuilding,
                                      rack_row=rack_fields["rack_row"],
                                      rack_column=rack_fields["rack_col"])
                        session.add(dbrack)
                    except (ProcessException, ValueError), e:
                        logger.client_info("Rack %s not defined in DSDB."
                                           % rack)
                # Fall back to the building when the rack is unknown.
                dblocation = dbrack or dbbuilding

                chassis = rack + "c" + cid
                dbdns_domain = session.query(DnsDomain).filter_by(
                    name="ms.com").first()
                dbchassis = Chassis.get_unique(session, chassis)
                if not dbchassis:
                    dbchassis_model = Model.get_unique(
                        session, name="aurora_chassis_model",
                        vendor="aurora_vendor", compel=True)
                    dbchassis = Chassis(label=chassis, location=dblocation,
                                        model=dbchassis_model)
                    session.add(dbchassis)
                    dbfqdn = Fqdn.get_or_create(session, name=chassis,
                                                dns_domain=dbdns_domain,
                                                preclude=True)
                    dbdns_rec = ReservedName(fqdn=dbfqdn)
                    session.add(dbdns_rec)
                    dbchassis.primary_name = dbdns_rec

                dbslot = session.query(ChassisSlot).filter_by(
                    chassis=dbchassis, slot_number=nodenum).first()
                # Note: Could be even more persnickity here and check that
                # the slot is currently empty. Seems like overkill.
                if not dbslot:
                    dbslot = ChassisSlot(chassis=dbchassis,
                                         slot_number=nodenum)
                    session.add(dbslot)
            else:
                # The node name does not match the pattern: derive the
                # location from the host's resolved IP address instead.
                dbnet_env = NetworkEnvironment.get_unique_or_default(session)
                try:
                    host_ip = gethostbyname(hostname)
                except gaierror, e:
                    raise ArgumentError("Error when looking up host: %d, %s"
                                        % (e.errno, e.strerror))
                dbnetwork = get_net_id_from_ip(session,
                                               IPv4Address(host_ip),
                                               dbnet_env)
                dblocation = dbnetwork.location.building
                # NOTE(review): method continues beyond this chunk.
comments = re.sub("\s+", " ", comments) comments = comments.strip() # This is the easy part, so deal with it first update_switch(dbmodel, serial_no, comments) primary_ip = dbswitch.primary_name.ip # Build a lookup table of discovered IP addresses ip_to_iface = {} networks = [] for ifname, params in data["interfaces"].items(): for ipstr, label in params["ip"].items(): ip = IPv4Address(ipstr) try: dbnetwork = get_net_id_from_ip(session, ip, dbnet_env) ip_to_iface[ip] = {"name": ifname, "label": label} networks.append(dbnetwork) except NotFoundException: warning("Skipping IP address %s: network not found." % ip) # Avoid creating the interface if there are no valid IPs del params["ip"][ipstr] if not params["ip"]: del data["interfaces"][ifname] # Some switches do not report their management IP address. In theory the # discovery script should fix that up, but better to be sure. if primary_ip not in ip_to_iface: for iface in dbswitch.interfaces: if primary_ip in iface.addresses:
def render(self, session, logger, startip, endip, dns_domain, prefix,
           **arguments):
    """Allocate a range of dynamic DHCP stub records in the given domain."""
    prefix = prefix or 'dynamic'

    dbnet_env = NetworkEnvironment.get_unique_or_default(session)
    dbdns_env = DnsEnvironment.get_unique_or_default(session)

    startnet = get_net_id_from_ip(session, startip, dbnet_env)
    endnet = get_net_id_from_ip(session, endip, dbnet_env)
    if startnet != endnet:
        raise ArgumentError("IP addresses %s (%s) and %s (%s) must be on "
                            "the same subnet." % (startip, startnet.ip,
                                                  endip, endnet.ip))

    dbdns_domain = DnsDomain.get_unique(session, dns_domain, compel=True)

    # Serialize concurrent allocations on the same domain/network.
    dbdns_domain.lock_row()
    startnet.lock_row()

    # Reject the range if any address in it is already assigned.
    in_use = session.query(AddressAssignment.ip) \
        .filter_by(network=startnet) \
        .filter(AddressAssignment.ip >= startip) \
        .filter(AddressAssignment.ip <= endip) \
        .order_by(AddressAssignment.ip) \
        .all()
    if in_use:
        raise ArgumentError("Cannot allocate the address range because the "
                            "following IP addresses are already in use:\n" +
                            ", ".join([str(c.ip) for c in in_use]))

    # No filtering on DNS environment. If an address is dynamic in one
    # environment, it should not be considered static in a different
    # environment.
    existing_dns = session.query(ARecord) \
        .filter_by(network=startnet) \
        .filter(ARecord.ip >= startip) \
        .filter(ARecord.ip <= endip) \
        .order_by(ARecord.ip) \
        .all()
    if existing_dns:
        raise ArgumentError("Cannot allocate the address range because the "
                            "following DNS records already exist:\n" +
                            "\n".join([format(c, "a") for c in existing_dns]))

    dsdb_runner = DSDBRunner(logger=logger)
    with session.no_autoflush:
        for numeric_ip in range(int(startip), int(endip) + 1):
            addr = IPv4Address(numeric_ip)
            check_ip_restrictions(startnet, addr)
            stub_name = "%s-%s" % (prefix, str(addr).replace('.', '-'))
            dbfqdn = Fqdn.get_or_create(session, name=stub_name,
                                        dns_domain=dbdns_domain,
                                        dns_environment=dbdns_env,
                                        preclude=True)
            session.add(DynamicStub(fqdn=dbfqdn, ip=addr, network=startnet))
            dsdb_runner.add_host_details(dbfqdn, addr)

    session.flush()

    # This may take some time if the range is big, so be verbose
    dsdb_runner.commit_or_rollback("Could not add addresses to DSDB",
                                   verbose=True)
    return