class Rack(Location):
    """ Rack is a subtype of location """
    __tablename__ = 'rack'
    __mapper_args__ = {'polymorphic_identity': 'rack'}

    # A rack may only be housed in one of these location types.
    valid_parents = [Building, Room, Bunker]

    id = Column(Integer, ForeignKey('location.id', name='rack_loc_fk',
                                    ondelete='CASCADE'), primary_key=True)

    # Coordinates of the rack within the building/room grid.
    rack_row = Column(AqStr(4), nullable=True)
    rack_column = Column(AqStr(4), nullable=True)

    @validates('rack_row', 'rack_column')
    def check_rack_coordinates(self, key, value):
        """Validate the row/column: alphanumeric, or None to clear it.

        The columns are nullable, so None must pass through unchanged;
        previously str(None) == "None" slipped past the isalnum() check
        and was stored as the literal string "None".
        """
        if value is None:
            return None
        value = str(value).strip()
        if not value.isalnum():
            msg = "the value '%s' for %s must be alphanumeric" % (value, key)
            raise ArgumentError(msg)
        return value
class Resource(Base):
    """ Abstraction of specific tables (e.g. VM or Filesystem) into a
        resource

        A resource is a generic bundle that can be attached to hosts or
        clusters. The resource can take on different shapes such as a VM
        or a filesystem, but there are many common operations we want
        to perform on them and therefore we map the specific types of
        bundles into this Resource class.
    """
    __tablename__ = _TN

    id = Column(Integer, Sequence('%s_seq' % _TN), primary_key=True)
    # Polymorphic discriminator, set by each subclass mapper.
    resource_type = Column(AqStr(16), nullable=False)
    name = Column(AqStr(64), nullable=False)
    creation_date = deferred(Column(DateTime, default=datetime.now,
                                    nullable=False))
    comments = Column(String(255), nullable=True)
    holder_id = Column(Integer, ForeignKey('%s.id' % _RESHOLDER,
                                           name='%s_resholder_fk' % _TN,
                                           ondelete='CASCADE'),
                       nullable=False)

    # Deleting the holder deletes its resources too (delete-orphan).
    holder = relation(ResourceHolder, innerjoin=True,
                      backref=backref('resources',
                                      cascade='all, delete-orphan'))

    # (name, type) only needs to be unique within a single holder.
    __table_args__ = (UniqueConstraint(holder_id, name, resource_type,
                                       name='%s_holder_name_type_uk' % _TN),
                      Index('%s_holder_idx' % _TN, holder_id))
    __mapper_args__ = {'polymorphic_on': resource_type}

    @property
    def template_base(self):
        # Base path of the template for this resource, rooted at the
        # holder's own path.
        return "resource/%s/%s/%s" % (self.holder.holder_path,
                                      self.resource_type, self.name)

    @validates('holder')
    def _validate_holder(self, key, value):
        # Indirection so subclasses can override validate_holder() and
        # still have the @validates hook fire polymorphically.
        return self.validate_holder(key, value)

    def validate_holder(self, key, value):
        # Subclasses may override to restrict the allowed holder types.
        return value

    def __lt__(self, other):
        # Quick optimizations to not have to evaluate the name.
        if self.holder != other.holder:
            if self.holder.holder_type != other.holder.holder_type:
                return self.holder.holder_type < other.holder.holder_type
            return self.holder.holder_name < other.holder.holder_name
        if self.resource_type != other.resource_type:
            return self.resource_type < other.resource_type
        return self.name < other.name

    def __repr__(self):
        return "<{0:c} Resource {0.name} of {1}>".format(self, self.holder)
def force_my_sandbox(self, session, logger, dbuser, sandbox):
    """Strip an optional 'author/' prefix from a sandbox name.

    Returns the normalized branch name.  When the author/branch syntax
    is used, the author must be the calling user: nobody may act on
    behalf of somebody else, so any other author raises ArgumentError.
    """
    author, slash, branch = sandbox.rpartition('/')
    branch = AqStr.normalize(branch)
    if not slash:
        # Plain branch name, nothing to verify.
        return branch
    # User used the name/branch syntax - that's fine. They can't
    # do anything on behalf of anyone else, though, so error if the
    # user given is anyone else.
    if AqStr.normalize(author) != dbuser.name:
        raise ArgumentError("User '%s' cannot add or get a sandbox on "
                            "behalf of '%s'." % (dbuser.name, author))
    return branch
class Disk(Base):
    """ Base Class for polymorphic representation of disk or disk-like
        resources """
    __tablename__ = _TN

    id = Column(Integer, Sequence('%s_id_seq' % _TN), primary_key=True)
    # Polymorphic discriminator, set by each subclass mapper.
    disk_type = Column(Enum(64, disk_types), nullable=False)
    # Capacity in GB (see __repr__ below).
    capacity = Column(Integer, nullable=False)
    device_name = Column(AqStr(128), nullable=False, default='sda')
    controller_type = Column(Enum(64, controller_types), nullable=False)

    # We need to know the bus address of each disk.
    # This isn't really nullable, but single-table inheritance means
    # that the base class will end up with the column and the base class
    # wants it to be nullable. We enforce this via __init__ instead.
    # NOTE(review): the enforcing __init__ lives in the subclasses that
    # need the address — confirm every such subclass does enforce it.
    address = Column(AqStr(128), nullable=True)

    machine_id = Column(Integer, ForeignKey('machine.machine_id',
                                            name='disk_machine_fk',
                                            ondelete='CASCADE'),
                        nullable=False)

    bootable = Column(Boolean(name="%s_bootable_ck" % _TN), nullable=False,
                      default=False)

    creation_date = deferred(Column(DateTime, default=datetime.now,
                                    nullable=False))
    comments = deferred(Column(String(255), nullable=True))

    # The order_by here ensures that machine templates always list the
    # disks in the same order. Technically order is irrelevant in the
    # template since the disks are stored in a hash but this helps with
    # the tests and with preventing spurious re-writes.
    machine = relation(Machine,
                       backref=backref('disks', cascade='all',
                                       order_by=[device_name]))

    __table_args__ = (UniqueConstraint(machine_id, device_name,
                                       name='disk_mach_dev_name_uk'),)
    __mapper_args__ = {'polymorphic_on': disk_type,
                       'with_polymorphic': '*'}

    def __repr__(self):
        # The default __repr__() is too long
        return "<%s %s (%s) of machine %s, %d GB>" % \
            (self._get_class_label(), self.device_name,
             self.controller_type, self.machine.label, self.capacity)
class System(Base):
    """ System: a base class which abstracts out the details of between
        all the various kinds of service providers we may use. A System
        might be a host/server/workstation, router, firewall, netapp,
        etc. Naming this is kind of difficult, but "system" seems
        neutral, and happens not to be overloaded by anything I am
        aware of.

        This is exactly what system does. System_id holds a name, and
        presents an abstract entity that can provide, or utilize
        services, hardware, networks, configuration sources, etc. It's
        subtyped for flexibilty to weather future expansion and
        enhancement without breaking the fundamental archetypes we're
        building today.

        It is perhaps the most important table so far, and replaces the
        notion of 'host' as we've used it in our discussions and designs
        thus far.
    """
    __tablename__ = 'system'

    id = Column(Integer, Sequence('SYSTEM_SEQ'), primary_key=True)
    name = Column(AqStr(64), nullable=False)

    #TODO: create enum_types for this
    # Polymorphic discriminator (see __mapper_args__ below).
    system_type = Column(AqStr(32), nullable=False)

    dns_domain_id = Column(Integer, ForeignKey('dns_domain.id',
                                               name='SYSTEM_DNS_FK'),
                           nullable=False)
    #TODO: default
    mac = Column(AqMac(17), nullable=True)
    ip = Column(IPV4, nullable=True)
    network_id = Column(Integer, ForeignKey('network.id',
                                            name='SYSTEM_NET_ID_FK'),
                        nullable=True)

    creation_date = deferred(Column(DateTime, default=datetime.now,
                                    nullable=False))
    comments = deferred(Column('comments', String(255), nullable=True))

    dns_domain = relation(DnsDomain)
    network = relation(Network, backref='interfaces')

    __mapper_args__ = {'polymorphic_on': system_type}

    def _fqdn(self):
        # "<name>.<dns domain name>"
        return '.'.join([str(self.name), str(self.dns_domain.name)])
    fqdn = property(_fqdn)
class ObservedMac(Base):
    """ reports the observance of a mac address on a switch port. """
    __tablename__ = _TN

    switch_id = Column(Integer, ForeignKey('switch.hardware_entity_id',
                                           ondelete='CASCADE',
                                           name='obs_mac_hw_fk'),
                      nullable=False)

    port = Column(AqStr(32), nullable=False)

    mac_address = Column(AqMac(17), nullable=False)

    # When this (switch, port, mac) tuple was first recorded.
    creation_date = deferred(Column(DateTime, default=datetime.now,
                                    nullable=False))

    # Refreshed each time the same tuple is observed again.
    last_seen = Column('last_seen', DateTime, default=datetime.now,
                       nullable=False)

    switch = relation(Switch, backref=backref('observed_macs',
                                              cascade='delete',
                                              order_by=[port]))

    # No surrogate key: the observation tuple itself is the identity.
    __table_args__ = (PrimaryKeyConstraint(switch_id, port, mac_address), )
class HostGrnMap(Base):
    """Association of a host with a GRN (EON id) for a given target."""
    __tablename__ = _HOSTGRN

    host_id = Column(Integer, ForeignKey("%s.machine_id" % _TN,
                                         name="%s_host_fk" % _HOSTGRN,
                                         ondelete="CASCADE"),
                     nullable=False)

    eon_id = Column(Integer, ForeignKey('grn.eon_id',
                                        name='%s_grn_fk' % _HOSTGRN),
                    nullable=False)

    host = relation(Host, innerjoin=True,
                    backref=backref('_grns', cascade='all, delete-orphan',
                                    passive_deletes=True))

    grn = relation(Grn, lazy=False, innerjoin=True,
                   backref=backref('_hosts', passive_deletes=True))

    # NOTE(review): target is flagged primary_key=True while
    # __table_args__ declares a (host_id, eon_id) PrimaryKeyConstraint —
    # confirm the composite key is meant to include target.
    target = Column(AqStr(32), nullable=False, primary_key=True)

    # used by unmap
    @property
    def mapped_object(self):
        return self.host

    __table_args__ = (PrimaryKeyConstraint(host_id, eon_id), )
def render(self, session, logger, prefix, dns_domain, hostname, machine,
           **args):
    """Add a host with an auto-generated name "<prefix><N>.<domain>".

    When --dns_domain is not given, the machine's location chain is
    walked bottom-up for a default DNS domain.  N is the next free
    number for the prefix within that domain.  Returns the chosen
    hostname.
    """
    if dns_domain:
        dbdns_domain = DnsDomain.get_unique(session, dns_domain,
                                            compel=True)
    else:
        dbmachine = Machine.get_unique(session, machine, compel=True)
        dbdns_domain = None
        # Walk the location chain upwards until a default domain is set.
        loc = dbmachine.location
        while loc and not dbdns_domain:
            dbdns_domain = loc.default_dns_domain
            loc = loc.parent
        if not dbdns_domain:
            raise ArgumentError("There is no default DNS domain configured "
                                "for the machine's location. Please "
                                "specify --dns_domain.")

    # Lock the DNS domain to prevent the same name generated for
    # simultaneous requests
    dbdns_domain.lock_row()

    prefix = AqStr.normalize(prefix)
    result = search_next(session=session, cls=Fqdn, attr=Fqdn.name,
                         value=prefix, dns_domain=dbdns_domain,
                         start=None, pack=None)
    # NOTE(review): relies on str(dbdns_domain) producing the bare domain
    # name — confirm against the base class' __str__.
    hostname = "%s%d.%s" % (prefix, result, dbdns_domain)
    CommandAddHost.render(self, session, logger, hostname=hostname,
                          machine=machine, **args)
    logger.info("Selected host name %s" % hostname)
    self.audit_result(session, 'hostname', hostname, **args)
    return hostname
class ParamDefHolder(Base):
    """
    The dbobj with which this parameter paths are associated with.
    """
    __tablename__ = _PARAM_DEF_HOLDER
    _class_label = 'Parameter Definition Holder'
    _instance_label = 'holder_name'

    id = Column(Integer, Sequence('%s_seq' % _PARAM_DEF_HOLDER),
                primary_key=True)

    # Polymorphic discriminator for the concrete holder subclasses.
    type = Column(AqStr(16), nullable=False)

    creation_date = deferred(Column(DateTime, default=datetime.now,
                                    nullable=False))

    __mapper_args__ = {'polymorphic_on': type}

    @property
    def holder_name(self):  # pragma: no cover
        # Subclasses must override.
        raise InternalError("Abstract base method called")

    @property
    def holder_object(self):  # pragma: no cover
        # Subclasses must override.
        raise InternalError("Abstract base method called")
class ResourceHolder(Base):
    """ Who owns this resource? This could be a variety of different
        owners (e.g. a host, a cluster) and so we have a polymorphic
        class to represent this ownership - that enforces that only a
        single class can ever have ownership. We enforce uniqueness of
        (resource name, resourceholder). As an example, you could have
        a filesystem resource called 'root' on many hosts.
    """
    __tablename__ = _RESHOLDER

    id = Column(Integer, Sequence('%s_seq' % _RESHOLDER), primary_key=True)

    # I don't like this as a string...
    # Polymorphic discriminator for the concrete holder subclasses.
    holder_type = Column(AqStr(16), nullable=False)

    __mapper_args__ = {'polymorphic_on': holder_type}

    @property
    def holder_name(self):  # pragma: no cover
        # Subclasses must override.
        raise InternalError("Abstract base method called")

    @property
    def holder_object(self):  # pragma: no cover
        # Subclasses must override.
        raise InternalError("Abstract base method called")

    @validates('resources')
    def _validate_resources(self, key, value):
        # Indirection so subclasses can override validate_resources()
        # and still have the @validates hook fire.
        return self.validate_resources(key, value)

    def validate_resources(self, key, value):
        # Subclasses may override to restrict what may be attached.
        return value

    @property
    def holder_path(self):
        # e.g. "host/<name>" — used when building template paths.
        return "%s/%s" % (self.holder_type, self.holder_name)
class VlanInfo(Base):
    """ information regarding well-known/standardized vlans """
    __tablename__ = _VTN
    _instance_label = 'vlan_id'

    vlan_id = Column(Integer, primary_key=True, autoincrement=False)
    port_group = Column(AqStr(32), nullable=False)
    vlan_type = Column(Enum(32, VLAN_TYPES), nullable=False)

    __table_args__ = (UniqueConstraint(port_group,
                                       name='%s_port_group_uk' % _VTN),
                      CheckConstraint(and_(vlan_id >= 0,
                                           vlan_id < MAX_VLANS),
                                      name='%s_vlan_id_ck' % _VTN))

    @classmethod
    def get_vlan_id(cls, session, port_group, compel=InternalError):
        """Return the VLAN id for a port group.

        If there is no match: raise compel (an exception class) when it
        is set, otherwise return None.  Previously a falsy compel fell
        through to info.vlan_id and raised AttributeError on None.
        """
        info = session.query(cls).filter_by(port_group=port_group).first()
        if not info:
            if compel:
                raise compel("No VLAN found for port group %s" % port_group)
            return None
        return info.vlan_id

    @classmethod
    def get_port_group(cls, session, vlan_id, compel=InternalError):
        """Return the port group for a VLAN id (see get_vlan_id)."""
        info = session.query(cls).filter_by(vlan_id=vlan_id).first()
        if not info:
            if compel:
                raise compel("No port group found for VLAN id %s" % vlan_id)
            return None
        return info.port_group

    def __repr__(self):
        return '<%s vlan_id=%s port_group=%s vlan_type=%s>' % (
            self.__class__.__name__, self.vlan_id, self.port_group,
            self.vlan_type)
class Feature(Base):
    """Base class for the polymorphic feature types."""
    __tablename__ = _TN

    # Subclasses may allow binding after personality processing.
    post_personality_allowed = False

    id = Column(Integer, Sequence("%s_id_seq" % _TN), primary_key=True)
    name = Column(String(128), nullable=False)
    # Polymorphic discriminator, set by each subclass mapper.
    feature_type = Column(AqStr(16), nullable=False)
    post_personality = Column(Boolean(name="%s_post_personality_ck" % _TN),
                              nullable=False, default=False)
    creation_date = deferred(Column(DateTime, default=datetime.now,
                                    nullable=False))
    comments = deferred(Column(String(255), nullable=True))

    __table_args__ = (UniqueConstraint(name, feature_type,
                                       name='%s_name_type_uk' % _TN),)
    __mapper_args__ = {'polymorphic_on': feature_type}

    @validates('links')
    def _validate_link(self, key, link):
        # Due to how decorators work, we have to do a level of indirection
        # to make polymorphism work
        return self.validate_link(key, link)

    def validate_link(self, key, link):  # pragma: no cover
        # Subclasses may override to restrict what the feature binds to.
        return link
def render(self, session, machine, model, vendor, machine_type, chassis,
           slot, **arguments):
    """Search for machines matching the given filters.

    Returns the matching Machine rows ordered by label.
    """
    q = session.query(Machine)
    if machine:
        # TODO: This command still mixes search/show facilities.
        # For now, give an error if machine name not found, but
        # also allow the command to be used to check if the machine has
        # the requested attributes (via the standard query filters).
        # In the future, this should be clearly separated as 'show machine'
        # and 'search machine'.
        machine = AqStr.normalize(machine)
        Machine.check_label(machine)
        Machine.get_unique(session, machine, compel=True)
        q = q.filter_by(label=machine)
    dblocation = get_location(session, **arguments)
    if dblocation:
        q = q.filter_by(location=dblocation)
    if chassis:
        dbchassis = Chassis.get_unique(session, chassis, compel=True)
        q = q.join('chassis_slot')
        q = q.filter_by(chassis=dbchassis)
        q = q.reset_joinpoint()
    if slot is not None:
        q = q.join('chassis_slot')
        q = q.filter_by(slot_number=slot)
        q = q.reset_joinpoint()
    if model or vendor or machine_type:
        # NOTE(review): a near-identical copy of this command elsewhere in
        # this file passes model_type= here instead of machine_type= —
        # confirm which keyword Model.get_matching_query() accepts.
        subq = Model.get_matching_query(session, name=model, vendor=vendor,
                                        machine_type=machine_type,
                                        compel=True)
        q = q.filter(Machine.model_id.in_(subq))
    return q.order_by(Machine.label).all()
def render(self, session, machine, model, vendor, machine_type, chassis,
           slot, **arguments):
    """Search for machines matching the given filters.

    Returns the matching Machine rows ordered by label.
    """
    q = session.query(Machine)
    if machine:
        # TODO: This command still mixes search/show facilities.
        # For now, give an error if machine name not found, but
        # also allow the command to be used to check if the machine has
        # the requested attributes (via the standard query filters).
        # In the future, this should be clearly separated as 'show machine'
        # and 'search machine'.
        machine = AqStr.normalize(machine)
        Machine.check_label(machine)
        Machine.get_unique(session, machine, compel=True)
        q = q.filter_by(label=machine)
    dblocation = get_location(session, **arguments)
    if dblocation:
        q = q.filter_by(location=dblocation)
    if chassis:
        dbchassis = Chassis.get_unique(session, chassis, compel=True)
        q = q.join('chassis_slot')
        q = q.filter_by(chassis=dbchassis)
        q = q.reset_joinpoint()
    if slot is not None:
        q = q.join('chassis_slot')
        q = q.filter_by(slot_number=slot)
        q = q.reset_joinpoint()
    if model or vendor or machine_type:
        # NOTE(review): a near-identical copy of this command elsewhere in
        # this file passes machine_type= here instead of model_type= —
        # confirm which keyword Model.get_matching_query() accepts.
        subq = Model.get_matching_query(session, name=model, vendor=vendor,
                                        model_type=machine_type,
                                        compel=True)
        q = q.filter(Machine.model_id.in_(subq))
    return q.order_by(Machine.label).all()
class Disk(Base):
    """ Base Class for polymorphic representation of disk or disk-like
        resources """
    __tablename__ = _TN

    id = Column(Integer, Sequence('%s_id_seq' % (_TN)), primary_key=True)
    # Polymorphic discriminator, set by each subclass mapper.
    disk_type = Column(Enum(64, disk_types), nullable=False)
    # Capacity in GB (see __repr__ below).
    capacity = Column(Integer, nullable=False)
    device_name = Column(AqStr(128), nullable=False, default='sda')
    controller_type = Column(Enum(64, controller_types), nullable=False)

    machine_id = Column(Integer, ForeignKey('machine.machine_id',
                                            name='disk_machine_fk',
                                            ondelete='CASCADE'),
                        nullable=False)

    creation_date = Column(DateTime, default=datetime.now, nullable=False)
    comments = Column(String(255), nullable=True)

    machine = relation(Machine, backref=backref('disks', cascade='all'))

    __mapper_args__ = {'polymorphic_on': disk_type}

    def __repr__(self):
        return '%s <machine %s %s /dev/%s %s GB> ' % (
            self.__class__.__name__, self.machine.name,
            self.controller_type, self.device_name, self.capacity)
class StringTbl(Base):
    """Minimal table used to exercise the AqStr column type."""
    __tablename__ = 'aqstr_test'

    id = Column(Integer, primary_key=True)
    name = Column(AqStr(16), nullable=False, unique=True)

    def __repr__(self):
        # Renders as: ClassName "name"
        return '%s "%s"' % (self.__class__.__name__, self.name)
def render(self, session, logger, prefix, **args):
    """Pick the next free label for *prefix* and add the machine.

    Returns the generated machine label.
    """
    prefix = AqStr.normalize(prefix)
    next_index = search_next(session=session, cls=Machine,
                             attr=Machine.label, value=prefix,
                             start=None, pack=None)
    label = '%s%d' % (prefix, next_index)
    args['machine'] = label
    CommandAddMachine.render(self, session, logger, **args)
    logger.info("Selected hardware label %s" % label)
    self.audit_result(session, 'machine', label, **args)
    return label
class Archetype(Base):
    """ Archetype names """
    __tablename__ = _TN

    id = Column(Integer, Sequence('%s_id_seq' % _TN), primary_key=True)
    name = Column(AqStr(32), nullable=False)
    # presumably a human-readable description used in output — confirm
    outputdesc = Column(String(255), nullable=True)

    is_compileable = Column(Boolean(name="%s_is_compileable_ck" % _TN),
                            default=False, nullable=False)

    # NULL for non-cluster archetypes — TODO confirm semantics
    cluster_type = Column(AqStr(32), nullable=True)

    creation_date = deferred(Column(DateTime, default=datetime.now,
                                    nullable=False))
    comments = deferred(Column(String(255), nullable=True))

    __table_args__ = (UniqueConstraint(name, name='%s_uk' % _TN), )
class Rack(Location):
    """ Rack is a subtype of location """
    __tablename__ = 'rack'
    __mapper_args__ = {'polymorphic_identity':'rack'}
    id = Column(Integer, ForeignKey('location.id', name='rack_loc_fk',
                                    ondelete='CASCADE'), primary_key=True)
    #TODO: POSTHASTE: constrain to alphabetic in row, and make both
    # non-nullable
    # NOTE(review): another Rack definition in this file declares
    # rack_column as AqStr(4) with an alphanumeric validator — confirm
    # which schema is current before relying on the Integer type here.
    rack_row = Column(AqStr(4), nullable=True)
    rack_column = Column(Integer, nullable=True)
class NasDisk(Disk):
    """
    Network attached disks required for root diskless machines, primarily
    for virtual machines whose images are hosted on NFS shares. In the
    case of ESX these are mounted by the host OS, not the guest OS.
    """
    __mapper_args__ = {'polymorphic_identity': 'nas'}

    # We need to know the bus address of each disk.
    # This isn't really nullable, but single-table inheritance means
    # that the base class will end up with the column and the base class
    # wants it to be nullable. We enforce this via __init__ instead.
    # (These notes were previously free-floating string literals, which
    # are evaluated and discarded at class-creation time; plain comments
    # carry the same information without the no-op statements.)
    address = Column(AqStr(128), nullable=True)

    # No cascade delete here: we want to restrict any attempt to delete
    # any service instance that has client dependencies.
    service_instance_id = Column(Integer,
                                 ForeignKey('service_instance.id',
                                            name='%s_srv_inst_fk' % _NDTN),
                                 nullable=True)

    # TODO: double check property values on forward and backrefs before
    # commit; cascade ops too
    service_instance = relation(ServiceInstance, backref='nas_disks')

    def __init__(self, **kw):
        # Enforce the NOT NULL semantics the column itself cannot express
        # (see the comment on 'address' above).
        if 'address' not in kw:
            raise ValueError("address is mandatory for nas disks")
        super(NasDisk, self).__init__(**kw)

    def __repr__(self):
        return '%s <machine %s %s /dev/%s %s GB provided by %s> ' % (
            self.__class__.__name__, self.machine.name,
            self.controller_type, self.device_name, self.capacity,
            self.service_instance.name)
def __init__(self, label=None, **kwargs):
    """Create a hardware entity; a non-empty label is mandatory."""
    normalized = AqStr.normalize(label)
    if not normalized:
        raise ArgumentError("HardwareEntity needs a label.")
    super(HardwareEntity, self).__init__(label=normalized, **kwargs)
class AddressAssignment(Base):
    """ Assignment of IP addresses to network interfaces.

    It's kept as an association map to model the linkage, since we need
    to have maximum ability to provide potentially complex configuration
    scenarios, such as advertising certain VIP addresses from some, but
    not all of the network interfaces on a machine (to be used for
    backup servers, cluster filesystem servers, NetApp filers, etc.).
    While in most cases we can assume VIPs are broadcast out all
    interfaces on the box we still need to have the underlying model as
    the more complex many to many relationship implemented here.
    """
    __tablename__ = _TN

    # Allowed shape of a (non-empty) address label; see __init__.
    _label_check = re.compile('^[a-z0-9]{0,16}$')

    id = Column(Integer, Sequence('%s_seq' % _TN), primary_key=True)

    interface_id = Column(Integer,
                          ForeignKey('interface.id',
                                     name='%s_interface_id_fk' % _ABV,
                                     ondelete='CASCADE'),
                          nullable=False)

    # Stored label; '-' is the placeholder for "no label" (see __init__
    # and the label property below).
    _label = Column("label", AqStr(16), nullable=False)

    ip = Column(IPV4, nullable=False)

    network_id = Column(Integer, ForeignKey('network.id',
                                            name='%s_network_fk' % _TN),
                        nullable=False)

    service_address_id = Column(Integer,
                                ForeignKey('service_address.resource_id',
                                           name='%s_srv_addr_id' % _ABV,
                                           ondelete="CASCADE"),
                                nullable=True)

    # This should be the same as
    # network.network_environment.dns_environment_id, but using that
    # would mean joining two extra tables in the dns_records relation
    dns_environment_id = Column(Integer,
                                ForeignKey('dns_environment.id',
                                           name='%s_dns_env_fk' % _ABV),
                                nullable=False)

    creation_date = deferred(Column(DateTime, default=datetime.now,
                                    nullable=False))
    comments = deferred(Column(String(255), nullable=True))

    interface = relation(Interface, innerjoin=True,
                         backref=backref('assignments',
                                         order_by=[_label],
                                         cascade='all, delete-orphan'))

    dns_environment = relation(DnsEnvironment, innerjoin=True)

    # Setting viewonly is very important here as we do not want the
    # removal of an AddressAssignment record to change the linked DNS
    # record(s).  Can't use backref or back_populates due to the
    # different mappers.
    dns_records = relation(dns_fqdn_mapper, uselist=True,
                           primaryjoin=and_(network_id == ARecord.network_id,
                                            ip == ARecord.ip,
                                            dns_environment_id ==
                                            Fqdn.dns_environment_id),
                           foreign_keys=[ARecord.ip,
                                         Fqdn.dns_environment_id],
                           viewonly=True)

    fqdns = association_proxy('dns_records', 'fqdn')

    network = relation(Network,
                       backref=backref('assignments',
                                       passive_deletes=True,
                                       order_by=[ip]))

    __table_args__ = (UniqueConstraint(interface_id, ip,
                                       name="%s_iface_ip_uk" % _ABV),
                      UniqueConstraint(interface_id, _label,
                                       name="%s_iface_label_uk" % _ABV),
                      Index("%s_service_addr_idx" % _ABV,
                            service_address_id),
                      Index("%s_network_ip_idx" % _ABV, network_id, ip),
                      Index("%s_dns_env_idx" % _ABV, dns_environment_id))

    @property
    def logical_name(self):
        """ Compute an OS-agnostic name for this interface/address combo.

        BIG FAT WARNING: do _NOT_ assume that this name really exist on
        the host!

        There are external systems like DSDB that can not handle having
        multiple addresses on the same interface. Because of that this
        function generates an unique name for every interface/address
        tuple.
        """
        # Use the Linux naming convention because people are familiar
        # with that and it is easy to parse if needed
        name = self.interface.name
        if self.label:
            name += ":%s" % self.label
        return name

    @property
    def label(self):
        # Translate the stored '-' placeholder back to the empty string.
        if self._label == '-':
            return ""
        else:
            return self._label

    def __init__(self, label=None, network=None, **kwargs):
        # This is dirty. We want to allow empty labels, but Oracle
        # converts empty strings to NULL, violating the NOT NULL
        # constraint. We could allow label to be NULL and relying on the
        # unique indexes to forbid adding multiple empty labels, but
        # that is again Oracle-specific behavior which actually violates
        # the SQL standard, so it would not work with other databases.
        if not label:
            label = '-'
        elif not self._label_check.match(label):  # pragma: no cover
            raise ValueError("Illegal address label '%s'." % label)

        # Right now network_id is nullable due to how refresh_network
        # works, so verify the network here
        if not network:  # pragma: no cover
            raise InternalError("AddressAssignment needs a network")

        super(AddressAssignment, self).__init__(_label=label,
                                                network=network,
                                                **kwargs)

    def __repr__(self):
        return "<Address %s on %s/%s>" % (
            self.ip, self.interface.hardware_entity.label,
            self.logical_name)
class Fqdn(Base):
    """A fully qualified name: (name, dns_domain, dns_environment)."""
    __tablename__ = _TN
    _instance_label = 'fqdn'

    id = Column(Integer, Sequence('%s_id_seq' % _TN), primary_key=True)

    # Left-most label only; the rest of the name comes from dns_domain.
    name = Column(AqStr(63), nullable=False)

    dns_domain_id = Column(Integer,
                           ForeignKey('dns_domain.id',
                                      name='%s_dns_domain_fk' % _TN),
                           nullable=False)

    dns_environment_id = Column(Integer,
                                ForeignKey('dns_environment.id',
                                           name='%s_dns_env_fk' % _TN),
                                nullable=False)

    creation_date = deferred(Column(DateTime, default=datetime.now,
                                    nullable=False))

    dns_domain = relation(DnsDomain, innerjoin=True)
    dns_environment = relation(DnsEnvironment, innerjoin=True)

    __table_args__ = (UniqueConstraint(dns_domain_id, name,
                                       dns_environment_id,
                                       name='%s_domain_name_env_uk' % _TN),
                      Index('%s_dns_env_idx' % _TN, dns_environment_id))

    @property
    def fqdn(self):
        # Full dotted name, e.g. "name.example.com".
        return self.name + '.' + self.dns_domain.name

    @classmethod
    def get_unique(cls, session, fqdn=None, dns_environment=None, name=None,
                   dns_domain=None, **kwargs):
        """Like the base get_unique(), but also accepts an FQDN string."""
        if fqdn:
            if name or dns_domain:  # pragma: no cover
                raise TypeError("fqdn and name/dns_domain should not be mixed")
            (name, dns_domain) = parse_fqdn(session, fqdn)

        if not isinstance(dns_environment, DnsEnvironment):
            dns_environment = DnsEnvironment.get_unique_or_default(
                session, dns_environment)

        return super(Fqdn, cls).get_unique(session, name=name,
                                           dns_domain=dns_domain,
                                           dns_environment=dns_environment,
                                           **kwargs)

    @classmethod
    def get_or_create(cls, session, dns_environment=None, preclude=False,
                      ignore_name_check=False, query_options=None,
                      **kwargs):
        """Return the existing Fqdn, or create and add a new one.

        With preclude set, an existing Fqdn raises instead of being
        returned.
        """
        fqdn = cls.get_unique(session, dns_environment=dns_environment,
                              query_options=query_options, **kwargs)
        if fqdn:
            if preclude:
                _raise_custom(preclude, ArgumentError,
                              "{0} already exists.".format(fqdn))
            return fqdn

        if not isinstance(dns_environment, DnsEnvironment):
            dns_environment = DnsEnvironment.get_unique_or_default(
                session, dns_environment)

        fqdn = cls(session=session, dns_environment=dns_environment,
                   ignore_name_check=ignore_name_check, **kwargs)
        session.add(fqdn)
        return fqdn

    @classmethod
    def check_name(cls, name, dns_domain, ignore_name_check=False):
        """ Validate the name parameter """

        if not isinstance(name, basestring):  # pragma: no cover
            raise TypeError("%s: name must be a string." % cls.name)
        if not isinstance(dns_domain, DnsDomain):  # pragma: no cover
            raise TypeError("%s: dns_domain must be a DnsDomain." % cls.name)

        # Allow SRV records to opt out from this test
        if not ignore_name_check:
            DnsDomain.check_label(name)

        # The limit for DNS name length is 255, assuming wire format.
        # This translates to 253 for simple ASCII text; see:
        # http://www.ops.ietf.org/lists/namedroppers/namedroppers.2003/msg00964.html
        if len(name) + 1 + len(dns_domain.name) > 253:
            raise ArgumentError('The fully qualified domain name is too long.')

    def _check_session(self, session):
        # Several constructors below need a live session for lookups.
        if not session or not isinstance(session, Session):  # pragma: no cover
            raise InternalError("%s needs a session." %
                                self._get_class_label())

    def __init__(self, session=None, name=None, dns_domain=None, fqdn=None,
                 dns_environment=None, ignore_name_check=False, **kwargs):
        if fqdn:
            if name or dns_domain:  # pragma: no cover
                raise TypeError("fqdn and name/dns_domain should not be mixed")
            self._check_session(session)
            (name, dns_domain) = parse_fqdn(session, fqdn)

        self.check_name(name, dns_domain, ignore_name_check)

        if not isinstance(dns_environment, DnsEnvironment):
            self._check_session(session)
            dns_environment = DnsEnvironment.get_unique_or_default(
                session, dns_environment)

        super(Fqdn, self).__init__(name=name, dns_domain=dns_domain,
                                   dns_environment=dns_environment,
                                   **kwargs)
def get_unique(cls, sess, name, hardware_type=None, compel=False,
               preclude=False, query_options=None):
    """ Returns a unique HardwareEntity given session and fqdn """

    # If the hardware_type param isn't explicitly set and we have a
    # polymorphic identity, assume we're querying only for items of our
    # hardware_type.
    if hardware_type:
        if isclass(hardware_type):
            clslabel = hardware_type._get_class_label()
            hardware_type = hardware_type.__mapper_args__[
                'polymorphic_identity']
        else:
            pcls = cls.__mapper__.polymorphic_map[hardware_type].class_
            clslabel = pcls._get_class_label()
    else:
        if 'polymorphic_identity' in cls.__mapper_args__:
            hardware_type = cls.__mapper_args__['polymorphic_identity']
        clslabel = cls._get_class_label()

    # The automagic DNS lookup does not really make sense with
    # preclude=True
    if preclude:
        name = AqStr.normalize(name)
        cls.check_label(name)

    q = sess.query(cls)
    if "." in name:
        # Dotted names are resolved through DNS first.
        dns_rec = DnsRecord.get_unique(sess, fqdn=name, compel=True)
        # We know the primary name, do not load it again
        q = q.options(lazyload('primary_name'))
        q = q.filter_by(primary_name=dns_rec)
    else:
        dns_rec = None
        q = q.filter_by(label=name)
    if query_options:
        q = q.options(*query_options)

    try:
        hwe = q.one()
    except NoResultFound:
        # Check if the name is in use by a different hardware type
        q = sess.query(HardwareEntity)
        if dns_rec:
            # We know the primary name, do not load it again
            q = q.options(lazyload('primary_name'))
            q = q.filter_by(primary_name=dns_rec)
        else:
            q = q.filter_by(label=name)
        try:
            hwe = q.one()
            if dns_rec:
                # We know the primary name, do not load it again
                set_committed_value(hwe, 'primary_name', dns_rec)
            raise ArgumentError("{0} exists, but is not a {1}."
                                .format(hwe, clslabel.lower()))
        except NoResultFound:
            # Not found under any hardware type either.
            hwe = None
            if compel:
                raise NotFoundException("%s %s not found." % (clslabel,
                                                              name))

    if hwe:
        if preclude:
            raise ArgumentError('{0} already exists.'.format(hwe))
        if dns_rec:
            # We know the primary name, do not load it again
            set_committed_value(hwe, 'primary_name', dns_rec)
    return hwe
class FeatureLink(Base):
    """Binding of a Feature to an archetype, personality or model,
    optionally scoped to a named interface."""
    __tablename__ = _LINK

    id = Column(Integer, Sequence("%s_id_seq" % _LINK), primary_key=True)

    feature_id = Column(Integer, ForeignKey('feature.id',
                                            name='%s_feat_fk' % _LINK),
                        nullable=False)

    model_id = Column(Integer, ForeignKey('model.id',
                                          name='%s_model_fk' % _LINK,
                                          ondelete='CASCADE'),
                      nullable=True)

    archetype_id = Column(Integer, ForeignKey('archetype.id',
                                              name='%s_arch_fk' % _LINK,
                                              ondelete='CASCADE'),
                          nullable=True)

    personality_id = Column(Integer, ForeignKey('personality.id',
                                                name='%s_pers_fk' % _LINK,
                                                ondelete='CASCADE'),
                            nullable=True)

    interface_name = Column(AqStr(32), nullable=True)

    creation_date = deferred(Column(DateTime, default=datetime.now,
                                    nullable=False))

    feature = relation(Feature, innerjoin=True,
                       backref=backref('links',
                                       cascade='all, delete-orphan'))

    model = relation(Model,
                     backref=backref('features',
                                     cascade='all, delete-orphan'))

    archetype = relation(Archetype,
                         backref=backref('features',
                                         cascade='all, delete-orphan'))

    personality = relation(Personality,
                           backref=backref('features',
                                           cascade='all, delete-orphan'))

    # The behavior of UNIQUE constraints in the presence of NULL columns
    # is not universal. We need the Oracle compatible behavior, meaning:
    # - Trying to add a row like ('a', NULL) two times should fail
    # - Trying to add ('b', NULL) after ('a', NULL) should succeed
    __table_args__ = (UniqueConstraint(feature_id, model_id, archetype_id,
                                       personality_id, interface_name,
                                       name='%s_uk' % _LINK),
                      Index('%s_model_idx' % _LINK, model_id),
                      Index('%s_archetype_idx' % _LINK, archetype_id),
                      Index('%s_personality_idx' % _LINK, personality_id))

    def __init__(self, feature=None, archetype=None, personality=None,
                 model=None, interface_name=None):
        # Archetype and personality are mutually exclusive. This makes
        # querying archetype-wide features a bit easier
        if archetype and personality:  # pragma: no cover
            raise InternalError("Archetype and personality are mutually "
                                "exclusive.")

        if interface_name and not personality:  # pragma: no cover
            raise InternalError("Binding to a named interface requires "
                                "a personality.")

        super(FeatureLink, self).__init__(feature=feature,
                                          archetype=archetype,
                                          personality=personality,
                                          model=model,
                                          interface_name=interface_name)

    @classmethod
    def get_unique(cls, session, feature=None, archetype=None,
                   personality=None, model=None, interface_name=None,
                   compel=False, preclude=False):
        """Look up the link for the given binding.

        compel/preclude may be exception classes and are raised when the
        link is missing or already present, respectively.
        """
        if feature is None:  # pragma: no cover
            raise ValueError("Feature must be specified.")

        q = session.query(cls)
        q = q.filter_by(feature=feature, archetype=archetype,
                        personality=personality, model=model,
                        interface_name=interface_name)
        try:
            result = q.one()
            if preclude:
                msg = "{0} is already bound to {1}.".format(
                    feature, _error_msg(archetype, personality, model,
                                        interface_name))
                _raise_custom(preclude, ArgumentError, msg)
        except NoResultFound:
            if not compel:
                return None
            msg = "{0} is not bound to {1}.".format(
                feature, _error_msg(archetype, personality, model,
                                    interface_name))
            _raise_custom(compel, NotFoundException, msg)

        return result

    @property
    def cfg_path(self):
        # Template path of the bound feature, qualified by the model or
        # interface name when one is set.
        format_str = "%s/%s"
        if self.model:
            return format_str % (self.feature.cfg_path, self.model)
        if self.interface_name:
            return format_str % (self.feature.cfg_path,
                                 self.interface_name)
        return self.feature.cfg_path

    @property
    def cfg_path_escaped(self):
        # Same as cfg_path, but the qualifier is wrapped in {} for
        # contexts that need escaping.
        format_str = "%s/{%s}"
        if self.model:
            return format_str % (self.feature.cfg_path, self.model)
        if self.interface_name:
            return format_str % (self.feature.cfg_path,
                                 self.interface_name)
        return self.feature.cfg_path
class HardwareEntity(Base):
    """Polymorphic base for physical hardware tracked in the database.

    Subtypes are discriminated on the ``hardware_type`` column. Every
    entity has a unique lowercase ``label`` and may have a primary DNS
    name.
    """
    __tablename__ = _TN
    _instance_label = 'printable_name'

    id = Column(Integer, Sequence('%s_seq' % _TN), primary_key=True)
    label = Column(AqStr(63), nullable=False)
    hardware_type = Column(AqStr(64), nullable=False)

    location_id = Column(Integer, ForeignKey('location.id',
                                             name='%s_location_fk' % _ABV),
                         nullable=False)

    model_id = Column(Integer, ForeignKey('model.id',
                                          name='%s_model_fk' % _ABV),
                      nullable=False)

    serial_no = Column(String(64), nullable=True)

    primary_name_id = Column(Integer, ForeignKey('dns_record.id',
                                                 name='%s_pri_name_fk' % _ABV),
                             nullable=True)

    creation_date = deferred(
        Column(DateTime, default=datetime.now, nullable=False))

    # Most of the update_* commands need to load the comments due to
    # snapshot_hw(), so it is not worth deferring it
    comments = Column(String(255), nullable=True)

    location = relation(Location)
    model = relation(Model)

    # When working with machines the primary name always crops up, so load it
    # eagerly
    # This is a one-to-one relation, so we need uselist=False on the backref
    primary_name = relation(DnsRecord, lazy=False,
                            backref=backref('hardware_entity', uselist=False,
                                            passive_deletes=True))

    __table_args__ = (UniqueConstraint(label, name='%s_label_uk' % _TN),
                      UniqueConstraint('primary_name_id',
                                       name='%s_pri_name_uk' % _ABV),
                      Index('%s_location_idx' % _ABV, location_id),
                      Index('%s_model_idx' % _ABV, model_id))
    __mapper_args__ = {'polymorphic_on': hardware_type}

    # Lowercase letter followed by up to 62 lowercase alphanumerics
    _label_check = re.compile("^[a-z][a-z0-9]{,62}$")

    @classmethod
    def check_label(cls, label):
        """Raise ArgumentError if ``label`` is not a valid hardware label."""
        if not cls._label_check.match(label):
            raise ArgumentError("Illegal hardware label format '%s'. Only "
                                "alphanumeric characters are allowed, and "
                                "the first character must be a letter."
                                % label)

    @validates('label')
    def validate_label(self, key, value):
        # ORM-level enforcement of the label syntax on assignment
        self.check_label(value)
        return value

    def __init__(self, label=None, **kwargs):
        # A normalized, non-empty label is mandatory
        label = AqStr.normalize(label)
        if not label:
            raise ArgumentError("HardwareEntity needs a label.")
        super(HardwareEntity, self).__init__(label=label, **kwargs)

    def __lt__(self, other):
        # Order entities by label
        return self.label < other.label

    @property
    def fqdn(self):
        """ Returns the FQDN, if there is a primary name """
        if self.primary_name:
            return str(self.primary_name.fqdn)
        else:
            return None

    @property
    def primary_ip(self):
        """ Returns the primary IP, if there is one """
        if self.primary_name and hasattr(self.primary_name, "ip"):
            return self.primary_name.ip
        else:
            return None

    @property
    def printable_name(self):
        """ Returns the most meaningful name """
        if self.primary_name:
            return str(self.primary_name.fqdn)
        else:
            return self.label

    @classmethod
    def get_unique(cls, sess, name, hardware_type=None, compel=False,
                   preclude=False, query_options=None):
        """ Returns a unique HardwareEntity given session and fqdn """

        # If the hardware_type param isn't explicitly set and we have a
        # polymorphic identity, assume we're querying only for items of our
        # hardware_type.
        if hardware_type:
            if isclass(hardware_type):
                clslabel = hardware_type._get_class_label()
                hardware_type = hardware_type.__mapper_args__[
                    'polymorphic_identity']
            else:
                pcls = cls.__mapper__.polymorphic_map[hardware_type].class_
                clslabel = pcls._get_class_label()
        else:
            if 'polymorphic_identity' in cls.__mapper_args__:
                hardware_type = cls.__mapper_args__['polymorphic_identity']
            clslabel = cls._get_class_label()

        # The automagic DNS lookup does not really make sense with preclude=True
        if preclude:
            name = AqStr.normalize(name)
            cls.check_label(name)

        q = sess.query(cls)
        # A dot in the name means it is an FQDN, not a label
        if "." in name:
            dns_rec = DnsRecord.get_unique(sess, fqdn=name, compel=True)
            # We know the primary name, do not load it again
            q = q.options(lazyload('primary_name'))
            q = q.filter_by(primary_name=dns_rec)
        else:
            dns_rec = None
            q = q.filter_by(label=name)
        if query_options:
            q = q.options(*query_options)

        try:
            hwe = q.one()
        except NoResultFound:
            # Check if the name is in use by a different hardware type
            q = sess.query(HardwareEntity)
            if dns_rec:
                # We know the primary name, do not load it again
                q = q.options(lazyload('primary_name'))
                q = q.filter_by(primary_name=dns_rec)
            else:
                q = q.filter_by(label=name)
            try:
                hwe = q.one()
                if dns_rec:
                    # We know the primary name, do not load it again
                    set_committed_value(hwe, 'primary_name', dns_rec)
                # The name exists, but belongs to another subtype
                raise ArgumentError("{0} exists, but is not a {1}.".format(
                    hwe, hardware_type))
            except NoResultFound:
                hwe = None
                if compel:
                    raise NotFoundException("%s %s not found." % (clslabel,
                                                                  name))

        if hwe:
            if preclude:
                raise ArgumentError('{0} already exists.'.format(hwe))
            if dns_rec:
                # We know the primary name, do not load it again
                set_committed_value(hwe, 'primary_name', dns_rec)
        return hwe

    def all_addresses(self):
        """ Iterator returning all addresses of the hardware. """
        for iface in self.interfaces:
            for addr in iface.assignments:
                yield addr
# NOTE(review): this function is a token-for-token duplicate of
# HardwareEntity.get_unique() defined earlier in this file. Its enclosing
# context (class statement, @classmethod decorator) is not visible here —
# presumably a copy/paste or chunking artifact; confirm whether it is dead
# code that should be removed.
def get_unique(cls, sess, name, hardware_type=None, compel=False,
               preclude=False, query_options=None):
    """ Returns a unique HardwareEntity given session and fqdn """

    # If the hardware_type param isn't explicitly set and we have a
    # polymorphic identity, assume we're querying only for items of our
    # hardware_type.
    if hardware_type:
        if isclass(hardware_type):
            clslabel = hardware_type._get_class_label()
            hardware_type = hardware_type.__mapper_args__[
                'polymorphic_identity']
        else:
            pcls = cls.__mapper__.polymorphic_map[hardware_type].class_
            clslabel = pcls._get_class_label()
    else:
        if 'polymorphic_identity' in cls.__mapper_args__:
            hardware_type = cls.__mapper_args__['polymorphic_identity']
        clslabel = cls._get_class_label()

    # The automagic DNS lookup does not really make sense with preclude=True
    if preclude:
        name = AqStr.normalize(name)
        cls.check_label(name)

    q = sess.query(cls)
    # A dot in the name means it is an FQDN, not a label
    if "." in name:
        dns_rec = DnsRecord.get_unique(sess, fqdn=name, compel=True)
        # We know the primary name, do not load it again
        q = q.options(lazyload('primary_name'))
        q = q.filter_by(primary_name=dns_rec)
    else:
        dns_rec = None
        q = q.filter_by(label=name)
    if query_options:
        q = q.options(*query_options)

    try:
        hwe = q.one()
    except NoResultFound:
        # Check if the name is in use by a different hardware type
        q = sess.query(HardwareEntity)
        if dns_rec:
            # We know the primary name, do not load it again
            q = q.options(lazyload('primary_name'))
            q = q.filter_by(primary_name=dns_rec)
        else:
            q = q.filter_by(label=name)
        try:
            hwe = q.one()
            if dns_rec:
                # We know the primary name, do not load it again
                set_committed_value(hwe, 'primary_name', dns_rec)
            # The name exists, but belongs to another subtype
            raise ArgumentError("{0} exists, but is not a {1}.".format(
                hwe, hardware_type))
        except NoResultFound:
            hwe = None
            if compel:
                raise NotFoundException("%s %s not found." % (clslabel,
                                                              name))

    if hwe:
        if preclude:
            raise ArgumentError('{0} already exists.'.format(hwe))
        if dns_rec:
            # We know the primary name, do not load it again
            set_committed_value(hwe, 'primary_name', dns_rec)
    return hwe
class Interface(Base):
    """ Interface: Representation of network interfaces for our network

        This table stores collections of machines, names, mac addresses,
        types, and a bootable flag to aid our DHCP and machine configuration.
    """

    __tablename__ = _TN

    # Any extra fields the subclass needs over the generic interface parameters
    extra_fields = []

    # Name syntax restrictions
    name_check = None

    # Allows setting model/vendor
    model_allowed = False

    # The Natural (and composite) pk is HW_ENT_ID/NAME.
    # But is it the "correct" pk in this case???. The surrogate key is here
    # only because it's easier to have a single target FK in the address
    # association object. It might actually be doable to use the natural key if
    # we try it. The upside: less clutter, meaningful keys. Downside:
    # It's also extra work we may not enjoy, it means rewriting the table
    # since we'd blow away its PK.
    id = Column(Integer, Sequence('%s_seq' % _TN), primary_key=True)

    name = Column(AqStr(32), nullable=False)  # like e0, hme1, etc.

    mac = Column(AqMac(17), nullable=True)

    model_id = Column(Integer, ForeignKey('model.id',
                                          name='%s_model_fk' % _ABV),
                      nullable=False)

    # PXE boot control. Does not affect how the OS configures the interface.
    # FIXME: move to PublicInterface
    bootable = Column(Boolean(name="%s_bootable_ck" % _ABV), nullable=False,
                      default=False)

    default_route = Column(Boolean(name="%s_default_route_ck" % _ABV),
                           nullable=False, default=False)

    interface_type = Column(AqStr(32), nullable=False)

    hardware_entity_id = Column(Integer, ForeignKey('hardware_entity.id',
                                                    name='%s_hw_ent_fk' % _ABV,
                                                    ondelete='CASCADE'),
                                nullable=False)

    # The FK is deferrable to make it easier to copy the DB between different
    # backends. The broker itself does not make use of deferred constraints.
    master_id = Column(Integer, ForeignKey('interface.id',
                                           name='%s_master_fk' % _ABV,
                                           ondelete='CASCADE',
                                           deferrable=True,
                                           initially='IMMEDIATE'),
                       nullable=True)

    # FIXME: move to PublicInterface
    port_group = Column(AqStr(32), nullable=True)

    creation_date = deferred(
        Column(DateTime, default=datetime.now, nullable=False))

    # Most of the update_* commands need to load the comments due to
    # snapshot_hw(), so it is not worth deferring it
    comments = Column('comments', String(255), nullable=True)

    hardware_entity = relation(HardwareEntity, lazy=False, innerjoin=True,
                               backref=backref('interfaces',
                                               cascade='all, delete-orphan'))

    model = relation(Model, innerjoin=True)

    # Self-referential relation for bonding/bridging: a slave points at its
    # master interface
    master = relation('Interface', uselist=False, remote_side=id,
                      primaryjoin=master_id == id,
                      backref=backref('slaves'))

    # Order matters here, utils/constraints.py checks for endswith("NOT NULL")
    __table_args__ = (UniqueConstraint(mac, name='%s_mac_addr_uk' % _ABV),
                      UniqueConstraint(hardware_entity_id, name,
                                       name='%s_hw_name_uk' % _ABV),
                      Index('%s_model_idx' % _ABV, model_id),
                      Index('%s_master_idx' % _ABV, master_id))
    __mapper_args__ = {'polymorphic_on': interface_type}

    # Interfaces also have the property 'assignments' which is defined in
    # address_assignment.py

    def __format__(self, format_spec):
        instance = "{0.name} of {1:l}".format(self, self.hardware_entity)
        return self.format_helper(format_spec, instance)

    @validates('mac')
    def _validate_mac(self, key, value):
        # Due to how decorators work, we have to do a level of indirection to
        # make polymorphism work
        return self.validate_mac(key, value)

    def validate_mac(self, key, value):
        # Default policy; subclasses may override
        if self.bootable and not value:
            raise ValueError("Bootable interfaces require a MAC address.")
        return value

    @validates('name')
    def validate_name(self, key, value):
        # Enforce the subclass's name syntax, if it declares one
        if self.__class__.name_check and \
           not self.__class__.name_check.match(value):
            raise ValueError("Illegal %s interface name '%s'."
                             % (self.interface_type, value))
        return value

    @validates('master')
    def validate_master(self, key, value):
        # Only bonding/bridge interfaces may enslave others, and a slave may
        # not carry VLANs or addresses of its own
        if value is not None and not isinstance(value, BondingInterface) and \
           not isinstance(value, BridgeInterface):
            raise ValueError("The master must be a bonding or bridge "
                             "interface.")
        if self.vlans:
            raise ValueError("{0} can not be bound as long as it has "
                             "VLANs.".format(self))
        if self.assignments:
            raise ValueError("{0} cannot be enslaved as long as it holds "
                             "addresses.".format(self))
        return value

    @property
    def last_observation(self):
        session = object_session(self)
        q = session.query(ObservedMac)
        q = q.filter_by(mac_address=self.mac)
        # Group the results into 'any port number but zero' and 'port 0'.
        # This prioritizes any port over the uplink port.
        # Saying that port 0 is an uplink port isn't very elegant, also
        # with real port names it's not even true.
        q = q.order_by(desc(case([(ObservedMac.port == "0", 0)], else_=1)))
        # NOTE(review): the property body ends here without a return
        # statement, so it always yields None — presumably the remainder
        # (e.g. an ordering on last-seen time and a q.first()) was lost;
        # confirm against the original source.
class Location(Base):
    """ How we represent location data in Aquilon """
    __tablename__ = 'location'

    # Subclasses list which location types may be their parent
    valid_parents = []

    id = Column(Integer, Sequence('location_id_seq'), primary_key=True)
    name = Column(AqStr(16), nullable=False)
    location_type = Column(AqStr(32), nullable=False)
    fullname = Column(String(255), nullable=False)

    default_dns_domain_id = Column(Integer,
                                   ForeignKey('dns_domain.id',
                                              name='location_dns_domain_fk',
                                              ondelete='SET NULL'),
                                   nullable=True)

    creation_date = deferred(
        Column(DateTime, default=datetime.now, nullable=False))

    comments = Column(String(255), nullable=True)

    default_dns_domain = relation(DnsDomain)

    __table_args__ = (UniqueConstraint(name, location_type,
                                       name='loc_name_type_uk'), )
    __mapper_args__ = {'polymorphic_on': location_type}

    def get_p_dict(self, loc_type):
        """Return the ancestor (or self) with the given location type.

        The type -> location mapping is computed lazily and cached in
        self._parent_dict.
        """
        if self._parent_dict is None:
            self._parent_dict = {str(self.location_type): self}
            for node in self.parents:
                self._parent_dict[str(node.location_type)] = node
        return self._parent_dict.get(loc_type, None)

    # Convenience accessors for each known location type in the hierarchy

    @property
    def hub(self):
        return self.get_p_dict('hub')

    @property
    def continent(self):
        return self.get_p_dict('continent')

    @property
    def country(self):
        return self.get_p_dict('country')

    @property
    def campus(self):
        return self.get_p_dict('campus')

    @property
    def city(self):
        return self.get_p_dict('city')

    @property
    def building(self):
        return self.get_p_dict('building')

    @property
    def bunker(self):
        return self.get_p_dict('bunker')

    @property
    def room(self):
        return self.get_p_dict('room')

    @property
    def rack(self):
        return self.get_p_dict('rack')

    @property
    def chassis(self):
        return self.get_p_dict('chassis')

    def offspring_ids(self):
        """Subquery of the ids of self and all descendant locations."""
        session = object_session(self)
        q = session.query(Location.id)
        q = q.join((LocationLink, Location.id == LocationLink.child_id))
        # Include self as well
        q = q.filter(or_(Location.id == self.id,
                         LocationLink.parent_id == self.id))
        return q.subquery()

    def parent_ids(self):
        """Subquery of the ids of self and all ancestor locations."""
        session = object_session(self)
        q = session.query(Location.id)
        q = q.join((LocationLink, Location.id == LocationLink.parent_id))
        # Include self as well
        q = q.filter(or_(Location.id == self.id,
                         LocationLink.child_id == self.id))
        return q.subquery()

    def sysloc(self):
        """Return "building.city.continent", or None if any part is missing."""
        components = ('building', 'city', 'continent')
        names = []
        for component in components:
            value = self.get_p_dict(component)
            if not value:
                return None
            names.append(value.name)
        return str('.'.join(names))

    def get_parts(self):
        # Root-to-self path, including self
        parts = list(self.parents)
        parts.append(self)
        return parts

    def merge(self, loc):
        """Find the common root of two locations."""
        # Optimization since get_parts can be expensive...
        if self == loc:
            return self

        merged = None
        for (self_part, loc_part) in zip(self.get_parts(), loc.get_parts()):
            if self_part != loc_part:
                return merged
            merged = self_part
        return merged

    def __init__(self, parent=None, name=None, fullname=None, **kwargs):
        # Keep compatibility with the old behavior of the "parent" attribute
        # when creating new objects. Note that both the location manipulation
        # commands and the data loader in the unittest suite depends on this.
        if parent is not None:
            if parent.__class__ not in self.valid_parents:
                raise AquilonError("{0} cannot be a parent of {1:lc} "
                                   "{2}.".format(parent, self, name))
            session = object_session(parent)
            if not session:
                raise AquilonError("The parent must be persistent")
            # We have to disable autoflush in case parent._parent_links needs
            # loading, since self is not ready to be pushed to the DB yet
            with session.no_autoflush:
                # Inherit every ancestor link of the parent, one hop further
                for link in parent._parent_links:
                    session.add(LocationLink(child=self, parent=link.parent,
                                             distance=link.distance + 1))
                session.add(LocationLink(child=self, parent=parent,
                                         distance=1))
                session.expire(parent, ["_child_links", "children"])

        if not fullname:
            fullname = name

        super(Location, self).__init__(name=name, fullname=fullname, **kwargs)
        self._parent_dict = None

    @reconstructor
    def setup(self):
        # Called by the ORM instead of __init__ when loading from the DB;
        # reset the lazily-built parent cache
        self._parent_dict = None

    def update_parent(self, parent=None):
        """Re-root this location (and its subtree) under a new parent."""
        session = object_session(self)
        if parent is None:  # pragma: no cover
            raise AquilonError("Parent location can be updated but not "
                               "removed")
        if parent.__class__ not in self.valid_parents:
            raise AquilonError("{0} cannot be a parent of {1:l}.".format(
                parent, self))

        # Disable autoflush. We'll make use of SQLA's ability to replace
        # DELETE + INSERT for the same LocationLink with an UPDATE of the
        # distance column.
        with session.no_autoflush:
            # Delete links to our old parent and its ancestors
            for plink in self._parent_links:
                q = session.query(LocationLink)
                q = q.filter(and_(
                    LocationLink.child_id.in_(self.offspring_ids()),
                    LocationLink.parent_id == plink.parent.id))
                # See above: we depend on the caching ability of the session, so
                # we can't use q.delete()
                for clink in q.all():
                    session.delete(clink)

            # Add links to the new parent
            session.add(LocationLink(child=self, parent=parent, distance=1))
            for clink in self._child_links:
                session.add(LocationLink(child_id=clink.child_id,
                                         parent=parent,
                                         distance=clink.distance + 1))

            # Add links to the new parent's ancestors
            for plink in parent._parent_links:
                session.add(LocationLink(child=self,
                                         parent_id=plink.parent_id,
                                         distance=plink.distance + 1))
                for clink in self._child_links:
                    session.add(LocationLink(child_id=clink.child_id,
                                             parent_id=plink.parent_id,
                                             distance=plink.distance +
                                             clink.distance + 1))

        session.flush()
        session.expire(parent, ["_child_links", "children"])
        session.expire(self, ["_parent_links", "parent", "parents"])

        self._parent_dict = None
class DnsRecord(Base):
    """ Base class for a DNS Resource Record """

    __tablename__ = _TN
    _instance_label = 'fqdn'

    id = Column(Integer, Sequence('%s_id_seq' % _TN), primary_key=True)

    fqdn_id = Column(Integer, ForeignKey('fqdn.id',
                                         name='%s_fqdn_fk' % _TN),
                     nullable=False)

    # Polymorphic discriminator for the concrete record type
    dns_record_type = Column(AqStr(32), nullable=False)

    creation_date = deferred(
        Column(DateTime, default=datetime.now, nullable=False))
    comments = deferred(Column(String(255), nullable=True))

    fqdn = relation(Fqdn, lazy=False, innerjoin=True,
                    backref=backref('dns_records'))

    aliases = association_proxy('fqdn', 'aliases')
    srv_records = association_proxy('fqdn', 'srv_records')

    __table_args__ = (Index('%s_fqdn_idx' % _TN, fqdn_id), )
    __mapper_args__ = {'polymorphic_on': dns_record_type,
                       'polymorphic_identity': _TN}

    @classmethod
    def get_unique(cls, session, fqdn=None, name=None, dns_domain=None,
                   dns_environment=None, compel=False, preclude=False,
                   **kwargs):
        """Look up a single DNS record by Fqdn object or name/domain pair."""
        # Proxy FQDN lookup to the Fqdn class
        if not fqdn or not isinstance(fqdn, Fqdn):
            if not isinstance(dns_environment, DnsEnvironment):
                dns_environment = DnsEnvironment.get_unique_or_default(
                    session, dns_environment)
            if fqdn:
                if name or dns_domain:  # pragma: no cover
                    raise TypeError("fqdn and name/dns_domain cannot be "
                                    "mixed")
                (name, dns_domain) = parse_fqdn(session, fqdn)
            try:
                # Do not pass preclude=True to Fqdn
                fqdn = Fqdn.get_unique(session, name=name,
                                       dns_domain=dns_domain,
                                       dns_environment=dns_environment,
                                       compel=compel)
            except NotFoundException:
                # Replace the "Fqdn ... not found" message with a more user
                # friendly one
                msg = "%s %s.%s, %s not found." % (
                    cls._get_class_label(), name, dns_domain,
                    format(dns_environment, "l"))
                raise NotFoundException(msg)

        if not fqdn:
            return None

        # We already have the FQDN, no need to load it again
        if "query_options" not in kwargs:
            kwargs["query_options"] = [lazyload("fqdn")]

        result = super(DnsRecord, cls).get_unique(session, fqdn=fqdn,
                                                  compel=compel,
                                                  preclude=preclude, **kwargs)
        if result:
            # Make sure not to load the relation again if we already know its
            # value
            set_committed_value(result, 'fqdn', fqdn)
        return result

    @classmethod
    def get_or_create(cls, session, **kwargs):
        """Return the matching record, creating and flushing it if missing."""
        dns_record = cls.get_unique(session, **kwargs)
        if dns_record:
            return dns_record
        dns_record = cls(**kwargs)
        session.add(dns_record)
        session.flush()
        return dns_record

    def __format__(self, format_spec):
        # "a" formats the record as its bare FQDN string
        if format_spec != "a":
            return super(DnsRecord, self).__format__(format_spec)
        return str(self.fqdn)

    @property
    def all_aliases(self):
        """ Returns all distinct aliases that point to this record

            If Alias1 -> B, Alias2 -> B, B -> C, remove duplicates.
        """
        found = {}
        # Breadth-first walk over aliases-of-aliases
        queue = deque(self.aliases)
        while queue:
            alias = queue.popleft()
            found[str(alias.fqdn)] = alias
            for a in alias.aliases:
                if not str(a.fqdn) in found:
                    queue.append(a)

        # Ensure a deterministic order of the returned values
        # NOTE(review): list.sort(cmp=...) and the cmp() builtin are
        # Python 2 only — this method would need porting for Python 3.
        aliases = found.values()
        aliases.sort(cmp=lambda x, y: cmp(str(x.fqdn), str(y.fqdn)))
        return aliases

    def __init__(self, fqdn=None, **kwargs):
        if not fqdn:  # pragma: no cover
            raise ValueError("fqdn cannot be empty")
        session = object_session(fqdn)
        if not session:  # pragma: no cover
            raise ValueError("fqdn must be already part of a session")

        # Disable autoflush because self is not ready to be pushed to the DB yet
        with session.no_autoflush:
            # self.dns_record_type is not populated by the ORM yet, so query our
            # class
            own_type = self.__class__.__mapper_args__['polymorphic_identity']

            # Asking for just one column makes both the query and the ORM faster
            for existing in fqdn.dns_records:
                if existing.dns_record_type in _rr_conflict_map[own_type]:
                    raise ArgumentError("{0} already exist.".format(existing))

        super(DnsRecord, self).__init__(fqdn=fqdn, **kwargs)
class Cluster(Base):
    """
        A group of two or more hosts for high availablility or grid
        capabilities. Location constraint is nullable as it may or may not
        be used.
    """
    __tablename__ = _TN

    id = Column(Integer, Sequence('%s_seq' % _TN), primary_key=True)
    cluster_type = Column(AqStr(16), nullable=False)
    name = Column(AqStr(64), nullable=False)

    # Lack of cascaded deletion is intentional on personality
    personality_id = Column(Integer, ForeignKey('personality.id',
                                                name='cluster_prsnlty_fk'),
                            nullable=False)

    branch_id = Column(Integer, ForeignKey('branch.id',
                                           name='cluster_branch_fk'),
                       nullable=False)

    sandbox_author_id = Column(Integer,
                               ForeignKey('user_principal.id',
                                          name='cluster_sandbox_author_fk'),
                               nullable=True)

    location_constraint_id = Column(ForeignKey('location.id',
                                               name='cluster_location_fk'))

    # esx cluster __init__ method overrides this default
    max_hosts = Column(Integer, nullable=True)

    # N+M clusters are defined by setting down_hosts_threshold to M
    # Simple 2-node clusters would have down_hosts_threshold of 0
    down_hosts_threshold = Column(Integer, nullable=True)

    # And that tolerance can be relaxed even further in maintenance windows
    down_maint_threshold = Column(Integer, nullable=True)

    # Some clusters (e.g. grid) don't want fixed N+M down_hosts_threshold, but
    # use percentage goals (i.e. don't alert until 5% of the population dies)
    down_hosts_percent = Column(Boolean(name="%s_down_hosts_ck" % _TN),
                                default=False, nullable=True)

    down_maint_percent = Column(Boolean(name="%s_maint_hosts_ck" % _TN),
                                default=False, nullable=True)

    creation_date = deferred(
        Column(DateTime, default=datetime.now, nullable=False))

    status_id = Column(Integer, ForeignKey('clusterlifecycle.id',
                                           name='cluster_status_fk'),
                       nullable=False)
    comments = Column(String(255))

    status = relation(ClusterLifecycle, innerjoin=True)
    location_constraint = relation(Location, lazy=False)
    personality = relation(Personality, lazy=False, innerjoin=True)
    branch = relation(Branch, lazy=False, innerjoin=True, backref='clusters')
    sandbox_author = relation(UserPrincipal)

    hosts = association_proxy('_hosts', 'host', creator=_hcm_host_creator)
    metacluster = association_proxy('_metacluster', 'metacluster')

    __table_args__ = (UniqueConstraint(name, name='cluster_uk'),
                      Index("cluster_branch_idx", branch_id),
                      Index("cluster_prsnlty_idx", personality_id),
                      Index("cluster_location_idx", location_constraint_id))
    __mapper_args__ = {'polymorphic_on': cluster_type}

    @property
    def title(self):
        """Human-readable cluster kind, derived from the archetype."""
        if self.personality.archetype.outputdesc is not None:
            return self.personality.archetype.outputdesc
        return self.personality.archetype.name.capitalize() + " Cluster"

    @property
    def dht_value(self):
        """Effective down-hosts threshold (resolving a percentage goal)."""
        if not self.down_hosts_percent:
            return self.down_hosts_threshold
        return int((self.down_hosts_threshold * len(self.hosts)) / 100)

    @property
    def dmt_value(self):
        """Effective down-maintenance threshold (resolving a percentage)."""
        if not self.down_maint_percent:
            return self.down_maint_threshold
        return int((self.down_maint_threshold * len(self.hosts)) / 100)

    @staticmethod
    def parse_threshold(threshold):
        """Parse "N" or "N%" into (is_percent, value).

        The argument must contain at least one digit, otherwise the
        regexp search below returns None and group() raises.
        """
        is_percent = False
        # Raw string so the regexp escape is not subject to string escaping
        match = re.search(r'(\d+)(%)?', threshold)
        thresh_value = int(match.group(1))
        if match.group(2):
            is_percent = True
        return (is_percent, thresh_value)

    @property
    def authored_branch(self):
        """Branch name, prefixed with the sandbox author when present."""
        if self.sandbox_author:
            return "%s/%s" % (self.sandbox_author.name, self.branch.name)
        return str(self.branch.name)

    @property
    def personality_info(self):
        if self.cluster_type in self.personality.cluster_infos:
            return self.personality.cluster_infos[self.cluster_type]
        else:
            return None

    @property
    def required_services(self):
        return self.personality.services + self.personality.archetype.services

    @property
    def machines(self):
        """Virtual machines held as resources of this cluster."""
        mach = []
        if self.resholder:
            for res in self.resholder.resources:
                # TODO: support virtual machines inside resource groups?
                if res.resource_type == "virtual_machine":
                    mach.append(res.machine)
        return mach

    def validate_membership(self, host, error=ArgumentError, **kwargs):
        """Check that a host is allowed to join this cluster."""
        if host.machine.location != self.location_constraint and \
           self.location_constraint not in \
           host.machine.location.parents:
            raise error("Host location {0} is not within cluster "
                        "location {1}.".format(host.machine.location,
                                               self.location_constraint))

        if host.branch != self.branch or \
           host.sandbox_author != self.sandbox_author:
            raise ArgumentError("{0} {1} {2} does not match {3:l} {4} "
                                "{5}.".format(host,
                                              host.branch.branch_type,
                                              host.authored_branch,
                                              self,
                                              self.branch.branch_type,
                                              self.authored_branch))

    def validate(self, max_hosts=None, error=ArgumentError, **kwargs):
        """Validate cluster-wide invariants (attributes, capacity, nesting).

        :param max_hosts: optional override of the capacity limit; defaults
                          to self.max_hosts
        :param error: exception class to raise on violations
        """
        session = object_session(self)
        q = session.query(HostClusterMember)
        q = q.filter_by(cluster=self)
        q = q.options(joinedload('host'),
                      joinedload('host.machine'))
        members = q.all()
        # Prime the membership collection without triggering a reload
        set_committed_value(self, '_hosts', members)

        if self.cluster_type != 'meta':
            for i in ["down_hosts_threshold", "down_hosts_percent",
                      "down_maint_percent", "personality_id"
                      # "branch_id"
                      ]:
                if getattr(self, i, None) is None:
                    raise error("Attribute %s must be set for a %s "
                                "cluster." % (i, self.cluster_type))
        else:
            if self.metacluster:
                raise error("Metaclusters can't contain other "
                            "metaclusters.")

        if max_hosts is None:
            max_hosts = self.max_hosts
        # Bug fix: the capacity check used self.max_hosts unconditionally,
        # silently ignoring the max_hosts argument (while the error message
        # reported it). Compare against the effective limit, and skip the
        # check entirely when no limit is set.
        if max_hosts is not None and len(self.hosts) > max_hosts:
            raise error("{0} is over capacity of {1} hosts.".format(
                self, max_hosts))
        if self.metacluster:
            self.metacluster.validate()

    def format_helper(self, format_spec, instance):
        """Format the cluster using its archetype-derived title.

        Based on format_helper() and _get_class_label() in Base.
        """
        lowercase = False
        class_only = False
        passthrough = ""
        for letter in format_spec:
            if letter == "l":
                lowercase = True
            elif letter == "c":
                class_only = True
            else:
                passthrough += letter

        if self.cluster_type == 'meta':
            clsname = self.title + " Metacluster"
        else:
            clsname = self.title + " Cluster"

        if lowercase:
            parts = clsname.split()
            # Keep acronyms (e.g. "ESX") intact, lowercase the rest
            clsname = ' '.join(map(
                lambda x: x if x[:-1].isupper() else x.lower(), parts))
        if class_only:
            return clsname.__format__(passthrough)
        val = "%s %s" % (clsname, instance)
        return val.__format__(passthrough)
class Network(Base):
    """ Represents subnets in aqdb.  Network Type can be one of four values
        which have been carried over as legacy from the network table in DSDB:

        *   management: no networks have it(@ 3/27/08), it's probably useless

        *   transit: for the phyical interfaces of zebra nodes

        *   vip:     for the zebra addresses themselves

        *   unknown: for network rows in DSDB with NULL values for 'type'

        *   tor_net: tor switches are managed in band, which means that
                     if you know the ip/netmask of the switch, you know the
                     network which it provides for, and the 5th and 6th address
                     are reserved for a dynamic pool for the switch on the net

        *   stretch and vpls: networks that exist in more than one location

        *   external/external_vendor

        *   heartbeat

        *   wan

        *   campus
    """

    __tablename__ = _TN

    # Class-level cache of properties bound to the network type
    network_type_map = {}

    # Default network properties
    default_network_props = None

    id = Column(Integer, Sequence('%s_id_seq' % _TN), primary_key=True)

    network_environment_id = Column(Integer,
                                    ForeignKey('network_environment.id',
                                               name='%s_net_env_fk' % _TN),
                                    nullable=False)

    location_id = Column(Integer,
                         ForeignKey('location.id',
                                    name='%s_loc_fk' % _TN),
                         nullable=False)

    network_type = Column(AqStr(32), nullable=False)
    cidr = Column(Integer, nullable=False)
    name = Column(AqStr(255), nullable=False)
    ip = Column(IPV4, nullable=False)
    side = Column(AqStr(4), nullable=True, default='a')

    creation_date = deferred(Column(DateTime, default=datetime.now,
                                    nullable=False))
    comments = deferred(Column(String(255), nullable=True))

    network_environment = relation(NetworkEnvironment)

    location = relation(Location)

    # The routers relation is defined in router_address.py
    router_ips = association_proxy("routers", "ip")

    __table_args__ = (UniqueConstraint(network_environment_id, ip,
                                       name='%s_net_env_ip_uk' % _TN),
                      CheckConstraint(and_(cidr >= 1, cidr <= 32),
                                      name="%s_cidr_ck" % _TN),
                      Index('%s_location_idx' % _TN, location_id))

    def __init__(self, network=None, network_type=None, **kw):  # pylint: disable=W0621
        """Create a network from an IPv4Network object.

        :param network: the IPv4Network describing ip/prefix (required)
        :param network_type: falls back to the broker's configured
                             default_network_type when not given
        """
        if not isinstance(network, IPv4Network):
            raise InternalError("Expected an IPv4Network, got: %s" %
                                type(network))

        if not network_type:
            config = Config()
            network_type = config.get("broker", "default_network_type")

        self._network = network
        # Bug fix: this previously looked up self.network_type, which is not
        # populated until super().__init__() below runs, so the type-specific
        # properties were never picked up at construction time. Use the
        # local network_type instead.
        self._props = self.network_type_map.get(network_type,
                                                self.default_network_props)

        super(Network, self).__init__(ip=network.network,
                                      cidr=network.prefixlen,
                                      network_type=network_type, **kw)

    @reconstructor
    def _init_db(self):
        # This function gets called instead of __init__ when an object is loaded
        # from the database
        self._network = None
        self._props = self.network_type_map.get(self.network_type,
                                                self.default_network_props)

    @property
    def first_usable_host(self):
        """First address handed out to hosts, per the network-type policy."""
        start = self._props.first_usable_offset

        # TODO: do we need this fallback for /31 and /32 networks?
        if self.network.numhosts < start:
            start = 0

        return self.network[start]

    @property
    def reserved_offsets(self):
        return self._props.reserved_offsets

    @property
    def default_gateway_offset(self):
        return self._props.default_gateway_offset

    @property
    def network(self):
        """The IPv4Network object, rebuilt lazily from ip/cidr."""
        if not self._network:
            # TODO: more efficient initialization? Using
            # IPv4Network(int(self.ip)).supernet(new_prefix=self.cidr) looks
            # promising at first, but unfortunately it uses the same string
            # conversion internally...
            self._network = IPv4Network("%s/%s" % (self.ip, self.cidr))
        return self._network

    @network.setter
    def network(self, value):
        if not isinstance(value, IPv4Network):
            # Bug fix: the error message previously referenced the undefined
            # name "network", raising NameError instead of InternalError
            raise InternalError("Expected an IPv4Network, got: %s" %
                                type(value))
        self._network = value
        self.ip = value.network
        self.cidr = value.prefixlen

    @validates('ip', 'cidr')
    def _reset_network(self, attr, value):  # pylint: disable=W0613
        # Make sure the network object will get re-computed if the parameters
        # change
        self._network = None
        return value

    @property
    def netmask(self):
        return self.network.netmask

    @property
    def broadcast(self):
        return self.network.broadcast

    @property
    def available_ip_count(self):
        """Number of addresses between the first usable host and broadcast."""
        return int(self.broadcast) - int(self.first_usable_host)

    @property
    def is_internal(self):
        return self.network_environment.is_default

    # Comparisons are only meaningful within the same network environment

    def __le__(self, other):
        if self.network_environment_id != other.network_environment_id:
            return NotImplemented
        return self.ip.__le__(other.ip)

    def __lt__(self, other):
        if self.network_environment_id != other.network_environment_id:
            return NotImplemented
        return self.ip.__lt__(other.ip)

    def __ge__(self, other):
        if self.network_environment_id != other.network_environment_id:
            return NotImplemented
        return self.ip.__ge__(other.ip)

    def __gt__(self, other):
        if self.network_environment_id != other.network_environment_id:
            return NotImplemented
        return self.ip.__gt__(other.ip)

    @classmethod
    def get_unique(cls, session, *args, **kwargs):
        """Look up a network by IP address, IPv4Network, or name.

        A single positional argument is interpreted, in order, as an
        address, a network, or finally a name; anything else falls back
        to the generic implementation.
        """
        # Fall back to the generic implementation unless the caller used exactly
        # one non-keyword argument. Any caller using preclude would be passing
        # keywords anyway.
        compel = kwargs.pop("compel", False)
        options = kwargs.pop("query_options", None)
        netenv = kwargs.pop("network_environment", None)

        if kwargs or len(args) > 1:
            return super(Network, cls).get_unique(session, *args,
                                                  network_environment=netenv,
                                                  query_options=options,
                                                  compel=compel, **kwargs)

        # Just a single positional argumentum - do magic
        # The order matters here, we don't want to parse '1.2.3.4' as
        # IPv4Network('1.2.3.4/32')
        ip = None
        if isinstance(args[0], IPv4Address):
            ip = args[0]
        else:
            try:
                ip = IPv4Address(args[0])
            except AddressValueError:
                pass

        if ip:
            return super(Network, cls).get_unique(session, ip=ip,
                                                  network_environment=netenv,
                                                  query_options=options,
                                                  compel=compel)

        net = None
        if isinstance(args[0], IPv4Network):
            net = args[0]
        else:
            try:
                net = IPv4Network(args[0])
            except (AddressValueError, NetmaskValueError):
                pass

        if net:
            return super(Network, cls).get_unique(session, ip=net.network,
                                                  cidr=net.prefixlen,
                                                  network_environment=netenv,
                                                  query_options=options,
                                                  compel=compel)

        return super(Network, cls).get_unique(session, name=args[0],
                                              network_environment=netenv,
                                              query_options=options,
                                              compel=compel)

    def __format__(self, format_spec):
        # "a" formats as "name [ip/prefix]"
        if format_spec != "a":
            return super(Network, self).__format__(format_spec)
        return "%s [%s]" % (self.name, self.network)

    def __repr__(self):
        msg = '<Network '

        if self.name != self.network:
            msg += '%s ip=' % (self.name)

        msg += '%s (netmask=%s), type=%s, side=%s, located in %s, environment=%s>' % (
            str(self.network), str(self.network.netmask), self.network_type,
            self.side, format(self.location), self.network_environment)
        return msg

    @property
    def vlans_guest_count(self):
        return sum([vlan.guest_count for vlan in self.observed_vlans])

    @property
    def is_at_guest_capacity(self):
        return self.vlans_guest_count >= self.available_ip_count