class OperatingSystem(Base): """ Operating Systems """ __tablename__ = _TN id = Column(Integer, Sequence('%s_seq' % (_ABV)), primary_key=True) name = Column(AqStr(32), nullable=False) version = Column(AqStr(16), nullable=False) archetype_id = Column(Integer, ForeignKey('archetype.id', name='%s_arch_fk' % (_ABV)), nullable=False) #vendor id? creation_date = Column(DateTime, default=datetime.now, nullable=False) comments = Column(String(255), nullable=True) archetype = relation(Archetype, backref='os', uselist=False) #cfg_path = os/name/version def __repr__(self): s = ("<" + self.__class__.__name__ + " " + self.name + " " + self.version + " " + str(self.archetype) + '>') return s @classmethod def by_archetype(cls, dbarchetype): session = object_session(dbarchetype) return session.query(cls).filter( cls.__dict__['archetype'] == dbarchetype).all()
class OperatingSystem(Base): """ Operating Systems """ __tablename__ = _TN _class_label = 'Operating System' id = Column(Integer, Sequence('%s_seq' % _ABV), primary_key=True) name = Column(AqStr(32), nullable=False) version = Column(AqStr(16), nullable=False) archetype_id = Column(Integer, ForeignKey('archetype.id', name='%s_arch_fk' % _ABV, ondelete="CASCADE"), nullable=False) #vendor id? creation_date = deferred(Column(DateTime, default=datetime.now, nullable=False)) comments = Column(String(255), nullable=True) archetype = relation(Archetype, lazy=False, innerjoin=True) __table_args__ = (UniqueConstraint(archetype_id, name, version, name='%s_arch_name_version_uk' % _ABV),) def __format__(self, format_spec): instance = "%s/%s-%s" % (self.archetype.name, self.name, self.version) return self.format_helper(format_spec, instance) @property def cfg_path(self): return 'os/%s/%s' % (self.name, self.version)
class Cluster(Base): """ A group of two or more hosts for high availablility or grid capabilities Location constraint is nullable as it may or may not be used """ __tablename__ = _TN id = Column(Integer, Sequence('%s_seq' % (_TN)), primary_key=True) cluster_type = Column(AqStr(16), nullable=False) name = Column(AqStr(64), nullable=False) #Lack of cascaded deletion is intentional on personality personality_id = Column(Integer, ForeignKey('personality.id', name='cluster_prsnlty_fk'), nullable=False) domain_id = Column(Integer, ForeignKey('domain.id', name='cluster_domain_fk'), nullable=False) location_constraint_id = Column( ForeignKey('location.id', name='cluster_location_fk')) #esx cluster __init__ method overrides this default max_hosts = Column(Integer, default=2, nullable=True) creation_date = Column(DateTime, default=datetime.now, nullable=False) comments = Column(String(255)) location_constraint = relation(Location, uselist=False, lazy=False) personality = relation(Personality, uselist=False, lazy=False) domain = relation(Domain, uselist=False, lazy=False) #FIXME: Is it possible to have an append that checks the max_members? hosts = association_proxy('_hosts', 'host', creator=_cluster_host_append) machines = association_proxy('_machines', 'machine', creator=_cluster_machine_append) service_bindings = association_proxy('_cluster_svc_binding', 'service_instance') _metacluster = None metacluster = association_proxy('_metacluster', 'metacluster') @property def required_services(self): return object_session(self).query(ClusterAlignedService).filter_by( cluster_type=self.cluster_type).all() __mapper_args__ = {'polymorphic_on': cluster_type}
class ClusterAlignedService(Base): """ Express services that must be the same for cluster types. As SQL Alchemy doesn't yet support FK or functionally determined discrimators for polymorphic inheritance, cluster_type is currently being expressed as a string. As ESX is the only type for now, it's seems a reasonable corner to cut. """ __tablename__ = _CRS service_id = Column( Integer, ForeignKey('service.id', name='%s_svc_fk' % (_ABV), ondelete='CASCADE'), #if the service is deleted, delete the link? primary_key=True) cluster_type = Column(AqStr(16), primary_key=True) creation_date = Column(DateTime, default=datetime.now, nullable=False) comments = Column(String(255)) service = relation(Service, uselist=False, lazy=False, backref=backref('_clusters', cascade='all'))
class Machine(HardwareEntity):
    __tablename__ = 'machine'
    __mapper_args__ = {'polymorphic_identity': 'machine'}

    #hardware_entity_
    machine_id = Column(Integer, ForeignKey('hardware_entity.id',
                                            name='machine_hw_ent_fk'),
                        primary_key=True)
    name = Column('name', AqStr(64), nullable=False)
    cpu_id = Column(Integer, ForeignKey('cpu.id', name='machine_cpu_fk'),
                    nullable=False)
    cpu_quantity = Column(Integer, nullable=False, default=2)  #constrain/smallint
    memory = Column(Integer, nullable=False, default=512)

    hardware_entity = relation(HardwareEntity, uselist=False,
                               backref='machine')
    cpu = relation(Cpu, uselist=False)

    #TODO: synonym in location/model?
    #location = relation(Location, uselist=False)

    @property
    def hardware_name(self):
        return self.name

class DnsEnvironment(Base): """ Dns Environments are groups of network segments that have their own distinct view of DNS data. This could be the internal institutional network, the external, the dmz, or other corporate segments. For now, SRV Records and aliases may not cross environment boundaries """ __tablename__ = _TN _class_label = 'DNS Environment' id = Column(Integer, Sequence('%s_id_seq' % (_TN)), primary_key=True) name = Column(AqStr(32), nullable=False) creation_date = deferred( Column(DateTime, default=datetime.now, nullable=False)) comments = deferred(Column(String(255), nullable=True)) __table_args__ = (UniqueConstraint(name, name='%s_name_uk' % _TN), ) @property def is_default(self): return self.name == _config.get("site", "default_dns_environment") @classmethod def get_unique_or_default(cls, session, dns_environment=None): if dns_environment: return cls.get_unique(session, dns_environment, compel=True) else: return cls.get_unique(session, _config.get("site", "default_dns_environment"), compel=InternalError)
class ResourceGroup(Resource): """ A collection of resources which operate together (e.g. a VCS Service Group).""" __tablename__ = _TN _class_label = 'Resource Group' id = Column(Integer, ForeignKey('resource.id', name='rg_resource_fk', ondelete='CASCADE'), primary_key=True) # declare any per-group attributes here (none for now) # This is to enforce the same type of resources in the group required_type = Column(AqStr(32), nullable=True) __mapper_args__ = {'polymorphic_identity': _TN} def validate_holder(self, key, value): if isinstance(value, BundleResource): raise ValueError("ResourceGroups must not be held by other " + "ResourceGroups") return value @property def branch(self): return self.holder.holder_object.branch
class Personality(Base): """ Personality names """ __tablename__ = _TN id = Column(Integer, Sequence('%s_seq' % (_ABV)), primary_key=True) name = Column(AqStr(32), nullable=False) archetype_id = Column(Integer, ForeignKey('archetype.id', name='%s_arch_fk' % (_ABV)), nullable=False) creation_date = Column(DateTime, default=datetime.now, nullable=False) comments = Column(String(255), nullable=True) archetype = relation(Archetype, backref='personality', uselist=False) def __repr__(self): s = ("<" + self.__class__.__name__ + " name ='" + self.name + "', " + str(self.archetype) + '>') return s @classmethod def by_archetype(cls, dbarchetype): session = object_session(dbarchetype) return session.query(cls).filter( cls.__dict__['archetype'] == dbarchetype).all()
class HardwareEntity(Base):
    __tablename__ = 'hardware_entity'

    id = Column(Integer, Sequence('hardware_entity_seq'), primary_key=True)
    hardware_entity_type = Column(AqStr(64), nullable=False)
    location_id = Column(Integer, ForeignKey('location.id',
                                             name='hw_ent_loc_fk'),
                         nullable=False)
    model_id = Column(Integer, ForeignKey('model.id',
                                          name='hw_ent_model_fk'),
                      nullable=False)
    serial_no = Column(String(64), nullable=True)
    creation_date = deferred(Column(DateTime, default=datetime.now,
                                    nullable=False))
    comments = deferred(Column(String(255), nullable=True))

    location = relation(Location, uselist=False)
    model = relation(Model, uselist=False)

    __mapper_args__ = {'polymorphic_on': hardware_entity_type}

    _hardware_name = 'Unnamed hardware'

    @property
    def hardware_name(self):
        return self._hardware_name

class PersonalityGrnMap(Base):
    __tablename__ = _PGN

    personality_id = Column(Integer,
                            ForeignKey('%s.id' % _TN,
                                       name='%s_personality_fk' % _PGNABV,
                                       ondelete='CASCADE'),
                            nullable=False)
    eon_id = Column(Integer, ForeignKey('grn.eon_id',
                                        name='%s_grn_fk' % _PGNABV),
                    nullable=False)

    personality = relation(Personality, innerjoin=True,
                           backref=backref('_grns',
                                           cascade='all, delete-orphan',
                                           passive_deletes=True))
    grn = relation(Grn, lazy=False, innerjoin=True,
                   backref=backref('_personalities', passive_deletes=True))

    target = Column(AqStr(32), nullable=False, primary_key=True)

    # used by unmap
    @property
    def mapped_object(self):
        return self.personality

    __table_args__ = (PrimaryKeyConstraint(personality_id, eon_id),)

class PersonalityClusterInfo(Base): """ Extra personality data specific to clusters """ __tablename__ = _PCI id = Column(Integer, Sequence("%s_seq" % _PCIABV), primary_key=True) personality_id = Column(Integer, ForeignKey("personality.id", name="%s_pers_fk" % _PCIABV, ondelete="CASCADE"), nullable=False) cluster_type = Column(AqStr(16), nullable=False) personality = relation( Personality, lazy=False, innerjoin=True, backref=backref( "cluster_infos", collection_class=column_mapped_collection(cluster_type), cascade="all")) creation_date = deferred( Column(DateTime, default=datetime.now, nullable=False)) __table_args__ = (UniqueConstraint(personality_id, cluster_type, name="%s_pc_uk" % _PCIABV), ) __mapper_args__ = {'polymorphic_on': cluster_type}
class ServiceInstance(Base): """ Service instance captures the data around assignment of a system for a particular purpose (aka usage). If machines have a 'personality' dictated by the application they run """ __tablename__ = _TN id = Column(Integer, Sequence('%s_id_seq' % (_TN)), primary_key=True) service_id = Column(Integer, ForeignKey('service.id', name='%s_svc_fk' % (_ABV)), nullable=False) name = Column(AqStr(64), nullable=False) creation_date = Column(DateTime, default=datetime.now, nullable=False) comments = Column(String(255), nullable=True) service = relation(Service, uselist=False, backref='instances') @property def cfg_path(self): return 'service/%s/%s' % (self.service.name, self.name) @property def client_count(self): return object_session(self).query(BuildItem).filter_by( cfg_path=self.cfg_path).count() def __repr__(self): return '(%s) %s %s' % (self.__class__.__name__, self.service.name, self.name)
class Model(Base): """ Vendor and Model are representations of the various manufacturers and the asset inventory of the kinds of machines we use in the plant """ __tablename__ = 'model' id = Column(Integer, Sequence('model_id_seq'), primary_key=True) name = Column(AqStr(64), nullable=False) vendor_id = Column(Integer, ForeignKey('vendor.id', name='model_vendor_fk'), nullable=False) machine_type = Column(AqStr(16), nullable=False) creation_date = deferred(Column(DateTime, default=datetime.now, nullable=False)) comments = deferred(Column(String(255))) vendor = relation(Vendor)
class MetaCluster(Base): """ A metacluster is a grouping of two or more clusters grouped together for wide-area failover scenarios (So far only for vmware based clusters). Network is nullable for metaclusters that do not utilize IP failover. """ __tablename__ = _MCT id = Column(Integer, Sequence('%s_seq' % (_MCT)), primary_key=True) name = Column(AqStr(64), nullable=False) max_clusters = Column(Integer, default=2, nullable=False) max_shares = Column(Integer, nullable=False) creation_date = Column(DateTime, default=datetime.now, nullable=False) comments = Column(String(255)) members = association_proxy('clusters', 'cluster', creator=_metacluster_member_by_cluster) @property def shares(self): q = object_session(self).query(ServiceInstance) q = q.join( ['nas_disks', 'machine', '_cluster', 'cluster', '_metacluster']) q = q.filter_by(metacluster=self) return q.all()
class Vendor(Base): """ Vendor names """ __tablename__ = _ABV id = Column(Integer, Sequence('%s_id_seq'%(_ABV)), primary_key=True) name = Column(AqStr(32), nullable=False) creation_date = deferred(Column(DateTime, default=datetime.now, nullable=False)) comments = deferred(Column(String(255), nullable=True))
class DiskType(Base): """ Disk Type: scsi, cciss, sata, etc. """ __tablename__ = _ABV id = Column(Integer, Sequence('%s_seq'%(_ABV)), primary_key=True) type = Column(AqStr(32), nullable=False) creation_date = Column(DateTime, default=datetime.now, nullable=False) comments = Column(String(255), nullable=True)
class DnsDomain(Base): """ Dns Domain (simple names that compose bigger records) """ __tablename__ = _TN _class_label = 'DNS Domain' # RFC 1035 _name_check = re.compile('^[a-zA-Z]([-a-zA-Z0-9]*[a-zA-Z0-9])?$') id = Column(Integer, Sequence('%s_id_seq' % (_TN)), primary_key=True) name = Column(AqStr(32), nullable=False) restricted = Column(Boolean(name="%s_restricted_ck" % _TN), nullable=False, default=False) creation_date = deferred(Column(DateTime, default=datetime.now, nullable=False)) comments = deferred(Column(String(255), nullable=True)) servers = association_proxy('_ns_records', 'a_record') # The relation is defined in dns_map.py mapped_locations = association_proxy('dns_maps', 'location') __table_args__ = (UniqueConstraint(name, name='%s_uk' % _TN),) @classmethod def check_label(cls, label): # TODO: database check constraint for length if len(label) < 1 or len(label) > 63: msg = 'DNS name components must have a length between 1 and 63.' raise ArgumentError(msg) if not cls._name_check.match(label): raise ArgumentError("Illegal DNS name format '%s'." % label) def __init__(self, *args, **kwargs): if 'name' not in kwargs: raise KeyError('DNS domain name missing.') domain = kwargs['name'] # The limit for DNS name length is 255, assuming wire format. This # translates to 253 for simple ASCII text; see: # http://www.ops.ietf.org/lists/namedroppers/namedroppers.2003/msg00964.html if len(domain) > 253: raise ArgumentError('The DNS domain name is too long.') parts = domain.split('.') if len(parts) < 2: raise ArgumentError('Top-level DNS domains cannot be added.') # The limit of max. 127 parts mentioned at various documents about DNS # follows from the other checks above and below for part in parts: self.check_label(part) super(DnsDomain, self).__init__(*args, **kwargs)
class Model(Base): """ Vendor and Model are representations of the various manufacturers and the asset inventory of the kinds of machines we use in the plant """ __tablename__ = 'model' id = Column(Integer, Sequence('model_id_seq'), primary_key=True) name = Column(AqStr(64), nullable=False) vendor_id = Column(Integer, ForeignKey('vendor.id', name='model_vendor_fk'), nullable=False) machine_type = Column(AqStr(16), nullable=False) creation_date = deferred( Column(DateTime, default=datetime.now, nullable=False)) comments = Column(String(255)) vendor = relation(Vendor) __table_args__ = (UniqueConstraint(vendor_id, name, name='model_vendor_name_uk'), ) def __format__(self, format_spec): instance = "%s/%s" % (self.vendor.name, self.name) return self.format_helper(format_spec, instance) @classmethod def default_nic_model(cls, session): # TODO: make this configurable return cls.get_unique(session, machine_type='nic', name='generic_nic', vendor='generic', compel=AquilonError) @property def nic_model(self): if self.machine_specs: return self.machine_specs.nic_model session = object_session(self) return self.default_nic_model(session)
class City(Location): """ City is a subtype of location """ __tablename__ = 'city' __mapper_args__ = {'polymorphic_identity': 'city'} id = Column(Integer, ForeignKey('location.id', name='city_loc_fk', ondelete='CASCADE'), primary_key=True) timezone = Column(AqStr(64), nullable=True, default='TZ = FIX ME')
class Personality(Base): """ Personality names """ __tablename__ = _TN id = Column(Integer, Sequence('%s_seq' % _ABV), primary_key=True) name = Column(AqStr(32), nullable=False) archetype_id = Column(Integer, ForeignKey('archetype.id', name='%s_arch_fk' % _ABV), nullable=False) cluster_required = Column(Boolean(name="%s_clstr_req_ck" % _TN), default=False, nullable=False) config_override = Column(Boolean(name="persona_cfg_override_ck"), default=False, nullable=False) owner_eon_id = Column(Integer, ForeignKey('grn.eon_id', name='%s_owner_grn_fk' % _TN), nullable=False) host_environment_id = Column(Integer, ForeignKey('host_environment.id', name='host_environment_fk'), nullable=False) creation_date = deferred(Column(DateTime, default=datetime.now, nullable=False)) comments = Column(String(255), nullable=True) archetype = relation(Archetype) owner_grn = relation(Grn, innerjoin=True) grns = association_proxy('_grns', 'grn', creator=_pgm_creator) host_environment = relation(HostEnvironment, innerjoin=True) __table_args__ = (UniqueConstraint(archetype_id, name, name='%s_arch_name_uk' % _TN),) @property def is_cluster(self): return self.archetype.cluster_type is not None def __format__(self, format_spec): instance = "%s/%s" % (self.archetype.name, self.name) return self.format_helper(format_spec, instance) @classmethod def validate_env_in_name(cls, name, host_environment): env_mapper = inspect(HostEnvironment) persona_env = re.search("[-/](" + "|".join(env_mapper.polymorphic_map.keys()) + ")$", name, re.IGNORECASE) if persona_env and (persona_env.group(1) != host_environment): raise ArgumentError("Environment value in personality name '{0}' " "does not match the host environment '{1}'" .format(name, host_environment))
class Role(Base):
    __tablename__ = 'role'

    id = Column(Integer, Sequence('role_id_seq'), primary_key=True)
    name = Column(AqStr(32), nullable=False)
    creation_date = deferred(Column(DateTime, nullable=False,
                                    default=datetime.now))
    comments = deferred(Column('comments', String(255), nullable=True))

class Interface(Base): """ In this design, interface is really just a name/type pair, AND the primary source for MAC address. Name/Mac/IP, the primary tuple, is in system, where mac is duplicated, but code to update MAC addresses must come through here """ __tablename__ = 'interface' id = Column(Integer, Sequence('interface_seq'), primary_key=True) name = Column(AqStr(32), nullable=False) #like e0, hme1, etc. mac = Column(AqMac(17), nullable=False) bootable = Column(Boolean, nullable=False, default=False) interface_type = Column(AqStr(32), nullable=False) #TODO: index hardware_entity_id = Column(Integer, ForeignKey('hardware_entity.id', name='IFACE_HW_ENT_FK', ondelete='CASCADE'), nullable=False) system_id = Column(Integer, ForeignKey('system.id', name='IFACE_SYSTEM_FK', ondelete='CASCADE'), nullable=True) creation_date = deferred( Column('creation_date', DateTime, default=datetime.now, nullable=False)) comments = deferred(Column('comments', String(255))) hardware_entity = relation(HardwareEntity, backref='interfaces', passive_deletes=True) system = relation(System, backref='interfaces', passive_deletes=True)
class Vendor(Base): """ Vendor names """ __tablename__ = _TN id = Column(Integer, Sequence('%s_id_seq' % _TN), primary_key=True) name = Column(AqStr(32), nullable=False) creation_date = deferred(Column(DateTime, default=datetime.now, nullable=False)) comments = Column(String(255), nullable=True) __table_args__ = (UniqueConstraint(name, name='%s_uk' % _TN),)
class LocationSearchList(Base): """ Lists of location types that define search ordering (as an algorithm). Intended for use with service maps during automated instance selection """ __tablename__ = 'location_search_list' id = Column(Integer, Sequence('%s_seq' % (_ABV)), primary_key=True) name = Column(AqStr(32), nullable=False) creation_date = Column(DateTime, default=datetime.now, nullable=False) comments = Column(String(255), nullable=True)
class DnsDomain(Base): """ For Dns Domain names """ __tablename__ = _TN id = Column(Integer, Sequence('%s_id_seq' % (_TN)), primary_key=True) name = Column(AqStr(32), nullable=False) creation_date = Column(DateTime, default=datetime.now, nullable=False) comments = Column(String(255), nullable=True) def __str__(self): return str(self.name)
class Cpu(Base):
    __tablename__ = 'cpu'

    id = Column(Integer, Sequence('cpu_id_seq'), primary_key=True)
    name = Column(AqStr(64), nullable=False)
    vendor_id = Column(Integer, ForeignKey('vendor.id',
                                           name='cpu_vendor_fk'),
                       nullable=False)
    speed = Column(Integer, nullable=False)
    creation_date = deferred(Column(DateTime, default=datetime.now,
                                    nullable=False))
    comments = deferred(Column(String(255), nullable=True))

    vendor = relation(Vendor)

class Branch(Base): """ Each branch of template-king represents a distinct set of templates in use broker-side (domains) or client-side (sandboxes) for testing and managing systems. """ __tablename__ = _TN id = Column(Integer, Sequence('%s_id_seq' % _TN), primary_key=True) branch_type = Column(AqStr(16), nullable=False) name = Column(AqStr(32), nullable=False) compiler = Column(String(255), nullable=False) is_sync_valid = Column(Boolean(name="%s_is_sync_valid_ck" % _TN), nullable=False, default=True) autosync = Column(Boolean(name="%s_autosync_ck" % _TN), nullable=False, default=True) owner_id = Column(Integer, ForeignKey('user_principal.id', name='%s_user_princ_fk' % _TN), nullable=False) creation_date = deferred( Column(DateTime, default=datetime.now, nullable=False)) comments = deferred(Column(String(255), nullable=True)) owner = relation(UserPrincipal, innerjoin=True) __mapper_args__ = {'polymorphic_on': branch_type} __table_args__ = (UniqueConstraint(name, name='%s_uk' % _TN), )
class NetworkEnvironment(Base): """ Network Environment Represents an administrative domain for RFC 1918 private network addresses. Network addresses are unique inside an environment, but different environments may have duplicate/overlapping network definitions. It is expected that when two hosts have IP addresses in two different network environments, then they can not communicate directly with each other. """ __tablename__ = _TN _class_label = 'Network Environment' id = Column(Integer, Sequence('%s_id_seq' % _TN), primary_key=True) name = Column(AqStr(64), nullable=False) location_id = Column(Integer, ForeignKey('location.id', name='%s_loc_fk' % _ABV), nullable=True) dns_environment_id = Column(Integer, ForeignKey('dns_environment.id', name='%s_dns_env_fk' % _ABV), nullable=False) creation_date = deferred( Column(DateTime, default=datetime.now, nullable=False)) comments = deferred(Column(String(255), nullable=True)) location = relation(Location) dns_environment = relation(DnsEnvironment) __table_args__ = (UniqueConstraint(name, name='%s_name_uk' % _ABV), ) @property def is_default(self): return self.name == _config.get("site", "default_network_environment") @classmethod def get_unique_or_default(cls, session, network_environment=None): if network_environment: return cls.get_unique(session, network_environment, compel=True) else: return cls.get_unique(session, _config.get("site", "default_network_environment"), compel=InternalError)
class Status(Base): """ Status names """ __tablename__ = _TN id = Column(Integer, Sequence('%s_id_seq' % (_TN)), primary_key=True) name = Column(AqStr(32), nullable=False) creation_date = Column(DateTime, default=datetime.now, nullable=False) comments = Column(String(255), nullable=True) def __init__(self, name): e = "Status is a static table and can't be instanced, only queried." raise ValueError(e) def __repr__(self): return str(self.name)
class Sandbox(Branch): """ Template branch where the checked out contents are managed by a user. Multiple users can have a sandbox checked out. """ __tablename__ = _SBX sandbox_id = Column(Integer, ForeignKey('branch.id', name='%s_fk' % _SBX, ondelete='CASCADE'), primary_key=True) base_commit = Column(AqStr(40), nullable=False) __mapper_args__ = {'polymorphic_identity': _SBX}