def _get_zone_data(self, zone):
    # ARecord has key=a_record_id
    # HostInfo, SrvRecord has key=dns_owner_id
    # CnameRecords key=target_owner_id
    # entity2txt, entity2note has key=entity_id
    for row in ARecord.ARecord(db).list_ext(zone=zone):
        id = int(row['dns_owner_id'])
        if self.a_records_by_dns_owner.has_key(id):
            self.a_records_by_dns_owner[id] += [row]
        else:
            self.a_records_by_dns_owner[id] = [row]
        # Following dict is populated to support HostFile
        self.a_records[int(row['a_record_id'])] = row
    logger.debug("... arecords")
    for row in AAAARecord.AAAARecord(db).list_ext(zone=zone):
        id = int(row['dns_owner_id'])
        if self.aaaa_records_by_dns_owner.has_key(id):
            self.aaaa_records_by_dns_owner[id] += [row]
        else:
            self.aaaa_records_by_dns_owner[id] = [row]
        # Following dict is populated to support HostFile
        self.aaaa_records[int(row['aaaa_record_id'])] = row
    logger.debug("... aaaarecords")
    for row in HostInfo.HostInfo(db).list(zone=zone):
        # Unique constraint on dns_owner_id
        self.hosts[int(row['dns_owner_id'])] = row
    logger.debug("... hosts")
    for row in CNameRecord.CNameRecord(db).list_ext(zone=zone):
        # TBD: should we have a unique constraint on dns_owner?
        self.cnames.setdefault(int(row['target_owner_id']), []).append(row)
    logger.debug("... cnames")
    # From mix-in classes
    for row in DnsOwner.MXSet(db).list_mx_sets():
        self.mx_sets.setdefault(int(row['mx_set_id']), []).append(row)
    logger.debug("... mx_sets")
    for row in DnsOwner.DnsOwner(db).list(zone=zone):
        self.owner_id2mx_set[int(row['dns_owner_id'])] = int(
            row['mx_set_id'] or 0)
    logger.debug("... mx_set owners")
    for row in DnsOwner.DnsOwner(db).list_general_dns_records(
            field_type=co.field_type_txt, zone=zone):
        self.dnsowner2txt_record[int(row['dns_owner_id'])] = row
    logger.debug("... txt records")
    for row in DnsOwner.DnsOwner(db).list_srv_records(zone=zone):
        # We want them listed in the same place
        # TODO: while doing that, we want it below the first target_owner_id
        self.srv_records.setdefault(int(row['service_owner_id']),
                                    []).append(row)
    logger.debug("... srv records")
def __init__(self, db, default_zone):
    self._db = db
    self._arecord = ARecord.ARecord(self._db)
    self._ip_number = IPNumber.IPNumber(self._db)
    self._dns_owner = DnsOwner.DnsOwner(self._db)
    self._mx_set = DnsOwner.MXSet(self._db)
    self._host = HostInfo.HostInfo(self._db)
    self._cname = CNameRecord.CNameRecord(self._db)
    self._default_zone = default_zone
def __init__(self, db, default_zone):
    self._db = db
    self._ip_number = IPNumber.IPNumber(db)
    self._ipv6_number = IPv6Number.IPv6Number(db)
    self._arecord = ARecord.ARecord(db)
    self._aaaarecord = AAAARecord.AAAARecord(db)
    self._dns_owner = DnsOwner.DnsOwner(db)
    self._mx_set = DnsOwner.MXSet(db)
    self._host = HostInfo.HostInfo(db)
    self._cname = CNameRecord.CNameRecord(db)
    self._dns_parser = DnsParser(db, default_zone)
def set_ttl(self, owner_id, ttl):
    """Set TTL entries for this dns_owner"""
    # TODO: Currently we do this by updating the TTL in all
    # tables.  It has been decided to move ttl-information into
    # dns_owner.  However, we will not do this until after we have
    # gone into production to avoid a huge diff when comparing
    # autogenerated zone files to the original ones.
    dns_owner = DnsOwner.DnsOwner(self.db)
    dns_owner.find(owner_id)

    arecord = ARecord.ARecord(self.db)
    for row in arecord.list_ext(dns_owner_id=owner_id):
        arecord.clear()
        arecord.find(row['a_record_id'])
        arecord.ttl = ttl
        arecord.write_db()

    aaaarecord = AAAARecord.AAAARecord(self.db)
    for row in aaaarecord.list_ext(dns_owner_id=owner_id):
        aaaarecord.clear()
        aaaarecord.find(row['aaaa_record_id'])
        aaaarecord.ttl = ttl
        aaaarecord.write_db()

    host = HostInfo.HostInfo(self.db)
    try:
        host.find_by_dns_owner_id(owner_id)
    except Errors.NotFoundError:
        pass
    else:
        host.ttl = ttl
        host.write_db()

    for row in dns_owner.list_general_dns_records(dns_owner_id=owner_id):
        dns_owner.update_general_dns_record(owner_id, row['field_type'],
                                            ttl, row['data'])

    mx_set = DnsOwner.MXSet(self.db)
    for row in mx_set.list_mx_sets(target_id=owner_id):
        mx_set.clear()
        mx_set.find(row['mx_set_id'])
        mx_set.update_mx_set_member(ttl, row['pri'], row['target_id'])

    cname = CNameRecord.CNameRecord(self.db)
    for row in cname.list_ext(cname_owner=owner_id):
        cname.clear()
        cname.find(row['cname_id'])
        cname.ttl = ttl
        cname.write_db()

    for row in dns_owner.list_srv_records(owner_id=owner_id):
        dns_owner.update_srv_record_ttl(owner_id, ttl)
def find_dns_owners(self, dns_owner_id, only_type=True):
    """Return information about entries using this dns_owner.

    If only_type=True, returns a list of owner_type.  Otherwise
    returns a list of (owner_type, owner_id) tuples.
    """
    ret = []
    arecord = ARecord.ARecord(self._db)
    for row in arecord.list_ext(dns_owner_id=dns_owner_id):
        ret.append((dns.A_RECORD, row['a_record_id']))
    aaaarecord = AAAARecord.AAAARecord(self._db)
    for row in aaaarecord.list_ext(dns_owner_id=dns_owner_id):
        ret.append((dns.AAAA_RECORD, row['aaaa_record_id']))
    hi = HostInfo.HostInfo(self._db)
    try:
        hi.find_by_dns_owner_id(dns_owner_id)
        ret.append((dns.HOST_INFO, hi.entity_id))
    except Errors.NotFoundError:
        pass
    dns_owner = DnsOwner.DnsOwner(self._db)
    for row in dns_owner.list_srv_records(owner_id=dns_owner_id):
        ret.append((dns.SRV_OWNER, row['service_owner_id']))
    for row in dns_owner.list_general_dns_records(
            dns_owner_id=dns_owner_id):
        ret.append((dns.GENERAL_DNS_RECORD, row['dns_owner_id']))
    cn = CNameRecord.CNameRecord(self._db)
    for row in cn.list_ext(cname_owner=dns_owner_id):
        ret.append((dns.CNAME_OWNER, row['cname_id']))
    if only_type:
        return [x[0] for x in ret]
    return ret
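
# Illustrative usage sketch (not part of the original module): how a caller
# might list what record types a dns_owner is used by via find_dns_owners()
# above.  The names `db`, `default_zone` and `some_owner_id` are assumptions
# and must be replaced with a real Cerebrum database handle, a configured
# zone and an existing dns_owner entity_id.
def _example_list_owner_usage(db, default_zone, some_owner_id):
    finder = Utils.Find(db, default_zone)
    # only_type=False yields (owner_type, owner_id) tuples instead of just
    # the owner-type constants.
    for owner_type, entity_id in finder.find_dns_owners(some_owner_id,
                                                        only_type=False):
        print("dns_owner %s is used by %s (id=%s)"
              % (some_owner_id, owner_type, entity_id))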
def _get_host(self, host_id):
    """Helper method for getting the DnsOwner for the given host ID,
    which can either be an IP address, an A record or a CName alias."""
    finder = Find(self.db, self.default_zone)
    tmp = host_id.split(".")
    if host_id.find(":") == -1 and tmp[-1].isdigit():
        # host_id is an IP
        owner_id = finder.find_target_by_parsing(host_id, IP_NUMBER)
    else:
        owner_id = finder.find_target_by_parsing(host_id, DNS_OWNER)

    # Check if it is a Cname; if so, update the owner_id
    try:
        cname_record = CNameRecord(self.db)
        cname_record.find_by_cname_owner_id(owner_id)
        owner_id = cname_record.target_owner_id
    except Errors.NotFoundError:
        pass

    dns_owner = DnsOwner.DnsOwner(self.db)
    try:
        dns_owner.find(owner_id)
    except Errors.NotFoundError:
        raise CerebrumError('Unknown host: %s' % host_id)
    return dns_owner
def dns_reg_owner_ok(self, name, record_type, allow_underscores=False):
    """Check whether it is legal to register a record of type
    record_type with the given name.

    Raises an exception if record_type or name is illegal.

    Returns:
      - dns_owner_ref: reference to dns_owner, or None if non-existing
      - same_type: boolean set to true if a record of the same type exists
    """
    dns_owner = DnsOwner.DnsOwner(self._db)
    self.legal_dns_owner_name(name, record_type, allow_underscores)
    try:
        dns_owner.find_by_name(name)
    except Errors.NotFoundError:
        return None, None

    owner_types = self._find.find_dns_owners(dns_owner.entity_id)

    if dns.CNAME_OWNER in owner_types:
        raise DNSError("%s is already a CNAME" % (name, ))
    if record_type == dns.CNAME_OWNER:
        if owner_types:
            raise DNSError("%s already exists" % (name, ))
    if record_type == dns.HOST_INFO:
        if dns.HOST_INFO in owner_types:
            raise DNSError("%s already exists" % (name, ))
    # TODO: This should probably be rewritten, since it is a bit ugly.
    # The difference between A- and AAAA-records is minimal, so this is
    # enough for now.
    if record_type in (dns.A_RECORD, dns.AAAA_RECORD,):
        if dns.A_RECORD in owner_types or dns.AAAA_RECORD in owner_types:
            return dns_owner.entity_id, True
    return dns_owner.entity_id, False
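
# Illustrative sketch (not from the original source) of how the return value
# of dns_reg_owner_ok() might be interpreted before registering an A-record.
# `validator` is assumed to be an IntegrityHelper.Validator instance and
# `name` a fully qualified host name; note that the call itself may raise
# DNSError if the name is already a CNAME or otherwise conflicts.
def _example_check_before_register(validator, name):
    owner_ref, same_type = validator.dns_reg_owner_ok(name, dns.A_RECORD)
    if owner_ref is None:
        print("%s is unused; a new dns_owner must be created" % name)
    elif same_type:
        print("%s already has an A/AAAA record (owner id %s)"
              % (name, owner_ref))
    else:
        print("%s exists (owner id %s) but has no A/AAAA record yet"
              % (name, owner_ref))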
def __init__(self, db, logger, default_zone):
    self.logger = logger
    self.db = db
    self.const = Factory.get('Constants')(self.db)
    # TBD: This pre-allocating may interfere with multi-threaded bofhd
    self._arecord = ARecord.ARecord(self.db)
    self._aaaarecord = AAAARecord.AAAARecord(self.db)
    self._host = HostInfo.HostInfo(self.db)
    self._dns_owner = DnsOwner.DnsOwner(self.db)
    self._ip_number = IPNumber.IPNumber(self.db)
    self._ipv6_number = IPv6Number.IPv6Number(self.db)
    self._cname = CNameRecord.CNameRecord(self.db)
    self._validator = IntegrityHelper.Validator(self.db, default_zone)
    self._update_helper = IntegrityHelper.Updater(self.db)
    self._mx_set = DnsOwner.MXSet(self.db)
    self.default_zone = default_zone
    self._find = Utils.Find(self.db, default_zone)
    self._parser = Utils.DnsParser(self.db, default_zone)
def mx_set_set(self, owner_id, mx_set):
    dns_owner = DnsOwner.DnsOwner(self.db)
    dns_owner.find(owner_id)
    self._validator.dns_reg_owner_ok(dns_owner.name, dns.MX_SET)
    if mx_set == '':
        dns_owner.mx_set_id = None
    else:
        dns_owner.mx_set_id = self._find.find_mx_set(mx_set).mx_set_id
    dns_owner.write_db()
def __init__(self, *args, **kwargs):
    super(TSDUtils, self).__init__(*args, **kwargs)
    self.ou = Factory.get('OU')(self.db)
    self.et = EntityTrait.EntityTrait(self.db)
    self.dnsowner = DnsOwner.DnsOwner(self.db)
    self.subnet = Subnet.Subnet(self.db)
    self.subnet6 = IPv6Subnet.IPv6Subnet(self.db)
    self.ar = ARecord.ARecord(self.db)
    self.aaaar = AAAARecord.AAAARecord(self.db)
def import_hinfo(fname):
    """Import the hinfo file.  It has the format::

      hostname, hinfo, TODO = line.split()

    Update HINFO for all hosts in the file unless they are already
    registered as DHCP.
    """
    host = HostInfo.HostInfo(db)
    dns_id2name = {}
    for row in DnsOwner.DnsOwner(db).list():
        dns_id2name[int(row['dns_owner_id'])] = row['name']
    name2info = {}
    for row in host.list():
        name = dns_id2name[int(row['dns_owner_id'])]
        name = name[:-1]  # FQDN without trailing dot
        name2info[name] = (row['hinfo'], int(row['dns_owner_id']))

    ok_line = re.compile(r'^\S*?\.uio\.no')  # Temporary work-around for
    # data & info messages in same file
    n_lines, n_changed, n_unknown, n_dhcp, n_nf = 0, 0, 0, 0, 0
    for line in open(fname, "r"):
        n_lines += 1
        line = line.strip()
        if not ok_line.match(line):
            logger.info("Ignoring '%s'" % line)
            continue
        hostname, new_hinfo1, new_hinfo2 = line.split(None, 2)
        new_hinfo = "%s\t%s" % (new_hinfo1, new_hinfo2)
        if not name2info.has_key(hostname):
            logger.info("unknown host '%s'" % hostname)
            n_unknown += 1
            continue
        old_hinfo, dns_owner_id = name2info.get(hostname)
        if old_hinfo.startswith('DHCP') or new_hinfo == old_hinfo:
            n_dhcp += 1
            continue
        try:
            host.clear()
            host.find_by_dns_owner_id(dns_owner_id)
        except Errors.NotFoundError:
            n_nf += 1
            continue
        logger.debug("Setting new hinfo '%s' for %s/%s (old=%s)" % (
            new_hinfo, dns_owner_id, hostname, host.hinfo))
        host.hinfo = new_hinfo
        host.write_db()
        db.commit()
        n_changed += 1
    logger.info("Read %i lines, changed %i, %i unknown, %i dhcp, %i nf" % (
        n_lines, n_changed, n_unknown, n_dhcp, n_nf))
def main():
    db = Factory.get('Database')()
    co = Factory.get('Constants')(db)
    arecord = ARecord.ARecord(db)
    dns_owner = DnsOwner.DnsOwner(db)
    get_id_mac = itemgetter('dns_owner_id', 'mac_adr')
    get_id_name = itemgetter('dns_owner_id', 'name')
    get_trait = itemgetter('entity_id', 'code', 'strval')
    trait2attr = {
        int(co.trait_dns_comment): 'uioHostComment',
        int(co.trait_dns_contact): 'uioHostContact',
    }

    ldif = LDIFWriter('HOSTS', None)
    logger.info('Start of hosts export to %s', ldif.f.name)
    ldif.write_container()
    base_dn = ldif.getconf('dn')

    id2attrs = defaultdict(dict)
    for entity_id, code, strval in imap(get_trait, dns_owner.list_traits(
            code=trait2attr.keys())):
        if strval:
            id2attrs[int(entity_id)][trait2attr[code]] = (strval,)

    arecords = defaultdict(set)
    for owner_id, mac in imap(get_id_mac, arecord.list_ext()):
        if mac:
            arecords[int(owner_id)].add(mac)

    done = set()
    for owner_id, name in sorted(imap(get_id_name, dns_owner.list())):
        owner_id, name = int(owner_id), name.rstrip('.')
        # We have both lowercase and uppercase versions of some host
        # names.  Ignore one, hostnames are case-insensitive in LDAP.
        key = name.lower()
        if key not in done:
            done.add(key)
            entry = {
                'host': (name,),
                'objectClass': ['uioHostinfo'],
                'uioHostMacAddr': arecords.get(owner_id, ()),
            }
            entry.update(id2attrs.get(owner_id, ()))
            ldif.write_entry("host={},{}".format(name, base_dn), entry)

    ldif.close()
    logger.info('Done')
def test_setup_for_project(self):
    """Approved accounts should be set up with host and groups."""
    old_cereconf_groups = cereconf.TSD_PROJECT_GROUPS[:]
    cereconf.TSD_PROJECT_GROUPS = (('member-group', 'All members', ()), )
    old_cereconf_members = cereconf.TSD_GROUP_MEMBERS.copy()
    cereconf.TSD_GROUP_MEMBERS['member-group'] = ('person_aff:PROJECT', )

    self._ou.clear()
    pid = self._ou.create_project('tsthst')
    self._ou.populate_trait(self._co.trait_project_vm_type,
                            strval='linux_vm')
    self._ou.setup_project(self.db_tools.get_initial_account_id())
    self._ou.write_db()

    # TODO: Create helper methods for this, if needed in more tests:
    person_id = self.db_tools.create_person(self.person_ds().next())
    self._pe.clear()
    self._pe.find(person_id)
    self._pe.populate_affiliation(
        source_system=self._co.system_nettskjema,
        ou_id=self._ou.entity_id,
        affiliation=self._co.affiliation_project,
        status=self._co.affiliation_status_project_member)
    self._pe.write_db()

    account = self.account_ds().next()
    account_id = self.db_tools.create_account(account, person_id)
    self._ac.clear()
    self._ac.find(account_id)
    self._ac.set_account_type(self._ou.entity_id,
                              self._co.affiliation_project)
    self._ac.write_db()
    self._ac.setup_for_project()

    # Check data
    dnsowner = DnsOwner.DnsOwner(self._db)
    # User should have its own host:
    dnsowner.find_by_name('%s-l.tsd.usit.no.' % self._ac.account_name)
    # User should be member of some groups:
    for grname in ('member-group', ):
        self._gr.clear()
        self._gr.find_by_name('-'.join((pid, grname)))
        # self.assertTrue(self._gr.has_member(self._ac.entity_id))

    # TODO: This should rather be in the teardown, to avoid trouble in
    # later tests:
    cereconf.TSD_PROJECT_GROUPS = old_cereconf_groups
    cereconf.TSD_GROUP_MEMBERS = old_cereconf_members
def _populate_dnsowner(self, hostname):
    """Create or update a DnsOwner connected to the given project.

    The DnsOwner is given a trait, to affiliate it with this project-OU.

    This should rather be put in the DNS module, but due to its
    complexity, its weird layout, and my lack of IQ points to understand
    it, I started just using its API instead.

    :param str hostname: The given *FQDN* for the host.

    :rtype: DnsOwner object
    :return: The DnsOwner object that is created or updated.
    """
    dns_owner = DnsOwner.DnsOwner(self._db)
    dnsfind = Utils.Find(self._db, cereconf.DNS_DEFAULT_ZONE)
    ipv6number = IPv6Number.IPv6Number(self._db)
    aaaarecord = AAAARecord.AAAARecord(self._db)
    ipnumber = IPNumber.IPNumber(self._db)
    arecord = ARecord.ARecord(self._db)

    try:
        dns_owner.find_by_name(hostname)
    except Errors.NotFoundError:
        # TODO: create owner here?
        dns_owner.populate(self.const.DnsZone(cereconf.DNS_DEFAULT_ZONE),
                           hostname)
        dns_owner.write_db()
    # Affiliate with project:
    dns_owner.populate_trait(self.const.trait_project_host,
                             target_id=self.entity_id)
    dns_owner.write_db()
    for (subnets, ipnum, record, ipstr) in (
            (self.ipv6_subnets, ipv6number, aaaarecord, "IPv6"),
            (self.ipv4_subnets, ipnumber, arecord, "IPv4")):
        # TODO: check if dnsowner already has an ip address.
        try:
            ip = dnsfind.find_free_ip(subnets.next(), no_of_addrs=1)[0]
        except StopIteration:
            raise Errors.NotFoundError("No %s-subnet for project %s" %
                                       (ipstr, self.get_project_id()))
        ipnum.populate(ip)
        ipnum.write_db()
        record.populate(dns_owner.entity_id, ipnum.entity_id)
        record.write_db()
    return dns_owner
def remove_reverse_override(self, ip_number_id, dest_host):
    """Remove reverse-map override for ip_number_id.

    Will remove dns_owner and ip_number entries if they are no longer
    in use.
    """
    try:
        ipnumber = IPv6Number.IPv6Number(self._db)
        ipnumber.find(ip_number_id)
        a_type = dns.AAAA_RECORD
        ip_type = dns.IPv6_NUMBER
        o_ip_type = dns.IP_NUMBER
        o_ipnumber = IPNumber.IPNumber(self._db)
    except Errors.NotFoundError:
        ipnumber = IPNumber.IPNumber(self._db)
        ipnumber.find(ip_number_id)
        a_type = dns.A_RECORD
        ip_type = dns.IP_NUMBER
        o_ip_type = dns.IPv6_NUMBER
        o_ipnumber = IPv6Number.IPv6Number(self._db)

    ipnumber.delete_reverse_override(ip_number_id, dest_host)

    refs = self._find.find_referers(ip_number_id=ip_number_id,
                                    ip_type=ip_type)
    if not (dns.REV_IP_NUMBER in refs or a_type in refs):
        # IP no longer used
        ipnumber.delete()

    if dest_host is not None:
        refs = self._find.find_referers(dns_owner_id=dest_host,
                                        ip_type=ip_type)
        refs += self._find.find_referers(dns_owner_id=dest_host,
                                         ip_type=o_ip_type)
        if not refs:
            tmp = []
            for row in ipnumber.list_override(dns_owner_id=dest_host):
                # One might argue that find_referers also should find
                # this type of refs.
                tmp.append((dns.DNS_OWNER, row['dns_owner_id']))
            for row in o_ipnumber.list_override(dns_owner_id=dest_host):
                tmp.append((dns.DNS_OWNER, row['dns_owner_id']))
            if not tmp:
                dns_owner = DnsOwner.DnsOwner(self._db)
                dns_owner.find(dest_host)
                dns_owner.delete()
def full_remove_dns_owner(self, dns_owner_id):
    # Remove all entries where this dns_owner would appear on the
    # left-hand side of the zone file.
    self.remove_host_info(dns_owner_id)

    arecord = ARecord.ARecord(self._db)
    for row in arecord.list_ext(dns_owner_id=dns_owner_id):
        self.remove_arecord(row['a_record_id'])

    aaaarecord = AAAARecord.AAAARecord(self._db)
    for row in aaaarecord.list_ext(dns_owner_id=dns_owner_id):
        self.remove_arecord(row['aaaa_record_id'])

    self.remove_cname(dns_owner_id)

    dns_owner = DnsOwner.DnsOwner(self._db)
    for row in dns_owner.list_general_dns_records(
            dns_owner_id=dns_owner_id):
        dns_owner.delete_general_dns_record(dns_owner_id,
                                            row['field_type'])
    self.remove_dns_owner(dns_owner_id)
def remove_dns_owner(self, dns_owner_id):
    refs = self._find.find_referers(dns_owner_id=dns_owner_id)
    if refs:
        raise DNSError("dns_owner still referred to in %s" % str(refs))
    dns_owner = DnsOwner.DnsOwner(self._db)
    dns_owner.find(dns_owner_id)
    if not self._find.find_overrides(dns_owner_id=dns_owner_id):
        dns_owner.delete()
    else:
        for t in dns_owner.get_traits().keys():
            try:
                dns_owner.delete_trait(t)
            except Errors.NotFoundError:
                pass
        dns_owner.mx_set_id = None
        dns_owner.write_db()
def get_ttl(self, owner_id):
    """Retrieve the TTL ('Time to Live') setting for the records
    associated with the given DNS owner.
    """
    # Caveat: if TTL is set for one of the host's A*-records, it is
    # set for the host in general.  If no A*-record exists, we don't
    # acknowledge any other TTL than "default"
    dns_owner = DnsOwner.DnsOwner(self.db)
    dns_owner.find(owner_id)
    # This adaption to A- and AAAA-records is very ugly, but it honours
    # "The Old Way" of getting the TTL for a host.
    ar = ARecord.ARecord(self.db)
    ar.clear()
    for r in ar.list_ext(dns_owner_id=owner_id):
        ar.find(r['a_record_id'])
        return ar.ttl
    ar = AAAARecord.AAAARecord(self.db)
    ar.clear()
    for r in ar.list_ext(dns_owner_id=owner_id):
        ar.find(r['aaaa_record_id'])
        return ar.ttl
    return None
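
# Illustrative sketch (not part of the original class): reading a host's
# effective TTL and writing a new one via the get_ttl()/set_ttl() pair shown
# above.  `helper` is assumed to be an instance of the class defining those
# methods, and `owner_id` an existing dns_owner entity_id.
def _example_bump_ttl(helper, owner_id, new_ttl=3600):
    current = helper.get_ttl(owner_id)
    # get_ttl() returns None when no A/AAAA record exists for the owner.
    if current != new_ttl:
        helper.set_ttl(owner_id, new_ttl)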
def generate_hosts_file(self, fname, with_comments=False):
    f = AtomicFileWriter(fname, "w")

    # IPv4
    fm = ForwardMap(self._zone)
    order = fm.a_records.keys()
    order.sort(lambda x, y: int(fm.a_records[x]['ipnr'] -
                                fm.a_records[y]['ipnr']))
    entity_id2comment = {}
    if with_comments:
        for row in DnsOwner.DnsOwner(db).list_traits(co.trait_dns_comment):
            entity_id = int(row['entity_id'])
            entity_id2comment[entity_id] = ' # ' + row['strval']
    # If multiple A-records have the same name with different IP, the
    # dns_owner data is only shown for the first IP.
    shown_owner = {}
    for a_id in order:
        line = ''
        a_ref = fm.a_records[a_id]
        prefix = '%s\t%s' % (a_ref['a_ip'], self._exp_name(a_ref['name']))
        line = ''
        names = list()
        dns_owner_id = int(a_ref['dns_owner_id'])
        if dns_owner_id in shown_owner:
            # raise ValueError, "%s already shown?" % a_ref['name']
            continue
        shown_owner[dns_owner_id] = True
        for c_ref in fm.cnames.get(dns_owner_id, []):
            names.append(c_ref['name'])
        line += " " + " ".join([self._exp_name(n) for n in names])
        line += entity_id2comment.get(int(a_ref['dns_owner_id']), '')
        f.write(self._wrap_line(prefix, line))

    # IPv6
    order = fm.aaaa_records.keys()
    entity_id2comment = {}
    if with_comments:
        for row in DnsOwner.DnsOwner(db).list_traits(co.trait_dns_comment):
            entity_id = int(row['entity_id'])
            entity_id2comment[entity_id] = ' # ' + row['strval']
    # If multiple AAAA-records have the same name with different IP, the
    # dns_owner data is only shown for the first IP.
    shown_owner = {}
    for a_id in order:
        line = ''
        a_ref = fm.aaaa_records[a_id]
        prefix = '%s\t%s' % (a_ref['aaaa_ip'],
                             self._exp_name(a_ref['name']))
        line = ''
        names = list()
        dns_owner_id = int(a_ref['dns_owner_id'])
        if dns_owner_id in shown_owner:
            # raise ValueError, "%s already shown?" % a_ref['name']
            continue
        shown_owner[dns_owner_id] = True
        for c_ref in fm.cnames.get(dns_owner_id, []):
            names.append(c_ref['name'])
        line += " " + " ".join([self._exp_name(n) for n in names])
        line += entity_id2comment.get(int(a_ref['dns_owner_id']), '')
        f.write(self._wrap_line(prefix, line))
    f.close()
def generate_zone_file(self, fname, heads, data_dir):
    logger.debug("Generating zone file")
    with self.zu.open(os.path.join(data_dir, os.path.basename(fname))):
        self.zu.write_heads(heads, data_dir)

        def aaaa_key(x):
            return IPv6Calc.ip_to_long(self.aaaa_records[x]['aaaa_ip'])

        ar = self.a_records.keys()
        ar.sort(key=lambda x: self.a_records[x]['ipnr'])
        aaaar = self.aaaa_records.keys()
        aaaar.sort(key=aaaa_key)
        order = ar + aaaar

        # If multiple A- or AAAA-records have the same name with different
        # IP, the dns_owner data is only shown for the first IP.
        shown_owner = {}
        for a_id in order:
            line = ''
            ar = self.a_records.get(a_id, None)
            aaaar = self.aaaa_records.get(a_id, None)
            if ar is not None:
                name = self.zu.trim_name(ar['name'])
                line += "%s\t%s\tA\t%s\n" % (name, ar['ttl'] or '',
                                             ar['a_ip'])
            elif aaaar is not None:
                name = self.zu.trim_name(aaaar['name'])
                ar = aaaar
                line += "%s\t%s\tAAAA\t%s\n" % (name, ar['ttl'] or '',
                                                ar['aaaa_ip'])
            dns_owner_id = int(ar['dns_owner_id'])
            if dns_owner_id in shown_owner:
                self.zu.write(line)
                continue
            shown_owner[dns_owner_id] = True
            # logger.debug2("A: %s, owner=%s" % (a_id, dns_owner_id))
            if dns_owner_id in self.hosts:
                line += "\t%s\tHINFO\t%s\n" % (
                    self.hosts[dns_owner_id]['ttl'] or '',
                    self.hosts[dns_owner_id]['hinfo'])
            if self.owner_id2mx_set.get(dns_owner_id, None):
                mx_set = self.owner_id2mx_set[dns_owner_id]
                for mx_info in self.mx_sets[mx_set]:
                    line += "\t%s\tMX\t%s\t%s\n" % (
                        mx_info['ttl'] or '',
                        mx_info['pri'],
                        self.zu.exp_name(mx_info['target_name']))
            txt = self.dnsowner2txt_record.get(dns_owner_id, None)
            if txt:
                line += "\t%s\tTXT\t\"%s\"\n" % (txt['ttl'] or '',
                                                 txt['data'])
            for c_ref in self.cnames.get(dns_owner_id, []):
                line += "%s\t%s\tCNAME\t%s\n" % (
                    c_ref['name'],
                    c_ref['ttl'] or '',
                    self.zu.exp_name(c_ref['target_name']))
            # for machines with multiple a-records and cnames, the
            # cnames will be listed before the last a-records.
            self.zu.write(line)

        self.zu.write('; End of a-record owned entries\n')
        logger.debug("Check remaining data")
        rows = DnsOwner.DnsOwner(db).list()
        for row in sorted(rows, key=lambda i: i['dns_owner_id']):
            line = ''
            # Check for any remaining data.  Should only be srv_records
            # and cnames with foreign targets
            name = self.zu.trim_name(row['name'])
            for s_ref in self.srv_records.get(row['dns_owner_id'], []):
                line += "%s\t%s\tSRV\t%i\t%i\t%i\t%s\n" % (
                    name,
                    s_ref['ttl'] or '',
                    s_ref['pri'],
                    s_ref['weight'],
                    s_ref['port'],
                    self.zu.exp_name(s_ref['target_name']))
                name = ''
            if row['dns_owner_id'] not in shown_owner:
                txt = self.dnsowner2txt_record.get(row['dns_owner_id'],
                                                   None)
                if txt:
                    line += "%s\t%s\tTXT\t\"%s\"\n" % (
                        name, txt['ttl'] or '', txt['data'])
                    name = ''
                if self.owner_id2mx_set.get(int(row['dns_owner_id']),
                                            None):
                    mx_set = self.owner_id2mx_set[int(row['dns_owner_id'])]
                    for mx_info in self.mx_sets[mx_set]:
                        line += "%s\t%s\tMX\t%s\t%s\n" % (
                            name,
                            mx_info['ttl'] or '',
                            mx_info['pri'],
                            self.zu.exp_name(mx_info['target_name']))
                        name = ''
                for c_ref in self.cnames.get(row['dns_owner_id'], []):
                    line += "%s\t%s\tCNAME\t%s\n" % (
                        c_ref['name'],
                        c_ref['ttl'] or '',
                        self.zu.exp_name(c_ref['target_name']))
            if line:
                self.zu.write(line)
    logger.debug("zone file completed")
def terminate(self):
    """Remove all of a project, except its project ID and name (acronym).

    The project's entities are deleted by this method, so use with care!

    For the OU object, it does almost the same as L{delete}, except for
    deleting the entity itself.
    """
    self.write_db()
    ent = EntityTrait(self._db)
    ac = Factory.get('Account')(self._db)
    pu = Factory.get('PosixUser')(self._db)

    # Delete PosixUsers
    for row in ac.list_accounts_by_type(ou_id=self.entity_id,
                                        filter_expired=False):
        try:
            pu.clear()
            pu.find(row['account_id'])
            pu.delete_posixuser()
        except Errors.NotFoundError:
            # not a PosixUser
            continue

    # Remove all project's groups
    gr = Factory.get('Group')(self._db)
    for row in gr.list_traits(code=self.const.trait_project_group,
                              target_id=self.entity_id):
        gr.clear()
        gr.find(row['entity_id'])
        gr.delete()

    # Delete all users
    for row in ac.list_accounts_by_type(ou_id=self.entity_id):
        ac.clear()
        ac.find(row['account_id'])
        ac.delete()

    # Remove every trace of person affiliations to the project:
    pe = Factory.get('Person')(self._db)
    for row in pe.list_affiliations(ou_id=self.entity_id,
                                    include_deleted=True):
        pe.clear()
        pe.find(row['person_id'])
        pe.nuke_affiliation(ou_id=row['ou_id'],
                            affiliation=row['affiliation'],
                            source=row['source_system'],
                            status=row['status'])
        pe.write_db()

    # Remove all project's DnsOwners (hosts):
    dnsowner = DnsOwner.DnsOwner(self._db)
    policy = PolicyComponent(self._db)
    update_helper = IntegrityHelper.Updater(self._db)
    for row in ent.list_traits(code=self.const.trait_project_host,
                               target_id=self.entity_id):
        # TODO: Could we instead update the Subnet classes to use
        # Factory.get('Entity'), and make use of EntityTrait there to
        # handle this?
        owner_id = row['entity_id']
        ent.clear()
        ent.find(owner_id)
        ent.delete_trait(row['code'])
        ent.write_db()
        # Remove the links to policies if hostpolicy is used
        for prow in policy.search_hostpolicies(dns_owner_id=owner_id):
            policy.clear()
            policy.find(prow['policy_id'])
            policy.remove_from_host(owner_id)
        # delete the DNS owner
        update_helper.full_remove_dns_owner(owner_id)

    # Delete all subnets
    subnet = Subnet.Subnet(self._db)
    subnet6 = IPv6Subnet.IPv6Subnet(self._db)
    for row in ent.list_traits(code=(self.const.trait_project_subnet6,
                                     self.const.trait_project_subnet),
                               target_id=self.entity_id):
        ent.clear()
        ent.find(row['entity_id'])
        ent.delete_trait(row['code'])
        ent.write_db()
        if row['code'] == self.const.trait_project_subnet:
            subnet.clear()
            subnet.find(row['entity_id'])
            subnet.delete()
        if row['code'] == self.const.trait_project_subnet6:
            subnet6.clear()
            subnet6.find(row['entity_id'])
            subnet6.delete()

    # Remove all data from the OU except for:
    # The project ID and project name
    for tr in tuple(self.get_traits()):
        self.delete_trait(tr)
    for row in self.get_spread():
        self.delete_spread(row['spread'])
    for row in self.get_contact_info():
        self.delete_contact_info(row['source_system'],
                                 row['contact_type'])
    for row in self.get_entity_address():
        self.delete_entity_address(row['source_system'],
                                   row['address_type'])
    for row in self.search_name_with_language(entity_id=self.entity_id):
        # The project name must not be removed, to avoid reuse
        if row['name_variant'] == self.const.ou_name_acronym:
            continue
        self.delete_name_with_language(row['name_variant'])
    self.write_db()
def _setup_project_hosts(self, creator_id):
    """Setup the hosts initially needed for the given project."""
    projectid = self.get_project_id()
    host = HostInfo.HostInfo(self._db)
    dns_owner = DnsOwner.DnsOwner(self._db)
    vm_trait = self.get_trait(self.const.trait_project_vm_type)
    if vm_trait:
        vm_type = vm_trait['strval']
    else:
        # Set win as default if trait is not set.
        vm_type = 'win_vm'

    if vm_type in ('win_vm', 'win_and_linux_vm'):
        # Create a Windows host for the whole project if it doesn't exist
        hostname = '%s-win01.tsd.usit.no.' % projectid
        hinfo = 'IBM-PC\tWINDOWS'
        host_dns_owner = None
        try:
            host.find_by_name(hostname)
        except Errors.NotFoundError:
            host_dns_owner = self._populate_dnsowner(hostname)
            try:
                host.find_by_dns_owner_id(host_dns_owner.entity_id)
            except Errors.NotFoundError:
                host.populate(host_dns_owner.entity_id, hinfo)
        if host_dns_owner is None:
            dns_owner.find_by_name(hostname)
            host_dns_owner = dns_owner
        host.hinfo = hinfo
        host.write_db()
        for comp in getattr(cereconf, 'TSD_HOSTPOLICIES_WIN', ()):
            TSDUtils.add_host_to_policy_component(
                self._db, host_dns_owner.entity_id, comp)

    if vm_type in ('linux_vm', 'win_and_linux_vm'):
        host.clear()
        # Create a Linux host for the whole project if it doesn't exist
        hostname = '%s-tl01-l.tsd.usit.no.' % projectid
        hinfo = 'IBM-PC\tLINUX'
        host_dns_owner = None
        try:
            host.find_by_name(hostname)
        except Errors.NotFoundError:
            host_dns_owner = self._populate_dnsowner(hostname)
            try:
                host.find_by_dns_owner_id(host_dns_owner.entity_id)
            except Errors.NotFoundError:
                host.populate(host_dns_owner.entity_id, hinfo)
        if host_dns_owner is None:
            dns_owner.clear()
            dns_owner.find_by_name(hostname)
            host_dns_owner = dns_owner
        host.hinfo = hinfo
        host.write_db()
        for comp in getattr(cereconf, 'TSD_HOSTPOLICIES_LINUX', ()):
            TSDUtils.add_host_to_policy_component(
                self._db, host_dns_owner.entity_id, comp)
        # Add CNAME-record for connecting via thinlinc-proxy
        cname_record_name = '%s-tl01-l.tl.tsd.usit.no.' % projectid
        TSDUtils.add_cname_record(self._db, cname_record_name,
                                  cereconf.TSD_THINLINC_PROXY,
                                  fail_on_exists=False)
def find_referers(self, ip_number_id=None, dns_owner_id=None,
                  only_type=True, ip_type=dns.IP_NUMBER):
    """Return information about registrations that point to this
    ip-number/dns-owner.

    If only_type=True, returns a list of owner_type.  Otherwise
    returns a list of (owner_type, owner_id) tuples.
    """
    # We choose classes and record type depending on the ip_type
    # parameter.  This is a bit dirty, but reduces the amount of
    # functions required.
    ip_class = IPNumber.IPNumber if (
        ip_type == dns.IP_NUMBER) else IPv6Number.IPv6Number
    record_class = ARecord.ARecord if (
        ip_type == dns.IP_NUMBER) else AAAARecord.AAAARecord
    record_type = dns.A_RECORD if (
        ip_type == dns.IP_NUMBER) else dns.AAAA_RECORD
    ip_key = 'ip_number_id' if (
        ip_type == dns.IP_NUMBER) else 'ipv6_number_id'
    record_key = 'a_record_id' if (
        ip_type == dns.IP_NUMBER) else 'aaaa_record_id'

    # Not including entity-note
    assert not (ip_number_id and dns_owner_id)
    ret = []

    if ip_number_id and ip_type == dns.REV_IP_NUMBER:
        for ipn, key in [
                (IPNumber.IPNumber(self._db), 'ip_number_id'),
                (IPv6Number.IPv6Number(self._db), 'ipv6_number_id')]:
            for row in ipn.list_override(ip_number_id=ip_number_id):
                ret.append((dns.REV_IP_NUMBER, row[key]))
        if only_type:
            return [x[0] for x in ret]
        return ret

    if ip_number_id:
        ipnumber = ip_class(self._db)
        for row in ipnumber.list_override(ip_number_id=ip_number_id):
            ret.append((dns.REV_IP_NUMBER, row[ip_key]))
        arecord = record_class(self._db)
        for row in arecord.list_ext(ip_number_id=ip_number_id):
            ret.append((record_type, row[record_key]))
        if only_type:
            return [x[0] for x in ret]
        return ret

    mx = DnsOwner.MXSet(self._db)
    for row in mx.list_mx_sets(target_id=dns_owner_id):
        ret.append((dns.MX_SET, row['mx_set_id']))
    dns_owner = DnsOwner.DnsOwner(self._db)
    for row in dns_owner.list_srv_records(target_owner_id=dns_owner_id):
        ret.append((dns.SRV_TARGET, row['service_owner_id']))
    cn = CNameRecord.CNameRecord(self._db)
    for row in cn.list_ext(target_owner=dns_owner_id):
        ret.append((dns.CNAME_TARGET, row['cname_id']))
    arecord = record_class(self._db)
    for row in arecord.list_ext(dns_owner_id=dns_owner_id):
        ret.append((record_type, row[record_key]))
    hi = HostInfo.HostInfo(self._db)
    for row in hi.list_ext(dns_owner_id=dns_owner_id):
        ret.append((dns.HOST_INFO, row['host_id'],))
    if only_type:
        return [x[0] for x in ret]
    return ret
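
# Illustrative usage sketch (assumption, not original code): checking whether
# an IP number is still referenced before deleting it, by consulting
# find_referers() above.  `finder` is assumed to be a Utils.Find instance and
# `ip_id` an existing ip_number_id.
def _example_ip_in_use(finder, ip_id):
    refs = finder.find_referers(ip_number_id=ip_id, ip_type=dns.IP_NUMBER)
    # With only_type=True (the default) the result is a list of record-type
    # constants, e.g. dns.A_RECORD or dns.REV_IP_NUMBER.
    return bool(refs)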
from Cerebrum.modules.dns import ARecord
from Cerebrum.modules.dns import CNameRecord
from Cerebrum.modules.dns import DnsOwner
from Cerebrum.modules.dns import HostInfo
from Cerebrum.modules.dns import IPNumber
from Cerebrum.modules.dns import Utils

sys.argv.extend(["--logger-level", "DEBUG"])
logger = Factory.get_logger("cronjob")
db = Factory.get('Database')()
db.cl_init(change_program='import_dns')
co = Factory.get('Constants')(db)
ipnumber = IPNumber.IPNumber(db)
arecord = ARecord.ARecord(db)
cname = CNameRecord.CNameRecord(db)
dnsowner = DnsOwner.DnsOwner(db)
host = HostInfo.HostInfo(db)
mx_set = DnsOwner.MXSet(db)
# logger.setLevel(logger.debug)

header_splitter = r'^; AUTOGENERATED: do not edit below this line'


class Netgroups(object):

    class MergeRestart(Exception):
        pass

    def __init__(self, fname, default_zone):
        self._fname = fname
        self._default_zone = default_zone
        self._import_netgroups()

    def _parse_netgroups(self):