def test_synchronized_zone_recursive_decorator_call(self):
    @service.synchronized_zone()
    def mock_create_record(cls, context, record):
        self.assertEqual(service.ZONE_LOCKS.held, {record.zone_id})
        mock_get_zone(cls, context, zone.Zone(id=record.zone_id))

    @service.synchronized_zone()
    def mock_get_zone(cls, context, zone):
        self.assertEqual(service.ZONE_LOCKS.held, {zone.id})

    mock_create_record(object, self.get_context(),
                       record=record.Record(zone_id=utils.generate_uuid()))
    mock_get_zone(object, self.get_context(),
                  zone=zone.Zone(id=utils.generate_uuid()))
def get_record_fixture(self, recordset_type, fixture=0, values=None):
    """override to ensure all records have a recordset_id"""
    values = values or {}
    return super(IPABackendTestCase, self).get_record_fixture(
        recordset_type, fixture,
        values={"recordset_id": utils.generate_uuid()}
    )
def create_zone():
    id_ = generate_uuid()
    return objects.Zone(
        id=id_,
        name='%s-example.com.' % id_,
        email='*****@*****.**',
    )
def test_iter_zones(self, get_central):
    # Test that the iteration code is working properly.
    central = mock.Mock()
    get_central.return_value = central

    ctxt = mock.Mock()
    iterer = self.task._iter_zones(ctxt)
    items = [RoObject(id=generate_uuid()) for i in range(0, 5)]
    central.find_zones.return_value = items

    # Iterate through the items causing the "paging" to be done.
    list(map(lambda i: next(iterer), items))
    central.find_zones.assert_called_once_with(
        ctxt, {"shard": "BETWEEN 0,9"}, limit=100)
    central.find_zones.reset_mock()

    # Call next on the iterator and see it trying to load a new page.
    # Also this will raise a StopIteration since there are no more items.
    central.find_zones.return_value = []
    self.assertRaises(StopIteration, next, iterer)

    central.find_zones.assert_called_once_with(
        ctxt, {"shard": "BETWEEN 0,9"}, marker=items[-1].id, limit=100)
def test_iter_zones(self, get_central):
    # Test that the iteration code is working properly.
    central = mock.Mock()
    get_central.return_value = central

    ctxt = mock.Mock()
    iterer = self.task._iter_zones(ctxt)
    items = [RoObject(id=generate_uuid()) for i in range(0, 5)]
    central.find_zones.return_value = items

    # Iterate through the items causing the "paging" to be done.
    list(map(lambda i: next(iterer), items))
    central.find_zones.assert_called_once_with(
        ctxt, {"shard": "BETWEEN 0,9"}, limit=100)
    central.find_zones.reset_mock()

    # Call next on the iterator and see it trying to load a new page.
    # Also this will raise a StopIteration since there are no more items.
    central.find_zones.return_value = []
    with testtools.ExpectedException(StopIteration):
        next(iterer)

    central.find_zones.assert_called_once_with(
        ctxt, {"shard": "BETWEEN 0,9"}, marker=items[-1].id, limit=100)
def _build_zones(self, n, action, status):
    return [
        self._build_zone("zone%02X.example." % cnt, action, status,
                         id=generate_uuid())
        for cnt in range(n)
    ]
def __init__(self, keytab, hostname):
    # store the kerberos credentials in memory rather than on disk
    os.environ['KRB5CCNAME'] = "MEMORY:" + generate_uuid()
    self.token = None
    self.keytab = keytab
    self.hostname = hostname
    if self.keytab:
        os.environ['KRB5_CLIENT_KTNAME'] = self.keytab
    else:
        LOG.warning(_LW('No IPA client kerberos keytab file given'))
def get_record_fixture(self, recordset_type, fixture=0, values=None):
    """override to ensure all records have a recordset_id"""
    values = values or {}
    return super(IPABackendTestCase, self).get_record_fixture(
        recordset_type, fixture, values={
            'recordset_id': utils.generate_uuid()
        }
    )
def get_domain_fixture(self):
    return super(PowerDNSBackendTestCase, self).get_domain_fixture(
        values={
            'id': utils.generate_uuid(),
            'ttl': 42,
            'serial': 42,
            'refresh': 42,
            'retry': 42,
            'expire': 42,
            'minimum': 42,
        })
def get_domain_fixture(self):
    return super(PowerDNSBackendTestCase, self).get_domain_fixture(
        values={
            'id': utils.generate_uuid(),
            'ttl': 42,
            'serial': 42,
            'refresh': 42,
            'retry': 42,
            'expire': 42,
            'minimum': 42,
        }
    )
def test_refresh_zone(self):
    transferred = timeutils.utcnow(True) - datetime.timedelta(minutes=62)
    zone = RoObject(
        id=generate_uuid(),
        transferred_at=datetime.datetime.isoformat(transferred),
        refresh=3600)

    with mock.patch.object(self.task, '_iter') as _iter:
        _iter.return_value = [zone]
        self.task()
        self.central.xfr_zone.assert_called_once_with(self.ctxt, zone.id)
def test_refresh_zone_not_expired(self):
    # Dummy zone object
    transferred = timeutils.utcnow(True) - datetime.timedelta(minutes=50)
    zone = RoObject(
        id=generate_uuid(),
        transferred_at=datetime.datetime.isoformat(transferred),
        refresh=3600)

    with mock.patch.object(self.task, '_iter') as _iter:
        _iter.return_value = [zone]
        self.task()
        self.assertFalse(self.central.xfr_zone.called)
def test_synchronized_zone_exception_raised(self):
    @service.synchronized_zone()
    def mock_get_zone(cls, index, zone):
        self.assertEqual(service.ZONE_LOCKS.held, {zone.id})
        if index % 3 == 0:
            raise exceptions.ZoneNotFound()

    for index in range(9):
        try:
            mock_get_zone(object, index,
                          zone.Zone(id=utils.generate_uuid()))
        except exceptions.ZoneNotFound:
            pass
def test_emit_exists(self):
    zone = RoObject(id=generate_uuid())

    with mock.patch.object(self.task, '_iter_zones') as iter_:
        iter_.return_value = [zone]
        self.task()

    data = dict(zone)
    data.update(self.period_data)

    # Ensure both the old (domain) and new (zone) events are fired
    # until the old is fully deprecated.
    self.mock_notifier.info.assert_any_call(
        self.ctxt, "dns.domain.exists", data)
    self.mock_notifier.info.assert_any_call(
        self.ctxt, "dns.zone.exists", data)
def test_emit_exists(self):
    zone = RoObject(id=generate_uuid())

    with mock.patch.object(self.task, '_iter_zones') as iter_:
        iter_.return_value = [zone]
        self.task()

    data = dict(zone)
    data.update(self.period_data)

    # Ensure both the old (domain) and new (zone) events are fired
    # until the old is fully deprecated.
    self.mock_notifier.info.assert_any_call(self.ctxt,
                                            "dns.domain.exists", data)
    self.mock_notifier.info.assert_any_call(self.ctxt,
                                            "dns.zone.exists", data)
def start(self):
    self._coordination_id = ":".join([CONF.host, generate_uuid()])

    if CONF.coordination.backend_url is not None:
        backend_url = cfg.CONF.coordination.backend_url

        self._coordinator = tooz.coordination.get_coordinator(
            backend_url, self._coordination_id)
        self._coordination_started = False

        self.tg.add_timer(cfg.CONF.coordination.heartbeat_interval,
                          self._coordinator_heartbeat)
        self.tg.add_timer(cfg.CONF.coordination.run_watchers_interval,
                          self._coordinator_run_watchers)
    else:
        msg = _LW("No coordination backend configured, distributed "
                  "coordination functionality will be disabled. "
                  "Please configure a coordination backend.")
        LOG.warning(msg)

    super(CoordinationMixin, self).start()

    if self._coordinator is not None:
        while not self._coordination_started:
            try:
                self._coordinator.start()

                try:
                    create_group_req = self._coordinator.create_group(
                        self.service_name)
                    create_group_req.get()
                except tooz.coordination.GroupAlreadyExist:
                    pass

                join_group_req = self._coordinator.join_group(
                    self.service_name)
                join_group_req.get()

                self._coordination_started = True
            except Exception:
                LOG.warning(_LW("Failed to start Coordinator:"),
                            exc_info=True)
                time.sleep(15)
def start(self):
    self.coordination_id = ":".join([CONF.host, utils.generate_uuid()])
    self._started = False

    backend_url = CONF.coordination.backend_url
    if backend_url is None:
        LOG.warning('No coordination backend configured, distributed '
                    'coordination functionality will be disabled. '
                    'Please configure a coordination backend.')
        return

    self._coordinator = tooz.coordination.get_coordinator(
        backend_url, self.coordination_id.encode())
    while not self._coordinator.is_started:
        self._coordinator.start(start_heart=True)
    self._started = True

    if self._grouping_enabled:
        self._enable_grouping()
def start(self):
    self.coordination_id = ":".join([CONF.host, generate_uuid()])

    if CONF.coordination.backend_url is not None:
        backend_url = CONF.coordination.backend_url

        self._coordinator = tooz.coordination.get_coordinator(
            backend_url, self.coordination_id.encode())
        self._started = False

        self.tg.add_timer(CONF.coordination.heartbeat_interval,
                          self._coordinator_heartbeat)
        self.tg.add_timer(CONF.coordination.run_watchers_interval,
                          self._coordinator_run_watchers)
    else:
        LOG.warning('No coordination backend configured, distributed '
                    'coordination functionality will be disabled. '
                    'Please configure a coordination backend.')

    if self._coordinator is not None:
        while not self._started:
            try:
                self._coordinator.start()

                try:
                    create_group_req = self._coordinator.create_group(
                        self.name)
                    create_group_req.get()
                except tooz.coordination.GroupAlreadyExist:
                    pass

                join_group_req = self._coordinator.join_group(self.name)
                join_group_req.get()

                self._started = True
            except Exception:
                LOG.warning('Failed to start Coordinator', exc_info=True)
                time.sleep(15)
def main():
    # HACK HACK HACK - allow required config params to be passed
    # via the command line
    cfg.CONF['service:api']._group._opts['api_base_uri']['cli'] = True
    for optdict in cfg.CONF['backend:ipa']._group._opts.values():
        if 'cli' in optdict:
            optdict['cli'] = True

    # HACK HACK HACK - allow api url to be passed in the usual way
    utils.read_config('designate', sys.argv)
    if cfg.CONF['service:central'].backend_driver == 'ipa':
        raise CannotUseIPABackend(cuiberrorstr)

    if cfg.CONF.debug:
        LOG.setLevel(logging.DEBUG)
    elif cfg.CONF.verbose:
        LOG.setLevel(logging.INFO)
    else:
        LOG.setLevel(logging.WARN)

    ipabackend = impl_ipa.IPABackend(None)
    ipabackend.start()
    version = cfg.CONF['backend:ipa'].ipa_version
    designateurl = cfg.CONF['service:api'].api_base_uri + "v1"

    # get the list of domains/zones from IPA
    ipazones = getipadomains(ipabackend, version)

    # get unique list of name servers
    servers = {}
    for zonerec in ipazones:
        for nsrec in zonerec['nsrecord']:
            servers[nsrec] = nsrec
    if not servers:
        raise NoNameServers("Error: no name servers found in IPA")

    # let's see if designate is using the IPA backend
    # create a fake domain in IPA
    # create a fake server in Designate
    # try to create the same fake domain in Designate
    # if we get a DuplicateZone error from Designate, then
    # raise the CannotUseIPABackend error, after deleting
    # the fake server and fake domain

    # find the first non-reverse zone
    zone = {}
    for zrec in ipazones:
        if not zrec['idnsname'][0].endswith("in-addr.arpa.") and \
                zrec['idnszoneactive'][0] == 'TRUE':
            # ipa returns every data field as a list
            # convert the list to a scalar
            for n, v in list(zrec.items()):
                if n in zoneskips:
                    continue
                if isinstance(v, list):
                    zone[n] = v[0]
                else:
                    zone[n] = v
            break

    assert zone

    # create a fake subdomain of this zone
    domname = "%s.%s" % (utils.generate_uuid(), zone['idnsname'])

    args = copy.copy(zone)
    del args['idnsname']
    args['version'] = version

    ipareq = {'method': 'dnszone_add', 'params': [[domname], args]}
    iparesp = ipabackend._call_and_handle_error(ipareq)
    LOG.debug("Response: %s" % pprint.pformat(iparesp))
    if iparesp['error']:
        raise AddDomainError(pprint.pformat(iparesp))

    # set up designate connection
    designatereq = requests.Session()
    xtra_hdrs = {'Content-Type': 'application/json'}
    designatereq.headers.update(xtra_hdrs)

    # sync ipa name servers to designate
    syncipaservers2des(servers, designatereq, designateurl)

    domainurl = designateurl + "/domains"

    # next, try to add the fake domain to Designate
    email = zone['idnssoarname'].rstrip(".").replace(".", "@", 1)
    desreq = {"name": domname,
              "ttl": int(zone['idnssoarefresh'][0]),
              "email": email}
    resp = designatereq.post(domainurl, data=json.dumps(desreq))
    exc = None
    fakezoneid = None
    if resp.status_code == 200:
        LOG.info(_LI("Added domain %s"), domname)
        fakezoneid = resp.json()['id']
        delresp = designatereq.delete(domainurl + "/" + fakezoneid)
        if delresp.status_code != 200:
            LOG.error(_LE("Unable to delete %(name)s: %(response)s") %
                      {'name': domname,
                       'response': pprint.pformat(delresp.json())})
    else:
        exc = CannotUseIPABackend(cuiberrorstr)

    # cleanup fake stuff
    ipareq = {'method': 'dnszone_del',
              'params': [[domname], {'version': version}]}
    iparesp = ipabackend._call_and_handle_error(ipareq)
    LOG.debug("Response: %s" % pprint.pformat(iparesp))
    if iparesp['error']:
        LOG.error(_LE("%s") % pprint.pformat(iparesp))

    if exc:
        raise exc

    # get and delete existing domains
    resp = designatereq.get(domainurl)
    LOG.debug("Response: %s" % pprint.pformat(resp.json()))
    if resp and resp.status_code == 200 and resp.json() and \
            'domains' in resp.json():
        # domains must be deleted in child/parent order i.e. delete
        # sub-domains before parent domains - simple way to get this
        # order is to sort the domains in reverse order of name len
        dreclist = sorted(resp.json()['domains'],
                          key=lambda drec: len(drec['name']),
                          reverse=True)
        for drec in dreclist:
            delresp = designatereq.delete(domainurl + "/" + drec['id'])
            if delresp.status_code != 200:
                raise DeleteDomainError(
                    "Unable to delete %s: %s" %
                    (drec['name'], pprint.pformat(delresp.json())))

    # key is zonename, val is designate rec id
    zonerecs = {}
    for zonerec in ipazones:
        desreq = zone2des(zonerec)
        resp = designatereq.post(domainurl, data=json.dumps(desreq))
        if resp.status_code == 200:
            LOG.info(_LI("Added domain %s"), desreq['name'])
        else:
            raise AddDomainError("Unable to add domain %s: %s" %
                                 (desreq['name'],
                                  pprint.pformat(resp.json())))
        zonerecs[desreq['name']] = resp.json()['id']

    # get the records for each zone
    for zonename, domainid in list(zonerecs.items()):
        recurl = designateurl + "/domains/" + domainid + "/records"
        iparecs = getiparecords(ipabackend, zonename, version)
        for rec in iparecs:
            desreqs = rec2des(rec, zonename)
            for desreq in desreqs:
                resp = designatereq.post(recurl, data=json.dumps(desreq))
                if resp.status_code == 200:
                    LOG.info(_LI("Added record %(record)s "
                                 "for domain %(domain)s"),
                             {'record': desreq['name'],
                              'domain': zonename})
                else:
                    raise AddRecordError("Could not add record %s: %s" %
                                         (desreq['name'],
                                          pprint.pformat(resp.json())))
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six

from oslo_log import log as logging

from designate.utils import generate_uuid
from designate.network_api import base


LOG = logging.getLogger(__name__)

POOL = dict([(generate_uuid(), '192.168.2.%s' % i) for i in range(0, 254)])
ALLOCATIONS = {}


def _format_floatingip(id_, address):
    return {'region': u'RegionOne', 'address': address, 'id': id_}


def allocate_floatingip(project_id, floatingip_id=None):
    """
    Allocates a floating ip from the pool to the project.
    """
    ALLOCATIONS.setdefault(project_id, {})

    id_ = floatingip_id or list(six.iterkeys(POOL))[0]
def upgrade(migrate_engine):
    meta.bind = migrate_engine

    # Get associated database tables
    servers_table = Table('servers', meta, autoload=True)
    zones_table = Table('domains', meta, autoload=True)
    records_table = Table('records', meta, autoload=True)

    dialect = migrate_engine.url.get_dialect().name

    RECORD_TYPES = ['A', 'AAAA', 'CNAME', 'MX', 'SRV', 'TXT', 'SPF', 'NS',
                    'PTR', 'SSHFP', 'SOA']

    recordsets_table = Table('recordsets', meta, autoload=True)
    recordsets_table.c.type.alter(type=Enum(name='recordset_types',
                                            *RECORD_TYPES))

    # Re-add constraint for sqlite
    if dialect.startswith('sqlite'):
        constraint = UniqueConstraint('domain_id', 'name', 'type',
                                      name='unique_recordset',
                                      table=recordsets_table)
        constraint.create()

    # Get the server names which are used to create NS & SOA records
    servers = select(
        columns=[
            servers_table.c.name
        ]
    ).execute().fetchall()

    # Get all the zones
    zones = select(
        columns=[
            zones_table.c.id,
            zones_table.c.created_at,
            zones_table.c.tenant_id,
            zones_table.c.name,
            zones_table.c.email,
            zones_table.c.serial,
            zones_table.c.refresh,
            zones_table.c.retry,
            zones_table.c.expire,
            zones_table.c.minimum
        ]
    ).execute().fetchall()

    # NOTE(per kiall): Since we need a unique UUID for each recordset etc, and
    #                  need to maintain cross DB compatibility, we're stuck
    #                  doing this in code
    for zone in zones:
        # Create the SOA Recordset, returning the UUID primary key to be used
        # in creating the associated SOA Record
        soa_pk = recordsets_table.insert().execute(
            id=utils.generate_uuid().replace('-', ''),
            created_at=zone.created_at,
            domain_id=zone.id,
            tenant_id=zone.tenant_id,
            name=zone.name,
            type='SOA',
            version=1
        ).inserted_primary_key[0]

        # Create the SOA Record
        soa_data = _build_soa_record(zone, servers)
        records_table.insert().execute(
            id=utils.generate_uuid().replace('-', ''),
            created_at=zone.created_at,
            domain_id=zone.id,
            tenant_id=zone.tenant_id,
            recordset_id=soa_pk,
            data=soa_data,
            hash=_build_hash(soa_pk, soa_data),
            managed=True,
            version=1
        )

        # Create the NS Recordset, returning the UUID primary key to be used
        # in creating the associated NS record
        # NS records could already exist, so check for duplicates
        try:
            ns_pk = recordsets_table.insert().execute(
                id=utils.generate_uuid().replace('-', ''),
                created_at=zone.created_at,
                domain_id=zone.id,
                tenant_id=zone.tenant_id,
                name=zone.name,
                type='NS',
                version=1
            ).inserted_primary_key[0]
        except exception.DBDuplicateEntry:
            # If there's already an NS recordset, retrieve it
            ns_pk = select([recordsets_table.c.id])\
                .where(recordsets_table.c.domain_id == zone.id)\
                .where(recordsets_table.c.tenant_id == zone.tenant_id)\
                .where(recordsets_table.c.name == zone.name)\
                .where(recordsets_table.c.type == 'NS')\
                .execute().scalar()

        # Create the NS records, one for each server
        for server in servers:
            records_table.insert().execute(
                id=utils.generate_uuid().replace('-', ''),
                created_at=zone.created_at,
                domain_id=zone.id,
                tenant_id=zone.tenant_id,
                recordset_id=ns_pk,
                data=server.name,
                hash=_build_hash(ns_pk, server.name),
                managed=True,
                version=1
            )
def get_server_fixture(self):
    return super(PowerDNSBackendTestCase, self).get_server_fixture(
        values={
            'id': utils.generate_uuid()
        }
    )
def get_admin_context(self):
    return DesignateContext.get_admin_context(
        project_id=utils.generate_uuid(),
        user_id=utils.generate_uuid())
def create_test_zone():
    return objects.Zone(
        id=utils.generate_uuid(),
        name='www.example.org.',
        email='*****@*****.**',
    )
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import six

from oslo_log import log as logging

from designate.utils import generate_uuid
from designate.network_api import base


LOG = logging.getLogger(__name__)

POOL = dict([(generate_uuid(), '192.168.2.%s' % i) for i in range(0, 254)])
ALLOCATIONS = {}


def _format_floatingip(id_, address):
    return {
        'region': u'RegionOne',
        'address': address,
        'id': id_
    }


def allocate_floatingip(project_id, floatingip_id=None):
    """
    Allocates a floating ip from the pool to the project.
cfg.CONF.register_cli_opts([
    cfg.StrOpt("domain_id", help="ID of domain to use."),
    cfg.IntOpt("records", default=100,
               help="Records to create (name will be <uuid>.<domain name>).")
])

LOG = logging.getLogger(__name__)

if __name__ == '__main__':
    logging.register_options(cfg.CONF)
    cfg.CONF(sys.argv[1:], project="designate")
    logging.setup(cfg.CONF, "designate")

    project_name = os.environ.get('OS_PROJECT_NAME',
                                  os.environ.get('OS_TENANT_NAME'))

    client = v1.Client(auth_url=os.environ.get('OS_AUTH_URL'),
                       username=os.environ.get('OS_USERNAME'),
                       password=os.environ.get('OS_PASSWORD'),
                       project_name=project_name)

    domain = client.domains.get(cfg.CONF.domain_id)

    LOG.info("Creating %s records", cfg.CONF.records)
    for i in range(0, cfg.CONF.records):
        name = '%s.%s' % (generate_uuid(), domain.name)
        record = {"name": name, "type": "A", "data": "10.0.0.1"}
        client.records.create(domain, record)
cfg.CONF.register_cli_opts([
    cfg.StrOpt("domain_id", help="ID of domain to use."),
    cfg.IntOpt("records", default=100,
               help="Records to create (name will be <uuid>.<domain name>).")
])

LOG = logging.getLogger(__name__)

if __name__ == '__main__':
    logging.register_options(cfg.CONF)
    cfg.CONF(sys.argv[1:], project="designate")
    logging.setup(cfg.CONF, "designate")

    project_name = os.environ.get(
        'OS_PROJECT_NAME', os.environ.get('OS_TENANT_NAME'))

    client = v1.Client(
        auth_url=os.environ.get('OS_AUTH_URL'),
        username=os.environ.get('OS_USERNAME'),
        password=os.environ.get('OS_PASSWORD'),
        project_name=project_name
    )

    domain = client.domains.get(cfg.CONF.domain_id)

    LOG.info("Creating %s records", cfg.CONF.records)
    for i in range(0, cfg.CONF.records):
        name = '%s.%s' % (generate_uuid(), domain.name)
        record = {"name": name, "type": "A", "data": "10.0.0.1"}
        client.records.create(domain, record)
def upgrade(migrate_engine):
    meta.bind = migrate_engine

    # Get associated database tables
    servers_table = Table('servers', meta, autoload=True)
    zones_table = Table('domains', meta, autoload=True)
    records_table = Table('records', meta, autoload=True)

    dialect = migrate_engine.url.get_dialect().name

    RECORD_TYPES = [
        'A', 'AAAA', 'CNAME', 'MX', 'SRV', 'TXT', 'SPF', 'NS', 'PTR',
        'SSHFP', 'SOA'
    ]

    recordsets_table = Table('recordsets', meta, autoload=True)
    recordsets_table.c.type.alter(
        type=Enum(name='recordset_types', *RECORD_TYPES))

    # Re-add constraint for sqlite
    if dialect.startswith('sqlite'):
        constraint = UniqueConstraint('domain_id', 'name', 'type',
                                      name='unique_recordset',
                                      table=recordsets_table)
        constraint.create()

    # Get the server names which are used to create NS & SOA records
    servers = select(columns=[servers_table.c.name]).execute().fetchall()

    # Get all the zones
    zones = select(columns=[
        zones_table.c.id,
        zones_table.c.created_at,
        zones_table.c.tenant_id,
        zones_table.c.name,
        zones_table.c.email,
        zones_table.c.serial,
        zones_table.c.refresh,
        zones_table.c.retry,
        zones_table.c.expire,
        zones_table.c.minimum
    ]).execute().fetchall()

    # NOTE(per kiall): Since we need a unique UUID for each recordset etc, and
    #                  need to maintain cross DB compatibility, we're stuck
    #                  doing this in code
    for zone in zones:
        # Create the SOA Recordset, returning the UUID primary key to be used
        # in creating the associated SOA Record
        soa_pk = recordsets_table.insert().execute(
            id=utils.generate_uuid().replace('-', ''),
            created_at=zone.created_at,
            domain_id=zone.id,
            tenant_id=zone.tenant_id,
            name=zone.name,
            type='SOA',
            version=1).inserted_primary_key[0]

        # Create the SOA Record
        soa_data = _build_soa_record(zone, servers)
        records_table.insert().execute(
            id=utils.generate_uuid().replace('-', ''),
            created_at=zone.created_at,
            domain_id=zone.id,
            tenant_id=zone.tenant_id,
            recordset_id=soa_pk,
            data=soa_data,
            hash=_build_hash(soa_pk, soa_data),
            managed=True,
            version=1)

        # Create the NS Recordset, returning the UUID primary key to be used
        # in creating the associated NS record
        # NS records could already exist, so check for duplicates
        try:
            ns_pk = recordsets_table.insert().execute(
                id=utils.generate_uuid().replace('-', ''),
                created_at=zone.created_at,
                domain_id=zone.id,
                tenant_id=zone.tenant_id,
                name=zone.name,
                type='NS',
                version=1).inserted_primary_key[0]
        except exception.DBDuplicateEntry:
            # If there's already an NS recordset, retrieve it
            ns_pk = select([recordsets_table.c.id])\
                .where(recordsets_table.c.domain_id == zone.id)\
                .where(recordsets_table.c.tenant_id == zone.tenant_id)\
                .where(recordsets_table.c.name == zone.name)\
                .where(recordsets_table.c.type == 'NS')\
                .execute().scalar()

        # Create the NS records, one for each server
        for server in servers:
            records_table.insert().execute(
                id=utils.generate_uuid().replace('-', ''),
                created_at=zone.created_at,
                domain_id=zone.id,
                tenant_id=zone.tenant_id,
                recordset_id=ns_pk,
                data=server.name,
                hash=_build_hash(ns_pk, server.name),
                managed=True,
                version=1)
def get_admin_context(self):
    return DesignateContext.get_admin_context(
        tenant=utils.generate_uuid(),
        user=utils.generate_uuid())
def get_server_fixture(self):
    return super(PowerDNSBackendTestCase, self).get_server_fixture(
        values={'id': utils.generate_uuid()})