def __call__(self):
    """Emit a 'dns.domain.exists' event for every zone in this shard range."""
    shard_start, shard_end = self._my_range()
    LOG.info(_LI("Emitting zone exist events for shards %(start)s to %(end)s"),
             {"start": shard_start, "end": shard_end})

    admin_ctxt = context.DesignateContext.get_admin_context()
    admin_ctxt.all_tenants = True

    period_start, period_end = self._get_period(self.options.interval)
    audit_fields = {
        "audit_period_beginning": period_start,
        "audit_period_ending": period_end
    }

    emitted = 0
    for zone in self._iter_zones(admin_ctxt):
        emitted += 1
        payload = zone.to_dict()
        payload.update(audit_fields)
        self.notifier.info(admin_ctxt, 'dns.domain.exists', payload)

    LOG.info(_LI("Finished emitting %(counter)d events for shards "
                 "%(start)s to %(end)s"),
             {"start": shard_start, "end": shard_end, "counter": emitted})
def bind_udp(host, port):
    """Create, configure and bind a UDP listening socket.

    SO_REUSEADDR is always set; SO_REUSEPORT is set where the platform
    supports it.

    :param host: IPv4/v6 address or "". "" binds to every IPv4 interface.
    :type host: str
    :param port: UDP port
    :type port: int
    :returns: socket
    """
    LOG.info(_LI('Opening UDP Listening Socket on %(host)s:%(port)d'),
             {'host': host, 'port': port})

    addr_family = socket.AF_INET6 if is_valid_ipv6(host) else socket.AF_INET
    sock = socket.socket(addr_family, socket.SOCK_DGRAM)
    sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

    # NOTE: Linux supports socket.SO_REUSEPORT only in 3.9 and later releases.
    try:
        sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
    except Exception:
        LOG.info(_LI('SO_REUSEPORT not available, ignoring.'))

    sock.setblocking(True)
    sock.bind((host, port))

    if port == 0:
        # An ephemeral port was requested; report which one we got.
        LOG.info(_LI('Listening on UDP port %(port)d'),
                 {'port': sock.getsockname()[1]})

    return sock
def __init__(self, *args, **kwargs):
    """Set up the mDNS service: RPC endpoint, request handler and sockets."""
    notify_endpoint = notify.NotifyEndpoint()
    kwargs['endpoints'] = [notify_endpoint]
    super(Service, self).__init__(*args, **kwargs)
    # Create an instance of the RequestHandler class
    self.application = handler.RequestHandler()
    # Wrap the application in any middleware required
    # TODO(kiall): In the future, we want to allow users to pick+choose
    #              the middleware to be applied, similar to how we do this
    #              in the API.
    self.application = middleware.ContextMiddleware(self.application)
    # Bind to the TCP port
    # NOTE: format args are passed lazily to LOG.info (not eagerly with %)
    #       so interpolation is skipped when INFO is disabled.
    LOG.info(_LI('Opening TCP Listening Socket on %(host)s:%(port)d'),
             {'host': CONF['service:mdns'].host,
              'port': CONF['service:mdns'].port})
    self._sock_tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self._sock_tcp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    self._sock_tcp.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)
    self._sock_tcp.bind((CONF['service:mdns'].host,
                         CONF['service:mdns'].port))
    self._sock_tcp.listen(CONF['service:mdns'].tcp_backlog)
    # Bind to the UDP port
    LOG.info(_LI('Opening UDP Listening Socket on %(host)s:%(port)d'),
             {'host': CONF['service:mdns'].host,
              'port': CONF['service:mdns'].port})
    self._sock_udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    self._sock_udp.bind((CONF['service:mdns'].host,
                         CONF['service:mdns'].port))
def __call__(self):
    """Emit 'dns.domain.exists' events for every zone in this shard range,
    paging through central with a marker.
    """
    pstart, pend = self._my_range()
    # Lazy log args: only interpolated when INFO is enabled.
    LOG.info(_LI("Emitting zone exist events for %(start)s to %(end)s"),
             {"start": pstart, "end": pend})

    ctxt = context.DesignateContext.get_admin_context()
    ctxt.all_tenants = True

    criterion = self._filter_between('shard')

    start, end = self._get_period(self.options.interval)
    data = {
        "audit_period_beginning": str(start),
        "audit_period_ending": str(end)
    }

    marker = None
    while True:
        zones = self.central_api.find_domains(ctxt, criterion,
                                              marker=marker,
                                              limit=self.options.per_page)
        if len(zones) == 0:
            LOG.info(_LI("Finished emitting events."))
            break

        # Remember where this page ended so the next query resumes there.
        marker = zones.objects[-1].id

        for zone in zones:
            zone_data = dict(zone)
            zone_data.update(data)
            self.notifier.info(ctxt, 'dns.domain.exists', zone_data)
def _handle_notify(self, request):
    """
    Constructs the response to a NOTIFY and acts accordingly on it.

    * Checks if the master sending the NOTIFY is in the Zone's masters,
      if not it is ignored.
    * Checks if SOA query response serial != local serial.

    This is a generator that yields exactly one DNS response message.
    """
    context = request.environ["context"]
    response = dns.message.make_response(request)
    if len(request.question) != 1:
        response.set_rcode(dns.rcode.FORMERR)
        yield response
        # NOTE: "return" ends the generator. The previous
        #       "raise StopIteration" is converted into a RuntimeError
        #       by PEP 479 on Python 3.7+.
        return
    else:
        question = request.question[0]
    criterion = {
        "name": question.name.to_text(),
        "type": "SECONDARY",
        "deleted": False
    }
    try:
        zone = self.storage.find_zone(context, criterion)
    except exceptions.ZoneNotFound:
        response.set_rcode(dns.rcode.NOTAUTH)
        yield response
        return
    notify_addr = request.environ["addr"][0]
    # We check if the src_master which is the assumed master for the zone
    # that is sending this NOTIFY OP is actually the master. If it's not
    # We'll reply but don't do anything with the NOTIFY.
    master_addr = zone.get_master_by_ip(notify_addr)
    if not master_addr:
        msg = _LW("NOTIFY for %(name)s from non-master server "
                  "%(addr)s, ignoring.")
        # Lazy args: only interpolated when WARNING is emitted.
        LOG.warning(msg, {"name": zone.name, "addr": notify_addr})
        response.set_rcode(dns.rcode.REFUSED)
        yield response
        return
    resolver = dns.resolver.Resolver()
    # According to RFC we should query the server that sent the NOTIFY
    resolver.nameservers = [notify_addr]
    soa_answer = resolver.query(zone.name, "SOA")
    soa_serial = soa_answer[0].serial
    if soa_serial == zone.serial:
        msg = _LI("Serial %(serial)s is the same for master and us for "
                  "%(zone_id)s")
        LOG.info(msg, {"serial": soa_serial, "zone_id": zone.id})
    else:
        msg = _LI("Scheduling AXFR for %(zone_id)s from %(master_addr)s")
        info = {"zone_id": zone.id, "master_addr": master_addr}
        LOG.info(msg, info)
        self.tg.add_thread(self.zone_sync, context, zone, [master_addr])
    response.flags |= dns.flags.AA
    yield response
def get_server(server_id):
    """Return one pool ns_record, rendered as a v1 'server' resource."""
    context = flask.request.environ.get('context')
    central_api = central_rpcapi.CentralAPI.get_instance()

    # Get the default pool
    pool = central_api.get_pool(context, default_pool_id)
    LOG.info(_LI("Retrieved %(pool)s"), {'pool': pool})

    # Search the pool for the requested nameserver, starting from an empty
    # PoolNsRecord so a miss can be detected below.
    nameserver = objects.PoolNsRecord()
    for candidate in pool.ns_records:
        if candidate.id == server_id:
            nameserver = candidate
            break

    # If the nameserver wasn't found, raise an exception
    if nameserver.id != server_id:
        raise exceptions.ServerNotFound

    LOG.info(_LI("Retrieved %(server)s"), {'server': nameserver})

    server = _pool_ns_record_to_server(nameserver)
    return flask.jsonify(server_schema.filter(server))
def sync_domains(self, pool_id, pool_target_id, batch_size):
    """Push every zone in a pool to Akamai, batch_size zones at a time."""
    pool, target = self._get_config(pool_id, pool_target_id)

    client = impl_akamai.EnhancedDNSClient(
        target.options.get("username"), target.options.get("password"))

    # Lazy log args: only interpolated when INFO is enabled.
    LOG.info(_LI("Doing batches of %i"), batch_size)

    criterion = {"pool_id": pool_id}
    marker = None

    while marker is not False:
        zones = self.central_api.find_domains(
            self.context, criterion, limit=batch_size, marker=marker)
        update = []

        if len(zones) == 0:
            LOG.info(_LI("Stopping as there are no more zones."))
            break
        else:
            marker = zones[-1]['id']

        for zone in zones:
            z = impl_akamai.build_zone(client, target, zone)
            update.append(z)

        LOG.info(_LI('Uploading %d Zones'), len(update))
        client.setZones(update)
def create_zone(self, context, zone):
    """Create a secondary zone in DynECT and activate it.

    If DynECT reports the zone already exists, fall back to updating it.
    """
    LOG.info(_LI("Creating zone %(d_id)s / %(d_name)s"),
             {"d_id": zone["id"], "d_name": zone["name"]})

    url = "/Secondary/%s" % zone["name"].rstrip(".")
    payload = {"masters": [m.host for m in self.masters]}

    if self.contact_nickname is not None:
        payload["contact_nickname"] = self.contact_nickname
    if self.tsig_key_name is not None:
        payload["tsig_key_name"] = self.tsig_key_name

    client = self.get_client()
    try:
        client.post(url, data=payload)
    except DynClientError as e:
        already_exists = any(
            emsg["ERR_CD"] == "TARGET_EXISTS" for emsg in e.msgs)
        if not already_exists:
            raise e
        LOG.info(_LI("Zone already exists, updating existing "
                     "zone instead %s"), zone["name"])
        client.put(url, data=payload)

    client.put(url, data={"activate": True})
    client.logout()
def stop(self):
    """Stop participating in the leader election.

    Removes our elected-as-leader callback from the coordinator and, if
    we currently hold leadership, stands down first.
    """
    self._started = False
    if self._coordinator:
        LOG.info(_LI('Stopping leader election for group %(group)s'),
                 {'group': self._group_id})
        try:
            # Remove the elected_as_leader callback
            self._coordinator.unwatch_elected_as_leader(
                self._group_id, self._on_elected_leader)
        except AttributeError:
            # TODO(kiall): Remove when tooz bug #1467907 is fixed +
            #              released, and is in our requirements.
            # Workaround: drop the (now empty) hook list for our group
            # directly from tooz's internal hook registry.
            if not self._coordinator._hooks_elected_leader[self._group_id]:
                del self._coordinator._hooks_elected_leader[self._group_id]
        if self._leader:
            # Tell Tooz we no longer wish to be the leader
            LOG.info(_LI('Standing down as leader candidate for group '
                         '%(group)s'), {'group': self._group_id})
            self._leader = False
            self._coordinator.stand_down_group_leader(self._group_id)
    elif self._leader:
        # No coordination backend: we had assumed leadership locally,
        # so just clear the flag.
        LOG.info(_LI('Standing down as leader candidate for group '
                     '%(group)s'), {'group': self._group_id})
        self._leader = False
def sync_zones(self, pool_id, pool_target_id, batch_size):
    """Upload all zones in a pool to Akamai, batch_size at a time."""
    pool, target = self._get_config(pool_id, pool_target_id)

    client = impl_akamai.EnhancedDNSClient(target.options.get("username"),
                                           target.options.get("password"))
    LOG.info(_LI("Doing batches of %i"), batch_size)

    criterion = {"pool_id": pool_id}
    marker = None

    # Bug 1519356 - Init policy after configuration has been read
    policy.init()
    self.context.all_tenants = True

    while marker is not False:
        zones = self.central_api.find_zones(self.context, criterion,
                                            limit=batch_size, marker=marker)
        if len(zones) == 0:
            LOG.info(_LI("Stopping as there are no more zones."))
            break

        marker = zones[-1]["id"]
        batch = [impl_akamai.build_zone(client, target, zone)
                 for zone in zones]

        LOG.info(_LI("Uploading %d Zones"), len(batch))
        client.setZones(batch)
def create_domain(self, context, domain):
    """Create a secondary domain in DynECT and activate it.

    If DynECT reports TARGET_EXISTS the existing domain is updated
    instead; any other DynClientError is re-raised (previously all
    DynClientErrors were silently swallowed and the "already exists"
    message was logged unconditionally).
    """
    LOG.info(_LI('Creating domain %(d_id)s / %(d_name)s'),
             {'d_id': domain['id'], 'd_name': domain['name']})

    url = '/Secondary/%s' % domain['name'].rstrip('.')
    data = {
        'masters': cfg.CONF[GROUP].masters
    }

    if cfg.CONF[GROUP].contact_nickname is not None:
        data['contact_nickname'] = cfg.CONF[GROUP].contact_nickname

    if cfg.CONF[GROUP].tsig_key_name is not None:
        data['tsig_key_name'] = cfg.CONF[GROUP].tsig_key_name

    client = self.get_client()

    try:
        client.post(url, data=data)
    except DynClientError as e:
        for emsg in e.msgs:
            if emsg['ERR_CD'] == 'TARGET_EXISTS':
                msg = _LI("Domain already exists, updating existing "
                          "domain instead %s")
                LOG.info(msg, domain['name'])
                client.put(url, data=data)
                break
        else:
            # No TARGET_EXISTS error code found: don't swallow it.
            raise e

    client.put(url, data={'activate': True})
    client.logout()
def create_zone(self, context, zone):
    """Create a secondary zone in DynECT and activate it.

    If DynECT reports TARGET_EXISTS the existing zone is updated
    instead; any other DynClientError is re-raised.
    """
    # Lazy log args: only interpolated when INFO is enabled.
    LOG.info(_LI('Creating zone %(d_id)s / %(d_name)s'),
             {'d_id': zone['id'], 'd_name': zone['name']})

    url = '/Secondary/%s' % zone['name'].rstrip('.')
    data = {
        'masters': [m.host for m in self.masters]
    }

    if self.contact_nickname is not None:
        data['contact_nickname'] = self.contact_nickname

    if self.tsig_key_name is not None:
        data['tsig_key_name'] = self.tsig_key_name

    client = self.get_client()

    try:
        client.post(url, data=data)
    except DynClientError as e:
        for emsg in e.msgs:
            if emsg['ERR_CD'] == 'TARGET_EXISTS':
                msg = _LI("Zone already exists, updating existing "
                          "zone instead %s")
                LOG.info(msg, zone['name'])
                client.put(url, data=data)
                break
        else:
            raise e

    client.put(url, data={'activate': True})
    client.logout()
def start(self):
    """Start target backends, the leader election, and periodic timers."""
    for target in self.pool.targets:
        self.target_backends[target.id].start()

    super(Service, self).start()

    # Setup a Leader Election, use for ensuring certain tasks are executed
    # on exactly one pool-manager instance at a time]
    election_name = '%s:%s' % (self.service_name, self.pool.id)
    self._pool_election = coordination.LeaderElection(
        self._coordinator, election_name)
    self._pool_election.start()

    pm_conf = CONF['service:pool_manager']

    if pm_conf.enable_recovery_timer:
        LOG.info(_LI('Starting periodic recovery timer'))
        self.tg.add_timer(pm_conf.periodic_recovery_interval,
                          self.periodic_recovery,
                          pm_conf.periodic_recovery_interval)

    if pm_conf.enable_sync_timer:
        LOG.info(_LI('Starting periodic synchronization timer'))
        self.tg.add_timer(pm_conf.periodic_sync_interval,
                          self.periodic_sync,
                          pm_conf.periodic_sync_interval)
def bind_tcp(host, port, tcp_backlog, tcp_keepidle=None):
    """Create, configure, bind and listen on a TCP socket.

    SO_REUSEADDR and SO_KEEPALIVE are always set; SO_REUSEPORT and
    TCP_KEEPIDLE are applied only where the platform supports them.
    """
    # Bind to the TCP port
    LOG.info(_LI('Opening TCP Listening Socket on %(host)s:%(port)d'),
             {'host': host, 'port': port})

    listener = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    listener.setsockopt(socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1)

    # NOTE: Linux supports socket.SO_REUSEPORT only in 3.9 and later releases.
    try:
        listener.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
    except Exception:
        pass

    # This option isn't available in the OS X version of eventlet
    if tcp_keepidle and hasattr(socket, 'TCP_KEEPIDLE'):
        listener.setsockopt(socket.IPPROTO_TCP, socket.TCP_KEEPIDLE,
                            tcp_keepidle)

    listener.setblocking(True)
    listener.bind((host, port))

    if port == 0:
        # An ephemeral port was requested; report which one we got.
        LOG.info(_LI('Listening on TCP port %(port)d'),
                 {'port': listener.getsockname()[1]})

    listener.listen(tcp_backlog)
    return listener
def from_file(self, input_file=None, delimiter=None):
    """Import TLDs from a delimiter-separated file.

    Each row holds 'name<delim>description'; rows with extra fields are
    collected as errors instead of being imported.

    :param input_file: path to the TLD file
    :param delimiter: single-character field delimiter
    :raises Exception: when the input file is missing
    """
    # Guard against a missing filename before touching the filesystem;
    # os.path.exists(None) would raise TypeError.
    if input_file is None or not os.path.exists(str(input_file)):
        raise Exception('TLD Input file Not Found')
    input_file = str(input_file)

    # Lazy log args: only interpolated when the level is enabled.
    LOG.info(_LI("Importing TLDs from %s"), input_file)

    error_lines = []
    tlds_added = 0

    with open(input_file) as inf:
        csv.register_dialect('import-tlds', delimiter=str(delimiter))
        reader = csv.DictReader(inf,
                                fieldnames=['name', 'description'],
                                restkey='extra_fields',
                                dialect='import-tlds')
        for line in reader:
            # check if there are more than 2 fields
            if 'extra_fields' in line:
                error_lines.append("InvalidLine --> " +
                                   self._convert_tld_dict_to_str(line))
            else:
                tlds_added += self._validate_and_create_tld(line,
                                                            error_lines)

    LOG.info(_LI("Number of tlds added: %d"), tlds_added)

    errors = len(error_lines)
    if errors > 0:
        LOG.error(_LE("Number of errors: %d"), errors)
        # Sorting the errors and printing them so that it is easier to
        # read the errors
        LOG.error(_LE("Error Lines:\n%s"), '\n'.join(sorted(error_lines)))
def start(self):
    """Fetch the pool config from central (retrying until available),
    then start target backends, leader election and periodic timers.
    """
    # Build the Pool (and related) Object from Config
    context = DesignateContext.get_admin_context()
    pool_id = CONF['service:pool_manager'].pool_id

    has_targets = False

    # TODO(kiall): This block of code should be replaced with a cleaner,
    #              limited version. e.g. should retry for X minutes, and
    #              backoff rather than fixed retry intervals.
    while not has_targets:
        try:
            self.pool = self.central_api.get_pool(context, pool_id)

            if len(self.pool.targets) > 0:
                has_targets = True
            else:
                LOG.error(_LE("No targets for %s found."), self.pool)
                time.sleep(5)

        # Pool data may not have migrated to the DB yet
        except exceptions.PoolNotFound:
            LOG.error(_LE("Pool ID %s not found."), pool_id)
            time.sleep(5)
        # designate-central service may not have started yet
        except messaging.exceptions.MessagingTimeout:
            time.sleep(0.2)
        # designate-central failed in an unknown way, don't allow another
        # failing / not started service to cause pool-manager to crash.
        except Exception:
            LOG.exception(_LE("An unknown exception occurred while "
                              "fetching pool details"))
            time.sleep(5)

    # Create the necessary Backend instances for each target
    self._setup_target_backends()

    for target in self.pool.targets:
        self.target_backends[target.id].start()

    super(Service, self).start()

    # Setup a Leader Election, use for ensuring certain tasks are executed
    # on exactly one pool-manager instance at a time]
    self._pool_election = coordination.LeaderElection(
        self._coordinator, '%s:%s' % (self.service_name, self.pool.id))
    self._pool_election.start()

    if CONF['service:pool_manager'].enable_recovery_timer:
        interval = CONF['service:pool_manager'].periodic_recovery_interval
        # Lazy log args: only interpolated when INFO is enabled.
        LOG.info(_LI('Starting periodic recovery timer every'
                     ' %(interval)s s'), {'interval': interval})
        self.tg.add_timer(interval, self.periodic_recovery, interval)

    if CONF['service:pool_manager'].enable_sync_timer:
        interval = CONF['service:pool_manager'].periodic_sync_interval
        LOG.info(_LI('Starting periodic synchronization timer every'
                     ' %(interval)s s'), {'interval': interval})
        self.tg.add_timer(interval, self.periodic_sync, interval)
def periodic_recovery(self):
    """
    Runs only on the pool leader

    :return: None
    """
    if not self._pool_election.is_leader:
        return

    context = self._get_admin_context_all_tenants()

    LOG.debug("Starting Periodic Recovery")

    try:
        # Retry each failed action type in turn: delete, create, update.
        recovery_plan = (
            (DELETE_ACTION,
             _LI("periodic_recovery:delete_zone needed on %d zones"),
             self.delete_zone),
            (CREATE_ACTION,
             _LI("periodic_recovery:create_zone needed on %d zones"),
             self.create_zone),
            (UPDATE_ACTION,
             _LI("periodic_recovery:update_zone needed on %d zones"),
             self.update_zone),
        )
        for action, message, handle_zone in recovery_plan:
            zones = self._get_failed_zones(context, action)
            LOG.info(message, len(zones))
            for zone in zones:
                handle_zone(context, zone)
    except Exception:
        LOG.exception(_LE("An unhandled exception in periodic "
                          "recovery occurred"))
def start(self):
    """Fetch the pool config from central (retrying until available),
    then start target backends, leader election and periodic timers.
    """
    # Build the Pool (and related) Object from Config
    context = DesignateContext.get_admin_context()
    pool_id = CONF['service:pool_manager'].pool_id

    has_targets = False
    while not has_targets:
        try:
            self.pool = self.central_api.get_pool(context, pool_id)

            if len(self.pool.targets) > 0:
                has_targets = True
            else:
                LOG.error(_LE("No targets for %s found."), self.pool)
                time.sleep(5)

        # Pool data may not have migrated to the DB yet
        except exceptions.PoolNotFound:
            LOG.error(_LE("Pool ID %s not found."), pool_id)
            time.sleep(5)
        # designate-central service may not have started yet
        except messaging.exceptions.MessagingTimeout:
            time.sleep(0.2)

    # Create the necessary Backend instances for each target
    self._setup_target_backends()

    for target in self.pool.targets:
        self.target_backends[target.id].start()

    super(Service, self).start()

    # Setup a Leader Election, use for ensuring certain tasks are executed
    # on exactly one pool-manager instance at a time]
    self._pool_election = coordination.LeaderElection(
        self._coordinator, '%s:%s' % (self.service_name, self.pool.id))
    self._pool_election.start()

    if CONF['service:pool_manager'].enable_recovery_timer:
        interval = CONF['service:pool_manager'].periodic_recovery_interval
        # Lazy log args instead of eager %-interpolation.
        LOG.info(_LI('Starting periodic recovery timer every'
                     ' %(interval)s s'), {'interval': interval})
        self.tg.add_timer(interval, self.periodic_recovery, interval)

    if CONF['service:pool_manager'].enable_sync_timer:
        interval = CONF['service:pool_manager'].periodic_sync_interval
        LOG.info(_LI('Starting periodic synchronization timer every'
                     ' %(interval)s s'), {'interval': interval})
        self.tg.add_timer(interval, self.periodic_sync, interval)
def __init__(self, *a, **kw):
    """Configure the backend"""
    super(GdnsdBackend, self).__init__(*a, **kw)

    conf = cfg.CONF[CFG_GROUP]

    self._gdnsd_cmd_name = conf.gdnsd_cmd_name
    LOG.info(_LI("gdnsd command: %r"), self._gdnsd_cmd_name)

    self._confdir_path = conf.confdir_path
    self._zonedir_path = os.path.join(self._confdir_path, 'zones')
    LOG.info(_LI("gdnsd conf directory: %r"), self._confdir_path)

    # Dedicated resolver used for SOA sanity queries against gdnsd.
    self._resolver = dns.resolver.Resolver(configure=False)
    self._resolver.timeout = SOA_QUERY_TIMEOUT
    self._resolver.lifetime = SOA_QUERY_TIMEOUT
    self._resolver.nameservers = [conf.query_destination]
    LOG.info(_LI("Resolvers: %r"), self._resolver.nameservers)

    self._check_dirs(self._zonedir_path)
def post_all(self): """Create a Pool""" LOG.warning(_LW("Use of this API Method is DEPRICATED. This will have " "unforseen side affects when used with the " "designate-manage pool commands")) request = pecan.request response = pecan.response context = request.environ['context'] body = request.body_dict pool = DesignateAdapter.parse('API_v2', body, Pool()) pool.validate() # Create the pool pool = self.central_api.create_pool(context, pool) LOG.info(_LI("Created %(pool)s"), {'pool': pool}) pool = DesignateAdapter.render('API_v2', pool, request=request) response.status_int = 201 response.headers['Location'] = pool['links']['self'] # Prepare and return the response body return pool
def delete_zone(self, context, zone):
    """
    Delete a zone on every pool target and report the resulting status
    to central based on whether consensus was reached.

    :param context: Security context information.
    :param zone: Zone to be deleted
    :return: None
    """
    LOG.info(_LI("Deleting zone %s"), zone.name)

    results = []

    # Delete the zone on each of the Pool Targets
    for target in self.pool.targets:
        results.append(
            self._delete_zone_on_target(context, target, zone))

    # TODO(kiall): We should monitor that the Zone is actually deleted
    #              correctly on each of the nameservers, rather than
    #              assuming a successful delete-on-target is OK as we have
    #              in the past.
    if self._exceed_or_meet_threshold(
            results.count(True), MAXIMUM_THRESHOLD):
        # Lazy log args instead of eager %-interpolation.
        LOG.debug('Consensus reached for deleting zone %(zone)s '
                  'on pool targets', {'zone': zone.name})
        self.central_api.update_status(
            context, zone.id, SUCCESS_STATUS, zone.serial)
    else:
        # LOG.warning (not the deprecated warn alias) with lazy args.
        LOG.warning(_LW('Consensus not reached for deleting zone %(zone)s'
                        ' on pool targets'), {'zone': zone.name})
        self.central_api.update_status(
            context, zone.id, ERROR_STATUS, zone.serial)
def __init__(self, application):
    """Read maintenance-mode settings and wrap the WSGI application."""
    super(MaintenanceMiddleware, self).__init__(application)

    LOG.info(_LI('Starting designate maintenance middleware'))

    api_conf = cfg.CONF['service:api']
    self.enabled = api_conf.maintenance_mode
    self.role = api_conf.maintenance_mode_role
def find_zone_transfer_accepts(self, context, criterion=None, marker=None,
                               limit=None, sort_key=None, sort_dir=None):
    """Proxy find_zone_transfer_accepts to the central service over RPC."""
    # Implicit string concatenation instead of a backslash-continued
    # literal, which embedded the source indentation in the log message.
    LOG.info(_LI("find_zone_transfer_accepts: "
                 "Calling central's find_zone_transfer_accepts."))
    return self.client.call(
        context, 'find_zone_transfer_accepts',
        criterion=criterion, marker=marker,
        limit=limit, sort_key=sort_key, sort_dir=sort_dir)
def post_all(self): """Create Zone Import""" request = pecan.request response = pecan.response context = request.environ['context'] if six.PY2: body = request.body else: body = request.body.decode('utf-8') if request.content_type != 'text/dns': raise exceptions.UnsupportedContentType( 'Content-type must be text/dns') # Create the zone_import zone_import = self.central_api.create_zone_import( context, body) response.status_int = 202 LOG.info(_LI("Created %(zone_import)s"), {'zone_import': zone_import}) zone_import = DesignateAdapter.render( 'API_v2', zone_import, request=request) response.headers['Location'] = zone_import['links']['self'] # Prepare and return the response body return zone_import
def __init__(self, application):
    """Read SSL-proxy settings and wrap the WSGI application."""
    super(SSLMiddleware, self).__init__(application)
    LOG.info(_LI('Starting designate ssl middleware'))

    # e.g. "X-Forwarded-Proto" -> "HTTP_X_FORWARDED_PROTO"
    header_name = cfg.CONF['service:api'].secure_proxy_ssl_header
    self.secure_proxy_ssl_header = 'HTTP_{0}'.format(
        header_name.upper().replace('-', '_'))
    self.override = cfg.CONF['service:api'].override_proto
def patch_one(self, pool_id):
    """Update the specific pool"""
    # Message text fixed: "DEPRICATED" / "unforseen side affects" were typos.
    LOG.warning(_LW("Use of this API Method is DEPRECATED. This will have "
                    "unforeseen side effects when used with the "
                    "designate-manage pool commands"))
    request = pecan.request
    context = request.environ['context']
    body = request.body_dict
    response = pecan.response

    if request.content_type == 'application/json-patch+json':
        # NOTE: was "raise NotImplemented(...)". NotImplemented is a
        #       sentinel value, not an exception class, so raising it
        #       produced a TypeError. NotImplementedError is intended.
        raise NotImplementedError('json-patch not implemented')

    # Fetch the existing pool
    pool = self.central_api.get_pool(context, pool_id)

    pool = DesignateAdapter.parse('API_v2', body, pool)
    pool.validate()
    pool = self.central_api.update_pool(context, pool)
    LOG.info(_LI("Updated %(pool)s"), {'pool': pool})

    response.status_int = 202

    return DesignateAdapter.render('API_v2', pool, request=request)
def post_all(self): """Create Blacklisted Zone""" request = pecan.request response = pecan.response context = request.environ['context'] body = request.body_dict blacklist = DesignateAdapter.parse('API_v2', body, Blacklist()) blacklist.validate() # Create the blacklist blacklist = self.central_api.create_blacklist( context, blacklist) LOG.info(_LI("Created %(blacklist)s"), {'blacklist': blacklist}) response.status_int = 201 blacklist = DesignateAdapter.render( 'API_v2', blacklist, request=request) response.headers['Location'] = blacklist['links']['self'] # Prepare and return the response body return blacklist
def _on_elected_leader(self, event):
    """Tooz callback invoked when this instance wins the election."""
    # Message fixed: "Sucessfully" -> "Successfully".
    LOG.info(_LI('Successfully elected as leader of group %(group)s'),
             {'group': self._group_id})
    self._leader = True

    # Fan the election event out to registered listeners.
    for callback in self._callbacks:
        callback(event)
def get_zone_transfer_request(self, context, zone_transfer_request_id):
    """Proxy get_zone_transfer_request to the central service over RPC."""
    # Implicit string concatenation instead of a backslash-continued
    # literal, which embedded the source indentation in the log message.
    LOG.info(_LI("get_zone_transfer_request: "
                 "Calling central's get_zone_transfer_request."))
    return self.client.call(
        context, 'get_zone_transfer_request',
        zone_transfer_request_id=zone_transfer_request_id)
def delete_zone_transfer_accept(self, context, zone_transfer_accept_id):
    """Proxy delete_zone_transfer_accept to the central service over RPC."""
    # Implicit string concatenation instead of a backslash-continued
    # literal, which embedded the source indentation in the log message.
    LOG.info(_LI("delete_zone_transfer_accept: "
                 "Calling central's delete_zone_transfer_accept."))
    return self.client.call(
        context, 'delete_zone_transfer_accept',
        zone_transfer_accept_id=zone_transfer_accept_id)
def get_one(self, transfer_request_id):
    """Get transfer_request"""
    environ_context = pecan.request.environ['context']

    retrieved = self.central_api.get_zone_transfer_request(
        environ_context, transfer_request_id)

    LOG.info(_LI("Retrieved %(transfer_request)s"),
             {'transfer_request': retrieved})

    return DesignateAdapter.render('API_v2', retrieved,
                                   request=pecan.request,
                                   context=environ_context)
def post_all(self, zone_id):
    """Abandon a zone"""
    context = pecan.request.environ['context']
    context.abandon = 'True'

    # abandon the zone
    zone = self.central_api.delete_zone(context, zone_id)

    if not zone.deleted_at:
        pecan.response.status_int = 500
    else:
        pecan.response.status_int = 204
        LOG.info(_LI("Abandoned %(zone)s"), {'zone': zone})

    return ''
def target_sync(self, context, pool_id, target_id, timestamp):
    """Ask the pool-manager instance owning a pool to re-sync one of its
    targets from the given timestamp.
    """
    # NOTE: the 'target' placeholder was missing its trailing conversion
    #       character ("%(target)"), which makes %-formatting raise
    #       ValueError when the message is emitted.
    LOG.info(
        _LI("target_sync: Syncing target %(target)s since "
            "%(timestamp)d."),
        {
            'target': target_id,
            'timestamp': timestamp
        })
    # Modifying the topic so it is pool manager instance specific.
    topic = '%s.%s' % (self.topic, pool_id)
    cctxt = self.client.prepare(topic=topic)
    return cctxt.call(context, 'target_sync',
                      pool_id=pool_id, target_id=target_id,
                      timestamp=timestamp)
def get_domains():
    """List existing zones except those flagged for deletion
    """
    context = flask.request.environ.get('context')
    central_api = central_rpcapi.CentralAPI.get_instance()

    search = {
        "type": "PRIMARY",
        "action": "!DELETE"
    }
    domains = central_api.find_zones(context, criterion=search)

    LOG.info(_LI("Retrieved %(zones)s"), {'zones': domains})

    return flask.jsonify(domains_schema.filter({'domains': domains}))
def start(self):
    """Begin campaigning for leadership of the group.

    Without a coordination backend, each instance simply assumes
    leadership immediately.
    """
    self._started = True

    if not self._coordinator:
        self._warn_no_backend()
        self._leader = True
        for callback in self._callbacks:
            callback(None)
        return

    LOG.info(_LI('Starting leader election for group %(group)s'),
             {'group': self._group_id})

    # Nominate myself for election
    self._coordinator.watch_elected_as_leader(
        self._group_id, self._on_elected_leader)
def _threshold_met(self, results):
    """Return True when enough targets succeeded; otherwise mark the
    zone as ERROR and return False.
    """
    successes = results.count(True)
    met_action_threshold = self._compare_threshold(successes, len(results))

    if met_action_threshold:
        return True

    # If we don't meet threshold for action, update status
    LOG.info(
        _LI('Could not %(action)s %(zone)s on enough targets. '
            'Updating status to ERROR'),
        {
            'action': self.zone.action,
            'zone': self.zone.name
        })
    self.zone.status = 'ERROR'
    self._update_status()
    return False
def get_all(self, zone_id):
    """List NameServers for Zone"""
    request = pecan.request
    context = request.environ['context']

    # This is a work around to overcome the fact that pool ns_records list
    # object have 2 different representations in the v2 API
    ns_records = self.central_api.get_zone_ns_records(context, zone_id)

    # Message fixed: this handler retrieves records, it does not create
    # them, so log "Retrieved" rather than "Created".
    LOG.info(_LI("Retrieved %(ns_records)s"), {'ns_records': ns_records})

    return {
        "nameservers": DesignateAdapter.render(
            'API_v2', ns_records, request=request)
    }
def delete_one(self, zone_transfer_request_id):
    """Delete ZoneTransferRequest"""
    context = pecan.request.environ['context']

    deleted = self.central_api.delete_zone_transfer_request(
        context, zone_transfer_request_id)

    pecan.response.status_int = 204

    LOG.info(_LI("Deleted %(zone_transfer_request)s"),
             {'zone_transfer_request': deleted})

    # NOTE: This is a hack and a half.. But Pecan needs it.
    return ''
def delete_domain(self, context, domain):
    """Delete a domain from DynECT.

    A 404 from DynECT is logged and ignored since the domain is already
    gone; other DynClientErrors are re-raised.
    """
    # Lazy log args instead of eager %-interpolation.
    LOG.info(_LI('Deleting domain %(d_id)s / %(d_name)s'),
             {'d_id': domain['id'], 'd_name': domain['name']})

    url = '/Zone/%s' % domain['name'].rstrip('.')
    client = self.get_client()

    try:
        client.delete(url)
    except DynClientError as e:
        if e.http_status == 404:
            # LOG.warning (not the deprecated warn alias) with lazy args.
            LOG.warning(_LW("Attempt to delete %(d_id)s / %(d_name)s "
                            "caused 404, ignoring."),
                        {'d_id': domain['id'], 'd_name': domain['name']})
        else:
            raise

    client.logout()
def rec2des(rec, zonename):
    """Convert an IPA record to Designate format.

    A single IPA record returned from the search may translate into
    multiple Designate. IPA dnsrecord_find returns a "name". Each DNS
    name may contain multiple record types. Each record type may contain
    multiple values. Each one of these values must be added separately
    to Designate. This function returns all of those as a list of dict
    designate records.
    """
    # convert record name
    if rec['idnsname'][0] == '@':
        name = zonename
    else:
        name = rec['idnsname'][0] + "." + zonename
    # find all record types
    rectypes = []
    for k in rec:
        if k.endswith("record"):
            if k in iparectype2designate:
                rectypes.append(k)
            else:
                # Lazy log args instead of eager %-interpolation.
                LOG.info(_LI("Skipping unknown record type "
                             "%(type)s in %(name)s"),
                         {'type': k, 'name': name})

    desrecs = []
    for rectype in rectypes:
        dtype = iparectype2designate[rectype]
        for ddata in rec[rectype]:
            desreq = {'name': name, 'type': dtype}
            if dtype == 'SRV' or dtype == 'MX':
                # split off the priority and send in a separate field
                idx = ddata.find(' ')
                desreq['priority'] = int(ddata[:idx])
                if dtype == 'SRV' and not ddata.endswith("."):
                    # if server is specified as relative, add zonename
                    desreq['data'] = ddata[(idx + 1):] + "." + zonename
                else:
                    desreq['data'] = ddata[(idx + 1):]
            else:
                desreq['data'] = ddata
            if rec.get('description', [None])[0]:
                desreq['description'] = rec.get('description')[0]
            # NOTE: the guard previously tested a non-existent 'ttl' key
            #       while reading 'dnsttl', so the TTL branch could
            #       never behave consistently; test the key that is read.
            if rec.get('dnsttl', [None])[0]:
                desreq['ttl'] = int(rec['dnsttl'][0])
            desrecs.append(desreq)
    return desrecs
def update_zone(self, context, zone):
    """
    Update a zone on every pool target, notify also-notifies, and poll
    each nameserver until the change propagates.

    :param context: Security context information.
    :param zone: Zone to be updated
    :return: None
    """
    LOG.info(_LI("Updating zone %s"), zone.name)

    results = []

    # Update the zone on each of the Pool Targets
    for target in self.pool.targets:
        results.append(self._update_zone_on_target(context, target, zone))

    if self._exceed_or_meet_threshold(results.count(True)):
        # Lazy log args instead of eager %-interpolation.
        LOG.debug('Consensus reached for updating zone %(zone)s '
                  'on pool targets', {'zone': zone.name})
    else:
        # LOG.warning (not the deprecated warn alias) with lazy args.
        LOG.warning(_LW('Consensus not reached for updating zone %(zone)s'
                        ' on pool targets'), {'zone': zone.name})
        self.central_api.update_status(context, zone.id, ERROR_STATUS,
                                       zone.serial)
        return

    # Send a NOTIFY to each also-notifies
    for also_notify in self.pool.also_notifies:
        self._update_zone_on_also_notify(context, also_notify, zone)

    # Ensure the change has propagated to each nameserver
    for nameserver in self.pool.nameservers:
        # See if there is already another update in progress
        try:
            update_status = self.cache.retrieve(context, nameserver.id,
                                                zone.id, UPDATE_ACTION)
        except exceptions.PoolManagerStatusNotFound:
            update_status = self._build_status_object(
                nameserver, zone, UPDATE_ACTION)
            self.cache.store(context, update_status)

        self.mdns_api.poll_for_serial_number(
            context, zone, nameserver, self.timeout,
            self.retry_interval, self.max_retries, self.delay)
def find_zone_exports(self, context, criterion=None, marker=None,
                      limit=None, sort_key=None, sort_dir=None):
    """Proxy find_zone_exports to the central service over RPC."""
    LOG.info(
        _LI("find_zone_exports: Calling central's "
            "find_zone_exports."))

    search_kwargs = {
        'criterion': criterion,
        'marker': marker,
        'limit': limit,
        'sort_key': sort_key,
        'sort_dir': sort_dir,
    }
    return self.client.call(context, 'find_zone_exports', **search_kwargs)
def bind_udp(host, port):
    """Create, configure and bind a UDP socket on (host, port).

    :returns: the bound socket
    """
    # Bind to the UDP port
    # Lazy log args instead of eager %-interpolation.
    LOG.info(_LI('Opening UDP Listening Socket on %(host)s:%(port)d'),
             {'host': host, 'port': port})
    sock_udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    sock_udp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

    # NOTE: Linux supports socket.SO_REUSEPORT only in 3.9 and later releases.
    try:
        sock_udp.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
    except Exception:
        pass

    sock_udp.setblocking(True)
    sock_udp.bind((host, port))

    return sock_udp
def update_record(domain_id, record_id):
    """Update a single record in a domain (v1 API handler).

    Merges the request body into the v1 view of the record, validates the
    result against the v1 record schema, persists the record, and updates
    the parent recordset when any recordset-level field changed.

    :param domain_id: ID of the zone (v1 "domain") owning the record
    :param record_id: ID of the record to update
    :returns: flask JSON response containing the updated v1 record
    :raises: exceptions.BadRequest for managed records without edit rights
    """
    context = flask.request.environ.get('context')
    values = flask.request.json
    central_api = central_rpcapi.CentralAPI.get_instance()

    # NOTE: We need to ensure the domain actually exists, otherwise we may
    # return a record not found instead of a domain not found
    criterion = {"id": domain_id, "type": "PRIMARY", "action": "!DELETE"}
    central_api.find_zone(context, criterion)

    # Fetch the existing resource
    # NOTE(kiall): We use "find_record" rather than "get_record" as we do not
    # have the recordset_id.
    criterion = {'zone_id': domain_id, 'id': record_id}
    record = central_api.find_record(context, criterion)

    # TODO(graham): Move this further down the stack
    if record.managed and not context.edit_managed_records:
        raise exceptions.BadRequest('Managed records may not be updated')

    # Find the associated recordset
    recordset = central_api.get_recordset(
        context, domain_id, record.recordset_id)

    # Prepare a dict of fields for validation: start from the current v1
    # view of the record, then overlay the caller-supplied values.
    record_data = record_schema.filter(_format_record_v1(record, recordset))
    record_data.update(values)

    # Validate the new set of data
    record_schema.validate(record_data)

    # Update and persist the resource
    record.update(_extract_record_values(values))
    record = central_api.update_record(context, record)

    # Update the recordset resource (if necessary)
    recordset.update(_extract_recordset_values(values))

    if len(recordset.obj_what_changed()) > 0:
        recordset = central_api.update_recordset(context, recordset)

    LOG.info(_LI("Updated %(recordset)s"), {'recordset': recordset})

    # Format and return the response
    record = _format_record_v1(record, recordset)

    return flask.jsonify(record_schema.filter(record))
def do_axfr(zone_name, servers, timeout=None, source=None):
    """Perform an AXFR for a zone, trying each server in random order.

    :param zone_name: name of the zone to transfer
    :param servers: list of dicts with 'ip' and 'port' keys; shuffled
                    in place
    :param timeout: overall per-server timeout in seconds (default 10)
    :param source: optional source address for the transfer
    :returns: the transferred dns.zone.Zone
    :raises: exceptions.XFRFailure when every server fails
    """
    random.shuffle(servers)
    timeout = timeout or 10

    xfr = None

    for srv in servers:
        # BUG FIX: bind the eventlet timer to a separate name.  The old
        # code rebound ``timeout`` itself, so on the second iteration a
        # (cancelled) Timeout object was passed as the seconds value,
        # breaking retries against subsequent servers.
        axfr_timeout = eventlet.Timeout(timeout)
        log_info = {'name': zone_name, 'host': srv}
        try:
            LOG.info(_LI("Doing AXFR for %(name)s from %(host)s") % log_info)

            xfr = dns.query.xfr(srv['ip'], zone_name, relativize=False,
                                timeout=1, port=srv['port'], source=source)
            raw_zone = dns.zone.from_xfr(xfr, relativize=False)
            break
        except eventlet.Timeout as t:
            # Only swallow our own timer; any other Timeout propagates.
            if t == axfr_timeout:
                msg = _LE("AXFR timed out for %(name)s from %(host)s")
                LOG.error(msg % log_info)
                continue
        except dns.exception.FormError:
            msg = _LE("Domain %(name)s is not present on %(host)s."
                      "Trying next server.")
            LOG.error(msg % log_info)
        except socket.error:
            msg = _LE("Connection error when doing AXFR for %(name)s from "
                      "%(host)s")
            LOG.error(msg % log_info)
        except Exception:
            msg = _LE("Problem doing AXFR %(name)s from %(host)s. "
                      "Trying next server.")
            LOG.exception(msg % log_info)
        finally:
            axfr_timeout.cancel()
        continue
    else:
        # The loop finished without a break: no server produced a zone.
        msg = _LE("XFR failed for %(name)s. No servers in %(servers)s was "
                  "reached.")
        raise exceptions.XFRFailure(
            msg % {"name": zone_name, "servers": servers})

    LOG.debug("AXFR Successful for %s" % raw_zone.origin.to_text())

    return raw_zone
def update_domain(self, context, domain):
    """Push a domain update to every pool target, then NOTIFY nameservers.

    Requires consensus (per the configured threshold) across pool targets;
    on failure the domain is immediately marked ERROR in central and no
    NOTIFYs are sent.

    :param context: Security context information.
    :param domain: Domain to be updated
    :return: None
    """
    LOG.info(_LI("Updating domain %s"), domain.name)

    results = []

    # Update the domain on each of the Pool Targets
    for target in self.pool.targets:
        results.append(
            self._update_domain_on_target(context, target, domain))

    # Consensus counts only the targets that reported success (True).
    if self._exceed_or_meet_threshold(results.count(True)):
        LOG.debug('Consensus reached for updating domain %(domain)s '
                  'on pool targets' % {'domain': domain.name})
    else:
        LOG.warn(
            _LW('Consensus not reached for updating domain %(domain)s'
                ' on pool targets') % {'domain': domain.name})

        # Flag the failure in central and stop: do not notify nameservers
        # about a change the targets never accepted.
        self.central_api.update_status(context, domain.id, ERROR_STATUS,
                                       domain.serial)
        return

    # Send a NOTIFY to each also-notifies
    for also_notify in self.pool.also_notifies:
        self._update_domain_on_also_notify(context, also_notify, domain)

    # Send a NOTIFY to each nameserver
    for nameserver in self.pool.nameservers:
        # See if there is already another update in progress
        try:
            update_status = self.cache.retrieve(context, nameserver.id,
                                                domain.id, UPDATE_ACTION)
        except exceptions.PoolManagerStatusNotFound:
            # No update in flight: seed a fresh status entry first.
            update_status = self._build_status_object(
                nameserver, domain, UPDATE_ACTION)
            self.cache.store(context, update_status)

        self._update_domain_on_nameserver(context, nameserver, domain)
def __init__(self, storage):
    """Load and instantiate the configured scheduler filter plugins.

    :param storage: storage connection handed to every filter plugin
    :raises: exceptions.NoFiltersConfigured when no filters are enabled
    """
    enabled_filters = cfg.CONF['service:central'].scheduler_filters

    # Get a storage connection
    self.storage = storage

    # Guard clause: refuse to start with an empty filter list.
    if not enabled_filters:
        raise exceptions.NoFiltersConfigured('There are no scheduling '
                                             'filters configured')

    extensions = named.NamedExtensionManager(
        namespace='designate.scheduler.filters',
        names=enabled_filters,
        name_order=True)

    self.filters = [x.plugin(storage=self.storage) for x in extensions]

    # ``filter_`` avoids shadowing the builtin; lazy log args avoid
    # eager %-formatting.
    for filter_ in self.filters:
        LOG.info(_LI("Loaded Scheduler Filter: %s"), filter_.name)
def __call__(self):
    """Execute the zone action end-to-end; return True on success."""
    log_ctx = {
        'action': self.action,
        'name': self.zone.name,
    }
    LOG.info(_LI('Attempting %(action)s on zone %(name)s'), log_ctx)

    if not self._zone_action_on_targets():
        return False

    self._wait_for_nameservers()

    if self.action == 'DELETE':
        # A deleted zone should vanish; serial 0 marks that expectation
        # for the polling step below.
        self.zone.serial = 0

    polled_ok = self._poll_for_zone()
    return True if polled_ok else False
def post_all(self, zone_id):
    """Create Zone Export"""
    context = pecan.request.environ['context']

    # Ask central to create the export, then report 202 Accepted since
    # the export itself completes asynchronously.
    zone_export = self.central_api.create_zone_export(context, zone_id)
    pecan.response.status_int = 202

    LOG.info(_LI("Created %(zone_export)s"), {'zone_export': zone_export})

    rendered = DesignateAdapter.render(
        'API_v2', zone_export, request=pecan.request)

    pecan.response.headers['Location'] = rendered['links']['self']
    return rendered
def _check_delete_status(self, context, domain):
    """Report the outcome of a domain delete to central.

    Marks the domain SUCCESS when the delete reached consensus, ERROR
    otherwise; clears the status cache only once every nameserver
    (MAXIMUM_THRESHOLD) has confirmed the delete.

    :param context: Security context information.
    :param domain: Domain whose delete status is being checked
    """
    if self._is_consensus(context, domain, DELETE_ACTION, SUCCESS_STATUS):
        # Lazy %-args instead of eager string interpolation.
        LOG.info(_LI('Consensus reached for deleting domain %(domain)s'),
                 {'domain': domain.name})

        self.central_api.update_status(context, domain.id, SUCCESS_STATUS,
                                       domain.serial)
    else:
        # LOG.warning: LOG.warn is a deprecated alias.
        LOG.warning(_LW('Consensus not reached for deleting domain '
                        '%(domain)s'), {'domain': domain.name})

        self.central_api.update_status(context, domain.id, ERROR_STATUS,
                                       domain.serial)

    if self._is_consensus(context, domain, DELETE_ACTION, SUCCESS_STATUS,
                          MAXIMUM_THRESHOLD):
        # Clear all the entries from cache
        self._clear_cache(context, domain)
def get_records(domain_id):
    """Return every record in a domain as a v1 records listing."""
    context = flask.request.environ.get('context')
    central_api = central_rpcapi.CentralAPI.get_instance()

    # NOTE: We need to ensure the domain actually exists, otherwise we may
    # return an empty records array instead of a domain not found
    central_api.get_zone(context, domain_id)

    recordsets = central_api.find_recordsets(context, {'zone_id': domain_id})

    LOG.info(_LI("Retrieved %(recordsets)s"), {'recordsets': recordsets})

    # Flatten every recordset's records into one v1-formatted list.
    records = [
        _format_record_v1(record, rrset)
        for rrset in recordsets
        for record in rrset.records
    ]

    return flask.jsonify(records_schema.filter({'records': records}))
def create_domain(self, context, domain):
    """Create a domain on every pool target, then verify propagation.

    Requires consensus (per the configured threshold) across pool targets;
    on failure the domain is immediately marked ERROR in central and no
    NOTIFYs or polls are performed.

    :param context: Security context information.
    :param domain: Domain to be created
    :return: None
    """
    LOG.info(_LI("Creating new domain %s"), domain.name)

    results = []

    # Create the domain on each of the Pool Targets
    for target in self.pool.targets:
        results.append(
            self._create_domain_on_target(context, target, domain))

    # Consensus counts only the targets that reported success (True).
    if self._exceed_or_meet_threshold(results.count(True)):
        LOG.debug('Consensus reached for creating domain %(domain)s '
                  'on pool targets' % {'domain': domain.name})
    else:
        LOG.warn(
            _LW('Consensus not reached for creating domain %(domain)s'
                ' on pool targets') % {'domain': domain.name})

        # Flag the failure in central and stop: do not notify or poll a
        # domain the targets never accepted.
        self.central_api.update_status(context, domain.id, ERROR_STATUS,
                                       domain.serial)
        return

    # Send a NOTIFY to each also-notifies
    for also_notify in self.pool.also_notifies:
        self._update_domain_on_also_notify(context, also_notify, domain)

    # Send a NOTIFY to each nameserver
    for nameserver in self.pool.nameservers:
        # Seed a CREATE status entry, then asynchronously poll the
        # nameserver until it serves the new domain's serial.
        create_status = self._build_status_object(nameserver, domain,
                                                  CREATE_ACTION)
        self.cache.store(context, create_status)

        self.mdns_api.poll_for_serial_number(
            context, domain, nameserver, self.timeout, self.retry_interval,
            self.max_retries, self.delay)
def get_all(self, **params):
    """List Tlds"""
    request = pecan.request
    context = request.environ['context']

    # Extract the pagination params
    marker, limit, sort_key, sort_dir = utils.get_paging_params(
        context, params, self.SORT_KEYS)

    # Extract any filter params; only 'name' is filterable for TLDs.
    criterion = self._apply_filter_params(params, ('name',), {})

    tlds = self.central_api.find_tlds(
        context, criterion, marker, limit, sort_key, sort_dir)

    LOG.info(_LI("Retrieved %(tlds)s"), {'tlds': tlds})

    return DesignateAdapter.render('API_v2', tlds, request=request)
def init(default_rule=None):
    """Initialise the global policy enforcer from the configured file.

    :param default_rule: optional default rule handed to Rules.load_json
    :raises: exceptions.ConfigurationError when no policy file is found
    """
    found = utils.find_config(CONF['oslo_policy'].policy_file)

    if not found:
        raise exceptions.ConfigurationError(
            'Unable to determine appropriate policy json file')

    primary = found[0]
    LOG.info(_LI('Using policy_file found at: %s'), primary)

    with open(primary) as source:
        loaded_rules = policy.Rules.load_json(source.read(),
                                              default_rule=default_rule)

    global _ENFORCER
    if not _ENFORCER:
        LOG.debug("Enforcer is not present, recreating.")
        _ENFORCER = policy.Enforcer(CONF)

    _ENFORCER.set_rules(loaded_rules)
def get_serial_number(self, context, domain, nameserver, timeout,
                      retry_interval, max_retries, delay):
    """Call mdns to fetch a domain's serial from a nameserver.

    :param context: Security context information.
    :param domain: Domain whose serial is requested
    :param nameserver: nameserver (host/port) to query
    :returns: the result of the mdns get_serial_number call
    """
    # Lazy %-args (matching the zone-based variant) instead of eager
    # interpolation, so the message is only rendered when INFO is enabled.
    LOG.info(
        _LI("get_serial_number: Calling mdns for zone '%(zone)s', serial "
            "%(serial)s' on nameserver '%(host)s:%(port)s'"),
        {
            'zone': domain.name,
            'serial': domain.serial,
            'host': nameserver.host,
            'port': nameserver.port
        })
    cctxt = self.notify_client.prepare(version='1.1')
    return cctxt.call(
        context, 'get_serial_number', domain=domain,
        nameserver=nameserver, timeout=timeout,
        retry_interval=retry_interval, max_retries=max_retries,
        delay=delay)
def _modify_zone(self, *knotc_args, **kw):
    """Create or delete a zone while locking, and within a Knot
    transaction.

    Knot supports only one config transaction at a time.

    :param knotc_args: knotc subcommand and arguments to run inside the
        transaction (e.g. the conf-set/conf-unset for the zone)
    :param kw: passed through to _execute_knotc
    :raises: exceptions.Backend
    """
    # The lock serialises our own callers; the conf-begin/commit pair
    # serialises against Knot's single-transaction limit.
    with lockutils.lock(self._lock_name):
        self._execute_knotc('conf-begin')
        try:
            self._execute_knotc(*knotc_args, **kw)
            # conf-diff can be used for debugging
            # self._execute_knotc('conf-diff')
        except Exception as e:
            # Roll back the transaction before re-raising so Knot is left
            # ready for the next one.
            self._execute_knotc('conf-abort')
            LOG.info(_LI("Zone change aborted: %r"), e)
            raise
        else:
            self._execute_knotc('conf-commit')
def delete_one(self, pool_id):
    """Delete the specific pool"""
    LOG.warning(
        _LW("Use of this API Method is DEPRECATED. This will have "
            "unforeseen side affects when used with the "
            "designate-manage pool commands"))

    context = pecan.request.environ['context']

    removed = self.central_api.delete_pool(context, pool_id)
    LOG.info(_LI("Deleted %(pool)s"), {'pool': removed})

    pecan.response.status_int = 204

    # NOTE: This is a hack and a half.. But Pecan needs it.
    return ''
def get_serial_number(self, context, zone, host, port, timeout,
                      retry_interval, max_retries, delay):
    """Call mdns to fetch a zone's serial from a nameserver host:port."""
    log_values = {
        'zone': zone.name,
        'serial': zone.serial,
        'host': host,
        'port': port,
    }
    LOG.info(
        _LI("get_serial_number: Calling mdns for zone '%(zone)s', serial "
            "%(serial)s' on nameserver '%(host)s:%(port)s'"), log_values)

    prepared = self.notify_client.prepare()
    call_kwargs = dict(zone=zone, host=host, port=port, timeout=timeout,
                       retry_interval=retry_interval,
                       max_retries=max_retries, delay=delay)
    return prepared.call(context, 'get_serial_number', **call_kwargs)
def notify_zone_changed(self, context, domain, nameserver, timeout,
                        retry_interval, max_retries, delay):
    """Ask mdns to send a NOTIFY for a changed domain to a nameserver.

    :param context: Security context information.
    :param domain: Domain that changed
    :param nameserver: nameserver (host/port) to notify
    :returns: the result of the cast (no reply is awaited)
    """
    # Lazy %-args instead of eager interpolation, consistent with the
    # other mdns RPC wrappers.
    LOG.info(
        _LI("notify_zone_changed: Calling mdns for zone '%(zone)s', "
            "serial '%(serial)s' to nameserver '%(host)s:%(port)s'"),
        {
            'zone': domain.name,
            'serial': domain.serial,
            'host': nameserver.host,
            'port': nameserver.port
        })
    # The notify_zone_changed method is a cast rather than a call since the
    # caller need not wait for the notify to complete.
    return self.notify_client.cast(
        context, 'notify_zone_changed', domain=domain,
        nameserver=nameserver, timeout=timeout,
        retry_interval=retry_interval, max_retries=max_retries,
        delay=delay)
def delete_zone(self, zone_name):
    """Delete a Zone

    Do not raise exceptions if the zone does not exist.

    :param zone_name: zone name
    :type zone_name: str
    :raises: exceptions.Backend on error
    """
    # Local import: ``os.errno`` was an undocumented alias removed in
    # Python 3.7, so the real errno module must be used instead.
    import errno

    zone_name = zone_name.rstrip('.')
    LOG.debug('Deleting Zone: %s', zone_name)
    zone_fn = self._generate_zone_filename(zone_name)
    zone_fn = os.path.join(self._zonedir_path, zone_fn)
    try:
        os.remove(zone_fn)
        LOG.debug('Deleted Zone: %s', zone_name)
    except OSError as e:
        if e.errno == errno.ENOENT:
            # Already gone: deletion is idempotent by design.
            LOG.info(_LI("Zone datafile %s was already deleted"), zone_fn)
            return
        raise