def from_file(self, input_file=None, delimiter=None):
    input_file = str(input_file) if input_file is not None else None

    if not os.path.exists(input_file):
        raise Exception('TLD Input file Not Found')

    LOG.info(_LI("Importing TLDs from %s"), input_file)

    error_lines = []
    tlds_added = 0

    with open(input_file) as inf:
        csv.register_dialect('import-tlds', delimiter=str(delimiter))
        reader = csv.DictReader(inf,
                                fieldnames=['name', 'description'],
                                restkey='extra_fields',
                                dialect='import-tlds')
        for line in reader:
            # check if there are more than 2 fields
            if 'extra_fields' in line:
                error_lines.append("InvalidLine --> " +
                                   self._convert_tld_dict_to_str(line))
            else:
                tlds_added += self._validate_and_create_tld(line,
                                                            error_lines)

    LOG.info(_LI("Number of tlds added: %d"), tlds_added)

    errors = len(error_lines)
    if errors > 0:
        LOG.error(_LE("Number of errors: %d"), errors)
        # Sorting the errors and printing them so that it is easier to
        # read the errors
        LOG.error(_LE("Error Lines:\n%s"), '\n'.join(sorted(error_lines)))

def refresh_auth(self):
    service = "HTTP@" + self.hostname
    flags = kerberos.GSS_C_MUTUAL_FLAG | kerberos.GSS_C_SEQUENCE_FLAG
    try:
        (_, vc) = kerberos.authGSSClientInit(service, flags)
    except kerberos.GSSError as e:
        LOG.error(_LE("caught kerberos exception %r") % e)
        raise IPAAuthError(str(e))
    try:
        kerberos.authGSSClientStep(vc, "")
    except kerberos.GSSError as e:
        LOG.error(_LE("caught kerberos exception %r") % e)
        raise IPAAuthError(str(e))
    self.token = kerberos.authGSSClientResponse(vc)

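The token stored by refresh_auth() is a base64-encoded GSSAPI blob intended for an HTTP Negotiate header (RFC 4559). A minimal sketch of how a caller would attach it; the token value below is a placeholder, not a real ticket:

# Sketch only: 'token' stands in for kerberos.authGSSClientResponse(vc).
token = "YIIC..."  # placeholder base64 GSSAPI blob
headers = {'Authorization': 'Negotiate %s' % token}
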
def handle(self, payload, addr):
    """
    :param payload: Raw DNS query payload
    :param addr: Tuple of the client's (IP, Port)
    :return: response to the query or None if there is an issue decoding
        the query.
    """
    try:
        request = dns.message.from_wire(payload)
    except dns.exception.DNSException:
        LOG.exception(_LE("got exception while decoding packet from "
                          "%(host)s:%(port)d") %
                      {'host': addr[0], 'port': addr[1]})

        # We might not have the correct request id to send a response back
        # So make up a response with a blank question section
        response = self._handle_query_error(self._fake_request,
                                            dns.rcode.FORMERR)
        response.question = []
    else:
        if request.opcode() == dns.opcode.QUERY:
            response = self._handle_query(request)
        else:
            # Unhandled opcodes include STATUS, IQUERY, NOTIFY, UPDATE
            response = self._handle_query_error(request,
                                                dns.rcode.REFUSED)

    return response.to_wire()

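For context, a hedged sketch of the wire round-trip this handler performs, using dnspython's public API (the only assumption is that the dnspython package is installed):

import dns.message
import dns.opcode
import dns.rcode

# Build a wire-format query, as a client would send it.
query = dns.message.make_query('example.com.', 'A')
payload = query.to_wire()

# Decode it, as handle() does, and answer REFUSED for non-QUERY opcodes.
request = dns.message.from_wire(payload)
response = dns.message.make_response(request)
if request.opcode() != dns.opcode.QUERY:
    response.set_rcode(dns.rcode.REFUSED)
wire = response.to_wire()
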
def _inner():
    if initial_delay:
        greenthread.sleep(initial_delay)

    try:
        while self._running:
            start = timeutils.utcnow()
            self.f(*self.args, **self.kw)
            end = timeutils.utcnow()
            if not self._running:
                break
            delay = interval - timeutils.delta_seconds(start, end)
            if delay <= 0:
                LOG.warn(_LW('task run outlasted interval by %s sec') %
                         -delay)
            greenthread.sleep(delay if delay > 0 else 0)
    except LoopingCallDone as e:
        self.stop()
        done.send(e.retvalue)
    except Exception:
        LOG.exception(_LE('in fixed duration looping call'))
        done.send_exception(*sys.exc_info())
        return
    else:
        done.send(True)

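A minimal standalone sketch of the same fixed-interval scheduling logic, using the standard library instead of eventlet's greenthread (the helper names here are illustrative, not from the source):

import time

def tick():
    print("tick")

def fixed_interval_loop(f, interval, rounds=3):
    # Sleep only for whatever part of the interval the task didn't use.
    for _ in range(rounds):
        start = time.time()
        f()
        delay = interval - (time.time() - start)
        time.sleep(delay if delay > 0 else 0)

fixed_interval_loop(tick, interval=0.1)
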
def _migrate_up(self, engine, version, with_data=False):
    """migrate up to a new version of the db.

    We allow for data insertion and post checks at every
    migration version with special _pre_upgrade_### and
    _check_### functions in the main test.
    """
    # NOTE(sdague): try block is here because it's impossible to debug
    # where a failed data migration happens otherwise
    try:
        if with_data:
            data = None
            pre_upgrade = getattr(
                self, "_pre_upgrade_%03d" % version, None)
            if pre_upgrade:
                data = pre_upgrade(engine)

        self.migration_api.upgrade(engine, self.REPOSITORY, version)
        self.assertEqual(version,
                         self.migration_api.db_version(engine,
                                                       self.REPOSITORY))
        if with_data:
            check = getattr(self, "_check_%03d" % version, None)
            if check:
                check(engine, data)
    except Exception:
        LOG.error(_LE("Failed to migrate to version %s on engine %s") %
                  (version, engine))
        raise

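The walker discovers optional per-version hooks by name. A hedged sketch of that convention; migration number 005 and its contents are purely hypothetical:

# These would live on the same test class; _migrate_up finds them via
# getattr(self, "_pre_upgrade_005") and getattr(self, "_check_005").
def _pre_upgrade_005(self, engine):
    # Seed data before upgrading; the return value is handed to
    # _check_005 after the upgrade runs.
    return {'expected_rows': 2}

def _check_005(self, engine, data):
    assert data['expected_rows'] == 2
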
def _handle_exception(self, request, e, status=500, response={}):
    # Log the exception ASAP
    LOG.exception(e)

    headers = [
        ('Content-Type', 'application/json'),
    ]

    url = getattr(request, 'url', None)

    # Set a response code and type, if they are missing.
    if 'code' not in response:
        response['code'] = status

    if 'type' not in response:
        response['type'] = 'unknown'

    # Return the new response
    if 'context' in request.environ:
        response['request_id'] = request.environ['context'].request_id

        notifications.send_api_fault(request.environ['context'], url,
                                     response['code'], e)
    else:
        # TODO(ekarlso): Remove after verifying that there's actually a
        # context always set
        LOG.error(_LE('Missing context in request, please check.'))

    return flask.Response(status=status, headers=headers,
                          response=json.dumps(response))

def _wait_for_exit_or_signal(self, ready_callback=None):
    status = None
    signo = 0

    LOG.debug('Full set of CONF:')
    CONF.log_opt_values(LOG, std_logging.DEBUG)

    try:
        if ready_callback:
            ready_callback()
        super(ServiceLauncher, self).wait()
    except SignalExit as exc:
        signame = _signo_to_signame(exc.signo)
        LOG.info(_LI('Caught %s, exiting'), signame)
        status = exc.code
        signo = exc.signo
    except SystemExit as exc:
        status = exc.code
    finally:
        self.stop()
        if rpc:
            try:
                rpc.cleanup()
            except Exception:
                # We're shutting down, so it doesn't matter at this
                # point.
                LOG.exception(_LE('Exception during rpc cleanup.'))

    return status, signo

def _wrap(self, *args, **kwargs):
    try:
        assert issubclass(
            self.__class__, sqlalchemy.orm.session.Session
        ), ('_wrap_db_error() can only be applied to methods of '
            'subclasses of sqlalchemy.orm.session.Session.')

        return f(self, *args, **kwargs)
    except UnicodeEncodeError:
        raise exception.DBInvalidUnicodeParameter()
    except sqla_exc.OperationalError as e:
        _raise_if_db_connection_lost(e, self.bind)
        _raise_if_deadlock_error(e, self.bind.dialect.name)
        # NOTE(comstud): A lot of code is checking for OperationalError
        # so let's not wrap it for now.
        raise
    # note(boris-42): We should catch unique constraint violation and
    # wrap it by our own DBDuplicateEntry exception. Unique constraint
    # violation is wrapped by IntegrityError.
    except sqla_exc.IntegrityError as e:
        # note(boris-42): SqlAlchemy doesn't unify errors from different
        # DBs so we must do this. Also in some tables (for example
        # instance_types) there are more than one unique constraint. This
        # means we should get names of columns, which values violate
        # unique constraint, from error message.
        _raise_if_duplicate_entry_error(e, self.bind.dialect.name)
        raise exception.DBError(e)
    except Exception as e:
        LOG.exception(_LE('DB exception wrapped.'))
        raise exception.DBError(e)

def inner_func(*args, **kwargs):
    last_log_time = 0
    last_exc_message = None
    exc_count = 0
    while True:
        try:
            return infunc(*args, **kwargs)
        except Exception as exc:
            this_exc_message = six.u(str(exc))
            if this_exc_message == last_exc_message:
                exc_count += 1
            else:
                exc_count = 1
            # Do not log any more frequently than once a minute unless
            # the exception message changes
            cur_time = int(time.time())
            if (cur_time - last_log_time > 60 or
                    this_exc_message != last_exc_message):
                logging.exception(
                    _LE('Unexpected exception occurred %d time(s)... '
                        'retrying.') % exc_count)
                last_log_time = cur_time
                last_exc_message = this_exc_message
                exc_count = 0
            # This should be a very rare event. In case it isn't, do
            # a sleep.
            time.sleep(1)

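This closure is the body of a "retry forever" decorator. A hedged, self-contained sketch of how such a decorator is built and applied; this simplified version omits the once-a-minute log throttling above:

import functools
import logging
import time

def forever_retry_uncaught_exceptions(infunc):
    @functools.wraps(infunc)
    def inner_func(*args, **kwargs):
        while True:
            try:
                return infunc(*args, **kwargs)
            except Exception:
                logging.exception('Unexpected exception, retrying.')
                time.sleep(1)
    return inner_func

@forever_retry_uncaught_exceptions
def flaky_worker():
    return 'done'
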
def _call(endpoint, region, *args, **kw):
    client = get_client(context, endpoint=endpoint)
    LOG.debug("Attempting to fetch FloatingIPs from %s @ %s" %
              (endpoint, region))
    try:
        fips = client.list_floatingips(*args, **kw)
    except neutron_exceptions.Unauthorized as e:
        # NOTE: 401 might be that the user doesn't have neutron
        # activated in a particular region, we'll just log the failure
        # and go on with our lives.
        LOG.warn(_LW("Calling Neutron resulted in a 401, "
                     "please investigate."))
        LOG.exception(e)
        return
    except Exception as e:
        LOG.error(_LE('Failed calling Neutron '
                      '%(region)s - %(endpoint)s') %
                  {'region': region, 'endpoint': endpoint})
        LOG.exception(e)
        failed.append((e, endpoint, region))
        return

    for fip in fips['floatingips']:
        data.append({
            'id': fip['id'],
            'address': fip['floating_ip_address'],
            'region': region
        })

    LOG.debug("Added %i FloatingIPs from %s @ %s" %
              (len(data), endpoint, region))

def release(self):
    try:
        self.unlock()
        self.lockfile.close()
        LOG.debug('Released file lock "%s"', self.fname)
    except IOError:
        LOG.exception(_LE("Could not release the acquired lock `%s`"),
                      self.fname)

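A standalone sketch of the acquire/release pairing this method is one half of, using fcntl directly (POSIX-only; the lock-file path is illustrative):

import fcntl

with open('/tmp/example.lock', 'w') as lockfile:
    fcntl.flock(lockfile, fcntl.LOCK_EX)      # acquire
    try:
        pass  # critical section
    finally:
        fcntl.flock(lockfile, fcntl.LOCK_UN)  # release, as release() does
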
def __exit__(self, exc_type, exc_val, exc_tb):
    if exc_type is not None:
        if self.reraise:
            logging.error(_LE('Original exception being dropped: %s'),
                          traceback.format_exception(self.type_,
                                                     self.value,
                                                     self.tb))
        return False
    if self.reraise:
        six.reraise(self.type_, self.value, self.tb)

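This __exit__ belongs to oslo's save_and_reraise_exception context manager. A hedged usage sketch (import path as in later oslo.utils releases); the pattern guards cleanup code that may itself raise:

from oslo_utils import excutils

def cleanup():
    pass  # may itself raise; the original exception is still preserved

try:
    try:
        raise ValueError("boom")
    except ValueError:
        with excutils.save_and_reraise_exception():
            cleanup()
except ValueError as e:
    print("re-raised: %s" % e)
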
def wrapper(*args, **kwargs):
    next_interval = self.retry_interval
    remaining = self.max_retries

    while True:
        try:
            return f(*args, **kwargs)
        except exception.DBConnectionError as e:
            if remaining == 0:
                LOG.exception(_LE('DB exceeded retry limit.'))
                raise exception.DBError(e)
            if remaining != -1:
                remaining -= 1
                LOG.exception(_LE('DB connection error.'))
            # NOTE(vsergeyev): We are using patched time module, so
            #                  this effectively yields the execution
            #                  context to another green thread.
            time.sleep(next_interval)
            if self.inc_retry_interval:
                next_interval = min(next_interval * 2,
                                    self.max_retry_interval)

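The backoff schedule above doubles the sleep up to a cap. A tiny self-contained illustration, with assumed values retry_interval=1 and max_retry_interval=10:

interval, cap = 1, 10
schedule = []
for _ in range(6):
    schedule.append(interval)
    interval = min(interval * 2, cap)
print(schedule)  # [1, 2, 4, 8, 10, 10]
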
def _parse_check(rule):
    """Parse a single base check rule into an appropriate Check object."""
    # Handle the special checks
    if rule == "!":
        return FalseCheck()
    elif rule == "@":
        return TrueCheck()

    try:
        kind, match = rule.split(":", 1)
    except Exception:
        LOG.exception(_LE("Failed to understand rule %s") % rule)
        # If the rule is invalid, we'll fail closed
        return FalseCheck()

    # Find what implements the check
    if kind in _checks:
        return _checks[kind](kind, match)
    elif None in _checks:
        return _checks[None](kind, match)
    else:
        LOG.error(_LE("No handler for matches of kind %s") % kind)
        return FalseCheck()

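The rule grammar splits on the first colon only; "!" and "@" are the special deny-all and allow-all rules. A short illustration of the split mechanics (the "ip" kind is made up for the example):

kind, match = "role:admin".split(":", 1)
assert (kind, match) == ("role", "admin")

# Only the first ':' separates kind from match, so matches may contain
# colons themselves.
kind, match = "ip:127.0.0.1:8080".split(":", 1)
assert (kind, match) == ("ip", "127.0.0.1:8080")
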
def _call_and_handle_error(self, ipareq):
    if 'version' not in ipareq['params'][1]:
        ipareq['params'][1]['version'] = cfg.CONF[self.name].ipa_version
    need_reauth = False
    while True:
        status_code = 200
        try:
            if need_reauth:
                self.request.auth.refresh_auth()
            rawresp = self.request.post(self.jsonurl,
                                        data=json.dumps(ipareq))
            status_code = rawresp.status_code
        except IPAAuthError:
            status_code = 401
        if status_code == 401:
            if self.ntries == 0:
                # persistent inability to auth
                LOG.error(_LE("Error: could not authenticate to IPA - "
                              "please check for correct keytab file"))
                # reset for next time
                self.ntries = cfg.CONF[self.name].ipa_connect_retries
                raise IPACommunicationFailure()
            else:
                LOG.debug("Refresh authentication")
                need_reauth = True
                self.ntries -= 1
                time.sleep(1)
        else:
            # successful - reset
            self.ntries = cfg.CONF[self.name].ipa_connect_retries
            break
    try:
        resp = json.loads(rawresp.text)
    except ValueError:
        # response was not json - some sort of error response
        LOG.debug("Error: unknown error from IPA [%s]" % rawresp.text)
        raise IPAUnknownError("unable to process response from IPA")
    # raise the appropriate exception, if error
    exclass = self._ipa_error_to_exception(resp, ipareq)
    if exclass:
        # could add additional info/message to exception here
        raise exclass()
    return resp

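For reference, a sketch of the JSON-RPC payload shape this method posts to FreeIPA; the method name and version value here are illustrative, following the dnszone_add call used elsewhere in this module:

import json

ipareq = {'method': 'dnszone_find',
          'params': [[], {'version': '2.65'}]}  # version value assumed
body = json.dumps(ipareq)
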
def _extract_zones(self):
    zones = []
    for zone in self._zone_regex.finditer(self._conf):
        content = zone.group('content')
        name = zone.group('name')
        # Make sure it's a master zone:
        if self._type_master_regex.search(content):
            zonefile = self._zonefile_regex.search(content).group('file')
            try:
                zone_object = dns.zone.from_file(zonefile,
                                                 allow_include=True)
            except dns.zone.UnknownOrigin:
                LOG.info(_LI('%s is missing $ORIGIN, inserting %s') %
                         (zonefile, name))
                zone_object = dns.zone.from_file(zonefile,
                                                 allow_include=True,
                                                 origin=name)
            except dns.zone.NoSOA:
                LOG.error(_LE('%s has no SOA') % zonefile)
                # No zone_object was produced; skip this zone rather
                # than appending an undefined name below.
                continue
            zones.append(Zone(zone_object))
    return zones

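dns.zone.from_file raises UnknownOrigin when the zone file has no $ORIGIN and no origin argument is given, which is exactly the fallback above. A small in-memory sketch of the same behavior using dns.zone.from_text (zone contents are illustrative):

import dns.zone

zone_text = """
$TTL 300
@ IN SOA ns1.example.com. admin.example.com. 1 600 300 86400 300
@ IN NS ns1.example.com.
"""
# No $ORIGIN in the text, so an explicit origin is required, just as
# the except-clause above retries from_file() with origin=name.
zone = dns.zone.from_text(zone_text, origin='example.com.')
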
def _child_wait_for_exit_or_signal(self, launcher):
    status = 0
    signo = 0

    # NOTE(johannes): All exceptions are caught to ensure this
    # doesn't fallback into the loop spawning children. It would
    # be bad for a child to spawn more children.
    try:
        launcher.wait()
    except SignalExit as exc:
        signame = _signo_to_signame(exc.signo)
        LOG.info(_LI('Caught %s, exiting'), signame)
        status = exc.code
        signo = exc.signo
    except SystemExit as exc:
        status = exc.code
    except BaseException:
        LOG.exception(_LE('Unhandled exception'))
        status = 2
    finally:
        launcher.stop()

    return status, signo

def _handle(self, addr, payload, client=None):
    """
    Handle a DNS Query

    :param addr: Tuple of the client's (IP, Port)
    :param payload: Raw DNS query payload
    :param client: Client socket (for TCP only)
    """
    try:
        response = self.handler.handle(payload, addr)

        # send back a response only if present
        if response:
            if client is not None:
                # Handle TCP Responses
                client.send(response)
                client.close()
            else:
                # Handle UDP Responses
                self._sock_udp.sendto(response, addr)
    except Exception:
        LOG.exception(_LE("Unhandled exception while processing request "
                          "from %(host)s:%(port)d") %
                      {'host': addr[0], 'port': addr[1]})

def _inner():
    if initial_delay:
        greenthread.sleep(initial_delay)

    try:
        while self._running:
            idle = self.f(*self.args, **self.kw)
            if not self._running:
                break

            if periodic_interval_max is not None:
                idle = min(idle, periodic_interval_max)
            LOG.debug('Dynamic looping call sleeping for %.02f '
                      'seconds', idle)
            greenthread.sleep(idle)
    except LoopingCallDone as e:
        self.stop()
        done.send(e.retvalue)
    except Exception:
        LOG.exception(_LE('in dynamic looping call'))
        done.send_exception(*sys.exc_info())
        return
    else:
        done.send(True)

def _parse_text_rule(rule):
    """Parses policy to the tree.

    Translates a policy written in the policy language into a tree of
    Check objects.
    """
    # Empty rule means always accept
    if not rule:
        return TrueCheck()

    # Parse the token stream
    state = ParseState()
    for tok, value in _parse_tokenize(rule):
        state.shift(tok, value)

    try:
        return state.result
    except ValueError:
        # Couldn't parse the rule
        LOG.exception(_LE("Failed to understand rule %r") % rule)

        # Fail closed
        return FalseCheck()

def main():
    # HACK HACK HACK - allow required config params to be passed
    # via the command line
    cfg.CONF['service:api']._group._opts['api_base_uri']['cli'] = True
    for optdict in cfg.CONF['backend:ipa']._group._opts.itervalues():
        if 'cli' in optdict:
            optdict['cli'] = True

    # HACK HACK HACK - allow api url to be passed in the usual way
    utils.read_config('designate', sys.argv)
    if cfg.CONF['service:central'].backend_driver == 'ipa':
        raise CannotUseIPABackend(cuiberrorstr)

    if cfg.CONF.debug:
        LOG.setLevel(logging.DEBUG)
    elif cfg.CONF.verbose:
        LOG.setLevel(logging.INFO)
    else:
        LOG.setLevel(logging.WARN)

    ipabackend = impl_ipa.IPABackend(None)
    ipabackend.start()
    version = cfg.CONF['backend:ipa'].ipa_version
    designateurl = cfg.CONF['service:api'].api_base_uri + "v1"

    # get the list of domains/zones from IPA
    ipazones = getipadomains(ipabackend, version)
    # get unique list of name servers
    servers = {}
    for zonerec in ipazones:
        for nsrec in zonerec['nsrecord']:
            servers[nsrec] = nsrec
    if not servers:
        raise NoNameServers("Error: no name servers found in IPA")

    # let's see if designate is using the IPA backend
    # create a fake domain in IPA
    # create a fake server in Designate
    # try to create the same fake domain in Designate
    # if we get a DuplicateDomain error from Designate, then
    # raise the CannotUseIPABackend error, after deleting
    # the fake server and fake domain

    # find the first non-reverse zone
    zone = {}
    for zrec in ipazones:
        if not zrec['idnsname'][0].endswith("in-addr.arpa.") and \
                zrec['idnszoneactive'][0] == 'TRUE':
            # ipa returns every data field as a list
            # convert the list to a scalar
            for n, v in zrec.iteritems():
                if n in zoneskips:
                    continue
                if isinstance(v, list):
                    zone[n] = v[0]
                else:
                    zone[n] = v
            break

    assert(zone)

    # create a fake subdomain of this zone
    domname = "%s.%s" % (uuid.uuid4(), zone['idnsname'])
    args = copy.copy(zone)
    del args['idnsname']
    args['version'] = version

    ipareq = {'method': 'dnszone_add', 'params': [[domname], args]}
    iparesp = ipabackend._call_and_handle_error(ipareq)
    LOG.debug("Response: %s" % pprint.pformat(iparesp))
    if iparesp['error']:
        raise AddDomainError(pprint.pformat(iparesp))

    # set up designate connection
    designatereq = requests.Session()
    xtra_hdrs = {'Content-Type': 'application/json'}
    designatereq.headers.update(xtra_hdrs)

    # sync ipa name servers to designate
    syncipaservers2des(servers, designatereq, designateurl)

    domainurl = designateurl + "/domains"

    # next, try to add the fake domain to Designate
    email = zone['idnssoarname'].rstrip(".").replace(".", "@", 1)
    desreq = {"name": domname,
              "ttl": int(zone['idnssoarefresh'][0]),
              "email": email}
    resp = designatereq.post(domainurl, data=json.dumps(desreq))
    exc = None
    fakezoneid = None
    if resp.status_code == 200:
        LOG.info(_LI("Added domain %s") % domname)
        fakezoneid = resp.json()['id']
        delresp = designatereq.delete(domainurl + "/" + fakezoneid)
        if delresp.status_code != 200:
            LOG.error(_LE("Unable to delete %(name)s: %(response)s") %
                      {'name': domname,
                       'response': pprint.pformat(delresp.json())})
    else:
        exc = CannotUseIPABackend(cuiberrorstr)

    # cleanup fake stuff
    ipareq = {'method': 'dnszone_del',
              'params': [[domname], {'version': version}]}
    iparesp = ipabackend._call_and_handle_error(ipareq)
    LOG.debug("Response: %s" % pprint.pformat(iparesp))
    if iparesp['error']:
        LOG.error(_LE("%s") % pprint.pformat(iparesp))

    if exc:
        raise exc

    # get and delete existing domains
    resp = designatereq.get(domainurl)
    LOG.debug("Response: %s" % pprint.pformat(resp.json()))
    if resp and resp.status_code == 200 and resp.json() and \
            'domains' in resp.json():
        # domains must be deleted in child/parent order i.e. delete
        # sub-domains before parent domains - simple way to get this
        # order is to sort the domains in reverse order of name len
        dreclist = sorted(resp.json()['domains'],
                          key=lambda drec: len(drec['name']),
                          reverse=True)
        for drec in dreclist:
            delresp = designatereq.delete(domainurl + "/" + drec['id'])
            if delresp.status_code != 200:
                raise DeleteDomainError(
                    "Unable to delete %s: %s" %
                    (drec['name'], pprint.pformat(delresp.json())))

    # key is zonename, val is designate rec id
    zonerecs = {}
    for zonerec in ipazones:
        desreq = zone2des(zonerec)
        resp = designatereq.post(domainurl, data=json.dumps(desreq))
        if resp.status_code == 200:
            LOG.info(_LI("Added domain %s") % desreq['name'])
        else:
            raise AddDomainError("Unable to add domain %s: %s" %
                                 (desreq['name'],
                                  pprint.pformat(resp.json())))
        zonerecs[desreq['name']] = resp.json()['id']

    # get the records for each zone
    for zonename, domainid in zonerecs.iteritems():
        recurl = designateurl + "/domains/" + domainid + "/records"
        iparecs = getiparecords(ipabackend, zonename, version)
        for rec in iparecs:
            desreqs = rec2des(rec, zonename)
            for desreq in desreqs:
                resp = designatereq.post(recurl, data=json.dumps(desreq))
                if resp.status_code == 200:
                    LOG.info(_LI("Added record %(record)s "
                                 "for domain %(domain)s") %
                             {'record': desreq['name'],
                              'domain': zonename})
                else:
                    raise AddRecordError(
                        "Could not add record %s: %s" %
                        (desreq['name'], pprint.pformat(resp.json())))