def resolve(self, request, handler):
    """Resolve a DNS request against this resolver's split records.

    Builds a reply from `request`, appending every record whose name
    matches (via the comparison method named by `self.eq`) and whose
    type matches the query type, is queried as ANY, or is a CNAME
    (CNAMEs answer any qtype). For CNAME/NS/MX/PTR answers, matching
    A/AAAA records for the target label are appended to the additional
    section. Returns NXDOMAIN when no answer matched.
    """
    reply = request.reply()
    qname = request.q.qname
    qtype = QTYPE[request.q.qtype]
    for name, rtype, rr in self.get_split_records():
        # Check if label & type match
        if getattr(qname, self.eq)(name) and (qtype == rtype
                                              or qtype == "ANY"
                                              or rtype == "CNAME"):
            logger.debug(
                "record matched - name: {} | rtype: {} | rr: {} | qname: {}"
                .format(str(name), str(rtype), str(rr), str(qname)))
            # If we have a glob match fix reply label
            # (copy so the stored record's rname is not mutated)
            if self.glob:
                a = copy.copy(rr)
                a.rname = qname
                reply.add_answer(a)
            else:
                reply.add_answer(rr)
            # Check for A/AAAA records associated with reply and
            # add in additional section
            if rtype in ["CNAME", "NS", "MX", "PTR"]:
                for a_name, a_rtype, a_rr in self.get_split_records():
                    if a_name == rr.rdata.label and a_rtype in [
                            "A", "AAAA"
                    ]:
                        reply.add_ar(a_rr)
        else:
            logger.debug("record not matched: {} for {}".format(
                str(name), str(qname)))
    # No answers collected -> signal non-existent domain
    if not reply.rr:
        reply.header.rcode = RCODE.NXDOMAIN
    return reply
def log_error(self, handler, e):
    """Log an invalid/unparseable DNS request at debug level."""
    client_host = handler.client_address[0]
    client_port = handler.client_address[1]
    logger.debug("%sInvalid Request: [%s:%d] (%s) :: %s" % (
        self.log_prefix(handler),
        client_host,
        client_port,
        handler.protocol,
        e,
    ))
def log_send(self, handler, data):
    """Log an outgoing DNS payload (length + hex dump) at debug level."""
    prefix = self.log_prefix(handler)
    client_host = handler.client_address[0]
    client_port = handler.client_address[1]
    logger.debug("%sSent: [%s:%d] (%s) <%d> : %s" % (
        prefix,
        client_host,
        client_port,
        handler.protocol,
        len(data),
        binascii.hexlify(data),
    ))
def from_zone(cls, zone):
    """Build an instance holding the resource records of a single zone.

    A zone with no stored dns_records gets a default record set rendered
    from ZONE_TEMPLATE; otherwise each stored record line is parsed into
    RR objects in `sort` order. Any unparseable record aborts the load.
    """
    logger.info(
        f"[email protected] - Loading zone: {zone.domain}/{zone.ip} ({zone.id})"
    )
    dns_records = zone.dns_records or []
    if not dns_records:
        # No stored records: synthesize the defaults from the template.
        logger.warning(
            f"[email protected] - Zone has no dns_records. loading defaults: {zone.domain}/{zone.ip} ({zone.id})"
        )
        rrs = RR.fromZone(
            ZONE_TEMPLATE.format(domain_name=zone.domain, domain_ip=zone.ip)
        )
        zone_records = [Record.make(zone, rr) for rr in rrs]
        for zr in zone_records:
            # TODO: make this clean on output
            rrstr = str(dedent(str(zr.rr)))
            logger.debug(f"[email protected] - Loading record entry: {rrstr}")
            logger.debug(
                "[email protected] - Loaded record details - name: {} | rtype: {} | rr: {}".format(
                    str(zr.rr.rname), str(QTYPE[zr.rr.rtype]), str(zr.rr)
                )
            )
    else:
        # Parse each stored record line, ordered by its sort field.
        zone_records = []
        for dns_record in sorted(dns_records, key=lambda x: x.sort):
            try:
                rrs = RR.fromZone(dns_record.record)
                parsed = [Record.make(zone, rr) for rr in rrs]
                for zr in parsed:
                    rrstr = str(dedent(str(zr.rr)))
                    logger.debug(
                        f"[email protected] - Loading record: {str(dns_record.record)}"
                    )
                    logger.debug(
                        f"[email protected] - Loading record entry: {rrstr}"
                    )
                    logger.debug(
                        "[email protected] - Loaded record details - name: {} | rtype: {} | rr: {}".format(
                            str(zr.rr.rname), str(QTYPE[zr.rr.rtype]), str(zr.rr)
                        )
                    )
                zone_records.extend(parsed)
            except Exception as e:
                logger.critical(
                    f'[email protected] - Error processing line ({e.__class__.__name__}: {e}) "{dns_record.id}:{dns_record.record}" '
                )
                raise e
    return cls(zone_records)
def log_truncated(self, handler, reply):
    """Log a truncated reply (question, qtype, answer rtypes), then dump it."""
    answer_types = ",".join([QTYPE[a.rtype] for a in reply.rr])
    logger.debug("%sTruncated Reply: [%s:%d] (%s) / '%s' (%s) / RRs: %s" % (
        self.log_prefix(handler),
        handler.client_address[0],
        handler.client_address[1],
        handler.protocol,
        reply.q.qname,
        QTYPE[reply.q.qtype],
        answer_types,
    ))
    self.log_data(reply)
def make(cls, zone, rr):
    """Return a Record for `rr` with its rname qualified by the zone domain.

    Parsed RRs carry no knowledge of the zone's domain, so the "." in the
    rname is replaced with the fully-qualified zone name.
    """
    if not getattr(rr, "rtype", None):
        logger.critical(
            f"No rtype found for rr: {str(rr)} - {str(rr.__class__)}")
    # Root label gets the bare domain; any other label gets it as a suffix.
    suffix = zone.domain + "." if rr.rname == "." else "." + zone.domain + "."
    new_label_name = str(rr.rname).replace(".", suffix)
    logger.debug(
        f"Replacing RR's rname {str(rr.rname)} with {new_label_name}")
    rr.set_rname(new_label_name)
    return cls(zone, rr)
async def is_broadcast_up():
    """Poll the broadcast (redis queue) until it answers, ~60s budget.

    Returns:
        True once a set/get/delete round-trip succeeds, False after
        roughly a minute of failed attempts.
    """
    import asyncio  # local import keeps the module import block untouched

    seconds = 0
    while True:
        if seconds > 60:
            logger.critical("could not start api. broadcast (queue) not up")
            return False
        logger.debug("checking for broadcast (queue) status")
        try:
            redis = await make_redis()
            # Round-trip a throwaway key to prove the connection works.
            await redis.set("up-key", "value")
            await redis.get("up-key")
            await redis.delete("up-key")
            return True
        except Exception as e:
            logger.critical(
                "broadcast (queue) check not ready after {} seconds: {}".
                format(str(seconds), str(e.__class__.__name__)))
        seconds = seconds + 2
        # BUG FIX: was time.sleep(2), which blocked the event loop while
        # waiting; yield to the loop instead.
        await asyncio.sleep(2)
def is_db_up():
    """Poll the database until a trivial SELECT succeeds (~60s budget).

    Returns True when reachable; False on timeout, or immediately when no
    session factory has been registered (KeyError from session()).
    """
    seconds = 0
    while seconds <= 60:
        logger.debug("checking for db status")
        try:
            session().execute("SELECT 1")
            return True
        except KeyError as e:
            # No registered session factory: retrying cannot help.
            logger.critical(
                "database has not be registered. please call db_register(db_url)"
            )
            return False
        except Exception as e:
            logger.critical(
                "database check not ready after {} seconds: {}".format(
                    str(seconds), str(e.__class__.__name__)))
        seconds = seconds + 1
        sleep(1)
    logger.critical("could not start api. database not up")
    return False
def is_db_setup():
    """Return True if the database is migrated (alembic_version is queryable).

    Note: although the original carried a polling scaffold, every branch
    returns on the first probe, so this is a single check — a missing
    session registration or a failed query is treated as fatal.
    """
    logger.debug("checking for db migrations")
    try:
        session().execute("SELECT * from alembic_version")
    except KeyError as e:
        logger.critical(
            "database has not be registered. please call db_register(db_url)"
        )
        return False
    except Exception as e:
        logger.critical(
            "database has not been migrated. please run: bdnsctl.py db-setup"
        )
        return False
    return True
def zones_to_records(self, zones):
    """Flatten zones into Record objects, one per resource record.

    Raises RuntimeError (chained to the original error) when any zone
    fails to convert.
    """
    records = []
    for zone in zones:
        try:
            logger.warning(f"loading zone: {zone.domain} ({zone.id})")
            for rr in self.zone_to_rr(zone):
                logger.debug(
                    "registering zone rr name {} and type {}".format(
                        rr.rname, QTYPE[rr.rtype]
                    )
                )
                rec = Record(zone, rr)
                records.append(rec)
                logger.debug(" %2d: %s", len(records), rec)
        except Exception as e:
            raise RuntimeError(
                f'Error processing line ({e.__class__.__name__}: {e}) "{zone.domain}"'
            ) from e
        logger.debug("zone map generated {}".format(str(zone)))
    logger.debug("%d zone resource records generated", len(records))
    return records
def maps_to_zones(self, maps):
    """Convert raw zone maps into Record entries, one per resource record.

    Raises RuntimeError (chained) when any map fails to convert.
    """
    zones = []
    for zone_map in maps:
        try:
            for rr in self.map_to_rr(zone_map):
                logger.debug(
                    'registering zone rr name {} and type {}'.format(
                        rr.rname, QTYPE[rr.rtype]))
                entry = Record(zone_map, rr)
                zones.append(entry)
                logger.debug(' %2d: %s', len(zones), entry)
        except Exception as e:
            raise RuntimeError(
                f'Error processing line ({e.__class__.__name__}: {e}) "{zone_map[0].strip()}"'
            ) from e
    logger.debug('%d zone resource records generated', len(zones))
    return zones
def match(self, q):
    """Return a truthy value when question `q` matches this record.

    Type must match (or the query be ANY); the name is glob-matched,
    with '.', '@' and '*' resolving to the zone map's first entry.
    """
    logger.debug(
        "record matcher: comparing types {} with {} (or any)".format(
            QTYPE[q.qtype], self.type))
    matched = False
    if q.qtype == QTYPE.ANY or QTYPE[q.qtype] == self.type:
        if str(self.name) in ['.', '@', '*']:
            record_name = self.zone_map[0]
            logger.debug(
                "record matcher: replacing rname {} with record {}".format(
                    self.name, record_name))
        else:
            record_name = self.name
        logger.debug(
            "record matcher: comparing request {}:{} to name {}:{}".format(
                QTYPE[q.qtype], q.qname, self.type, record_name))
        matched = q.qname.matchGlob(record_name)
        if matched:
            logger.info(
                "record matcher: match found {}:{} to name {}:{}".format(
                    QTYPE[q.qtype], q.qname, self.type, record_name))
    return matched
def debug(self, msg):
    """Forward `msg` to the module logger at debug level."""
    logger.debug(msg)
origins.append(use_origin) logger.info(f"[email protected] - Registering cors origins {origins}") api.add_middleware( CORSMiddleware, allow_origins=origins, allow_credentials=True, allow_methods=["*"], allow_headers=["*"], ), main_router = APIRouter() for r, ropts in routers: logger.debug(f"[email protected] - Registering router {str(r)} {str(ropts)}") main_router.include_router(r, **ropts) logger.debug(f"[email protected] - Registering main router {str(r)} {str(ropts)}") api.include_router(main_router, prefix=config.API_V1_STR) from starlette.websockets import WebSocket # public broadcast if int(environ.get("BROADCAST_ENABLED", 0)) == 1: logger.info("[email protected] - Registering broadcast routers") api.add_websocket_route("/broadcast", broadcast_index, name="broadcast.index")
from bountydns.api import config  # environment must be loaded

# CORS
api = FastAPI(title=config.API_PROJECT_NAME, openapi_url="/api/v1/openapi.json")

origins = []

# Set all CORS enabled origins
if config.API_CORS_ORIGINS:
    origins_raw = config.API_CORS_ORIGINS.split(",")
    for origin in origins_raw:
        use_origin = origin.strip()
        origins.append(use_origin)
    logger.debug(f"registering cors origins {origins}")
    # BUG FIX: removed the stray trailing comma after this call — it turned
    # the statement into a throwaway 1-tuple expression.
    api.add_middleware(
        CORSMiddleware,
        allow_origins=origins,
        allow_credentials=True,
        allow_methods=["*"],
        allow_headers=["*"],
    )

main_router = APIRouter()

# Mount every configured sub-router onto the main router, then expose the
# main router under the versioned API prefix.
for r, ropts in routers:
    logger.debug(f"registering router {str(r)} {str(ropts)}")
    main_router.include_router(r, **ropts)

api.include_router(main_router, prefix=config.API_V1_STR)
def log_data(self, dnsobj): logger.debug(str(dnsobj.toZone(" ")))
async def run(self):
    """Boot the API server: load env, run optional checks/migrations, start Uvicorn.

    Returns the supervisor/server result, or exits non-zero when the
    database or broadcast checks fail.
    """
    app = "bountydns.api.main:api"
    kwargs = self.get_kwargs()
    env = self.option("env")
    self.load_env(f"api.{env}")
    if self.should_import_check():
        logger.info("run@api_server.py - Performing import check")
        from bountydns.api.main import api
    logger.critical(
        "run@api_server.py - Starting api server with options: {}".format(
            str(kwargs)))
    from bountydns.db.checks import is_db_up, is_db_setup

    # alembic just destroys the loggers, it's annoying
    if self.should_db_check():
        logger.info(
            "run@api_server.py - Waiting for database service to be up")
        db_wait_options = self._args_to_dict(self.options)
        await DbWait(db_wait_options).run()
    if self.option("db_setup"):
        logger.critical("run@api_server.py - Running database migration")
        db_setup_options = self._args_to_dict(self.options)
        if self.option("db_seed"):
            db_setup_options["seed"] = True
        await DbSetup(db_setup_options).run()
    if self.should_db_check():
        logger.info(
            "run@api_server.py - Checking if application database is setup and configured"
        )
        db_setup = is_db_setup()
        if not db_setup:
            logger.critical(
                "run@api_server.py - Database not setup error. please check logs"
            )
            return self.exit(1)
    from bountydns.broadcast import is_broadcast_up

    if self.should_bcast_check():
        bcast_up = await is_broadcast_up()
        if not bcast_up:
            # BUG FIX: this message contained a raw line break that split the
            # string literal across source lines; rejoined onto one line.
            logger.critical(
                "run@api_server.py - Broadcast (queue) not up error. please check logs"
            )
            return self.exit(1)
    if self.option("db_seed_env", False):
        self.seed_from_env()

    # taken from uvicorn/main.py:run
    logger.debug("run@api_server.py - Building Uvicorn Config and Server")
    config = UvicornConfig(app, log_config=self.get_uvicorn_logging(), **kwargs)
    server = UvicornServer(config=config)
    if self.option("force_exit"):
        server.force_exit = True
    # Dispatch on run mode: dev reload, multi-worker, or single process.
    # (f-prefixes dropped below: the strings have no placeholders.)
    if isinstance(app, str) and (config.debug or config.reload):
        logger.warning(
            "run@api_server.py - Running bountydns api in dev mode...")
        sock = config.bind_socket()
        supervisor = StatReload(config)
        return supervisor.run(server.run, sockets=[sock])
    elif config.workers > 1:
        sock = config.bind_socket()
        supervisor = Multiprocess(config)
        logger.warning(
            "run@api_server.py - Running bountydns api in worker mode...")
        return supervisor.run(server.run, sockets=[sock])
    else:
        sockets = None
        logger.warning(
            "run@api_server.py - Running bountydns api in standard mode..."
        )
        return await server.serve(sockets=sockets)
def from_zones(cls, zones):
    """Aggregate every zone's records into a single instance.

    Delegates per-zone loading to from_zone and concatenates the results.
    """
    records = []
    for zone in zones:
        # extend() instead of `records = records + ...`: the latter rebuilds
        # the whole list each iteration (accidental O(n^2)).
        records.extend(cls.from_zone(zone).records)
    logger.debug("%d zone resource records generated", len(records))
    return cls(records)
def log_request(self, handler, request, request_uuid):
    """Record an incoming DNS request through the api client, then defer to
    the base class's request logging.

    NOTE(review): create_dns_request presumably reports the request to the
    api backend — confirm it is non-blocking enough for this hot path.
    """
    logger.debug(f"log_request: {handler}, {request}, {request_uuid}")
    self.api.create_dns_request(handler, request, request_uuid)
    super().log_request(handler, request)