def run(self):
    """Configure and launch UDP+TCP DNS servers, then block until interrupted."""
    zone_list = self.get_zones()
    upstream_dns = self.get_upstream()
    api_url = self.get_api_url()
    api_token = self.get_api_token()
    bind_port = self.get_port()
    bind_addr = self.get_listen()

    dns_resolver = Resolver(zone_list, upstream_dns, api_url, api_token)

    # one server per transport; both share the same resolver
    udp_server = DNSServer(dns_resolver, address=bind_addr, port=bind_port, logger=DnsLogger())
    tcp_server = DNSServer(dns_resolver, address=bind_addr, port=bind_port, tcp=True, logger=DnsLogger())

    logger.info(
        'starting DNS server on port %d, upstream DNS server "%s", %d zones',
        bind_port,
        upstream_dns,
        len(zone_list),
    )
    udp_server.start_thread()
    tcp_server.start_thread()
    try:
        # keep the main thread alive while the UDP server thread runs
        while udp_server.isAlive():
            sleep(1)
    except KeyboardInterrupt:
        pass
def log_reply(self, handler, reply):
    """Record a DNS reply: emit an API RESPONSE event (when configured) and log a summary line."""
    if self.api_client:
        # mirror the reply into the API as a RESPONSE event
        self.api_client.dns_event.make(
            event="RESPONSE",
            name=reply.q.qname,
            type=QTYPE[reply.q.qtype],
            source_address=handler.client_address[0],
            source_port=handler.client_address[1],
            protocol=handler.protocol,
        )
    # fields shared by both log formats
    common = (
        self.log_prefix(handler),
        handler.client_address[0],
        handler.client_address[1],
        handler.protocol,
        reply.q.qname,
        QTYPE[reply.q.qtype],
    )
    if reply.header.rcode == RCODE.NOERROR:
        # successful reply: list the answer record types
        rr_types = ",".join([QTYPE[a.rtype] for a in reply.rr])
        logger.info("%sReply: [%s:%d] (%s) / '%s' (%s) / RRs: %s" % (common + (rr_types,)))
    else:
        # error reply: log the response code instead of answers
        logger.info("%sReply: [%s:%d] (%s) / '%s' (%s) / %s" % (common + (RCODE[reply.header.rcode],)))
    self.log_data(reply)
async def store(
    form: DnsRequestCreateForm,
    dns_request_repo: DnsRequestRepo = Depends(DnsRequestRepo()),
    zone_repo: ZoneRepo = Depends(ZoneRepo()),
    dns_server_repo: DnsServerRepo = Depends(DnsServerRepo()),
    token: str = Depends(ScopedTo("dns-request:create")),
):
    """Create a dns request row, resolving its dns server id and best-match zone id.

    Requires the "dns-request:create" scope; `first_or_fail` presumably raises
    (404) when the named dns server does not exist -- confirm against the repo.
    """
    # NOTE(review): other endpoints in this codebase pass the repo class to
    # Depends() uncalled; here instances are constructed eagerly -- confirm intended.
    dns_server_id = (dns_server_repo.first_or_fail(
        name=form.dns_server_name.lower()).results().id)
    # find a zone whose domain appears within the requested name (suffix match)
    zone = (zone_repo.filter(
        literal(form.name.lower()).contains(
            zone_repo.label("domain"))).first().results())
    zone_id = zone.id if zone else None
    # whitelist the form fields that may be persisted
    data = only(
        dict(form),
        [
            "name", "source_address", "source_port", "type", "protocol",
            "raw_request"
        ],
    )
    # normalize casing before storage
    data["name"] = data["name"].lower()
    data["type"] = data["type"].upper()
    data["dns_server_id"] = dns_server_id
    data["zone_id"] = zone_id
    logger.info("[email protected] - Creating DNS Request")
    dns_request = dns_request_repo.create(data).data()
    return DnsRequestResponse(dns_request=dns_request)
def get_zones(self):
    """Fetch this DNS server's zones (with their dns_records included) as ZoneData objects."""
    logger.info("getting zones")
    response = self.get(
        f"/dns-server/{self.dns_server_name}/zone",
        params={"includes": ["dns_records"]},
    )
    return [ZoneData(**zone) for zone in response["zones"]]
def db_register_model_events(models):
    """Attach ORM event callbacks: for each model, register every on_<event> hook it defines."""
    for model in models:
        for event_name in ORM_EVENTS:
            callback_name = "on_" + event_name
            # skip models that don't implement this hook
            if not hasattr(model, callback_name):
                continue
            logger.info(
                "[email protected]: registering " + callback_name + " on " + str(model))
            listen(model, event_name, make_event(getattr(model, callback_name)))
def from_zone(cls, zone):
    """Build a resolver from a zone's dns_records, falling back to template defaults.

    Presumably used as a classmethod alternate constructor -- TODO confirm the
    decorator on the enclosing class. Re-raises any parse error from RR.fromZone.
    """
    records = []
    logger.info(
        f"[email protected] - Loading zone: {zone.domain}/{zone.ip} ({zone.id})"
    )
    dns_records = zone.dns_records or []
    # if the zone has no records, create some default ones
    if not dns_records:
        logger.warning(
            f"[email protected] - Zone has no dns_records. loading defaults: {zone.domain}/{zone.ip} ({zone.id})"
        )
        # render the default template for this zone and parse it into RRs
        rrs = RR.fromZone(
            ZONE_TEMPLATE.format(domain_name=zone.domain, domain_ip=zone.ip)
        )
        zone_records = [Record.make(zone, rr) for rr in rrs]
        for zr in zone_records:
            # TODO: make this clean on output
            rrstr = str(dedent(str(zr.rr)))
            logger.debug(f"[email protected] - Loading record entry: {rrstr}")
            logger.debug(
                "[email protected] - Loaded record details - name: {} | rtype: {} | rr: {}".format(
                    str(zr.rr.rname), str(QTYPE[zr.rr.rtype]), str(zr.rr)
                )
            )
    else:
        # loop over each dns_record of the zone and convert it to RR record
        # sorted by the record's explicit sort order
        dns_records = sorted(dns_records, key=lambda x: x.sort)
        zone_records = []
        for dns_record in dns_records:
            try:
                rrs = RR.fromZone(dns_record.record)
                _zone_records = [Record.make(zone, rr) for rr in rrs]
                for zr in _zone_records:
                    rrstr = str(dedent(str(zr.rr)))
                    logger.debug(
                        f"[email protected] - Loading record: {str(dns_record.record)}"
                    )
                    logger.debug(
                        f"[email protected] - Loading record entry: {rrstr}"
                    )
                    logger.debug(
                        "[email protected] - Loaded record details - name: {} | rtype: {} | rr: {}".format(
                            str(zr.rr.rname), str(QTYPE[zr.rr.rtype]), str(zr.rr)
                        )
                    )
                zone_records = zone_records + _zone_records
            except Exception as e:
                # a single unparseable record aborts loading the whole zone
                logger.critical(
                    f'[email protected] - Error processing line ({e.__class__.__name__}: {e}) "{dns_record.id}:{dns_record.record}" '
                )
                raise e
    # add the records for the zone to the rest of the records
    records = records + zone_records
    return cls(records)
def post(self, url: str, data=None, fail=True):
    """POST json `data` to the API path `url` and return the parsed JSON body.

    When fail is True, a non-200 response is logged and raised via raise_for_status.
    """
    payload = data or {}
    headers = self.get_default_headers()
    res = requests.post(self.url(url), json=payload, headers=headers)
    logger.info("Posting URL: " + str(self.url(url)))
    if fail and res.status_code != 200:
        logger.critical(f"Error posting API {self.url(url)}: " + str(res.json()))
        res.raise_for_status()
    return res.json()
def refresh_zones_if_needed(self):
    """Diff freshly-fetched zones against the cached set; reload on any mismatch.

    Returns True when a reload was triggered, False when everything matched.
    Comparison is pairwise over zones and their records (O(n*m)) -- presumably
    acceptable because zone counts are small; confirm before scaling up.
    """
    logger.info(
        "refresh_zones_if_needed@api_client.py - Checking for New Zones and Records..."
    )
    old_zones = self.zones
    new_zones = self.get_zones()
    # TODO: fix this mess
    # fast path: any change in zone count forces a reload
    if len(old_zones) != len(new_zones):
        logger.warning(
            f"refresh_zones_if_needed@api_client.py - Zone Length mistmatch. New or Changed Zone Found: {str(old_zones)} != {str(new_zones)}. Reloading zones."
        )
        self.load_zones()
        return True
    for nz in new_zones:
        # make sure new zones are in old zones
        is_nz_exists = False
        for oz in old_zones:
            # zones are matched by (domain, ip) pair
            if oz.domain == nz.domain and oz.ip == nz.ip:
                nz_dns_records = nz.dns_records or []
                for nrec in nz_dns_records:
                    is_rec_satisfied = False
                    oz_dns_records = oz.dns_records or []
                    # record-count mismatch within a matched zone forces a reload
                    if len(nz_dns_records) != len(oz_dns_records):
                        logger.warning(
                            f"refresh_zones_if_needed@api_client.py - Zone Record Length mistmatch {str(len(nz_dns_records))} != {str(len(oz_dns_records))}. New or Changed Zone Record Found: {str(nz_dns_records)} != {str(oz_dns_records)}. Reloading zones."
                        )
                        self.load_zones()
                        return True
                    # a record matches when both its text and sort order agree
                    for orec in oz_dns_records:
                        if orec.record == nrec.record and orec.sort == nrec.sort:
                            is_rec_satisfied = True
                    if not is_rec_satisfied:
                        logger.warning(
                            f"refresh_zones_if_needed@api_client.py - New or Changed Zone Record {str(nz)}: {str(nrec)} found for server. Reloading zones."
                        )
                        self.load_zones()
                        return True
                is_nz_exists = True
        if not is_nz_exists:
            logger.warning(
                f"refresh_zones_if_needed@api_client.py - New or Changed Zone {str(nz)} found for server. Reloading zones."
            )
            self.load_zones()
            return True
    logger.info(
        "refresh_zones_if_needed@api_client.py - No New Zones or Records Found. All is well"
    )
    return False
def get(self, url: str, params=None, fail=True):
    """GET the API path `url` with default auth headers and return the parsed JSON body.

    When fail is True, a non-200 response is logged and raised via raise_for_status.
    """
    query = params or {}
    headers = self.get_default_headers()
    res = requests.get(self.url(url), headers=headers, params=query)
    logger.info("Getting URL: " + str(self.url(url)))
    if fail and res.status_code != 200:
        logger.critical(f"Error getting API {self.url(url)}: " + str(res.json()))
        res.raise_for_status()
    return res.json()
async def run(self):
    """Run the alembic workflow (init when missing, upgrade, migrate, upgrade), then optionally seed."""
    if not Path(AlembicInit.migration_dir).is_dir():
        # first run: the migration directory doesn't exist yet
        logger.info("[*] running alembic-init")
        await AlembicInit.make(self.options).run()

    # fixed sequence of alembic steps, each with its own log line
    steps = (
        ("[*] running alembic upgrade", AlembicUpgrade),
        ("[*] running alembic migrate", AlembicMigrate),
        ("[*] running alembic upgrade again", AlembicUpgrade),
    )
    for message, command in steps:
        logger.info(message)
        await command.make(self.options).run()

    if self.option("seed", None):
        logger.info("[*] running db seed")
        await DbSeed.make(self.options).run()
async def broadcast_index(websocket: WebSocket):
    """Demo websocket endpoint: accept the connection, then greet on every received json.

    Returns 1 on any accept or receive/send failure.
    """
    try:
        logger.info("[email protected] - Accepting websockets")
        await websocket.accept()
    except Exception as e:
        logger.critical(
            f"[email protected] - Accept: Error: {str(type(e))}")
        logger.critical(
            f"[email protected] - Accept: Trace: {str(e)}")
        logger.critical(
            f"[email protected] - Accept:Not closing or unsubscribing"
        )
        return 1
    try:
        # the receive loop only ends via an exception (e.g. client disconnect)
        while True:
            logger.info("[email protected] - Receiving json")
            data = await websocket.receive_json()
            logger.info("[email protected] - Received json: " + str(data))
            logger.info("[email protected] - Sending message: " + str({"message": "greetings"}))
            await websocket.send_json({"message": "greetings"})
    except Exception as e:
        logger.critical(
            f"[email protected] - Receieve/Send: Error: {str(type(e))}"
        )
        logger.critical(
            f"[email protected] - Receieve/Send: Trace: {str(e)}")
        logger.critical(
            f"[email protected] - Receieve/Send: Not closing or unsubscribing"
        )
        return 1
    # NOTE(review): unreachable -- the while True above can only exit by raising,
    # and the except handler always returns; confirm whether close() should run
    await websocket.close()
async def on_after_insert(mapper, connection, target):
    """ORM after-insert hook: broadcast a DNS_REQUEST_CREATED message over redis (best effort)."""
    logger.info("on_after_insert@DnsRequest: Publishing message")
    message = {
        "type": "MESSAGE",
        "name": "DNS_REQUEST_CREATED",
        "payload": "",
    }
    try:
        publisher = await make_redis()
        await publisher.publish_json("channel:auth", message)
    except Exception as e:
        # publishing is best-effort; a failed broadcast must not break the insert
        logger.warning(f"on_after_insert error: {str(e)}")
def create_dns_request(self, handler, request, request_uuid):
    """Persist an inbound DNS request through the API's /dns-request endpoint.

    `request_uuid` is accepted for interface compatibility but not sent.
    """
    logger.info("creating dns request")
    # normalize the query name: strip the trailing root dot
    qname = str(request.q.qname).rstrip(".")
    source_address = handler.client_address[0]
    source_port = handler.client_address[1]
    self.post(
        "/dns-request",
        data={
            "name": qname,
            "source_address": str(source_address),
            "source_port": int(source_port),
            "type": str(QTYPE[request.q.qtype]),
            "protocol": str(handler.protocol),
            "dns_server_name": str(self.dns_server_name),
        },
    )
async def broadcast_auth(websocket: WebSocket):
    """Authenticated websocket: verify a JWT from the query string, then relay redis "auth" channel messages.

    Returns 1 on accept or relay failure; on a clean channel close it
    unsubscribes and closes the socket.
    """
    try:
        await websocket.accept()
    except Exception as e:
        logger.critical(
            f"[email protected] - Accept Error: {str(type(e))}")
        logger.critical(
            f"[email protected] - Accept Trace: {str(e)}")
        return 1
    # the token arrives as ?ws_access_token=... on the websocket URL
    params = parse_qs(urlparse(str(websocket.url)).query)
    token = verify_jwt_token(params["ws_access_token"][0])
    if not token_has_required_scopes(token, []):  # TODO: check scopes later
        raise HTTPException(403, detail="Forbidden")
    user_repo = UserRepo(session())
    user = await current_user(token, user_repo)
    subscriber, channel = await make_subscriber("auth")
    try:
        # relay each pubsub message to the websocket until the channel closes
        while await channel.wait_message():
            logger.info("[email protected] - Waiting for message")
            msg = await channel.get(encoding="utf-8")
            logger.info("[email protected] - Received message: " + str(msg))
            data = json.loads(msg)
            logger.info("[email protected] - Sending message: " + str(data))
            await websocket.send_json(data)
    except Exception as e:
        logger.critical(
            f"[email protected] - Receieve/Send: Error: {str(type(e))}"
        )
        logger.critical(
            f"[email protected] - Receieve/Send: Trace:{str(e)}")
        logger.critical(
            f"[email protected] - Receieve/Send: Not closing or unsubscribing"
        )
        return 1
    # clean shutdown path: only reached when wait_message() returns falsy
    logger.info(f"[email protected] - Attempting to unsuscribe")
    await subscriber.unsubscribe("channel:auth")
    logger.info(
        f"[email protected]: Websocket - Attempting to close socket"
    )
    await websocket.close()
def wait_for_up(self):
    """Poll the API status endpoint until it answers, giving up after 60 attempts.

    Returns True once the API responds, False if it never comes up. Each failed
    attempt sleeps 2 seconds, so the counter tracks attempts, not wall-clock
    seconds -- the old name/message ("seconds") was misleading and is fixed here
    to match the newer wait_for_up implementation.
    """
    attempts = 0
    while True:
        if attempts > 60:
            logger.warning("could not connect to api. api not up")
            return False
        logger.info("checking for api status")
        try:
            sleep(1)
            self.get_status()
            # give the api a moment to settle before callers start using it
            sleep(3)
            return True
        except Exception as e:
            logger.info("api check not ready after {} attempts: {}".format(
                str(attempts), str(e.__class__.__name__)))
            attempts = attempts + 1
            sleep(1)
async def run(self):
    """Boot the API server: load env, run optional import/db/broadcast checks, then uvicorn."""
    args = ["bountydns.api.main:api"]
    kwargs = self.get_kwargs()
    self.load_env("api")
    if self.should_import_check():
        logger.info("performing import check")
        # importing the app module surfaces configuration errors early
        from bountydns.api.main import api
    logger.critical("starting api server with options: {}".format(
        str(kwargs)))
    from bountydns.db.checks import is_db_up, is_db_setup
    if self.should_db_check():
        self.db_register()
        db_up = is_db_up()
        if not db_up:
            logger.critical("database not up error. please check logs")
            return self.exit(1)
    if self.option("db_setup"):
        logger.critical("running database migration")
        db_setup_options = self._args_to_dict(self.options)
        if self.option("db_seed"):
            db_setup_options["seed"] = True
        await DbSetup(db_setup_options).run()
    # re-check after the optional migration above
    if self.should_db_check():
        db_setup = is_db_setup()
        if not db_setup:
            logger.critical("database not setup error. please check logs")
            return self.exit(1)
    from bountydns.broadcast import is_broadcast_up
    if self.should_bcast_check():
        bcast_up = await is_broadcast_up()
        if not bcast_up:
            logger.critical(
                "broadcast (queue) not up error. please check logs")
            return self.exit(1)
    if self.option("db_seed_env", False):
        self.seed_from_env()
    return uvicorn.run(*args, **kwargs)
def get_zones(self):
    """Collect (domain, ip) zone pairs from CLI options, zone files, and environment variables."""
    zones = []

    # zones passed directly on the command line as "domain:ip"
    if self.option("zone", None):
        for entry in self.option("zone"):
            parts = entry.split(":")
            zones.append((parts[0], parts[1]))

    # zones read from a file named via option or environment
    if self.option("zone_file"):
        logger.info('loading zones from zone file "%s"', self.option("zone_file"))
        zones = zones + self.read_zone_file(self.option("zone_file"))
    if os.getenv("ZONE_FILE", None):
        logger.info('loading zones from zone file "%s"', os.getenv("ZONE_FILE"))
        zones = zones + self.read_zone_file(os.getenv("ZONE_FILE"))

    # comma-separated "domain:ip" pairs from the ZONES environment variable
    if os.getenv("ZONES", None):
        pairs = [entry.split(":") for entry in os.getenv("ZONES").split(",")]
        zones = zones + [(parts[0], parts[1]) for parts in pairs]

    return zones
def log_request(self, handler, request):
    """Record an inbound DNS request: emit an API REQUEST event (when configured) and log a summary."""
    if self.api_client:
        # mirror the request into the API as a REQUEST event
        self.api_client.dns_event.make(
            event="REQUEST",
            name=request.q.qname,
            type=QTYPE[request.q.qtype],
            source_address=handler.client_address[0],
            source_port=handler.client_address[1],
            protocol=handler.protocol,
        )
    client_host = handler.client_address[0]
    client_port = handler.client_address[1]
    logger.info("%sRequest: [%s:%d] (%s) / '%s' (%s)" % (
        self.log_prefix(handler),
        client_host,
        client_port,
        handler.protocol,
        request.q.qname,
        QTYPE[request.q.qtype],
    ))
    self.log_data(request)
async def run(self):
    """Start API-backed UDP+TCP DNS servers and block until the UDP server thread dies."""
    port = self.get_port()
    listen = self.get_listen()

    # TODO: thread issues?
    api_client = ApiClient(self.get_api_url(), self.get_api_token())
    if not api_client.wait_for_up():
        logger.critical("could not connect to api. quitting")
        self.exit(1)

    if self.option("no_sync"):
        logger.info("skipping syncing api token")
    else:
        api_client.sync()

    resolver = Resolver(api_client)
    # the two transports share address/port/handler; only tcp differs
    udp_server = DNSServer(
        resolver,
        address=listen,
        port=port,
        handler=DNSHandler,
        logger=DNSLogger(api_client),
    )
    tcp_server = DNSServer(
        resolver,
        address=listen,
        port=port,
        tcp=True,
        handler=DNSHandler,
        logger=DNSLogger(api_client),
    )

    logger.info("starting DNS server on port %d", port)
    udp_server.start_thread()
    tcp_server.start_thread()
    try:
        # keep the coroutine alive while the UDP server thread runs
        while udp_server.isAlive():
            sleep(1)
    except KeyboardInterrupt:
        pass
def wait_for_up(self):
    """Poll the API status endpoint until it answers, giving up after 60 attempts."""
    attempts = 0
    while attempts <= 60:
        logger.info(
            f"wait_for_up@api_client.py - checking for api status : {self.url('/status')}"
        )
        try:
            sleep(1)
            self.get_status()
            # give the api a moment to settle before callers start using it
            sleep(3)
            return True
        except Exception as e:
            logger.info(
                "wait_for_up@api_client.py - api check not ready after {} attempts: {}"
                .format(str(attempts), str(e.__class__.__name__)))
            attempts += 1
            sleep(1)
    logger.warning("could not connect to api. api not up")
    return False
async def run(self):
    """Delete every row of every registered model; requires --confirm.

    Failures are collected per-item and reported at the end rather than
    aborting the sweep.
    """
    self.load_env("api")
    self.db_register()
    failed = []
    if self.option("confirm"):
        for model in models:
            for item in self.session().query(model).all():
                logger.info(f"deleting {item}")
                try:
                    self.session().delete(item)
                    self.session().commit()
                except Exception as e:
                    failed.append((item, e))
    else:
        logger.warning("You must confirm to drop data")
    if len(failed) > 0:
        logger.critical("encountered errors")
        for failure in failed:
            # bug fix: previously printed the leaked loop variable `item`
            # for every failure instead of the (item, error) tuple entries
            print("Failed:", failure[0])
            print("Error", failure[1])
def boot(self):
    """Build the resolver and UDP/TCP DNS servers (not yet started) on the configured address/port."""
    port = self.get_port()
    listen = self.get_listen()
    self.resolver = Resolver(self.api_client)

    def build_server(tcp):
        # both transports share everything except the tcp flag
        return DNSServer(
            self.resolver,
            address=listen,
            port=port,
            tcp=tcp,
            handler=DNSHandler,
            logger=DNSLogger(self.api_client),
        )

    self.udp_server = build_server(tcp=False)
    self.tcp_server = build_server(tcp=True)
    logger.info("starting DNS server on port %d", port)
def match(self, q):
    """Return truthy when question `q` matches this record's type and glob name."""
    logger.debug(
        "record matcher: comparing types {} with {} (or any)".format(
            QTYPE[q.qtype], self.type))
    # type must match (or the query must be ANY) before names are compared
    if q.qtype != QTYPE.ANY and QTYPE[q.qtype] != self.type:
        return False
    if str(self.name) in ['.', '@', '*']:
        # self-referencing names resolve against the zone's own name
        record_name = self.zone_map[0]
        logger.debug(
            "record matcher: replacing rname {} with record {}".format(
                self.name, record_name))
    else:
        record_name = self.name
    logger.debug(
        "record matcher: comparing request {}:{} to name {}:{}".format(
            QTYPE[q.qtype], q.qname, self.type, record_name))
    matched = q.qname.matchGlob(record_name)
    if matched:
        logger.info(
            "record matcher: match found {}:{} to name {}:{}".format(
                QTYPE[q.qtype], q.qname, self.type, record_name))
    return matched
async def index(
    api_token_repo: ApiTokenRepo = Depends(ApiTokenRepo),
    token: TokenPayload = ScopedTo("api-token:syncable"),
):
    """Persist the caller's api token if it is not already stored, and return it.

    Requires the "api-token" or "api-token:syncable" scope; otherwise 403.
    """
    scopes = token.scopes
    if "api-token" in scopes or "api-token:syncable" in scopes:
        api_token = None
        if not api_token_repo.exists(token=token.token):
            api_token_repo.clear()
            logger.info("saving api token from auth token")
            api_token = api_token_repo.create(
                dict(
                    token=token.token,
                    scopes=" ".join(scopes),
                    dns_server_name=token.payload.dns_server_name,
                    expires_at=datetime.utcfromtimestamp(float(token.exp)),
                )).data()
        else:
            logger.info("token already exists in database")
            # NOTE(review): relies on exists() leaving the matched row loaded in
            # the repo so data() returns the existing token -- confirm repo contract
            api_token = api_token_repo.data()
        return ApiTokenResponse(api_token=api_token)
    else:
        # NOTE(review): 403 with a "Not found" detail -- confirm the detail text is intended
        raise HTTPException(403, detail="Not found")
def seed_from_env(self):
    """Seed users for indices 0-8 from SEED_USER_<i>_* environment variables, skipping existing emails."""
    from bountydns.core.entities.user import UserRepo

    for index in range(9):
        index = str(index)
        email = environ.get(f"SEED_USER_{index}_EMAIL", None)
        password = environ.get(f"SEED_USER_{index}_PASSWORD", None)
        is_superuser = int(environ.get(f"SEED_USER_{index}_SUPERUSER", 0))
        # both email and password are required to seed an entry
        if not (email and password):
            continue
        hashed_password = hash_password(password)
        repo = UserRepo(db=self.session())
        if repo.exists(email=email):
            logger.info(f"seeded user {email} already exists")
        else:
            logger.info(f"seeding user {email}")
            factory("UserFactory").create(
                email=email,
                hashed_password=hashed_password,
                is_superuser=is_superuser,
            )
async def sync(
    api_token_repo: ApiTokenRepo = Depends(ApiTokenRepo),
    dns_server_repo: DnsServerRepo = Depends(DnsServerRepo),
    token: TokenPayload = Depends(ScopedTo("api-token:syncable")),
):
    """Idempotently store the caller's api token and its dns server, returning the token with the relation.

    Requires the "api-token" or "api-token:syncable" scope; otherwise 403.
    """
    scopes = token.scopes
    if "api-token" in scopes or "api-token:syncable" in scopes:
        # NOTE(review): api_token is assigned but never used below -- leftover?
        api_token = None
        dns_server = None
        # ensure the dns server named in the token payload exists
        if not dns_server_repo.exists(name=token.payload.dns_server_name.lower()):
            dns_server_repo.clear()
            logger.info("saving dns server from api token")
            dns_server = dns_server_repo.create(
                dict(name=token.payload.dns_server_name.lower())
            ).results()
        else:
            dns_server = dns_server_repo.results()
        if not api_token_repo.loads("dns_server").exists(token=token.token):
            api_token_repo.clear()
            logger.info("saving api token from auth token")
            item = api_token_repo.create(
                dict(
                    token=token.token,
                    scopes=" ".join(scopes),
                    dns_server=dns_server,
                    expires_at=datetime.utcfromtimestamp(float(token.exp)),
                )
            ).data()
            api_token_id = item.id
            # re-fetch so the dns_server relation is loaded and included
            item = (
                api_token_repo.clear()
                .loads("dns_server")
                .get(api_token_id)
                .includes("dns_server")
                .data()
            )
        else:
            logger.info("token already exists in database")
            item = api_token_repo.loads("dns_server").includes("dns_server").data()
        return ApiTokenResponse(api_token=item)
    else:
        raise HTTPException(403, detail="Not found")
def get_zones(self):
    """Fetch this DNS server's zones from the API and wrap each as ZoneData."""
    logger.info("getting zones")
    payload = self.get(f"/dns-server/{self.dns_server_name}/zone")
    return [ZoneData(**zone) for zone in payload["zones"]]
def sync(self):
    """Register this server's api token with the API; non-200 responses are tolerated (fail=False)."""
    logger.info("syncing api token")
    response = self.post("/api-token/sync", fail=False)
    return response
async def run(self):
    """Boot the API with uvicorn after env load and optional import/db/broadcast checks.

    Chooses reload (StatReload), multiprocess, or standard serve mode from the
    uvicorn config -- mirrors uvicorn/main.py:run.
    """
    app = "bountydns.api.main:api"
    kwargs = self.get_kwargs()
    env = self.option("env")
    self.load_env(f"api.{env}")
    if self.should_import_check():
        logger.info("performing import check")
        # importing the app module surfaces configuration errors early
        from bountydns.api.main import api
    logger.critical("starting api server with options: {}".format(
        str(kwargs)))
    from bountydns.db.checks import is_db_up, is_db_setup
    if self.should_db_check():
        self.db_register()
        db_up = is_db_up()
        if not db_up:
            logger.critical("database not up error. please check logs")
            return self.exit(1)
    if self.option("db_setup"):
        logger.critical("running database migration")
        db_setup_options = self._args_to_dict(self.options)
        if self.option("db_seed"):
            db_setup_options["seed"] = True
        await DbSetup(db_setup_options).run()
    # re-check after the optional migration above
    if self.should_db_check():
        db_setup = is_db_setup()
        if not db_setup:
            logger.critical("database not setup error. please check logs")
            return self.exit(1)
    from bountydns.broadcast import is_broadcast_up
    if self.should_bcast_check():
        bcast_up = await is_broadcast_up()
        if not bcast_up:
            logger.critical(
                "broadcast (queue) not up error. please check logs")
            return self.exit(1)
    if self.option("db_seed_env", False):
        self.seed_from_env()
    # taken from uvicorn/main.py:run
    config = UvicornConfig(app, **kwargs)
    server = UvicornServer(config=config)
    if isinstance(app, str) and (config.debug or config.reload):
        sock = config.bind_socket()
        supervisor = StatReload(config)
        logger.warning(f"running bountydns api in dev mode...")
        return supervisor.run(server.run, sockets=[sock])
    elif config.workers > 1:
        sock = config.bind_socket()
        supervisor = Multiprocess(config)
        logger.warning(f"running bountydns api in worker mode...")
        return supervisor.run(server.run, sockets=[sock])
    else:
        sockets = None
        logger.warning(f"running bountydns api in standard mode...")
        return await server.serve(sockets=sockets)
def seed_from_env(self):
    """Seed users, dns servers, and zones (indices 0-8) from SEED_* environment variables.

    Existing rows are left untouched. Zones may be global (no dns server) or
    attached to a dns server that is created on demand.
    """
    from bountydns.core.user import UserRepo
    from bountydns.core.zone import ZoneRepo
    from bountydns.core.dns_server import DnsServerRepo
    from bountydns.db.session import _scoped_session

    session = _scoped_session

    # users: SEED_USER_<i>_EMAIL / _PASSWORD / _SUPERUSER
    for i in range(9):
        i = str(i)
        email = environ.get(f"SEED_USER_{i}_EMAIL", None)
        password = environ.get(f"SEED_USER_{i}_PASSWORD", None)
        is_superuser = int(environ.get(f"SEED_USER_{i}_SUPERUSER", 0))
        if email and password:
            email = email.lower()
            hashed_password = hash_password(password)
            repo = UserRepo(db=session)
            if not repo.exists(email=email):
                logger.info(f"seeding user {email}")
                factory("UserFactory", session=session).create(
                    email=email,
                    hashed_password=hashed_password,
                    is_superuser=is_superuser,
                )
            else:
                logger.info(f"seeded user {email} already exists")

    # dns servers: SEED_DNS_SERVER_<i>_NAME
    for i in range(9):
        i = str(i)
        name = environ.get(f"SEED_DNS_SERVER_{i}_NAME", None)
        if name:
            repo = DnsServerRepo(db=session)
            if not repo.exists(name=name):
                logger.info(f"seeding domain {name}")
                factory("DnsServerFactory", session=session).create(name=name)

    # zones: SEED_ZONE_<i>_IP / _DOMAIN / _DNS_SERVER_NAME
    for i in range(9):
        i = str(i)
        ip = environ.get(f"SEED_ZONE_{i}_IP", None)
        domain = environ.get(f"SEED_ZONE_{i}_DOMAIN", None)
        if domain:
            domain = domain.lower()
        dns_server_name = environ.get(f"SEED_ZONE_{i}_DNS_SERVER_NAME", None)
        if ip and domain:
            if dns_server_name:
                dns_server_repo = DnsServerRepo(db=session)
                if dns_server_repo.exists(name=dns_server_name):
                    dns_server = dns_server_repo.results()
                else:
                    # bug fix: log the zone's dns server name; previously this
                    # interpolated `name` left over from the dns-server loop above
                    logger.info(
                        f"seeding dns server as zone dependency: {dns_server_name}")
                    dns_server = factory(
                        "DnsServerFactory", session=session).create(name=dns_server_name)
                factory("ZoneFactory", session=session).create(
                    ip=ip, domain=domain, dns_server=dns_server)
            else:
                repo = ZoneRepo(db=session)
                if not repo.exists(ip=ip, domain=domain):
                    logger.info(
                        f"seeding zone without dns server: {ip}, {domain}")
                    factory("GlobalZoneFactory", session=session).create(ip=ip, domain=domain)