def _reset_flag(cls, cache_type):
    setattr(cls, f'clear_{cache_type}', False)
    with ConfigurationManager('dns_server') as dnx:
        dns_settings = dnx.load_configuration()
        dns_settings['cache'][cache_type] = False

        dnx.write_configuration(dns_settings)

    Log.notice(f'{cache_type.replace("_", " ")} has been cleared.')
def _recv_handler(self, recv_buffer=[]):
    Log.debug(f'[{self._relay_conn.remote_ip}/{self._protocol.name}] Response handler opened.')  # pylint: disable=no-member

    recv_buff_append = recv_buffer.append
    recv_buff_clear = recv_buffer.clear
    conn_recv = self._relay_conn.recv
    responder_add = self._DNSServer.responder.add

    while True:
        try:
            data_from_server = conn_recv(2048)
        except OSError:
            break
        except timeout:
            self.mark_server_down()
            Log.warning(f'[{self._relay_conn.remote_ip}/{self._protocol.name}] Remote server connection timeout. Marking down.')  # pylint: disable=no-member

            return
        else:
            # if no data is received/EOF the remote end has closed the connection
            if (not data_from_server):
                break

            self._reset_fail_detection()

            recv_buff_append(data_from_server)
            while recv_buffer:
                current_data = byte_join(recv_buffer)
                data_len, data = short_unpackf(current_data)[0], current_data[2:]

                # more data is needed for a complete response. NOTE: this scenario is kind of dumb
                # and shouldn't happen unless the server sends the record length and record separately.
                if (len(data) < data_len):
                    break

                # clearing the buffer since we either have nothing left to process or we will re-add
                # the leftover bytes back with the next condition.
                recv_buff_clear()

                # if expected data length is greater than local buffer, multiple records were returned
                # in a batch so appending leftover bytes after removing the current record's data from buffer.
                if (len(data) > data_len):
                    recv_buff_append(data[data_len:])

                # ignoring internally generated connection keepalives
                if (data[0] != DNS.KEEPALIVE):
                    responder_add(data[:data_len])

    self._relay_conn.sock.close()
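# NOTE (editor): illustrative sketch only, not part of the original module. DNS over TCP/TLS
# prefixes every message with a 2 byte, network order length field (RFC 1035 section 4.2.2),
# which is why the handler above unpacks the first two bytes as the record length and slices
# them off before comparing against the buffered payload. the helper name is hypothetical.
import struct

def split_length_prefixed(stream_data):
    '''return (complete_messages, leftover_bytes) parsed from a DNS TCP/TLS stream buffer.'''
    messages = []
    while len(stream_data) >= 2:
        msg_len = struct.unpack('!H', stream_data[:2])[0]
        if (len(stream_data) - 2 < msg_len):
            # partial message. wait for more data from the socket before parsing further.
            break

        messages.append(stream_data[2:2 + msg_len])
        stream_data = stream_data[2 + msg_len:]

    return messages, stream_data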
def _register_new_socket(self):
    for dns_server in self._DNSServer.dns_servers:
        # if server is down we will skip over it
        if (not dns_server[self._protocol]):
            continue

        # never fail so will always return True
        return self._create_socket(dns_server['ip'])

    else:
        Log.critical(f'[{self._protocol}] No DNS servers available.')
def _tls_connect(self, tls_server):
    Log.dprint(f'[{tls_server}/{self._protocol.name}] Opening secure socket.')  # pylint: disable=no-member

    sock = socket(AF_INET, SOCK_STREAM)
    sock.settimeout(RELAY_TIMEOUT)

    dns_sock = self._tls_context.wrap_socket(sock, server_hostname=tls_server)
    try:
        dns_sock.connect((tls_server, PROTO.DNS_TLS))
    except OSError as E:
        Log.error(f'[{tls_server}/{self._protocol.name}] Failed to connect to server: {E}')  # pylint: disable=no-member

    except Exception as E:
        Log.console(f'[{tls_server}/{self._protocol.name}] TLS context error while attempting to connect to server: {E}')  # pylint: disable=no-member
        Log.debug(f'[{tls_server}/{self._protocol.name}] TLS context error while attempting to connect to server: {E}')  # pylint: disable=no-member

    else:
        self._relay_conn = RELAY_CONN(tls_server, dns_sock, dns_sock.send, dns_sock.recv, dns_sock.version())

        return True

    return None
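# NOTE (editor): illustrative sketch only. self._tls_context used above is not defined in this
# section; a minimal stand-in, assuming the standard library ssl module with certificate
# verification against the system trust store, could look like this. the function name is an
# assumption, not the original module's API.
import ssl

def _build_tls_context():
    tls_context = ssl.create_default_context()
    tls_context.verify_mode = ssl.CERT_REQUIRED
    tls_context.check_hostname = True

    return tls_context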
def _register_new_socket(self):  #, client_query=None):
    for tls_server in self._DNSServer.dns_servers:
        # skipping over known down server
        if (not tls_server[self._protocol]):
            continue

        # attempting to connect via tls. if successful will return True, otherwise mark server as
        # down and try next server.
        if self._tls_connect(tls_server['ip']):
            return True

        self.mark_server_down(remote_server=tls_server['ip'])

    else:
        self._DNSServer.tls_up = False

        Log.error(f'[{self._protocol}] No DNS servers available.')
def _request_queue(self):
    return_ready = self.REQ_TRACKER.return_ready

    while True:
        # this blocks until request tracker returns (at least 1 client query has been inspected)
        requests = return_ready()

        for client_query, decision in requests:
            # if request is allowed, search cache before sending to relay.
            if decision is DNS.ALLOWED and not self._cached_response(client_query):
                self._handle_query(client_query)

                Log.debug(f'{self.protocol.name} Relay ALLOWED | {client_query}')  # pylint: disable=no-member
def _auto_top_domains(self):
    if (self.clear_top_domains):
        self._dom_counter = Counter()

        self._reset_flag('top_domains')

    most_common_doms = self._dom_counter.most_common
    self._top_domains = {
        dom[0]: cnt for cnt, dom in enumerate(most_common_doms(TOP_DOMAIN_COUNT), 1)
    }

    request_handler, dns_packet = self._request_handler, self._dns_packet
    for domain in self._top_domains:
        request_handler(dns_packet(domain))
        fast_sleep(.1)

    Log.debug('top domains refreshed')

    write_configuration(self._top_domains, 'dns_cache')
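# NOTE (editor): illustrative sketch only, showing how Counter.most_common plus enumerate
# produce the rank-keyed mapping built above. the domain names and counts are placeholders.
from collections import Counter

_example_counter = Counter({'example.com': 42, 'example.net': 17, 'example.org': 3})

# most_common(n) returns [(domain, hit_count), ...] ordered by hit count, so enumerate(..., 1)
# assigns each domain its 1-based rank: {'example.com': 1, 'example.net': 2, 'example.org': 3}
_example_top_domains = {
    dom[0]: cnt for cnt, dom in enumerate(_example_counter.most_common(3), 1)
}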
def add(self, request, data_to_cache):
    '''add query to cache after calculating expiration time.'''
    self[request] = data_to_cache

    Log.debug(f'[{request}:{data_to_cache.ttl}] Added to standard cache.')
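# NOTE (editor): illustrative usage sketch only, not part of the original module. the cached
# record type and the cache instance name are assumptions; the original only shows that the
# stored object exposes a ttl attribute.
from collections import namedtuple

_EXAMPLE_RECORD = namedtuple('example_cached_record', 'ttl records')

# example call (hypothetical cache instance): dns_cache.add('example.com', _EXAMPLE_RECORD(ttl=300, records=[]))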