def _recv_handler(self):
        recv_buffer = []
        while True:
            try:
                data_from_server = self._relay_conn.sock.recv(1024)
            except (socket.timeout, OSError) as e:
                Log.dprint(f'RECV HANDLER: {e}')
                break

            else:
                self._reset_fail_detection()
                if (not data_from_server):
                    Log.dprint(
                        'RECV HANDLER: PIPELINE CLOSED BY REMOTE SERVER!')
                    break

                recv_buffer.append(data_from_server)
                while recv_buffer:
                    # strip the 2 byte tcp dns length prefix and compare against the advertised record length
                    current_data = b''.join(recv_buffer)[2:]
                    data_len = short_unpackf(recv_buffer[0])[0]
                    if (len(current_data) == data_len):
                        recv_buffer = []
                    elif (len(current_data) > data_len):
                        # multiple records arrived in one read. keep the remainder for the next pass
                        recv_buffer = [current_data[data_len:]]
                    else:
                        # partial record. wait for more data from the server
                        break

                    # internally generated connection keepalives are not forwarded to the responder
                    if not self.is_keepalive(current_data):
                        self.DNSServer.responder.add(current_data[:data_len])

        self._relay_conn.sock.close()
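
The receive handlers in this listing rely on small framing helpers from the project's utility module. A minimal sketch of what they could look like, assuming only the standard struct module (these definitions are assumptions, not the project's exact code):

import struct

# DNS over TCP/TLS frames each message with a 2 byte, network order length prefix (RFC 7766).
# short_unpackf presumably wraps something like this; it returns a tuple, hence the [0] above.
short_unpackf = struct.Struct('!H').unpack_from
byte_join = b''.join

def tcp_frame(dns_message):
    '''prepend the 2 byte length prefix expected by a TCP/TLS DNS server.'''
    return struct.pack('!H', len(dns_message)) + dns_message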
Example 2
    def _tls_connect(self, tls_server):
        Log.dprint(f'Opening Secure socket to {tls_server}: 853')
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

        dns_sock = self._tls_context.wrap_socket(sock,
                                                 server_hostname=tls_server)
        try:
            dns_sock.connect((tls_server, PROTO.DNS_TLS))
        except OSError:
            return None
        else:
            self._relay_conn = RELAY_CONN(tls_server, dns_sock, dns_sock.send,
                                          dns_sock.recv, dns_sock.version())

            return True
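
RELAY_CONN and PROTO.DNS_TLS are project-level definitions not shown in this listing. A plausible sketch based only on how they are used above (the field names are assumptions inferred from the constructor call):

from collections import namedtuple
from enum import IntEnum

# fields inferred from RELAY_CONN(tls_server, dns_sock, dns_sock.send, dns_sock.recv, dns_sock.version())
RELAY_CONN = namedtuple('RELAY_CONN', 'remote_ip sock send recv version')

class PROTO(IntEnum):
    DNS_TLS = 853  # matches the port referenced in the log message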
Example 3
    def _recv_handler(self, recv_buffer=[]):
        recv_buff_append = recv_buffer.append
        recv_buff_clear = recv_buffer.clear
        conn_recv = self._relay_conn.recv
        responder_add = self._DNSServer.responder.add

        while True:
            try:
                data_from_server = conn_recv(2048)

            # TODO: this likely does an attribute lookup every time; if so, we should reference socket.timeout directly
            except (socket.timeout, OSError) as e:
                Log.dprint(f'RECV HANDLER: {e}')
                break
            else:
                self._reset_fail_detection()

                # if no data is received/EOF the remote end has closed the connection
                if (not data_from_server):
                    Log.dprint(
                        'RECV HANDLER: PIPELINE CLOSED BY REMOTE SERVER!')
                    break

            recv_buff_append(data_from_server)
            while recv_buffer:
                current_data = byte_join(recv_buffer)
                data_len, data = short_unpackf(
                    current_data)[0], current_data[2:]

                # more data is needed for a complete response. NOTE: this shouldn't happen unless
                # the server sends the record length and the record data separately.
                if (len(data) < data_len): break

                # clearing the buffer since we either have nothing left to process or we will re-add
                # the leftover bytes with the next condition.
                recv_buff_clear()

                # if the local buffer holds more data than the expected record length, multiple records were
                # returned in one batch, so append the leftover bytes after removing the current record's data.
                if (len(data) > data_len):
                    recv_buff_append(data[data_len:])

                # ignoring internally generated connection keepalives
                if (data[0] != DNS.KEEPALIVE):
                    responder_add(data[:data_len])

        self._relay_conn.sock.close()
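
The inner buffer loop handles both a record split across reads and several records packed into one read. The same reassembly idea as a self-contained function, useful for testing the framing logic in isolation (names here are local to this sketch, not project code):

import struct

def reassemble(chunks):
    '''yield complete length-prefixed DNS records from an iterable of raw TCP reads.'''
    buffer = b''
    for chunk in chunks:
        buffer += chunk
        while len(buffer) >= 2:
            data_len = struct.unpack_from('!H', buffer)[0]
            if len(buffer) - 2 < data_len:
                break  # partial record. wait for more data

            yield buffer[2:2 + data_len]
            buffer = buffer[2 + data_len:]

# example: one record split across two reads, then two records in a single read
reads = [b'\x00\x03ab', b'c', b'\x00\x01x\x00\x01y']
assert list(reassemble(reads)) == [b'abc', b'x', b'y']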
Example 4
    def _tls_connect(self, tls_server):
        Log.dprint(f'Opening Secure socket to {tls_server}: 853')
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)

        dns_sock = self._tls_context.wrap_socket(sock,
                                                 server_hostname=tls_server)
        try:
            dns_sock.connect((tls_server, PROTO.DNS_TLS))
        except OSError:
            return None
        else:
            return True

        # NOTE: is this ok if we fail to connect? seems a little weird after looking at it again.
        finally:
            self._relay_conn = RELAY_CONN(tls_server, dns_sock, dns_sock.send,
                                          dns_sock.recv, dns_sock.version())
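
The NOTE above is a fair concern: a finally clause runs on both return paths, so the relay connection object is rebuilt even when connect() raised and None is returned. A minimal illustration of that control flow (not project code):

def connect_like(fail):
    try:
        if fail:
            raise OSError('connect failed')
    except OSError:
        return None
    else:
        return True
    finally:
        # runs on BOTH paths before either return completes
        print('finally: relay connection object would be assigned here')

connect_like(fail=True)   # prints, then returns None
connect_like(fail=False)  # prints, then returns True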
Example 5
    def _recv_handler(self, recv_buffer=[]):
        recv_buff_append = recv_buffer.append
        recv_buff_clear = recv_buffer.clear
        conn_recv = self._relay_conn.recv
        responder_add = self._DNSServer.responder.add

        while True:
            try:
                data_from_server = conn_recv(2048)

            # TODO: this likely does an attribute lookup every time; if so, we should reference socket.timeout directly
            except (socket.timeout, OSError) as e:
                Log.dprint(f'RECV HANDLER: {e}')
                break
            else:
                self._reset_fail_detection()

                # if no data is received/EOF the remote end has closed the connection
                if (not data_from_server):
                    Log.dprint(
                        'RECV HANDLER: PIPELINE CLOSED BY REMOTE SERVER!')
                    break

            recv_buff_append(data_from_server)
            while recv_buffer:
                current_data = byte_join(recv_buffer)
                data_len, data = short_unpackf(
                    current_data)[0], current_data[2:]

                # more data is needed for a complete response.
                if (len(data) < data_len): break

                # clearing the buffer. this is the easiest way to deal with the unknown condition of single or
                # multiple dns records contained in one packet.
                recv_buff_clear()

                # if the buffered data is longer than the identified record length, multiple records are contained
                # in the packet, so we will append the remainder back into the buffer.
                if (len(data) > data_len):
                    recv_buff_append(data[data_len:])

                # filtering internal connection keepalives
                if (data[0] != DNS.KEEPALIVE):
                    responder_add(data[:data_len])

        self._relay_conn.sock.close()
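
The keepalive filter compares the first byte of the record, which is the high byte of the DNS transaction id, against a sentinel. How that sentinel is defined is not shown in this listing; the value below is purely an assumption for illustration:

from enum import IntEnum

class DNS(IntEnum):
    # hypothetical sentinel value: the proxy would tag its own keepalive queries with a
    # reserved transaction-id high byte so the receive handler can drop the responses
    KEEPALIVE = 0xFF

# indexing a bytes object yields an int in Python 3, so it compares cleanly with an IntEnum member
response = bytes([DNS.KEEPALIVE, 0x01]) + b'\x00' * 10
assert response[0] == DNS.KEEPALIVE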
Example 6
    def _tls_connect(self, tls_server):
        Log.dprint(f'Opening Secure socket to {tls_server}: 853')
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # NOTE: this should improve sending performance since we expect a dns record to only be a small
        # portion of the available bytes in the MTU/max bytes (1500). seems to provide no improvement after 1 run.
        # there could be other bottlenecks in play so we can re-evaluate later.
        # sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
        dns_sock = self._tls_context.wrap_socket(sock,
                                                 server_hostname=tls_server)
        try:
            dns_sock.connect((tls_server, PROTO.DNS_TLS))
        except OSError:
            return None
        else:
            return True
        finally:
            self._relay_conn = RELAY_CONN(tls_server, dns_sock)
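
For reference, the option the comment above weighs is a standard socket option and is set on the plain socket before it is wrapped for TLS; whether it actually helps this workload is, as the comment notes, unproven:

import socket

# TCP_NODELAY disables Nagle's algorithm so small writes (a DNS query is far below the
# ~1500 byte MTU) go out immediately instead of being coalesced with later writes
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)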
Example 7
    def _dns_inspect(self):
        packet, Proxy, whitelisted = self._packet, self._Proxy, False

        # TODO: make this apply only to the global whitelist; currently tor whitelist entries are
        # treated as part of it.
        # checking whitelist.
        if (packet.src_ip in Proxy.whitelist.ip):
            whitelisted = True

        # NOTE: dns whitelist does not override tld blocks at the moment
        # signature/blacklist check. if either matches, results will be returned
        for i, enum_request in enumerate(packet.requests):
            # TLD (top level domain) block | only the first index is checked; nested so the
            # continue applies to index 0 only
            if (not i):
                if Proxy.signatures.tld.get(enum_request):
                    Log.dprint(f'TLD Block: {packet.request}')

                    return DNS_REQUEST_RESULTS(True, 'tld filter',
                                               enum_request)
                continue

            # NOTE: allowing malicious category overrides (for false positives)
            if (enum_request in Proxy.whitelist.dns):

                return DNS_REQUEST_RESULTS(False, None, None)

            # ip whitelist overrides configured blacklist
            if (not whitelisted and enum_request in Proxy.blacklist.dns):
                Log.dprint(f'Blacklist Block: {packet.request}')

                return DNS_REQUEST_RESULTS(True, 'blacklist', 'time based')

            # pulling domain category if a signature is present.
            category = self._bin_search(enum_request)
            if category and self._block_query(category, whitelisted):
                Log.dprint(f'Category Block: {packet.request}')

                return DNS_REQUEST_RESULTS(True, 'category', category)

        # Keyword search within domain || block if match
        for keyword, category in Proxy.signatures.keyword:
            if (keyword in packet.request):
                Log.dprint(f'Keyword Block: {packet.request}')

                return DNS_REQUEST_RESULTS(True, 'keyword', category)

        # DEFAULT ACTION | ALLOW
        return DNS_REQUEST_RESULTS(False, None, None)
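
DNS_REQUEST_RESULTS is another project container that does not appear in this listing. The comment in the next example describes its fields as (redirect, block type, category), so a sketch consistent with that usage could be:

from collections import namedtuple

# field meanings taken from the comment in the next example:
# DNS_REQUEST_RESULTS(redirect, block type, category)
DNS_REQUEST_RESULTS = namedtuple('DNS_REQUEST_RESULTS', 'redirect block_type category')

allowed = DNS_REQUEST_RESULTS(False, None, None)
blocked = DNS_REQUEST_RESULTS(True, 'category', 'malware')  # illustrative values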
Example 8
    def _dns_inspect(self, packet):
        Proxy = self._Proxy  # NOTE: consider sending this in on all inspection classes?

        whitelisted = self._ip_whitelist_get(packet.src_ip, False)

        # signature/ blacklist check.
        # DNS_REQUEST_RESULTS(redirect, block type, category)
        # NOTE: dns whitelist does not override tld blocks at the moment | this is most likely the desired setup
        for i, enum_request in enumerate(packet.requests):
            # TLD (top level domain) block | only the first index is checked; nested so the continue applies to index 0 only
            if (not i):
                if self._tld_get(enum_request):
                    Log.dprint(f'TLD Block: {packet.request}')

                    return DNS_REQUEST_RESULTS(True, 'tld filter',
                                               enum_request)

                continue

            # NOTE: allowing malicious category overrides (for false positives)
            if (enum_request in Proxy.whitelist.dns):

                return DNS_REQUEST_RESULTS(False, None, None)

            # ip whitelist overrides configured blacklist
            if (not whitelisted and enum_request in Proxy.blacklist.dns):
                Log.dprint(f'Blacklist Block: {packet.request}')

                return DNS_REQUEST_RESULTS(True, 'blacklist', 'time based')

            # pulling domain category if a signature is present. | NOTE: this now uses the imported cython function factory
            category = DNS_CAT(_recursive_binary_search(enum_request))
            if (category is not DNS_CAT.NONE) and self._block_query(
                    category, whitelisted):
                Log.dprint(f'Category Block: {packet.request}')

                return DNS_REQUEST_RESULTS(True, 'category', category)

        # Keyword search within domain || block if match
        for keyword, category in Proxy.signatures.keyword:
            if (keyword in packet.request):
                Log.dprint(f'Keyword Block: {packet.request}')

                return DNS_REQUEST_RESULTS(True, 'keyword', category)

        # DEFAULT ACTION | ALLOW
        return DNS_REQUEST_RESULTS(False, None, None)
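
The compiled _recursive_binary_search factory is not part of this listing. A pure-Python stand-in for the same idea, searching a sorted signature table with bisect (the table layout and category values here are assumptions for illustration only):

from bisect import bisect_left
from enum import IntEnum

class DNS_CAT(IntEnum):
    NONE = 0
    MALWARE = 1  # illustrative member; the real enum has many categories

# sorted (domain, category) pairs standing in for the project's signature bins
_signatures = [('badhost.example', DNS_CAT.MALWARE)]
_keys = [domain for domain, _ in _signatures]

def category_search(request):
    '''return the matching category for a request or DNS_CAT.NONE when no signature exists.'''
    i = bisect_left(_keys, request)
    if (i < len(_keys) and _keys[i] == request):
        return _signatures[i][1]

    return DNS_CAT.NONE

assert category_search('badhost.example') is DNS_CAT.MALWARE
assert category_search('goodhost.example') is DNS_CAT.NONE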
Example 9
    def add(self, request, data_to_cache):
        '''add query to cache after calculating expiration time.'''
        self[request] = data_to_cache

        Log.dprint(f'CACHE ADD | NAME: {request} TTL: {data_to_cache.ttl}')
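
The docstring mentions an expiration time that the snippet itself never computes. A minimal sketch of how a TTL-aware add/lookup could work, assuming a dict subclass and a ttl attribute on the cached record (both assumptions, not the project's implementation):

import time

class DNSCache(dict):
    '''minimal TTL cache sketch; the real implementation also handles pruning and logging.'''

    def add(self, request, data_to_cache):
        # store the record alongside its absolute expiration time
        self[request] = (data_to_cache, time.time() + data_to_cache.ttl)

    def get_valid(self, request):
        cached = self.get(request)
        if (cached is None):
            return None

        record, expires_at = cached
        if (time.time() >= expires_at):
            del self[request]
            return None

        return record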