Example #1
0
    def __init__(self, tree, count, inter, iface, strict, capmethod,
                 scallback, rcallback, sudata, rudata, excback):
        """
        Build a SequenceConsumer.

        @param tree a tree representation of the packets to send
        @param count how many times the tree has to be sent
        @param inter the interval to wait between 2 consecutive sends
        @param iface the interface to send/recv on
        @param strict strict reply-matching flag
        @param capmethod the capture method: 0 for native, 1 for tcpdump,
                         2 for dumpcap
        @param scallback the send callback
        @param rcallback the receive callback
        @param sudata user data for send callback
        @param rudata user data for receive callback
        @param excback exception callback
        """

        assert len(tree) > 0

        # Sequence description and timing parameters.
        self.tree, self.count, self.inter = tree, count, inter
        self.iface, self.strict = iface, strict
        self.capmethod = capmethod
        self.timeout = None

        # Capture-helper processes and open sockets.
        self.procs = {}
        self.sockets = []

        # Bookkeeping for packets awaiting replies.
        self.recv_list = defaultdict(list)

        # Runtime state flags and synchronization primitives.
        self.receiving = False
        self.internal = False
        self.active_helpers = 0
        self.active_helpers_lock = Lock()
        self.running = Condition()

        # User callbacks with their opaque user data.
        self.scallback, self.sudata = scallback, sudata
        self.rcallback, self.rudata = rcallback, rudata
        self.excback = excback

        # Worker pool; the first queued job drives the whole sequence.
        self.pool = ThreadPool(2, 10)
        self.pool.queue_work(None, self.__notify_exc, self.__check)

        log.debug("%d total packets to send for %d times" %
                  (len(tree), self.count))
Example #2
0
    def __init__(self, tree, count, inter, iface, strict, capmethod,
                 scallback, rcallback, sudata, rudata, excback):
        """
        Build a SequenceConsumer.

        @param tree a tree representation of the packets to send
        @param count how many times the tree has to be sent
        @param inter the interval to wait between 2 consecutive sends
        @param iface the interface to send/recv on
        @param strict strict reply-matching flag
        @param capmethod the capture method: 0 for native, 1 for tcpdump,
                         2 for dumpcap
        @param scallback the send callback
        @param rcallback the receive callback
        @param sudata user data for send callback
        @param rudata user data for receive callback
        @param excback exception callback
        """

        assert len(tree) > 0

        # Callbacks and their user data come first.
        self.scallback = scallback
        self.rcallback = rcallback
        self.excback = excback
        self.sudata = sudata
        self.rudata = rudata

        # Sequence description and timing parameters.
        self.tree = tree
        self.count = count
        self.inter = inter
        self.iface = iface
        self.strict = strict
        self.capmethod = capmethod
        self.timeout = None

        # Capture-helper processes, open sockets and reply bookkeeping.
        self.procs = {}
        self.sockets = []
        self.recv_list = defaultdict(list)

        # Runtime state flags and synchronization primitives.
        self.receiving = False
        self.internal = False
        self.active_helpers = 0
        self.active_helpers_lock = Lock()
        self.running = Condition()

        # Worker pool; the first queued job drives the whole sequence.
        self.pool = ThreadPool(2, 10)
        self.pool.queue_work(None, self.__notify_exc, self.__check)

        log.debug("%d total packets to send for %d times" %
                  (len(tree), self.count))
Example #3
0
class SequenceConsumer(Interruptable):
    """Send a tree of packets and dispatch replies through user callbacks.

    Work is driven by a private ThreadPool: one job runs __check(), which
    queues per-packet send workers and, depending on capmethod, either a
    native receive worker (sockets + select) or per-interface helper
    readers (tcpdump/dumpcap processes).
    """

    # This class most probably suffers when there's repeated sequence
    # because we destroy and then recreate various process when the refcount
    # associated to the sockets or process reach 0. Should be optimized to
    # better handle this situation.

    def __init__(self, tree, count, inter, iface, strict, capmethod, \
                 scallback, rcallback, sudata, rudata, excback):
        """
        Create a SequenceConsumer object.

        @param tree a tree representation of the packets to send
        @param count how many time we have to send the tree
        @param inter the interval to wait between 2 consecutive sends
        @param iface the interface to send/recv on
        @param strict if True a reply must answer() the packet it is
                      matched against (see the recv workers)
        @param capmethod the method to use 0 for native, 1 for tcpdump, 2 for
                         dumpcap
        @param scallback the send callback
        @param rcallback the receive callback
        @param sudata user data for send callback
        @param rudata user data for receive callback
        @param excback exception callback
        """

        assert len(tree) > 0

        self.tree = tree
        self.count = count
        self.inter = inter
        self.strict = strict
        self.iface = iface
        self.timeout = None
        self.capmethod = capmethod

        # Helper processes ({iface: [process, outfile, refcount]}) and
        # native sockets ([sock, refcount] pairs).
        self.procs = {}
        self.sockets = []

        # Parent nodes awaiting a reply, keyed by the packet's hashret().
        self.recv_list = defaultdict(list)
        self.receiving = False

        self.internal = False

        self.active_helpers = 0
        self.active_helpers_lock = Lock()
        self.running = Condition()

        self.pool = ThreadPool(2, 10)
        self.pool.queue_work(None, self.__notify_exc, self.__check)

        self.scallback = scallback
        self.rcallback = rcallback
        self.excback = excback

        self.sudata, self.rudata = sudata, rudata

        log.debug("%d total packets to send for %d times" %
                  (len(tree), self.count))

    def isAlive(self):
        """Return True while the consumer is running."""
        return self.internal

    def stop(self):
        """Request shutdown: clear the run flag and stop the pool."""
        self.internal = False

        self.pool.stop()
        #self.pool.join_threads()

    def terminate(self):
        """Alias of stop()."""
        self.stop()

    def start(self):
        """Start the worker pool unless it is already active."""
        if self.internal or self.receiving:
            log.debug("Pool already started")
            return

        self.receiving = True
        self.internal = True

        self.pool.start()

    def __check(self):
        """Main control loop: run the sequence `count` times.

        A count of 0 means infinite (internally -1, never decremented).
        Each iteration queues the first packet of the tree, then blocks
        on self.running until a receive worker signals completion.
        """
        # This is a function to allow the sequence
        # to be respawned n times

        self.receiving = True
        self.running.acquire()

        if not self.count:
            log.debug("This is an infinite loop")
            self.count = -1

        while self.internal and self.count:
            self.receiving = True

            if self.count > 0:
                log.debug("Next step %d" % self.count)
            else:
                log.debug("Another loop (infinite)")

            self.__notify_send(None)

            if self.capmethod == 0:
                # If we use native capmethod we should run this
                self.pool.queue_work(None, self.__notify_exc,
                                     self.__recv_worker)

            for node in self.tree.get_children():
                log.debug("Adding first packet of the sequence")
                self.pool.queue_work(None, self.__notify_exc,
                                     self.__send_worker, node)
                break

            log.debug("Waiting recv to begin another loop")

            self.running.wait()

            if self.count > 0:
                self.count -= 1

        self.running.release()

        if not self.internal:
            log.debug("Stopping the thread pool (async)")
            self.pool.stop()

        log.debug("Finished")

    def __recv_helper_worker(self, udata):
        """Read packets captured by a tcpdump/dumpcap helper process.

        Registers itself in active_helpers; the last worker to leave
        wakes __check() and emits the final (None, None, False)
        notification.
        """
        (process, outfile) = udata

        self.active_helpers_lock.acquire()

        if self.active_helpers is not None:
            self.active_helpers += 1

        self.active_helpers_lock.release()

        if self.timeout is not None:
            stoptime = time.time() + self.timeout

        # Iterate bind_reader() until it yields a usable reader for the
        # helper's output file (or we time out / are stopped).
        for reader in bind_reader(process, outfile):
            if self.timeout is not None:
                remain = stoptime - time.time()

                if remain <= 0:
                    self.receiving = False
                    log.debug("Timeout here!")
                    break

            # NOTE(review): self.running is a Condition object and is
            # always truthy, so only self.receiving can break here.
            if not self.running or not self.receiving:
                break

            if reader:
                reader, outfile_size, position = reader

        # NOTE(review): reported_packets is never updated, so the
        # "while report_idx < reported_packets" guard below can never
        # run; get_n_packets(self.process) also references self.process,
        # which is never assigned (the local is `process`) -- confirm.
        report_idx = 0
        reported_packets = 0

        while self.running and self.receiving:
            if self.timeout is not None:
                remain = stoptime - time.time()

                if remain <= 0:
                    self.receiving = False
                    log.debug("Timeout here!")
                    break

            while report_idx < reported_packets:
                report_idx = get_n_packets(self.process)

            r = reader.read_packet()

            try:
                # The helper capture packets at L2 so we need to drop the
                # first protocol to make the match against the packets.

                r = r[1]
            except:
                # Bare except: packets without an L2 payload are skipped.
                continue

            if not r:
                break

            is_reply = True
            my_node = None
            requested_process = None

            if self.strict:
                is_reply = False
                hashret = r.hashret()

                if hashret in self.recv_list:
                    for (idx, process, node) in self.recv_list[hashret]:
                        packet = node.get_data().packet.root

                        if r.answers(packet):
                            requested_process = process
                            my_node = node
                            is_reply = True

                            break

            elif not self.strict and my_node is None:
                # Get the first packet

                # NOTE: `list` shadows the builtin within this scope;
                # raises IndexError if recv_list is empty.
                list = [(v, k) for k, v in self.recv_list.items()]
                list.sort()

                requested_process = list[0][0][0][1]
                my_node = list[0][0][0][2]
            else:
                continue

            # Now cleanup the sockets
            for key in self.procs:
                if self.procs[key][0] == requested_process:
                    self.procs[key][2] -= 1

                    if self.procs[key][2] == 0:
                        process, outfile, refcount = self.procs[key]

                        log.debug("Killing helper %s cause refcount == 0" % \
                                  process)

                        kill_helper(process)
                        del self.procs[key]

                    break

            if is_reply:
                self.__notify_recv(my_node, MetaPacket(r), is_reply)

                # Queue another send thread
                for node in my_node.get_children():
                    self.pool.queue_work(None, self.__notify_exc,
                                         self.__send_worker, node)
            else:
                self.__notify_recv(None, MetaPacket(r), is_reply)

        # Here we've to check if this current thread is also the last
        # thread used to receive packets from the helper process.

        self.active_helpers_lock.acquire()

        if self.active_helpers is not None:
            self.active_helpers -= 1

            if self.active_helpers == 0:
                log.debug("Trying to exit")

                self.running.acquire()
                self.running.notify()
                self.running.release()

                self.receiving = False
                self.active_helpers = None

                self.active_helpers_lock.release()

                self.__notify_recv(None, None, False)

                return

        self.active_helpers_lock.release()

    def __recv_worker(self):
        """Native (capmethod == 0) receive loop over self.sockets.

        Matches incoming packets against recv_list, decrements socket
        refcounts, and queues send workers for the children of matched
        nodes.  On exit it wakes __check() and sends a final
        (None, None, False) notification.
        """
        # Here we should receive the packet and check against
        # recv_list if the packet match remove from the list
        # and start another send_worker

        if self.timeout is not None:
            stoptime = time.time() + self.timeout

        while self.internal and self.receiving:
            r = []
            inmask = [socket for socket, refcount in self.sockets]

            if self.timeout is not None:
                remain = stoptime - time.time()

                if remain <= 0:
                    self.receiving = False
                    log.debug("Timeout here!")
                    break

            # No sockets registered yet: avoid spinning the CPU.
            if not inmask:
                time.sleep(0.05)

            if FREEBSD or DARWIN:
                inp, out, err = select(inmask, [], [], 0.05)

                for sock in inp:
                    r.append(sock.nonblock_recv())

            elif WINDOWS:
                for sock in inmask:
                    r.append(sock.recv(MTU))
            else:
                # FIXME: needs a revision here! possibly packet lost
                inp, out, err = select(inmask, [], [], 0.05)

                for sock in inp:
                    r.append(sock.recv(MTU))

            if not r:
                continue

            # A packet arrived: push the deadline forward.
            if self.timeout is not None:
                stoptime = time.time() + self.timeout

            for precv in r:

                if precv is None:
                    continue

                is_reply = True
                my_node = None
                requested_socket = None

                if self.strict:
                    is_reply = False
                    hashret = precv.hashret()

                    if hashret in self.recv_list:
                        for (idx, sock, node) in self.recv_list[hashret]:
                            packet = node.get_data().packet.root

                            if precv.answers(packet):
                                requested_socket = sock
                                my_node = node
                                is_reply = True

                                break

                elif not self.strict and my_node is None:
                    # Get the first packet

                    # NOTE: `list` shadows the builtin within this scope;
                    # raises IndexError if recv_list is empty.
                    list = [(v, k) for k, v in self.recv_list.items()]
                    list.sort()

                    requested_socket = list[0][0][0][1]
                    my_node = list[0][0][0][2]
                else:
                    continue

                # Now cleanup the sockets
                for idx in xrange(len(self.sockets)):
                    if self.sockets[idx][0] == requested_socket:
                        self.sockets[idx][1] -= 1

                        if self.sockets[idx][1] == 0:
                            self.sockets.remove(self.sockets[idx])

                        break

                if is_reply:
                    self.__notify_recv(my_node, MetaPacket(precv), is_reply)

                    # Queue another send thread
                    for node in my_node.get_children():
                        self.pool.queue_work(None, self.__notify_exc,
                                             self.__send_worker, node)
                else:
                    self.__notify_recv(None, MetaPacket(precv), is_reply)

        log.debug("Trying to exit")

        self.running.acquire()
        self.running.notify()
        self.running.release()

        self.receiving = False

        self.__notify_recv(None, None, False)

    def __send_worker(self, node):
        """Send the packet held by `node` and schedule what follows.

        Parent nodes are registered in recv_list so their replies can be
        matched (via a socket for the native method, or a helper process
        otherwise); afterwards the next sibling, if any, is queued.
        """
        if not self.internal:
            log.debug("Discarding packet")
            return

        obj = node.get_data()

        sock = get_socket_for(obj.packet, iff=self.iface)

        if node.is_parent():
            # Here we should add the node to the dict
            # to check the the replies for a given time
            # and continue the sequence with the next
            # depth.

            if self.capmethod == 0:

                # NOTE(review): self.sockets stores [sock, refcount]
                # pairs, so index(sock) never matches and every send
                # appends a fresh entry -- confirm intended behavior.
                try:
                    idx = self.sockets.index(sock)
                    self.sockets[idx][1] += 1
                except:
                    self.sockets.append([sock, 1])

                key = obj.packet.root.hashret()
                self.recv_list[key].append((len(self.recv_list), sock, node))

                log.debug(
                    "Adding socket to the list for receiving my packet %s" %
                    sock)
            else:
                # TODO: here we could create another thread that spawns tcpdump
                # process. We have to resize also the pool directly leaving n
                # available threads by having n sniff process and only 1 to send
                # packets. All controls should be in __check()

                iface = get_iface_from_ip(obj.packet)

                if not iface in self.procs:
                    process, outfile = run_helper(self.capmethod - 1, iface)
                    self.procs[iface] = [process, outfile, 1]

                    # Just increase the size of our pool to avoid starvation
                    self.pool.resize(maxthreads=self.pool.max + 1)

                    # And now start a new worker
                    self.pool.queue_work(None, self.__notify_exc,
                                         self.__recv_helper_worker,
                                         (process, outfile))
                else:
                    self.procs[iface][2] += 1
                    process, outfile = self.procs[iface][0:2]
                    log.debug("A process sniffing on %s exists." % iface)

                key = obj.packet.root.hashret()
                self.recv_list[key].append(
                    (len(self.recv_list), process, node))

        sock.send(obj.packet.root)

        self.__notify_send(node)

        log.debug("Sleeping %f after send" % self.inter)
        time.sleep(self.inter + obj.inter)

        if self.internal and node.get_parent():

            parent = node.get_parent()
            next = parent.get_next_of(node)

            if next:
                log.debug("Processing next packet")
                self.pool.queue_work(None, self.__notify_exc,
                                     self.__send_worker, next)

            else:
                log.debug("Last packet of this level")
                self.__notify_recv(None, None, False)
        else:
            log.debug("Last packet sent")
            self.__notify_recv(None, None, False)

    def __notify_exc(self, exc):
        """Forward an exception to excback (or dump it) and stop.

        Callbacks are cleared first so no further send/recv
        notifications are delivered.
        """
        self.scallback = None
        self.rcallback = None

        # Python 2 style: socket.error is indexable as (errno, message).
        if isinstance(exc, socket.error):
            exc = Exception(str(exc[1]))

        if self.excback:
            self.excback(exc)
        else:
            log.debug("Exception not properly handled. Dumping:")

            traceback.print_exc(file=sys.stdout)

        self.stop()

    def __notify_send(self, node):
        """Invoke the send callback; a true return value stops the run."""
        log.debug("Packet sent")

        if not self.scallback:
            return

        packet = None
        parent = False

        if node is not None:
            packet = node.get_data().packet
            parent = node.is_parent()

        if self.scallback(packet, parent, self.sudata):

            log.debug("send_callback want to exit")
            self.internal = False

    def __notify_recv(self, node, reply, is_reply):
        """Invoke the recv callback; a true return value stops the run."""
        log.debug("Packet received (is reply? %s)" % is_reply)

        if not self.rcallback:
            return

        packet = None

        if node is not None:
            packet = node.get_data().packet

        if self.rcallback(packet, reply, is_reply, self.rudata):

            log.debug("recv_callback want to exit")
            self.internal = False
Example #4
0
class SequenceConsumer(Interruptable):
    """Send a tree of packets and dispatch replies through user callbacks.

    Work is driven by a private ThreadPool: one job runs __check(), which
    queues per-packet send workers and, depending on capmethod, either a
    native receive worker (sockets + select) or per-interface helper
    readers (tcpdump/dumpcap processes).
    """

    # This class most probably suffers when there's repeated sequence
    # because we destroy and then recreate various process when the refcount
    # associated to the sockets or process reach 0. Should be optimized to
    # better handle this situation.

    def __init__(self, tree, count, inter, iface, strict, capmethod, \
                 scallback, rcallback, sudata, rudata, excback):

        """
        Create a SequenceConsumer object.

        @param tree a tree representation of the packets to send
        @param count how many time we have to send the tree
        @param inter the interval to wait between 2 consecutive sends
        @param iface the interface to send/recv on
        @param strict if True a reply must answer() the packet it is
                      matched against (see the recv workers)
        @param capmethod the method to use 0 for native, 1 for tcpdump, 2 for
                         dumpcap
        @param scallback the send callback
        @param rcallback the receive callback
        @param sudata user data for send callback
        @param rudata user data for receive callback
        @param excback exception callback
        """

        assert len(tree) > 0

        self.tree = tree
        self.count = count
        self.inter = inter
        self.strict = strict
        self.iface = iface
        self.timeout = None
        self.capmethod = capmethod

        # Helper processes ({iface: [process, outfile, refcount]}) and
        # native sockets ([sock, refcount] pairs).
        self.procs = {}
        self.sockets = []

        # Parent nodes awaiting a reply, keyed by the packet's hashret().
        self.recv_list = defaultdict(list)
        self.receiving = False

        self.internal = False

        self.active_helpers = 0
        self.active_helpers_lock = Lock()
        self.running = Condition()

        self.pool = ThreadPool(2, 10)
        self.pool.queue_work(None, self.__notify_exc, self.__check)

        self.scallback = scallback
        self.rcallback = rcallback
        self.excback = excback

        self.sudata, self.rudata = sudata, rudata

        log.debug("%d total packets to send for %d times" % (len(tree),
                                                             self.count))

    def isAlive(self):
        """Return True while the consumer is running."""
        return self.internal

    def stop(self):
        """Request shutdown: clear the run flag and stop the pool."""
        self.internal = False

        self.pool.stop()
        #self.pool.join_threads()

    def terminate(self):
        """Alias of stop()."""
        self.stop()

    def start(self):
        """Start the worker pool unless it is already active."""
        if self.internal or self.receiving:
            log.debug("Pool already started")
            return

        self.receiving = True
        self.internal = True

        self.pool.start()

    def __check(self):
        """Main control loop: run the sequence `count` times.

        A count of 0 means infinite (internally -1, never decremented).
        Each iteration queues the first packet of the tree, then blocks
        on self.running until a receive worker signals completion.
        """
        # This is a function to allow the sequence
        # to be respawned n times

        self.receiving = True
        self.running.acquire()

        if not self.count:
            log.debug("This is an infinite loop")
            self.count = -1

        while self.internal and self.count:
            self.receiving = True

            if self.count > 0:
                log.debug("Next step %d" % self.count)
            else:
                log.debug("Another loop (infinite)")

            self.__notify_send(None)

            if self.capmethod == 0:
                # If we use native capmethod we should run this
                self.pool.queue_work(None, self.__notify_exc,
                                     self.__recv_worker)

            for node in self.tree.get_children():
                log.debug("Adding first packet of the sequence")
                self.pool.queue_work(None, self.__notify_exc,
                                     self.__send_worker, node)
                break

            log.debug("Waiting recv to begin another loop")

            self.running.wait()

            if self.count > 0:
                self.count -= 1

        self.running.release()

        if not self.internal:
            log.debug("Stopping the thread pool (async)")
            self.pool.stop()

        log.debug("Finished")

    def __recv_helper_worker(self, udata):
        """Read packets captured by a tcpdump/dumpcap helper process.

        Registers itself in active_helpers; the last worker to leave
        wakes __check() and emits the final (None, None, False)
        notification.
        """
        (process, outfile) = udata

        self.active_helpers_lock.acquire()

        if self.active_helpers is not None:
            self.active_helpers += 1

        self.active_helpers_lock.release()

        if self.timeout is not None:
            stoptime = time.time() + self.timeout

        # Iterate bind_reader() until it yields a usable reader for the
        # helper's output file (or we time out / are stopped).
        for reader in bind_reader(process, outfile):
            if self.timeout is not None:
                remain = stoptime - time.time()

                if remain <= 0:
                    self.receiving = False
                    log.debug("Timeout here!")
                    break

            # NOTE(review): self.running is a Condition object and is
            # always truthy, so only self.receiving can break here.
            if not self.running or not self.receiving:
                break

            if reader:
                reader, outfile_size, position = reader

        # NOTE(review): reported_packets is never updated, so the
        # "while report_idx < reported_packets" guard below can never
        # run; get_n_packets(self.process) also references self.process,
        # which is never assigned (the local is `process`) -- confirm.
        report_idx = 0
        reported_packets = 0

        while self.running and self.receiving:
            if self.timeout is not None:
                remain = stoptime - time.time()

                if remain <= 0:
                    self.receiving = False
                    log.debug("Timeout here!")
                    break

            while report_idx < reported_packets:
                report_idx = get_n_packets(self.process)

            r = reader.read_packet()

            try:
                # The helper capture packets at L2 so we need to drop the
                # first protocol to make the match against the packets.

                r = r[1]
            except:
                # Bare except: packets without an L2 payload are skipped.
                continue

            if not r:
                break

            is_reply = True
            my_node = None
            requested_process = None

            if self.strict:
                is_reply = False
                hashret = r.hashret()

                if hashret in self.recv_list:
                    for (idx, process, node) in self.recv_list[hashret]:
                        packet = node.get_data().packet.root

                        if r.answers(packet):
                            requested_process = process
                            my_node = node
                            is_reply = True

                            break

            elif not self.strict and my_node is None:
                # Get the first packet

                # NOTE: `list` shadows the builtin within this scope;
                # raises IndexError if recv_list is empty.
                list = [(v, k) for k, v in self.recv_list.items()]
                list.sort()

                requested_process = list[0][0][0][1]
                my_node = list[0][0][0][2]
            else:
                continue

            # Now cleanup the sockets
            for key in self.procs:
                if self.procs[key][0] == requested_process:
                    self.procs[key][2] -= 1

                    if self.procs[key][2] == 0:
                        process, outfile, refcount = self.procs[key]

                        log.debug("Killing helper %s cause refcount == 0" % \
                                  process)

                        kill_helper(process)
                        del self.procs[key]

                    break

            if is_reply:
                self.__notify_recv(my_node, MetaPacket(r), is_reply)

                # Queue another send thread
                for node in my_node.get_children():
                    self.pool.queue_work(None, self.__notify_exc,
                                         self.__send_worker, node)
            else:
                self.__notify_recv(None, MetaPacket(r), is_reply)

        # Here we've to check if this current thread is also the last
        # thread used to receive packets from the helper process.

        self.active_helpers_lock.acquire()

        if self.active_helpers is not None:
            self.active_helpers -= 1

            if self.active_helpers == 0:
                log.debug("Trying to exit")

                self.running.acquire()
                self.running.notify()
                self.running.release()

                self.receiving = False
                self.active_helpers = None

                self.active_helpers_lock.release()

                self.__notify_recv(None, None, False)

                return

        self.active_helpers_lock.release()

    def __recv_worker(self):
        """Native (capmethod == 0) receive loop over self.sockets.

        Matches incoming packets against recv_list, decrements socket
        refcounts, and queues send workers for the children of matched
        nodes.  On exit it wakes __check() and sends a final
        (None, None, False) notification.
        """
        # Here we should receive the packet and check against
        # recv_list if the packet match remove from the list
        # and start another send_worker

        if self.timeout is not None:
            stoptime = time.time() + self.timeout

        while self.internal and self.receiving:
            r = []
            inmask = [socket for socket, refcount in self.sockets]

            if self.timeout is not None:
                remain = stoptime - time.time()

                if remain <= 0:
                    self.receiving = False
                    log.debug("Timeout here!")
                    break

            # No sockets registered yet: avoid spinning the CPU.
            if not inmask:
                time.sleep(0.05)

            if FREEBSD or DARWIN:
                inp, out, err = select(inmask, [], [], 0.05)

                for sock in inp:
                    r.append(sock.nonblock_recv())

            elif WINDOWS:
                for sock in inmask:
                    r.append(sock.recv(MTU))
            else:
                # FIXME: needs a revision here! possibly packet lost
                inp, out, err = select(inmask, [], [], 0.05)

                for sock in inp:
                    r.append(sock.recv(MTU))

            if not r:
                continue

            # A packet arrived: push the deadline forward.
            if self.timeout is not None:
                stoptime = time.time() + self.timeout

            for precv in r:

                if precv is None:
                    continue

                is_reply = True
                my_node = None
                requested_socket = None

                if self.strict:
                    is_reply = False
                    hashret = precv.hashret()

                    if hashret in self.recv_list:
                        for (idx, sock, node) in self.recv_list[hashret]:
                            packet = node.get_data().packet.root

                            if precv.answers(packet):
                                requested_socket = sock
                                my_node = node
                                is_reply = True

                                break

                elif not self.strict and my_node is None:
                    # Get the first packet

                    # NOTE: `list` shadows the builtin within this scope;
                    # raises IndexError if recv_list is empty.
                    list = [(v, k) for k, v in self.recv_list.items()]
                    list.sort()

                    requested_socket = list[0][0][0][1]
                    my_node = list[0][0][0][2]
                else:
                    continue

                # Now cleanup the sockets
                for idx in xrange(len(self.sockets)):
                    if self.sockets[idx][0] == requested_socket:
                        self.sockets[idx][1] -= 1

                        if self.sockets[idx][1] == 0:
                            self.sockets.remove(self.sockets[idx])

                        break

                if is_reply:
                    self.__notify_recv(my_node, MetaPacket(precv), is_reply)

                    # Queue another send thread
                    for node in my_node.get_children():
                        self.pool.queue_work(None, self.__notify_exc,
                                             self.__send_worker, node)
                else:
                    self.__notify_recv(None, MetaPacket(precv), is_reply)

        log.debug("Trying to exit")

        self.running.acquire()
        self.running.notify()
        self.running.release()

        self.receiving = False

        self.__notify_recv(None, None, False)

    def __send_worker(self, node):
        """Send the packet held by `node` and schedule what follows.

        Parent nodes are registered in recv_list so their replies can be
        matched (via a socket for the native method, or a helper process
        otherwise); afterwards the next sibling, if any, is queued.
        """
        if not self.internal:
            log.debug("Discarding packet")
            return

        obj = node.get_data()

        sock = get_socket_for(obj.packet, iff=self.iface)

        if node.is_parent():
            # Here we should add the node to the dict
            # to check the the replies for a given time
            # and continue the sequence with the next
            # depth.

            if self.capmethod == 0:

                # NOTE(review): self.sockets stores [sock, refcount]
                # pairs, so index(sock) never matches and every send
                # appends a fresh entry -- confirm intended behavior.
                try:
                    idx = self.sockets.index(sock)
                    self.sockets[idx][1] += 1
                except:
                    self.sockets.append([sock, 1])

                key = obj.packet.root.hashret()
                self.recv_list[key].append((len(self.recv_list), sock, node))

                log.debug("Adding socket to the list for receiving my packet %s"
                          % sock)
            else:
                # TODO: here we could create another thread that spawns tcpdump
                # process. We have to resize also the pool directly leaving n
                # available threads by having n sniff process and only 1 to send
                # packets. All controls should be in __check()

                iface = get_iface_from_ip(obj.packet)

                if not iface in self.procs:
                    process, outfile = run_helper(self.capmethod - 1, iface)
                    self.procs[iface] = [process, outfile, 1]

                    # Just increase the size of our pool to avoid starvation
                    self.pool.resize(maxthreads=self.pool.max + 1)

                    # And now start a new worker
                    self.pool.queue_work(None, self.__notify_exc,
                                         self.__recv_helper_worker,
                                         (process, outfile))
                else:
                    self.procs[iface][2] += 1
                    process, outfile = self.procs[iface][0:2]
                    log.debug("A process sniffing on %s exists." % iface)

                key = obj.packet.root.hashret()
                self.recv_list[key].append((len(self.recv_list), process, node))

        sock.send(obj.packet.root)

        self.__notify_send(node)

        log.debug("Sleeping %f after send" % self.inter)
        time.sleep(self.inter + obj.inter)

        if self.internal and node.get_parent():

            parent = node.get_parent()
            next = parent.get_next_of(node)

            if next:
                log.debug("Processing next packet")
                self.pool.queue_work(None, self.__notify_exc,
                                     self.__send_worker, next)

            else:
                log.debug("Last packet of this level")
                self.__notify_recv(None, None, False)
        else:
            log.debug("Last packet sent")
            self.__notify_recv(None, None, False)

    def __notify_exc(self, exc):
        """Forward an exception to excback (or dump it) and stop.

        Callbacks are cleared first so no further send/recv
        notifications are delivered.
        """
        self.scallback = None
        self.rcallback = None

        # Python 2 style: socket.error is indexable as (errno, message).
        if isinstance(exc, socket.error):
            exc = Exception(str(exc[1]))

        if self.excback:
            self.excback(exc)
        else:
            log.debug("Exception not properly handled. Dumping:")

            traceback.print_exc(file=sys.stdout)

        self.stop()

    def __notify_send(self, node):
        """Invoke the send callback; a true return value stops the run."""
        log.debug("Packet sent")

        if not self.scallback:
            return

        packet = None
        parent = False

        if node is not None:
            packet = node.get_data().packet
            parent = node.is_parent()

        if self.scallback(packet, parent, self.sudata):

            log.debug("send_callback want to exit")
            self.internal = False

    def __notify_recv(self, node, reply, is_reply):
        """Invoke the recv callback; a true return value stops the run."""
        log.debug("Packet received (is reply? %s)" % is_reply)

        if not self.rcallback:
            return

        packet = None

        if node is not None:
            packet = node.get_data().packet

        if self.rcallback(packet, reply, is_reply, self.rudata):

            log.debug("recv_callback want to exit")
            self.internal = False