Beispiel #1
0
def third():
    """Self-test 3: exercise Reactor volume bookkeeping with overlapping
    on/off boxes and verify the resulting cube counts."""
    print("= selftest 3 started")
    R = Reactor()

    first_box = Box("on x=10..12,y=10..12,z=10..12")
    first_box.id = "Box1"
    R += first_box
    # A lone 3x3x3 "on" box contributes 27 unit cubes.
    assert R.size() == 27

    second_box = Box("on x=11..13,y=11..13,z=11..13")
    second_box.id = "Box2"
    R += second_box

    # Overlaps the first box in a 2x2x2 corner, so only 19 new cubes.
    check(R.size() == 27 + 19,
          [R.size(), R.realcubes, [cube.size() for cube in R.realcubes]],
          f=lambda: R.savefig())

    third_box = Box("off x=9..11,y=9..11,z=9..11")
    third_box.id = "Box3"
    R += third_box

    # The "off" box removes the 2x2x2 corner it covers.
    check(R.size() == 27 + 19 - 8,
          [R.size(), R.realcubes, [cube.size() for cube in R.realcubes]],
          f=lambda: R.savefig(colliding=True))

    # Re-enabling a single already-counted-off cube adds exactly 1.
    R += Box("on x=10..10,y=10..10,z=10..10")
    assert R.size() == 39

    print("= selftest 3 passed")
 def __init__(self):
     """Set up the event processor: message transport plus detector and
     reactor plugins.  GPS support is disabled in this variant."""
     super(EventProcessor, self).__init__()
     self.message_handler = MessageHandler()
     # GPS deliberately left out here (compare the variant that enables it).
     #self.gps = GPS()
     # Bind the global Message facility to this handler's plugin address.
     Message.init(self.message_handler.plugin.address())
     Detector.start_plugins()
     Reactor.add_plugin_events()
Beispiel #3
0
 def __init__(self):
     """Set up the event processor: message transport, GPS, and the
     detector/reactor plugins."""
     super(EventProcessor, self).__init__()
     self.message_handler = MessageHandler()
     self.gps = GPS()
     # NOTE(review): Message.init() takes no address here, unlike the
     # sibling variant -- presumably a different Message API; confirm.
     Message.init()
     Detector.start_plugins()
     Reactor.add_plugin_events()
Beispiel #4
0
 def test_get_conversation_new_with_reading_packet(self):
     """A read request from an unknown (host, port) creates and stores a
     fresh TFTPConversation."""
     table = ConversationTable()
     read_request = packets.ReadRequestPacket('stub filename', 'stub mode')
     reactor = Reactor('stub_socket', 'stub_router', table)
     result = reactor.get_conversation('10.26.0.1', 3942, read_request)
     self.assertEqual(len(table), 1)
     self.assertIsInstance(result, TFTPConversation)
Beispiel #5
0
    def __init__(self):
        """Create the client: generate a unique peer id, build the reactor
        and register the listening socket for incoming connections."""
        self._peer_id = self._gen_own_peer_id()
        self.atorrents = []
        self.reactor = Reactor(self)
        self.reactor.add_reader(ListeningConnection(self, PORT))
Beispiel #6
0
    def __init__(self):
        """Initialise the full game state: player stats, rooms, the UI text
        handles (filled in later) and the reactor."""
        # state of various entities/room thingies
        self.inv = []
        self.rooms = make_rooms()
        self.player_coords = [1, 1]
        self.active_room = self.rooms[0]
        self.hp = HP()
        self.damage_rate = 0.2
        self.time = 0
        self.rads = 0
        self.td = TimeDisplay()

        # UI / display handles -- presumably assigned during rendering
        # elsewhere; all start unset.
        self.map = None
        self.time_txt = None
        self.room_txt = None
        self.health_txt = None
        self.inventory_txt = None
        self.radiation_exposure_txt = None
        self.current_form = None
        self.air_temp = None
        self.set_map_pos = True
        self.get_map_pos = False
        self.last_time = 0
        self.rad_mult = 1
        self.equipment_txt = None
        self.equipment = []
        self.hinted = False

        self.reactor = Reactor()

        # Variable for end-state
        self.good_end = None
Beispiel #7
0
 def test_get_conversation_new_with_reading_packet(self):
     """get_conversation() must register exactly one new conversation for
     a previously unseen peer sending a read request."""
     ctable = ConversationTable()
     rrq = packets.ReadRequestPacket('stub filename', 'stub mode')
     rx = Reactor('stub_socket', 'stub_router', ctable)
     convo = rx.get_conversation('10.26.0.1', 3942, rrq)
     self.assertEqual(len(ctable), 1)
     self.assertTrue(isinstance(convo, TFTPConversation))
Beispiel #8
0
class ConnectHandler(object):
    """Drives one non-blocking TCP connect and reports the outcome to the
    requestor via connection_complete()/connection_failed()."""

    def __init__(self, addr, requestor):
        self._requestor = requestor
        self._reactor = Reactor()
        self._addr = addr

        try:
            self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self._socket.setblocking(0)
            # Non-blocking connect usually raises with EINPROGRESS;
            # completion is detected later via the write event.
            self._socket.connect(addr)
        except socket.error:
            err = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
            # 0 / EINPROGRESS mean "still connecting" -- anything else is
            # a hard failure reported immediately.
            if err != 0 and err != errno.EINPROGRESS:
                self._socket.close()
                logger.debug("Connection failed: {0}\n".format(err))
                requestor.connection_failed(addr)
                return

        self._reactor.register_for_write_events(self)

    def stream(self):
        """Return the socket the reactor should poll."""
        return self._socket

    def write_event(self):
        """Reactor callback: the connect attempt finished; inspect
        SO_ERROR to decide success vs failure."""
        self._reactor.unregister_for_write_events(self)

        err = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
        if err == 0:
            logger.info("Connection established with {0}"
                        .format(str(self._addr)))
            self._requestor.connection_complete(self._addr, self._socket)
        elif err != 0 and err != errno.EINPROGRESS:
            logger.info("Connection establishment failed with {0}"
                        .format(str(self._addr)))
            self._requestor.connection_failed(self._addr)
Beispiel #9
0
    def add_atom(self, info, method='random_position', pbi = [0, 0, 0]):
        """Add later"""
        from copy import deepcopy
        from reactor import Reactor
        if method == 'random_position':
            position = self.shape.random_position_on_surface(self.cutoff_distance)
            new_info = deepcopy(info)
            new_info.extend(position)
            for i in range(3):
                new_info.append(str(pbi[i]))
            Reactor.add_atom(self, new_info)
            max_key = max(self.atoms)
            temp_atoms = {}
            temp_atoms[max_key] = self.atoms[max_key]
            self._create_surface(temp_atoms)            
#        elif method == 'guess_position': not implemented in this version
#            pass
        elif method == 'known_position':
            Reactor.add_atom(self, info)
            max_key = max(self.atoms)
            temp_atoms = {}
            temp_atoms[max_key] = self.atoms[max_key]
            self._create_surface(temp_atoms)
        else:
            raise RuntimeError("{0} is an invalid method.".format(method))
Beispiel #10
0
class ConnectHandler(object):
    """Performs one non-blocking TCP connect and notifies the requestor of
    the result through connection_complete()/connection_failed()."""

    def __init__(self, addr, requestor):
        self._requestor = requestor
        self._reactor = Reactor()
        self._addr = addr

        try:
            self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self._socket.setblocking(0)
            # connect() on a non-blocking socket typically raises with
            # EINPROGRESS; the write event signals completion.
            self._socket.connect(addr)
        except socket.error:
            err = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
            # Only a real error (not 0 / EINPROGRESS) aborts the attempt.
            if err != 0 and err != errno.EINPROGRESS:
                self._socket.close()
                logger.debug("Connection failed: {0}\n".format(err))
                requestor.connection_failed(addr)
                return

        self._reactor.register_for_write_events(self)

    def stream(self):
        """Return the socket for the reactor to poll."""
        return self._socket

    def write_event(self):
        """Reactor callback: connect finished; SO_ERROR decides success."""
        self._reactor.unregister_for_write_events(self)

        err = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
        if err == 0:
            logger.info("Connection established with {0}".format(
                str(self._addr)))
            self._requestor.connection_complete(self._addr, self._socket)
        elif err != 0 and err != errno.EINPROGRESS:
            logger.info("Connection establishment failed with {0}".format(
                str(self._addr)))
            self._requestor.connection_failed(self._addr)
Beispiel #11
0
class Emmer(object):
    """This is the wrapping class for the Emmer framework. It initializes
    running services and also offers the client level interface.
    """
    def __init__(self):
        self.host = config.HOST
        self.port = config.PORT
        self.response_router = ResponseRouter()
        self.conversation_table = ConversationTable()
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.reactor = Reactor(self.sock, self.response_router,
                               self.conversation_table)
        self.performer = Performer(self.sock, self.conversation_table,
                                   config.RESEND_TIMEOUT,
                                   config.RETRIES_BEFORE_GIVEUP)

    def route_read(self, filename_pattern):
        """Adds a function with a filename pattern to the Emmer server. Upon a
        read request, Emmer will run the action corresponding to the first
        filename pattern to match the request's filename.

        Use this function as a decorator on a function to add that function
        as an action with which to handle a tftp conversation.

        Args:
            filename_pattern: a regex pattern to match filenames against.
        """
        def decorator(action):
            self.response_router.append_read_rule(filename_pattern, action)

        return decorator

    def route_write(self, filename_pattern):
        """Adds a function with a filename pattern to the Emmer server. Upon a
        write request, Emmer will run the action corresponding to the first
        filename pattern to match the request's filename.

        Use this function as a decorator on a function to add that function
        as an action with which to handle a tftp conversation.

        Args:
            filename_pattern: a regex pattern to match filenames against.
        """
        def decorator(action):
            self.response_router.append_write_rule(filename_pattern, action)

        return decorator

    def run(self):
        """Initiates the Emmer server. This includes:
        * Listening on the given UDP host and port.
        * Sending messages through the given port to reach out on timed out
          tftp conversations.
        """
        self.sock.bind((self.host, self.port))
        print "TFTP Server running at %s:%s" % (self.host, self.port)
        thread.start_new_thread(self.performer.run,
                                (config.PERFORMER_THREAD_INTERVAL, ))
        self.reactor.run()
Beispiel #12
0
class Emmer(object):
    """This is the wrapping class for the Emmer framework. It initializes
    running services and also offers the client level interface.
    """
    def __init__(self):
        self.host = config.HOST
        self.port = config.PORT
        self.response_router = ResponseRouter()
        self.conversation_table = ConversationTable()
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.reactor = Reactor(self.sock, self.response_router,
                               self.conversation_table)
        self.performer = Performer(self.sock, self.conversation_table,
                                   config.RESEND_TIMEOUT,
                                   config.RETRIES_BEFORE_GIVEUP)

    def route_read(self, filename_pattern):
        """Adds a function with a filename pattern to the Emmer server. Upon a
        read request, Emmer will run the action corresponding to the first
        filename pattern to match the request's filename.

        Use this function as a decorator on a function to add that function
        as an action with which to handle a tftp conversation.

        Args:
            filename_pattern: a regex pattern to match filenames against.
        """
        def decorator(action):
            self.response_router.append_read_rule(filename_pattern, action)

        return decorator

    def route_write(self, filename_pattern):
        """Adds a function with a filename pattern to the Emmer server. Upon a
        write request, Emmer will run the action corresponding to the first
        filename pattern to match the request's filename.

        Use this function as a decorator on a function to add that function
        as an action with which to handle a tftp conversation.

        Args:
            filename_pattern: a regex pattern to match filenames against.
        """
        def decorator(action):
            self.response_router.append_write_rule(filename_pattern, action)

        return decorator

    def run(self):
        """Initiates the Emmer server. This includes:
        * Listening on the given UDP host and port.
        * Sending messages through the given port to reach out on timed out
          tftp conversations.
        """
        self.sock.bind((self.host, self.port))
        print "TFTP Server running at %s:%s" % (self.host, self.port)
        thread.start_new_thread(self.performer.run,
                                (config.PERFORMER_THREAD_INTERVAL,))
        self.reactor.run()
Beispiel #13
0
def download(name):
    """Download torrent ``name`` from the user's Torrents directory.

    Builds the platform-specific path of the .torrent file and runs one
    Reactor thread to completion (blocking).

    Bug fix: the original Windows branch used ``'C:\\Users' + user`` which
    (a) dropped the separator between "Users" and the user name, and
    (b) contains ``\\U``, a SyntaxError in Python 3 string literals.
    """
    shared_mem = Queue.PriorityQueue()
    if platform == "win32":
        directory = 'C:\\Users\\' + user + '\\Torrents\\'
    else:
        directory = '/home/' + user + '/Torrents/'
    torrent_path = directory + name
    peerMngr = PeerManager(torrent_path)
    bittorrentThread = Reactor(1, "Thread-1", peerMngr, shared_mem, debug=True)
    bittorrentThread.run()
Beispiel #14
0
 def test_get_conversation_old_with_data_packet(self):
     """A data packet from an already-known (host, port) must return the
     existing conversation instead of creating a new one."""
     conversation_table = ConversationTable()
     packet = packets.DataPacket('stub block number', 'stub data')
     old_conversation = TFTPConversation('10.26.0.1', 3942, 'stub_router')
     conversation_table.add_conversation('10.26.0.1', 3942, old_conversation)
     reactor = Reactor('stub_socket', 'stub_router', conversation_table)
     conversation = reactor.get_conversation('10.26.0.1', 3942, packet)
     # Table size unchanged; the very same conversation object comes back.
     self.assertEqual(len(conversation_table), 1)
     self.assertTrue(isinstance(conversation, TFTPConversation))
     self.assertEqual(conversation, old_conversation)
Beispiel #15
0
 def delete_atoms(self, atom_keys):
     """Remove the given atom keys from the surface list -- silently
     skipping keys that are not on the surface -- then delete the atoms
     themselves via the Reactor base implementation."""
     from reactor import Reactor
     for key in atom_keys:
         # Equivalent to try/remove/except ValueError: drop the key only
         # if it is currently tracked as a surface atom.
         if key in self.surface:
             self.surface.remove(key)
     Reactor.delete_atoms(self, atom_keys)
Beispiel #16
0
def download(name):
    """Run a blocking torrent download of ``name``.

    Fixes over the original: consistent 4-space indentation (the original
    mixed tabs and spaces, a TabError under Python 3) and an explicit
    separator after ``C:\\Users`` (the original concatenation produced
    ``C:\\Users<user>`` fused together, and ``\\U`` is a SyntaxError in
    Python 3 string literals).
    """
    shared_mem = Queue.PriorityQueue()
    if platform == "win32":
        directory = 'C:\\Users\\' + user + '\\Torrents\\'
    else:
        directory = '/home/' + user + '/Torrents/'
    torrent_file = directory + name
    peerMngr = PeerManager(torrent_file)
    bittorrentThread = Reactor(1, "Thread-1", peerMngr, shared_mem, debug=True)
    bittorrentThread.run()
Beispiel #17
0
 def test_get_conversation_old_with_data_packet(self):
     """A data packet for an existing (host, port) entry must yield that
     same stored conversation, not a new one."""
     conversation_table = ConversationTable()
     packet = packets.DataPacket('stub block number', 'stub data')
     old_conversation = TFTPConversation('10.26.0.1', 3942, 'stub_router')
     conversation_table.add_conversation('10.26.0.1', 3942,
                                         old_conversation)
     reactor = Reactor('stub_socket', 'stub_router', conversation_table)
     conversation = reactor.get_conversation('10.26.0.1', 3942, packet)
     # No new entry was added and the identical object is returned.
     self.assertEqual(len(conversation_table), 1)
     self.assertTrue(isinstance(conversation, TFTPConversation))
     self.assertEqual(conversation, old_conversation)
class SocketReaderWriter(object):
    """Reactor-driven socket adapter: feeds inbound bytes to a pluggable
    receiver and queues outbound byte strings until the socket is
    writable."""

    def __init__(self, sock):
        self._reactor = Reactor()
        self._receiver = None  # current consumer of inbound bytes
        self._output = []      # FIFO of pending outbound byte strings
        self._socket = sock
        self._reactor.register_for_read_events(self)

    def stream(self):
        """Return the underlying socket for the reactor to poll."""
        return self._socket

    def stop(self):
        """Detach from the reactor for both read and write events."""
        self._reactor.unregister_for_read_events(self)
        self._reactor.unregister_for_write_events(self)

    def set_receiver(self, receiver):
        self._receiver = receiver

    def unset_receiver(self):
        self._receiver = None

    def read_event(self):
        """Reactor callback: socket readable.

        Reads directly into the receiver's buffer; zero bytes or a socket
        error is treated as connection loss.
        """
        if self._receiver:
            view, size = self._receiver.get_rx_buffer()

            try:
                num = self._socket.recv_into(view, size)
                if num > 0:
                    self._receiver.rx_bytes(num)
                else:
                    # 0 bytes => peer closed.  The receiver may have been
                    # unset during rx handling, hence the re-check.
                    if self._receiver:
                        self._receiver.connection_lost()
            except socket.error as err:
                logger.info("Socket Error {}".format(err))
                self._receiver.connection_lost()


    def write_event(self):
        """Reactor callback: socket writable; flush queued output.

        Sends queue-head chunks; on a partial send the unsent tail is kept
        and we wait for the next write event.  Unregisters once drained.
        """
        if self._output == []:
            self._reactor.unregister_for_write_events(self)
        else:
            while self._output != []:
                num = self._socket.send(self._output[0])

                if num == len(self._output[0]):
                    del self._output[0]
                else:
                    # Partial send: keep the remainder at the queue head.
                    self._output[0] = self._output[0][num:]
                    break

    def tx_bytes(self, bytestr):
        """Queue ``bytestr`` for sending; arm write events if the queue
        was previously empty."""
        self._output.append(bytestr)
        if len(self._output) == 1:
            self._reactor.register_for_write_events(self)
Beispiel #19
0
def test_io_watcher():
    """Smoke-test the reactor's read-event plumbing.

    Connects to localhost:8001 (something must be listening there) and
    installs a read watcher whose callback stops the loop by signalling
    the whole process with SIGINT.
    """
    reactor = Reactor()
    s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    s.connect(('localhost',8001))
    def cb(a,b):
        # should be reworked, this is a bit of a hammer
        os.kill(os.getpid(), signal.SIGINT)
    io_event = events.ReadEvent(s, cb)
    reactor.install(io_event)
    reactor.run()
Beispiel #20
0
 def add_body_data(self, data): #start with this one
     """Merge another data set into this one.

     Adds ``data``'s atoms (producing an old-key -> new-key mapping),
     rewrites ``data``'s atom references to the new numbering, then copies
     each body section across verbatim.  The section names appear to be
     LAMMPS data-file headings -- TODO confirm.
     """
     old_new_keys = Body_data.add_atoms(self, data.atoms)
     from reactor import Reactor
     #the problem is in reactor change_atom_num
     Reactor.change_atom_num(data, old_new_keys)
     body_keywords = ["Angles", "Bonds", "Angle Coeffs", "Bond Coeffs",\
     "Dihedral Coeffs", "Improper Coeffs", "Dihedrals", "Impropers",\
     "Masses", "Pair Coeffs", "Velocities"]
     for keyword in body_keywords:
         Body_data.add_data(self, data.get_body_data(keyword), keyword)
Beispiel #21
0
 def __init__(self):
     """Wire up the server components around one shared UDP socket."""
     self.host = config.HOST
     self.port = config.PORT
     self.response_router = ResponseRouter()
     self.conversation_table = ConversationTable()
     self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
     # Reactor and Performer share the socket and the conversation table;
     # the Performer's timeout/retry limits come from config.
     self.reactor = Reactor(self.sock, self.response_router,
                            self.conversation_table)
     self.performer = Performer(self.sock, self.conversation_table,
                                config.RESEND_TIMEOUT,
                                config.RETRIES_BEFORE_GIVEUP)
Beispiel #22
0
class SocketReaderWriter(object):
    """Non-blocking socket adapter driven by the Reactor: inbound bytes go
    to a pluggable receiver, outbound byte strings are queued until the
    socket becomes writable."""

    def __init__(self, sock):
        self._reactor = Reactor()
        self._receiver = None  # current consumer of inbound bytes
        self._output = []      # FIFO of pending outbound byte strings
        self._socket = sock
        self._reactor.register_for_read_events(self)

    def stream(self):
        """Return the underlying socket for the reactor to poll."""
        return self._socket

    def stop(self):
        """Detach from the reactor for both read and write events."""
        self._reactor.unregister_for_read_events(self)
        self._reactor.unregister_for_write_events(self)

    def set_receiver(self, receiver):
        self._receiver = receiver

    def unset_receiver(self):
        self._receiver = None

    def read_event(self):
        """Reactor callback: socket readable; read into the receiver's
        buffer.  Zero bytes or a socket error means connection lost."""
        if self._receiver:
            view, size = self._receiver.get_rx_buffer()

            try:
                n = self._socket.recv_into(view, size)
                if n > 0:
                    self._receiver.rx_bytes(n)
                else:
                    # 0 bytes => peer closed; receiver may have detached
                    # during rx handling, hence the re-check.
                    if self._receiver:
                        self._receiver.connection_lost()
            except socket.error as err:
                logger.info("Socket Error {}".format(err))
                self._receiver.connection_lost()

    def write_event(self):
        """Reactor callback: socket writable; flush queued output,
        keeping any partially-sent tail at the queue head."""
        if self._output == []:
            self._reactor.unregister_for_write_events(self)
        else:
            while self._output != []:
                n = self._socket.send(self._output[0])

                if n == len(self._output[0]):
                    del self._output[0]
                else:
                    # Partial send: retry the remainder on the next event.
                    self._output[0] = self._output[0][n:]
                    break

    def tx_bytes(self, bytestr):
        """Queue ``bytestr``; arm write events if the queue was empty."""
        self._output.append(bytestr)
        if len(self._output) == 1:
            self._reactor.register_for_write_events(self)
Beispiel #23
0
class Buttons:
    """Reaction game: one target button is lit green; pressing it is a Hit
    (a new random target is chosen), pressing anything else is a Miss (the
    wrong button lights red)."""

    # Reactor events that other components can hook.
    Miss = Reactor.event()
    Hit = Reactor.event()

    def __init__(self, board):
        self.board = board
        self.reactor = board.reactor
        self.time = self.board.time
        self.tick_interval = 0.005
        # React to physical switch presses; release handling is disabled.
        self.reactor.hook(Board.SwitchPress, self.press)
        #self.board.hook(Board.SwitchRelease, self.release)
        # Start with a random button as the target.
        self.target(random.randint(0, self.board.switch_count() - 1))
        self.wins = []
        self.setup()

    def setup(self):
        # Win-condition tracker -- presumably hooks Hit/Miss itself; confirm.
        ThreeWins(self.board)

    def target(self, button):
        """Make ``button`` the current target and light it green."""
        assert button >= 0
        assert button < self.board.switch_count()
        self.clear()
        self.__target = button
        self.board.setPixelColorRGB(self.board.pixel(button), 0, 255, 0)
        self.board.render()

    def clear(self):
        """Turn off every button pixel."""
        for x in range(self.board.switch_count()):
            self.board.setPixelColorRGB(x, 0, 0, 0)
        self.board.render()

    def press(self, button, timestamp):
        """SwitchPress handler: dispatch to hit() or missed()."""
        assert button >= 0
        assert button < self.board.switch_count()
        if button != self.__target:
            self.missed(button, timestamp)
        else:
            self.hit(button, timestamp)

    def missed(self, button, timestamp):
        """Fire the Miss event and light the wrongly pressed button red."""
        assert button >= 0
        assert button < self.board.switch_count()
        self.reactor.call(Buttons.Miss, button, timestamp)

        self.board.setPixelColorRGB(self.board.pixel(button), 255, 0, 0)
        self.board.render()

    def hit(self, button, timestamp):
        """Fire the Hit event and move the target to a new random button."""
        assert button >= 0
        assert button < self.board.switch_count()
        self.reactor.call(Buttons.Hit, button, timestamp)

        self.target(random.randint(0, self.board.switch_count() - 1))
Beispiel #24
0
    def start(self):
        '''Opens ZeroMQ sockets, starts listening to PUB events and kicks off initial REQs'''
        # NOTE(review): the body uses ``yield`` -- this is a coroutine (it
        # runs on self.io_loop); callers must schedule it, not call it
        # directly.
        factory_kwargs = {'timeout': 60, 'safe': True, 'io_loop': self.io_loop}
        pub_channel = salt.transport.client.AsyncPubChannel.factory(
            self.opts, **factory_kwargs)
        # Token authenticating the reactor's requests as the 'salt' user.
        tok = pub_channel.auth.gen_token('salt')
        yield pub_channel.connect()
        req_channel = salt.transport.client.AsyncReqChannel.factory(
            self.opts, **factory_kwargs)

        # Every published event is dispatched to the reactor; start() kicks
        # off the initial requests.
        reactor = Reactor(tok, req_channel, self.dump_path, self.opts)
        pub_channel.on_recv(reactor.dispatch)
        yield reactor.start()
Beispiel #25
0
 def __init__(self, port=0, files=None, bootstrap_address=None):
     """Create a servent node.

     Fix over the original: ``files`` used a mutable default argument
     (``files=[]``); it now defaults to ``None`` and is normalised to an
     empty list, which is backward compatible.

     Args:
         port: local port handed to the Reactor.
         files: optional list of shared-file objects exposing
             ``file_size``; defaults to no shared files.
         bootstrap_address: optional address to connect to on start-up.
     """
     if files is None:
         files = []
     self._logger = logging.getLogger("%s(%s)" % (self.__class__.__name__, hex(id(self))[:-1]))
     # forwarding table: (message_id, payload_type) -> (connection_handler,
     # expiration)
     self.forwarding_table = {}
     # flood/forward ignore table: message_id -> timestamp, used to prevent
     # loops when flooding.  All messages sent out record their message_id
     # with timestamp = time.time() + ttl * FIXED_EXPIRED_INTERVAL.
     self.ignore = {}
     # unique servent id
     self.id = uuid.uuid4().bytes
     self.log("id is %s" % self.id.encode('hex_codec'))
     # number of files and kilobytes shared
     self._files = files
     self.num_files = len(files)
     self.num_kilobytes = 0
     for f in files:
         self.num_kilobytes += f.file_size
     self.num_kilobytes /= 1000 # shrink the unit
     # Reactor owns all socket management for this servent
     self.reactor = Reactor(self, port)
     # check if bootstrap_address is given
     if bootstrap_address:
         self.reactor.bootstrap_connect(bootstrap_address)
Beispiel #26
0
    def __init__(self, root, bind_host, port=33348, control_sock_path="/tmp/tftp_control_sock"):
        """File-share node: a UDP master socket for peer traffic plus a
        local UNIX datagram socket for control commands.

        Args:
            root: root directory this node serves from.
            bind_host: host name/IP to bind the master socket to.
            port: UDP port for peer traffic.
            control_sock_path: filesystem path of the control socket.
        """
        self.master_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        self.master_sock.bind((bind_host, port))
        self.bind_addr = (bind_host, port)
        self.control_sock_path = control_sock_path
        self.root = root

        # Advertise a routable address: if we bound to the wildcard or a
        # loopback address, substitute this host's outward-facing IP.
        bind_host_ip = socket.gethostbyname(bind_host)
        if bind_host_ip == '0.0.0.0' or bind_host_ip.startswith("127."):
            self.my_addr = (get_my_ip(), port)
        else:
            self.my_addr = (bind_host_ip, port)

        # Transfer bookkeeping.
        self.dload_count = 0
        self.upload_count = 0
        self.downloads = []

        self.files = {}  # name => FileInfo
        self.servers = {}  # addr => Server
        self.server_ping_queue = set()

        # A stale socket file from a previous run would make bind() fail.
        if os.path.exists(control_sock_path):
            os.unlink(control_sock_path)

        self.control_sock = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
        self.control_sock.bind(control_sock_path)

        # Both sockets are polled by the reactor; periodic maintenance
        # callbacks are scheduled every 60 seconds.
        self.reactor = Reactor()
        self.reactor.register(self.control_sock, select.POLLIN,
                              self.on_control_cmd, None)
        self.reactor.register(self.master_sock, select.POLLIN,
                              self.on_new_connection, None)
        self.reactor.call_later(60, self.check_servers)
        self.reactor.call_later(60, self.process_ping_queue)
Beispiel #27
0
    def __init__(self):
        # Identity first: the peer id accompanies everything we announce.
        self._peer_id = self._gen_own_peer_id()
        # Torrents this client is actively working on.
        self.atorrents = []
        # One reactor drives all socket I/O; register the listening socket
        # so incoming peers can reach us.
        self.reactor = Reactor(self)
        incoming = ListeningConnection(self, PORT)
        self.reactor.add_reader(incoming)
Beispiel #28
0
 def __init__(self,
              carbon_only=False,
              pruning_method=None,
              ignore_chirality=True,
              use_antimotifs=True,
              outstream=sys.stderr,
              reaction_database_fname="../rec/reaction_templates.dat"):
     """Store the search options on self and construct the underlying
     Reactor configured with the same flags."""
     self.carbon_only = carbon_only
     self.ignore_chirality = ignore_chirality
     self.use_antimotifs = use_antimotifs
     self.pruning_method = pruning_method
     self.outstream = outstream
     self.reaction_database_fname = reaction_database_fname
     self.reactor = Reactor(carbon_only=self.carbon_only,
                            ignore_chirality=self.ignore_chirality,
                            use_antimotifs=self.use_antimotifs,
                            reaction_database_fname=self.reaction_database_fname)
Beispiel #29
0
 def __init__(self, config, data_store, telemetry, messagedispatcher):
     """Wrap a Reactor and seed the current target with the vehicle's
     present telemetry location."""
     self.messagedispatcher = messagedispatcher
     self.reactor = Reactor(config, data_store, telemetry, messagedispatcher)
     # Initial target is simply "where we are right now".
     self.current_target = Point(
         latitude = telemetry.get_location().latitude,
         longitude = telemetry.get_location().longitude,
         altitude = telemetry.get_location().altitude
     )
     # Convenience handle to the reactor's command-and-control sub-reactor.
     self.c2_reactor = self.reactor.c2_reactor
Beispiel #30
0
 def __init__(self,
              carbon_only=False,
              pruning_method=None,
              ignore_chirality=True,
              use_antimotifs=True,
              outstream=sys.stderr,
              reaction_database_fname="../rec/reaction_templates.dat"):
     """Store the search options on self and build the underlying Reactor
     configured with the same flags (pruning_method and outstream are kept
     for use elsewhere; they are not passed to the Reactor)."""
     self.carbon_only = carbon_only
     self.ignore_chirality = ignore_chirality
     self.use_antimotifs = use_antimotifs
     self.pruning_method = pruning_method
     self.outstream = outstream
     self.reaction_database_fname = reaction_database_fname
     self.reactor = Reactor(
         carbon_only=self.carbon_only,
         ignore_chirality=self.ignore_chirality,
         use_antimotifs=self.use_antimotifs,
         reaction_database_fname=self.reaction_database_fname)
Beispiel #31
0
    def __init__(self, addr, requestor):
        """Start a non-blocking TCP connect to ``addr``; the outcome is
        reported to ``requestor`` (immediately on hard failure, otherwise
        via the reactor's write event)."""
        self._requestor = requestor
        self._reactor = Reactor()
        self._addr = addr

        try:
            self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self._socket.setblocking(0)
            # Non-blocking connect typically raises with EINPROGRESS;
            # completion is observed later as a write event.
            self._socket.connect(addr)
        except socket.error:
            err = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
            # Only a real error (not 0 / EINPROGRESS) aborts the attempt.
            if err != 0 and err != errno.EINPROGRESS:
                self._socket.close()
                logger.debug("Connection failed: {0}\n".format(err))
                requestor.connection_failed(addr)
                return

        self._reactor.register_for_write_events(self)
Beispiel #32
0
    def __init__(self, addr, server):
        """Listening endpoint: bind ``addr``, start listening, and register
        with the Reactor so accepts are driven by read events."""
        self._server = server

        self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # Non-blocking so the reactor, not accept(), drives us.
        self._socket.setblocking(0)

        self._socket.bind(addr)
        self._socket.listen(5)

        Reactor().register_for_read_events(self)
Beispiel #33
0
class Client(object):
    """Top-level BitTorrent client: owns the reactor, the listening socket
    and the set of active torrents."""

    def __init__(self):
        self._peer_id = self._gen_own_peer_id()
        self.atorrents = []

        self.reactor = Reactor(self)
        listener = ListeningConnection(self, PORT)
        self.reactor.add_reader(listener)

    @property
    def peer_id(self):
        """20-byte id advertised to trackers and peers."""
        return self._peer_id

    def add_torrent(self, torrent_name):
        """Load a torrent, announce to its tracker and start tracking it."""
        torrent = ActiveTorrent(self, torrent_name, Strategy)
        torrent.announce()
        self.atorrents.append(torrent)

    def add_torrent_peer(self, peer, info_hash):
        """Attach ``peer`` to the active torrent matching ``info_hash``.

        Bug fix: the original filtered with ``x.info_hash != info_hash``,
        which selected every torrent EXCEPT the matching one (and, on
        Python 3, ``len()``/indexing on the lazy ``filter`` object would
        fail).  A list comprehension with ``==`` selects the matching
        torrent and works on both Python versions.
        """
        matching_torrent = [t for t in self.atorrents
                            if t.info_hash == info_hash]
        assert len(matching_torrent) <= 1

        if not matching_torrent:
            log.info('Invalid peer: no torrent found for info_hash %s' %
                     info_hash)
            return

        peer.atorrent = matching_torrent[0]
        log.info('Adding peer %s to torrent %s' %
                 (repr(peer), matching_torrent[0]))

    def receive_incoming_connection(self, sock, host, port):
        """Wrap an accepted socket in a Peer and register it for I/O."""
        peer = Peer(host, port, client=self, sock=sock)
        self.reactor.add_reader_writer(peer)

    def _gen_own_peer_id(self):
        """Return a 20 byte string to be used as a unique ID for this client"""
        remain = 20 - len(PEER_ID)
        seed_chars = string.ascii_lowercase + string.digits
        seed = ''.join(random.choice(seed_chars) for x in range(remain))
        return PEER_ID + seed
Beispiel #34
0
class Client(object):
    """Top-level BitTorrent client: owns the reactor, the listening
    connection and the set of active torrents."""

    def __init__(self):
        self._peer_id = self._gen_own_peer_id()
        self.atorrents = []

        self.reactor = Reactor(self)
        listener = ListeningConnection(self, PORT)
        self.reactor.add_reader(listener)

    @property
    def peer_id(self):
        """20-byte unique identifier for this client."""
        return self._peer_id

    def add_torrent(self, torrent_name):
        """Start tracking and announcing a new torrent."""
        torrent = ActiveTorrent(self, torrent_name, Strategy)
        torrent.announce()
        self.atorrents.append(torrent)

    def add_torrent_peer(self, peer, info_hash):
        """Attach *peer* to the active torrent matching *info_hash*, if any.

        Bug fix: the old filter predicate used ``!=`` and therefore kept the
        torrents that did NOT match; additionally ``filter()`` returns an
        iterator on Python 3, so ``len()``/indexing raised TypeError.  A list
        comprehension with an equality test fixes both.
        """
        matching_torrent = [t for t in self.atorrents
                            if t.info_hash == info_hash]
        assert len(matching_torrent) <= 1

        if not matching_torrent:
            log.info('Invalid peer: no torrent found for info_hash %s' %
                     info_hash)
            return

        peer.atorrent = matching_torrent[0]
        log.info('Adding peer %s to torrent %s' %
                 (repr(peer), matching_torrent[0]))

    def receive_incoming_connection(self, sock, host, port):
        """Wrap an accepted socket in a Peer and hand it to the reactor."""
        peer = Peer(host, port, client=self, sock=sock)
        self.reactor.add_reader_writer(peer)

    def _gen_own_peer_id(self):
        """Return a 20 byte string to be used as a unique ID for this client"""
        remain = 20 - len(PEER_ID)
        seed_chars = string.ascii_lowercase + string.digits
        seed = ''.join(random.choice(seed_chars) for x in range(remain))
        return PEER_ID + seed
Beispiel #35
0
    def __init__(self, client, filename, port, peer_id):
        """Load the metainfo for *filename*, contact the tracker and begin
        connecting to peers.

        Raises TorrentManagerError when the metainfo file cannot be read or
        the tracker is unreachable.  NOTE: this snippet is Python 2 code
        (bare ``print`` statement, ``err.message``).
        """
        self._client = client
        self._filename = filename
        self._port = port
        self._peer_id = peer_id

        # _peers is a list of peers that the TorrentManager is trying
        # to communicate with
        self._peers = []

        # _bitfields is a dictionary mapping peers to a bitfield of the pieces
        # each has
        self._bitfields = {}

        try:
            self._metainfo = Metainfo(filename)
        except (IOError, ValueError) as err:
            # IOError carries strerror, ValueError carries message (py2).
            if isinstance(err, IOError):
                message = err.strerror+' ('+filename+')'
            else:
                message = err.message+' ('+filename+')'
            logger.error(message)
            raise TorrentManagerError(message)

        # _have is the bitfield for this torrent. It is initialized to reflect
        # which pieces are already available on disk.
        self._filemanager = FileManager(self._metainfo)
        self._have = self._filemanager.have()

        try:
            self._tracker_proxy = TrackerProxy(self._metainfo, self._port,
                                               self._peer_id)
        except TrackerError as err:
            logger.critical("Could not connect to tracker at {}"
                            .format(self._metainfo.announce))
            logger.debug("    TrackerError: {}".format(err.message))
            raise TorrentManagerError(err.message)

        # _needed maps each still-missing piece index ('0b0' bits in _have)
        # to (number of peers that have it, list of those peers).
        self._needed = {piece: (0, []) for piece
                        in list(self._have.findall('0b0'))}

        # Peers we have expressed interest to but are still choked by.
        self._interested = {}

        # Peers we are actively downloading blocks from.
        self._requesting = {}

        # Saved (piece, offset, sha1) state of partially-downloaded pieces.
        self._partial = []

        self._reactor = Reactor()
        self._reactor.schedule_timer(_TIMER_INTERVAL, self.timer_event)
        self._tick = 1

        print "Starting to serve torrent {} off of Cracker website...".format(filename)

        self._connect_to_peers(20)
Beispiel #36
0
 def __init__(self):
     """Wire up the TFTP server: UDP socket, response router, conversation
     table, and the reactor/performer pair that share them."""
     self.response_router = ResponseRouter()
     self.conversation_table = ConversationTable()
     self.host = config.HOST
     self.port = config.PORT
     self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
     self.reactor = Reactor(
         self.sock, self.response_router, self.conversation_table)
     self.performer = Performer(
         self.sock,
         self.conversation_table,
         config.RESEND_TIMEOUT,
         config.RETRIES_BEFORE_GIVEUP)
Beispiel #37
0
 def __init__(self, torrent):
     """Set up client state for *torrent*: decode its metadata, build the
     handshake, contact the tracker and connect to peers.

     NOTE(review): the trailing calls are order-dependent -- pieces must be
     decoded before the handshake/tracker/peer setup that uses them.
     """
     self.torrent = torrent
     self.torrent_state = 'random'
     self.reactor = Reactor()
     self.reactor_activated = False
     # Azureus-style '-XX0000-' fixed peer id.
     self.peer_id = '-TZ-0000-00000000000'
     self.peers = [] 
     self.decode_torrent_and_setup_pieces()
     self.handshake = self.build_handshake()
     self.setup_tracker()
     self.stitcher = Stitcher(self)
     self.setup_peers()
Beispiel #38
0
 def __init__(self, config, data_store, telemetry, messagedispatcher, communicator, detection):
     """Store collaborators, build the Reactor, and seed the current target
     from the drone's present location."""
     self.uuid = config.get('DEFAULT', 'uuid')
     self.debug = 'True' == config.get('DEFAULT', 'debug')
     self.data_store = data_store
     self.messagedispatcher = messagedispatcher
     self.communicator = communicator
     self.reactor = Reactor(config, data_store, telemetry, messagedispatcher, communicator, detection)
     # Fix: take a single location snapshot so latitude/longitude/altitude
     # all come from the same telemetry reading (three separate
     # get_location() calls could straddle an update).
     location = telemetry.get_location()
     self.current_target = Point(
         latitude = location.latitude,
         longitude = location.longitude,
         altitude = location.altitude
     )
Beispiel #39
0
    def __init__(self):
        """Pick a free listening port, start accepting connections, then run
        the reactor loop.  NOTE: Reactor().run() blocks, so this constructor
        does not return for the life of the client.
        """
        self._peer_id = "-HS0001-" + str(int(time.time())).zfill(12)
        self._torrents = {}
        self._downloads = set()

        # Scan the configured port range for one we can bind; the for/else
        # reaches the else clause only if no port in the range worked.
        for self._port in range(_PORT_FIRST, _PORT_LAST + 1):
            try:
                self._acceptor = Acceptor(("localhost", self._port), self)
                break
            except Exception as err:
                # Bind failures are expected while scanning; log and try next.
                logger.debug(err)
                continue
        else:
            logger.critical(
                ("Could not find free port in range {}-{} to "
                 "accept connections").format(_PORT_FIRST, _PORT_LAST))
            sys.exit(1)

        logger.info("Listening on port {}".format(self._port))

        # Defer start_downloads until the reactor is running, then enter the
        # event loop.  Reactor() is presumably a singleton here -- the same
        # instance must receive both calls; TODO confirm.
        Reactor().schedule_timer(.01, self.start_downloads)
        Reactor().run()
Beispiel #40
0
def test_reactor():
    """Apply automatic reactor changes until the status leaves (0, 1),
    printing the statuses at every step and once at the end."""
    reactor = Reactor()

    step = 0
    while True:
        pct = reactor.status_percentage()
        if pct <= 0 or pct >= 1:
            break
        print(f"step {step:04d}")
        print("\n".join(reactor.get_statuses()))
        reactor.auto_changes()
        step += 1

    print("Final")
    print("\n".join(reactor.get_statuses()))
Beispiel #41
0
def readinaTOR():
    """Read Box specifications from stdin, folding each into a Reactor.

    Prints one progress line per box (ordinal, current cube count, the box,
    elapsed whole seconds) and returns the accumulated Reactor.
    """
    import time
    start = time.time()
    reactor = Reactor()

    for count, raw in enumerate(sys.stdin, 1):
        box = Box(raw.strip())
        print(str(count)+": ("+str(len(reactor.realcubes))+") "+str(box)+" "+str(int(time.time()-start)))
        reactor = reactor + box

    return reactor
Beispiel #42
0
    def __init__(self, addr, requestor):
        """Begin a non-blocking TCP connect to *addr*.

        On a genuine immediate failure the requestor's connection_failed()
        callback is invoked and setup is abandoned; otherwise the object is
        registered for write events so the reactor can report when the
        connect completes.
        """
        self._requestor = requestor
        self._reactor = Reactor()
        self._addr = addr

        try:
            self._socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            self._socket.setblocking(0)
            self._socket.connect(addr)
        except socket.error:
            # A non-blocking connect normally raises EINPROGRESS; consult
            # SO_ERROR and treat only other pending errors as real failures.
            err = self._socket.getsockopt(socket.SOL_SOCKET, socket.SO_ERROR)
            if err != 0 and err != errno.EINPROGRESS:
                self._socket.close()
                logger.debug("Connection failed: {0}\n".format(err))
                requestor.connection_failed(addr)
                return

        self._reactor.register_for_write_events(self)
Beispiel #43
0
class Navigator:
    """Drives the drone toward targets produced by the Reactor."""

    def __init__(self, config, data_store, telemetry, messagedispatcher):
        self.messagedispatcher = messagedispatcher
        self.reactor = Reactor(config, data_store, telemetry, messagedispatcher)
        # Fix: read the location once so all three coordinates come from the
        # same telemetry sample (three separate get_location() calls could
        # straddle an update).
        location = telemetry.get_location()
        self.current_target = Point(
            latitude = location.latitude,
            longitude = location.longitude,
            altitude = location.altitude
        )
        self.c2_reactor = self.reactor.c2_reactor

    @asyncio.coroutine
    def startup(self):
        """Main loop: poll the reactor once a second and follow move actions."""
        while True:
            action = self.reactor.run()
            if action is not None:
                if action.has_move():
                    self.current_target = action.move
            yield from asyncio.sleep(1)

    def get_current_target(self):
        """Return the Point the drone is currently heading toward."""
        return self.current_target
Beispiel #44
0
class Navigator:
    """Drone navigator: follows Reactor actions, maintains the search grid
    and republishes the grid state over the communicator."""

    def __init__(self, config, data_store, telemetry, messagedispatcher, communicator, detection):
        """Snapshot config/collaborators and seed the current target from the
        drone's present location."""
        self.uuid = config.get('DEFAULT', 'uuid')
        self.debug = 'True' == config.get('DEFAULT', 'debug')
        self.data_store = data_store
        self.messagedispatcher = messagedispatcher
        self.communicator = communicator
        self.reactor = Reactor(config, data_store, telemetry, messagedispatcher, communicator, detection)
        # Fix: read the location once so all three coordinates come from the
        # same telemetry sample instead of three separate get_location() calls.
        location = telemetry.get_location()
        self.current_target = Point(
            latitude = location.latitude,
            longitude = location.longitude,
            altitude = location.altitude
        )

    @asyncio.coroutine
    def startup(self):
        """Main loop: poll the reactor every second, apply move/claim/complete
        actions to the grid, and broadcast the grid state."""
        while True:
            action = self.reactor.run()
            grid = self.data_store.get_grid_state()
            if action is not None:
                if action.has_move():
                    self.current_target = action.move
                    # Altitudes below 10 are clamped up to 100.
                    if self.current_target.altitude < 10:
                        self.current_target.altitude = 100
                if action.has_claim_sector():
                    grid.set_state_for(action.claim_sector, datastore.SectorState.being_searched, self.uuid)
                if action.has_complete_sector():
                    grid.set_state_for(action.complete_sector, datastore.SectorState.searched, self.uuid)
                if (self.debug or self.current_target.altitude < 10) and action.has_move_info():
                    print(action.move_info)

            if grid is not None:
                yield from self.communicator.send_message(GridMesh(self.uuid, self.uuid, grid))

            yield from asyncio.sleep(1)

    def get_current_target(self):
        """Return the Point the drone is currently heading toward."""
        return self.current_target
Beispiel #45
0
class TorrentManager(object):
    """Coordinates downloading/serving one torrent: tracks connected peers,
    per-piece availability, and in-flight requests, driven by Reactor timers.

    NOTE: this snippet is Python 2 code (bare ``print`` statements,
    ``err.message``); mutating dicts while iterating ``items()`` below is
    safe only where ``items()`` returns a list copy (Python 2).
    """

    def __init__(self, client, filename, port, peer_id):
        """Load the metainfo for *filename*, contact the tracker and begin
        connecting to peers.  Raises TorrentManagerError when the metainfo
        file or the tracker is unusable."""
        self._client = client
        self._filename = filename
        self._port = port
        self._peer_id = peer_id

        # _peers is a list of peers that the TorrentManager is trying
        # to communicate with
        self._peers = []

        # _bitfields is a dictionary mapping peers to a bitfield of the pieces
        # each has
        self._bitfields = {}

        try:
            self._metainfo = Metainfo(filename)
        except (IOError, ValueError) as err:
            # IOError carries strerror, ValueError carries message (py2).
            if isinstance(err, IOError):
                message = err.strerror+' ('+filename+')'
            else:
                message = err.message+' ('+filename+')'
            logger.error(message)
            raise TorrentManagerError(message)

        # _have is the bitfield for this torrent. It is initialized to reflect
        # which pieces are already available on disk.
        self._filemanager = FileManager(self._metainfo)
        self._have = self._filemanager.have()

        try:
            self._tracker_proxy = TrackerProxy(self._metainfo, self._port,
                                               self._peer_id)
        except TrackerError as err:
            logger.critical("Could not connect to tracker at {}"
                            .format(self._metainfo.announce))
            logger.debug("    TrackerError: {}".format(err.message))
            raise TorrentManagerError(err.message)

        # _needed maps each still-missing piece index ('0b0' bits of _have)
        # to (number of peers that have it, list of those peers).
        self._needed = {piece: (0, []) for piece
                        in list(self._have.findall('0b0'))}

        # peer -> (piece, offset, sha1, tick): interest sent, awaiting unchoke.
        self._interested = {}

        # peer -> (piece, offset, sha1, tick, retries): actively downloading.
        self._requesting = {}

        # Saved (piece, offset, sha1) state of abandoned partial downloads,
        # so another peer can resume them.
        self._partial = []

        self._reactor = Reactor()
        self._reactor.schedule_timer(_TIMER_INTERVAL, self.timer_event)
        self._tick = 1

        print "Starting to serve torrent {} off of Cracker website...".format(filename)

        self._connect_to_peers(20)

    def _connect_to_peers(self, n):
        # Get addresses of n peers from the tracker and try to establish
        # a connection with each
        addrs = self._tracker_proxy.get_peers(n)
        for addr in addrs:
            peer = PeerProxy(self, self._peer_id, (addr['ip'], addr['port']),
                             info_hash=self._metainfo.info_hash)
            self._peers.append(peer)
            self._bitfields[peer] = BitArray(self._metainfo.num_pieces)

    def _remove_peer(self, peer):
        # Clean up references to the peer in various data structures
        self._peers.remove(peer)

        # Decrement the availability count of every needed piece this
        # peer advertised.
        pieces = list(self._bitfields[peer].findall('0b1'))
        for piece in pieces:
            if piece in self._needed:
                occurences, peers = self._needed[piece]
                if peer in peers:
                    peers.remove(peer)
                    self._needed[piece] = (occurences-1, peers)

        del self._bitfields[peer]

        if peer in self._interested:
            del self._interested[peer]
        elif peer in self._requesting:
            # If the peer is in the middle of downloading a piece, save
            # the state in the partial list
            index, offset, sha1, _, _ = self._requesting[peer]
            self._partial.append((index, offset, sha1))
            del self._requesting[peer]

    def _rarest(self):
        # Returns a list of tuples which includes a piece index sorted by
        # the number of peers which have the piece in ascending order
        return sorted([(occurences, peers, index)
                       for (index, (occurences, peers)) in self._needed.items()
                       if occurences != 0])

    def _show_interest(self, peer):
        # Send an interested message once, and request immediately if the
        # peer is not choking us.
        if not peer.is_interested():
            logger.debug("Expressing interest in peer {}"
                         .format(str(peer.addr())))
            peer.interested()

        if not peer.is_peer_choked():
            self._request(peer)

    def _check_interest(self, peer):
        # If the peer is not already interested or requesting, identify a piece
        # for it to download and show interest to the peer.
        if peer not in self._interested and peer not in self._requesting:
            # Compute the set of needed pieces which the peer has that are not
            # already designated for another peer
            needed = self._have.copy()
            needed.invert()
            of_interest = list((needed & self._bitfields[peer]).findall('0b1'))
            dont_consider = [i for i, _, _, _ in self._interested.values()]
            dont_consider.extend([i for i, _, _, _, _
                                  in self._requesting.values()])

            if len(of_interest) > 0:
                # Prefer resuming a partially-downloaded piece, then fall
                # back to the rarest needed piece.
                for index, offset, sha1 in self._partial:
                    if index in of_interest:
                        self._partial.remove((index, offset, sha1))
                        self._interested[peer] = (index, offset, sha1,
                                                  self._tick)
                        self._show_interest(peer)
                        return
                for _, _, index in self._rarest():
                    if index in of_interest and index not in dont_consider:
                        self._interested[peer] = (index, 0, hashlib.sha1(),
                                                  self._tick)
                        self._show_interest(peer)
                        return
            if peer not in self._interested and peer.is_interested():
                logger.debug("Expressing lack of interest in peer {}"
                             .format(str(peer.addr())))
                peer.not_interested()
                self._connect_to_peers(1)

    def _request(self, peer):
        # Promote the peer from interested to requesting and ask for the
        # next block of its assigned piece.
        if peer in self._interested:
            index, offset, sha1, _ = self._interested[peer]
            del self._interested[peer]
            self._requesting[peer] = (index, offset, sha1, self._tick, 0)

        index, received_bytes, _, _, _ = self._requesting[peer]

        bytes_to_request = self._bytes_to_request(index, received_bytes)
        logger.debug("Requesting pc: {} off: {} len: {} from {}"
                     .format(index, received_bytes, bytes_to_request,
                             str(peer.addr())))
        peer.request(index, received_bytes, bytes_to_request)

    def _is_last_piece(self, index):
        # True for the final (possibly short) piece of the torrent.
        return index == self._metainfo.num_pieces-1

    def _length_of_last_piece(self):
        # The final piece holds whatever is left after the full-size pieces.
        return (self._metainfo.total_length -
                (self._metainfo.num_pieces-1)*self._metainfo.piece_length)

    def _length_of_piece(self, index):
        if self._is_last_piece(index):
            return self._length_of_last_piece()
        else:
            return self._metainfo.piece_length

    def _in_last_block(self, index, offset):
        # True when fewer than _BLOCK_SIZE bytes remain after offset.
        if self._is_last_piece(index):
            piece_length = self._length_of_last_piece()
        else:
            piece_length = self._metainfo.piece_length

        return piece_length-offset < _BLOCK_SIZE

    def _bytes_to_request(self, index, offset):
        # Full blocks except for the tail of a piece.
        if not self._in_last_block(index, offset):
            return _BLOCK_SIZE
        else:
            return self._length_of_piece(index) - offset

    def info_hash(self):
        """Return the torrent's info hash from its metainfo."""
        return self._metainfo.info_hash

    # PeerProxy callbacks

    def get_bitfield(self):
        """Return this client's own piece bitfield."""
        return self._have

    def peer_unconnected(self, peer):
        """Drop a disconnected peer and try to replace it."""
        logger.info("Peer {} is unconnected".format(str(peer.addr())))
        self._remove_peer(peer)
        self._connect_to_peers(1)

    def peer_bitfield(self, peer, bitfield):
        """Record the full piece bitfield a peer announced."""
        # Validate the bitfield
        length = len(bitfield)
        if (length < self._metainfo.num_pieces or
            (length > self._metainfo.num_pieces and
             bitfield[self._metainfo.num_pieces:length].any(1))):
            logger.debug("Invalid bitfield from peer {}"
                         .format(str(peer.addr())))
            peer.drop_connection()
            self._remove_peer(peer)
            self._connect_to_peers(1)
            return

        # Set the peer's bitfield and updated needed to reflect which pieces
        # the peer has
        logger.debug("Peer at {} sent bitfield".format(str(peer.addr())))
        self._bitfields[peer] = bitfield[0:self._metainfo.num_pieces]
        pieces = list(self._bitfields[peer].findall('0b1'))
        for piece in pieces:
            if piece in self._needed:
                occurences, peers = self._needed[piece]
                if peer not in peers:
                    peers.append(peer)
                    self._needed[piece] = (occurences+1, peers)

        # Check whether there may be interest obtaining a piece from this peer
        self._check_interest(peer)

    def peer_has(self, peer, index):
        """Record a single-piece 'have' announcement from a peer."""
        # Update the peer's bitfield and needed to reflect the availability
        # of the piece
        logger.debug("Peer at {} has piece {}".format(str(peer.addr()), index))
        if index < self._metainfo.num_pieces:
            self._bitfields[peer][index] = 1
        else:
            raise IndexError

        if index in self._needed:
            occurences, peers = self._needed[index]
            if peer not in peers:
                peers.append(peer)
                self._needed[index] = (occurences+1, peers)

            # Check whether there may be interest obtaining a piece from this
            # peer
            self._check_interest(peer)

    def peer_choked(self, peer):
        """Handle a choke: abandon interest or park download progress."""
        logger.debug("Peer {} choked".format(str(peer.addr())))
        if peer in self._interested:
            del self._interested[peer]
        elif peer in self._requesting:
            # When choked in the middle of obtaining a piece, save the
            # progress in the partial list
            index, offset, sha1, _, _ = self._requesting[peer]
            self._partial.append((index, offset, sha1))
            del self._requesting[peer]

    def peer_unchoked(self, peer):
        """Start requesting from a peer we were already interested in."""
        logger.debug("Peer {} unchoked".format(str(peer.addr())))
        if peer in self._interested:
            self._request(peer)

    def peer_sent_block(self, peer, index, begin, buf):
        """Consume a received block: write it, hash it, and either request
        the next block or finalize the completed piece."""
        if peer not in self._requesting:
            # If a peer is very slow in responding, a block could come after
            # it has timed out.  Just ignore the data at this point and
            # ignore the slow peer
            logger.debug("Received block from peer {} which has timed out"
                         .format(str(peer.addr())))
            return

        piece, received_bytes, sha1, _, _ = self._requesting[peer]
        if piece == index and begin == received_bytes:
            # When the next expected block is received, update the hash value
            # and write the block to file
            sha1.update(buf)
            self._filemanager.write_block(index, begin, buf)
            self._requesting[peer] = (piece, received_bytes + len(buf),
                                      sha1, self._tick, 0)

            if received_bytes + len(buf) < self._length_of_piece(index):
                # Request the next block in the piece
                self._request(peer)
            else:
                # On receipt of the last block in the piece, verify the hash
                # and update the records to reflect receipt of the piece
                if sha1.digest() == self._metainfo.piece_hash(index):
                    logger.info("Successfully got piece {} from {}"
                                .format(index, str(peer.addr())))
                    del self._needed[index]
                    percent = 100 * (1 - (len(self._needed) /
                                          float(self._metainfo.num_pieces)))
                    print "{0}: Downloaded {1:1.4f}%".format(self._filename,
                                                             percent)
                    self._have[index] = 1
                else:
                    logger.info("Unsuccessfully got piece {} from {}"
                                .format(index, str(peer.addr())))
                del self._requesting[peer]

                if self._needed != {}:
                    # Try to find another piece for this peer to get
                    self._check_interest(peer)
                else:
                    logger.info("Successfully downloaded entire torrent {}"
                                .format(self._filename))
                    self._client.download_complete(self._filename)

    def peer_interested(self, peer):
        # Uploading is not implemented; ignore.
        pass

    def peer_not_interested(self, peer):
        # Uploading is not implemented; ignore.
        pass

    def peer_request(self, peer, index, begin, length):
        # Uploading is not implemented; ignore.
        pass

    def peer_canceled(self, peer, index, begin, length):
        # Uploading is not implemented; ignore.
        pass

    # Reactor callback

    def timer_event(self):
        """Periodic tick: re-arm the timer and expire stalled peers.

        NOTE(review): both loops delete from the dict being iterated via
        ``.items()`` -- safe in Python 2 where items() returns a list copy;
        would raise RuntimeError under Python 3.
        """
        self._reactor.schedule_timer(_TIMER_INTERVAL, self.timer_event)
        self._tick += 1

        # For any peers that have been interested but unchoked for an
        # excessive period of time, stop being interested, free up assigned
        # piece and connect to another peer
        for peer, (_, _, _, tick) in self._interested.items():
            if tick + 4 == self._tick:
                logger.debug("Timed out on interest for peer {}"
                             .format(str(peer.addr())))
                peer.not_interested()
                del self._interested[peer]
                self._connect_to_peers(1)

        # For any peer that has an outstanding request for an excessive period
        # of time, resend the request message in case it got lost or is being
        # ignored
        for peer, (index, offset, sha1, tick, retries) \
                in self._requesting.items():
            if tick + 5 == self._tick:
                logger.debug("Timed out on request for peer {}"
                             .format(str(peer.addr())))
                if retries < _MAX_RETRIES:
                    self._requesting[peer] = (index, offset, sha1,
                                              self._tick, retries+1)
                    self._request(peer)
                else:
                    self._partial.append((index, offset, sha1))
                    del self._requesting[peer]
                    peer.not_interested()
                    self._connect_to_peers(1)
Beispiel #46
0
 def react_external(self, event):
     """Raise an alert for an external event, tagged with its distance."""
     Reactor.react(Alert(event, self.compute_distance(event.location)))
Beispiel #47
0
 def react_internal(self, event):
     """Raise an alert for an internally generated event."""
     Reactor.react(Alert(event))
Beispiel #48
0
def run_main(listen_fd):
    """Register *listen_fd* with a fresh Reactor for input/disconnect events.

    Fix: uses ``except ... as`` (valid on Python 2.6+ and 3.x) instead of the
    Python-2-only ``except select.error, msg`` comma form, which is a syntax
    error on Python 3.
    """
    try:
        ev_fd = Reactor()
        ev_fd.register(listen_fd.fileno(), ev_fd.EV_IN | ev_fd.EV_DISCONNECTED)
    except select.error as msg:
        logger.error(msg)
Beispiel #49
0
class PathFinder:
    def __init__(self,
                 carbon_only=False,
                 pruning_method=None,
                 ignore_chirality=True,
                 use_antimotifs=True,
                 outstream=sys.stderr,
                 reaction_database_fname="../rec/reaction_templates.dat"):
        """Record the search options and build the Reactor that applies the
        reaction templates."""
        self.carbon_only = carbon_only
        self.pruning_method = pruning_method
        self.ignore_chirality = ignore_chirality
        self.use_antimotifs = use_antimotifs
        self.outstream = outstream
        self.reaction_database_fname = reaction_database_fname
        self.reactor = Reactor(carbon_only=carbon_only,
                               ignore_chirality=ignore_chirality,
                               use_antimotifs=use_antimotifs,
                               reaction_database_fname=reaction_database_fname)

    def balance_reaction(self, substrate, product):
        """ Balances the reaction (by counting atoms)

            Shortfalls are made up by appending CO2, H2O or PO3 to whichever
            side is missing them; any other element imbalance raises.
        """
        # NOTE(review): the substrate side calls compound2graph() while the
        # product side calls compounds2graph() -- one of these names looks
        # like a typo; confirm against the graph-utility module.
        atom_gap = compound2graph(substrate).node_bag() - compounds2graph(
            product).node_bag()
        extra_bag = bag.Bag()

        # Surplus carbon becomes CO2; the H2O count is derived from the
        # remaining O/N gap after accounting for the oxygen in that CO2.
        extra_bag['CO2'] = atom_gap['C']
        extra_bag['H2O'] = atom_gap['O'] + atom_gap['N'] - 2 * atom_gap['C']
        extra_bag['PO3'] = atom_gap['PO3']
        for (atom, count) in atom_gap.itercounts():
            if (not atom in ['C', 'O', 'N', 'PO3'] and count != 0):
                raise Exception(
                    "cannot balance the number of '%s' atoms, between %s and %s"
                    % (atom, substrate, product))

        # Positive counts pad the product side, negative counts the substrate
        # side.
        for (metabolite, count) in extra_bag.itercounts():
            if (count > 0):
                product += (" + " + metabolite) * count
            if (count < 0):
                substrate += (" + " + metabolite) * (-count)

        return (substrate, product)

    def verify_hash(self, hash):
        """Returns True iff the hash passes a basic test (based on general requirements for pathways)
           This method is used for pruning the search tree.
        """
        if (
                self.pruning_method == 'PP'
        ):  # this method has the same assumptions as Melendez-Hevia's paper about the pentose phosephate cycle
            for (nodes, bonds) in parse_hash(
                    hash):  # check each of the molecules in the hash
                node_bag = bag.Bag()
                for atom in nodes:
                    (base_atom, valence, hydrogens, charge,
                     chirality) = parse_atom(atom)
                    node_bag[base_atom] += 1

                if (node_bag['C']
                        in [1, 2]):  # this is a 1 or 2 carbon sugar - invalid!
                    return False
                elif (node_bag['C'] > 0 and node_bag['PO3']
                      == 0):  # this is a unphosphorylated sugar - invalid!
                    return False
                elif (node_bag['C'] == 0
                      ):  # this is not a sugar (might be PO3 or H2O) - valid!
                    pass
                else:  # this is a phosphorylated sugar with at least 3 carbons - valid!
                    pass

        return True

    def prune_product_list(self, prod_list):
        unique_substrate_product_pairs = set([])
        verified_list = []
        count_failed_verification = 0
        count_hash_duplications = 0

        for (h_substrate, G_product, rid, mapping) in prod_list:
            h_product = G_product.hash(ignore_chirality=self.ignore_chirality)

            if (not self.verify_hash(h_product)):
                count_failed_verification += 1
            elif ((h_substrate, h_product) in unique_substrate_product_pairs):
                count_hash_duplications += 1
            else:
                verified_list.append((h_substrate, G_product, rid, mapping))
                unique_substrate_product_pairs.add((h_substrate, h_product))

        return verified_list

    def generate_new_compounds(self,
                               compounds,
                               write_progress_bar=True,
                               backward=False):
        """ Produce a list of all the new products that can be derived from the given compounds
            direction can be: "both", "forward", "backward"

            Returns the pruned list of (substrate hash, product graph,
            reaction id, mapping) tuples.
        """
        new_product_list = []
        total_count = len(compounds)

        if (write_progress_bar):
            n_dots = 80
            n_dots_written = 0
            self.outstream.write("\t\t- [")

        counter = 0
        # NOTE: Python 2 code -- dict.iteritems() below, and the progress-bar
        # arithmetic relies on integer '/' division.
        for (h, G) in compounds.iteritems():
            if (write_progress_bar):
                # Emit just enough dots to keep the 80-column bar
                # proportional to the fraction of compounds processed so far.
                dots_to_write = (counter * n_dots /
                                 total_count) - n_dots_written
                self.outstream.write("." * dots_to_write)
                n_dots_written += dots_to_write
                counter += 1

            for (G_product, rid,
                 mapping) in self.reactor.apply_all_reactions(G, backward):
                new_product_list.append((h, G_product, rid, mapping))

        if (write_progress_bar):
            self.outstream.write("." * (n_dots - n_dots_written) + "]\n")

        return self.prune_product_list(new_product_list)

    def expand_tree(self,
                    compound_map,
                    set_of_processed_compounds,
                    reaction_tree=None,
                    backward=False):
        """ Expands the tree of compounds by one level
            * reaction_tree is a multi-map, where the keys are compound hashes, and the values are
              lists if 3-tuples, containing (predecessor hash, reaction_id, reaction_mapping)
              describing the reaction from the predecessor to the current compound (in the key).
            * compound_map is a map from hashes to ChemGraphs, because we need the graph in order
              to apply all reactions to it. We discard it in the next round of expand_tree to
              save memory.
            * set_of_processed_compounds is a set of all the hashes that have been processed,
              i.e. entered the compound_map in an earlier stage. We need to know them in order
              not to 'expand' the same compound twice. Note the it is common for both 
              substrate and product compound maps.
        """

        new_compound_list = self.generate_new_compounds(compound_map, backward)
        compound_map.clear()
        for (h_predecessor, G, reaction_id, mapping) in new_compound_list:
            h_compound = G.hash(ignore_chirality=self.ignore_chirality)
            if (h_compound not in set_of_processed_compounds):
                compound_map[h_compound] = G
                set_of_processed_compounds.add(h_compound)

            if (reaction_tree != None):
                # add the reaction to the hash
                if (not reaction_tree.has_key(h_compound)):
                    reaction_tree[h_compound] = []
                reaction_tree[h_compound] += [(h_predecessor, reaction_id,
                                               mapping)]

    def reaction_DFS(self, reaction_tree, h, depth):
        """ Returns all the pathways that lead from a seed to the given compound (h).

            reaction_tree - a dict mapping compound hashes to lists of
                            (predecessor hash, reaction_id, mapping) tuples
                            describing reactions that create them
            h             - the hash of the compound to be created
            depth         - the maximum number of reactions in the returned paths

            Each returned pathway is a list whose first element is a seed hash,
            followed by (reaction_id, mapping) pairs in application order.
        """
        if (depth < 0):
            # exceeded the allowed depth without reaching a seed, i.e. a dead-end
            return []

        pathways = []
        # 'mapping' (previously 'map') renamed so the builtin is not shadowed
        for (h_predecessor, rid, mapping) in reaction_tree[h]:
            if (h_predecessor is None):
                # 'h' can be created from nothing, i.e. it is a seed
                pathways += [[h]]
            else:
                # recurse to the predecessor, then append this reaction step
                for pathway in self.reaction_DFS(reaction_tree, h_predecessor,
                                                 depth - 1):
                    pathways += [pathway + [(rid, mapping)]]

        return pathways

    def find_shortest_pathway(self,
                              substrates,
                              products,
                              max_levels=4,
                              stop_after_first_solution=False):
        """input is a list of substrates and a list of products
           output is the shortest path between any of the substrates to any of the products

           Bidirectional search: the substrate tree is expanded forward and
           the product tree backward, always growing the shallower tree,
           until both trees contain a common ('bridging') compound or
           max_levels expansions have been spent.

           Returns (original_compound_map, possible_pathways, total_depth);
           possible_pathways is [] and total_depth is -1 when no path exists.
           Written for Python 2 (print >> stream syntax).
        """
        # reaction_tree is a dictionary mapping each compound (represented by its hash) to a list,
        # the first value is the hash of the same compound with the ignore-attributes flag on
        # the second value in the list is the depth of the compound in the tree
        # the following members in the list are (predecessor, reaction) pairs, i.e.
        # predecessor - the substrate in the reaction to create this product
        # reaction    - the reaction for creating the product from the substrate

        if (max_levels < 1):
            raise Exception("max_levels must be at least 1")

        # a map containing only the new compounds (from both trees), mapping hashes to ChemGraphs
        # in order to save memory, only hashes of old compounds are saved, and the ChemGraphs discarded
        original_compound_map = {}
        set_of_processed_compounds = set()
        substrate_reaction_tree = {}
        product_reaction_tree = {}
        current_substrate_map = {}   # current frontier of the substrate tree
        current_product_map = {}     # current frontier of the product tree

        # seed the substrate tree; (None, -1, []) marks a root with no predecessor
        for G in substrates:
            G_temp = G.clone()
            if (self.ignore_chirality):
                G_temp.reset_chiralities()
            h = G_temp.hash(ignore_chirality=self.ignore_chirality)
            substrate_reaction_tree[h] = [(None, -1, [])]
            original_compound_map[h] = G_temp
            current_substrate_map[h] = G_temp

            print >> self.outstream, "Substrate: " + h

        # seed the product tree the same way
        for G in products:
            G_temp = G.clone()
            if (self.ignore_chirality):
                G_temp.reset_chiralities()
            h = G_temp.hash(ignore_chirality=self.ignore_chirality)
            product_reaction_tree[h] = [(None, -1, [])]
            original_compound_map[h] = G_temp
            current_product_map[h] = G_temp
            print >> self.outstream, "Product: " + h

        time_per_compound = 0
        substrate_map_depth = 0
        product_map_depth = 0
        # the loop body runs at least once (max_levels >= 1), so
        # bridging_compounds is always bound when the loop exits
        while (substrate_map_depth + product_map_depth < max_levels):
            print >> self.outstream, "\t*** Level #%d" % (
                substrate_map_depth + product_map_depth + 1),
            begin_time = time.time()
            # expand whichever tree is currently shallower
            if (substrate_map_depth <= product_map_depth):
                # frontier size is taken BEFORE expansion (used for the
                # timing estimate and the dead-end check below)
                num_current_compounds = len(current_substrate_map)
                print >> self.outstream, "- estimated time: %.2f sec" % (
                    time_per_compound * len(current_substrate_map))
                self.expand_tree(current_substrate_map,
                                 set_of_processed_compounds,
                                 reaction_tree=substrate_reaction_tree,
                                 backward=False)
                substrate_map_depth += 1
            else:
                num_current_compounds = len(current_product_map)
                print >> self.outstream, "- estimated time: %.2f sec" % (
                    time_per_compound * len(current_product_map))
                self.expand_tree(current_product_map,
                                 set_of_processed_compounds,
                                 reaction_tree=product_reaction_tree,
                                 backward=True)
                product_map_depth += 1

            if (num_current_compounds == 0):
                print >> self.outstream, "Reached a dead end, no new compounds can be created..."
                return (original_compound_map, [], -1)

            elapsed_time = float(time.time() - begin_time)
            time_per_compound = elapsed_time / num_current_compounds
            print >> self.outstream, "\t\t- %d substrates + %d products" % (
                len(substrate_reaction_tree), len(product_reaction_tree))

            # compounds reachable from both sides bridge the two trees
            bridging_compounds = set(substrate_reaction_tree.keys()) & set(
                product_reaction_tree.keys())
            if (stop_after_first_solution and len(bridging_compounds) > 0):
                break

        if (bridging_compounds != set()):
            print >> self.outstream, "\t*** found %d bridging compounds" % len(
                bridging_compounds)
            possible_pathways = []

            # for each bridging compound, find the pair of pathways list leading to it
            # one from the substrate and one from the product
            for h_bridge in bridging_compounds:
                # gather all the possible pathways that lead from the substrates
                # to the bridging compound, using the substrate reaction-tree
                substrate_pathways = self.reaction_DFS(substrate_reaction_tree,
                                                       h_bridge,
                                                       substrate_map_depth)

                # the same but for the products reaction-tree
                product_pathways = self.reaction_DFS(product_reaction_tree,
                                                     h_bridge,
                                                     product_map_depth)

                possible_pathways.append(
                    (substrate_pathways, product_pathways, h_bridge))
            return (original_compound_map, possible_pathways,
                    substrate_map_depth + product_map_depth)
        else:
            print >> self.outstream, "No path was found, even after %d levels" % max_levels
            return (original_compound_map, [], -1)

    def find_distance(self, substrates, products, max_levels=4):
        """input is a list of substrates and a list of products
           output is the shortest path between any of the substrates to any of the products

           Like find_shortest_pathway(), but only measures reachability: no
           reaction trees are kept, only sets of visited compound hashes.
           Returns the level (total number of expansions) at which the two
           searches first share a compound, or -1 when they never do.
           Written for Python 2 (print >> stream syntax).
        """
        # reaction_tree is a dictionary mapping each compound (represented by its hash) to a list,
        # the first value is the hash of the same compound with the ignore-attributes flag on
        # the second value in the list is the depth of the compound in the tree
        # the following members in the list are (predecessor, reaction) pairs, i.e.
        # predecessor - the substrate in the reaction to create this product
        # reaction    - the reaction for creating the product from the substrate

        if (max_levels < 1):
            raise Exception("max_levels must be at least 1")

        set_of_processed_substrates = set()
        set_of_processed_products = set()
        current_substrate_map = {}   # current frontier of the substrate search
        current_product_map = {}     # current frontier of the product search

        for G in substrates:
            G_temp = G.clone()
            if (self.ignore_chirality):
                G_temp.reset_chiralities()
            h = G_temp.hash(ignore_chirality=self.ignore_chirality)
            set_of_processed_substrates.add(h)
            current_substrate_map[h] = G_temp

            print >> self.outstream, "Substrate: " + h

        for G in products:
            G_temp = G.clone()
            if (self.ignore_chirality):
                G_temp.reset_chiralities()
            h = G_temp.hash(ignore_chirality=self.ignore_chirality)
            set_of_processed_products.add(h)
            current_product_map[h] = G_temp

            print >> self.outstream, "Product: " + h

        time_per_compound = 0
        for level in range(1, max_levels + 1):
            print >> self.outstream, "\t*** Level #%d" % level,
            begin_time = time.time()
            # odd levels expand the product side backward, even levels the
            # substrate side forward
            if (level % 2 == 0):
                print >> self.outstream, "- estimated time: %.2f sec" % (
                    time_per_compound * len(current_substrate_map))
                self.expand_tree(current_substrate_map,
                                 set_of_processed_substrates,
                                 backward=False)
                # frontier size AFTER expansion, i.e. the number of NEW compounds
                num_current_compounds = len(current_substrate_map)
            else:
                print >> self.outstream, "- estimated time: %.2f sec" % (
                    time_per_compound * len(current_product_map))
                self.expand_tree(current_product_map,
                                 set_of_processed_products,
                                 backward=True)
                num_current_compounds = len(current_product_map)

            if (num_current_compounds == 0):
                print >> self.outstream, "Reached a dead end, no new compounds can be created..."
                return -1

            elapsed_time = float(time.time() - begin_time)
            time_per_compound = elapsed_time / num_current_compounds
            print >> self.outstream, "\t\t- %d substrates + %d products" % (
                len(set_of_processed_substrates),
                len(set_of_processed_products))

            # the searches meet when a hash has been seen from both sides
            bridging_compounds = set_of_processed_substrates & set_of_processed_products
            if (len(bridging_compounds) > 0):
                print >> self.outstream, "\t*** found %d bridging compounds" % len(
                    bridging_compounds)
                return level

        print >> self.outstream, "No path was found, even after %d levels" % max_levels
        return -1

    def pathway2text(self, G_subs, expanded_reaction_list):
        """ Render a pathway as a plain-text trace.

            Starting from a clone of G_subs, applies each step in
            expanded_reaction_list (a list of (rid, mapping, reaction_list)
            triples, as produced by expand_rid_list()) and logs every
            intermediate compound.
            Returns (text, final_graph).
        """
        G = G_subs.clone()
        lines = []

        for (rid, mapping, reaction_list) in expanded_reaction_list:
            # log the compound as it looks before this reaction step
            lines.append("%s (%s) - %s : %s\n" %
                         (str(G), graph2compound(G, self.ignore_chirality),
                          str(rid), str(mapping)))
            for reaction in reaction_list:
                # log each subreaction, then apply it (mutates G in place)
                lines.append("\t%s (%s) - %s\n" %
                             (str(G), graph2compound(G, self.ignore_chirality),
                              str(reaction.tostring(mapping))))
                reaction.apply(G, mapping)

            G.update_attributes()
            if self.ignore_chirality:
                G.reset_chiralities()

        # finally, log the end product of the whole pathway
        lines.append("%s (%s)\n" %
                     (str(G), graph2compound(G, self.ignore_chirality)))
        return ("".join(lines), G)

    def pathway2svg(self,
                    G_subs,
                    expanded_reaction_list,
                    size_x=300,
                    size_y=150,
                    font_size=10):
        """ Render a pathway as an SVG scene.

            G_subs - the starting compound graph (cloned, not mutated)
            expanded_reaction_list - (rid, mapping, reaction_list) triples as
                produced by expand_rid_list()
            size_x, size_y, font_size - geometry of each compound drawing

            Reactions whose rid equals the string 'hidden' are still applied
            to the graph but are not drawn: neither their arrow nor the
            resulting compound appears in the scene.
            Returns (scene, final_graph).
        """
        num_reactions = len(expanded_reaction_list)
        num_compounds = len(expanded_reaction_list) + 1
        gap_size_x = 100
        gap_size_y = 15
        scene = Scene()

        # first add all the compounds to the graph
        i = 0
        curr_x = 0
        G = G_subs.clone()
        # rid holds the id of the reaction applied in the PREVIOUS iteration;
        # None on the first pass, so the initial compound is always drawn
        rid = None

        while True:
            if (rid != 'hidden'):
                scene.add(G.svg(Scene(size_x, size_y, font_size)),
                          offset=(curr_x, gap_size_y))

                curr_x += size_x

            if (i == len(expanded_reaction_list)):
                break

            (rid, mapping, reaction_list) = expanded_reaction_list[i]
            for reaction in reaction_list:
                reaction.apply(G, mapping)

            G.update_attributes()
            if (self.ignore_chirality):
                G.reset_chiralities()

            if (rid != 'hidden'):
                # draw the arrows for the direction of the reactions
                scene.add(
                    ChemicalArrow((curr_x + 30, size_y / 2),
                                  (curr_x + 70, size_y / 2),
                                  stroke_width=2))
                # reaction name above the arrow, atom mapping below it
                scene.add(
                    Text((curr_x, size_y / 2 - 20),
                         self.reactor.get_reaction_name(rid),
                         font_size,
                         fill_color=red))
                scene.add(
                    Text((curr_x, size_y / 2 + 25),
                         str(mapping),
                         font_size,
                         fill_color=red))
                curr_x += gap_size_x

            # calculate the cost of this reaction
            i += 1

        scene.justify()
        return (scene, G)

    def expand_rid_list(self, rid_list):
        """ Attach the list of subreactions corresponding to each Reaction ID.

            rid_list - a list of (reaction_id, mapping) pairs
            Returns (reaction_id, mapping, subreaction_list) triples, where
            subreaction_list is fetched from the Reactor.
        """
        # 'mapping' (previously 'map') renamed so the builtin is not shadowed
        return [(rid, mapping, self.reactor.get_reaction_list(rid))
                for (rid, mapping) in rid_list]

    def apply_rid_list(self, G, rid_list):
        """ Apply every reaction in rid_list to the graph G, in place.

            rid_list - a list of (reaction_id, mapping) pairs; each id is
            expanded to its subreactions via the Reactor, and each
            subreaction is applied with the given atom mapping.
            Returns the mutated graph G with its attributes refreshed.
        """
        # 'mapping' (previously 'map') renamed so the builtin is not shadowed
        for (rid, mapping) in rid_list:
            subreaction_list = self.reactor.get_reaction_list(rid)
            for subreaction in subreaction_list:
                subreaction.apply(G, mapping)
        G.update_attributes()
        return G

    def reverse_rid_list(self, rid_list):
        """ Reverse a pathway: flip the order of the (reaction_id, mapping)
            pairs and replace each reaction id with its reverse reaction
            (obtained from the Reactor). The mappings are kept as-is.
        """
        # 'mapping' (previously 'map') renamed so the builtin is not shadowed
        return [(self.reactor.reverse_reaction(rid), mapping)
                for (rid, mapping) in reversed(rid_list)]

    def get_all_possible_scenes(self, original_compound_map,
                                possible_pathways):
        """ Build a (cost, scene) pair for every viable pathway.

            For each bridging compound, every substrate-side route is stitched
            to every (reversed) product-side route, sanity-checked against the
            bridge hash, and rendered to an SVG scene via pathway2svg().
            Written for Python 2 (print statement, 'except E, msg' syntax).
            NOTE(review): scene_list is populated but no return statement is
            visible in this excerpt -- the function appears to continue past it.
        """
        def compare_graph_to_hash(G1, h2):
            # hash the graph under this finder's chirality setting, then compare
            h1 = G1.hash(ignore_chirality=self.ignore_chirality)
            return compare_hashes(h1, h2, self.ignore_chirality)

        """ returns a list of pairs of (cost, scene) which is a graphical representation of each possible pathway
        """

        scene_list = []

        # prepare the SVG scenes for all the possible pathways, and calculate their cost
        for (substrate_pathways, product_pathways,
             h_bridge) in possible_pathways:
            #            print >> self.outstream, "Bridge: " + h_bridge
            for subs_path in substrate_pathways:
                # subs_path[0] is the seed hash, the rest are (rid, mapping) pairs
                G_subs = original_compound_map[subs_path[0]]
                subs_reaction_list = self.expand_rid_list(subs_path[1:])
                try:
                    (subs_log,
                     G_last_subs) = self.pathway2text(G_subs.clone(),
                                                      subs_reaction_list)
                except ReactionException, msg:
                    print >> self.outstream, msg
                    continue

#                print >> self.outstream, "*** SUBSTRATE LOG: \n", subs_log
#                if (G_last_subs.hash(ignore_chirality=self.ignore_chirality) != h_bridge):
                # sanity check: replaying the route must end at the bridge compound
                if (compare_graph_to_hash(G_last_subs, h_bridge) != 0):
                    print "ERROR:"
                    print "subs:      ", G_subs.hash(
                        ignore_chirality=self.ignore_chirality)
                    print "last_subs: ", G_last_subs.hash(
                        ignore_chirality=self.ignore_chirality)
                    print "bridge:    ", h_bridge
                    # NOTE(review): sys.exit(-1) makes the print/raise below unreachable
                    sys.exit(-1)
                    print >> self.outstream, "G_last_subs != G_bridge, check the DFS function..."
                    raise Exception(
                        "G_last_subs != G_bridge, check the DFS function...")

                for prod_path in product_pathways:
                    G_prod = original_compound_map[prod_path[0]]
                    prod_reaction_list = self.expand_rid_list(prod_path[1:])
                    reverse_prod_reaction_list = self.expand_rid_list(
                        self.reverse_rid_list(prod_path[1:]))

                    try:
                        (prod_log, G_last_prod) = self.pathway2text(
                            G_prod.clone(), prod_reaction_list)
                    except ReactionException, msg:
                        print >> self.outstream, msg
                        continue


#                    print >> self.outstream, "*** PRODUCT LOG: \n", prod_log
#                    if (G_last_prod.hash(ignore_chirality=self.ignore_chirality) != h_bridge):
                    # same sanity check on the product side
                    if (compare_graph_to_hash(G_last_prod, h_bridge) != 0):
                        print "ERROR:"
                        print "subs:      ", G_subs.hash(
                            ignore_chirality=self.ignore_chirality)
                        print "prod:      ", G_prod.hash(
                            ignore_chirality=self.ignore_chirality)
                        print "last_prod: ", G_last_prod.hash(
                            ignore_chirality=self.ignore_chirality)
                        print "bridge:    ", h_bridge
                        # NOTE(review): sys.exit(-1) makes the print/raise below unreachable
                        sys.exit(-1)
                        print >> self.outstream, "G_last_prod != G_bridge, check the DFS function..."
                        raise Exception(
                            "G_last_prod != G_bridge, check the DFS function..."
                        )

                    # glue step between the two half-pathways; presumably
                    # aligns atom numbering between the two bridge graphs --
                    # confirm against Reactor.get_permutation_reaction
                    perm_reaction = self.reactor.get_permutation_reaction(
                        G_last_subs, G_last_prod)
                    full_reaction_list = subs_reaction_list + [
                        perm_reaction
                    ] + reverse_prod_reaction_list
                    try:
                        (pathway_scene,
                         G_last) = self.pathway2svg(G_subs, full_reaction_list)
                    except ReactionException, msg:
                        print >> self.outstream, msg
                        continue

                    # cost counts the visible reaction steps only (the
                    # permutation glue step is excluded)
                    cost = len(subs_reaction_list) + len(
                        reverse_prod_reaction_list)
                    scene_list.append((cost, pathway_scene))
Beispiel #50
0
#!/usr/bin/python
# -*- coding:utf-8 -*-
__author__ = '*****@*****.**'
from reactor import Reactor


if __name__ == '__main__':
    # Entry point: instantiate the project's Reactor and hand control to
    # its main loop.
    reactor = Reactor()
    # NOTE(review): 'server_forever' may be a typo for the conventional
    # 'serve_forever' -- confirm against the reactor module's API.
    reactor.server_forever()
Beispiel #51
0
 def __init__(self, sock):
     # Wrap an existing socket and register it with a fresh Reactor so this
     # object is notified when the socket becomes readable.
     self._reactor = Reactor()
     self._receiver = None   # no receiver attached yet
     self._output = []       # no pending outgoing data
     self._socket = sock
     self._reactor.register_for_read_events(self)
Beispiel #52
0
class PathFinder:
    def __init__(self, carbon_only=False, pruning_method=None, ignore_chirality=True, use_antimotifs=True, outstream=sys.stderr, reaction_database_fname="../rec/reaction_templates.dat"):
        """ Create a PathFinder.

            carbon_only, use_antimotifs, reaction_database_fname - forwarded
                to the underlying Reactor
            pruning_method - 'PP' enables pentose-phosphate pruning in
                verify_hash(); any other value disables pruning
            ignore_chirality - hash/compare compounds without chirality;
                also forwarded to the Reactor
            outstream - stream used for all progress/log messages
        """
        self.carbon_only = carbon_only
        self.ignore_chirality = ignore_chirality
        self.use_antimotifs = use_antimotifs
        self.pruning_method = pruning_method
        self.outstream = outstream
        self.reaction_database_fname = reaction_database_fname
        # the Reactor applies reaction templates to compound graphs
        self.reactor = Reactor(carbon_only=self.carbon_only, ignore_chirality=self.ignore_chirality, use_antimotifs=self.use_antimotifs, reaction_database_fname=self.reaction_database_fname)

    def balance_reaction(self, substrate, product):
        """ Balances the reaction (by counting atoms)

            substrate, product - compound strings; balancing appends
            ' + CO2', ' + H2O' or ' + PO3' terms to whichever side is short.
            Returns the balanced (substrate, product) pair.
            Raises an Exception if any atom other than C/O/N/PO3 is unbalanced.
        """
        # NOTE(review): the substrate side uses compound2graph() while the
        # product side uses compounds2graph() -- confirm both helpers exist
        # and that the singular/plural asymmetry is intentional.
        atom_gap = compound2graph(substrate).node_bag() - compounds2graph(product).node_bag()
        extra_bag = bag.Bag()

        # one CO2 per missing carbon; H2O covers the remaining O (and N)
        # after CO2's oxygens are accounted for; PO3 groups map one-to-one
        extra_bag['CO2'] = atom_gap['C']
        extra_bag['H2O'] = atom_gap['O'] + atom_gap['N'] - 2 * atom_gap['C']
        extra_bag['PO3'] = atom_gap['PO3']
        for (atom, count) in atom_gap.itercounts():
            if (not atom in ['C', 'O', 'N', 'PO3'] and count != 0):
                raise Exception("cannot balance the number of '%s' atoms, between %s and %s" % (atom, substrate, product))

        # a positive gap means the product side is short, a negative gap the substrate side
        for (metabolite, count) in extra_bag.itercounts():
            if (count > 0):
                product += (" + " + metabolite) * count
            if (count < 0):
                substrate += (" + " + metabolite) * (-count)

        return (substrate, product)
            
    def verify_hash(self, hash):
        """Return True iff the hash passes a basic test (based on general
           requirements for pathways). Used for pruning the search tree.

           Only the 'PP' pruning method filters anything (same assumptions as
           Melendez-Hevia's paper about the pentose phosphate cycle); every
           other setting accepts all hashes.
        """
        if (self.pruning_method != 'PP'):
            return True

        # check each of the molecules encoded in the hash
        for (nodes, bonds) in parse_hash(hash):
            atom_counts = bag.Bag()
            for atom in nodes:
                (base_atom, valence, hydrogens, charge,
                 chirality) = parse_atom(atom)
                atom_counts[base_atom] += 1

            n_carbons = atom_counts['C']
            if (n_carbons == 1 or n_carbons == 2):
                # a 1- or 2-carbon sugar - invalid!
                return False
            if (n_carbons > 0 and atom_counts['PO3'] == 0):
                # an unphosphorylated sugar - invalid!
                return False
            # zero carbons (might be PO3 or H2O), or a phosphorylated sugar
            # with at least 3 carbons - both valid

        return True

    def prune_product_list(self, prod_list):
        """ Filter a raw (h_substrate, G_product, rid, mapping) list.

            Drops products whose hash fails verify_hash(), and keeps only the
            first occurrence of each (substrate hash, product hash) pair.
            Returns the surviving entries in their original order.
        """
        seen_pairs = set()
        verified_list = []
        count_failed_verification = 0
        count_hash_duplications = 0

        for (h_substrate, G_product, rid, mapping) in prod_list:
            h_product = G_product.hash(ignore_chirality=self.ignore_chirality)
            pair = (h_substrate, h_product)

            if (not self.verify_hash(h_product)):
                count_failed_verification += 1
                continue
            if (pair in seen_pairs):
                count_hash_duplications += 1
                continue

            seen_pairs.add(pair)
            verified_list.append((h_substrate, G_product, rid, mapping))

        return verified_list

    def generate_new_compounds(self, compounds, write_progress_bar=True, backward=False):
        """ Produce a list of all the new products that can be derived from the given compounds
            direction can be: "both", "forward", "backward"

            compounds - a dict mapping compound hashes to ChemGraphs
            write_progress_bar - when True, writes a dot progress bar to outstream
            backward - forwarded to the Reactor's apply_all_reactions()
            Returns the pruned list of (substrate hash, product graph, rid, mapping).
            NOTE(review): the 'direction' wording above does not match the
            boolean 'backward' parameter -- there is no "both" mode here.
            Written for Python 2 ('iteritems', integer '/' division).
        """
        new_product_list = []
        total_count = len(compounds)

        if (write_progress_bar):
            n_dots = 80
            n_dots_written = 0
            self.outstream.write("\t\t- [")

        counter = 0
        for (h, G) in compounds.iteritems():
            if (write_progress_bar):
                # Python 2 integer division keeps the bar at <= 80 dots total
                dots_to_write = (counter * n_dots / total_count) - n_dots_written
                self.outstream.write("." * dots_to_write)
                n_dots_written += dots_to_write
                counter += 1

            # every applicable reaction yields one candidate product
            for (G_product, rid, mapping) in self.reactor.apply_all_reactions(G, backward):
                new_product_list.append((h, G_product, rid, mapping))

        if (write_progress_bar):
            self.outstream.write("." * (n_dots - n_dots_written) + "]\n")

        return self.prune_product_list(new_product_list)

    def expand_tree(self, compound_map, set_of_processed_compounds, reaction_tree=None, backward=False):
        """ Expands the tree of compounds by one level
            * reaction_tree is a multi-map, where the keys are compound hashes, and the values are
              lists of 3-tuples, containing (predecessor hash, reaction_id, reaction_mapping)
              describing the reaction from the predecessor to the current compound (in the key).
            * compound_map is a map from hashes to ChemGraphs, because we need the graph in order
              to apply all reactions to it. We discard it in the next round of expand_tree to
              save memory.
            * set_of_processed_compounds is a set of all the hashes that have been processed,
              i.e. entered the compound_map in an earlier stage. We need to know them in order
              not to 'expand' the same compound twice. Note that it is common for both
              substrate and product compound maps.
            * backward - when True, reactions are applied in reverse.
        """
        # BUGFIX: 'backward' was previously passed positionally, where it was
        # consumed by generate_new_compounds()'s 'write_progress_bar'
        # parameter, so the expansion direction was always forward. Pass it
        # by keyword instead.
        new_compound_list = self.generate_new_compounds(compound_map,
                                                        backward=backward)
        # compound_map is reused as the NEXT frontier: clear it, then refill
        # with only the compounds that were never seen before
        compound_map.clear()
        for (h_predecessor, G, reaction_id, mapping) in new_compound_list:
            h_compound = G.hash(ignore_chirality=self.ignore_chirality)
            if (h_compound not in set_of_processed_compounds):
                compound_map[h_compound] = G
                set_of_processed_compounds.add(h_compound)

            if (reaction_tree is not None):
                # add the reaction to the hash (multi-map append);
                # 'in' replaces the Python-2-only dict.has_key()
                if (h_compound not in reaction_tree):
                    reaction_tree[h_compound] = []
                reaction_tree[h_compound] += [(h_predecessor, reaction_id, mapping)]

    def reaction_DFS(self, reaction_tree, h, depth):
        """ Returns all the pathways that lead from a seed to the given compound (h).

            reaction_tree - a dict mapping compound hashes to lists of
                            (predecessor hash, reaction_id, mapping) tuples
                            describing reactions that create them
            h             - the hash of the compound to be created
            depth         - the maximum number of reactions in the returned paths

            Each returned pathway is a list whose first element is a seed hash,
            followed by (reaction_id, mapping) pairs in application order.
        """
        if (depth < 0):
            # exceeded the allowed depth without reaching a seed, i.e. a dead-end
            return []

        pathways = []
        # 'mapping' (previously 'map') renamed so the builtin is not shadowed
        for (h_predecessor, rid, mapping) in reaction_tree[h]:
            if (h_predecessor is None):
                # 'h' can be created from nothing, i.e. it is a seed
                pathways += [[h]]
            else:
                # recurse to the predecessor, then append this reaction step
                for pathway in self.reaction_DFS(reaction_tree, h_predecessor, depth - 1):
                    pathways += [pathway + [(rid, mapping)]]

        return pathways

    def find_shortest_pathway(self, substrates, products, max_levels=4, stop_after_first_solution=False):
        """input is a list of substrates and a list of products
           output is the shortest path between any of the substrates to any of the products

           Bidirectional search: the substrate tree is expanded forward and
           the product tree backward, always growing the shallower tree,
           until both trees contain a common ('bridging') compound or
           max_levels expansions have been spent.

           Returns (original_compound_map, possible_pathways, total_depth);
           possible_pathways is [] and total_depth is -1 when no path exists.
           Written for Python 2 (print >> stream syntax).
        """
        # reaction_tree is a dictionary mapping each compound (represented by its hash) to a list,
        # the first value is the hash of the same compound with the ignore-attributes flag on
        # the second value in the list is the depth of the compound in the tree
        # the following members in the list are (predecessor, reaction) pairs, i.e.
        # predecessor - the substrate in the reaction to create this product
        # reaction    - the reaction for creating the product from the substrate

        if (max_levels < 1):
            raise Exception("max_levels must be at least 1")

        # a map containing only the new compounds (from both trees), mapping hashes to ChemGraphs
        # in order to save memory, only hashes of old compounds are saved, and the ChemGraphs discarded
        original_compound_map = {}
        set_of_processed_compounds = set()
        substrate_reaction_tree = {}
        product_reaction_tree = {}
        current_substrate_map = {}   # current frontier of the substrate tree
        current_product_map = {}     # current frontier of the product tree

        # seed the substrate tree; (None, -1, []) marks a root with no predecessor
        for G in substrates:
            G_temp = G.clone()
            if (self.ignore_chirality):
                G_temp.reset_chiralities()
            h = G_temp.hash(ignore_chirality=self.ignore_chirality)
            substrate_reaction_tree[h] = [(None, -1, [])]
            original_compound_map[h] = G_temp
            current_substrate_map[h] = G_temp

            print >> self.outstream, "Substrate: " + h

        # seed the product tree the same way
        for G in products:
            G_temp = G.clone()
            if (self.ignore_chirality):
                G_temp.reset_chiralities()
            h = G_temp.hash(ignore_chirality=self.ignore_chirality)
            product_reaction_tree[h] = [(None, -1, [])]
            original_compound_map[h] = G_temp
            current_product_map[h] = G_temp
            print >> self.outstream, "Product: " + h

        time_per_compound = 0
        substrate_map_depth = 0
        product_map_depth = 0
        # the loop body runs at least once (max_levels >= 1), so
        # bridging_compounds is always bound when the loop exits
        while (substrate_map_depth + product_map_depth < max_levels):
            print >> self.outstream, "\t*** Level #%d" % (substrate_map_depth + product_map_depth + 1),
            begin_time = time.time()
            # expand whichever tree is currently shallower; frontier size is
            # taken BEFORE expansion (for timing and the dead-end check)
            if (substrate_map_depth <= product_map_depth):
                num_current_compounds = len(current_substrate_map)
                print >> self.outstream, "- estimated time: %.2f sec" % (time_per_compound * len(current_substrate_map))
                self.expand_tree(current_substrate_map, set_of_processed_compounds, reaction_tree=substrate_reaction_tree, backward=False)
                substrate_map_depth += 1
            else:
                num_current_compounds = len(current_product_map)
                print >> self.outstream, "- estimated time: %.2f sec" % (time_per_compound * len(current_product_map))
                self.expand_tree(current_product_map, set_of_processed_compounds, reaction_tree=product_reaction_tree, backward=True)
                product_map_depth += 1

            if (num_current_compounds == 0):
                print >> self.outstream, "Reached a dead end, no new compounds can be created..."
                return (original_compound_map, [], -1)

            elapsed_time = float(time.time() - begin_time)
            time_per_compound = elapsed_time / num_current_compounds
            print >> self.outstream, "\t\t- %d substrates + %d products" % (len(substrate_reaction_tree), len(product_reaction_tree))

            # compounds reachable from both sides bridge the two trees
            bridging_compounds = set(substrate_reaction_tree.keys()) & set(product_reaction_tree.keys())
            if (stop_after_first_solution and len(bridging_compounds) > 0):
                break

        if (bridging_compounds != set()):
            print >> self.outstream, "\t*** found %d bridging compounds" % len(bridging_compounds)
            possible_pathways = []

            # for each bridging compound, find the pair of pathways list leading to it
            # one from the substrate and one from the product
            for h_bridge in bridging_compounds:
                # gather all the possible pathways that lead from the substrates
                # to the bridging compound, using the substrate reaction-tree
                substrate_pathways = self.reaction_DFS(substrate_reaction_tree, h_bridge, substrate_map_depth)

                # the same but for the products reaction-tree
                product_pathways = self.reaction_DFS(product_reaction_tree, h_bridge, product_map_depth)

                possible_pathways.append((substrate_pathways, product_pathways, h_bridge))
            return (original_compound_map, possible_pathways, substrate_map_depth + product_map_depth)
        else:
            print >> self.outstream, "No path was found, even after %d levels" % max_levels
            return (original_compound_map, [], -1)

    def find_distance(self, substrates, products, max_levels=4):
        """Return the shortest path length between any substrate and any product.

        Runs a bidirectional search: the product frontier is expanded on odd
        levels (backward) and the substrate frontier on even levels (forward),
        until the two sets of processed compound hashes intersect
        ("bridging compounds").

        :param substrates: list of substrate compound graphs
        :param products: list of product compound graphs
        :param max_levels: maximum number of expansion levels to try (>= 1)
        :return: the level at which the frontiers first meet, or -1 if a dead
            end is reached or max_levels is exhausted without a bridge
        """
        # reaction_tree is a dictionary mapping each compound (represented by its hash) to a list,
        # the first value is the hash of the same compound with the ignore-attributes flag on
        # the second value in the list is the depth of the compound in the tree
        # the following members in the list are (predecessor, reaction) pairs, i.e.
        # predecessor - the substrate in the reaction to create this product
        # reaction    - the reaction for creating the product from the substrate

        if (max_levels < 1):
            raise Exception("max_levels must be at least 1")
        
        # hashes of every compound seen so far on each side of the search
        set_of_processed_substrates = set()
        set_of_processed_products = set()
        # current frontier on each side: compound hash -> compound graph
        current_substrate_map = {}
        current_product_map = {}
        
        # seed the substrate side with (optionally de-chiralized) clones
        for G in substrates:
            G_temp = G.clone()
            if (self.ignore_chirality):
                G_temp.reset_chiralities()
            h = G_temp.hash(ignore_chirality=self.ignore_chirality)
            set_of_processed_substrates.add(h)
            current_substrate_map[h] = G_temp
            
            print >> self.outstream, "Substrate: " + h
            
        # seed the product side the same way
        for G in products:
            G_temp = G.clone()
            if (self.ignore_chirality):
                G_temp.reset_chiralities()
            h = G_temp.hash(ignore_chirality=self.ignore_chirality)
            set_of_processed_products.add(h)
            current_product_map[h] = G_temp

            print >> self.outstream, "Product: " + h

        # per-compound timing from the previous level, used only for the
        # progress estimate printed at the top of each level
        time_per_compound = 0
        for level in range(1, max_levels+1):
            print >> self.outstream, "\t*** Level #%d" % level,
            begin_time = time.time()
            # alternate sides: even levels expand the substrate tree forward,
            # odd levels expand the product tree backward
            if (level % 2 == 0):
                print >> self.outstream, "- estimated time: %.2f sec" % (time_per_compound * len(current_substrate_map))
                self.expand_tree(current_substrate_map, set_of_processed_substrates, backward=False)
                num_current_compounds = len(current_substrate_map)
            else:
                print >> self.outstream, "- estimated time: %.2f sec" % (time_per_compound * len(current_product_map))
                self.expand_tree(current_product_map, set_of_processed_products, backward=True)
                num_current_compounds = len(current_product_map)
            
            # an empty frontier means no new compounds can ever appear
            if (num_current_compounds == 0):
                print >> self.outstream, "Reached a dead end, no new compounds can be created..."
                return -1
            
            elapsed_time = float(time.time() - begin_time)
            time_per_compound = elapsed_time / num_current_compounds
            print >> self.outstream, "\t\t- %d substrates + %d products" % (len(set_of_processed_substrates), len(set_of_processed_products))
            
            # a compound reachable from both sides bridges the two trees
            bridging_compounds = set_of_processed_substrates & set_of_processed_products
            if (len(bridging_compounds) > 0):
                print >> self.outstream, "\t*** found %d bridging compounds" % len(bridging_compounds)
                return level
            
        print >> self.outstream, "No path was found, even after %d levels" % max_levels
        return -1

    def pathway2text(self, G_subs, expanded_reaction_list):
        """Render a reaction pathway as plain text.

        Starting from a clone of *G_subs*, applies each
        (rid, mapping, reaction_list) entry of *expanded_reaction_list* in
        order, logging the compound state before every step and before every
        individual sub-reaction.

        :param G_subs: the starting compound graph (not mutated; a clone is used)
        :param expanded_reaction_list: list of (rid, mapping, reaction_list)
            triples, as produced by expand_rid_list
        :return: (s, G) where s is the accumulated log text and G is the
            final compound graph after all reactions were applied
        :raises ReactionException: propagated from reaction.apply (callers of
            this method catch it)
        """
        # Cleanup vs. the original: removed the unused num_reactions /
        # num_compounds locals and the manual while-True/index loop.
        G = G_subs.clone()
        s = ""

        for (rid, mapping, reaction_list) in expanded_reaction_list:
            # log the compound as it stands before this reaction step
            s += str(G) + " (" + graph2compound(G, self.ignore_chirality) + ") - " + str(rid) + " : " + str(mapping) + "\n"
            for reaction in reaction_list:
                # log before each sub-reaction, then mutate G in place
                s += "\t" + str(G) + " (" + graph2compound(G, self.ignore_chirality) + ") - " + str(reaction.tostring(mapping)) + "\n"
                reaction.apply(G, mapping)

            G.update_attributes()
            if (self.ignore_chirality):
                G.reset_chiralities()

        # final state after the whole pathway
        s += str(G) + " (" + graph2compound(G, self.ignore_chirality) + ")\n"
        return (s, G)

    def pathway2svg(self, G_subs, expanded_reaction_list, size_x=300, size_y=150, font_size=10):
        """Render a reaction pathway as an SVG scene.

        Starting from a clone of *G_subs*, applies each
        (rid, mapping, reaction_list) entry of *expanded_reaction_list* in
        order, drawing every intermediate compound plus a labelled arrow per
        reaction.  Entries whose rid is the string 'hidden' are applied but
        not drawn.

        :param G_subs: starting compound graph (a clone is mutated, not G_subs)
        :param expanded_reaction_list: list of (rid, mapping, reaction_list)
            triples, as produced by expand_rid_list
        :param size_x, size_y, font_size: layout parameters for each compound
        :return: (scene, G) where scene is the assembled Scene and G is the
            final compound graph after all reactions were applied
        """
        num_reactions = len(expanded_reaction_list)  # NOTE(review): unused
        num_compounds = len(expanded_reaction_list) + 1  # NOTE(review): unused
        gap_size_x = 100
        gap_size_y = 15
        scene = Scene()
        
        # first add all the compounds to the graph
        i = 0
        curr_x = 0
        G = G_subs.clone()
        # rid holds the *previous* step's reaction id; None on the first pass
        # so the starting compound is always drawn
        rid = None

        while True:
            # draw the current compound unless the step that produced it was 'hidden'
            if (rid != 'hidden'):
                scene.add(G.svg(Scene(size_x, size_y, font_size)), offset=(curr_x, gap_size_y))
                
                curr_x += size_x

            # loop exits here after the final compound has been drawn
            if (i == len(expanded_reaction_list)):
                break
            
            # apply the next reaction step, mutating G in place
            (rid, mapping, reaction_list) = expanded_reaction_list[i]
            for reaction in reaction_list:
                reaction.apply(G, mapping)
                
            G.update_attributes()
            if (self.ignore_chirality):
                G.reset_chiralities()

            if (rid != 'hidden'):
                # draw the arrows for the direction of the reactions
                scene.add(ChemicalArrow((curr_x + 30, size_y / 2), (curr_x + 70, size_y / 2), stroke_width=2))
                scene.add(Text((curr_x, size_y / 2 - 20), self.reactor.get_reaction_name(rid), font_size, fill_color=red))
                scene.add(Text((curr_x, size_y / 2 + 25), str(mapping), font_size, fill_color=red))
                curr_x += gap_size_x

            # calculate the cost of this reaction
            i += 1
        
        scene.justify()
        return (scene, G)

    def expand_rid_list(self, rid_list):
        """Attach the sub-reaction list corresponding to each reaction ID.

        :param rid_list: sequence of (rid, mapping) pairs
        :return: list of (rid, mapping, subreaction_list) triples, where
            subreaction_list comes from self.reactor.get_reaction_list(rid)
        """
        # 'mapping' instead of 'map' so the builtin is not shadowed
        return [(rid, mapping, self.reactor.get_reaction_list(rid))
                for (rid, mapping) in rid_list]

    def apply_rid_list(self, G, rid_list):
        """Apply every (rid, mapping) pair in *rid_list* to graph *G*, in order.

        Each reaction ID is expanded into its sub-reactions via the reactor
        and each sub-reaction is applied with its mapping.  G is mutated in
        place; its attributes are refreshed once at the end.

        :return: G (the same, mutated, object) for convenience
        """
        # 'mapping' instead of 'map' so the builtin is not shadowed
        for (rid, mapping) in rid_list:
            subreaction_list = self.reactor.get_reaction_list(rid)
            for subreaction in subreaction_list:
                subreaction.apply(G, mapping)
        G.update_attributes()
        return G
    
    def reverse_rid_list(self, rid_list):
        """Return *rid_list* with each reaction reversed and the step order inverted.

        :param rid_list: sequence of (rid, mapping) pairs
        :return: list of (reversed_rid, mapping) pairs in reverse order
        """
        # 'mapping' instead of 'map' so the builtin is not shadowed
        return [(self.reactor.reverse_reaction(rid), mapping)
                for (rid, mapping) in reversed(rid_list)]

    def get_all_possible_scenes(self, original_compound_map, possible_pathways):
        """ returns a list of pairs of (cost, scene) which is a graphical representation of each possible pathway
        """
        # NOTE(review): the docstring above originally sat *after* the nested
        # helper, where it was a no-op statement; moved here so it is the
        # method's actual docstring.
        def compare_graph_to_hash(G1, h2):
            # re-hash G1 under the current chirality setting and compare to h2
            h1 = G1.hash(ignore_chirality=self.ignore_chirality)
            return compare_hashes(h1, h2, self.ignore_chirality)
        
        scene_list = []

        # prepare the SVG scenes for all the possible pathways, and calculate their cost
        for (substrate_pathways, product_pathways, h_bridge) in possible_pathways:
#            print >> self.outstream, "Bridge: " + h_bridge
            for subs_path in substrate_pathways:
                # subs_path[0] is the hash of the original substrate compound;
                # the remaining entries are the (rid, mapping) steps to the bridge
                G_subs = original_compound_map[subs_path[0]]
                subs_reaction_list = self.expand_rid_list(subs_path[1:])
                try:
                    (subs_log, G_last_subs) = self.pathway2text(G_subs.clone(), subs_reaction_list)
                except ReactionException, msg:
                    print >> self.outstream, msg
                    continue

#                print >> self.outstream, "*** SUBSTRATE LOG: \n", subs_log
#                if (G_last_subs.hash(ignore_chirality=self.ignore_chirality) != h_bridge):
                # sanity check: replaying the pathway must end at the bridge compound
                if (compare_graph_to_hash(G_last_subs, h_bridge) != 0):
                    print "ERROR:"
                    print "subs:      ", G_subs.hash(ignore_chirality=self.ignore_chirality)
                    print "last_subs: ", G_last_subs.hash(ignore_chirality=self.ignore_chirality)
                    print "bridge:    ", h_bridge
                    sys.exit(-1)
                    # NOTE(review): the two lines below are unreachable —
                    # sys.exit(-1) above terminates the process first
                    print >> self.outstream, "G_last_subs != G_bridge, check the DFS function..."
                    raise Exception("G_last_subs != G_bridge, check the DFS function...")

                for prod_path in product_pathways:
                    # same structure as subs_path, but from the product side;
                    # the reversed list is used to walk bridge -> product
                    G_prod = original_compound_map[prod_path[0]]
                    prod_reaction_list = self.expand_rid_list(prod_path[1:])
                    reverse_prod_reaction_list = self.expand_rid_list(self.reverse_rid_list(prod_path[1:]))

                    try:
                        (prod_log, G_last_prod) = self.pathway2text(G_prod.clone(), prod_reaction_list)
                    except ReactionException, msg:
                        print >> self.outstream, msg
                        continue
                
#                    print >> self.outstream, "*** PRODUCT LOG: \n", prod_log
#                    if (G_last_prod.hash(ignore_chirality=self.ignore_chirality) != h_bridge):
                    # sanity check mirroring the substrate side above
                    if (compare_graph_to_hash(G_last_prod, h_bridge) != 0):
                        print "ERROR:"
                        print "subs:      ", G_subs.hash(ignore_chirality=self.ignore_chirality)
                        print "prod:      ", G_prod.hash(ignore_chirality=self.ignore_chirality)
                        print "last_prod: ", G_last_prod.hash(ignore_chirality=self.ignore_chirality)
                        print "bridge:    ", h_bridge
                        sys.exit(-1)
                        # NOTE(review): unreachable — sys.exit(-1) above terminates first
                        print >> self.outstream, "G_last_prod != G_bridge, check the DFS function..."
                        raise Exception("G_last_prod != G_bridge, check the DFS function...")

                    # stitch: substrate->bridge, a permutation reaction at the
                    # bridge, then bridge->product (reversed product pathway)
                    perm_reaction = self.reactor.get_permutation_reaction(G_last_subs, G_last_prod)
                    full_reaction_list = subs_reaction_list + [perm_reaction] + reverse_prod_reaction_list
                    try:
                        (pathway_scene, G_last) = self.pathway2svg(G_subs, full_reaction_list)
                    except ReactionException, msg:
                        print >> self.outstream, msg
                        continue
                        
                    # cost = number of reaction steps (permutation step excluded)
                    cost = len(subs_reaction_list) + len(reverse_prod_reaction_list)
                    scene_list.append((cost, pathway_scene))
        # NOTE(review): this excerpt ends here without returning scene_list —
        # confirm against the full source that the method returns it.
Beispiel #53
0
def run_main(listen_fd):
    """Register *listen_fd* with a fresh Reactor event loop.

    The listening socket's file descriptor is watched for incoming data
    (EV_IN) and disconnects (EV_DISCONNECTED).  A select.error during setup
    is logged and swallowed.

    NOTE(review): this excerpt may be truncated — no event-loop run call is
    visible after registration; confirm against the full source.
    """
    try:
        ev_fd = Reactor()
        ev_fd.register(listen_fd.fileno(), ev_fd.EV_IN | ev_fd.EV_DISCONNECTED)
    except select.error, msg:
        logger.error(msg)
Beispiel #54
0
    def __init__(self, client, filename, port, peer_id):
        """Initialise torrent state for *filename* and begin serving it.

        Parses the metainfo file, determines which pieces already exist on
        disk, registers with the tracker, schedules a periodic timer on the
        Reactor and opens connections to an initial batch of peers.

        :param client: owning client object
        :param filename: path of the .torrent metainfo file
        :param port: port reported to the tracker
        :param peer_id: this client's peer id
        :raises TorrentMgrError: if the metainfo file cannot be read/parsed,
            or the tracker cannot be contacted
        """
        self._client = client
        self._filename = filename
        self._port = port
        self._peer_id = peer_id

        # _peers is a list of peers that the TorrentMgr is trying
        # to communicate with
        self._peers = []

        # _bitfields is a dictionary mapping peers to a bitfield of the pieces
        # each has
        self._bitfields = {}

        try:
            self._metainfo = Metainfo(filename)
        except (IOError, ValueError) as err:
            # build a uniform message from whichever exception type occurred
            if isinstance(err, IOError):
                message = err.strerror+' ('+filename+')'
            else:
                message = err.message+' ('+filename+')'
            logger.error(message)
            raise TorrentMgrError(message)

        # _have is the bitfield for this torrent. It is initialized to reflect
        # which pieces are already available on disk.
        self._filemgr = FileMgr(self._metainfo)
        self._have = self._filemgr.have()

        try:
            self._tracker_proxy = TrackerProxy(self._metainfo, self._port,
                                               self._peer_id)
        except TrackerError as err:
            logger.critical("Could not connect to tracker at {}"
                            .format(self._metainfo.announce))
            logger.debug("    TrackerError: {}".format(err.message))
            raise TorrentMgrError(err.message)

        # _needed is a dictionary of pieces which are still needed.
        # The value for each piece is a tuple of the number of peers which
        # have the piece and a list of those peers.
        self._needed = {piece: (0, []) for piece
                        in list(self._have.findall('0b0'))}

        # _interested is a dictionary of peers to whom interest has been
        # expressed.  The value for each peer is a tuple of the piece that
        # has been reserved for the peer, the number of bytes of the piece that
        # have already been received, the sha1 hash of the bytes received so
        # far and the value of the tick at the time interest was expressed.
        self._interested = {}

        # _requesting is a dictionary of peers to whom a block request has been
        # made.  The value for each peer is a tuple of the piece that is being
        # requested, the number of bytes that have already been received, the
        # sha1 hash of the bytes received so far, the value of the tick at
        # the time the request was made and the number of retries that have
        # been attempted
        self._requesting = {}

        # _partial is a list which tracks pieces that were interrupted while
        # being downloaded.  Each entry is a tuple containing the index of the
        # piece, the number of bytes received so far and the sha1 hash of those
        # bytes.
        self._partial = []

        # periodic housekeeping driven by the reactor timer
        self._reactor = Reactor()
        self._reactor.schedule_timer(_TIMER_INTERVAL, self.timer_event)
        self._tick = 1

        # Python 2 print statement
        print "Starting to serve torrent {}".format(filename)

        self._connect_to_peers(20)
Beispiel #55
0
def first():
    """Selftest: exercise Reactor/Box merging and subtraction edge cases.

    Covers deduplication of identical boxes, merging of edge-adjacent
    slivers on all three axes (forward and reverse), and removal of
    sub-cubes, slabs and corners.  Fails via assert/check on any mismatch.
    """
    print(
        colored(
            "Testcase 1: adding two identical boxes results in only one box",
            "red"))
    R = Reactor()
    R += Box("on x=1..3,y=1..3,z=1..3")
    R += Box("on x=1..3,y=1..3,z=1..3")
    assert (R.realcubes.__repr__() == "[on x=1..3,y=1..3,z=1..3]")

    print(
        colored(
            "Testcase 2: adding various boxes that overlap but are on the edge with the first, also result in only one box",
            "red"))
    R = Reactor()
    R += Box("on x=1..3,y=1..3,z=1..3")

    print(
        colored(
            "        2a: check that adding sliver on x doesn't cause more boxes",
            "yellow"))
    R += Box("on x=1..1,y=1..3,z=1..3")
    check(R.realcubes.__repr__() == "[on x=1..3,y=1..3,z=1..3]",
          [R.size(), R.realcubes, [i.size() for i in R.realcubes]],
          f=lambda: R.savefig())

    print(
        colored(
            "        2c: check that adding sliver on z doesn't cause more boxes ",
            "yellow"))
    R += Box("on x=1..3,y=1..3,z=1..1")
    check(R.realcubes.__repr__() == "[on x=1..3,y=1..3,z=1..3]",
          [R.size(), R.realcubes, [i.size() for i in R.realcubes]],
          f=lambda: R.savefig())

    print(
        colored(
            "        2b: check that adding sliver on y doesn't cause more boxes",
            "yellow"))
    R += Box("on x=1..3,y=1..1,z=1..3")
    check(R.realcubes.__repr__() == "[on x=1..3,y=1..3,z=1..3]",
          [R.size(), R.realcubes, [i.size() for i in R.realcubes]],
          f=lambda: R.savefig())

    # Fix: "red" was previously passed as a second argument to print()
    # instead of to colored(), so the label printed uncolored with a stray
    # trailing "red".
    print(colored("Testcase 3: merge on X edge", "red"))
    R = Reactor()

    R += Box("on x=1..1,y=1..1,z=1..1")
    R += Box("on x=2..3,y=1..1,z=1..1")
    assert (R.realcubes.__repr__() == "[on x=1..3,y=1..1,z=1..1]")

    print("Testcase 4: merge on Y edge")
    R += Box("on x=1..3,y=2..3,z=1..1")
    assert (R.realcubes.__repr__() == "[on x=1..3,y=1..3,z=1..1]")

    print("Testcase 5: merge on Z edge")
    R += Box("on x=1..3,y=1..3,z=2..3")
    assert (R.realcubes.__repr__() == "[on x=1..3,y=1..3,z=1..3]")

    print("Testcase 6: merge on (reverse) X edge")
    R = Reactor()

    R += Box("on x=2..3,y=2..3,z=2..3")
    R += Box("on x=1..1,y=2..3,z=2..3")

    assert (R.realcubes.__repr__() == "[on x=1..3,y=2..3,z=2..3]")

    print("Testcase 7: merge on (reverse) Y edge")
    R += Box("on x=1..3,y=1..1,z=2..3")
    assert (R.realcubes.__repr__() == "[on x=1..3,y=1..3,z=2..3]")

    print("Testcase 8: merge on (reverse) Z edge")
    R += Box("on x=1..3,y=1..3,z=1..1")
    assert (R.realcubes.__repr__() == "[on x=1..3,y=1..3,z=1..3]")

    print(colored("Testcase 9a: Remove a 1x1x1 cube from a 3x3x3 cube", "red"))

    R = Reactor()
    R += Box("on x=1..3,y=1..3,z=1..3")
    assert (R.size() == 27)
    R += Box("off x=1..1,y=1..1,z=1..1")
    check(R.size() == 26, [R.size(), R.realcubes], f=lambda: R.savefig())

    print(colored("Testcase 9b: Remove a 1x1x1 cube from a 3x3x3 cube", "red"))

    R = Reactor()
    R += Box("on x=1..3,y=1..3,z=1..3")
    assert (R.size() == 27)
    R += Box("off x=3..3,y=3..3,z=3..3")
    check(R.size() == 26, [R.size(), R.realcubes], f=lambda: R.savefig())

    print(colored("Testcase 9c: Remove a 1x1x1 cube from a 3x3x3 cube", "red"))

    R = Reactor()
    R += Box("on x=1..3,y=1..3,z=1..3")
    assert (R.size() == 27)
    R += Box("off x=2..2,y=2..2,z=2..2")
    check(R.size() == 26, [R.size(), R.realcubes], f=lambda: R.savefig())

    print(
        colored("Testcase 10x: Remove a slab from the upper end of a slab",
                "red"))

    R = Reactor()
    R += Box("on x=1..3,y=1..1,z=1..1")
    assert (R.size() == 3)
    R += Box("off x=3..3,y=1..1,z=1..1")
    check(R.size() == 2, [R.size(), R.realcubes], f=lambda: R.savefig())

    print(
        colored("Testcase 10y: Remove a slab from the upper end of a slab",
                "red"))

    R = Reactor()
    R += Box("on x=1..1,y=1..3,z=1..1")
    assert (R.size() == 3)
    R += Box("off x=1..1,y=3..3,z=1..1")
    check(R.size() == 2, [R.size(), R.realcubes], f=lambda: R.savefig())

    print(
        colored("Testcase 10z: Remove a slab from the upper end of a slab",
                "red"))

    R = Reactor()
    R += Box("on x=1..1,y=1..1,z=1..3")
    assert (R.size() == 3)
    R += Box("off x=1..1,y=1..1,z=3..3")
    check(R.size() == 2, [R.size(), R.realcubes], f=lambda: R.savefig())

    print(
        colored("Testcase 10xy: Remove a corner from the upper end of a slab",
                "red"))

    R = Reactor()
    R += Box("on x=1..3,y=1..3,z=1..1")
    assert (R.size() == 9)
    R += Box("off x=3..3,y=3..3,z=1..1")
    check(R.size() == 8, [R.size(), R.realcubes], f=lambda: R.savefig())

    print(
        colored("Testcase 10xy: Remove a corner from the upper end of a cube",
                "red"))

    R = Reactor()
    R += Box("on x=1..3,y=1..3,z=1..3")
    assert (R.size() == 27)
    R += Box("off x=3..3,y=3..3,z=3..3")
    check(R.size() == 26, [R.size(), R.realcubes], f=lambda: R.savefig())

    print(colored("Testcase 10x-x: Remove a smaller part of the blob", "red"))
    R = Reactor()
    R += Box("on x=1..4,y=1..1,z=1..1")
    R += Box("off x=2..3,y=1..1,z=1..1")
    check(R.size() == 2,
          [R.size(), R.realcubes, [i.size() for i in R.realcubes]],
          f=lambda: R.savefig())

    print(colored("Testcase 10y-y: Remove a smaller part of the blob", "red"))
    R = Reactor()
    R += Box("on x=1..1,y=1..4,z=1..1")
    R += Box("off x=1..1,y=2..3,z=1..1")
    check(R.size() == 2,
          [R.size(), R.realcubes, [i.size() for i in R.realcubes]],
          f=lambda: R.savefig())

    print(colored("Testcase 10z-z: Remove a smaller part of the blob", "red"))
    R = Reactor()
    R += Box("on x=1..1,y=1..1,z=1..4")
    R += Box("off x=1..1,y=1..1,z=2..3")
    check(R.size() == 2,
          [R.size(), R.realcubes, [i.size() for i in R.realcubes]],
          f=lambda: R.savefig())

    # -----------------

    print(colored("Testcase 10xyz: Remove a part of a blob", "red"))
    # off x=9..11,y=9..11,z=9..11 from on x=10..10,y=10..12,z=10..12
    R = Reactor()
    R += Box("on x=10..10,y=10..12,z=10..12")
    R += Box("off x=9..11,y=9..11,z=9..11")
    check(R.size() == 5,
          [R.size(), R.realcubes, [i.size() for i in R.realcubes]],
          f=lambda: R.savefig())
Beispiel #56
0
def fourth():
    """Selftest: Reactor consistency checks on tricky overlaps, then a longer
    on/off sequence whose total lit size must be 590784.

    Returns the final Reactor so callers can inspect it.
    """
    print("= selftest 4 started")
    R = Reactor()

    R += Box("on x=1..4,y=1..4,z=1..4")
    R += Box("off x=2..3,y=1..4,z=1..4")
    R.consistencycheck()

    R = Reactor()

    # this screws up things... what happens here?
    a = Box('on x=-27..23,y=-28..26,z=-21..29')
    b = Box("on x=-22..26,y=-27..20,z=-29..19")
    a.id = "a"
    b.id = "b"
    R += a
    R += b
    # we lose the slab that lies between y=-28..-27
    # the new cube is smaller than the old one in one of the dimensions
    # check the on set vs the all set
    R.consistencycheck()

    R = Reactor()
    R += Box("on x=-20..26,y=-36..17,z=-47..7")
    R += Box("on x=-20..33,y=-21..23,z=-26..28")
    R += Box("on x=-22..28,y=-29..23,z=-38..16")
    R += Box("on x=-46..7,y=-6..46,z=-50..-1")
    R += Box("on x=-49..1,y=-3..46,z=-24..28")
    R += Box("on x=2..47,y=-22..22,z=-23..27")
    R += Box("on x=-27..23,y=-28..26,z=-21..29")
    R += Box("on x=-39..5,y=-6..47,z=-3..44")
    R += Box("on x=-30..21,y=-8..43,z=-13..34")
    R += Box("on x=-22..26,y=-27..20,z=-29..19")
    R += Box("off x=-48..-32,y=26..41,z=-47..-37")
    R += Box("on x=-12..35,y=6..50,z=-50..-2")
    R += Box("off x=-48..-32,y=-32..-16,z=-15..-5")
    R += Box("on x=-18..26,y=-33..15,z=-7..46")
    R += Box("off x=-40..-22,y=-38..-28,z=23..41")
    R += Box("on x=-16..35,y=-41..10,z=-47..6")
    R += Box("off x=-32..-23,y=11..30,z=-14..3")
    R += Box("on x=-49..-5,y=-3..45,z=-29..18")
    R += Box("off x=18..30,y=-20..-8,z=-3..13")
    R += Box("on x=-41..9,y=-7..43,z=-33..15")

    # check for the correct size of things
    nx = R.drawallblobs()

    # the rendered voxel grid must agree with the reactor's own size count
    assert (sum(sum(sum(nx))) == R.size())

    assert (R.size() == 590784)
    print("= selftest 4 passed")
    return R
Beispiel #57
0
def example2():
    """Feed the worked-example sequence of reboot steps into a Reactor and
    verify that the total number of lit cubes matches the expected value."""
    print("= example 2 started")

    # One reboot instruction per line, applied strictly in order.
    steps = """\
on x=-5..47,y=-31..22,z=-19..33
on x=-44..5,y=-27..21,z=-14..35
on x=-49..-1,y=-11..42,z=-10..38
on x=-20..34,y=-40..6,z=-44..1
off x=26..39,y=40..50,z=-2..11
on x=-41..5,y=-41..6,z=-36..8
off x=-43..-33,y=-45..-28,z=7..25
on x=-33..15,y=-32..19,z=-34..11
off x=35..47,y=-46..-34,z=-11..5
on x=-14..36,y=-6..44,z=-16..29
on x=-57795..-6158,y=29564..72030,z=20435..90618
on x=36731..105352,y=-21140..28532,z=16094..90401
on x=30999..107136,y=-53464..15513,z=8553..71215
on x=13528..83982,y=-99403..-27377,z=-24141..23996
on x=-72682..-12347,y=18159..111354,z=7391..80950
on x=-1060..80757,y=-65301..-20884,z=-103788..-16709
on x=-83015..-9461,y=-72160..-8347,z=-81239..-26856
on x=-52752..22273,y=-49450..9096,z=54442..119054
on x=-29982..40483,y=-108474..-28371,z=-24328..38471
on x=-4958..62750,y=40422..118853,z=-7672..65583
on x=55694..108686,y=-43367..46958,z=-26781..48729
on x=-98497..-18186,y=-63569..3412,z=1232..88485
on x=-726..56291,y=-62629..13224,z=18033..85226
on x=-110886..-34664,y=-81338..-8658,z=8914..63723
on x=-55829..24974,y=-16897..54165,z=-121762..-28058
on x=-65152..-11147,y=22489..91432,z=-58782..1780
on x=-120100..-32970,y=-46592..27473,z=-11695..61039
on x=-18631..37533,y=-124565..-50804,z=-35667..28308
on x=-57817..18248,y=49321..117703,z=5745..55881
on x=14781..98692,y=-1341..70827,z=15753..70151
on x=-34419..55919,y=-19626..40991,z=39015..114138
on x=-60785..11593,y=-56135..2999,z=-95368..-26915
on x=-32178..58085,y=17647..101866,z=-91405..-8878
on x=-53655..12091,y=50097..105568,z=-75335..-4862
on x=-111166..-40997,y=-71714..2688,z=5609..50954
on x=-16602..70118,y=-98693..-44401,z=5197..76897
on x=16383..101554,y=4615..83635,z=-44907..18747
off x=-95822..-15171,y=-19987..48940,z=10804..104439
on x=-89813..-14614,y=16069..88491,z=-3297..45228
on x=41075..99376,y=-20427..49978,z=-52012..13762
on x=-21330..50085,y=-17944..62733,z=-112280..-30197
on x=-16478..35915,y=36008..118594,z=-7885..47086
off x=-98156..-27851,y=-49952..43171,z=-99005..-8456
off x=2032..69770,y=-71013..4824,z=7471..94418
on x=43670..120875,y=-42068..12382,z=-24787..38892
off x=37514..111226,y=-45862..25743,z=-16714..54663
off x=25699..97951,y=-30668..59918,z=-15349..69697
off x=-44271..17935,y=-9516..60759,z=49131..112598
on x=-61695..-5813,y=40978..94975,z=8655..80240
off x=-101086..-9439,y=-7088..67543,z=33935..83858
off x=18020..114017,y=-48931..32606,z=21474..89843
off x=-77139..10506,y=-89994..-18797,z=-80..59318
off x=8476..79288,y=-75520..11602,z=-96624..-24783
on x=-47488..-1262,y=24338..100707,z=16292..72967
off x=-84341..13987,y=2429..92914,z=-90671..-1318
off x=-37810..49457,y=-71013..-7894,z=-105357..-13188
off x=-27365..46395,y=31009..98017,z=15428..76570
off x=-70369..-16548,y=22648..78696,z=-1892..86821
on x=-53470..21291,y=-120233..-33476,z=-44150..38147
off x=-93533..-4276,y=-16170..68771,z=-104985..-24507"""

    reactor = Reactor()
    for line in steps.splitlines():
        reactor += Box(line)

    # the surviving boxes must account for exactly the expected lit volume
    total = sum(cube.size() for cube in reactor.realcubes)
    assert (total == 2758514936282235)

    print("= example 2 passed")
Beispiel #58
0
 def __init__(self):
     """Initialise the LED reactor: run the base Reactor setup, then
     configure the GPIO pin used to drive the LED as an output."""
     Reactor.__init__(self)
     # BCM mode: pin numbers refer to Broadcom SoC channel numbers,
     # not physical board positions
     GPIO.setmode(GPIO.BCM)
     GPIO.setup(LEDReactor.GPIO_OUT, GPIO.OUT)
Beispiel #59
0
class BasicServent:
    """
    This class implements everything that a Servent "must do" as part of Gnutella 0.4 protocol
    
    To add more feature, simply derive this. You can either override entire method or
    add more functionality by implement it and then add parent's method.
    """
    # Fixed expiration period  per ttl in unit of second
    FIXED_EXPIRED_INTERVAL = 5
    def __init__(self, port=0, files=None, bootstrap_address=None):
        """
        Create a basic Gnutella 0.4 servent.

        port              -- local port to listen on (0 lets the OS pick one)
        files             -- list of FileInfo objects shared by this servent
                             (default: no files; a fresh list per instance)
        bootstrap_address -- optional address of a bootstrap node to contact
        """
        self._logger = logging.getLogger("%s(%s)" % (self.__class__.__name__, hex(id(self))[:-1]))
        # forwarding table: (message_id, payload_type) -> (connection_handler,
        # expiration)
        self.forwarding_table = {}
        # flood/forward ignore table: message_id -> timestamp
        # this table used to prevent loop in flood
        # all message send out need to put their message_id and
        # timestamp = time.time()+ttl*FIXED_EXPIRED_INTERVAL
        self.ignore = {}
        # create servent id
        self.id = uuid.uuid4().bytes
        # NOTE: bytes.encode('hex_codec') is Python-2-only hex dumping
        self.log("id is %s" % self.id.encode('hex_codec'))
        # `files=None` avoids the shared-mutable-default-argument pitfall;
        # set_files() stores the list and derives num_files / num_kilobytes,
        # replacing the duplicated bookkeeping that used to live here.
        self.set_files(files if files is not None else [])
        # create Reactor class for socket management
        self.reactor = Reactor(self, port)
        # check if bootstrap_address is given
        if bootstrap_address:
            self.reactor.bootstrap_connect(bootstrap_address)
        
    def on_accept(self):
        """
        on event of gnutella connection, accept or refuse depends on
        resource management
        """
        # Always accept new connection
        return True
    
    def on_connect(self, connection_handler):
        """ 
        on event of a servent connects to 
        """      
        return

    def on_receive(self, connection_handler, message):
        """
        Dispatch a message received on an existing connection.

        Routing summary (Gnutella 0.4):
          PING     -> flood to other neighbors, record route, reply with PONG
          PONG     -> route back along the recorded PING route
          QUERY    -> record route, flood to other neighbors
          QUERYHIT -> unless addressed to this servent, route back along the
                      recorded QUERY route and record the route for a PUSH
          PUSH     -> unless addressed to this servent, route back along the
                      recorded QUERYHIT route

        Raises ValueError for any other payload descriptor.
        """
        if message.payload_descriptor == GnutellaBodyId.PING:
            # check if we saw this ping before. If not, then process
            forward_key = (message.message_id, GnutellaBodyId.PONG)
            now = time.time()
            # route entry absent, or present but already expired
            not_seen_or_expire = forward_key not in self.forwarding_table or (self.forwarding_table[forward_key][1] < now)
            # not one of our own messages, or our ignore entry has expired
            not_ignore_or_expire = message.message_id not in self.ignore or (self.ignore[message.message_id] < now) 
            if  not_seen_or_expire and not_ignore_or_expire:                                 
                # send Ping to any neighbor that is not the one this servent received the Ping from
                self.flood(connection_handler, message)
                # add ping to forwarding table to forward PONG
                self.put_into_forwarding_table(message, connection_handler)
                # reply with Pong (the return trip's ttl should be equals to hops)
                pong_message = create_message(GnutellaBodyId.PONG, 
                                              message.message_id, 
                                              message.hops+1,
                                              ip = self.reactor.ip,
                                              port = self.reactor.port,
                                              num_of_files = self.num_files,
                                              num_of_kb = self.num_kilobytes)
                self.log("Sending replied pong %s", pong_message)
                self.send_message(pong_message, connection_handler)
        elif message.payload_descriptor == GnutellaBodyId.PONG:
            # forwarding pong                 
            self.forward(message)
        elif message.payload_descriptor == GnutellaBodyId.QUERY:
            # check if we saw this query before. If not, then process
            forward_key = (message.message_id, GnutellaBodyId.QUERYHIT)
            now = time.time()
            not_seen_or_expire = forward_key not in self.forwarding_table or (self.forwarding_table[forward_key][1] < now)
            not_ignore_or_expire = message.message_id not in self.ignore or (self.ignore[message.message_id] < now)             
            if not_seen_or_expire and not_ignore_or_expire:
                # add to forwarding table to forward QUERYHIT
                self.put_into_forwarding_table(message, connection_handler)
                # forward query packet to neighbor servent
                self.flood(connection_handler, message)
        elif message.payload_descriptor == GnutellaBodyId.QUERYHIT:
            # don't route query hit if it is meant for this node            
            if not message.body.servent_id == self.id:
                # forwarding query hit
                if self.forward(message):
                    # add to forwarding table to forward PUSH
                    self.put_into_forwarding_table(message, connection_handler)                
        elif message.payload_descriptor == GnutellaBodyId.PUSH:
            # don't route push if it is meant for this node
            if not message.body.servent_id == self.id:
                # forward push
                self.forward(message)                
        else:
            raise ValueError('message type is not one of PING, PONG, QUERY, QUERYHIT, PUSH')
            
    def on_disconnect(self, connection_handler):
        """ 
        servent behavior when leaving the network 
        """
        # resource clean up
        # clean up forwarding table
        remove = [k for k,v in self.forwarding_table.iteritems() if v[0] == connection_handler]
        for k in remove: 
            del self.forwarding_table[k]
        return
    
    def on_download(self, event_id, connection_handler):
        # DO some logging or resource clean up in here
        return
    
    def on_bootstrap(self, peer_address):
        # connect to all suggested peer
        self.reactor.gnutella_connect(peer_address) 
    
    def log(self, msg, *args, **kwargs):
        self._logger.debug(msg, *args, **kwargs)
    
    def send_message(self, message, handler):
        """
        Servent sends its own message (not as part of flood / forwarding)
        
        By using this method, Servent can keep track which message (by message_id) is its own
        by adding it to ignore dictionary.
        """
        self.ignore[message.message_id] = time.time()+self.FIXED_EXPIRED_INTERVAL*message.ttl
        handler.write(message.serialize())
    
    def put_into_forwarding_table(self, message, handler):
        """
        Record where *message* arrived so its matching reply can be routed back.

        Maps a reply key to (handler, expiration). Raises ValueError for
        payload types that have no routed reply (PONG, QUERYHIT replies are
        the only ones tracked: PING->PONG, QUERY->QUERYHIT, QUERYHIT->PUSH).

        Fix: the QUERYHIT and PING branches previously inserted the entry
        twice (inside the branch and again after the if/elif chain); each
        branch now only computes the key and a single insert is performed.
        """
        now = time.time()
        message_id = copy.deepcopy(message.message_id)
        value = (handler, now + self.FIXED_EXPIRED_INTERVAL * message.ttl)
        if message.payload_descriptor == GnutellaBodyId.QUERYHIT:
            # a QUERYHIT's reply is a PUSH, keyed additionally by servent id
            key = (message_id, message.body.servent_id, GnutellaBodyId.PUSH)
        elif message.payload_descriptor == GnutellaBodyId.PING:
            key = (message_id, GnutellaBodyId.PONG)
        elif message.payload_descriptor == GnutellaBodyId.QUERY:
            key = (message_id, GnutellaBodyId.QUERYHIT)
        else:
            raise ValueError
        # insert into forwarding table
        self.forwarding_table[key] = value
        
    
    def forward(self, message):
        """
        Forward *message* to the correct servent per the forwarding table.

        Returns True when the message was written to a handler; False when
        the ttl is exhausted, no route is known, or the route has expired
        (an expired route is also removed from the table).

        Fix: removed the unreachable trailing ``return False`` and narrowed
        the try/except so a KeyError raised inside serialize()/decrease_ttl()
        is no longer silently swallowed as "no route".
        """
        if message.ttl < 2:
            return False
        # work on a deep copy so the caller's message keeps its original ttl
        message = copy.deepcopy(message)
        message.decrease_ttl()
        packet = message.serialize()
        if message.payload_descriptor != GnutellaBodyId.PUSH:
            key = (message.message_id, message.payload_descriptor)
        else:
            # PUSH routes are keyed additionally by the target servent id
            key = (message.message_id, message.body.servent_id, message.payload_descriptor)
        try:
            handler, timestamp = self.forwarding_table[key]
        except KeyError:
            return False
        if time.time() < timestamp:
            handler.write(packet)
            return True
        # route expired: drop the stale entry
        self.log("forwarding timestamp expired -> %s", message)
        del self.forwarding_table[key]
        return False
            
    
    def flood(self, received_handler, message):
        """
        Flood other servent's message to every directly connected servent other
        than the received handler. This method is used as part of forwarding of ping, query
        
        return number of connection is flooded
        """
        if message.ttl < 2:
            return 0
        # create a deep copy
        message = copy.deepcopy(message)        
        message.decrease_ttl()
        return self.reactor.broadcast_except_for(received_handler, message)

    def flood_ex(self, message):
        """
        Flood a message originated by this servent to every directly
        connected servent.

        Only QUERY and PING may be flooded per the protocol; anything else
        is dropped. Returns the number of connections flooded to.
        """
        floodable = (GnutellaBodyId.QUERY, GnutellaBodyId.PING)
        if message.payload_descriptor not in floodable:
            return 0
        # remember our own message id so we skip it if it loops back to us
        self.ignore[message.message_id] = time.time() + self.FIXED_EXPIRED_INTERVAL * message.ttl
        return self.reactor.broadcast_except_for(None, message)
           
    def set_files(self, files):
        # each member of files is a FileInfo 
        self._files = files
        # calculate number of file and number of kilobyte shared
        self.num_files = len(files)
        self.num_kilobytes = 0
        for f in files:
            self.num_kilobytes += f.file_size
        self.num_kilobytes /= 1000 # shrink the unit        
        return

    def get_files(self):
        """Accessor backing the ``files`` property; returns the stored file list."""
        return self._files

    # expose the file list as a property so reads/writes go through the
    # accessors above (set_files keeps num_files / num_kilobytes in sync)
    files = property(get_files, set_files)
    

    def check_file(self, file_id):
        """
        check if the servent have the file with id = file_id
        """
        for fileinfo in self.files:
            if fileinfo.file_id == file_id:
                return True
        return False

    def get_file_content(self, file_id, file_name):
        if not self.check_file(file_id):
            return None
        # return dummy content
        return "This is (%s, %s)" % (file_name, file_id)

    def search(self, criteria):
        """ 
        Return a list of file fit the criteria
        Exact match for file name
        """
        match = []
        for fileinfo in self.files:
            if criteria == fileinfo.file_name:
                match.append(fileinfo)
        return match