def __init__(self, dht_addr):
        """Create a DHT node listening on ``dht_addr``.

        Builds the reactor/RPC/querier/routing/responder stack, wires the
        routing manager's event callbacks into the querier and responder,
        bootstraps the routing table, and finally creates the lookup
        manager.  NOTE(review): ``bootstrap_nodes`` and ``QUERY`` are read
        from module scope (not visible in this chunk).
        """
        self.my_addr = dht_addr
        self.my_id = identifier.RandomId()  # fresh random node id each run
        self.my_node = Node(self.my_addr, self.my_id)
        self.tracker = tracker.Tracker()
        self.token_m = token_manager.TokenManager()

        self.reactor = ThreadedReactor()
        # RPCManager listens on the port component of my_addr
        self.rpc_m = RPCManager(self.reactor, self.my_addr[1])
        self.querier = Querier(self.rpc_m, self.my_id)
        self.routing_m = RoutingManager(self.my_node, self.querier,
                                        bootstrap_nodes)
        self.responder = Responder(self.my_id, self.routing_m,
                                   self.tracker, self.token_m)

        # feed routing-table maintenance with every network event
        self.responder.set_on_query_received_callback(
            self.routing_m.on_query_received)
        self.querier.set_on_response_received_callback(
            self.routing_m.on_response_received)
        self.querier.set_on_error_received_callback(
            self.routing_m.on_error_received)
        self.querier.set_on_timeout_callback(self.routing_m.on_timeout)
        self.querier.set_on_nodes_found_callback(self.routing_m.on_nodes_found)

        self.routing_m.do_bootstrap()

        # incoming queries are dispatched to the responder
        self.rpc_m.add_msg_callback(QUERY,
                                    self.responder.on_query_received)

        self.lookup_m = LookupManager(self.my_id, self.querier,
                                      self.routing_m)
Exemple #2
0
    def __init__(self, dht_addr):
        """Build and wire the DHT component stack for a node at ``dht_addr``.

        Only the reactor, the lookup manager and the routing manager are
        kept as attributes; the remaining components stay referenced
        through their registered callbacks.
        """
        addr = dht_addr
        node_id = identifier.RandomId()
        node = Node(addr, node_id)
        peer_tracker = tracker.Tracker()
        tokens = token_manager.TokenManager()

        self.reactor = ThreadedReactor()
        rpc = RPCManager(self.reactor, addr[1])
        q = Querier(rpc, node_id)
        routing = RoutingManager(node, q, bootstrap_nodes)
        resp = Responder(node_id, routing, peer_tracker, tokens)

        # route every network event to the routing manager
        resp.set_on_query_received_callback(routing.on_query_received)
        q.set_on_response_received_callback(routing.on_response_received)
        q.set_on_error_received_callback(routing.on_error_received)
        q.set_on_timeout_callback(routing.on_timeout)
        q.set_on_nodes_found_callback(routing.on_nodes_found)

        routing.do_bootstrap()

        rpc.add_msg_callback(QUERY, resp.on_query_received)

        self.lookup_m = LookupManager(node_id, q, routing)
        self._routing_m = routing
Exemple #3
0
    def __init__(self, version_label, my_node, conf_path, routing_m_mod,
                 lookup_m_mod, experimental_m_mod, private_dht_name,
                 bootstrap_mode):
        """Wire up the DHT controller's managers (overlay-bootstrap variant).

        Args:
            version_label: client version string embedded in node/messages.
            my_node: node whose address (and optionally id) this DHT uses.
            conf_path: path handed to the overlay bootstrapper.
            routing_m_mod, lookup_m_mod, experimental_m_mod: modules
                providing the pluggable RoutingManager / LookupManager /
                ExperimentalManager implementations.
            private_dht_name: private-DHT name (falsy for public DHT).
            bootstrap_mode: forwarded to the Responder.
        """
        self.bootstrapper = bootstrap.OverlayBootstrapper(conf_path)
        my_addr = my_node.addr
        self._my_id = my_node.id  # id indicated by user
        if not self._my_id:
            # fixed: was a redundant `self._my_id = self._my_id = ...`
            self._my_id = identifier.RandomId()  # random id
        self._my_node = Node(my_addr, self._my_id, version=version_label)
        self.msg_f = message.MsgFactory(version_label, self._my_id,
                                        private_dht_name)
        self._querier = Querier()
        self._routing_m = routing_m_mod.RoutingManager(self._my_node,
                                                       self.msg_f,
                                                       self.bootstrapper)

        self._responder = responder.Responder(self._my_id, self._routing_m,
                                              self.msg_f, bootstrap_mode)
        # the responder owns the tracker; keep a direct reference
        self._tracker = self._responder._tracker

        self._lookup_m = lookup_m_mod.LookupManager(self._my_id, self.msg_f,
                                                    self.bootstrapper)
        self._experimental_m = experimental_m_mod.ExperimentalManager(
            self._my_node.id, self.msg_f)

        # schedule maintenance/timeout/main-loop work immediately
        current_ts = time.time()
        self._next_maintenance_ts = current_ts
        self._next_timeout_ts = current_ts
        self._next_main_loop_call_ts = current_ts
        self._cached_lookups = []
    def getid(self):
        """getid()
        get modem id data"""
        modem = Querier(InsteonAddress("00.00.00"))
        self.querier = modem
        modem.setMsgHandler(IMInfoMsgHandler("getid"))
        modem.sendMsg(Msg.s_makeMessage("GetIMInfo"))
Exemple #5
0
    def __init__(self, dht_addr, state_path, routing_m_mod, lookup_m_mod):
        """Restore saved node state and wire the DHT components.

        routing_m_mod / lookup_m_mod are modules providing pluggable
        RoutingManager / LookupManager implementations.
        NOTE(review): STATE_FILENAME, BOOTSTRAP_NODES and SAVE_STATE_DELAY
        come from module scope; ``load_state()`` is expected to set
        ``self._my_id`` and ``self.loaded_nodes`` — confirm against the
        rest of the class.
        """
        self.state_filename = os.path.join(state_path, STATE_FILENAME)
        self.load_state()
        if not self._my_id:
            # no saved id: generate a fresh random one
            self._my_id = identifier.RandomId()
        self._my_node = Node(dht_addr, self._my_id)
        self._tracker = tracker.Tracker()
        self._token_m = token_manager.TokenManager()

        self._reactor = ThreadedReactor()
        self._reactor.listen_udp(self._my_node.addr[1],
                                 self._on_datagram_received)
        #self._rpc_m = RPCManager(self._reactor)
        self._querier = Querier(self._my_id)
        # prefer nodes from saved state; fall back to well-known bootstrap
        bootstrap_nodes = self.loaded_nodes or BOOTSTRAP_NODES
        del self.loaded_nodes
        self._routing_m = routing_m_mod.RoutingManager(self._my_node, 
                                                       bootstrap_nodes)
#        self._responder = Responder(self._my_id, self._routing_m,
#                                    self._tracker, self._token_m)

        self._lookup_m = lookup_m_mod.LookupManager(self._my_id)
        current_time = time.time()
        self._next_maintenance_ts = current_time
        self._next_save_state_ts = current_time + SAVE_STATE_DELAY
        
        self._running = False
    def getIMConfig(self):
        """getIMConfig()
        get modem configuration flags byte"""
        modem = Querier(InsteonAddress("00.00.00"))
        self.querier = modem
        modem.setMsgHandler(IMConfigMsgHandler("getIMConfig"))
        modem.sendMsg(Msg.s_makeMessage("GetIMConfig"))
Exemple #7
0
    def __init__(self, dht_addr):
        """Assemble the DHT stack for a node bound to ``dht_addr``.

        Keeps the reactor, lookup manager and routing manager as
        attributes; all other components live on through the callbacks
        registered here.
        """
        addr = dht_addr
        node_id = identifier.RandomId()
        node = Node(addr, node_id)
        peer_tracker = tracker.Tracker()
        tokens = token_manager.TokenManager()

        self.reactor = ThreadedReactor()
        rpc = RPCManager(self.reactor, addr[1])
        q = Querier(rpc, node_id)
        routing = RoutingManager(node, q, bootstrap_nodes)
        resp = Responder(node_id, routing, peer_tracker, tokens)

        # every network event updates the routing table
        resp.set_on_query_received_callback(routing.on_query_received)
        q.set_on_response_received_callback(routing.on_response_received)
        q.set_on_error_received_callback(routing.on_error_received)
        q.set_on_timeout_callback(routing.on_timeout)
        q.set_on_nodes_found_callback(routing.on_nodes_found)

        routing.do_bootstrap()

        rpc.add_msg_callback(QUERY, resp.on_query_received)

        self.lookup_m = LookupManager(node_id, q, routing)
        self._routing_m = routing
Exemple #8
0
 def do_query(self, query):
     """
     Renders the results page.

     Runs *query* through a Querier, keeps only the non-empty binding
     sets from each result dict, unpacks them, and renders the
     'results.txt' template.  Any failure renders "ERROR" instead.
     """
     q = Querier()
     results = None
     try:
         # extracts only the bindings from the result dictionary
         bindings = [r['results']['bindings']
                     for r in q.query(str(query))
                     if r['results']['bindings'] != []]
         results = self.unpack_results(bindings)
     except Exception:
         # fixed: was a bare `except:`, which also swallowed
         # SystemExit/KeyboardInterrupt; still deliberately broad so
         # any query/backend failure renders an error page
         results = "ERROR"
     return self.serve_template('results.txt', results=results)
	def cancelLinking(self):
		"""cancelLinking()
		takes modem out of linking or unlinking mode"""
		modem = Querier(InsteonAddress("00.00.00"))
		self.querier = modem
		modem.setMsgHandler(DefaultMsgHandler("cancel linking"))
		modem.sendMsg(Msg.s_makeMessage("CancelALLLinking"))
	def getid(self):
		"""getid()
		get modem id data"""
		modem = Querier(InsteonAddress("00.00.00"))
		self.querier = modem
		modem.setMsgHandler(IMInfoMsgHandler("getid"))
		modem.sendMsg(Msg.s_makeMessage("GetIMInfo"))
Exemple #11
0
    def __init__(self, version_label,
                 my_node, conf_path,
                 routing_m_mod, lookup_m_mod,
                 experimental_m_mod,
                 private_dht_name,
                 bootstrap_mode):
        """Wire up the DHT controller's managers (overlay-bootstrap variant).

        Args:
            version_label: client version string embedded in node/messages.
            my_node: node whose address (and optionally id) this DHT uses.
            conf_path: path handed to the overlay bootstrapper.
            routing_m_mod, lookup_m_mod, experimental_m_mod: modules
                providing the pluggable manager implementations.
            private_dht_name: private-DHT name (falsy for public DHT).
            bootstrap_mode: forwarded to the Responder.
        """
        self.bootstrapper = bootstrap.OverlayBootstrapper(conf_path)
        my_addr = my_node.addr
        self._my_id = my_node.id  # id indicated by user
        if not self._my_id:
            # fixed: was a redundant `self._my_id = self._my_id = ...`
            self._my_id = identifier.RandomId()  # random id
        self._my_node = Node(my_addr, self._my_id, version=version_label)
        self.msg_f = message.MsgFactory(version_label, self._my_id,
                                        private_dht_name)
        self._querier = Querier()
        self._routing_m = routing_m_mod.RoutingManager(
            self._my_node, self.msg_f, self.bootstrapper)

        self._responder = responder.Responder(self._my_id, self._routing_m,
                                              self.msg_f, bootstrap_mode)
        # the responder owns the tracker; keep a direct reference
        self._tracker = self._responder._tracker

        self._lookup_m = lookup_m_mod.LookupManager(self._my_id, self.msg_f,
                                                    self.bootstrapper)
        self._experimental_m = experimental_m_mod.ExperimentalManager(
            self._my_node.id, self.msg_f)

        # schedule maintenance/timeout/main-loop work immediately
        current_ts = time.time()
        self._next_maintenance_ts = current_ts
        self._next_timeout_ts = current_ts
        self._next_main_loop_call_ts = current_ts
        self._cached_lookups = []
Exemple #12
0
    def __init__(self, dht_addr):
        """Create a DHT node listening on ``dht_addr``.

        Builds the reactor/RPC/querier/routing/responder stack, wires the
        routing manager's event callbacks into the querier and responder,
        bootstraps the routing table, and creates the lookup manager.
        NOTE(review): ``bootstrap_nodes`` and ``QUERY`` are read from
        module scope (not visible in this chunk).
        """
        self.my_addr = dht_addr
        self.my_id = identifier.RandomId()  # fresh random node id each run
        self.my_node = Node(self.my_addr, self.my_id)
        self.tracker = tracker.Tracker()
        self.token_m = token_manager.TokenManager()

        self.reactor = ThreadedReactor()
        # RPCManager listens on the port component of my_addr
        self.rpc_m = RPCManager(self.reactor, self.my_addr[1])
        self.querier = Querier(self.rpc_m, self.my_id)
        self.routing_m = RoutingManager(self.my_node, self.querier,
                                        bootstrap_nodes)
        self.responder = Responder(self.my_id, self.routing_m,
                                   self.tracker, self.token_m)

        # feed routing-table maintenance with every network event
        self.responder.set_on_query_received_callback(
            self.routing_m.on_query_received)
        self.querier.set_on_response_received_callback(
            self.routing_m.on_response_received)
        self.querier.set_on_error_received_callback(
            self.routing_m.on_error_received)
        self.querier.set_on_timeout_callback(self.routing_m.on_timeout)
        self.querier.set_on_nodes_found_callback(self.routing_m.on_nodes_found)

        self.routing_m.do_bootstrap()

        # incoming queries are dispatched to the responder
        self.rpc_m.add_msg_callback(QUERY,
                                    self.responder.on_query_received)

        self.lookup_m = LookupManager(self.my_id, self.querier,
                                      self.routing_m)
Exemple #13
0
    def __init__(self, dht_addr, state_filename,
                 routing_m_mod, lookup_m_mod,
                 experimental_m_mod,
                 private_dht_name):
        """Load saved state and wire the pluggable DHT managers.

        NOTE(review): ``size_estimation``, ``state``, ``SAVE_STATE_DELAY``
        come from module scope (not visible in this chunk).
        """
        #TODO: don't do this evil stuff!!!
        # mutating another module's global to configure the DHT name
        message.private_dht_name = private_dht_name

        if size_estimation:
            self._size_estimation_file = open('size_estimation.dat', 'w')
        
        
        self.state_filename = state_filename
        saved_id, saved_bootstrap_nodes = state.load(self.state_filename)
        # id precedence: saved state, otherwise a fresh random id
        if saved_id:
            self._my_id = saved_id
        else:
            self._my_id = identifier.RandomId()
        self._my_node = Node(dht_addr, self._my_id)
        self._tracker = tracker.Tracker()
        self._token_m = token_manager.TokenManager()

        self._querier = Querier()
        self._routing_m = routing_m_mod.RoutingManager(self._my_node, 
                                                       saved_bootstrap_nodes)
        self._lookup_m = lookup_m_mod.LookupManager(self._my_id)
        self._experimental_m = experimental_m_mod.ExperimentalManager(self._my_node.id) 
                  
        # schedule save/maintenance/timeout/main-loop work
        current_ts = time.time()
        self._next_save_state_ts = current_ts + SAVE_STATE_DELAY
        self._next_maintenance_ts = current_ts
        self._next_timeout_ts = current_ts
        self._next_main_loop_call_ts = current_ts
        self._pending_lookups = []
Exemple #14
0
 def test_querier(self):
     """
     If the number of entries for Fe2O3 in the MP is larger or equal to 12.
     The larger case is for in case more have been added in the future.
     """
     entry_count = len(Querier(API_KEY, 'Fe2O3').mp_all)
     self.assertTrue(entry_count >= 12,
                     msg='Number of entries for Fe2O3 is not correct')
Exemple #15
0
    def __init__(self, dht_addr, state_path,
                 routing_m_mod, lookup_m_mod,
                 private_dht_name):
        """Restore saved node state and wire the DHT components.

        NOTE(review): STATE_FILENAME, BOOTSTRAP_NODES and SAVE_STATE_DELAY
        come from module scope; ``load_state()`` is expected to set
        ``self._my_id`` and ``self.loaded_nodes``.
        """
        #TODO: don't do this evil stuff!!!
        # mutating another module's global to configure the DHT name
        message.private_dht_name = private_dht_name
        
        self.state_filename = os.path.join(state_path, STATE_FILENAME)
        self.load_state()
        if not self._my_id:
            # no saved id: generate a fresh random one
            self._my_id = identifier.RandomId()
        self._my_node = Node(dht_addr, self._my_id)
        self._tracker = tracker.Tracker()
        self._token_m = token_manager.TokenManager()

        self._reactor = ThreadedReactor()
        self._reactor.listen_udp(self._my_node.addr[1],
                                 self._on_datagram_received)
        #self._rpc_m = RPCManager(self._reactor)
        self._querier = Querier(self._my_id)
        # prefer nodes from saved state; fall back to well-known bootstrap
        bootstrap_nodes = self.loaded_nodes or BOOTSTRAP_NODES
        del self.loaded_nodes
        self._routing_m = routing_m_mod.RoutingManager(self._my_node, 
                                                       bootstrap_nodes)
#        self._responder = Responder(self._my_id, self._routing_m,
#                                    self._tracker, self._token_m)

        self._lookup_m = lookup_m_mod.LookupManager(self._my_id)
        current_time = time.time()
        self._next_maintenance_ts = current_time
        self._next_save_state_ts = current_time + SAVE_STATE_DELAY
        
        self._running = False
 def do_query(self, query):
     """
     Renders the results page.

     Runs *query* through a Querier, keeps only the non-empty binding
     sets from each result dict, unpacks them, and renders the
     'results.txt' template.  Any failure renders "ERROR" instead.
     """
     q = Querier()
     results = None
     try:
         # extracts only the bindings from the result dictionary
         bindings = [
             r['results']['bindings'] for r in q.query(str(query))
             if r['results']['bindings'] != []
         ]
         results = self.unpack_results(bindings)
     except Exception:
         # fixed: was a bare `except:`, which also swallowed
         # SystemExit/KeyboardInterrupt; still deliberately broad so
         # any query/backend failure renders an error page
         results = "ERROR"
     return self.serve_template('results.txt', results=results)
	def linkAsResponder(self, otherDevice, group):
		"""linkAsResponder(otherDevice, group)
		puts modem in link mode to respond to device "otherDevice" on group "group" """
		self.querier = Querier(InsteonAddress(otherDevice))
		self.querier.setMsgHandler(DefaultMsgHandler("start linking"))
		link_msg = Msg.s_makeMessage("StartALLLinking")
		# LinkCode 0x00: link with modem acting as responder
		link_msg.setByte("LinkCode", 0x00)
		link_msg.setByte("ALLLinkGroup", group)
		self.querier.sendMsg(link_msg)
	def unlinkAsController(self, otherDevice, group):
		"""unlinkAsController(otherDevice, group)
		puts modem in unlink mode to unlink as controller from device "otherDevice" on group "group" """
		self.querier = Querier(InsteonAddress(otherDevice))
		self.querier.setMsgHandler(DefaultMsgHandler("unlink as controller"))
		unlink_msg = Msg.s_makeMessage("StartALLLinking")
		# LinkCode 0xFF: delete (unlink) rather than create a link
		unlink_msg.setByte("LinkCode", 0xFF)
		unlink_msg.setByte("ALLLinkGroup", group)
		self.querier.sendMsg(unlink_msg)
    def __init__(self, version_label,
                 my_node, state_filename,
                 routing_m_mod, lookup_m_mod,
                 experimental_m_mod,
                 private_dht_name,
                 bootstrap_mode):
        """Load saved state and wire the DHT controller's managers.

        Args:
            version_label: client version string embedded in node/messages.
            my_node: node whose address (and optionally id) this DHT uses.
            state_filename: file holding the saved id and bootstrap nodes.
            routing_m_mod, lookup_m_mod, experimental_m_mod: modules
                providing the pluggable manager implementations.
            private_dht_name: private-DHT name (falsy for public DHT).
            bootstrap_mode: forwarded to the Responder.
        """
        if size_estimation:
            self._size_estimation_file = open('size_estimation.dat', 'w')

        self.state_filename = state_filename
        saved_id, saved_bootstrap_nodes = state.load(self.state_filename)
        my_addr = my_node.addr
        # id precedence: user-indicated > saved state > fresh random id
        self._my_id = my_node.id  # id indicated by user
        if not self._my_id:
            self._my_id = saved_id  # id loaded from file
        if not self._my_id:
            # fixed: was a redundant `self._my_id = self._my_id = ...`
            self._my_id = identifier.RandomId()  # random id
        self._my_node = Node(my_addr, self._my_id, version=version_label)
        self.msg_f = message.MsgFactory(version_label, self._my_id,
                                        private_dht_name)
        self._querier = Querier()
        self._routing_m = routing_m_mod.RoutingManager(
            self._my_node, saved_bootstrap_nodes, self.msg_f)

        self._responder = responder.Responder(self._my_id, self._routing_m,
                                              self.msg_f, bootstrap_mode)
        # the responder owns the tracker; keep a direct reference
        self._tracker = self._responder._tracker

        self._lookup_m = lookup_m_mod.LookupManager(self._my_id, self.msg_f)
        self._experimental_m = experimental_m_mod.ExperimentalManager(
            self._my_node.id, self.msg_f)

        # schedule save/maintenance/timeout/main-loop work
        current_ts = time.time()
        self._next_save_state_ts = current_ts + SAVE_STATE_DELAY
        self._next_maintenance_ts = current_ts
        self._next_timeout_ts = current_ts
        self._next_main_loop_call_ts = current_ts
        self._pending_lookups = []
        self._cached_lookups = {}
Exemple #20
0
class Controller:
    """Top-level DHT controller.

    Owns the reactor and the component stack (RPC, querier, routing,
    responder, lookup) and exposes start/stop plus peer lookups.
    NOTE(review): ``bootstrap_nodes`` and ``QUERY`` come from module
    scope (not visible in this chunk).
    """
    
    def __init__(self, dht_addr):
        """Build and wire the DHT stack for a node at ``dht_addr``."""
        self.my_addr = dht_addr
        self.my_id = identifier.RandomId()  # fresh random node id each run
        self.my_node = Node(self.my_addr, self.my_id)
        self.tracker = tracker.Tracker()
        self.token_m = token_manager.TokenManager()

        self.reactor = ThreadedReactor()
        # RPCManager listens on the port component of my_addr
        self.rpc_m = RPCManager(self.reactor, self.my_addr[1])
        self.querier = Querier(self.rpc_m, self.my_id)
        self.routing_m = RoutingManager(self.my_node, self.querier,
                                        bootstrap_nodes)
        self.responder = Responder(self.my_id, self.routing_m,
                                   self.tracker, self.token_m)

        # feed routing-table maintenance with every network event
        self.responder.set_on_query_received_callback(
            self.routing_m.on_query_received)
        self.querier.set_on_response_received_callback(
            self.routing_m.on_response_received)
        self.querier.set_on_error_received_callback(
            self.routing_m.on_error_received)
        self.querier.set_on_timeout_callback(self.routing_m.on_timeout)
        self.querier.set_on_nodes_found_callback(self.routing_m.on_nodes_found)

        self.routing_m.do_bootstrap()

        # incoming queries are dispatched to the responder
        self.rpc_m.add_msg_callback(QUERY,
                                    self.responder.on_query_received)

        self.lookup_m = LookupManager(self.my_id, self.querier,
                                      self.routing_m)

    def start(self):
        """Start the reactor (begins processing network events)."""
        self.reactor.start()

    def stop(self):
        """Stop the reactor; individual managers are not yet stopped."""
        #TODO2: stop each manager
        self.reactor.stop()

    def get_peers(self, info_hash, callback_f, bt_port=None):
        """Look up peers for ``info_hash``; results go to ``callback_f``.

        ``bt_port``, when given, is announced alongside the lookup.
        """
        return self.lookup_m.get_peers(info_hash, callback_f, bt_port)
class Controller:
    """Top-level DHT controller.

    Owns the reactor and the component stack (RPC, querier, routing,
    responder, lookup) and exposes start/stop plus peer lookups.
    NOTE(review): ``bootstrap_nodes`` and ``QUERY`` come from module
    scope (not visible in this chunk).
    """
    
    def __init__(self, dht_addr):
        """Build and wire the DHT stack for a node at ``dht_addr``."""
        self.my_addr = dht_addr
        self.my_id = identifier.RandomId()  # fresh random node id each run
        self.my_node = Node(self.my_addr, self.my_id)
        self.tracker = tracker.Tracker()
        self.token_m = token_manager.TokenManager()

        self.reactor = ThreadedReactor()
        # RPCManager listens on the port component of my_addr
        self.rpc_m = RPCManager(self.reactor, self.my_addr[1])
        self.querier = Querier(self.rpc_m, self.my_id)
        self.routing_m = RoutingManager(self.my_node, self.querier,
                                        bootstrap_nodes)
        self.responder = Responder(self.my_id, self.routing_m,
                                   self.tracker, self.token_m)

        # feed routing-table maintenance with every network event
        self.responder.set_on_query_received_callback(
            self.routing_m.on_query_received)
        self.querier.set_on_response_received_callback(
            self.routing_m.on_response_received)
        self.querier.set_on_error_received_callback(
            self.routing_m.on_error_received)
        self.querier.set_on_timeout_callback(self.routing_m.on_timeout)
        self.querier.set_on_nodes_found_callback(self.routing_m.on_nodes_found)

        self.routing_m.do_bootstrap()

        # incoming queries are dispatched to the responder
        self.rpc_m.add_msg_callback(QUERY,
                                    self.responder.on_query_received)

        self.lookup_m = LookupManager(self.my_id, self.querier,
                                      self.routing_m)

    def start(self):
        """Start the reactor (begins processing network events)."""
        self.reactor.start()

    def stop(self):
        """Stop the reactor; individual managers are not yet stopped."""
        #TODO2: stop each manager
        self.reactor.stop()

    def get_peers(self, info_hash, callback_f, bt_port=None):
        """Look up peers for ``info_hash``; results go to ``callback_f``.

        ``bt_port``, when given, is announced alongside the lookup.
        """
        return self.lookup_m.get_peers(info_hash, callback_f, bt_port)
    def __init__(self, version_label,
                 my_node, state_filename,
                 routing_m_mod, lookup_m_mod,
                 experimental_m_mod,
                 private_dht_name,
                 bootstrap_mode):
        """Load saved state and wire the DHT controller's managers.

        Args:
            version_label: client version string embedded in node/messages.
            my_node: node whose address (and optionally id) this DHT uses.
            state_filename: file holding the saved id and bootstrap nodes.
            routing_m_mod, lookup_m_mod, experimental_m_mod: modules
                providing the pluggable manager implementations.
            private_dht_name: private-DHT name (falsy for public DHT).
            bootstrap_mode: forwarded to the Responder.
        """
        if size_estimation:
            self._size_estimation_file = open('size_estimation.dat', 'w')

        self.state_filename = state_filename
        saved_id, saved_bootstrap_nodes = state.load(self.state_filename)
        my_addr = my_node.addr
        # id precedence: user-indicated > saved state > fresh random id
        self._my_id = my_node.id  # id indicated by user
        if not self._my_id:
            self._my_id = saved_id  # id loaded from file
        if not self._my_id:
            # fixed: was a redundant `self._my_id = self._my_id = ...`
            self._my_id = identifier.RandomId()  # random id
        self._my_node = Node(my_addr, self._my_id, version=version_label)
        self.msg_f = message.MsgFactory(version_label, self._my_id,
                                        private_dht_name)
        self._querier = Querier()
        self._routing_m = routing_m_mod.RoutingManager(
            self._my_node, saved_bootstrap_nodes, self.msg_f)

        self._responder = responder.Responder(self._my_id, self._routing_m,
                                              self.msg_f, bootstrap_mode)
        # the responder owns the tracker; keep a direct reference
        self._tracker = self._responder._tracker

        self._lookup_m = lookup_m_mod.LookupManager(self._my_id, self.msg_f)
        self._experimental_m = experimental_m_mod.ExperimentalManager(
            self._my_node.id, self.msg_f)

        # schedule save/maintenance/timeout/main-loop work
        current_ts = time.time()
        self._next_save_state_ts = current_ts + SAVE_STATE_DELAY
        self._next_maintenance_ts = current_ts
        self._next_timeout_ts = current_ts
        self._next_main_loop_call_ts = current_ts
        self._pending_lookups = []
        self._cached_lookups = []
class Device:
	"""An Insteon device: name, address, link database and query helpers.

	NOTE(review): DB, Querier, InsteonAddress, Msg, addDev, iofun and the
	*MsgHandler / link-db listener classes come from module scope (not
	visible in this chunk).
	"""
	name = ""
	address   = InsteonAddress()  # NOTE(review): class-level default instance is shared
	db        = None
	dbbuilder = None  # link-db downloader; used by getdb()/modifyDB()
	querier   = None
	def __init__(self, name, addr):
		self.name = name
		self.address = addr
		self.db = DB()
		self.querier = Querier(addr)
		addDev(self)  # register this device in the global device table

	def modifyDB(self, listener):
		# listener fires once the db download completes
		self.dbbuilder.setListener(listener)
		# after db download complete, listener will perform action
		self.getdb()

	def setRecord(self, offset, laddr, group, linkType, data):
		"""Write one link-db record at byte offset ``offset``."""
		msg = self.makeMsg(offset, laddr, group, linkType,data)
		self.querier.setMsgHandler(MsgHandler("got set record"))
		self.querier.sendMsg(msg)

	def makeMsg(self, offset, laddr, group, linkType, data):
		"""Build the extended message that writes one link-db record."""
		msg   = Msg.s_makeMessage("SendExtendedMessage")
		msg.setAddress("toAddress", InsteonAddress(self.getAddress()))
		msg.setByte("messageFlags", 0x1f)
		msg.setByte("command1", 0x2f)
		msg.setByte("command2", 0x00)
		msg.setByte("userData1", 0x00) # don't care info
		msg.setByte("userData2", 0x02) # set database
		msg.setByte("userData3", offset >> 8)  # high byte
		msg.setByte("userData4", offset & 0xff) # low byte
		msg.setByte("userData5", 8)  # number of bytes set:  1...8
		msg.setByte("userData6", linkType)
		msg.setByte("userData7", group)
		msg.setByte("userData8", laddr.getHighByte())
		msg.setByte("userData9", laddr.getMiddleByte())
		msg.setByte("userData10", laddr.getLowByte())
		# depends on mode: could be e.g. trigger point
		msg.setByte("userData11", data[0])
		msg.setByte("userData12", data[1]) # unused?
		msg.setByte("userData13", data[2]) # unused?
		# two's-complement checksum over the 15 bytes from command1 on
		rb = msg.getBytes("command1", 15);
		checksum = (~sum(rb) + 1) & 0xFF
		msg.setByte("userData14", checksum)
		return msg

	def getName(self):
		return self.name

	def getAddress(self):
		return self.address

	def getdb(self):
		"""getdb()
		download the device database and print it on the console"""
		iofun.out("getting db, be patient!")
		self.dbbuilder.clear()
		self.dbbuilder.start()

	def printdb(self):
		"""printdb()
		print the downloaded link database to the console"""
		self.dbbuilder.printdb()

	def getId(self):
		"""getId()
		get category, subcategory, firmware, hardware version"""
		self.querier.setMsgHandler(IdMsgHandler("id"))
		self.querier.querysd(0x10, 0x00)


#
#   link database management
#
	def addController(self, addr, group, data = None):
		"""addController(addr, group[, data])
		add device with "addr" as controller for group "group", with link data "data" """
		data = data if data else [00, 00, group];
		self.modifyDB(LinkRecordAdder(self, addr, group, data, True))
	def removeController(self, addr, group):
		"""removeController(addr, group)
		remove device with "addr" as controller for group "group" """
		self.modifyDB(LinkRecordRemover(self, addr, group, True))
	def addResponder(self, addr, group, data = None):
		"""addResponder(addr, group[, data])
		add device with "addr" as responder for group "group", with link data "data" """
		data = data if data else [00, 00, group];
		self.modifyDB(LinkRecordAdder(self, addr, group, data, False))
	def removeResponder(self, addr, group):
		"""removeResponder(addr, group)
		remove device with "addr" as responder for group "group" """
		self.modifyDB(LinkRecordRemover(self, addr, group, False))
	def removeDevice(self, addr):
		"""removeDevice(addr):
		removes all links to device with address "addr" from device database"""
		self.modifyDB(DeviceRemover(self, addr))
	def replaceDevice(self, oldAddr, newAddr):
		"""replaceDevice(oldAddr, newAddr):
		replaces all linkdb occurrences of oldAddr with newAddr """
		self.dbbuilder.setListener(AddressReplacer(self, oldAddr, newAddr))
		# after db download is complete, listener will perform action
		self.getdb()
	def removeLastRecord(self):
		"""removeLastRecord()
		removes the last device in the link database"""
		self.modifyDB(LastRecordRemover(self))
	def nukeDB(self):
		"""nukeDB()
		really WIPES OUT all records in the device's database!"""
		self.modifyDB(LastNRecordRemover(self, -1))
	def setOnLevelResponder(self, addr, group, level, ramprate = 28, button = 1):
		"""setOnLevelResponder(addr, group, level, ramprate = 28, button = 1)
		sets (on level, ramp rate, button) for controller with "addr" and group "group" """
		self.modifyDB(OnLevelModifier(self, addr, group, level, ramprate, button, False))
	def enterLinkingMode(self, group):
		"""enterLinkingMode(group)
		causes the device to enter linking mode"""
		self.querier.setMsgHandler(MsgHandler("enter linking mode"))
		self.querier.querysd(0x09, group);
	def exitLinkingMode(self):
		"""exitLinkingMode()
		causes the device to exit linking mode"""
		self.querier.setMsgHandler(MsgHandler("exit linking mode"))
		self.querier.querysd(0x08, 0x01);
	def enterUnlinkingMode(self, group):
		"""enterUnlinkingMode(group)
		causes the device to enter unlinking mode"""
		self.querier.setMsgHandler(MsgHandler("enter unlinking mode"))
		self.querier.querysd(0x0A, group);
	# NOTE(review): duplicate definition — this rebinding replaces the
	# __init__ defined earlier in the class (the bodies are identical).
	def __init__(self, name, addr):
		self.name = name
		self.address = addr
		self.db = DB()
		self.querier = Querier(addr)
		addDev(self)
class Device:
	"""An Insteon device: name, address, link database and query helpers.

	NOTE(review): DB, Querier, InsteonAddress, Msg, addDev, iofun and the
	*MsgHandler / link-db listener classes come from module scope (not
	visible in this chunk).
	"""
	name = ""
	address   = InsteonAddress()  # NOTE(review): class-level default instance is shared
	db        = None
	dbbuilder = None  # link-db downloader; used by getdb()/modifyDB()
	querier   = None
	def __init__(self, name, addr):
		self.name = name
		self.address = addr
		self.db = DB()
		self.querier = Querier(addr)
		addDev(self)  # register this device in the global device table

	def modifyDB(self, listener):
		# listener fires once the db download completes
		self.dbbuilder.setListener(listener)
		# after db download complete, listener will perform action
		self.getdb()

	def setRecord(self, offset, laddr, group, linkType, data):
		"""Write one link-db record at byte offset ``offset``."""
		msg = self.makeMsg(offset, laddr, group, linkType,data)
		self.querier.setMsgHandler(MsgHandler("got set record"))
		self.querier.sendMsg(msg)

	def makeMsg(self, offset, laddr, group, linkType, data):
		"""Build the extended message that writes one link-db record."""
		msg   = Msg.s_makeMessage("SendExtendedMessage")
		msg.setAddress("toAddress", InsteonAddress(self.getAddress()))
		msg.setByte("messageFlags", 0x1f)
		msg.setByte("command1", 0x2f)
		msg.setByte("command2", 0x00)
		msg.setByte("userData1", 0x00) # don't care info
		msg.setByte("userData2", 0x02) # set database
		msg.setByte("userData3", offset >> 8)  # high byte
		msg.setByte("userData4", offset & 0xff) # low byte
		msg.setByte("userData5", 8)  # number of bytes set:  1...8
		msg.setByte("userData6", linkType)
		msg.setByte("userData7", group)
		msg.setByte("userData8", laddr.getHighByte())
		msg.setByte("userData9", laddr.getMiddleByte())
		msg.setByte("userData10", laddr.getLowByte())
		# depends on mode: could be e.g. trigger point
		msg.setByte("userData11", data[0])
		msg.setByte("userData12", data[1]) # unused?
		msg.setByte("userData13", data[2]) # unused?
		# two's-complement checksum over the 15 bytes from command1 on
		rb = msg.getBytes("command1", 15);
		checksum = (~sum(rb) + 1) & 0xFF
		msg.setByte("userData14", checksum)
		return msg

	def getName(self):
		return self.name

	def getAddress(self):
		return self.address

	def getdb(self):
		"""getdb()
		download the device database and print it on the console"""
		iofun.out("getting db, be patient!")
		self.dbbuilder.clear()
		self.dbbuilder.start()

	def printdb(self):
		"""printdb()
		print the downloaded link database to the console"""
		self.dbbuilder.printdb()

	def getId(self):
		"""getId()
		get category, subcategory, firmware, hardware version"""
		self.querier.setMsgHandler(IdMsgHandler("id"))
		self.querier.querysd(0x10, 0x00)


#
#   link database management
#
	def addController(self, addr, group, data = None):
		"""addController(addr, group[, data])
		add device with "addr" as controller for group "group", with link data "data" """
		data = data if data else [00, 00, group];
		self.modifyDB(LinkRecordAdder(self, addr, group, data, True))
	def removeController(self, addr, group):
		"""removeController(addr, group)
		remove device with "addr" as controller for group "group" """
		self.modifyDB(LinkRecordRemover(self, addr, group, True))
	def addResponder(self, addr, group, data = None):
		"""addResponder(addr, group[, data])
		add device with "addr" as responder for group "group", with link data "data" """
		data = data if data else [00, 00, group];
		self.modifyDB(LinkRecordAdder(self, addr, group, data, False))
	def removeResponder(self, addr, group):
		"""removeResponder(addr, group)
		remove device with "addr" as responder for group "group" """
		self.modifyDB(LinkRecordRemover(self, addr, group, False))
	def removeDevice(self, addr):
		"""removeDevice(addr):
		removes all links to device with address "addr" from device database"""
		self.modifyDB(DeviceRemover(self, addr))
	def replaceDevice(self, oldAddr, newAddr):
		"""replaceDevice(oldAddr, newAddr):
		replaces all linkdb occurrences of oldAddr with newAddr """
		self.dbbuilder.setListener(AddressReplacer(self, oldAddr, newAddr))
		# after db download is complete, listener will perform action
		self.getdb()
	def removeLastRecord(self):
		"""removeLastRecord()
		removes the last device in the link database"""
		self.modifyDB(LastRecordRemover(self))
	def nukeDB(self):
		"""nukeDB()
		really WIPES OUT all records in the device's database!"""
		self.modifyDB(LastNRecordRemover(self, -1))
	def setOnLevelResponder(self, addr, group, level, ramprate = 28, button = 1):
		"""setOnLevelResponder(addr, group, level, ramprate = 28, button = 1)
		sets (on level, ramp rate, button) for controller with "addr" and group "group" """
		self.modifyDB(OnLevelModifier(self, addr, group, level, ramprate, button, False))
Exemple #26
0
class TestQuerier:
    """Unit tests for Querier: tid generation and matching of responses,
    timeouts, and errors against registered queries.

    NOTE(review): setup() replaces the module-global `time` (and
    querier.time) with MockTime so sleeps are simulated; tear_down()
    restores the real clock. Tests depend on this monkey-patching.
    """
    def setup(self):
        global time
        time = querier.time = MockTime()
        self.querier = Querier(tc.CLIENT_ID)

    def test_generate_tids(self):
        """Tids are two little-endian bytes cycling through the counter."""
        #TODO: move to message
        num_tids = 1000
        if RUN_CPU_INTENSIVE_TESTS:
            num_tids = pow(2, 16) + 2  #CPU intensive
        for i in xrange(num_tids):
            eq_(self.querier._next_tid(), chr(i % 256) + chr((i / 256) % 256))

    def test_ping_with_reponse(self):
        """A response with the query's tid and addr matches the query."""
        # Client creates a query
        ping_msg = message.OutgoingPingQuery(tc.CLIENT_ID)
        q = Query(ping_msg, tc.SERVER_NODE)
        timeout_task = minitwisted.Task(TIMEOUT_DELAY, None)
        # Client registers query
        bencoded_msg = self.querier.register_query(q, timeout_task)
        # Client sends bencoded_msg
        time.sleep(1)
        # Server gets bencoded_msg and creates response
        ping_r_msg_out = message.OutgoingPingResponse(tc.SERVER_ID)
        bencoded_r = ping_r_msg_out.encode(q.tid)
        # The client receives the bencoded message
        ping_r_in = message.IncomingMsg(bencoded_r, tc.SERVER_ADDR)
        stored_q = self.querier.on_response_received(ping_r_in, tc.SERVER_ADDR)
        assert stored_q is q

    def test_ping_with_timeout(self):
        """When no response arrives, on_timeout returns the pending query."""
        # Client creates a query
        ping_msg = message.OutgoingPingQuery(tc.CLIENT_ID)
        q = Query(ping_msg, tc.SERVER_NODE)
        timeout_task = minitwisted.Task(TIMEOUT_DELAY, None)
        # Client registers query
        bencoded_msg = self.querier.register_query(q, timeout_task)
        # Client sends bencoded_msg
        time.sleep(3)
        # The server never responds and the timeout is triggered
        stored_q = self.querier.on_timeout(tc.SERVER_ADDR)
        assert stored_q is q

    def test_unsolicited_response(self):
        """A response with no registered query is discarded (None)."""
        # Server creates unsolicited response
        # It might well be that the server responds using another port,
        # and therefore, the addr is not matched
        # TODO: consider accepting responses from a different port
        ping_r_msg_out = message.OutgoingPingResponse(tc.SERVER_ID)
        bencoded_r = ping_r_msg_out.encode('zz')
        # The client receives the bencoded message
        ping_r_in = message.IncomingMsg(bencoded_r, tc.SERVER_ADDR)
        stored_q = self.querier.on_response_received(ping_r_in, tc.SERVER_ADDR)
        assert stored_q is None

    def test_response_with_different_tid(self):
        """A response whose tid does not match the query is discarded."""
        # Client creates a query
        ping_msg = message.OutgoingPingQuery(tc.CLIENT_ID)
        q = Query(ping_msg, tc.SERVER_NODE)
        timeout_task = minitwisted.Task(TIMEOUT_DELAY, None)
        # Client registers query
        bencoded_msg = self.querier.register_query(q, timeout_task)
        # Client sends bencoded_msg
        time.sleep(1)
        # Server gets bencoded_msg and creates response
        ping_r_msg_out = message.OutgoingPingResponse(tc.SERVER_ID)
        bencoded_r = ping_r_msg_out.encode('zz')
        # The client receives the bencoded message
        ping_r_in = message.IncomingMsg(bencoded_r, tc.SERVER_ADDR)
        stored_q = self.querier.on_response_received(ping_r_in, tc.SERVER_ADDR)
        assert stored_q is None

    def test_error(self):
        """An error msg with an unknown tid is discarded (None)."""
        # Client creates a query
        ping_msg = message.OutgoingPingQuery(tc.CLIENT_ID)
        q = Query(ping_msg, tc.SERVER_NODE)
        timeout_task = minitwisted.Task(TIMEOUT_DELAY, None)
        # Client registers query
        bencoded_msg = self.querier.register_query(q, timeout_task)
        # Client sends bencoded_msg
        time.sleep(1)
        # Server gets bencoded_msg and creates response
        ping_r_msg_out = message.OutgoingErrorMsg(message.GENERIC_E)
        bencoded_r = ping_r_msg_out.encode(tc.TID)
        # The client receives the bencoded message
        ping_r_in = message.IncomingMsg(bencoded_r, tc.SERVER_ADDR)
        stored_q = self.querier.on_error_received(ping_r_in, tc.SERVER_ADDR)
        assert stored_q is None

    def tear_down(self):
        # restore the real clock monkey-patched in setup()
        global time
        time.unmock()
        time = querier.time = time.actual_time
Exemple #27
0
class Controller:
    """Coordinates a DHT node: reactor, querier, routing and lookups.

    Receives UDP datagrams, answers queries, matches responses/errors to
    pending queries, performs routing-table maintenance, and periodically
    saves its state (node id + routing nodes) to disk.
    """

    def __init__(self, dht_addr, state_path,
                 routing_m_mod, lookup_m_mod,
                 private_dht_name):
        """Wire up all managers.

        dht_addr -- (ip, port) this node listens on.
        state_path -- directory containing the state file (STATE_FILENAME).
        routing_m_mod / lookup_m_mod -- modules providing RoutingManager
            and LookupManager implementations.
        private_dht_name -- private DHT name, stored on the message module.
        """
        #TODO: don't do this evil stuff!!!  (module-global mutation)
        message.private_dht_name = private_dht_name

        self.state_filename = os.path.join(state_path, STATE_FILENAME)
        self.load_state()
        if not self._my_id:
            # no saved id: generate a random one
            self._my_id = identifier.RandomId()
        self._my_node = Node(dht_addr, self._my_id)
        self._tracker = tracker.Tracker()
        self._token_m = token_manager.TokenManager()

        self._reactor = ThreadedReactor()
        self._reactor.listen_udp(self._my_node.addr[1],
                                 self._on_datagram_received)
        #self._rpc_m = RPCManager(self._reactor)
        self._querier = Querier(self._my_id)
        # prefer nodes loaded from the state file over hard-coded bootstraps
        bootstrap_nodes = self.loaded_nodes or BOOTSTRAP_NODES
        del self.loaded_nodes
        self._routing_m = routing_m_mod.RoutingManager(self._my_node,
                                                       bootstrap_nodes)
#        self._responder = Responder(self._my_id, self._routing_m,
#                                    self._tracker, self._token_m)

        self._lookup_m = lookup_m_mod.LookupManager(self._my_id)
        current_time = time.time()
        self._next_maintenance_ts = current_time
        self._next_save_state_ts = current_time + SAVE_STATE_DELAY

        self._running = False

    def start(self):
        """Start the reactor and kick off the periodic main loop."""
        assert not self._running
        self._running = True
        self._reactor.start()
        self._main_loop()

    def stop(self):
        """Stop the reactor (managers are not stopped yet, see TODO)."""
        assert self._running
        #TODO2: stop each manager
        self._reactor.stop()

    def save_state(self):
        """Write my id and the main routing nodes to the state file.

        FORMAT: first line is the hex id; then one node per line:
        log_distance, hex_id, ip, port, rtt (ms).
        """
        rnodes = self._routing_m.get_main_rnodes()
        f = open(self.state_filename, 'w')
        try:
            f.write('%r\n' % self._my_id)
            for rnode in rnodes:
                f.write('%d\t%r\t%s\t%d\t%f\n' % (
                        self._my_id.log_distance(rnode.id),
                        rnode.id, rnode.addr[0], rnode.addr[1],
                        rnode.rtt * 1000))
        finally:
            f.close()

    def load_state(self):
        """Load my id and routing nodes from the state file.

        Leaves self._my_id as None and self.loaded_nodes empty when the
        file does not exist.
        """
        self._my_id = None
        self.loaded_nodes = []
        try:
            f = open(self.state_filename)
        except(IOError):
            return
        try:
            # the first line contains this node's identifier
            hex_id = f.readline().strip()
            self._my_id = Id(hex_id)
            # the rest of the lines contain routing table nodes
            # FORMAT
            # log_distance hex_id ip port rtt
            for line in f:
                _, hex_id, ip, port, _ = line.split()
                addr = (ip, int(port))
                node_ = Node(addr, Id(hex_id))
                self.loaded_nodes.append(node_)
        finally:
            # BUG FIX: was `f.close` (attribute access only, never called),
            # which leaked the file handle
            f.close()

    def get_peers(self, lookup_id, info_hash, callback_f, bt_port=0):
        """Start a get_peers lookup for info_hash.

        Returns (number of queries sent, peers tracked locally). When the
        routing table is empty, zero queries are sent, the node announces
        to itself, and the callback is NOT triggered.
        """
        assert self._running
        # look if I'm tracking this info_hash
        local_peers = self._tracker.get(info_hash)
        # do the lookup
        log_distance = info_hash.log_distance(self._my_id)
        bootstrap_rnodes = self._routing_m.get_closest_rnodes(log_distance,
                                                              None,
                                                              True)
        lookup_obj = self._lookup_m.get_peers(lookup_id, info_hash,
                                              callback_f, bt_port)
        lookup_queries_to_send = lookup_obj.start(bootstrap_rnodes)
        self._send_queries(lookup_queries_to_send)
        if not lookup_queries_to_send:
            # There are no nodes in my routing table, announce to myself
            self._announce(lookup_obj)
            # NOTICE: the callback is NOT triggered, zero is returned.
        return len(lookup_queries_to_send), local_peers

    def print_routing_table_stats(self):
        """Print routing-table statistics (delegates to RoutingManager)."""
        self._routing_m.print_stats()

    def _main_loop(self):
        """Periodic housekeeping: maintenance, state saving, rescheduling."""
        current_time = time.time()
        # Routing table
        if current_time > self._next_maintenance_ts:
            (maintenance_delay,
             queries_to_send,
             maintenance_lookup_target) = self._routing_m.do_maintenance()
            self._send_queries(queries_to_send)
            if maintenance_lookup_target:
                log_distance = maintenance_lookup_target.log_distance(
                    self._my_id)
                bootstrap_nodes = self._routing_m.get_closest_rnodes(
                    log_distance, None, True)
                lookup_obj = self._lookup_m.maintenance_lookup(
                    maintenance_lookup_target)
                lookup_queries_to_send = lookup_obj.start(bootstrap_nodes)
                self._send_queries(lookup_queries_to_send)
            self._next_maintenance_ts = (current_time
                                         + maintenance_delay)
        # Auto-save routing table
        if current_time > self._next_save_state_ts:
            self.save_state()
            self._next_save_state_ts = current_time + SAVE_STATE_DELAY

        # Schedule next call at the earlier of the two deadlines
        delay = (min(self._next_maintenance_ts, self._next_save_state_ts)
                 - current_time)
        self._reactor.call_later(delay, self._main_loop)

    def _maintenance_lookup(self, target):
        """Trigger a maintenance lookup for target (result unused here)."""
        self._lookup_m.maintenance_lookup(target)

    def _on_datagram_received(self, data, addr):
        """Decode an incoming datagram and dispatch by message type."""
        try:
            msg = message.IncomingMsg(data, addr)
        except(message.MsgError):
            return # ignore malformed message
        if msg.sender_id == self._my_id:
            logger.debug('Got a msg from myself:\n%r', msg)
            return

        if msg.type == message.QUERY:
            response_msg = self._get_response(msg)
            if response_msg:
                bencoded_response = response_msg.encode(msg.tid)
                self._reactor.sendto(bencoded_response, addr)

            maintenance_queries_to_send = self._routing_m.on_query_received(
                msg.sender_node)

        elif msg.type in (message.RESPONSE, message.ERROR):
            # NOTE(review): ERROR msgs are also matched via
            # on_response_received (tid/addr matching) -- confirm Querier
            # is meant to handle both types here
            related_query = self._querier.on_response_received(msg, addr)
            if not related_query:
                # Query timed out or unrequested response
                return
            # lookup related tasks
            if related_query.lookup_obj:
                if msg.type == message.RESPONSE:
                    (lookup_queries_to_send,
                     peers,
                     num_parallel_queries,
                     lookup_done
                     ) = related_query.lookup_obj.on_response_received(
                        msg, msg.sender_node)
                else: #ERROR
                    peers = None # an error msg doesn't have peers
                    (lookup_queries_to_send,
                     num_parallel_queries,
                     lookup_done
                     ) = related_query.lookup_obj.on_error_received(
                        msg, msg.sender_node)
                self._send_queries(lookup_queries_to_send)

                if related_query.lookup_obj.callback_f:
                    lookup_id = related_query.lookup_obj.lookup_id
                    if peers:
                        related_query.lookup_obj.callback_f(lookup_id, peers)
                    if lookup_done:
                        # announce, then signal completion with peers=None
                        self._announce(related_query.lookup_obj)
                        related_query.lookup_obj.callback_f(lookup_id, None)
            # maintenance related tasks
            if msg.type == message.RESPONSE:
                maintenance_queries_to_send = \
                    self._routing_m.on_response_received(
                    msg.sender_node, related_query.rtt, msg.all_nodes)
            else:
                maintenance_queries_to_send = \
                    self._routing_m.on_error_received(
                    msg.sender_node)
        else: # unknown type
            return
        self._send_queries(maintenance_queries_to_send)

    def _get_response(self, msg):
        """Build the response for an incoming query, or None if unknown."""
        if msg.query == message.PING:
            return message.OutgoingPingResponse(self._my_id)
        elif msg.query == message.FIND_NODE:
            log_distance = msg.target.log_distance(self._my_id)
            rnodes = self._routing_m.get_closest_rnodes(log_distance,
                                                       NUM_NODES, False)
            return message.OutgoingFindNodeResponse(self._my_id,
                                                    rnodes)
        elif msg.query == message.GET_PEERS:
            token = self._token_m.get()
            log_distance = msg.info_hash.log_distance(self._my_id)
            rnodes = self._routing_m.get_closest_rnodes(log_distance,
                                                       NUM_NODES, False)
            peers = self._tracker.get(msg.info_hash)
            if peers:
                logger.debug('RESPONDING with PEERS:\n%r' % peers)
            return message.OutgoingGetPeersResponse(self._my_id,
                                                    token,
                                                    nodes=rnodes,
                                                    peers=peers)
        elif msg.query == message.ANNOUNCE_PEER:
            # the peer's BT port comes in the message; its IP is the
            # datagram's source address
            peer_addr = (msg.sender_addr[0], msg.bt_port)
            self._tracker.put(msg.info_hash, peer_addr)
            return message.OutgoingAnnouncePeerResponse(self._my_id)
        else:
            logger.debug('Invalid QUERY: %r' % (msg.query))
            #TODO: maybe send an error back?

    def _on_response_received(self, msg):
        # placeholder: responses are handled in _on_datagram_received
        pass

    def _on_timeout(self, addr):
        """Handle a query timeout: notify lookup and routing managers."""
        related_query = self._querier.on_timeout(addr)
        if not related_query:
            return # timeout cancelled (got response/error already)
        if related_query.lookup_obj:
            (lookup_queries_to_send,
             num_parallel_queries,
             lookup_done
             ) = related_query.lookup_obj.on_timeout(related_query.dstnode)
            self._send_queries(lookup_queries_to_send)
            if lookup_done and related_query.lookup_obj.callback_f:
                self._announce(related_query.lookup_obj)
                lookup_id = related_query.lookup_obj.lookup_id
                related_query.lookup_obj.callback_f(lookup_id, None)
        maintenance_queries_to_send = self._routing_m.on_timeout(
            related_query.dstnode)
        self._send_queries(maintenance_queries_to_send)

    def _announce(self, lookup_obj):
        """Send announce queries produced by a finished lookup."""
        queries_to_send, announce_to_myself = lookup_obj.announce()
        self._send_queries(queries_to_send)
        # announce_to_myself is currently ignored (see disabled code)
        '''
        if announce_to_myself:
            self._tracker.put(lookup_obj._info_hash,
                              (self._my_node.addr[0], lookup_obj._bt_port))
        '''

    def _send_queries(self, queries_to_send, lookup_obj=None):
        """Register and send each query, scheduling its timeout task.

        Accepts None (no-op). lookup_obj is unused but kept for interface
        compatibility with callers.
        """
        if queries_to_send is None:
            return
        for query in queries_to_send:
            timeout_task = self._reactor.call_later(TIMEOUT_DELAY,
                                                    self._on_timeout,
                                                    query.dstnode.addr)
            bencoded_query = self._querier.register_query(query, timeout_task)
            self._reactor.sendto(bencoded_query, query.dstnode.addr)
class Modem2413U(Device):
    """==============  Insteon PowerLinc modem (PLM) ==============="""
    def __init__(self, name, addr):
        Device.__init__(self, name, addr)
        self.dbbuilder = ModemDBBuilder(addr, self.db)

    def __modifyModemDB(self, listener):
        # listener acts after the (slow) modem db download completes
        self.dbbuilder.setListener(listener)
        # after db download complete, listener will perform action
        iofun.out("getting db, be patient!")
        self.dbbuilder.clear()
        self.dbbuilder.start()

    def getdb(self):
        """getdb()
		download the modem database and print it on the console"""
        self.dbbuilder.start()
        self.dbbuilder.wait()
        self.dbbuilder.dumpDB()
        out("Modem Link DB complete")

    def readdb(self):
        """readdb()
		download the modem database and return it"""
        self.dbbuilder.start()
        self.dbbuilder.wait()
        out("Modem Link DB complete!")
        return self.dbbuilder.getdb()

    def startWatch(self):
        """startWatch()
		modem will print all incoming messages on terminal"""
        self.querier = Querier(InsteonAddress("00.00.00"))
        self.querier.setMsgHandler(MsgDumper("modem"))
        self.querier.startWait(10000)

    def stopWatch(self):
        """stopWatch()
		stop modem from printing all incoming messages on terminal"""
        if (self.querier):
            self.querier.cancel()

    def getid(self):
        """getid()
		get modem id data"""
        self.querier = Querier(InsteonAddress("00.00.00"))
        self.querier.setMsgHandler(IMInfoMsgHandler("getid"))
        msg = Msg.s_makeMessage("GetIMInfo")
        self.querier.sendMsg(msg)

    def getIMConfig(self):
        """getIMConfig()
		get modem configuration flags byte"""
        self.querier = Querier(InsteonAddress("00.00.00"))
        self.querier.setMsgHandler(IMConfigMsgHandler("getIMConfig"))
        msg = Msg.s_makeMessage("GetIMConfig")
        self.querier.sendMsg(msg)

    def sendOn(self, group):
        """sendOn(group)
		sends ALLLink broadcast ON message to group "group" """
        # 0x11 = Insteon ON command
        msg = message.createStdMsg(InsteonAddress("00.00.00"), 0x0f, 0x11,
                                   0xFF, group)
        iofun.writeMsg(msg)
        iofun.out("sent msg: " + msg.toString())

    def sendOff(self, group):
        """sendOff(group)
		sends ALLLink broadcast OFF message to group "group" """
        # 0x13 = Insteon OFF command
        msg = message.createStdMsg(InsteonAddress("00.00.00"), 0x0f, 0x13,
                                   0xFF, group)
        iofun.writeMsg(msg)
        iofun.out("sent msg: " + msg.toString())

    def linkAsController(self, group):
        """linkAsController(group)
		puts modem in link mode on group "group" """
        self.querier.setMsgHandler(DefaultMsgHandler("link as controller"))
        msg = Msg.s_makeMessage("StartALLLinking")
        msg.setByte("LinkCode", 0x01)
        msg.setByte("ALLLinkGroup", group)
        self.querier.sendMsg(msg)

    def linkAsResponder(self, group):
        """linkAsResponder(group)
		puts modem in link mode on group "group" """
        self.querier.setMsgHandler(DefaultMsgHandler("start linking"))
        msg = Msg.s_makeMessage("LinkCode" and "StartALLLinking")
        msg.setByte("LinkCode", 0x00)
        msg.setByte("ALLLinkGroup", group)
        self.querier.sendMsg(msg)

    def linkAsEither(self, group):
        """linkAsEither(group)
		puts modem in link mode to link as controller or responder on group "group" """
        self.querier.setMsgHandler(
            DefaultMsgHandler("link/unlink as controller or responder"))
        msg = Msg.s_makeMessage("StartALLLinking")
        msg.setByte("LinkCode", 0x03)
        msg.setByte("ALLLinkGroup", group)
        self.querier.sendMsg(msg)

    def respondToUnlink(self, group):
        """respondToUnlink(group)
		make modem respond to unlink message"""
        # could not get 0xFF to unlink
        self.linkAsEither(group)

    def unlinkAsController(self, group):
        """unlinkAsController(group)
		puts modem in unlink mode to unlink as controller on group "group" """
        self.querier.setMsgHandler(DefaultMsgHandler("unlink as controller"))
        msg = Msg.s_makeMessage("StartALLLinking")
        msg.setByte("LinkCode", 0xFF)
        msg.setByte("ALLLinkGroup", group)
        self.querier.sendMsg(msg)

    def cancelLinking(self):
        """cancelLinking()
		takes modem out of linking or unlinking mode"""
        self.querier.setMsgHandler(DefaultMsgHandler("cancel linking"))
        msg = Msg.s_makeMessage("CancelALLLinking")
        self.querier.sendMsg(msg)

    def addController(self, addr, group):
        """addController(addr, group):
		adds device with address "addr" to modem link database as controller for group "group" """
        # NOTE(review): record flags here are 0xa2 while
        # modifyFirstControllerOrAdd uses 0xe2 for controller records --
        # confirm which flags value is intended
        self.modifyRecord(addr, group, 0x40, 0xa2, [0, 0, group],
                          "addController")

    def addResponder(self, addr, group, data=None):
        """addResponder(addr, group[, data]):
		adds device with address "addr" to modem link database as responder to group "group" """
        # default link data (was "[00, 00, group]": leading-zero literals
        # are octal-style and invalid in Python 3)
        data = data if data else [0, 0, group]
        self.modifyRecord(addr, group, 0x41, 0xa2, data, "addResponder")

    def addSoftwareResponder(self, addr):
        """addSoftwareResponder(addr):
		adds device with address "addr" to modem link database as software responder"""
        self.modifyRecord(addr, 0xef, 0x41, 0xa2, [0, 0, 0xef],
                          "addSoftwareController")

    def removeResponderOrController(self, addr, group):
        """removeResponderOrController(addr, group)
		removes device with address "addr" and group "group" from modem link database"""
        self.__deleteFirstRecord(addr, group, "removeResponderOrController")

    def removeResponder(self, addr, group):
        """removeResponder(addr, group)
		could not be implemented for the modem. Use removeResponderOrController() instead!"""
        iofun.out("removeResponder(addr, group) could not be implemented" +
                  " for the modem. Use removeResponderOrController() instead!")

    def removeController(self, addr, group):
        """removeController(addr, group)
		could not be implemented for the modem. Use removeResponderOrController() instead!"""
        iofun.out("removeController(addr, group) could not be implemented" +
                  " for the modem. Use removeResponderOrController() instead!")

    def removeDevice(self, addr):
        """removeDevice(addr):
		removes all links to device with address "addr" from modem database"""
        self.__modifyModemDB(DeviceRemover(self, addr))

    def __deleteFirstRecord(self, addr, group, text="delete record"):
        # control code 0x80 = delete first matching record
        self.modifyRecord(addr, group, 0x80, 0x00, [0, 0, 0], text)

    def modifyFirstOrAdd(self, addr, group, recordFlags, data):
        # bit 6 of the record flags distinguishes controller from responder
        if (recordFlags & (1 << 6)):  # controller
            self.modifyRecord(addr, group, 0x40, recordFlags, data,
                              "modify first or add")
        else:
            self.modifyRecord(addr, group, 0x41, recordFlags, data,
                              "modify first or add")

    def modifyFirstControllerOrAdd(self, addr, group, data):
        self.modifyRecord(addr, group, 0x40, 0xe2, data,
                          "modify first ctrl found or add")

    def modifyFirstResponderOrAdd(self, addr, group, data):
        self.modifyRecord(addr, group, 0x41, 0xa2, data,
                          "modify first resp found or add")

    def modifyRecord(self, addr, group, controlCode, recordFlags, data, txt):
        # build the ManageALLLinkRecord message and send it, reporting
        # results through a DefaultMsgHandler labeled with txt
        msg = self.__makeModMsg(addr, group, controlCode, recordFlags, data,
                                txt)
        self.querier = Querier(self.address)
        self.querier.setMsgHandler(DefaultMsgHandler(txt))
        self.querier.sendMsg(msg)

    def __makeModMsg(self, addr, group, controlCode, recordFlags, data, txt):
        msg = Msg.s_makeMessage("ManageALLLinkRecord")
        msg.setByte("controlCode", controlCode)
        # mod. first ctrl found or add
        msg.setByte("recordFlags", recordFlags)
        msg.setByte("ALLLinkGroup", group)
        msg.setAddress("linkAddress", InsteonAddress(addr))
        msg.setByte("linkData1", data[0] & 0xFF)
        msg.setByte("linkData2", data[1] & 0xFF)
        msg.setByte("linkData3", data[2] & 0xFF)
        return msg

    def saveDB(self, filename):
        """saveDB(filename)
		save modem database to file "filename" """
        self.dbbuilder.start()
        self.dbbuilder.wait()
        self.dbbuilder.saveDB(filename)

    def loadDB(self, filename):
        """loadDB(filename)
		load modem database from file "filename" (note: this will not change the actual modem db) """
        self.dbbuilder.loadDB(filename)
        self.dbbuilder.dumpDB()

    def nukeDB(self):
        """nukeDB()
		delete complete modem database! """
        self.dbbuilder.start()
        self.dbbuilder.wait()
        self.dbbuilder.nukeDB(self)

    def restoreDB(self, filename):
        """restoreDB(filename)
		restore modem database from file "filename" """
        self.loadDB(filename)
        self.dbbuilder.restoreDB(self, filename)
Exemple #29
0
from csv import DictReader
from rbm import RBM, np
from querier import Querier
from propertyFinder import PropertyFinder

TRAINING_SAMPLE_SIZE = 100

localQuerier = Querier('http://127.0.0.1:9999/bigdata/sparql')
localPropertyFinder = PropertyFinder(localQuerier)

effectsList = [{'effect': "", 'disease': ""}]

csvFile = open("result.csv")
trainingSet = DictReader(csvFile)


trainingRows = []
trainingData = [[0 for i in range(len(effectsList))] for j in range(TRAINING_SAMPLE_SIZE)]

index1=0
for row in  trainingSet:
    if index1 < TRAINING_SAMPLE_SIZE :
        geneProperties = DictReader(localPropertyFinder.findGeneProperties(row['gene']))
        for prop in geneProperties:
            for index2, item in effectsList:
                if prop == item:
                    trainingData[index1][index2] = 1
        drugProperties = DictReader(localPropertyFinder.findDrugProperties(row['drug']))
        for prop in drugProperties:
            for index2, item in effectsList:
                if prop == item:
Exemple #30
0
class TestQuerier:
    """Unit tests for Querier (duplicate copy of the earlier TestQuerier,
    with different line wrapping).

    NOTE(review): setup() replaces the module-global `time` (and
    querier.time) with MockTime; tear_down() restores the real clock.
    """

    def setup(self):
        global time
        time = querier.time = MockTime()
        self.querier = Querier(tc.CLIENT_ID)

    def test_generate_tids(self):
        """Tids are two little-endian bytes cycling through the counter."""
        #TODO: move to message
        num_tids = 1000
        if RUN_CPU_INTENSIVE_TESTS:
            num_tids =  pow(2, 16) + 2 #CPU intensive
        for i in xrange(num_tids):
            eq_(self.querier._next_tid(),
                chr(i%256)+chr((i/256)%256))

    def test_ping_with_reponse(self):
        """A response with the query's tid and addr matches the query."""
        # Client creates a query
        ping_msg = message.OutgoingPingQuery(tc.CLIENT_ID)
        q = Query(ping_msg, tc.SERVER_NODE)
        timeout_task = minitwisted.Task(TIMEOUT_DELAY, None)
        # Client registers query
        bencoded_msg = self.querier.register_query(q, timeout_task)
        # Client sends bencoded_msg
        time.sleep(1)
        # Server gets bencoded_msg and creates response
        ping_r_msg_out = message.OutgoingPingResponse(tc.SERVER_ID)
        bencoded_r = ping_r_msg_out.encode(q.tid)
        # The client receives the bencoded message
        ping_r_in = message.IncomingMsg(bencoded_r,
                                        tc.SERVER_ADDR)
        stored_q = self.querier.on_response_received(ping_r_in,
                                                     tc.SERVER_ADDR)
        assert stored_q is q

    def test_ping_with_timeout(self):
        """When no response arrives, on_timeout returns the pending query."""
        # Client creates a query
        ping_msg = message.OutgoingPingQuery(tc.CLIENT_ID)
        q = Query(ping_msg, tc.SERVER_NODE)
        timeout_task = minitwisted.Task(TIMEOUT_DELAY, None)
        # Client registers query
        bencoded_msg = self.querier.register_query(q, timeout_task)
        # Client sends bencoded_msg
        time.sleep(3)
        # The server never responds and the timeout is triggered
        stored_q = self.querier.on_timeout(tc.SERVER_ADDR)
        assert stored_q is q

    def test_unsolicited_response(self):
        """A response with no registered query is discarded (None)."""
        # Server creates unsolicited response
        # It might well be that the server responds using another port,
        # and therefore, the addr is not matched
        # TODO: consider accepting responses from a different port
        ping_r_msg_out = message.OutgoingPingResponse(tc.SERVER_ID)
        bencoded_r = ping_r_msg_out.encode('zz')
        # The client receives the bencoded message
        ping_r_in = message.IncomingMsg(bencoded_r,
                                        tc.SERVER_ADDR)
        stored_q = self.querier.on_response_received(ping_r_in,
                                                     tc.SERVER_ADDR)
        assert stored_q is None

    def test_response_with_different_tid(self):
        """A response whose tid does not match the query is discarded."""
        # Client creates a query
        ping_msg = message.OutgoingPingQuery(tc.CLIENT_ID)
        q = Query(ping_msg, tc.SERVER_NODE)
        timeout_task = minitwisted.Task(TIMEOUT_DELAY, None)
        # Client registers query
        bencoded_msg = self.querier.register_query(q, timeout_task)
        # Client sends bencoded_msg
        time.sleep(1)
        # Server gets bencoded_msg and creates response
        ping_r_msg_out = message.OutgoingPingResponse(tc.SERVER_ID)
        bencoded_r = ping_r_msg_out.encode('zz')
        # The client receives the bencoded message
        ping_r_in = message.IncomingMsg(bencoded_r,
                                        tc.SERVER_ADDR)
        stored_q = self.querier.on_response_received(ping_r_in,
                                                     tc.SERVER_ADDR)
        assert stored_q is None

    def test_error(self):
        """An error msg with an unknown tid is discarded (None)."""
        # Client creates a query
        ping_msg = message.OutgoingPingQuery(tc.CLIENT_ID)
        q = Query(ping_msg, tc.SERVER_NODE)
        timeout_task = minitwisted.Task(TIMEOUT_DELAY, None)
        # Client registers query
        bencoded_msg = self.querier.register_query(q, timeout_task)
        # Client sends bencoded_msg
        time.sleep(1)
        # Server gets bencoded_msg and creates response
        ping_r_msg_out = message.OutgoingErrorMsg(message.GENERIC_E)
        bencoded_r = ping_r_msg_out.encode(tc.TID)
        # The client receives the bencoded message
        ping_r_in = message.IncomingMsg(bencoded_r,
                                        tc.SERVER_ADDR)
        stored_q = self.querier.on_error_received(ping_r_in,
                                                  tc.SERVER_ADDR)
        assert stored_q is None




    def tear_down(self):
        # restore the real clock monkey-patched in setup()
        global time
        time.unmock()
        time = querier.time = time.actual_time
 def modifyRecord(self, addr, group, controlCode, recordFlags, data, txt):
     # NOTE(review): stray duplicate of Modem2413U.modifyRecord with
     # non-standard indentation -- presumably a paste/concatenation
     # artifact; confirm whether it belongs to any class
     msg = self.__makeModMsg(addr, group, controlCode, recordFlags, data,
                             txt)
     self.querier = Querier(self.address)
     self.querier.setMsgHandler(DefaultMsgHandler(txt))
     self.querier.sendMsg(msg)
class Controller:
    """Wire together the DHT modules (querier, routing manager, responder,
    lookup manager, tracker, experimental manager) and expose the three
    handlers minitwisted needs: main_loop (heartbeat handler),
    on_datagram_received (networking handler) and get_peers (external
    handler).

    Fixes relative to the previous revision:
    - __init__: removed duplicated 'self._my_id = self._my_id = ...'.
    - on_datagram_received (ERROR branch): announce queries are now
      registered (stamped) before being sent; previously raw query
      objects were extended into datagrams_to_send.
    - _on_timeout: experimental queries are returned to the caller
      instead of referencing an undefined local 'datagrams_to_send'
      (NameError).
    - Size-estimation logging deduplicated into _write_size_estimation.
    """

    def __init__(self, version_label,
                 my_node, state_filename,
                 routing_m_mod, lookup_m_mod,
                 experimental_m_mod,
                 private_dht_name,
                 bootstrap_mode):

        if size_estimation:
            self._size_estimation_file = open('size_estimation.dat', 'w')

        self.state_filename = state_filename
        saved_id, saved_bootstrap_nodes = state.load(self.state_filename)
        my_addr = my_node.addr
        # Node id precedence: user-provided > saved in state file > random.
        self._my_id = my_node.id # id indicated by user
        if not self._my_id:
            self._my_id = saved_id # id loaded from file
        if not self._my_id:
            self._my_id = identifier.RandomId() # random id
        self._my_node = Node(my_addr, self._my_id, version=version_label)
        self.msg_f = message.MsgFactory(version_label, self._my_id,
                                        private_dht_name)
        self._querier = Querier()
        self._routing_m = routing_m_mod.RoutingManager(
            self._my_node, saved_bootstrap_nodes, self.msg_f)

        self._responder = responder.Responder(self._my_id, self._routing_m,
                                              self.msg_f, bootstrap_mode)
        self._tracker = self._responder._tracker

        self._lookup_m = lookup_m_mod.LookupManager(self._my_id, self.msg_f)
        self._experimental_m = experimental_m_mod.ExperimentalManager(
            self._my_node.id, self.msg_f)

        # Scheduling state used by main_loop to decide what work is due.
        current_ts = time.time()
        self._next_save_state_ts = current_ts + SAVE_STATE_DELAY
        self._next_maintenance_ts = current_ts
        self._next_timeout_ts = current_ts
        self._next_main_loop_call_ts = current_ts
        self._pending_lookups = []
        # List of (timestamp, info_hash, peers), ordered by timestamp.
        self._cached_lookups = []

    def on_stop(self):
        """Notify modules that the node is shutting down."""
        self._experimental_m.on_stop()

    def get_peers(self, lookup_id, info_hash, callback_f, bt_port, use_cache):
        """
        Start a get_peers lookup whose target is 'info_hash'. The handler
        'callback_f' will be called with two arguments ('lookup_id' and a
        'peer list') whenever peers are discovered. Once the lookup is
        completed, the handler will be called with 'lookup_id' and None as
        arguments.

        This method is designed to be used as minitwisted's external handler.

        """
        datagrams_to_send = []
        logger.debug('get_peers %d %r' % (bt_port, info_hash))
        if use_cache:
            peers = self._get_cached_peers(info_hash)
            if peers and callable(callback_f):
                # Serve from cache: report the peers, then immediately
                # signal completion with None. No queries are sent.
                callback_f(lookup_id, peers, None)
                callback_f(lookup_id, None, None)
                return datagrams_to_send
        self._pending_lookups.append(self._lookup_m.get_peers(lookup_id,
                                                              info_hash,
                                                              callback_f,
                                                              bt_port))
        queries_to_send = self._try_do_lookup()
        datagrams_to_send = self._register_queries(queries_to_send)
        return datagrams_to_send

    def _get_cached_peers(self, info_hash):
        """Return cached peers for 'info_hash', or None if absent/stale."""
        oldest_valid_ts = time.time() - CACHE_VALID_PERIOD
        for ts, cached_info_hash, peers in self._cached_lookups:
            if ts > oldest_valid_ts and info_hash == cached_info_hash:
                return peers

    def _add_cache_peers(self, info_hash, peers):
        """Cache 'peers' for 'info_hash', evicting stale entries and
        merging with the newest entry when it has the same info_hash."""
        oldest_valid_ts = time.time() - CACHE_VALID_PERIOD
        while self._cached_lookups and self._cached_lookups[0][0] < oldest_valid_ts:
            # clean up old entries
            del self._cached_lookups[0]
        if self._cached_lookups and self._cached_lookups[-1][1] == info_hash:
            self._cached_lookups[-1][2].extend(peers)
        else:
            self._cached_lookups.append((time.time(), info_hash, peers))

    def _try_do_lookup(self):
        """Start the oldest pending lookup if bootstrap rnodes are
        available; otherwise schedule a retry in 0.2 s. Return the lookup
        queries to send (empty list when nothing was started)."""
        queries_to_send = []
        if self._pending_lookups:
            lookup_obj = self._pending_lookups[0]
        else:
            return queries_to_send
        distance = lookup_obj.info_hash.distance(self._my_id)
        bootstrap_rnodes = self._routing_m.get_closest_rnodes(distance.log,
                                                              0,
                                                              True)
        #TODO: get the full bucket
        if bootstrap_rnodes:
            del self._pending_lookups[0]
            # look if I'm tracking this info_hash
            peers = self._tracker.get(lookup_obj.info_hash)
            callback_f = lookup_obj.callback_f
            if peers:
                self._add_cache_peers(lookup_obj.info_hash, peers)
                if callable(callback_f):
                    callback_f(lookup_obj.lookup_id, peers, None)
            # do the lookup
            queries_to_send = lookup_obj.start(bootstrap_rnodes)
        else:
            # No rnodes yet (e.g. still bootstrapping): retry soon.
            next_lookup_attempt_ts = time.time() + .2
            self._next_main_loop_call_ts = min(self._next_main_loop_call_ts,
                                               next_lookup_attempt_ts)
        return queries_to_send

    def print_routing_table_stats(self):
        """Dump routing table statistics to stdout (delegated)."""
        self._routing_m.print_stats()

    def _write_size_estimation(self, lookup_obj):
        """Append a size-estimation sample for a completed lookup.

        NOTE(review): assumes get_number_nodes_within_region() returns a
        2-tuple so the '%d %d' format receives two values -- confirm.
        """
        line = '%d %d\n' % (
            lookup_obj.get_number_nodes_within_region())
        self._size_estimation_file.write(line)
        self._size_estimation_file.flush()

    def main_loop(self):
        """
        Perform maintenance operations. The main operation is routing table
        maintenance where staled nodes are added/probed/replaced/removed as
        needed. The routing management module specifies the implementation
        details.  This includes keeping track of queries that have not been
        responded for a long time (timeout) with the help of
        querier.Querier. The routing manager and the lookup manager will be
        informed of those timeouts.

        This method is designed to be used as minitwisted's heartbeat handler.

        """
        queries_to_send = []
        current_ts = time.time()
        #TODO: I think this if should be removed
        # At most, 1 second between calls to main_loop after the first call
        if current_ts >= self._next_main_loop_call_ts:
            self._next_main_loop_call_ts = current_ts + 1
        else:
            # It's too early
            return self._next_main_loop_call_ts, []
        # Retry failed lookup (if any)
        queries_to_send.extend(self._try_do_lookup())

        # Take care of timeouts
        if current_ts >= self._next_timeout_ts:
            (self._next_timeout_ts,
             timeout_queries) = self._querier.get_timeout_queries()
            for query in timeout_queries:
                queries_to_send.extend(self._on_timeout(query))

        # Routing table maintenance
        if time.time() >= self._next_maintenance_ts:
            (maintenance_delay,
             queries,
             maintenance_lookup) = self._routing_m.do_maintenance()
            self._next_maintenance_ts = current_ts + maintenance_delay
            self._next_main_loop_call_ts = min(self._next_main_loop_call_ts,
                                               self._next_maintenance_ts)
            queries_to_send.extend(queries)
            if maintenance_lookup:
                target, rnodes = maintenance_lookup
                lookup_obj = self._lookup_m.maintenance_lookup(target)
                queries_to_send.extend(lookup_obj.start(rnodes))

        # Auto-save routing table
        if current_ts >= self._next_save_state_ts:
            state.save(self._my_id,
                       self._routing_m.get_main_rnodes(),
                       self.state_filename)
            self._next_save_state_ts = current_ts + SAVE_STATE_DELAY
            self._next_main_loop_call_ts = min(self._next_main_loop_call_ts,
                                               self._next_maintenance_ts,
                                               self._next_timeout_ts,
                                               self._next_save_state_ts)
        # Return control to reactor
        datagrams_to_send = self._register_queries(queries_to_send)
        return self._next_main_loop_call_ts, datagrams_to_send

    def _maintenance_lookup(self, target):
        # NOTE(review): the lookup object (and its queries) is discarded
        # here, unlike the call site in main_loop -- confirm intentional.
        self._lookup_m.maintenance_lookup(target)

    def on_datagram_received(self, datagram):
        """
        Perform the actions associated to the arrival of the given
        datagram. The datagram will be ignored in cases such as invalid
        format. Otherwise, the datagram will be decoded and different modules
        will be informed to take action on it. For instance, if the datagram
        contains a response to a lookup query, both routing and lookup manager
        will be informed. Additionally, if that response contains peers, the
        lookup's handler will be called (see get_peers above).
        This method is designed to be used as minitwisted's networking handler.

        """
        exp_queries_to_send = []

        addr = datagram.addr
        datagrams_to_send = []
        try:
            msg = self.msg_f.incoming_msg(datagram)
        except message.MsgError:
            # ignore message
            return self._next_main_loop_call_ts, datagrams_to_send

        if msg.type == message.QUERY:
            if msg.src_node.id == self._my_id:
                logger.debug('Got a msg from myself:\n%r', msg)
                return self._next_main_loop_call_ts, datagrams_to_send
            # inform experimental_module
            exp_queries_to_send = self._experimental_m.on_query_received(msg)

            response_msg = self._responder.get_response(msg)
            if response_msg:
                bencoded_response = response_msg.stamp(msg.tid)
                datagrams_to_send.append(
                    message.Datagram(bencoded_response, addr))
            maintenance_queries_to_send = self._routing_m.on_query_received(
                msg.src_node)

        elif msg.type == message.RESPONSE:
            related_query = self._querier.get_related_query(msg)
            if not related_query:
                # Query timed out or unrequested response
                return self._next_main_loop_call_ts, datagrams_to_send
            exp_queries_to_send = self._experimental_m.on_response_received(
                                                        msg, related_query)
            # lookup related tasks
            if related_query.lookup_obj:
                (lookup_queries_to_send,
                 peers,
                 num_parallel_queries,
                 lookup_done
                 ) = related_query.lookup_obj.on_response_received(
                    msg, msg.src_node)
                datagrams = self._register_queries(lookup_queries_to_send)
                datagrams_to_send.extend(datagrams)

                lookup_obj = related_query.lookup_obj
                lookup_id = lookup_obj.lookup_id
                callback_f = lookup_obj.callback_f
                if peers:
                    self._add_cache_peers(lookup_obj.info_hash, peers)
                    if callable(callback_f):
                        callback_f(lookup_id, peers, msg.src_node)
                if lookup_done:
                    if callable(callback_f):
                        # None signals completion to the caller.
                        callback_f(lookup_id, None, msg.src_node)
                    queries_to_send = self._announce(
                        related_query.lookup_obj)
                    datagrams = self._register_queries(
                        queries_to_send)
                    datagrams_to_send.extend(datagrams)

                # Size estimation
                if size_estimation and lookup_done:
                    self._write_size_estimation(related_query.lookup_obj)

            # maintenance related tasks
            maintenance_queries_to_send = \
                self._routing_m.on_response_received(
                msg.src_node, related_query.rtt, msg.all_nodes)

        elif msg.type == message.ERROR:
            related_query = self._querier.get_related_query(msg)
            if not related_query:
                # Query timed out or unrequested response
                return self._next_main_loop_call_ts, datagrams_to_send
            exp_queries_to_send = self._experimental_m.on_error_received(
                msg, related_query)
            # lookup related tasks
            if related_query.lookup_obj:
                peers = None # an error msg doesn't have peers
                (lookup_queries_to_send,
                 num_parallel_queries,
                 lookup_done
                 ) = related_query.lookup_obj.on_error_received(msg)
                datagrams = self._register_queries(lookup_queries_to_send)
                datagrams_to_send.extend(datagrams)

                if lookup_done:
                    # Size estimation
                    if size_estimation:
                        self._write_size_estimation(related_query.lookup_obj)
                    # FIX: announce queries must be registered (stamped into
                    # datagrams) before sending; previously raw query
                    # objects were extended into datagrams_to_send.
                    queries_to_send = self._announce(related_query.lookup_obj)
                    datagrams = self._register_queries(queries_to_send)
                    datagrams_to_send.extend(datagrams)
                callback_f = related_query.lookup_obj.callback_f
                if callback_f and callable(callback_f):
                    lookup_id = related_query.lookup_obj.lookup_id
                    if lookup_done:
                        callback_f(lookup_id, None, msg.src_node)
            # maintenance related tasks
            maintenance_queries_to_send = \
                self._routing_m.on_error_received(addr)

        else: # unknown type
            return self._next_main_loop_call_ts, datagrams_to_send
        # we are done with the plugins
        # now we have maintenance_queries_to_send, let's send them!
        datagrams = self._register_queries(maintenance_queries_to_send)
        datagrams_to_send.extend(datagrams)
        if exp_queries_to_send:
            datagrams = self._register_queries(exp_queries_to_send)
            datagrams_to_send.extend(datagrams)
        return self._next_main_loop_call_ts, datagrams_to_send

    # Unused callback placeholders; kept for interface compatibility.
    def _on_query_received(self):
        return
    def _on_response_received(self):
        return
    def _on_error_received(self):
        return

    def _on_timeout(self, related_query):
        """Handle a query timeout: inform lookup, routing and experimental
        managers and return the new queries to send (the caller registers
        them)."""
        queries_to_send = []
        exp_queries_to_send = self._experimental_m.on_timeout(related_query)
        if related_query.lookup_obj:
            (lookup_queries_to_send,
             num_parallel_queries,
             lookup_done
             ) = related_query.lookup_obj.on_timeout(related_query.dst_node)
            queries_to_send.extend(lookup_queries_to_send)
            callback_f = related_query.lookup_obj.callback_f
            if lookup_done:
                # Size estimation
                if size_estimation:
                    self._write_size_estimation(related_query.lookup_obj)
                queries_to_send.extend(self._announce(
                        related_query.lookup_obj))
                lookup_id = related_query.lookup_obj.lookup_id
                if callback_f and callable(callback_f):
                    related_query.lookup_obj.callback_f(lookup_id, None, None)
        maintenance_queries_to_send = self._routing_m.on_timeout(
            related_query.dst_node)
        if maintenance_queries_to_send:
            queries_to_send.extend(maintenance_queries_to_send)
        if exp_queries_to_send:
            # FIX: previously extended an undefined 'datagrams_to_send'
            # local (NameError). These are queries: return them so the
            # caller registers and sends them like the others.
            queries_to_send.extend(exp_queries_to_send)
        return queries_to_send

    def _announce(self, lookup_obj):
        """Return the announce_peer queries for a completed lookup."""
        queries_to_send, announce_to_myself = lookup_obj.announce()
        # TODO(review): announce_to_myself is ignored; dead code previously
        # suggested storing (my_addr, bt_port) in self._tracker here.
        return queries_to_send

    def _register_queries(self, queries_to_send, lookup_obj=None):
        """Register 'queries_to_send' with the querier and return the
        datagrams to put on the wire; also pulls the next timeout check
        forward if needed."""
        if not queries_to_send:
            return []
        timeout_call_ts, datagrams_to_send = self._querier.register_queries(
            queries_to_send)
        self._next_main_loop_call_ts = min(self._next_main_loop_call_ts,
                                           timeout_call_ts)
        return datagrams_to_send
Exemple #33
0
  ?clinvarVariant clinvarv:Variant_Phenotype ?disease
  ?clinvarVariant ?effect ?disease
}
"""

# Expected to return no rows a priori (original note: "Vide a priori").
query7 = """
SELECT DISTINCT ?effect ?disease

WHERE {
  ?clinvarVariant clinvar:x-medgen ?disease
  ?clinvarVariant ?effect ?disease
}
"""
# NOTE(review): the two triple patterns in query7 are not separated by a
# '.', which SPARQL 1.1 requires between triples in a group graph pattern
# -- confirm the endpoint tolerates this (it may be why the query is empty).

# Querier bound to the SPARQL endpoint 'url' (defined earlier in the file).
localQuerier = Querier(url)

# Earlier queries kept commented out; uncomment to re-run and dump to CSV.
# resultQuery1 = localQuerier.query(prefix+query1)
# csvFile1 = open("csvFile1.csv", 'w')
# csvFile1.write(resultQuery1)
# print "[Running Time] %s sec" % (time.time() - start_time)

# resultQuery2 = localQuerier.query(prefix+query2)
# csvFile2 = open("csvFile2.csv", 'w')
# csvFile2.write(resultQuery2)
# print "[Running Time] %s sec" % (time.time() - start_time)

# resultQuery3 = localQuerier.query(prefix+query3)
# csvFile3 = open("csvFile3.csv", 'w')
# csvFile3.write(resultQuery3)
# print "[Running Time] %s sec" % (time.time() - start_time)
class Modem2413U(Device):
	"""==============  Insteon PowerLinc modem (PLM) ==============="""
	# Jython console wrapper around the PLM. Each method either builds an
	# Insteon IM message and sends it through a Querier, or drives the
	# ModemDBBuilder to read/modify the modem's link database.
	def __init__(self, name, addr):
		Device.__init__(self, name, addr)
		self.dbbuilder = ModemDBBuilder(addr, self.db)
	def __modifyModemDB(self, listener):
		# Kick off a full DB download; 'listener' acts when it completes.
		self.dbbuilder.setListener(listener)
		# after db download complete, listener will perform action
		iofun.out("getting db, be patient!")
		self.dbbuilder.clear()
		self.dbbuilder.start()

	def getdb(self):
		"""getdb()
		download the modem database and print it on the console"""
		self.dbbuilder.start()
		self.dbbuilder.wait()
		self.dbbuilder.dumpDB()
		# NOTE(review): bare out() here vs iofun.out() elsewhere -- confirm
		# 'out' is imported at module level.
		out("Modem Link DB complete")
	def startWatch(self):
		"""startWatch()
		modem will print all incoming messages on terminal"""
		self.querier = Querier(InsteonAddress("00.00.00"))
		self.querier.setMsgHandler(MsgDumper("modem"))
		self.querier.startWait(10000)
	def stopWatch(self):
		"""stopWatch()
		stop modem from printing all incoming messages on terminal"""
		if (self.querier):
			self.querier.cancel()
	def getid(self):
		"""getid()
		get modem id data"""
		self.querier = Querier(InsteonAddress("00.00.00"))
		self.querier.setMsgHandler(IMInfoMsgHandler("getid"))
		msg = Msg.s_makeMessage("GetIMInfo")
		self.querier.sendMsg(msg)
	def sendOn(self, group):
		"""sendOn(group)
		sends ALLLink broadcast ON message to group "group" """
		# 0x0f = broadcast flags, 0x11 = ON command, 0xFF = full level.
		msg = message.createStdMsg(InsteonAddress("00.00.00"), 0x0f,
									0x11, 0xFF, group)
		iofun.writeMsg(msg)
		iofun.out("sent msg: " + msg.toString())
	def sendOff(self, group):
		"""sendOff(group)
		sends ALLLink broadcast OFF message to group "group" """
		# Same as sendOn but with the OFF command byte (0x13).
		msg = message.createStdMsg(InsteonAddress("00.00.00"), 0x0f,
									0x13, 0xFF, group)
		iofun.writeMsg(msg)
		iofun.out("sent msg: " + msg.toString())

	def linkAsController(self, otherDevice, group):
		"""linkAsController(otherDevice, group)
		puts modem in link mode to control device "otherDevice" on group "group" """
		addr = InsteonAddress(otherDevice)
		self.querier = Querier(addr)
		self.querier.setMsgHandler(DefaultMsgHandler("link as controller"))
		msg = Msg.s_makeMessage("StartALLLinking")
		msg.setByte("LinkCode", 0x01)
		msg.setByte("ALLLinkGroup", group)
		self.querier.sendMsg(msg)
	def linkAsResponder(self, otherDevice, group):
		"""linkAsResponder(otherDevice, group)
		puts modem in link mode to respond to device "otherDevice" on group "group" """
		addr = InsteonAddress(otherDevice)
		self.querier = Querier(addr)
		self.querier.setMsgHandler(DefaultMsgHandler("start linking"))
		msg = Msg.s_makeMessage("StartALLLinking")
		msg.setByte("LinkCode", 0x00)
		msg.setByte("ALLLinkGroup", group)
		self.querier.sendMsg(msg)
	def linkAsEither(self, otherDevice, group):
		"""linkAsEither(otherDevice, group)
		puts modem in link mode to link as controller or responder to device "otherDevice" on group "group" """
		addr = InsteonAddress(otherDevice)
		self.querier = Querier(addr)
		self.querier.setMsgHandler(
			DefaultMsgHandler("link/unlink as controller or responder"))
		msg = Msg.s_makeMessage("StartALLLinking")
		msg.setByte("LinkCode", 0x03)
		msg.setByte("ALLLinkGroup", group)
		self.querier.sendMsg(msg)
	def respondToUnlink(self, otherDevice, group):
		"""respondToUnlink(otherDevice, group)
		make modem respond to unlink message from other device"""
		# could not get 0xFF to unlink
		self.linkAsEither(otherDevice, group)
	def unlinkAsController(self, otherDevice, group):
		"""unlinkAsController(otherDevice, group)
		puts modem in unlink mode to unlink as controller from device "otherDevice" on group "group" """
		addr = InsteonAddress(otherDevice)
		self.querier = Querier(addr)
		self.querier.setMsgHandler(DefaultMsgHandler("unlink as controller"))
		msg = Msg.s_makeMessage("StartALLLinking")
		msg.setByte("LinkCode", 0xFF)
		msg.setByte("ALLLinkGroup", group)
		self.querier.sendMsg(msg)
	def cancelLinking(self):
		"""cancelLinking()
		takes modem out of linking or unlinking mode"""
		self.querier = Querier(InsteonAddress("00.00.00"))
		self.querier.setMsgHandler(DefaultMsgHandler("cancel linking"))
		msg = Msg.s_makeMessage("CancelALLLinking")
		self.querier.sendMsg(msg)
	def addController(self, addr, group):
		"""addController(addr, group):
		adds device with address "addr" to modem link database as controller for group "group" """
		self.modifyRecord(addr, group, 0x40, 0xa2, [0,0,group], "addController")
	def addResponder(self, addr, group):
		"""addResponder(addr, group):
		adds device with address "addr" to modem link database as responder to group "group" """
		self.modifyRecord(addr, group, 0x41, 0xa2, [0,0,group], "addResponder")
	def addSoftwareResponder(self, addr):
		"""addSoftwareResponder(addr):
		adds device with address "addr" to modem link database as software responder"""
		self.modifyRecord(addr, 0xef, 0x41, 0xa2, [0,0,0xef],
						  "addSoftwareController")
	def removeResponderOrController(self, addr, group):
		"""removeResponderOrController(addr, group)
		removes device with address "addr" and group "group" from modem link database"""
		self.__deleteFirstRecord(addr, group, "removeResponderOrController")
	def removeResponder(self, addr, group):
		"""removeResponder(addr, group)
		could not be implemented for the modem. Use removeResponderOrController() instead!"""
		iofun.out("removeResponder(addr, group) could not be implemented" +
				  " for the modem. Use removeResponderOrController() instead!")
	def removeController(self, addr, group):
		"""removeController(addr, group)
		could not be implemented for the modem. Use removeResponderOrController() instead!"""
		iofun.out("removeController(addr, group) could not be implemented" +
				  " for the modem. Use removeResponderOrController() instead!")
	def removeDevice(self, addr):
		"""removeDevice(addr):
		removes all links to device with address "addr" from modem database"""
		self.__modifyModemDB(DeviceRemover(self, addr))
	def __deleteFirstRecord(self, addr, group, text = "delete record"):
		# Control code 0x80 = delete first matching record.
		self.modifyRecord(addr, group, 0x80, 0x00, [0,0,0], text)
	def modifyFirstOrAdd(self, addr, group, recordFlags, data):
		# Bit 6 of the record flags distinguishes controller from responder.
		if (recordFlags & (1 << 6)): # controller
			self.modifyRecord(addr, group, 0x40, recordFlags,
							  data, "modify first or add")
		else:
			self.modifyRecord(addr, group, 0x41, recordFlags,
							  data, "modify first or add")
	def modifyFirstControllerOrAdd(self, addr, group, data):
		self.modifyRecord(addr, group, 0x40, 0xe2, data,
						  "modify first ctrl found or add")
	def modifyFirstResponderOrAdd(self, addr, group, data):
		self.modifyRecord(addr, group, 0x41, 0xa2, data,
						  "modify first resp found or add")
	def modifyRecord(self, addr, group, controlCode, recordFlags, data, txt):
		# Send a ManageALLLinkRecord command; DefaultMsgHandler(txt) tags
		# the reply with 'txt' on the console.
		msg = self.__makeModMsg(addr, group, controlCode, recordFlags, data, txt)
		self.querier = Querier(self.address)
		self.querier.setMsgHandler(DefaultMsgHandler(txt))
		self.querier.sendMsg(msg)
	def __makeModMsg(self, addr, group, controlCode, recordFlags, data, txt):
		# Assemble the ManageALLLinkRecord IM message from its fields.
		msg = Msg.s_makeMessage("ManageALLLinkRecord");
		msg.setByte("controlCode", controlCode); # mod. first ctrl found or add
		msg.setByte("recordFlags", recordFlags);
		msg.setByte("ALLLinkGroup", group);
		msg.setAddress("linkAddress", InsteonAddress(addr));
		msg.setByte("linkData1", data[0] & 0xFF)
		msg.setByte("linkData2", data[1] & 0xFF)
		msg.setByte("linkData3", data[2] & 0xFF)
		return msg;
	def saveDB(self, filename):
		"""saveDB(filename)
		save modem database to file "filename" """
		self.dbbuilder.start()
		self.dbbuilder.wait()
		self.dbbuilder.saveDB(filename)
	def loadDB(self, filename):
		"""loadDB(filename)
		restore modem database from file "filename" """
		self.dbbuilder.loadDB(filename)
		self.dbbuilder.dumpDB()
	def nukeDB(self):
		"""nukeDB()
		delete complete modem database! """
		self.dbbuilder.start()
		self.dbbuilder.wait()
		self.dbbuilder.nukeDB(self)
	def restoreDB(self, filename):
		"""restoreDB()
		restore modem database from file "filename" """
		self.loadDB(filename)
		self.dbbuilder.restoreDB(self, filename)
    def startWatch(self):
        """startWatch()
		modem will print all incoming messages on terminal"""
        # NOTE(review): space-indented duplicate of Modem2413U.startWatch
        # above (which is tab-indented); mixed tabs/spaces will not compile
        # under Python 3 -- confirm which copy is actually intended.
        self.querier = Querier(InsteonAddress("00.00.00"))
        self.querier.setMsgHandler(MsgDumper("modem"))
        self.querier.startWait(10000)
	def __init__(self, name, addr):
		# Base device state: human-readable name, Insteon address, an
		# empty link database, and a querier bound to this address.
		# addDev() registers the device in the global device table.
		self.name = name
		self.address = addr
		self.db = DB()
		self.querier = Querier(addr)
		addDev(self)
Exemple #37
0
class TestQuerier:
    """Tests for Querier: tid generation, response/error/timeout matching.

    Uses a mocked clock (time.mock_mode / time.sleep advances virtual
    time) so timeout behavior is deterministic. Relies on module-level
    fixtures clients_msg_f / servers_msg_f / tc defined elsewhere in
    this file.
    """

    def setup(self):
        time.mock_mode()
        self.querier = Querier()#tc.CLIENT_ID)

    def test_generate_tids(self):
        #TODO: move to message
        # tids are 2-byte little-endian counters: chr(i%256)+chr(i//256%256).
        if RUN_CPU_INTENSIVE_TESTS:
            num_tids =  pow(2, 16) + 2 #CPU intensive
        else:
            num_tids = 1000
        for i in xrange(num_tids):
            eq_(self.querier._next_tid(),
                chr(i%256)+chr((i/256)%256))

    def test_ping_with_reponse(self):
        # Client creates a query
        ping_msg = clients_msg_f.outgoing_ping_query(tc.SERVER_NODE)
        q = ping_msg
        # Client registers query
        timeout_ts, bencoded_msgs = self.querier.register_queries([q])
        # Client sends bencoded_msg
        # Server gets bencoded_msg and creates response
        ping_r_msg_out = servers_msg_f.outgoing_ping_response(tc.CLIENT_NODE)
        bencoded_r = ping_r_msg_out.stamp(ping_msg.tid)
        time.sleep(1)
        eq_(self.querier.get_timeout_queries()[1], [])
        # The client receives the bencoded message (after 1 second)
        ping_r_in = clients_msg_f.incoming_msg(
            Datagram(bencoded_r, tc.SERVER_ADDR))
        related_query = self.querier.get_related_query(ping_r_in)
        assert related_query is ping_msg

    def test_ping_with_timeout(self):
        # Client creates a query
        ping_msg = clients_msg_f.outgoing_ping_query(tc.SERVER_NODE)
        q = ping_msg
        # Client registers query
        bencoded_msg = self.querier.register_queries([q])
        # Client sends bencoded_msg
        time.sleep(3)
        # The server never responds and the timeout is triggered
        timeout_queries = self.querier.get_timeout_queries()
        eq_(len(timeout_queries[1]), 1)
        assert timeout_queries[1][0] is ping_msg

    def test_unsolicited_response(self):
        # Server creates unsolicited response
        # It might well be that the server responds using another port,
        # and therefore, the addr is not matched
        # TODO: consider accepting responses from a different port
        ping_r_msg_out = servers_msg_f.outgoing_ping_response(tc.CLIENT_NODE)
        bencoded_r = ping_r_msg_out.stamp('zz')
        # The client receives the bencoded message
        ping_r_in = clients_msg_f.incoming_msg(
                Datagram(bencoded_r, tc.SERVER_ADDR))
        related_query = self.querier.get_related_query(ping_r_in)
        assert related_query is None

    def test_response_with_different_tid(self):
        # Client creates a query
        ping_msg = clients_msg_f.outgoing_ping_query(tc.SERVER_NODE)
        q = ping_msg
        # Client registers query
        bencoded_msg = self.querier.register_queries([q])
        # Client sends bencoded_msg
        time.sleep(1)
        # Server gets bencoded_msg and creates response
        ping_r_msg_out = servers_msg_f.outgoing_ping_response(tc.CLIENT_NODE)
        bencoded_r = ping_r_msg_out.stamp('zz')
        # The client receives the bencoded message
        ping_r_in = clients_msg_f.incoming_msg(
                    Datagram(bencoded_r, tc.SERVER_ADDR))
        related_query = self.querier.get_related_query(ping_r_in)
        assert related_query is None
        
    def test_error_received(self):
        # Client creates a query
        msg = clients_msg_f.outgoing_ping_query(tc.SERVER_NODE)
        q = msg
        # Client registers query
        bencoded_msg = self.querier.register_queries([q])
        # Client sends bencoded_msg
        time.sleep(1)
        # Server gets bencoded_msg and creates response
        ping_r_msg_out = servers_msg_f.outgoing_error(tc.CLIENT_NODE,
                                                  message.GENERIC_E)
        bencoded_r = ping_r_msg_out.stamp(msg.tid)
        # The client receives the bencoded message
        ping_r_in = clients_msg_f.incoming_msg(
                    Datagram(bencoded_r, tc.SERVER_ADDR))
        related_query = self.querier.get_related_query(ping_r_in)
        assert related_query is msg

    def test_many_queries(self):
        # Client creates a query
        msgs = [clients_msg_f.outgoing_ping_query(
                tc.SERVER_NODE) for i in xrange(10)]
        queries = msgs
        # Client registers query
        bencoded_msg = self.querier.register_queries(queries)
        # Client sends bencoded_msg
        time.sleep(1)
        # response for queries[3]
        ping_r_msg_out = servers_msg_f.outgoing_ping_response(tc.CLIENT_NODE)
        bencoded_r = ping_r_msg_out.stamp(msgs[3].tid)
        ping_r_in = clients_msg_f.incoming_msg(
                        Datagram(bencoded_r, tc.SERVER_ADDR))
        related_query = self.querier.get_related_query(ping_r_in)
        assert related_query is msgs[3]
        # error for queries[2]
        ping_r_msg_out = servers_msg_f.outgoing_error(tc.CLIENT_NODE,
                                                  message.GENERIC_E)
        bencoded_r = ping_r_msg_out.stamp(msgs[2].tid)
        ping_r_in = clients_msg_f.incoming_msg(
                        Datagram(bencoded_r, tc.SERVER_ADDR))
        related_query = self.querier.get_related_query(ping_r_in)
        assert related_query is msgs[2]
        # response to wrong addr
        ping_r_msg_out = servers_msg_f.outgoing_ping_response(tc.CLIENT_NODE)
        bencoded_r = ping_r_msg_out.stamp(msgs[5].tid)
        ping_r_in = clients_msg_f.incoming_msg(
                        Datagram(bencoded_r, tc.SERVER2_ADDR))
        related_query = self.querier.get_related_query(ping_r_in)
        assert related_query is None
        # response with wrong tid
        ping_r_msg_out = servers_msg_f.outgoing_ping_response(tc.CLIENT_NODE)
        bencoded_r = ping_r_msg_out.stamp('ZZ')
        ping_r_in = clients_msg_f.incoming_msg(
                        Datagram(bencoded_r, tc.SERVER_ADDR))
        related_query = self.querier.get_related_query(ping_r_in)
        assert related_query is None
        # Still no time to trigger timeouts
        eq_(self.querier.get_timeout_queries()[1], [])
        time.sleep(1)
        # Now, the timeouts can be triggered
        # (all queries that got no valid match: 0, 1, 4, 5, 6, 7, 8, 9)
        timeout_queries = self.querier.get_timeout_queries()
        expected_msgs = msgs[:2] + msgs[4:]
        eq_(len(timeout_queries[1]), len(expected_msgs))
        for related_query, expected_msg in zip(
            timeout_queries[1], expected_msgs):
            assert related_query is expected_msg

    def teardown(self):
        time.normal_mode()
class Controller():
    """Top-level DHT node controller.

    Wires the routing table, lookup manager, tracker and token manager
    around a single UDP reactor, dispatches every incoming datagram, and
    periodically runs routing maintenance and state saving.
    """

    def __init__(self, dht_addr, state_path, routing_m_mod, lookup_m_mod,
                 private_dht_name):
        """Create and wire all components; does not start the reactor.

        dht_addr: (ip, port) pair this node listens on.
        state_path: directory containing the state file.
        routing_m_mod / lookup_m_mod: modules providing RoutingManager and
            LookupManager implementations (pluggable strategies).
        private_dht_name: name used to isolate a private overlay.
        """
        message.private_dht_name = private_dht_name
        self.state_filename = os.path.join(state_path, STATE_FILENAME)
        self.load_state()
        if not self._my_id:
            # No usable state on disk: start with a fresh random identifier.
            self._my_id = identifier.RandomId()
        self._my_node = Node(dht_addr, self._my_id)
        self._tracker = tracker.Tracker()
        self._token_m = token_manager.TokenManager()
        self._reactor = ThreadedReactor()
        self._reactor.listen_udp(self._my_node.addr[1],
                                 self._on_datagram_received)
        self._querier = Querier(self._my_id)
        # Prefer nodes recovered from the state file over the well-known
        # bootstrap nodes.
        bootstrap_nodes = self.loaded_nodes or BOOTSTRAP_NODES
        del self.loaded_nodes
        self._routing_m = routing_m_mod.RoutingManager(self._my_node,
                                                       bootstrap_nodes)
        self._lookup_m = lookup_m_mod.LookupManager(self._my_id)
        current_time = time.time()
        self._next_maintenance_ts = current_time
        self._next_save_state_ts = current_time + SAVE_STATE_DELAY
        self._running = False

    def start(self):
        """Start the reactor and kick off the periodic main loop."""
        self._running = True
        self._reactor.start()
        self._main_loop()

    def stop(self):
        """Stop the reactor (and with it, all scheduled work)."""
        self._reactor.stop()

    def save_state(self):
        """Write my id and the main routing-table nodes to the state file."""
        rnodes = self._routing_m.get_main_rnodes()
        # 'with' guarantees the file is closed even if a write fails
        # (previously the handle leaked on error).
        with open(self.state_filename, 'w') as f:
            f.write('%r\n' % self._my_id)
            for rnode in rnodes:
                f.write('%d\t%r\t%s\t%d\t%f\n' %
                        (self._my_id.log_distance(rnode.id), rnode.id,
                         rnode.addr[0], rnode.addr[1], rnode.rtt * 1000))

    def load_state(self):
        """Load my id and known nodes from the state file.

        Sets self._my_id (or None) and self.loaded_nodes (possibly empty).
        Any parse error discards everything read so far.
        """
        self._my_id = None
        self.loaded_nodes = []
        try:
            f = open(self.state_filename)
        except IOError:
            # No state file yet: first run.
            return
        try:
            hex_id = f.readline().strip()
            self._my_id = Id(hex_id)
            for line in f:
                _, hex_id, ip, port, _ = line.split()
                addr = (ip, int(port))
                self.loaded_nodes.append(Node(addr, Id(hex_id)))
        except Exception:
            # Corrupted file: fall back to a clean slate. 'except Exception'
            # instead of a bare 'except' so KeyboardInterrupt/SystemExit
            # still propagate.
            self._my_id = None
            self.loaded_nodes = []
            logger.error('state.dat is corrupted')
        finally:
            # Previously the file was only closed on success.
            f.close()

    def get_peers(self, lookup_id, info_hash, callback_f, bt_port=0):
        """Start a get_peers lookup whose target is info_hash.

        callback_f(lookup_id, peers) is invoked immediately with locally
        tracked peers (if any) and later with peers found by the lookup.
        Returns the number of initial lookup queries sent, or None when
        the main loop appears dead.
        """
        logger.critical('get_peers %d %r' % (bt_port, info_hash))
        if time.time() > self._next_maintenance_ts + 1:
            # The main loop should have rescheduled itself by now.
            logger.critical('minitwisted crashed or stopped!')
            return
        peers = self._tracker.get(info_hash)
        if peers:
            callback_f(lookup_id, peers)
        log_distance = info_hash.log_distance(self._my_id)
        bootstrap_rnodes = self._routing_m.get_closest_rnodes(
            log_distance, None, True)
        lookup_obj = self._lookup_m.get_peers(lookup_id, info_hash,
                                              callback_f, bt_port)
        lookup_queries_to_send = lookup_obj.start(bootstrap_rnodes)
        self._send_queries(lookup_queries_to_send)
        return len(lookup_queries_to_send)

    def print_routing_table_stats(self):
        """Print routing table statistics (delegates to the manager)."""
        self._routing_m.print_stats()

    def _main_loop(self):
        """Periodic heartbeat: routing maintenance, state saving, reschedule."""
        current_time = time.time()
        if current_time > self._next_maintenance_ts:
            (maintenance_delay, queries_to_send,
             maintenance_lookup_target) = self._routing_m.do_maintenance()
            self._send_queries(queries_to_send)
            if maintenance_lookup_target:
                log_distance = maintenance_lookup_target.log_distance(
                    self._my_id)
                bootstrap_rnodes = self._routing_m.get_closest_rnodes(
                    log_distance, None, True)
                lookup_obj = self._lookup_m.maintenance_lookup(
                    maintenance_lookup_target)
                self._send_queries(lookup_obj.start(bootstrap_rnodes))
            self._next_maintenance_ts = current_time + maintenance_delay
        if current_time > self._next_save_state_ts:
            self.save_state()
            self._next_save_state_ts = current_time + SAVE_STATE_DELAY
        # Sleep until whichever deadline comes first.
        delay = min(self._next_maintenance_ts,
                    self._next_save_state_ts) - current_time
        self._reactor.call_later(delay, self._main_loop)

    def _maintenance_lookup(self, target):
        """Trigger a maintenance lookup for 'target' (result unused here)."""
        self._lookup_m.maintenance_lookup(target)

    def _on_datagram_received(self, data, addr):
        """Decode one UDP datagram and dispatch it by message type."""
        try:
            msg = message.IncomingMsg(data, addr)
        except message.MsgError:
            return  # Ignore malformed datagrams.

        if msg.type == message.QUERY:
            if msg.sender_id == self._my_id:
                logger.debug('Got a msg from myself:\n%r', msg)
                return
            response_msg = self._get_response(msg)
            if response_msg:
                bencoded_response = response_msg.encode(msg.tid)
                self._reactor.sendto(bencoded_response, addr)
            maintenance_queries_to_send = self._routing_m.on_query_received(
                msg.sender_node)
        elif msg.type == message.RESPONSE:
            related_query = self._querier.on_response_received(msg, addr)
            if not related_query:
                return  # Unsolicited response: drop it.
            if related_query.lookup_obj:
                # The redundant inner 'msg.type == RESPONSE' check was
                # removed; it could leave lookup_queries_to_send unbound.
                (lookup_queries_to_send, peers, num_parallel_queries,
                 lookup_done) = related_query.lookup_obj.on_response_received(
                     msg, msg.sender_node)
                self._send_queries(lookup_queries_to_send)
                if related_query.lookup_obj.callback_f:
                    lookup_id = related_query.lookup_obj.lookup_id
                    if peers:
                        related_query.lookup_obj.callback_f(lookup_id, peers)
                    if lookup_done:
                        self._announce(related_query.lookup_obj)
                        # None signals the end of this lookup to the caller.
                        related_query.lookup_obj.callback_f(lookup_id, None)
            maintenance_queries_to_send = self._routing_m.on_response_received(
                msg.sender_node, related_query.rtt, msg.all_nodes)
        elif msg.type == message.ERROR:
            related_query = self._querier.on_error_received(msg, addr)
            if not related_query:
                return
            if related_query.lookup_obj:
                (lookup_queries_to_send, num_parallel_queries,
                 lookup_done) = related_query.lookup_obj.on_error_received(
                     msg, addr)
                self._send_queries(lookup_queries_to_send)
                # BUG FIX: this callback handling used to sit OUTSIDE the
                # lookup_obj check, so errors on non-lookup (maintenance)
                # queries crashed with AttributeError on None.callback_f
                # (and 'lookup_done' was unbound).
                if related_query.lookup_obj.callback_f:
                    lookup_id = related_query.lookup_obj.lookup_id
                    if lookup_done:
                        self._announce(related_query.lookup_obj)
                        related_query.lookup_obj.callback_f(lookup_id, None)
            maintenance_queries_to_send = self._routing_m.on_error_received(
                addr)
        else:
            return  # Unknown message type.
        self._send_queries(maintenance_queries_to_send)

    def _get_response(self, msg):
        """Build the response for an incoming query, or None if invalid."""
        if msg.query == message.PING:
            return message.OutgoingPingResponse(self._my_id)
        if msg.query == message.FIND_NODE:
            log_distance = msg.target.log_distance(self._my_id)
            rnodes = self._routing_m.get_closest_rnodes(
                log_distance, NUM_NODES, False)
            return message.OutgoingFindNodeResponse(self._my_id, rnodes)
        if msg.query == message.GET_PEERS:
            token = self._token_m.get()
            log_distance = msg.info_hash.log_distance(self._my_id)
            rnodes = self._routing_m.get_closest_rnodes(
                log_distance, NUM_NODES, False)
            peers = self._tracker.get(msg.info_hash)
            if peers:
                logger.debug('RESPONDING with PEERS:\n%r' % peers)
            return message.OutgoingGetPeersResponse(self._my_id,
                                                    token,
                                                    nodes=rnodes,
                                                    peers=peers)
        if msg.query == message.ANNOUNCE_PEER:
            # Peers announce the BT port they listen on; pair it with the
            # source IP of the datagram.
            peer_addr = (msg.sender_addr[0], msg.bt_port)
            self._tracker.put(msg.info_hash, peer_addr)
            return message.OutgoingAnnouncePeerResponse(self._my_id)
        logger.debug('Invalid QUERY: %r' % msg.query)

    def _on_response_received(self, msg):
        # Intentionally unused hook (dispatch happens in
        # _on_datagram_received).
        pass

    def _on_timeout(self, addr):
        """Handle a registered query timing out for the node at addr."""
        related_query = self._querier.on_timeout(addr)
        if not related_query:
            return  # A response/error arrived before the timeout fired.
        if related_query.lookup_obj:
            (lookup_queries_to_send, num_parallel_queries,
             lookup_done) = related_query.lookup_obj.on_timeout(
                 related_query.dstnode)
            self._send_queries(lookup_queries_to_send)
            if lookup_done and related_query.lookup_obj.callback_f:
                self._announce(related_query.lookup_obj)
                lookup_id = related_query.lookup_obj.lookup_id
                related_query.lookup_obj.callback_f(lookup_id, None)
        maintenance_queries_to_send = self._routing_m.on_timeout(
            related_query.dstnode)
        self._send_queries(maintenance_queries_to_send)

    def _announce(self, lookup_obj):
        """Send announce_peer queries for a completed lookup."""
        # announce_to_myself is intentionally ignored here.
        queries_to_send, announce_to_myself = lookup_obj.announce()
        self._send_queries(queries_to_send)

    def _send_queries(self, queries_to_send, lookup_obj=None):
        """Register each query with the querier and send it out.

        'lookup_obj' is kept for interface compatibility; it is unused.
        A None argument is a no-op.
        """
        if queries_to_send is None:
            return
        for query in queries_to_send:
            timeout_task = self._reactor.call_later(TIMEOUT_DELAY,
                                                    self._on_timeout,
                                                    query.dstnode.addr)
            bencoded_query = self._querier.register_query(query, timeout_task)
            self._reactor.sendto(bencoded_query, query.dstnode.addr)
Exemple #39
0
class TestQuerier:
    """Tests for Querier's tid generation, matching and timeout handling."""

    def setup(self):
        """Freeze the clock and build a fresh querier for every test."""
        time.mock_mode()
        self.querier = Querier()  # tc.CLIENT_ID intentionally not passed

    def test_generate_tids(self):
        #TODO: move to message
        # The full 2**16 + 2 sweep wraps the tid space; it is CPU intensive,
        # so a short run is used unless explicitly enabled.
        num_tids = pow(2, 16) + 2 if RUN_CPU_INTENSIVE_TESTS else 1000
        for i in xrange(num_tids):
            expected = chr(i % 256) + chr((i / 256) % 256)
            eq_(self.querier._next_tid(), expected)

    def test_ping_with_reponse(self):
        """A registered query is matched with the response carrying its tid."""
        query = clients_msg_f.outgoing_ping_query(tc.SERVER_NODE)
        timeout_ts, bencoded_msgs = self.querier.register_queries([query])
        # The server stamps its response with the query's tid.
        stamped = servers_msg_f.outgoing_ping_response(
            tc.CLIENT_NODE).stamp(query.tid)
        time.sleep(1)
        # One second is not enough to time the query out.
        eq_(self.querier.get_timeout_queries()[1], [])
        incoming = clients_msg_f.incoming_msg(
            Datagram(stamped, tc.SERVER_ADDR))
        assert self.querier.get_related_query(incoming) is query

    def test_ping_with_timeout(self):
        """An unanswered query shows up in get_timeout_queries."""
        query = clients_msg_f.outgoing_ping_query(tc.SERVER_NODE)
        self.querier.register_queries([query])
        time.sleep(3)
        # The server never responds, so the timeout fires.
        timed_out = self.querier.get_timeout_queries()
        eq_(len(timed_out[1]), 1)
        assert timed_out[1][0] is query

    def test_unsolicited_response(self):
        """A response that matches no registered query yields None."""
        # It might well be that the server responds using another port,
        # and therefore, the addr is not matched.
        # TODO: consider accepting responses from a different port
        stamped = servers_msg_f.outgoing_ping_response(
            tc.CLIENT_NODE).stamp('zz')
        incoming = clients_msg_f.incoming_msg(
            Datagram(stamped, tc.SERVER_ADDR))
        assert self.querier.get_related_query(incoming) is None

    def test_response_with_different_tid(self):
        """A response from the right addr but with a wrong tid yields None."""
        query = clients_msg_f.outgoing_ping_query(tc.SERVER_NODE)
        self.querier.register_queries([query])
        time.sleep(1)
        stamped = servers_msg_f.outgoing_ping_response(
            tc.CLIENT_NODE).stamp('zz')
        incoming = clients_msg_f.incoming_msg(
            Datagram(stamped, tc.SERVER_ADDR))
        assert self.querier.get_related_query(incoming) is None

    def test_error_received(self):
        """An error message is matched to its query just like a response."""
        query = clients_msg_f.outgoing_ping_query(tc.SERVER_NODE)
        self.querier.register_queries([query])
        time.sleep(1)
        error_out = servers_msg_f.outgoing_error(tc.CLIENT_NODE,
                                                 message.GENERIC_E)
        incoming = clients_msg_f.incoming_msg(
            Datagram(error_out.stamp(query.tid), tc.SERVER_ADDR))
        assert self.querier.get_related_query(incoming) is query

    def test_many_queries(self):
        """Mix responses, errors and mismatches over ten pending queries."""
        queries = [clients_msg_f.outgoing_ping_query(tc.SERVER_NODE)
                   for _ in xrange(10)]
        self.querier.register_queries(queries)
        time.sleep(1)

        def match(bencoded, src_addr):
            # Feed one datagram to the querier and return the related query.
            incoming = clients_msg_f.incoming_msg(
                Datagram(bencoded, src_addr))
            return self.querier.get_related_query(incoming)

        # A response matches queries[3].
        pong = servers_msg_f.outgoing_ping_response(tc.CLIENT_NODE)
        assert match(pong.stamp(queries[3].tid),
                     tc.SERVER_ADDR) is queries[3]
        # An error matches queries[2].
        err = servers_msg_f.outgoing_error(tc.CLIENT_NODE,
                                           message.GENERIC_E)
        assert match(err.stamp(queries[2].tid),
                     tc.SERVER_ADDR) is queries[2]
        # A response from the wrong address matches nothing.
        pong = servers_msg_f.outgoing_ping_response(tc.CLIENT_NODE)
        assert match(pong.stamp(queries[5].tid), tc.SERVER2_ADDR) is None
        # A response with an unknown tid matches nothing.
        pong = servers_msg_f.outgoing_ping_response(tc.CLIENT_NODE)
        assert match(pong.stamp('ZZ'), tc.SERVER_ADDR) is None
        # Not enough time has passed to trigger any timeout yet.
        eq_(self.querier.get_timeout_queries()[1], [])
        time.sleep(1)
        # Every query that was not answered times out now.
        timed_out = self.querier.get_timeout_queries()
        pending = queries[:2] + queries[4:]
        eq_(len(timed_out[1]), len(pending))
        for got, expected in zip(timed_out[1], pending):
            assert got is expected

    def teardown(self):
        """Restore the real clock after each test."""
        time.normal_mode()
from dash.dependencies import Input, Output

# Neo4j Setup
from py2neo import Graph
import json

# Import class to make queries to Neo4j
from querier import Querier

# Dash Setup
# NOTE(review): `dash.Dash` below needs `import dash` in scope — only
# `dash.dependencies` is imported in the visible header; confirm the
# missing import upstream.
external_stylesheets = ['https://codepen.io/chriddyp/pen/bWLwgP.css']
app = dash.Dash(__name__, external_stylesheets=external_stylesheets)
# Expose the underlying Flask server (used by WSGI deployments).
server = app.server

# Connect to Neo4j
querier = Querier()


# This function returns all components for refresh
def serve_layout():
    """Build the full page layout; called on every page load."""
    children = [dark_theme()]
    return html.Div(children=children, style={'padding': '0px'})


# Set the website refresh to call a function
# (assigning the function itself, not its result, makes Dash re-run it
# on every page load)
app.layout = serve_layout

# Colors for dark theme
theme = {
    'dark': True,
    'detail': '#149414',
    'primary': '#00EA64',
Exemple #41
0
 def setup(self):
     """Install a mocked clock in the querier module and build a client."""
     global time
     mock_clock = MockTime()
     querier.time = mock_clock
     time = mock_clock
     self.querier = Querier(tc.CLIENT_ID)
Exemple #42
0
 def setup(self):
     """Switch to the mocked clock and build a querier with no explicit id."""
     time.mock_mode()
     self.querier = Querier()  # tc.CLIENT_ID intentionally not passed
Exemple #43
0
class Controller():
    """Top-level DHT node controller.

    Wires the routing table, lookup manager, tracker and token manager
    around a single UDP reactor, dispatches every incoming datagram, and
    periodically runs routing maintenance and state saving.
    """

    def __init__(self, dht_addr, state_path, routing_m_mod, lookup_m_mod,
                 private_dht_name):
        """Create and wire all components; does not start the reactor.

        dht_addr: (ip, port) pair this node listens on.
        state_path: directory containing the state file.
        routing_m_mod / lookup_m_mod: modules providing RoutingManager and
            LookupManager implementations (pluggable strategies).
        private_dht_name: name used to isolate a private overlay.
        """
        message.private_dht_name = private_dht_name
        self.state_filename = os.path.join(state_path, STATE_FILENAME)
        self.load_state()
        if not self._my_id:
            # No usable state on disk: start with a fresh random identifier.
            self._my_id = identifier.RandomId()
        self._my_node = Node(dht_addr, self._my_id)
        self._tracker = tracker.Tracker()
        self._token_m = token_manager.TokenManager()
        self._reactor = ThreadedReactor()
        self._reactor.listen_udp(self._my_node.addr[1],
                                 self._on_datagram_received)
        self._querier = Querier(self._my_id)
        # Prefer nodes recovered from the state file over the well-known
        # bootstrap nodes.
        bootstrap_nodes = self.loaded_nodes or BOOTSTRAP_NODES
        del self.loaded_nodes
        self._routing_m = routing_m_mod.RoutingManager(self._my_node,
                                                       bootstrap_nodes)
        self._lookup_m = lookup_m_mod.LookupManager(self._my_id)
        current_time = time.time()
        self._next_maintenance_ts = current_time
        self._next_save_state_ts = current_time + SAVE_STATE_DELAY
        self._running = False

    def start(self):
        """Start the reactor and kick off the periodic main loop."""
        self._running = True
        self._reactor.start()
        self._main_loop()

    def stop(self):
        """Stop the reactor (and with it, all scheduled work)."""
        self._reactor.stop()

    def save_state(self):
        """Write my id and the main routing-table nodes to the state file."""
        rnodes = self._routing_m.get_main_rnodes()
        # 'with' guarantees the file is closed even if a write fails
        # (previously the handle leaked on error).
        with open(self.state_filename, 'w') as f:
            f.write('%r\n' % self._my_id)
            for rnode in rnodes:
                f.write('%d\t%r\t%s\t%d\t%f\n' %
                        (self._my_id.log_distance(rnode.id), rnode.id,
                         rnode.addr[0], rnode.addr[1], rnode.rtt * 1000))

    def load_state(self):
        """Load my id and known nodes from the state file.

        Sets self._my_id (or None) and self.loaded_nodes (possibly empty).
        Any parse error discards everything read so far.
        """
        self._my_id = None
        self.loaded_nodes = []
        try:
            f = open(self.state_filename)
        except IOError:
            # No state file yet: first run.
            return
        try:
            hex_id = f.readline().strip()
            self._my_id = Id(hex_id)
            for line in f:
                _, hex_id, ip, port, _ = line.split()
                addr = (ip, int(port))
                self.loaded_nodes.append(Node(addr, Id(hex_id)))
        except Exception:
            # Corrupted file: fall back to a clean slate. 'except Exception'
            # instead of a bare 'except' so KeyboardInterrupt/SystemExit
            # still propagate.
            self._my_id = None
            self.loaded_nodes = []
            logger.error('state.dat is corrupted')
        finally:
            # Previously the file was only closed on success.
            f.close()

    def get_peers(self, lookup_id, info_hash, callback_f, bt_port=0):
        """Start a get_peers lookup whose target is info_hash.

        callback_f(lookup_id, peers) is invoked immediately with locally
        tracked peers (if any) and later with peers found by the lookup.
        Returns the number of initial lookup queries sent, or None when
        the main loop appears dead.
        """
        logger.critical('get_peers %d %r' % (bt_port, info_hash))
        if time.time() > self._next_maintenance_ts + 1:
            # The main loop should have rescheduled itself by now.
            logger.critical('minitwisted crashed or stopped!')
            return
        peers = self._tracker.get(info_hash)
        if peers:
            callback_f(lookup_id, peers)
        log_distance = info_hash.log_distance(self._my_id)
        bootstrap_rnodes = self._routing_m.get_closest_rnodes(
            log_distance, None, True)
        lookup_obj = self._lookup_m.get_peers(lookup_id, info_hash,
                                              callback_f, bt_port)
        lookup_queries_to_send = lookup_obj.start(bootstrap_rnodes)
        self._send_queries(lookup_queries_to_send)
        return len(lookup_queries_to_send)

    def print_routing_table_stats(self):
        """Print routing table statistics (delegates to the manager)."""
        self._routing_m.print_stats()

    def _main_loop(self):
        """Periodic heartbeat: routing maintenance, state saving, reschedule."""
        current_time = time.time()
        if current_time > self._next_maintenance_ts:
            (maintenance_delay, queries_to_send,
             maintenance_lookup_target) = self._routing_m.do_maintenance()
            self._send_queries(queries_to_send)
            if maintenance_lookup_target:
                log_distance = maintenance_lookup_target.log_distance(
                    self._my_id)
                bootstrap_rnodes = self._routing_m.get_closest_rnodes(
                    log_distance, None, True)
                lookup_obj = self._lookup_m.maintenance_lookup(
                    maintenance_lookup_target)
                self._send_queries(lookup_obj.start(bootstrap_rnodes))
            self._next_maintenance_ts = current_time + maintenance_delay
        if current_time > self._next_save_state_ts:
            self.save_state()
            self._next_save_state_ts = current_time + SAVE_STATE_DELAY
        # Sleep until whichever deadline comes first.
        delay = min(self._next_maintenance_ts,
                    self._next_save_state_ts) - current_time
        self._reactor.call_later(delay, self._main_loop)

    def _maintenance_lookup(self, target):
        """Trigger a maintenance lookup for 'target' (result unused here)."""
        self._lookup_m.maintenance_lookup(target)

    def _on_datagram_received(self, data, addr):
        """Decode one UDP datagram and dispatch it by message type."""
        try:
            msg = message.IncomingMsg(data, addr)
        except message.MsgError:
            return  # Ignore malformed datagrams.

        if msg.type == message.QUERY:
            if msg.sender_id == self._my_id:
                logger.debug('Got a msg from myself:\n%r', msg)
                return
            response_msg = self._get_response(msg)
            if response_msg:
                bencoded_response = response_msg.encode(msg.tid)
                self._reactor.sendto(bencoded_response, addr)
            maintenance_queries_to_send = self._routing_m.on_query_received(
                msg.sender_node)
        elif msg.type == message.RESPONSE:
            related_query = self._querier.on_response_received(msg, addr)
            if not related_query:
                return  # Unsolicited response: drop it.
            if related_query.lookup_obj:
                # The redundant inner 'msg.type == RESPONSE' check was
                # removed; it could leave lookup_queries_to_send unbound.
                (lookup_queries_to_send, peers, num_parallel_queries,
                 lookup_done) = related_query.lookup_obj.on_response_received(
                     msg, msg.sender_node)
                self._send_queries(lookup_queries_to_send)
                if related_query.lookup_obj.callback_f:
                    lookup_id = related_query.lookup_obj.lookup_id
                    if peers:
                        related_query.lookup_obj.callback_f(lookup_id, peers)
                    if lookup_done:
                        self._announce(related_query.lookup_obj)
                        # None signals the end of this lookup to the caller.
                        related_query.lookup_obj.callback_f(lookup_id, None)
            maintenance_queries_to_send = self._routing_m.on_response_received(
                msg.sender_node, related_query.rtt, msg.all_nodes)
        elif msg.type == message.ERROR:
            related_query = self._querier.on_error_received(msg, addr)
            if not related_query:
                return
            if related_query.lookup_obj:
                (lookup_queries_to_send, num_parallel_queries,
                 lookup_done) = related_query.lookup_obj.on_error_received(
                     msg, addr)
                self._send_queries(lookup_queries_to_send)
                # BUG FIX: this callback handling used to sit OUTSIDE the
                # lookup_obj check, so errors on non-lookup (maintenance)
                # queries crashed with AttributeError on None.callback_f
                # (and 'lookup_done' was unbound).
                if related_query.lookup_obj.callback_f:
                    lookup_id = related_query.lookup_obj.lookup_id
                    if lookup_done:
                        self._announce(related_query.lookup_obj)
                        related_query.lookup_obj.callback_f(lookup_id, None)
            maintenance_queries_to_send = self._routing_m.on_error_received(
                addr)
        else:
            return  # Unknown message type.
        self._send_queries(maintenance_queries_to_send)

    def _get_response(self, msg):
        """Build the response for an incoming query, or None if invalid."""
        if msg.query == message.PING:
            return message.OutgoingPingResponse(self._my_id)
        if msg.query == message.FIND_NODE:
            log_distance = msg.target.log_distance(self._my_id)
            rnodes = self._routing_m.get_closest_rnodes(
                log_distance, NUM_NODES, False)
            return message.OutgoingFindNodeResponse(self._my_id, rnodes)
        if msg.query == message.GET_PEERS:
            token = self._token_m.get()
            log_distance = msg.info_hash.log_distance(self._my_id)
            rnodes = self._routing_m.get_closest_rnodes(
                log_distance, NUM_NODES, False)
            peers = self._tracker.get(msg.info_hash)
            if peers:
                logger.debug('RESPONDING with PEERS:\n%r' % peers)
            return message.OutgoingGetPeersResponse(self._my_id,
                                                    token,
                                                    nodes=rnodes,
                                                    peers=peers)
        if msg.query == message.ANNOUNCE_PEER:
            # Peers announce the BT port they listen on; pair it with the
            # source IP of the datagram.
            peer_addr = (msg.sender_addr[0], msg.bt_port)
            self._tracker.put(msg.info_hash, peer_addr)
            return message.OutgoingAnnouncePeerResponse(self._my_id)
        logger.debug('Invalid QUERY: %r' % msg.query)

    def _on_response_received(self, msg):
        # Intentionally unused hook (dispatch happens in
        # _on_datagram_received).
        pass

    def _on_timeout(self, addr):
        """Handle a registered query timing out for the node at addr."""
        related_query = self._querier.on_timeout(addr)
        if not related_query:
            return  # A response/error arrived before the timeout fired.
        if related_query.lookup_obj:
            (lookup_queries_to_send, num_parallel_queries,
             lookup_done) = related_query.lookup_obj.on_timeout(
                 related_query.dstnode)
            self._send_queries(lookup_queries_to_send)
            if lookup_done and related_query.lookup_obj.callback_f:
                self._announce(related_query.lookup_obj)
                lookup_id = related_query.lookup_obj.lookup_id
                related_query.lookup_obj.callback_f(lookup_id, None)
        maintenance_queries_to_send = self._routing_m.on_timeout(
            related_query.dstnode)
        self._send_queries(maintenance_queries_to_send)

    def _announce(self, lookup_obj):
        """Send announce_peer queries for a completed lookup."""
        # announce_to_myself is intentionally ignored here.
        queries_to_send, announce_to_myself = lookup_obj.announce()
        self._send_queries(queries_to_send)

    def _send_queries(self, queries_to_send, lookup_obj=None):
        """Register each query with the querier and send it out.

        'lookup_obj' is kept for interface compatibility; it is unused.
        A None argument is a no-op.
        """
        if queries_to_send is None:
            return
        for query in queries_to_send:
            timeout_task = self._reactor.call_later(TIMEOUT_DELAY,
                                                    self._on_timeout,
                                                    query.dstnode.addr)
            bencoded_query = self._querier.register_query(query, timeout_task)
            self._reactor.sendto(bencoded_query, query.dstnode.addr)
Exemple #44
0
 def setup(self):
     # Use the mocked clock so tests control time deterministically.
     time.mock_mode()
     self.querier = Querier()  #tc.CLIENT_ID)
Exemple #45
0
"""
This program queries the Materials Project database with specified
materials using user's API key.
"""

__author__ = "Erpan Arkin"
__email__ = "*****@*****.**"

description = 'Materials Project Querying Tool'

parser = ArgumentParser(description=description)
parser.add_argument('API_KEY',
                    metavar='API_KEY',
                    type=str,
                    help="User's API Key to connect to the MP")
parser.add_argument('MATERIAL',
                    metavar='MATERIAL',
                    type=str,
                    help='Chemical formula of the target material, e.g. Fe2O3')
parser.add_argument('-p',
                    action='store_true',
                    help="plot all entries' id verse energy per atom")

args = parser.parse_args()

my_query = Querier(args.API_KEY, args.MATERIAL)
my_query.GS_finder()
my_query.print_results()
if args.p:
    my_query.plot()
Exemple #46
0
class Controller:
    def __init__(self, version_label, my_node, conf_path, routing_m_mod,
                 lookup_m_mod, experimental_m_mod, private_dht_name,
                 bootstrap_mode):
        """Wire up the DHT node's subsystems.

        Builds the overlay bootstrapper, message factory, querier,
        routing/lookup/experimental managers and responder, and primes
        the maintenance/timeout timers.  The node identifier comes from
        'my_node' when given, otherwise a random one is generated.
        """
        self.bootstrapper = bootstrap.OverlayBootstrapper(conf_path)
        my_addr = my_node.addr
        self._my_id = my_node.id  # id indicated by user
        if not self._my_id:
            # FIX: removed duplicated 'self._my_id =' in this assignment
            self._my_id = identifier.RandomId()  # random id
        self._my_node = Node(my_addr, self._my_id, version=version_label)
        self.msg_f = message.MsgFactory(version_label, self._my_id,
                                        private_dht_name)
        self._querier = Querier()
        self._routing_m = routing_m_mod.RoutingManager(self._my_node,
                                                       self.msg_f,
                                                       self.bootstrapper)

        self._responder = responder.Responder(self._my_id, self._routing_m,
                                              self.msg_f, bootstrap_mode)
        self._tracker = self._responder._tracker

        self._lookup_m = lookup_m_mod.LookupManager(self._my_id, self.msg_f,
                                                    self.bootstrapper)
        self._experimental_m = experimental_m_mod.ExperimentalManager(
            self._my_node.id, self.msg_f)

        # all periodic tasks are due immediately on the first main_loop call
        current_ts = time.time()
        self._next_maintenance_ts = current_ts
        self._next_timeout_ts = current_ts
        self._next_main_loop_call_ts = current_ts
        self._cached_lookups = []

    def on_stop(self):
        """Shut down: notify the experimental manager and persist the
        overlay bootstrap information to disk."""
        self._experimental_m.on_stop()
        self.bootstrapper.save_to_file()

    def get_peers(self, lookup_id, info_hash, callback_f, bt_port, use_cache):
        """
        Start a get\_peers lookup whose target is 'info\_hash'. The handler
        'callback\_f' will be called with three arguments ('lookup\_id',
        'peers', 'node') whenever peers are discovered. Once the lookup is
        completed, the handler will be called with arguments:
        ('lookup\_id', None, None).

        This method is called by minitwisted, using the minitwisted thread.

        """
        datagrams_to_send = []
        logger.debug('get_peers %d %r' % (bt_port, info_hash))
        if use_cache:
            # serve recently cached peers without starting a new lookup
            peers = self._get_cached_peers(info_hash)
            if peers and callback_f and callable(callback_f):
                callback_f(lookup_id, peers, None)
                # signal completion immediately
                callback_f(lookup_id, None, None)
                return datagrams_to_send
        lookup_obj = self._lookup_m.get_peers(lookup_id, info_hash, callback_f,
                                              bt_port)
        queries_to_send = []
        distance = lookup_obj.info_hash.distance(self._my_id)
        bootstrap_rnodes = self._routing_m.get_closest_rnodes(
            distance.log, 0, True)  #TODO: get the full bucket
        # look if I'm tracking this info_hash
        peers = self._tracker.get(lookup_obj.info_hash)
        callback_f = lookup_obj.callback_f
        if peers:
            self._add_cache_peers(lookup_obj.info_hash, peers)
            if callback_f and callable(callback_f):
                callback_f(lookup_obj.lookup_id, peers, None)
        # do the lookup
        # NOTE: if bootstrap_rnodes is empty, a OVERLAY BOOTSTRAP will be
        # done.
        queries_to_send = lookup_obj.start(bootstrap_rnodes, self.bootstrapper)

        datagrams_to_send = self._register_queries(queries_to_send)
        return datagrams_to_send

    def _get_cached_peers(self, info_hash):
        """Return the first fresh cached peer list for 'info_hash', or None."""
        freshness_threshold = time.time() - CACHE_VALID_PERIOD
        matches = (peers
                   for (ts, cached_hash, peers) in self._cached_lookups
                   if ts > freshness_threshold and cached_hash == info_hash)
        return next(matches, None)

    def _add_cache_peers(self, info_hash, peers):
        """Cache 'peers' for 'info_hash', expiring stale entries first.

        Entries are kept oldest-first; expired ones are popped from the
        front.  When the newest entry already belongs to this info_hash
        the peers are merged into it, otherwise a new entry is appended.
        """
        expiry_ts = time.time() - CACHE_VALID_PERIOD
        cache = self._cached_lookups
        # evict expired entries from the oldest end
        while cache:
            if cache[0][0] >= expiry_ts:
                break
            cache.pop(0)
        if cache and cache[-1][1] == info_hash:
            # merge into the most recent entry for the same info_hash
            cache[-1][2].extend(peers)
        else:
            cache.append((time.time(), info_hash, peers))

    def print_routing_table_stats(self):
        """Print routing-table statistics (delegates to the routing manager)."""
        self._routing_m.print_stats()

    def print_routing_table(self):
        """Print the routing table (delegates to the routing manager)."""
        self._routing_m.print_table()

    def main_loop(self):
        """
        Perform maintenance operations. The main operation is routing table
        maintenance where staled nodes are added/probed/replaced/removed as
        needed. The routing management module specifies the implementation
        details.  This includes keeping track of queries that have not been
        responded for a long time (timeout) with the help of
        querier.Querier. The routing manager and the lookup manager will be
        informed of those timeouts.

        This method is designed to be used as minitwisted's heartbeat handler.

        """

        queries_to_send = []
        current_ts = time.time()
        #TODO: I think this if should be removed
        # At most, 1 second between calls to main_loop after the first call
        if current_ts >= self._next_main_loop_call_ts:
            self._next_main_loop_call_ts = current_ts + 1
        else:
            # It's too early
            return self._next_main_loop_call_ts, []

        # Take care of timeouts
        if current_ts >= self._next_timeout_ts:
            (self._next_timeout_ts,
             timeout_queries) = self._querier.get_timeout_queries()
            for query in timeout_queries:
                queries_to_send.extend(self._on_timeout(query))

        # Routing table maintenance
        if time.time() >= self._next_maintenance_ts:
            (maintenance_delay, queries,
             maintenance_lookup) = self._routing_m.do_maintenance()
            self._next_maintenance_ts = current_ts + maintenance_delay
            self._next_main_loop_call_ts = min(self._next_main_loop_call_ts,
                                               self._next_maintenance_ts)
            queries_to_send.extend(queries)
            if maintenance_lookup:
                # maintenance may request a full lookup towards 'target'
                target, rnodes = maintenance_lookup
                lookup_obj = self._lookup_m.maintenance_lookup(target)
                queries_to_send.extend(lookup_obj.start(rnodes))

        # Return control to reactor
        datagrams_to_send = self._register_queries(queries_to_send)
        return self._next_main_loop_call_ts, datagrams_to_send

    def _maintenance_lookup(self, target):
        # NOTE(review): the created lookup's initial queries are discarded
        # here -- confirm whether this helper is still used.
        self._lookup_m.maintenance_lookup(target)

    def on_datagram_received(self, datagram):
        """
        Perform the actions associated to the arrival of the given
        datagram. The datagram will be ignored in cases such as invalid
        format. Otherwise, the datagram will be decoded and different modules
        will be informed to take action on it. For instance, if the datagram
        contains a response to a lookup query, both routing and lookup manager
        will be informed. Additionally, if that response contains peers, the
        lookup's handler will be called (see get\_peers above).
        This method is designed to be used as minitwisted's networking handler.

        Returns (next_main_loop_call_ts, datagrams_to_send).
        """
        exp_queries_to_send = []

        addr = datagram.addr
        datagrams_to_send = []
        try:
            msg = self.msg_f.incoming_msg(datagram)

        except (message.MsgError):
            # ignore message
            return self._next_main_loop_call_ts, datagrams_to_send

        if msg.type == message.QUERY:

            if msg.src_node.id == self._my_id:
                logger.debug('Got a msg from myself:\n%r', msg)
                return self._next_main_loop_call_ts, datagrams_to_send
            #zinat: inform experimental_module
            exp_queries_to_send = self._experimental_m.on_query_received(msg)

            response_msg = self._responder.get_response(msg)
            if response_msg:
                bencoded_response = response_msg.stamp(msg.tid)
                datagrams_to_send.append(
                    message.Datagram(bencoded_response, addr))
            maintenance_queries_to_send = self._routing_m.on_query_received(
                msg.src_node)

        elif msg.type == message.RESPONSE:
            related_query = self._querier.get_related_query(msg)
            if not related_query:
                # Query timed out or unrequested response
                return self._next_main_loop_call_ts, datagrams_to_send
            ## zinat: if related_query.experimental_obj:
            exp_queries_to_send = self._experimental_m.on_response_received(
                msg, related_query)
            # lookup related tasks
            if related_query.lookup_obj:
                (lookup_queries_to_send, peers, num_parallel_queries,
                 lookup_done) = related_query.lookup_obj.on_response_received(
                     msg, msg.src_node)
                datagrams = self._register_queries(lookup_queries_to_send)
                datagrams_to_send.extend(datagrams)

                lookup_obj = related_query.lookup_obj
                lookup_id = lookup_obj.lookup_id
                callback_f = lookup_obj.callback_f
                if peers:
                    self._add_cache_peers(lookup_obj.info_hash, peers)
                    if callback_f and callable(callback_f):
                        callback_f(lookup_id, peers, msg.src_node)
                if lookup_done:
                    if callback_f and callable(callback_f):
                        # (lookup_id, None, node) signals completion
                        callback_f(lookup_id, None, msg.src_node)
                    queries_to_send = self._announce(related_query.lookup_obj)
                    datagrams = self._register_queries(queries_to_send)
                    datagrams_to_send.extend(datagrams)

            # maintenance related tasks
            maintenance_queries_to_send = \
                self._routing_m.on_response_received(
                msg.src_node, related_query.rtt, msg.all_nodes)

        elif msg.type == message.ERROR:
            related_query = self._querier.get_related_query(msg)
            if not related_query:
                # Query timed out or unrequested response
                return self._next_main_loop_call_ts, datagrams_to_send
            exp_queries_to_send = self._experimental_m.on_error_received(
                msg, related_query)
            # lookup related tasks
            if related_query.lookup_obj:
                (lookup_queries_to_send, num_parallel_queries,
                 lookup_done) = related_query.lookup_obj.on_error_received(
                     msg, addr)
                datagrams = self._register_queries(lookup_queries_to_send)
                datagrams_to_send.extend(datagrams)

                callback_f = related_query.lookup_obj.callback_f
                if callback_f and callable(callback_f):
                    lookup_id = related_query.lookup_obj.lookup_id
                    if lookup_done:
                        callback_f(lookup_id, None, msg.src_node)
                if lookup_done:
                    # BUG FIX: _announce() returns queries, not datagrams;
                    # register them first (mirrors the RESPONSE branch above)
                    queries_to_send = self._announce(related_query.lookup_obj)
                    datagrams = self._register_queries(queries_to_send)
                    datagrams_to_send.extend(datagrams)
            # maintenance related tasks
            maintenance_queries_to_send = \
                self._routing_m.on_error_received(addr)

        else:  # unknown type
            return self._next_main_loop_call_ts, datagrams_to_send
        # we are done with the plugins
        # now we have maintenance_queries_to_send, let's send them!
        datagrams = self._register_queries(maintenance_queries_to_send)
        datagrams_to_send.extend(datagrams)
        if exp_queries_to_send:
            datagrams = self._register_queries(exp_queries_to_send)
            datagrams_to_send.extend(datagrams)
        return self._next_main_loop_call_ts, datagrams_to_send

    def _on_query_received(self):
        # NOTE(review): unused stub -- confirm whether it can be removed.
        return

    def _on_response_received(self):
        # NOTE(review): unused stub -- confirm whether it can be removed.
        return

    def _on_error_received(self):
        # NOTE(review): unused stub -- confirm whether it can be removed.
        return

    def _on_timeout(self, related_query):
        """Handle a query timeout; return follow-up queries to send.

        Informs the experimental, lookup and routing managers.  All
        resulting queries are returned so the caller (main_loop) can
        register and send them.
        """
        queries_to_send = []
        #TODO: on_timeout should return queries (raul)
        exp_queries_to_send = self._experimental_m.on_timeout(related_query)
        if related_query.lookup_obj:
            (lookup_queries_to_send, num_parallel_queries,
             lookup_done) = related_query.lookup_obj.on_timeout(
                 related_query.dst_node)
            queries_to_send.extend(lookup_queries_to_send)
            callback_f = related_query.lookup_obj.callback_f
            if lookup_done:
                lookup_id = related_query.lookup_obj.lookup_id
                if callback_f and callable(callback_f):
                    # (lookup_id, None, None) signals completion
                    callback_f(lookup_id, None, None)
                queries_to_send.extend(self._announce(
                    related_query.lookup_obj))
        maintenance_queries_to_send = self._routing_m.on_timeout(
            related_query.dst_node)
        if maintenance_queries_to_send:
            queries_to_send.extend(maintenance_queries_to_send)
        if exp_queries_to_send:
            # BUG FIX: the original extended an undefined
            # 'datagrams_to_send' here (NameError); return these queries
            # so the caller registers and sends them like the rest.
            queries_to_send.extend(exp_queries_to_send)
        return queries_to_send

    def _announce(self, lookup_obj):
        """Return the announce queries for a completed lookup.

        NOTE(review): announce_to_myself is ignored; see the commented-out
        tracker code below -- confirm whether local tracking is intended.
        """
        queries_to_send, announce_to_myself = lookup_obj.announce()
        return queries_to_send

    '''
    if announce_to_myself:
    self._tracker.put(lookup_obj._info_hash,
    (self._my_node.addr[0], lookup_obj._bt_port))
    '''

    def _register_queries(self, queries_to_send, lookup_obj=None):
        if not queries_to_send:
            return []
        timeout_call_ts, datagrams_to_send = self._querier.register_queries(
            queries_to_send)
        self._next_main_loop_call_ts = min(self._next_main_loop_call_ts,
                                           timeout_call_ts)
        return datagrams_to_send
	def startWatch(self):
		"""startWatch()
		modem will print all incoming messages on terminal"""
		# Query address 00.00.00 and dump every incoming message via
		# MsgDumper; startWait(10000) blocks for 10000 units
		# (presumably milliseconds -- confirm against the Querier API).
		self.querier = Querier(InsteonAddress("00.00.00"))
		self.querier.setMsgHandler(MsgDumper("modem"))
		self.querier.startWait(10000)
Exemple #48
0
 def setup(self):
     # Replace both this module's and the querier module's 'time' with a
     # mock clock so the test controls timeouts deterministically.
     global time
     time = querier.time = MockTime()
     self.querier = Querier(tc.CLIENT_ID)
	def modifyRecord(self, addr, group, controlCode, recordFlags, data, txt):
		# Build the record-modification message and send it, logging
		# replies through the default handler labeled with 'txt'.
		msg = self.__makeModMsg(addr, group, controlCode, recordFlags, data, txt)
		self.querier = Querier(self.address)
		self.querier.setMsgHandler(DefaultMsgHandler(txt))
		self.querier.sendMsg(msg)
Exemple #50
0
class Controller:

    def __init__(self, dht_addr, state_path, routing_m_mod, lookup_m_mod):
        """Set up the DHT node: identity, reactor, managers and timers.

        dht_addr: address this node listens on.
        state_path: directory containing the routing-table state file.
        routing_m_mod / lookup_m_mod: plugin modules providing the
        RoutingManager and LookupManager implementations.
        """
        self.state_filename = os.path.join(state_path, STATE_FILENAME)
        self.load_state()  # sets self._my_id and self.loaded_nodes
        if not self._my_id:
            # no saved identifier: generate a random one
            self._my_id = identifier.RandomId()
        self._my_node = Node(dht_addr, self._my_id)
        self._tracker = tracker.Tracker()
        self._token_m = token_manager.TokenManager()

        self._reactor = ThreadedReactor()
        self._reactor.listen_udp(self._my_node.addr[1],
                                 self._on_datagram_received)
        #self._rpc_m = RPCManager(self._reactor)
        self._querier = Querier(self._my_id)
        # prefer nodes loaded from the state file over hardcoded bootstrap
        bootstrap_nodes = self.loaded_nodes or BOOTSTRAP_NODES
        del self.loaded_nodes
        self._routing_m = routing_m_mod.RoutingManager(self._my_node, 
                                                       bootstrap_nodes)
#        self._responder = Responder(self._my_id, self._routing_m,
#                                    self._tracker, self._token_m)

        self._lookup_m = lookup_m_mod.LookupManager(self._my_id)
        current_time = time.time()
        self._next_maintenance_ts = current_time
        self._next_save_state_ts = current_time + SAVE_STATE_DELAY
        
        self._running = False
        

    def start(self):
        """Start the reactor and kick off the periodic main loop."""
        assert not self._running
        self._running = True
        self._reactor.start()
        self._main_loop()

    def stop(self):
        """Stop the reactor (individual managers are not stopped yet)."""
        assert self._running
        #TODO2: stop each manager
        self._reactor.stop()

    def save_state(self):
        rnodes = self._routing_m.get_main_rnodes()
        f = open(self.state_filename, 'w')
        f.write('%r\n' % self._my_id)
        for rnode in rnodes:
            f.write('%d\t%r\t%s\t%d\t%f\n' % (
                    self._my_id.log_distance(rnode.id),
                    rnode.id, rnode.addr[0], rnode.addr[1],
                    rnode.rtt * 1000))
        f.close()

    def load_state(self):
        self._my_id = None
        self.loaded_nodes = []
        try:
            f = open(self.state_filename)
        except(IOError):
            return
        # the first line contains this node's identifier
        hex_id = f.readline().strip()
        self._my_id = Id(hex_id)
        # the rest of the lines contain routing table nodes
        # FORMAT
        # log_distance hex_id ip port rtt
        for line in f:
            _, hex_id, ip, port, _ = line.split()
            addr = (ip, int(port))
            node_ = Node(addr, Id(hex_id))
            self.loaded_nodes.append(node_)
        f.close
        
    def get_peers(self, lookup_id, info_hash, callback_f, bt_port=0):
        """Start a get_peers lookup for 'info_hash'.

        'callback_f' is called with (lookup_id, peers) whenever peers are
        found and with (lookup_id, None) upon completion.  Returns the
        number of initial queries sent.
        """
        assert self._running
        log_distance = info_hash.log_distance(self._my_id)
        bootstrap_rnodes = self._routing_m.get_closest_rnodes(log_distance,
                                                              None,
                                                              True)
        lookup_obj = self._lookup_m.get_peers(info_hash, callback_f, bt_port)
        #TODO: propagate lookup_id to the lookup plugin
        lookup_obj.lookup_id = lookup_id
        ################################################
        lookup_queries_to_send = lookup_obj.start(bootstrap_rnodes)
        self._send_queries(lookup_queries_to_send)
        return len(lookup_queries_to_send)
        
    def print_routing_table_stats(self):
        """Print routing-table statistics (delegates to the routing manager)."""
        self._routing_m.print_stats()

    def _main_loop(self):
        """Periodic maintenance: routing-table upkeep and state autosave.

        Reschedules itself on the reactor for whichever of the two tasks
        is due next.
        """
        current_time = time.time()
        # Routing table
        if current_time > self._next_maintenance_ts:
            (maintenance_delay,
             queries_to_send,
             maintenance_lookup_target) = self._routing_m.do_maintenance()
            self._send_queries(queries_to_send)
            if maintenance_lookup_target:
                # maintenance may request a full lookup towards a target
                log_distance = maintenance_lookup_target.log_distance(
                    self._my_id)
                bootstrap_nodes = self._routing_m.get_closest_rnodes(
                    log_distance, None, True)
                lookup_obj = self._lookup_m.maintenance_lookup(
                    maintenance_lookup_target)
                lookup_queries_to_send = lookup_obj.start(bootstrap_nodes)
                self._send_queries(lookup_queries_to_send)
            self._next_maintenance_ts = (current_time
                                         + maintenance_delay)
        # Auto-save routing table
        if current_time > self._next_save_state_ts:
            self.save_state()
            self._next_save_state_ts = current_time + SAVE_STATE_DELAY

        # Schedule next call
        delay = (min(self._next_maintenance_ts, self._next_save_state_ts)
                 - current_time)
        self._reactor.call_later(delay, self._main_loop)

    def _maintenance_lookup(self, target):
        # NOTE(review): the created lookup's initial queries are discarded
        # here -- confirm whether this helper is still used.
        self._lookup_m.maintenance_lookup(target)

    def _on_datagram_received(self, data, addr):
        """Decode an incoming datagram and dispatch it.

        Queries get a response; responses/errors are matched to their
        original query and forwarded to the lookup and routing managers.
        Invalid or unexpected messages are silently ignored.
        """
        try:
            msg = message.IncomingMsg(data, addr)
        except(message.MsgError):
            return # ignore message
        
        if msg.type == message.QUERY:
            response_msg = self._get_response(msg)
            if response_msg:
                bencoded_response = response_msg.encode(msg.tid)
                self._reactor.sendto(bencoded_response, addr)

            maintenance_queries_to_send = self._routing_m.on_query_received(
                msg.sender_node)
            
        elif msg.type in (message.RESPONSE, message.ERROR):
            related_query = self._querier.on_response_received(msg, addr)
            if not related_query:
                # Query timed out or unrequested response
                return
            # lookup related tasks
            if related_query.lookup_obj:
                if msg.type == message.RESPONSE:
                    (lookup_queries_to_send,
                     peers,
                     num_parallel_queries,
                     lookup_done
                     ) = related_query.lookup_obj.on_response_received(
                        msg, msg.sender_node)
                else: #ERROR
                    peers = None # an error msg doesn't have peers
                    (lookup_queries_to_send,
                     num_parallel_queries,
                     lookup_done
                     ) = related_query.lookup_obj.on_error_received(
                        msg, msg.sender_node)
                self._send_queries(lookup_queries_to_send)
                
                if related_query.lookup_obj.callback_f:
                    lookup_id = related_query.lookup_obj.lookup_id
                    if peers:
                        related_query.lookup_obj.callback_f(lookup_id, peers)
                    if lookup_done:
                        # None signals lookup completion
                        related_query.lookup_obj.callback_f(lookup_id, None)
            # maintenance related tasks
            if msg.type == message.RESPONSE:
                maintenance_queries_to_send = \
                    self._routing_m.on_response_received(
                    msg.sender_node, related_query.rtt, msg.all_nodes)
            else:
                maintenance_queries_to_send = \
                    self._routing_m.on_error_received(
                    msg.sender_node)
        else: # unknown type
            return
        self._send_queries(maintenance_queries_to_send)

    def _get_response(self, msg):
        """Build the response message for an incoming query, or None.

        Handles PING, FIND_NODE, GET_PEERS and ANNOUNCE_PEER; unknown
        query types are logged and produce no response.
        """
        if msg.query == message.PING:
            return message.OutgoingPingResponse(self._my_id)
        elif msg.query == message.FIND_NODE:
            log_distance = msg.target.log_distance(self._my_id)
            rnodes = self._routing_m.get_closest_rnodes(log_distance,
                                                       NUM_NODES, False)
            return message.OutgoingFindNodeResponse(self._my_id,
                                                    rnodes)
        elif msg.query == message.GET_PEERS:
            token = self._token_m.get()
            log_distance = msg.info_hash.log_distance(self._my_id)
            rnodes = self._routing_m.get_closest_rnodes(log_distance,
                                                       NUM_NODES, False)
            peers = self._tracker.get(msg.info_hash)
            if peers:
                logger.debug('RESPONDING with PEERS:\n%r' % peers)
            return message.OutgoingGetPeersResponse(self._my_id,
                                                    token,
                                                    nodes=rnodes,
                                                    peers=peers)
        elif msg.query == message.ANNOUNCE_PEER:
            # record the announcing peer under the given info_hash
            peer_addr = (msg.sender_addr[0], msg.bt_port)
            self._tracker.put(msg.info_hash, peer_addr)
            return message.OutgoingAnnouncePeerResponse(self._my_id)
        else:
            logger.debug('Invalid QUERY: %r' % (msg.query))
            #TODO: maybe send an error back?
        
    def _on_response_received(self, msg):
        # NOTE(review): unused stub -- confirm whether it can be removed.
        pass

    def _on_timeout(self, addr):
        """Handle a query timeout for 'addr'; may trigger more queries."""
        related_query = self._querier.on_timeout(addr)
        if not related_query:
            return # timeout cancelled (got response/error already)
        if related_query.lookup_obj:
            (lookup_queries_to_send,
             num_parallel_queries,
             lookup_done
             ) = related_query.lookup_obj.on_timeout(related_query.dstnode)
            self._send_queries(lookup_queries_to_send)
            if lookup_done and related_query.lookup_obj.callback_f:
                # None signals lookup completion
                lookup_id = related_query.lookup_obj.lookup_id
                related_query.lookup_obj.callback_f(lookup_id, None)
        maintenance_queries_to_send = self._routing_m.on_timeout(
            related_query.dstnode)
        self._send_queries(maintenance_queries_to_send)

    def _send_queries(self, queries_to_send, lookup_obj=None):
        """Register each query with the querier and put it on the wire.

        A timeout task is scheduled per query so unanswered queries are
        eventually reported to _on_timeout.  A None argument is a no-op.
        """
        if queries_to_send is not None:
            for query in queries_to_send:
                dst_addr = query.dstnode.addr
                # arm the timeout before sending
                timeout_task = self._reactor.call_later(
                    TIMEOUT_DELAY, self._on_timeout, dst_addr)
                self._reactor.sendto(
                    self._querier.register_query(query, timeout_task),
                    dst_addr)
Exemple #51
0
class Controller:
    def __init__(self, version_label, my_node, state_filename, routing_m_mod,
                 lookup_m_mod, experimental_m_mod, private_dht_name,
                 bootstrap_mode):
        """Wire up the DHT node's subsystems and load saved state.

        The node identifier is taken from 'my_node' when given, otherwise
        from the saved state file, otherwise a random one is generated.
        """
        if size_estimation:
            self._size_estimation_file = open('size_estimation.dat', 'w')

        self.state_filename = state_filename
        saved_id, saved_bootstrap_nodes = state.load(self.state_filename)
        my_addr = my_node.addr
        self._my_id = my_node.id  # id indicated by user
        if not self._my_id:
            self._my_id = saved_id  # id loaded from file
        if not self._my_id:
            # FIX: removed duplicated 'self._my_id =' in this assignment
            self._my_id = identifier.RandomId()  # random id
        self._my_node = Node(my_addr, self._my_id, version=version_label)
        self.msg_f = message.MsgFactory(version_label, self._my_id,
                                        private_dht_name)
        self._querier = Querier()
        self._routing_m = routing_m_mod.RoutingManager(self._my_node,
                                                       saved_bootstrap_nodes,
                                                       self.msg_f)

        self._responder = responder.Responder(self._my_id, self._routing_m,
                                              self.msg_f, bootstrap_mode)
        self._tracker = self._responder._tracker

        self._lookup_m = lookup_m_mod.LookupManager(self._my_id, self.msg_f)
        self._experimental_m = experimental_m_mod.ExperimentalManager(
            self._my_node.id, self.msg_f)

        # all periodic tasks are due immediately on the first main_loop call
        current_ts = time.time()
        self._next_save_state_ts = current_ts + SAVE_STATE_DELAY
        self._next_maintenance_ts = current_ts
        self._next_timeout_ts = current_ts
        self._next_main_loop_call_ts = current_ts
        self._pending_lookups = []
        self._cached_lookups = {}

    def on_stop(self):
        """Notify the experimental manager that the node is shutting down."""
        self._experimental_m.on_stop()

    def get_peers(self, lookup_id, info_hash, callback_f, bt_port, use_cache):
        """
        Start a get\_peers lookup whose target is 'info\_hash'. The handler
        'callback\_f' will be called with two arguments ('lookup\_id' and a
        'peer list') whenever peers are discovered. Once the lookup is
        completed, the handler will be called with 'lookup\_id' and None as
        arguments.

        This method is designed to be used as minitwisted's external handler.

        """
        datagrams_to_send = []
        logger.debug('get_peers %d %r' % (bt_port, info_hash))
        if use_cache:
            # serve recently cached peers without starting a new lookup
            peers = self._get_cached_peers(info_hash)
            if peers and callback_f and callable(callback_f):
                callback_f(lookup_id, peers, None)
                callback_f(lookup_id, None, None)
                return datagrams_to_send

        # queue the lookup; it starts once bootstrap nodes are available
        self._pending_lookups.append(
            self._lookup_m.get_peers(lookup_id, info_hash, callback_f,
                                     bt_port))
        queries_to_send = self._try_do_lookup()
        datagrams_to_send = self._register_queries(queries_to_send)
        return datagrams_to_send

    def _clean_peer_caches(self):
        """Drop cached lookup results older than CACHE_VALID_PERIOD."""
        oldest_valid_ts = time.time() - CACHE_VALID_PERIOD
        # BUG FIX: iterate over a snapshot -- deleting from a dict while
        # iterating its live items() view raises RuntimeError on Python 3.
        for key, values in list(self._cached_lookups.items()):
            ts, _ = values
            if ts < oldest_valid_ts:
                del self._cached_lookups[key]

    def _get_cached_peers(self, info_hash):
        """Return cached peers for 'info_hash', or None when absent."""
        self._clean_peer_caches()
        entry = self._cached_lookups.get(info_hash)
        if entry is not None:
            # entry is a (timestamp, peers) pair
            return entry[1]

    def _add_cache_peers(self, info_hash, peers):
        """Merge 'peers' into the cache entry for 'info_hash'.

        Expired entries are evicted first; a fresh (timestamp, []) entry
        is created when none exists for this info_hash.
        """
        self._clean_peer_caches()
        entry = self._cached_lookups.setdefault(info_hash, (time.time(), []))
        entry[1].extend(peers)

    def _try_do_lookup(self):
        """Start the oldest pending lookup if bootstrap nodes are available.

        Pending lookups older than PENDING_LOOKUP_TIMEOUT are dropped.
        When no bootstrap nodes are known yet, a retry is scheduled soon
        via _next_main_loop_call_ts.  Returns the queries to send.
        """
        queries_to_send = []
        # NOTE(review): 'current_time' is unused -- confirm it can go.
        current_time = time.time()
        while self._pending_lookups:
            pending_lookup = self._pending_lookups[0]
            # Drop all pending lookups older than PENDING_LOOKUP_TIMEOUT
            if time.time() > pending_lookup.start_ts + PENDING_LOOKUP_TIMEOUT:
                del self._pending_lookups[0]
            else:
                break
        if self._pending_lookups:
            lookup_obj = self._pending_lookups[0]
        else:
            return queries_to_send
        distance = lookup_obj.info_hash.distance(self._my_id)
        bootstrap_rnodes = self._routing_m.get_closest_rnodes(
            distance.log, 0, True)
        # TODO: get the full bucket
        if bootstrap_rnodes:
            del self._pending_lookups[0]
            # look if I'm tracking this info_hash
            peers = self._tracker.get(lookup_obj.info_hash)
            callback_f = lookup_obj.callback_f
            if peers:
                self._add_cache_peers(lookup_obj.info_hash, peers)
                if callback_f and callable(callback_f):
                    callback_f(lookup_obj.lookup_id, peers, None)
            # do the lookup
            queries_to_send = lookup_obj.start(bootstrap_rnodes)
        else:
            # no bootstrap nodes yet; retry shortly from main_loop
            next_lookup_attempt_ts = time.time() + .2
            self._next_main_loop_call_ts = min(self._next_main_loop_call_ts,
                                               next_lookup_attempt_ts)
        return queries_to_send

    def print_routing_table_stats(self):
        """Print routing-table statistics (delegates to the routing manager)."""
        self._routing_m.print_stats()

    def main_loop(self):
        """
        Perform maintenance operations. The main operation is routing table
        maintenance where staled nodes are added/probed/replaced/removed as
        needed. The routing management module specifies the implementation
        details.  This includes keeping track of queries that have not been
        responded for a long time (timeout) with the help of
        querier.Querier. The routing manager and the lookup manager will be
        informed of those timeouts.

        This method is designed to be used as minitwisted's heartbeat handler.

        """

        if prctlimported:
            # tag the worker thread's OS-level name for debugging
            prctl.set_name("Tribler" + currentThread().getName())

        queries_to_send = []
        current_ts = time.time()
        # TODO: I think this if should be removed
        # At most, 1 second between calls to main_loop after the first call
        if current_ts >= self._next_main_loop_call_ts:
            self._next_main_loop_call_ts = current_ts + 1
        else:
            # It's too early
            return self._next_main_loop_call_ts, []
        # Retry failed lookup (if any)
        queries_to_send.extend(self._try_do_lookup())

        # Take care of timeouts
        if current_ts >= self._next_timeout_ts:
            (self._next_timeout_ts,
             timeout_queries) = self._querier.get_timeout_queries()
            for query in timeout_queries:
                queries_to_send.extend(self._on_timeout(query))

        # Routing table maintenance
        if time.time() >= self._next_maintenance_ts:
            (maintenance_delay, queries,
             maintenance_lookup) = self._routing_m.do_maintenance()
            self._next_maintenance_ts = current_ts + maintenance_delay
            self._next_main_loop_call_ts = min(self._next_main_loop_call_ts,
                                               self._next_maintenance_ts)
            queries_to_send.extend(queries)
            if maintenance_lookup:
                # maintenance may request a full lookup towards 'target'
                target, rnodes = maintenance_lookup
                lookup_obj = self._lookup_m.maintenance_lookup(target)
                queries_to_send.extend(lookup_obj.start(rnodes))

        # Auto-save routing table
        if current_ts >= self._next_save_state_ts:
            state.save(self._my_id, self._routing_m.get_main_rnodes(),
                       self.state_filename)
            self._next_save_state_ts = current_ts + SAVE_STATE_DELAY
            self._next_main_loop_call_ts = min(self._next_main_loop_call_ts,
                                               self._next_maintenance_ts,
                                               self._next_timeout_ts,
                                               self._next_save_state_ts)
        # Return control to reactor
        datagrams_to_send = self._register_queries(queries_to_send)
        return self._next_main_loop_call_ts, datagrams_to_send

    def _maintenance_lookup(self, target):
        self._lookup_m.maintenance_lookup(target)

    def on_datagram_received(self, datagram):
        """
        Perform the actions associated to the arrival of the given
        datagram. The datagram will be ignored in cases such as invalid
        format. Otherwise, the datagram will be decoded and different modules
        will be informed to take action on it. For instance, if the datagram
        contains a response to a lookup query, both routing and lookup manager
        will be informed. Additionally, if that response contains peers, the
        lookup's handler will be called (see get\_peers above).
        This method is designed to be used as minitwisted's networking handler.

        Returns a (next_main_loop_call_ts, datagrams_to_send) pair, with the
        same meaning as main_loop()'s return value.

        """
        exp_queries_to_send = []

        addr = datagram.addr
        datagrams_to_send = []
        try:
            msg = self.msg_f.incoming_msg(datagram)

        except (message.MsgError):
            # Malformed datagram: ignore it silently (best effort).
            return self._next_main_loop_call_ts, datagrams_to_send

        if msg.type == message.QUERY:

            if msg.src_node.id == self._my_id:
                logger.debug('Got a msg from myself:\n%r', msg)
                return self._next_main_loop_call_ts, datagrams_to_send
            # zinat: inform experimental_module
            exp_queries_to_send = self._experimental_m.on_query_received(msg)

            # Build and queue the response to the incoming query.
            response_msg = self._responder.get_response(msg)
            if response_msg:
                bencoded_response = response_msg.stamp(msg.tid)
                datagrams_to_send.append(
                    message.Datagram(bencoded_response, addr))
            maintenance_queries_to_send = self._routing_m.on_query_received(
                msg.src_node)

        elif msg.type == message.RESPONSE:
            related_query = self._querier.get_related_query(msg)
            if not related_query:
                # Query timed out or unrequested response
                return self._next_main_loop_call_ts, datagrams_to_send
            # zinat: if related_query.experimental_obj:
            exp_queries_to_send = self._experimental_m.on_response_received(
                msg, related_query)
            # TODO: you need to get datagrams to be able to send messages (raul)
            # lookup related tasks
            if related_query.lookup_obj:
                (lookup_queries_to_send, peers, num_parallel_queries,
                 lookup_done) = related_query.lookup_obj.on_response_received(
                     msg, msg.src_node)
                datagrams = self._register_queries(lookup_queries_to_send)
                datagrams_to_send.extend(datagrams)

                lookup_obj = related_query.lookup_obj
                lookup_id = lookup_obj.lookup_id
                callback_f = lookup_obj.callback_f
                if peers:
                    self._add_cache_peers(lookup_obj.info_hash, peers)
                    if callback_f and callable(callback_f):
                        callback_f(lookup_id, peers, msg.src_node)
                # Size estimation
                if size_estimation and lookup_done:
                    line = '%d %d\n' % (related_query.lookup_obj.
                                        get_number_nodes_within_region())
                    self._size_estimation_file.write(line)
                    self._size_estimation_file.flush()
                if lookup_done:
                    if callback_f and callable(callback_f):
                        # peers=None signals the lookup is over.
                        callback_f(lookup_id, None, msg.src_node)
                    queries_to_send = self._announce(related_query.lookup_obj)
                    datagrams = self._register_queries(queries_to_send)
                    datagrams_to_send.extend(datagrams)

            # maintenance related tasks
            maintenance_queries_to_send = \
                self._routing_m.on_response_received(
                    msg.src_node, related_query.rtt, msg.all_nodes)

        elif msg.type == message.ERROR:
            related_query = self._querier.get_related_query(msg)
            if not related_query:
                # Query timed out or unrequested response
                return self._next_main_loop_call_ts, datagrams_to_send
            # TODO: zinat: same as response
            exp_queries_to_send = self._experimental_m.on_error_received(
                msg, related_query)
            # lookup related tasks (an error msg doesn't carry peers)
            if related_query.lookup_obj:
                (lookup_queries_to_send, num_parallel_queries,
                 lookup_done) = related_query.lookup_obj.on_error_received(
                     msg, addr)
                datagrams = self._register_queries(lookup_queries_to_send)
                datagrams_to_send.extend(datagrams)

                callback_f = related_query.lookup_obj.callback_f
                if callback_f and callable(callback_f):
                    lookup_id = related_query.lookup_obj.lookup_id
                    if lookup_done:
                        callback_f(lookup_id, None, msg.src_node)
                if lookup_done:
                    # Size estimation
                    if size_estimation:
                        line = '%d %d\n' % (related_query.lookup_obj.
                                            get_number_nodes_within_region())
                        self._size_estimation_file.write(line)
                        self._size_estimation_file.flush()

                    # Bug fix: _announce() returns *queries*, not datagrams.
                    # They must be registered with the querier first, exactly
                    # as done in the RESPONSE branch above; previously raw
                    # queries were appended to datagrams_to_send.
                    queries_to_send = self._announce(related_query.lookup_obj)
                    datagrams = self._register_queries(queries_to_send)
                    datagrams_to_send.extend(datagrams)
            # maintenance related tasks
            maintenance_queries_to_send = \
                self._routing_m.on_error_received(addr)

        else:  # unknown type
            return self._next_main_loop_call_ts, datagrams_to_send
        # we are done with the plugins
        # now we have maintenance_queries_to_send, let's send them!
        datagrams = self._register_queries(maintenance_queries_to_send)
        datagrams_to_send.extend(datagrams)
        if exp_queries_to_send:
            datagrams = self._register_queries(exp_queries_to_send)
            datagrams_to_send.extend(datagrams)
        return self._next_main_loop_call_ts, datagrams_to_send

    def _on_query_received(self):
        return

    def _on_response_received(self):
        return

    def _on_error_received(self):
        return

    def _on_timeout(self, related_query):
        """Handle the timeout of *related_query*.

        Informs the experimental module, the lookup the query belonged to
        (if any) and the routing manager, and returns the queries to be
        sent as a consequence.
        """
        queries_to_send = []
        # TODO: on_timeout should return queries (raul)
        exp_queries_to_send = self._experimental_m.on_timeout(related_query)
        if related_query.lookup_obj:
            (lookup_queries_to_send, num_parallel_queries,
             lookup_done) = related_query.lookup_obj.on_timeout(
                 related_query.dst_node)
            queries_to_send.extend(lookup_queries_to_send)
            callback_f = related_query.lookup_obj.callback_f
            if lookup_done:
                # Size estimation
                if size_estimation:
                    line = '%d %d\n' % (related_query.lookup_obj.
                                        get_number_nodes_within_region())
                    self._size_estimation_file.write(line)
                    self._size_estimation_file.flush()

                # peers=None signals the end of the lookup to the callback.
                lookup_id = related_query.lookup_obj.lookup_id
                if callback_f and callable(callback_f):
                    related_query.lookup_obj.callback_f(lookup_id, None, None)
                queries_to_send.extend(self._announce(
                    related_query.lookup_obj))
        maintenance_queries_to_send = self._routing_m.on_timeout(
            related_query.dst_node)
        if maintenance_queries_to_send:
            queries_to_send.extend(maintenance_queries_to_send)
        if exp_queries_to_send:
            # NOTE(review): this registers the experimental queries and mixes
            # the resulting *datagrams* into a list of unregistered *queries*
            # that main_loop() registers again -- confirm this is intended.
            datagrams = self._register_queries(exp_queries_to_send)
            queries_to_send.extend(datagrams)
        return queries_to_send

    def _announce(self, lookup_obj):
        queries_to_send, announce_to_myself = lookup_obj.announce()
        return queries_to_send

    '''
    if announce_to_myself:
    self._tracker.put(lookup_obj._info_hash,
    (self._my_node.addr[0], lookup_obj._bt_port))
    '''

    def _register_queries(self, queries_to_send, lookup_obj=None):
        if not queries_to_send:
            return []
        timeout_call_ts, datagrams_to_send = self._querier.register_queries(
            queries_to_send)
        self._next_main_loop_call_ts = min(self._next_main_loop_call_ts,
                                           timeout_call_ts)
        return datagrams_to_send