Example no. 1
    def __init__(self, infohash, num_pieces):
        # Number of pieces in the torrent
        self.num_pieces = num_pieces 
        
        # Vector of reserved-state information per piece
        self.reserved_pieces = [False] * num_pieces
        # Torrent infohash
        self.infohash = infohash # readonly so no locking on this

        # Dictionaries of sent challenges, indexed by challenge and by permid
        self.sent_challenges_by_challenge = {}
        self.sent_challenges_by_permid = {}

        # List of asked helpers 
        self.asked_helpers_lock = Lock()
        self.asked_helpers = [] # protected by asked_helpers_lock
        
        # List of confirmed helpers 
        self.confirmed_helpers_lock = Lock()
        self.confirmed_helpers = [] # protected by confirmed_helpers_lock
        
        # Dictionary tracking helpers and the pieces requested from them
        # Key: permid of a helper
        # Value: list of pieces requested from that helper
        self.requested_pieces = {} 
        
        # optimization
        # List of reserved pieces ???
        self.reserved = []
        
        # Tribler overlay bridge
        self.overlay_bridge = OverlayThreadingBridge.getInstance()
        
        # BT1Download object
        self.downloader = None
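
The comments above mark asked_helpers and confirmed_helpers as protected by their locks. A minimal sketch of that convention, using a hypothetical remove_confirmed_helper method (not part of the snippet) and the acquire/try/finally/release pattern used elsewhere in these examples:

    def remove_confirmed_helper(self, helper):
        # Hypothetical method: any access to self.confirmed_helpers must
        # hold confirmed_helpers_lock, as noted in __init__ above.
        self.confirmed_helpers_lock.acquire()
        try:
            if helper in self.confirmed_helpers:
                self.confirmed_helpers.remove(helper)
        finally:
            self.confirmed_helpers_lock.release()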
Example no. 2
    def __init__(self, torrent_hash, num_pieces, coordinator_permid, coordinator = None):
        self.overlay_bridge = OverlayThreadingBridge.getInstance()
        self.torrent_hash = torrent_hash
        if coordinator_permid is not None and coordinator_permid == '':
            self.coordinator_permid = None
        else:
            self.coordinator_permid = coordinator_permid
        self.coordinator_ip = None  # see is_coordinator()
        self.coordinator_port = -1

        if self.coordinator_permid is not None:
            peerdb = PeerDBHandler.getInstance()
            peer = peerdb.getPeer(coordinator_permid)
            if peer is not None:
                self.coordinator_ip = peer['ip']
                self.coordinator_port = peer['port']

        self.reserved_pieces = [False] * num_pieces
        self.ignored_pieces = [False] * num_pieces
        self.coordinator = coordinator
        self.counter = 0
        self.completed = False
        self.distr_reserved_pieces = [False] * num_pieces
        self.marker = [True] * num_pieces
        self.round = 0
        self.encoder = None
        self.continuations = []
        self.outstanding = None
        self.last_req_time = 0
    def __init__(self, infohash, num_pieces):
        self.reserved_pieces = [False] * num_pieces
        self.infohash = infohash # readonly so no locking on this

        self.lock = Lock()
        self.asked_helpers = [] # protected by lock
        # optimization
        self.reserved = []
        self.overlay_bridge = OverlayThreadingBridge.getInstance()
    def set_stop_collecting_threshold(self,value):
        self.sesslock.acquire()
        try:
            from BaseLib.Core.Overlay.OverlayThreadingBridge import OverlayThreadingBridge

            SessionConfigInterface.set_stop_collecting_threshold(self,value)
            olbridge = OverlayThreadingBridge.getInstance()
            task = lambda:self.olthread_set_stop_collecting_threshold(value)
            olbridge.add_task(task,0)
        finally:
            self.sesslock.release()

    def set_torrent_collecting_rate(self,value):
        self.sesslock.acquire()
        try:
            from BaseLib.Core.Overlay.OverlayThreadingBridge import OverlayThreadingBridge

            SessionConfigInterface.set_torrent_collecting_rate(self,value)
            olbridge = OverlayThreadingBridge.getInstance()
            task = lambda:self.olthread_set_torrent_collecting_rate(value)
            olbridge.add_task(task,0)
        finally:
            self.sesslock.release()
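
Both setters above follow the same pattern for handing work to the overlay thread: fetch the OverlayThreadingBridge singleton, wrap the call in a lambda, and schedule it with add_task. A stand-alone sketch of that pattern; olthread_do_work is a hypothetical task function:

from BaseLib.Core.Overlay.OverlayThreadingBridge import OverlayThreadingBridge

def olthread_do_work(value):
    # Hypothetical task body; it will be executed on the overlay thread.
    pass

def schedule_on_overlay_thread(value):
    olbridge = OverlayThreadingBridge.getInstance()
    task = lambda:olthread_do_work(value)
    olbridge.add_task(task,0)   # 0 = run as soon as possible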
Example no. 6
    def setUp(self):
        
        print >>sys.stderr,time.asctime(),'-', "test: TestOverlayThreadingBridge.setUp()"

        self.config_path = tempfile.mkdtemp()
        config = {}
        config['state_dir'] = self.config_path
        config['torrent_collecting_dir'] = self.config_path
        config['install_dir'] = os.path.join('..','..')
        config['peer_icon_path'] = os.path.join(self.config_path,'peer_icons')
        config['superpeer'] = False
        sqlitecachedb.init(config, self.rawserver_fatalerrorfunc)
        
        secover1 = SecureOverlay.getInstance()
        secover1.resetSingleton()
        secover2 = SecureOverlay.getInstance()
        secover2.resetSingleton()
        
        overbridge1 = OverlayThreadingBridge()
        overbridge1.register_bridge(secover1,None)
        overbridge1.resetSingleton()

        overbridge2 = OverlayThreadingBridge()
        overbridge2.register_bridge(secover2,None)
        overbridge2.resetSingleton()

        
        self.peer1 = Peer(self,1234,overbridge1)
        self.peer2 = Peer(self,5678,overbridge2)
        self.peer1.start()
        self.peer2.start()
        self.wanted = False
        self.wanted2 = False
        self.got = False
        self.got2 = False
        self.first = True

        print >>sys.stderr,time.asctime(),'-', "test: setUp: peer1 permid is",show_permid_short(self.peer1.my_permid)
        print >>sys.stderr,time.asctime(),'-', "test: setUp: peer2 permid is",show_permid_short(self.peer2.my_permid)

        time.sleep(2) # let server threads start
    def setUp(self):
        
        print >>sys.stderr,"test: TestOverlayThreadingBridge.setUp()"
        
        secover1 = SecureOverlay.getInstance()
        secover1.resetSingleton()
        secover2 = SecureOverlay.getInstance()
        secover2.resetSingleton()
        
        overbridge1 = OverlayThreadingBridge()
        overbridge1.register_bridge(secover1,None)

        overbridge2 = OverlayThreadingBridge()
        overbridge2.register_bridge(secover2,None)

        
        self.peer1 = Peer(self,1234,overbridge1)
        self.peer2 = Peer(self,5678,overbridge2)
        self.peer1.start()
        self.peer2.start()
        self.wanted = False
        self.wanted2 = False
        self.got = False
        self.got2 = False
        self.first = True

        print >>sys.stderr,"test: setUp: peer1 permid is",show_permid_short(self.peer1.my_permid)
        print >>sys.stderr,"test: setUp: peer2 permid is",show_permid_short(self.peer2.my_permid)

        time.sleep(2) # let server threads start
Example no. 8
    def __init__(self, session):
        if self.__singleton:
            raise RuntimeError, "Crawler is Singleton"
        self._overlay_bridge = OverlayThreadingBridge.getInstance()
        self._session = session
        self._crawler_db = CrawlerDBHandler.getInstance()

        # _message_handlers contains message-id:(request-callback, reply-callback, last-request-timestamp)
        # the handlers are called when either a CRAWL_REQUEST or CRAWL_REPLY message is received
        self._message_handlers = {}

        # _crawl_initiators is a list of (initiator-callback, frequency,
        # accept_frequency) tuples. The initiators are called when a new
        # connection is received.
        self._crawl_initiators = []

        # _initiator_deadlines contains [deadline, frequency,
        # accept_frequency, initiator-callback, permid, selversion,
        # failure-counter] entries. The deadlines record when to call the
        # crawl initiators again for a specific permid.
        self._initiator_deadlines = []
        
        # _dialback_deadlines contains message_id:(deadline, permid) pairs.
        # Client peers should connect back to a crawler, indicated by
        # permid, after the deadline has expired.
        self._dialback_deadlines = {}

        # _channels contains permid:buffer-dict pairs, where buffer_dict
        # contains channel-id:(timestamp, buffer, channel_data) pairs and
        # buffer is the payload of the multipart messages received so far.
        # Channels are used to match outstanding replies to their requests.
        self._channels = {}

        # start checking for expired deadlines
        self._check_deadlines(True)

        # start checking for ancient channels
        self._check_channels()
Example no. 9
    def __init__(self, session):
        if self.__singleton:
            raise RuntimeError, "Crawler is Singleton"
        self._overlay_bridge = OverlayThreadingBridge.getInstance()
        self._session = session
        self._crawler_db = CrawlerDBHandler.getInstance()

        # _message_handlers contains message-id:(request-callback, reply-callback, last-request-timestamp)
        # the handlers are called when either a CRAWL_REQUEST or CRAWL_REPLY message is received
        self._message_handlers = {}

        # _crawl_initiators is a list of (initiator-callback, frequency,
        # accept_frequency) tuples. The initiators are called when a new
        # connection is received.
        self._crawl_initiators = []

        # _initiator_deadlines contains [deadline, frequency,
        # accept_frequency, initiator-callback, permid, selversion,
        # failure-counter] entries. The deadlines record when to call the
        # crawl initiators again for a specific permid.
        self._initiator_deadlines = []

        # _dialback_deadlines contains message_id:(deadline, permid) pairs.
        # Client peers should connect back to a crawler, indicated by
        # permid, after the deadline has expired.
        self._dialback_deadlines = {}

        # _channels contains permid:buffer-dict pairs, where buffer_dict
        # contains channel-id:(timestamp, buffer, channel_data) pairs and
        # buffer is the payload of the multipart messages received so far.
        # Channels are used to match outstanding replies to their requests.
        self._channels = {}

        # start checking for expired deadlines
        self._check_deadlines(True)

        # start checking for ancient channels
        self._check_channels()
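
The comments in the two Crawler examples document the shape of _message_handlers and _crawl_initiators. A sketch of registration methods matching those shapes; the method names and signatures are assumptions, not taken from the snippets:

    def register_message_handler(self, message_id, request_callback, reply_callback):
        # Assumed helper: store the callbacks with an initial last-request
        # timestamp of 0, matching the documented layout
        # message-id:(request-callback, reply-callback, last-request-timestamp).
        self._message_handlers[message_id] = (request_callback, reply_callback, 0)

    def register_crawl_initiator(self, initiator_callback, frequency, accept_frequency):
        # Assumed helper: append an (initiator-callback, frequency,
        # accept_frequency) tuple, consulted when a new connection is received.
        self._crawl_initiators.append((initiator_callback, frequency, accept_frequency))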
Example no. 10
    def __init__(self):
        self.metadata_queue = {}  # protected by metadata_queue_lock
        self.metadata_queue_lock = Lock()
        self.overlay_bridge = OverlayThreadingBridge.getInstance()
        self.received_challenges = {}
Example no. 11
    print >>sys.stderr,time.asctime(),'-', "superpeer: config is",config

    if config['overlaylogpostfix'] == '':
        config['overlaylogpostfix'] = 'sp'+str(config['port'])+'.log'

    #
    # Currently we use an in-memory database for superpeers.
    # SQLite supports only per-thread in memory databases.
    # As our Session initialization is currently setup, the MainThread
    # creates the DB and adds the superpeer entries, and the OverlayThread
    # does most DB operations. So two threads accessing the DB.
    #
    # To work around this I start the Session using the OverlayThread.
    # Dirty, but a simple solution.
    # 
    overlay_bridge = OverlayThreadingBridge.getInstance()
    overlay_bridge.add_task(olthread_start_session,0)
    
    #
    # NetworkThread and OverlayThread will now do their work. The MainThread
    # running this code should wait indefinitely to avoid exiting the
    # process.
    #
    try:
        while True:
            # time.sleep(sys.maxint) has "issues" on 64bit architectures; divide it
            # by some value (2048) to work around the problem
            time.sleep(sys.maxint/2048)
    except:
        print_exc()
    
Example no. 12
    print >>sys.stderr,"superpeer: config is",config

    if config['overlaylogpostfix'] == '':
        config['overlaylogpostfix'] = 'sp'+str(config['port'])+'.log'

    #
    # Currently we use an in-memory database for superpeers.
    # SQLite supports only per-thread in memory databases.
    # As our Session initialization is currently setup, the MainThread
    # creates the DB and adds the superpeer entries, and the OverlayThread
    # does most DB operations. So two threads accessing the DB.
    #
    # To work around this I start the Session using the OverlayThread.
    # Dirty, but a simple solution.
    # 
    overlay_bridge = OverlayThreadingBridge.getInstance()
    overlay_bridge.add_task(olthread_start_session,0)
    
    #
    # NetworkThread and OverlayThread will now do their work. The MainThread
    # running this code should wait indefinitely to avoid exiting the
    # process.
    #
    try:
        while True:
            # time.sleep(sys.maxint) has "issues" on 64bit architectures; divide it
            # by some value (2048) to work around the problem
            time.sleep(sys.maxint/2048)
    except:
        print_exc()
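
The superpeer examples schedule olthread_start_session on the overlay thread so that the in-memory SQLite database is created and used from a single thread. A sketch of what that task could look like, assuming BaseLib's Session and SessionStartupConfig classes; the exact settings are assumptions and config refers to the dict prepared above:

from BaseLib.Core.Session import Session
from BaseLib.Core.SessionConfig import SessionStartupConfig

session = None

def olthread_start_session():
    # Runs on the OverlayThread so that all database access stays on the
    # thread that owns the in-memory SQLite database.
    global session
    sscfg = SessionStartupConfig()
    sscfg.set_listen_port(config['port'])   # 'port' from the superpeer config dict
    session = Session(sscfg)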