class ProgramInfo(object):
    """Build/deploy metadata for the program rooted at *program_path*.

    Reads the octopus project file plus the METAINFO deploy/build files
    and exposes the dependency table, scripts and build metainfo.
    """

    def __init__(self, program_path):
        # Start with an empty dependency table; load() fills it in.
        # (A stray dead `pass` after load() was removed.)
        self.depends = {}
        self.load(program_path)

    def load(self, program_path):
        """Load dependency, script and build information from *program_path*."""
        self.path = program_path

        # Project configuration: dependencies, package name(s), scripts.
        project_file = self.path + '/octopus.prj'
        prj_conf = pyetc.load(project_file, project_schema.ENV)
        self.depends = prj_conf.CONFIG.depends
        self.package = prj_conf.CONFIG.package
        self.packages = prj_conf.CONFIG.packages

        # Load version information from the deploy metainfo: record the
        # deployed build version of every dependency we know about.
        deploy_metainfo = self.path + '/METAINFO/deploy.inf'
        module2ver = read_deploy(deploy_metainfo)
        for (mod_name, mod_ver) in module2ver.items():
            if mod_name in self.depends:
                self.depends[mod_name]['build'] = mod_ver

        self.scripts = prj_conf.CONFIG.scripts

        # Build metainfo is optional; keep an empty Metainfo when absent.
        build_metainfo_file = self.path + '/METAINFO/build.inf'
        self.buildmeta = Metainfo()
        if os.path.exists(build_metainfo_file):
            self.buildmeta.load(build_metainfo_file)

    def get_depends(self):
        return self.depends

    def get_scripts(self):
        return self.scripts

    def get_buildmeta(self):
        return self.buildmeta
def test_constructor(self):
    """Constructing Metainfo from a bad path or non-torrent file must fail."""
    # TODO Test the constructor with additional malformed .torrent
    # files and look for the appropriate exceptions

    # A nonexistent path raises IOError from the underlying open().
    with pytest.raises(IOError):
        Metainfo("")

    # An existing file that is not bencoded torrent data raises ValueError.
    with pytest.raises(ValueError):
        Metainfo("test_metainfo.py")
def __init__(self, ip_addr, port, filename):
    """Set up tracker and peer sockets for the torrent in *filename*."""
    # Make sure open sockets are cleaned up when the interpreter exits.
    atexit.register(self.exit_handler)

    # Peers (and connection info) that this client is connected to.
    self.connection_list = list()
    self.listen_list = list()

    self.metainfo = Metainfo(filename)
    self.filename = filename
    self.ip_addr = ip_addr
    self.port = port
    self.peer_id = os.urandom(20)
    self.info_hash = self.metainfo.info_hash
    self.uploaded = 0
    self.downloaded = 0

    # Connect to the tracker assumed to be listening on localhost:6969.
    self.tracker_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.tracker_socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    self.tracker_socket.connect((socket.gethostbyname('localhost'), 6969))

    # If the client already has the file, advertise a full bitfield and
    # report nothing left to download; otherwise everything is needed.
    self.bitfield = BitArray(self.metainfo.num_pieces)
    if self.check_for_file():
        self.bitfield.set(True)
        self.left = 0
    else:
        self.bitfield.set(False)
        self.left = self.metainfo.file_length

    self.send_GET_request(0)

    # Accept incoming peer connections on (ip_addr, port).
    self.listening_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    self.listening_socket.setsockopt(socket.SOL_SOCKET,
                                     socket.SO_REUSEADDR, 1)
    self.listening_socket.bind(
        (socket.gethostbyname(self.ip_addr), self.port))
    self.listening_socket.listen(5)

    # while True:
    #     (opentracker_socket, opentracker_addr) = self.socket.accept()
    #     response = socket.recv(4096)
    #     print(response)

    # Read and parse the tracker's HTTP-style status line.
    tracker_response = self.tracker_socket.recv(1024)
    print("Response: ", tracker_response)
    status = re.split(" ", str(tracker_response)[2:-1])
    if status[1] != "200":
        print("ERROR")
    else:
        print("parsing tracker response...")
        self.response = tracker_response
        self.handle_tracker_response()

    # Handle incoming peer handshakes on a background daemon thread.
    listening_thread = threading.Thread(target=self.listen_for_handshake)
    listening_thread.daemon = True
    listening_thread.start()
def gen_build_metainfo(work_path, build_no):
    """Generate METAINFO/build.inf under *work_path*.

    Records the current SVN URL@revision and the build number.

    Args:
        work_path: directory under which METAINFO/ is created.
        build_no:  build number to record in the metainfo.
    """
    svn = SVNUtil()
    svn_info = svn.info()

    metainfo = Metainfo()
    metainfo.svn = '%s@%d' % (svn_info['svn'], svn_info['revision'])
    metainfo.build_number = build_no

    # os.path.join copes with a trailing '/' on work_path and avoids the
    # doubled separator ('METAINFO//build.inf') that the old manual
    # concatenation produced.
    target_dir = os.path.join(work_path, 'METAINFO')
    if not os.path.isdir(target_dir):
        os.makedirs(target_dir)
    metainfo.save(os.path.join(target_dir, 'build.inf'))
def __init__(self, client, filename, port, peer_id): self._client = client self._filename = filename self._port = port self._peer_id = peer_id # _peers is a list of peers that the TorrentManager is trying # to communicate with self._peers = [] # _bitfields is a dictionary mapping peers to a bitfield of the pieces # each has self._bitfields = {} try: self._metainfo = Metainfo(filename) except (IOError, ValueError) as err: if isinstance(err, IOError): message = err.strerror+' ('+filename+')' else: message = err.message+' ('+filename+')' logger.error(message) raise TorrentManagerError(message) # _have is the bitfield for this torrent. It is initialized to reflect # which pieces are already available on disk. self._filemanager = FileManager(self._metainfo) self._have = self._filemanager.have() try: self._tracker_proxy = TrackerProxy(self._metainfo, self._port, self._peer_id) except TrackerError as err: logger.critical("Could not connect to tracker at {}" .format(self._metainfo.announce)) logger.debug(" TrackerError: {}".format(err.message)) raise TorrentManagerError(err.message) self._needed = {piece: (0, []) for piece in list(self._have.findall('0b0'))} self._interested = {} self._requesting = {} self._partial = [] self._reactor = Reactor() self._reactor.schedule_timer(_TIMER_INTERVAL, self.timer_event) self._tick = 1 print "Starting to serve torrent {} off of Cracker website...".format(filename) self._connect_to_peers(20)
class TorrentManager(object): def __init__(self, client, filename, port, peer_id): self._client = client self._filename = filename self._port = port self._peer_id = peer_id # _peers is a list of peers that the TorrentManager is trying # to communicate with self._peers = [] # _bitfields is a dictionary mapping peers to a bitfield of the pieces # each has self._bitfields = {} try: self._metainfo = Metainfo(filename) except (IOError, ValueError) as err: if isinstance(err, IOError): message = err.strerror+' ('+filename+')' else: message = err.message+' ('+filename+')' logger.error(message) raise TorrentManagerError(message) # _have is the bitfield for this torrent. It is initialized to reflect # which pieces are already available on disk. self._filemanager = FileManager(self._metainfo) self._have = self._filemanager.have() try: self._tracker_proxy = TrackerProxy(self._metainfo, self._port, self._peer_id) except TrackerError as err: logger.critical("Could not connect to tracker at {}" .format(self._metainfo.announce)) logger.debug(" TrackerError: {}".format(err.message)) raise TorrentManagerError(err.message) self._needed = {piece: (0, []) for piece in list(self._have.findall('0b0'))} self._interested = {} self._requesting = {} self._partial = [] self._reactor = Reactor() self._reactor.schedule_timer(_TIMER_INTERVAL, self.timer_event) self._tick = 1 print "Starting to serve torrent {} off of Cracker website...".format(filename) self._connect_to_peers(20) def _connect_to_peers(self, n): # Get addresses of n peers from the tracker and try to establish # a connection with each addrs = self._tracker_proxy.get_peers(n) for addr in addrs: peer = PeerProxy(self, self._peer_id, (addr['ip'], addr['port']), info_hash=self._metainfo.info_hash) self._peers.append(peer) self._bitfields[peer] = BitArray(self._metainfo.num_pieces) def _remove_peer(self, peer): # Clean up references to the peer in various data structures self._peers.remove(peer) pieces = 
list(self._bitfields[peer].findall('0b1')) for piece in pieces: if piece in self._needed: occurences, peers = self._needed[piece] if peer in peers: peers.remove(peer) self._needed[piece] = (occurences-1, peers) del self._bitfields[peer] if peer in self._interested: del self._interested[peer] elif peer in self._requesting: # If the peer is in the middle of downloading a piece, save # the state in the partial list index, offset, sha1, _, _ = self._requesting[peer] self._partial.append((index, offset, sha1)) del self._requesting[peer] def _rarest(self): # Returns a list of tuples which includes a piece index sorted by # the number of peers which have the piece in ascending order return sorted([(occurences, peers, index) for (index, (occurences, peers)) in self._needed.items() if occurences != 0]) def _show_interest(self, peer): if not peer.is_interested(): logger.debug("Expressing interest in peer {}" .format(str(peer.addr()))) peer.interested() if not peer.is_peer_choked(): self._request(peer) def _check_interest(self, peer): # If the peer is not already interested or requesting, identify a piece # for it to download and show interest to the peer. 
if peer not in self._interested and peer not in self._requesting: # Compute the set of needed pieces which the peer has that are not # already designated for another peer needed = self._have.copy() needed.invert() of_interest = list((needed & self._bitfields[peer]).findall('0b1')) dont_consider = [i for i, _, _, _ in self._interested.values()] dont_consider.extend([i for i, _, _, _, _ in self._requesting.values()]) if len(of_interest) > 0: for index, offset, sha1 in self._partial: if index in of_interest: self._partial.remove((index, offset, sha1)) self._interested[peer] = (index, offset, sha1, self._tick) self._show_interest(peer) return for _, _, index in self._rarest(): if index in of_interest and index not in dont_consider: self._interested[peer] = (index, 0, hashlib.sha1(), self._tick) self._show_interest(peer) return if peer not in self._interested and peer.is_interested(): logger.debug("Expressing lack of interest in peer {}" .format(str(peer.addr()))) peer.not_interested() self._connect_to_peers(1) def _request(self, peer): if peer in self._interested: index, offset, sha1, _ = self._interested[peer] del self._interested[peer] self._requesting[peer] = (index, offset, sha1, self._tick, 0) index, received_bytes, _, _, _ = self._requesting[peer] bytes_to_request = self._bytes_to_request(index, received_bytes) logger.debug("Requesting pc: {} off: {} len: {} from {}" .format(index, received_bytes, bytes_to_request, str(peer.addr()))) peer.request(index, received_bytes, bytes_to_request) def _is_last_piece(self, index): return index == self._metainfo.num_pieces-1 def _length_of_last_piece(self): return (self._metainfo.total_length - (self._metainfo.num_pieces-1)*self._metainfo.piece_length) def _length_of_piece(self, index): if self._is_last_piece(index): return self._length_of_last_piece() else: return self._metainfo.piece_length def _in_last_block(self, index, offset): if self._is_last_piece(index): piece_length = self._length_of_last_piece() else: piece_length 
= self._metainfo.piece_length return piece_length-offset < _BLOCK_SIZE def _bytes_to_request(self, index, offset): if not self._in_last_block(index, offset): return _BLOCK_SIZE else: return self._length_of_piece(index) - offset def info_hash(self): return self._metainfo.info_hash # PeerProxy callbacks def get_bitfield(self): return self._have def peer_unconnected(self, peer): logger.info("Peer {} is unconnected".format(str(peer.addr()))) self._remove_peer(peer) self._connect_to_peers(1) def peer_bitfield(self, peer, bitfield): # Validate the bitfield length = len(bitfield) if (length < self._metainfo.num_pieces or (length > self._metainfo.num_pieces and bitfield[self._metainfo.num_pieces:length].any(1))): logger.debug("Invalid bitfield from peer {}" .format(str(peer.addr()))) peer.drop_connection() self._remove_peer(peer) self._connect_to_peers(1) return # Set the peer's bitfield and updated needed to reflect which pieces # the peer has logger.debug("Peer at {} sent bitfield".format(str(peer.addr()))) self._bitfields[peer] = bitfield[0:self._metainfo.num_pieces] pieces = list(self._bitfields[peer].findall('0b1')) for piece in pieces: if piece in self._needed: occurences, peers = self._needed[piece] if peer not in peers: peers.append(peer) self._needed[piece] = (occurences+1, peers) # Check whether there may be interest obtaining a piece from this peer self._check_interest(peer) def peer_has(self, peer, index): # Update the peer's bitfield and needed to reflect the availability # of the piece logger.debug("Peer at {} has piece {}".format(str(peer.addr()), index)) if index < self._metainfo.num_pieces: self._bitfields[peer][index] = 1 else: raise IndexError if index in self._needed: occurences, peers = self._needed[index] if peer not in peers: peers.append(peer) self._needed[index] = (occurences+1, peers) # Check whether there may be interest obtaining a piece from this # peer self._check_interest(peer) def peer_choked(self, peer): logger.debug("Peer {} 
choked".format(str(peer.addr()))) if peer in self._interested: del self._interested[peer] elif peer in self._requesting: # When choked in the middle of obtaining a piece, save the # progress in the partial list index, offset, sha1, _, _ = self._requesting[peer] self._partial.append((index, offset, sha1)) del self._requesting[peer] def peer_unchoked(self, peer): logger.debug("Peer {} unchoked".format(str(peer.addr()))) if peer in self._interested: self._request(peer) def peer_sent_block(self, peer, index, begin, buf): if peer not in self._requesting: # If a peer is very slow in responding, a block could come after # it has timed out. Just ignore the data at this point and # ignore the slow peer logger.debug("Received block from peer {} which has timed out" .format(str(peer.addr()))) return piece, received_bytes, sha1, _, _ = self._requesting[peer] if piece == index and begin == received_bytes: # When the next expected block is received, update the hash value # and write the block to file sha1.update(buf) self._filemanager.write_block(index, begin, buf) self._requesting[peer] = (piece, received_bytes + len(buf), sha1, self._tick, 0) if received_bytes + len(buf) < self._length_of_piece(index): # Request the next block in the piece self._request(peer) else: # On receipt of the last block in the piece, verify the hash # and update the records to reflect receipt of the piece if sha1.digest() == self._metainfo.piece_hash(index): logger.info("Successfully got piece {} from {}" .format(index, str(peer.addr()))) del self._needed[index] percent = 100 * (1 - (len(self._needed) / float(self._metainfo.num_pieces))) print "{0}: Downloaded {1:1.4f}%".format(self._filename, percent) self._have[index] = 1 else: logger.info("Unsuccessfully got piece {} from {}" .format(index, str(peer.addr()))) del self._requesting[peer] if self._needed != {}: # Try to find another piece for this peer to get self._check_interest(peer) else: logger.info("Successfully downloaded entire torrent {}" 
.format(self._filename)) self._client.download_complete(self._filename) def peer_interested(self, peer): pass def peer_not_interested(self, peer): pass def peer_request(self, peer, index, begin, length): pass def peer_canceled(self, peer, index, begin, length): pass # Reactor callback def timer_event(self): self._reactor.schedule_timer(_TIMER_INTERVAL, self.timer_event) self._tick += 1 # For any peers that have been interested but unchoked for an # excessive period of time, stop being interested, free up assigned # piece and connect to another peer for peer, (_, _, _, tick) in self._interested.items(): if tick + 4 == self._tick: logger.debug("Timed out on interest for peer {}" .format(str(peer.addr()))) peer.not_interested() del self._interested[peer] self._connect_to_peers(1) # For any peer that has an outstanding request for an excessive period # of time, resend the request message in case it got lost or is being # ignored for peer, (index, offset, sha1, tick, retries) \ in self._requesting.items(): if tick + 5 == self._tick: logger.debug("Timed out on request for peer {}" .format(str(peer.addr()))) if retries < _MAX_RETRIES: self._requesting[peer] = (index, offset, sha1, self._tick, retries+1) self._request(peer) else: self._partial.append((index, offset, sha1)) del self._requesting[peer] peer.not_interested() self._connect_to_peers(1)
def initialize(self):
    """
    initialize() returns a deferred which fires when initialization of
    the TorrentMgr is complete or an error has occurred.
    """
    # Helper: build a deferred already fired with a TorrentMgrError.
    def _failed(message):
        d = Deferred()
        d.errback(TorrentMgrError(message))
        return d

    if not self._state == self._States.Uninitialized:
        error = "TorrentMgr can't be reinitialized"
        logger.debug(error)
        return _failed(error)

    try:
        self._metainfo = Metainfo(self._filename)
    except ValueError as err:
        logger.error(err)
        return _failed(err.message)
    except IOError as err:
        logger.error(err)
        return _failed(err.strerror)

    # _peers is a list of peers that the TorrentMgr is trying
    # to communicate with
    self._peers = []

    # _bitfields is a dictionary mapping peers to a bitfield of the
    # pieces each has
    self._bitfields = {}

    # _have is the bitfield for this torrent. It is initialized to
    # reflect which pieces are already available on disk.
    self._filemgr = FileMgr(self._metainfo)
    self._have = self._filemgr.have()

    # _needed is a dictionary of pieces which are still needed.  The
    # value for each piece is a tuple of the number of peers which have
    # the piece and a list of those peers.
    self._needed = {}
    for piece in list(self._have.findall('0b0')):
        self._needed[piece] = (0, [])

    # _interested maps peers to whom interest has been expressed to a
    # tuple of (reserved piece, bytes received, sha1 of bytes received,
    # tick at which interest was expressed).
    self._interested = {}

    # _requesting maps peers with an outstanding block request to a tuple
    # of (piece, bytes received, sha1 of bytes received, tick of the
    # request, retry count).
    self._requesting = {}

    # _partial lists interrupted piece downloads as tuples of
    # (piece index, bytes received, sha1 of those bytes).
    self._partial = []

    self._tracker_proxy = TrackerProxy(self._metainfo, self._port,
                                       self._peer_id)

    def success(result):
        self._state = self._States.Initialized

    def failure(failure):
        logger.critical("Could not connect to tracker at {}"
                        .format(self._metainfo.announce))
        message = failure.value.message
        logger.debug(" Tracker Error: {}".format(message))
        raise TorrentMgrError(message)

    return self._tracker_proxy.start().addCallbacks(success, failure)
class TorrentMgr(object): class _States(object): (Uninitialized, Initialized, Started) = range(3) def __init__(self, filename, port, peer_id, reactor): self._filename = filename self._port = port self._peer_id = peer_id self._reactor = reactor self._state = self._States.Uninitialized def initialize(self): """ initialize() returns a deferred which fires when initialization of the TorrentMgr is complete or an error has occurred. """ if not self._state == self._States.Uninitialized: error = "TorrentMgr can't be reinitialized" logger.debug(error) d = Deferred() d.errback(TorrentMgrError(error)) return d try: self._metainfo = Metainfo(self._filename) except ValueError as err: logger.error(err) d = Deferred() d.errback(TorrentMgrError(err.message)) return d except IOError as err: logger.error(err) d = Deferred() d.errback(TorrentMgrError(err.strerror)) return d # _peers is a list of peers that the TorrentMgr is trying # to communicate with self._peers = [] # _bitfields is a dictionary mapping peers to a bitfield of the pieces # each has self._bitfields = {} # _have is the bitfield for this torrent. It is initialized to reflect # which pieces are already available on disk. self._filemgr = FileMgr(self._metainfo) self._have = self._filemgr.have() # _needed is a dictionary of pieces which are still needed. # The value for each piece is a tuple of the number of peers which # have the piece and a list of those peers. self._needed = {piece: (0, []) for piece in list(self._have.findall('0b0'))} # _interested is a dictionary of peers to whom interest has been # expressed. The value for each peer is a tuple of the piece that # has been reserved for the peer, the number of bytes of the piece that # have already been received, the sha1 hash of the bytes received so # far and the value of the tick at the time interest was expressed. self._interested = {} # _requesting is a dictionary of peers to whom a block request has been # made. 
The value for each peer is a tuple of the piece that is being # requested, the number of bytes that have already been received, the # shal2 hash of the bytes received so far, the value of the tick at # the time the request was made and the number of retries that have # been attempted self._requesting = {} # _partial is a list which tracks pieces that were interrupted while # being downloaded. Each entry is a tuple containing the index of the # piece, the number of bytes received so far and the sha1 hash of those # bytes. self._partial = [] self._tracker_proxy = TrackerProxy(self._metainfo, self._port, self._peer_id) def success(result): self._state = self._States.Initialized def failure(failure): logger.critical("Could not connect to tracker at {}" .format(self._metainfo.announce)) message = failure.value.message logger.debug(" Tracker Error: {}".format(message)) raise TorrentMgrError(message) return self._tracker_proxy.start().addCallbacks(success, failure) def start(self): if not self._state == self._States.Initialized: raise TorrentMgrError("TorrentMgr must be initialized to be " "started") self._reactor.callLater(_TIMER_INTERVAL, self.timer_event) self._tick = 1 logger.info("Starting to serve torrent {}".format(self._filename)) print "Starting to serve torrent {}".format(self._filename) self._state = self._States.Started self._connect_to_peers(20) def percent(self): if not self._state == self._States.Uninitialized: return 100 * (1 - (len(self._needed) / float(self._metainfo.num_pieces))) else: raise TorrentMgrError("Can't get percent on uninitialized " "TorrentMgr") def info_hash(self): if not self._state == self._States.Uninitialized: return self._metainfo.info_hash else: raise TorrentMgrError("Can't get hash on uninitialized " "TorrentMgr") def name(self): return self._metainfo.name def _connect_to_peers(self, n): # Get addresses of n peers from the tracker and try to establish # a connection with each def handle_addrs(addrs): for addr in addrs: peer = 
PeerProxy(self, self._peer_id, (addr['ip'], addr['port']), self._reactor, info_hash=self._metainfo.info_hash) self._peers.append(peer) self._bitfields[peer] = BitArray(self._metainfo.num_pieces) self._tracker_proxy.get_peers(n).addCallback(handle_addrs) def _remove_peer(self, peer): # Clean up references to the peer in various data structures self._peers.remove(peer) pieces = list(self._bitfields[peer].findall('0b1')) for piece in pieces: if piece in self._needed: occurences, peers = self._needed[piece] if peer in peers: peers.remove(peer) self._needed[piece] = (occurences-1, peers) del self._bitfields[peer] if peer in self._interested: del self._interested[peer] elif peer in self._requesting: # If the peer is in the middle of downloading a piece, save # the state in the partial list index, offset, sha1, _, _ = self._requesting[peer] self._partial.append((index, offset, sha1)) del self._requesting[peer] def _rarest(self): # Returns a list of tuples which includes a piece index sorted by # the number of peers which have the piece in ascending order return sorted([(occurences, peers, index) for (index, (occurences, peers)) in self._needed.items() if occurences != 0]) def _show_interest(self, peer): if not peer.is_interested(): logger.debug("Expressing interest in peer {}" .format(str(peer.addr()))) peer.interested() if not peer.is_peer_choked(): self._request(peer) def _check_interest(self, peer): # If the peer is not already interested or requesting, identify a piece # for it to download and show interest to the peer. 
if not peer in self._interested and not peer in self._requesting: # Compute the set of needed pieces which the peer has that are not # already designated for another peer needed = self._have.copy() needed.invert() of_interest = list((needed & self._bitfields[peer]).findall('0b1')) dont_consider = [i for i, _, _, _ in self._interested.values()] dont_consider.extend([i for i, _, _, _, _ in self._requesting.values()]) # When there are potential pieces for the peer to download, give # preference to a piece that has already been partially # downloaded followed by the rarest available piece if len(of_interest) > 0: for index, offset, sha1 in self._partial: if index in of_interest: self._partial.remove((index, offset, sha1)) self._interested[peer] = (index, offset, sha1, self._tick) self._show_interest(peer) return for _, _, index in self._rarest(): if index in of_interest and not index in dont_consider: self._interested[peer] = (index, 0, hashlib.sha1(), self._tick) self._show_interest(peer) return # If there is no further piece for a peer which was previously # interested to download, make it not interested and connect to # another peer if not peer in self._interested and peer.is_interested(): logger.debug("Expressing lack of interest in peer {}" .format(str(peer.addr()))) peer.not_interested() self._connect_to_peers(1) def _request(self, peer): if peer in self._interested: index, offset, sha1, _ = self._interested[peer] del self._interested[peer] self._requesting[peer] = (index, offset, sha1, self._tick, 0) index, received_bytes, _, _, _ = self._requesting[peer] bytes_to_request = self._bytes_to_request(index, received_bytes) logger.debug("Requesting pc: {} off: {} len: {} from {}" .format(index, received_bytes, bytes_to_request, str(peer.addr()))) peer.request(index, received_bytes, bytes_to_request) def _is_last_piece(self, index): return index == self._metainfo.num_pieces-1 def _length_of_last_piece(self): return (self._metainfo.total_length - 
(self._metainfo.num_pieces-1)*self._metainfo.piece_length) def _length_of_piece(self, index): if self._is_last_piece(index): return self._length_of_last_piece() else: return self._metainfo.piece_length def _in_last_block(self, index, offset): if self._is_last_piece(index): piece_length = self._length_of_last_piece() else: piece_length = self._metainfo.piece_length return piece_length-offset < _BLOCK_SIZE def _bytes_to_request(self, index, offset): if not self._in_last_block(index, offset): return _BLOCK_SIZE else: return self._length_of_piece(index) - offset # PeerProxy callbacks def get_bitfield(self): return self._have def peer_unconnected(self, peer): logger.info("Peer {} is unconnected".format(str(peer.addr()))) self._remove_peer(peer) self._connect_to_peers(1) def peer_bitfield(self, peer, bitfield): # Validate the bitfield length = len(bitfield) if (length < self._metainfo.num_pieces or (length > self._metainfo.num_pieces and bitfield[self._metainfo.num_pieces:length].any(1))): logger.debug("Invalid bitfield from peer {}" .format(str(peer.addr()))) peer.drop_connection() self._remove_peer(peer) self._connect_to_peers(1) return # Set the peer's bitfield and updated needed to reflect which pieces # the peer has logger.debug("Peer at {} sent bitfield".format(str(peer.addr()))) self._bitfields[peer] = bitfield[0:self._metainfo.num_pieces] pieces = list(self._bitfields[peer].findall('0b1')) for piece in pieces: if piece in self._needed: occurences, peers = self._needed[piece] if not peer in peers: peers.append(peer) self._needed[piece] = (occurences+1, peers) # Check whether there may be interest obtaining a piece from this peer self._check_interest(peer) def peer_has(self, peer, index): # Update the peer's bitfield and needed to reflect the availability # of the piece logger.debug("Peer at {} has piece {}".format(str(peer.addr()), index)) if index < self._metainfo.num_pieces: self._bitfields[peer][index] = 1 else: raise IndexError if index in self._needed: 
occurences, peers = self._needed[index] if not peer in peers: peers.append(peer) self._needed[index] = (occurences+1, peers) # Check whether there may be interest obtaining a piece from this # peer self._check_interest(peer) def peer_choked(self, peer): logger.debug("Peer {} choked".format(str(peer.addr()))) if peer in self._interested: del self._interested[peer] elif peer in self._requesting: # When choked in the middle of obtaining a piece, save the # progress in the partial list index, offset, sha1, _, _ = self._requesting[peer] self._partial.append((index, offset, sha1)) del self._requesting[peer] def peer_unchoked(self, peer): logger.debug("Peer {} unchoked".format(str(peer.addr()))) if peer in self._interested: self._request(peer) def peer_sent_block(self, peer, index, begin, buf): if not peer in self._requesting: # If a peer is very slow in responding, a block could come after # it has timed out. Just ignore the data at this point and # ignore the slow peer logger.debug("Received block from peer {} which has timed out" .format(str(peer.addr()))) return piece, received_bytes, sha1, _, _ = self._requesting[peer] if piece == index and begin == received_bytes: # When the next expected block is received, update the hash value # and write the block to file sha1.update(buf) self._filemgr.write_block(index, begin, buf) self._requesting[peer] = (piece, received_bytes + len(buf), sha1, self._tick, 0) if received_bytes + len(buf) < self._length_of_piece(index): # Request the next block in the piece self._request(peer) else: # On receipt of the last block in the piece, verify the hash # and update the records to reflect receipt of the piece if sha1.digest() == self._metainfo.piece_hash(index): logger.info("Successfully received piece {} from {}" .format(index, str(peer.addr()))) del self._needed[index] print "{0}: Downloaded {1:1.4f}%".format(self._filename, self.percent()) self._have[index] = 1 else: logger.info("Unsuccessfully received piece {} from {}" .format(index, 
str(peer.addr()))) del self._requesting[peer] if self._needed != {}: # Try to find another piece for this peer to get self._check_interest(peer) else: logger.info("Successfully downloaded entire torrent {}" .format(self._filename)) def peer_interested(self, peer): pass def peer_not_interested(self, peer): pass def peer_request(self, peer, index, begin, length): pass def peer_canceled(self, peer, index, begin, length): pass # Reactor callback def timer_event(self): self._reactor.callLater(_TIMER_INTERVAL, self.timer_event) self._tick += 1 # For any peers that have been interested but unchoked for an # excessive period of time, stop being interested, free up assigned # piece and connect to another peer for peer, (_, _, _, tick) in self._interested.items(): if tick + 4 == self._tick: logger.debug("Timed out on interest for peer {}" .format(str(peer.addr()))) peer.not_interested() del self._interested[peer] self._connect_to_peers(1) # For any peer that has an outstanding request for an excessive period # of time, resend the request message in case it got lost or is being # ignored for peer, (index, offset, sha1, tick, retries) \ in self._requesting.items(): if tick + 5 == self._tick: logger.debug("Timed out on request for peer {}" .format(str(peer.addr()))) if retries < _MAX_RETRIES: self._requesting[peer] = (index, offset, sha1, self._tick, retries+1) self._request(peer) else: logger.debug("Giving up on peer {}" .format(str(peer.addr()))) self._partial.append((index, offset, sha1)) del self._requesting[peer] peer.not_interested() self._connect_to_peers(1)
    def __init__(self, client, filename, port, peer_id):
        """Set up torrent state, parse the metainfo, contact the tracker and
        start connecting to peers.

        Raises TorrentMgrError if the .torrent file cannot be read/parsed or
        the tracker cannot be reached.
        """
        self._client = client
        self._filename = filename
        self._port = port
        self._peer_id = peer_id

        # _peers is a list of peers that the TorrentMgr is trying
        # to communicate with
        self._peers = []

        # _bitfields is a dictionary mapping peers to a bitfield of the pieces
        # each has
        self._bitfields = {}

        try:
            self._metainfo = Metainfo(filename)
        except (IOError, ValueError) as err:
            # IOError carries strerror; ValueError carries message (Python 2).
            if isinstance(err, IOError):
                message = err.strerror+' ('+filename+')'
            else:
                message = err.message+' ('+filename+')'
            logger.error(message)
            raise TorrentMgrError(message)

        # _have is the bitfield for this torrent. It is initialized to reflect
        # which pieces are already available on disk.
        self._filemgr = FileMgr(self._metainfo)
        self._have = self._filemgr.have()

        try:
            self._tracker_proxy = TrackerProxy(self._metainfo, self._port,
                                               self._peer_id)
        except TrackerError as err:
            logger.critical("Could not connect to tracker at {}"
                            .format(self._metainfo.announce))
            logger.debug(" TrackerError: {}".format(err.message))
            raise TorrentMgrError(err.message)

        # _needed is a dictionary of pieces which are still needed.
        # The value for each piece is a tuple of the number of peers which
        # have the piece and a list of those peers.
        self._needed = {piece: (0, [])
                        for piece in list(self._have.findall('0b0'))}

        # _interested is a dictionary of peers to whom interest has been
        # expressed.  The value for each peer is a tuple of the piece that
        # has been reserved for the peer, the number of bytes of the piece that
        # have already been received, the sha1 hash of the bytes received so
        # far and the value of the tick at the time interest was expressed.
        self._interested = {}

        # _requesting is a dictionary of peers to whom a block request has
        # been made.  The value for each peer is a tuple of the piece that is
        # being requested, the number of bytes that have already been
        # received, the sha1 hash of the bytes received so far, the value of
        # the tick at the time the request was made and the number of retries
        # that have been attempted
        self._requesting = {}

        # _partial is a list which tracks pieces that were interrupted while
        # being downloaded.  Each entry is a tuple containing the index of the
        # piece, the number of bytes received so far and the sha1 hash of
        # those bytes.
        self._partial = []

        self._reactor = Reactor()
        self._reactor.schedule_timer(_TIMER_INTERVAL, self.timer_event)
        # _tick counts timer intervals; used for interest/request timeouts.
        self._tick = 1

        print "Starting to serve torrent {}".format(filename)

        self._connect_to_peers(20)
def setup_class(self): self.singlefile = Metainfo("ubuntu.torrent") self.multifile = Metainfo("multi.torrent")
    def __init__(self, client, filename, port, peer_id):
        """Initialize torrent state from the metainfo file, register with the
        tracker, and begin connecting to peers.

        Raises TorrentMgrError on a bad .torrent file or tracker failure.
        """
        self._client = client
        self._filename = filename
        self._port = port
        self._peer_id = peer_id

        # _peers is a list of peers that the TorrentMgr is trying
        # to communicate with
        self._peers = []

        # _bitfields is a dictionary mapping peers to a bitfield of the pieces
        # each has
        self._bitfields = {}

        try:
            self._metainfo = Metainfo(filename)
        except (IOError, ValueError) as err:
            # IOError exposes strerror; ValueError exposes message (Python 2).
            if isinstance(err, IOError):
                message = err.strerror + ' (' + filename + ')'
            else:
                message = err.message + ' (' + filename + ')'
            logger.error(message)
            raise TorrentMgrError(message)

        # _have is the bitfield for this torrent. It is initialized to reflect
        # which pieces are already available on disk.
        self._filemgr = FileMgr(self._metainfo)
        self._have = self._filemgr.have()

        try:
            self._tracker_proxy = TrackerProxy(self._metainfo, self._port,
                                               self._peer_id)
        except TrackerError as err:
            logger.critical("Could not connect to tracker at {}".format(
                self._metainfo.announce))
            logger.debug(" TrackerError: {}".format(err.message))
            raise TorrentMgrError(err.message)

        # _needed is a dictionary of pieces which are still needed.
        # The value for each piece is a tuple of the number of peers which
        # have the piece and a list of those peers.
        self._needed = {
            piece: (0, [])
            for piece in list(self._have.findall('0b0'))
        }

        # _interested is a dictionary of peers to whom interest has been
        # expressed.  The value for each peer is a tuple of the piece that
        # has been reserved for the peer, the number of bytes of the piece that
        # have already been received, the sha1 hash of the bytes received so
        # far and the value of the tick at the time interest was expressed.
        self._interested = {}

        # _requesting is a dictionary of peers to whom a block request has
        # been made.  The value for each peer is a tuple of the piece that is
        # being requested, the number of bytes that have already been
        # received, the sha1 hash of the bytes received so far, the value of
        # the tick at the time the request was made and the number of retries
        # that have been attempted
        self._requesting = {}

        # _partial is a list which tracks pieces that were interrupted while
        # being downloaded.  Each entry is a tuple containing the index of the
        # piece, the number of bytes received so far and the sha1 hash of
        # those bytes.
        self._partial = []

        self._reactor = Reactor()
        self._reactor.schedule_timer(_TIMER_INTERVAL, self.timer_event)
        # Tick counter for the periodic timer; drives timeout detection.
        self._tick = 1

        print "Starting to serve torrent {}".format(filename)

        self._connect_to_peers(20)
class TorrentMgr(object):
    """Manages the download of a single torrent: tracks which pieces are
    needed, assigns pieces to peers, verifies piece hashes and writes
    verified data to disk via a FileMgr.  Acts as the callback target for
    PeerProxy events and for the Reactor timer."""

    def __init__(self, client, filename, port, peer_id):
        """Parse the metainfo, contact the tracker, and start connecting
        to peers.  Raises TorrentMgrError on metainfo or tracker failure."""
        self._client = client
        self._filename = filename
        self._port = port
        self._peer_id = peer_id

        # _peers is a list of peers that the TorrentMgr is trying
        # to communicate with
        self._peers = []

        # _bitfields is a dictionary mapping peers to a bitfield of the pieces
        # each has
        self._bitfields = {}

        try:
            self._metainfo = Metainfo(filename)
        except (IOError, ValueError) as err:
            # IOError exposes strerror; ValueError exposes message (Python 2).
            if isinstance(err, IOError):
                message = err.strerror + ' (' + filename + ')'
            else:
                message = err.message + ' (' + filename + ')'
            logger.error(message)
            raise TorrentMgrError(message)

        # _have is the bitfield for this torrent. It is initialized to reflect
        # which pieces are already available on disk.
        self._filemgr = FileMgr(self._metainfo)
        self._have = self._filemgr.have()

        try:
            self._tracker_proxy = TrackerProxy(self._metainfo, self._port,
                                               self._peer_id)
        except TrackerError as err:
            logger.critical("Could not connect to tracker at {}".format(
                self._metainfo.announce))
            logger.debug(" TrackerError: {}".format(err.message))
            raise TorrentMgrError(err.message)

        # _needed is a dictionary of pieces which are still needed.
        # The value for each piece is a tuple of the number of peers which
        # have the piece and a list of those peers.
        self._needed = {
            piece: (0, [])
            for piece in list(self._have.findall('0b0'))
        }

        # _interested is a dictionary of peers to whom interest has been
        # expressed.  The value for each peer is a tuple of the piece that
        # has been reserved for the peer, the number of bytes of the piece
        # that have already been received, the sha1 hash of the bytes
        # received so far and the value of the tick at the time interest was
        # expressed.
        self._interested = {}

        # _requesting is a dictionary of peers to whom a block request has
        # been made.  The value for each peer is a tuple of the piece that is
        # being requested, the number of bytes that have already been
        # received, the sha1 hash of the bytes received so far, the value of
        # the tick at the time the request was made and the number of retries
        # that have been attempted
        self._requesting = {}

        # _partial is a list which tracks pieces that were interrupted while
        # being downloaded.  Each entry is a tuple containing the index of
        # the piece, the number of bytes received so far and the sha1 hash of
        # those bytes.
        self._partial = []

        self._reactor = Reactor()
        self._reactor.schedule_timer(_TIMER_INTERVAL, self.timer_event)
        # Tick counter driving interest/request timeouts in timer_event.
        self._tick = 1

        print "Starting to serve torrent {}".format(filename)

        self._connect_to_peers(20)

    def _connect_to_peers(self, n):
        # Get addresses of n peers from the tracker and try to establish
        # a connection with each
        addrs = self._tracker_proxy.get_peers(n)
        for addr in addrs:
            peer = PeerProxy(self, self._peer_id, (addr['ip'], addr['port']),
                             info_hash=self._metainfo.info_hash)
            self._peers.append(peer)
            # Until the peer sends its bitfield, assume it has no pieces.
            self._bitfields[peer] = BitArray(self._metainfo.num_pieces)

    def _remove_peer(self, peer):
        # Clean up references to the peer in various data structures
        self._peers.remove(peer)
        # Decrement availability counts for every piece this peer had.
        pieces = list(self._bitfields[peer].findall('0b1'))
        for piece in pieces:
            if piece in self._needed:
                occurences, peers = self._needed[piece]
                if peer in peers:
                    peers.remove(peer)
                    self._needed[piece] = (occurences - 1, peers)
        del self._bitfields[peer]
        if peer in self._interested:
            del self._interested[peer]
        elif peer in self._requesting:
            # If the peer is in the middle of downloading a piece, save
            # the state in the partial list
            index, offset, sha1, _, _ = self._requesting[peer]
            self._partial.append((index, offset, sha1))
            del self._requesting[peer]

    def _rarest(self):
        # Returns a list of tuples which includes a piece index sorted by
        # the number of peers which have the piece in ascending order
        # (rarest-first selection; pieces no peer has are excluded).
        return sorted([(occurences, peers, index)
                       for (index, (occurences, peers)) in
                       self._needed.items() if occurences != 0])

    def _show_interest(self, peer):
        # Send an interested message if not already sent; if the peer is not
        # choking us, start requesting immediately.
        # NOTE(review): nesting reconstructed from collapsed source — the
        # unchoked check is treated as a sibling of the interest check;
        # confirm against upstream history.
        if not peer.is_interested():
            logger.debug("Expressing interest in peer {}".format(
                str(peer.addr())))
            peer.interested()
        if not peer.is_peer_choked():
            self._request(peer)

    def _check_interest(self, peer):
        # If the peer is not already interested or requesting, identify a
        # piece for it to download and show interest to the peer.
        if peer not in self._interested and peer not in self._requesting:
            # Compute the set of needed pieces which the peer has that are
            # not already designated for another peer
            needed = self._have.copy()
            needed.invert()
            of_interest = list(
                (needed & self._bitfields[peer]).findall('0b1'))
            dont_consider = [i for i, _, _, _ in self._interested.values()]
            dont_consider.extend(
                [i for i, _, _, _, _ in self._requesting.values()])

            # When there are potential pieces for the peer to download, give
            # preference to a piece that has already been partially
            # downloaded followed by the rarest available piece
            if len(of_interest) > 0:
                for index, offset, sha1 in self._partial:
                    if index in of_interest:
                        self._partial.remove((index, offset, sha1))
                        self._interested[peer] = (index, offset, sha1,
                                                  self._tick)
                        self._show_interest(peer)
                        return
                for _, _, index in self._rarest():
                    if index in of_interest and index not in dont_consider:
                        self._interested[peer] = (index, 0, hashlib.sha1(),
                                                  self._tick)
                        self._show_interest(peer)
                        return

            # If there is no further piece for a peer which was previously
            # interested to download, make it not interested and connect to
            # another peer
            if peer not in self._interested and peer.is_interested():
                logger.debug("Expressing lack of interest in peer {}".format(
                    str(peer.addr())))
                peer.not_interested()
                self._connect_to_peers(1)

    def _request(self, peer):
        # Move the peer from the interested state to the requesting state
        # (if needed) and request the next block of its assigned piece.
        if peer in self._interested:
            index, offset, sha1, _ = self._interested[peer]
            del self._interested[peer]
            self._requesting[peer] = (index, offset, sha1, self._tick, 0)
        index, received_bytes, _, _, _ = self._requesting[peer]
        bytes_to_request = self._bytes_to_request(index, received_bytes)
        logger.debug("Requesting pc: {} off: {} len: {} from {}".format(
            index, received_bytes, bytes_to_request, str(peer.addr())))
        peer.request(index, received_bytes, bytes_to_request)

    def _is_last_piece(self, index):
        # True for the final (possibly short) piece of the torrent.
        return index == self._metainfo.num_pieces - 1

    def _length_of_last_piece(self):
        # The last piece holds whatever remains after the full-size pieces.
        return (self._metainfo.total_length -
                (self._metainfo.num_pieces - 1) *
                self._metainfo.piece_length)

    def _length_of_piece(self, index):
        # Length in bytes of piece `index`.
        if self._is_last_piece(index):
            return self._length_of_last_piece()
        else:
            return self._metainfo.piece_length

    def _in_last_block(self, index, offset):
        # True when `offset` falls inside the final (possibly short) block
        # of piece `index`.
        if self._is_last_piece(index):
            piece_length = self._length_of_last_piece()
        else:
            piece_length = self._metainfo.piece_length
        return piece_length - offset < _BLOCK_SIZE

    def _bytes_to_request(self, index, offset):
        # Full blocks except for the tail of a piece, which may be shorter.
        if not self._in_last_block(index, offset):
            return _BLOCK_SIZE
        else:
            return self._length_of_piece(index) - offset

    def info_hash(self):
        # The torrent's info hash as parsed from the metainfo.
        return self._metainfo.info_hash

    # PeerProxy callbacks

    def get_bitfield(self):
        # Bitfield of pieces this client already has (sent to peers).
        return self._have

    def peer_unconnected(self, peer):
        # Connection to the peer was lost; replace it with a new peer.
        logger.info("Peer {} is unconnected".format(str(peer.addr())))
        self._remove_peer(peer)
        self._connect_to_peers(1)

    def peer_bitfield(self, peer, bitfield):
        # Validate the bitfield: it must cover all pieces and any spare
        # padding bits beyond num_pieces must be zero.
        length = len(bitfield)
        if (length < self._metainfo.num_pieces or
                (length > self._metainfo.num_pieces and
                 bitfield[self._metainfo.num_pieces:length].any(1))):
            logger.debug("Invalid bitfield from peer {}".format(
                str(peer.addr())))
            peer.drop_connection()
            self._remove_peer(peer)
            self._connect_to_peers(1)
            return

        # Set the peer's bitfield and updated needed to reflect which pieces
        # the peer has
        logger.debug("Peer at {} sent bitfield".format(str(peer.addr())))
        self._bitfields[peer] = bitfield[0:self._metainfo.num_pieces]
        pieces = list(self._bitfields[peer].findall('0b1'))
        for piece in pieces:
            if piece in self._needed:
                occurences, peers = self._needed[piece]
                if peer not in peers:
                    peers.append(peer)
                    self._needed[piece] = (occurences + 1, peers)

        # Check whether there may be interest obtaining a piece from this
        # peer
        self._check_interest(peer)

    def peer_has(self, peer, index):
        # Update the peer's bitfield and needed to reflect the availability
        # of the piece
        logger.debug("Peer at {} has piece {}".format(str(peer.addr()),
                                                      index))
        if index < self._metainfo.num_pieces:
            self._bitfields[peer][index] = 1
        else:
            # Out-of-range piece index from the peer is a protocol error.
            raise IndexError
        if index in self._needed:
            occurences, peers = self._needed[index]
            if peer not in peers:
                peers.append(peer)
                self._needed[index] = (occurences + 1, peers)

        # Check whether there may be interest obtaining a piece from this
        # peer
        self._check_interest(peer)

    def peer_choked(self, peer):
        # The peer choked us; release its reserved/in-flight piece.
        logger.debug("Peer {} choked".format(str(peer.addr())))
        if peer in self._interested:
            del self._interested[peer]
        elif peer in self._requesting:
            # When choked in the middle of obtaining a piece, save the
            # progress in the partial list
            index, offset, sha1, _, _ = self._requesting[peer]
            self._partial.append((index, offset, sha1))
            del self._requesting[peer]

    def peer_unchoked(self, peer):
        # The peer unchoked us; begin requesting its reserved piece, if any.
        logger.debug("Peer {} unchoked".format(str(peer.addr())))
        if peer in self._interested:
            self._request(peer)

    def peer_sent_block(self, peer, index, begin, buf):
        if peer not in self._requesting:
            # If a peer is very slow in responding, a block could come after
            # it has timed out.  Just ignore the data at this point and
            # ignore the slow peer
            logger.debug(
                "Received block from peer {} which has timed out".format(
                    str(peer.addr())))
            return
        piece, received_bytes, sha1, _, _ = self._requesting[peer]
        # Only the next expected in-order block is accepted.
        if piece == index and begin == received_bytes:
            # When the next expected block is received, update the hash value
            # and write the block to file
            sha1.update(buf)
            self._filemgr.write_block(index, begin, buf)
            self._requesting[peer] = (piece, received_bytes + len(buf), sha1,
                                      self._tick, 0)
            if received_bytes + len(buf) < self._length_of_piece(index):
                # Request the next block in the piece
                self._request(peer)
            else:
                # On receipt of the last block in the piece, verify the hash
                # and update the records to reflect receipt of the piece
                if sha1.digest() == self._metainfo.piece_hash(index):
                    logger.info("Successfully got piece {} from {}".format(
                        index, str(peer.addr())))
                    del self._needed[index]
                    percent = 100 * (
                        1 - (len(self._needed) /
                             float(self._metainfo.num_pieces)))
                    print "{0}: Downloaded {1:1.4f}%".format(
                        self._filename, percent)
                    self._have[index] = 1
                else:
                    # Hash mismatch: piece remains in _needed for retry.
                    logger.info("Unsuccessfully got piece {} from {}".format(
                        index, str(peer.addr())))
                del self._requesting[peer]
                if self._needed != {}:
                    # Try to find another piece for this peer to get
                    self._check_interest(peer)
                else:
                    logger.info(
                        "Successfully downloaded entire torrent {}".format(
                            self._filename))
                    self._client.download_complete(self._filename)

    # The four callbacks below are no-ops: this class only downloads and
    # does not serve pieces to peers.
    def peer_interested(self, peer):
        pass

    def peer_not_interested(self, peer):
        pass

    def peer_request(self, peer, index, begin, length):
        pass

    def peer_canceled(self, peer, index, begin, length):
        pass

    # Reactor callback
    def timer_event(self):
        # Periodic housekeeping; re-arms itself every interval.
        self._reactor.schedule_timer(_TIMER_INTERVAL, self.timer_event)
        self._tick += 1

        # For any peers that have been interested but unchoked for an
        # excessive period of time, stop being interested, free up assigned
        # piece and connect to another peer
        # NOTE(review): mutates the dicts while iterating items() — fine on
        # Python 2 (items() returns a list), would break on Python 3.
        for peer, (_, _, _, tick) in self._interested.items():
            if tick + 4 == self._tick:
                logger.debug("Timed out on interest for peer {}".format(
                    str(peer.addr())))
                peer.not_interested()
                del self._interested[peer]
                self._connect_to_peers(1)

        # For any peer that has an outstanding request for an excessive
        # period of time, resend the request message in case it got lost or
        # is being ignored
        for peer, (index, offset, sha1, tick, retries) \
                in self._requesting.items():
            if tick + 5 == self._tick:
                logger.debug("Timed out on request for peer {}".format(
                    str(peer.addr())))
                if retries < _MAX_RETRIES:
                    self._requesting[peer] = (index, offset, sha1, self._tick,
                                              retries + 1)
                    self._request(peer)
                else:
                    # Too many retries: abandon the peer, keep piece
                    # progress, and find a replacement peer.
                    self._partial.append((index, offset, sha1))
                    del self._requesting[peer]
                    peer.not_interested()
                    self._connect_to_peers(1)
from torrent import Torrent from metainfo import Metainfo if __name__ == '__main__': torrent_name = './resource/Haywyre-Panorama_Form.torrent' torrent_name = './resource/Berklee Online.torrent' torrent_name = './resource/BLUMBROS X MAKJ - LS6.torrent' #metainfo = Metainfo(torrent_name) #print(metainfo.info) torrent = Torrent(Metainfo(torrent_name)) torrent.start_torrent()
    def initialize(self):
        """
        initialize() returns a deferred which fires when initialization of
        the TorrentMgr is complete or an error has occurred.

        On metainfo failure the deferred errbacks with a TorrentMgrError;
        calling initialize() twice also errbacks.  On tracker success the
        manager transitions to the Initialized state.
        """
        # Guard against re-initialization.
        if self._state != self._States.Uninitialized:
            error = "TorrentMgr can't be reinitialized"
            logger.debug(error)
            d = Deferred()
            d.errback(TorrentMgrError(error))
            return d

        try:
            self._metainfo = Metainfo(self._filename)
        except ValueError as err:
            # Malformed .torrent content.
            logger.error(err)
            d = Deferred()
            d.errback(TorrentMgrError(err.message))
            return d
        except IOError as err:
            # File could not be read.
            logger.error(err)
            d = Deferred()
            d.errback(TorrentMgrError(err.strerror))
            return d

        # _peers is a list of peers that the TorrentMgr is trying
        # to communicate with
        self._peers = []

        # _bitfields is a dictionary mapping peers to a bitfield of the
        # pieces each has
        self._bitfields = {}

        # _have is the bitfield for this torrent. It is initialized to
        # reflect which pieces are already available on disk.
        self._filemgr = FileMgr(self._metainfo)
        self._have = self._filemgr.have()

        # _needed is a dictionary of pieces which are still needed.
        # The value for each piece is a tuple of the number of peers which
        # have the piece and a list of those peers.
        self._needed = {
            piece: (0, [])
            for piece in list(self._have.findall('0b0'))
        }

        # _interested is a dictionary of peers to whom interest has been
        # expressed.  The value for each peer is a tuple of the piece that
        # has been reserved for the peer, the number of bytes of the piece
        # that have already been received, the sha1 hash of the bytes
        # received so far and the value of the tick at the time interest was
        # expressed.
        self._interested = {}

        # _requesting is a dictionary of peers to whom a block request has
        # been made.  The value for each peer is a tuple of the piece that is
        # being requested, the number of bytes that have already been
        # received, the sha1 hash of the bytes received so far, the value of
        # the tick at the time the request was made and the number of retries
        # that have been attempted
        self._requesting = {}

        # _partial is a list which tracks pieces that were interrupted while
        # being downloaded.  Each entry is a tuple containing the index of
        # the piece, the number of bytes received so far and the sha1 hash of
        # those bytes.
        self._partial = []

        self._tracker_proxy = TrackerProxy(self._metainfo, self._port,
                                           self._peer_id)

        def success(result):
            # Tracker contact succeeded; mark the manager ready.
            self._state = self._States.Initialized

        def failure(failure):
            # Tracker contact failed; surface the error through the deferred.
            logger.critical("Could not connect to tracker at {}".format(
                self._metainfo.announce))
            message = failure.value.message
            logger.debug(" Tracker Error: {}".format(message))
            raise TorrentMgrError(message)

        return self._tracker_proxy.start().addCallbacks(success, failure)
    def __init__(self, ip_addr, port, filename):
        """Create the client: parse the torrent, announce to the local
        tracker on port 6969, then accept and answer peer handshakes.

        NOTE(review): the accept loop at the bottom never returns, so this
        constructor blocks forever; a later revision of this class moves the
        loop onto a daemon thread.  Also, unlike that revision, the tracker
        socket here does not set SO_REUSEADDR — confirm whether that is
        intentional.
        """
        #list of peers (and connection info) that this client is connected to
        self.connection_list = list()
        self.listen_list = list()
        self.metainfo = Metainfo(filename)
        self.filename = filename
        self.ip_addr = ip_addr
        self.port = port
        # 20 random bytes, as required for a peer id.
        self.peer_id = os.urandom(20)
        self.info_hash = self.metainfo.info_hash
        self.uploaded = 0
        self.downloaded = 0

        #if client has file, set left to 0 and bitfield to full
        self.tracker_socket = socket.socket(socket.AF_INET,
                                            socket.SOCK_STREAM)
        # Tracker is assumed to run locally on port 6969.
        self.tracker_socket.connect((socket.gethostbyname('localhost'),
                                     6969))
        self.bitfield = BitArray(self.metainfo.num_pieces)
        if(self.check_for_file()):
            self.bitfield.set(True)
            self.left = 0
        else:
            self.bitfield.set(False)
            self.left = self.metainfo.file_length
        # Announce to the tracker (0 = initial announce event, presumably;
        # verify against send_GET_request).
        self.send_GET_request(0)

        # Socket on which other peers connect to us.
        self.listening_socket = socket.socket(socket.AF_INET,
                                              socket.SOCK_STREAM)
        self.listening_socket.setsockopt(socket.SOL_SOCKET,
                                         socket.SO_REUSEADDR, 1)
        self.listening_socket.bind((socket.gethostbyname(self.ip_addr),
                                    self.port))
        self.listening_socket.listen(5)
        # while True:
        #     (opentracker_socket, opentracker_addr) = self.socket.accept()
        #     response = socket.recv(4096)
        #     print(response)

        # Read and parse the HTTP-style tracker response; status code is the
        # second whitespace-separated token of the status line.
        tracker_response = self.tracker_socket.recv(2048)
        print(tracker_response)
        status = re.split(" ", tracker_response.decode('utf-8'))
        if(status[1] != "200"):
            print("ERROR")
        else:
            print("parsing tracker response...")
            self.response = tracker_response
            self.handle_tracker_response()

        #listen for handshakes
        i = 0
        while True:
            i = i + 1
            peer_connection, address = self.listening_socket.accept()
            print(i)
            buf = peer_connection.recv(1024)
            print("maybe receive stuff")
            #print(len(buf))
            #if message is handshake: 1-byte pstrlen (18), the protocol
            # string, 8 reserved zero bytes, then the 20-byte info hash.
            if buf[0]==18 and buf[1:19] == b'URTorrent protocol' and buf[19:27] == b'\x00\x00\x00\x00\x00\x00\x00\x00' and buf[27:47] == self.info_hash.digest():
            #if len(buf) > 0 and "URTorrent" in str(buf):
                #ip = connection_socket.getsockname()[0]
                #port = connection_socket.getsockname()[1]
                #connection_socket.close()
                print("Received valid handshake", buf)
                # Reply with our own handshake.
                peer_connection.send(self.generate_handshake_msg())
                #split off thread to listen for piece requests on this socket
                peer_connection.settimeout(120)
                threading.Thread(target = self.listen_to_peer,
                                 args = (peer_connection, address)).start()