def make_meta_file(path, url, piece_length, progress=None, title=None, comment=None,
                   safe=None, content_type=None, target=None, webseeds=None, name=None,
                   private=False, created_by=None, trackers=None):
    """Create a .torrent metafile for ``path`` and write it to disk.

    Args:
        path (str): File or directory to build the torrent from.
        url (str): Tracker announce URL (stored as 'announce').
        piece_length: Piece size value passed through to makeinfo().
        progress (func, optional): Hash-progress callback; when None, a
            RemoteFileProgress is used if an RPC session exists, else a no-op.
        target (str, optional): Output path; defaults to path + '.torrent'.
        webseeds (list, optional): Webseed URLs; '.php' URLs become
            'httpseeds', all others 'url-list'.
        trackers (list, optional): Tiered tracker list for 'announce-list'.
    """
    data = {'creation date': int(gmtime())}
    if url:
        data['announce'] = url.strip()
    a, b = os.path.split(path)
    if not target:
        if b == '':
            f = a + '.torrent'
        else:
            f = os.path.join(a, b + '.torrent')
    else:
        f = target
    if progress is None:
        session_id = component.get("RPCServer").get_session_id()
        if not session_id:
            progress = dummy
        else:
            # Reuse the session id fetched above instead of issuing a
            # second identical RPC call.
            progress = RemoteFileProgress(session_id)
    info = makeinfo(path, piece_length, progress, name, content_type, private)
    data['info'] = info
    if title:
        data['title'] = title.encode("utf8")
    if comment:
        data['comment'] = comment.encode("utf8")
    if safe:
        data['safe'] = safe.encode("utf8")
    httpseeds = []
    url_list = []
    if webseeds:
        for webseed in webseeds:
            if webseed.endswith(".php"):
                httpseeds.append(webseed)
            else:
                url_list.append(webseed)
    if url_list:
        data['url-list'] = url_list
    if httpseeds:
        data['httpseeds'] = httpseeds
    if created_by:
        data['created by'] = created_by.encode("utf8")
    if trackers and (len(trackers[0]) > 1 or len(trackers) > 1):
        data['announce-list'] = trackers
    data["encoding"] = "UTF-8"
    # Open the output only after hashing succeeded; the original used the
    # Python-2-only file() builtin and leaked the handle if bencode() raised.
    with open(f, 'wb') as h:
        h.write(bencode(data))
def filedata(self):
    """The contents of the .torrent file.

    Returns:
        bytes: The bencoded metainfo.
    """
    # Lazily bencode the metainfo on first access and memoize it.
    if self._filedata:
        return self._filedata
    encoded = bencode.bencode(self._metainfo)
    self._filedata = encoded
    return encoded
def test_add_torrent_file(self):
    """Core.add_torrent_file() should return the torrent's info-hash."""
    options = {}
    filename = os.path.join(os.path.dirname(__file__), "test.torrent")
    import base64
    # Read the torrent once in binary mode and close the handle; the
    # original leaked two open file objects and read the file twice in
    # text mode (which mangles binary data on Windows).
    with open(filename, 'rb') as _file:
        filedump = _file.read()
    torrent_id = self.core.add_torrent_file(
        filename, base64.encodestring(filedump), options)
    # Get the info hash from the test.torrent
    from deluge.bencode import bdecode, bencode
    info_hash = sha(bencode(bdecode(filedump)["info"])).hexdigest()
    self.assertEquals(torrent_id, info_hash)
def test_add_torrent_file(self):
    """Core.add_torrent_file() should return the torrent's info-hash."""
    options = {}
    filename = common.get_test_data_file('test.torrent')
    # Read the fixture once and reuse the bytes for both the upload and
    # the expected info-hash (the original re-read the file from disk).
    with open(filename, 'rb') as _file:
        filedata = _file.read()
    # encodebytes replaces the encodestring alias removed in Python 3.9.
    filedump = base64.encodebytes(filedata)
    torrent_id = yield self.core.add_torrent_file(filename, filedump, options)

    # Get the info hash from the test.torrent
    from deluge.bencode import bdecode, bencode
    info_hash = sha(bencode(bdecode(filedata)[b'info'])).hexdigest()
    # assertEqual: assertEquals is a deprecated alias.
    self.assertEqual(torrent_id, info_hash)
def test_add_torrent_file(self):
    """Core.add_torrent_file() should return the torrent's info-hash."""
    options = {}
    filename = "../test.torrent"
    import base64
    # Read once in binary mode and close the handle; the original leaked
    # two open file objects and read the torrent from disk twice.
    with open(filename, 'rb') as _file:
        filedump = _file.read()
    torrent_id = self.core.add_torrent_file(
        filename, base64.encodestring(filedump), options)
    # Get the info hash from the test.torrent
    from deluge.bencode import bdecode, bencode
    info_hash = sha(bencode(bdecode(filedump)["info"])).hexdigest()
    self.assertEquals(torrent_id, info_hash)
def export_torrents(torrents, out_dir):
    """Convert json torrent info to deluge state dir format.

    Not incremental. Output directory is created and populated from
    scratch."""
    tm = TorrentManagerState()
    fr = OrderedDict()
    for torrent_id, t in sorted(torrents.items()):
        for k, v in t.iteritems():
            if k == "data":
                # Bencoded torrent data is binary: write with 'wb' so it
                # is not newline-mangled by text-mode I/O.
                with open(os.path.join(out_dir, torrent_id + ".torrent"), "wb") as fp:
                    fp.write(bencode.bencode(v))
            elif k == "state":
                v = unsort_dicts(v)
                tm.torrents.append(TorrentState(**v))
            elif k == "fastresume":
                fr[torrent_id] = bencode.bencode(v)
    # Pickle streams are binary too; dumping to a text-mode file corrupts
    # the payload on platforms that translate newlines.
    with open(os.path.join(out_dir, "torrents.state"), "wb") as fp:
        pickle.dump(tm, fp)
    with open(os.path.join(out_dir, "torrents.fastresume"), "wb") as fp:
        fp.write(bencode.bencode(fr))
def test_add_torrent_file(self):
    """Core.add_torrent_file() should return the torrent's info-hash."""
    options = {}
    filename = common.get_test_data_file('test.torrent')
    # Read the fixture once and reuse the bytes for both the upload and
    # the expected info-hash (the original re-read the file from disk).
    with open(filename, 'rb') as _file:
        filedata = _file.read()
    # encodebytes replaces the encodestring alias removed in Python 3.9.
    filedump = base64.encodebytes(filedata)
    torrent_id = yield self.core.add_torrent_file(filename, filedump, options)

    # Get the info hash from the test.torrent
    from deluge.bencode import bdecode, bencode
    info_hash = sha(bencode(bdecode(filedata)[b'info'])).hexdigest()
    # assertEqual: assertEquals is a deprecated alias.
    self.assertEqual(torrent_id, info_hash)
def test_prefetch_metadata(self):
    """A metadata_received alert resolves prefetch_metadata()'s deferred
    with the (info_hash, bencoded info-dict) pair for the magnet.
    """
    from deluge._libtorrent import lt

    # Build a real libtorrent torrent_info from the bundled fixture so the
    # alert handler has genuine metadata to hand back.
    with open(common.get_test_data_file('test.torrent'), 'rb') as _file:
        t_info = lt.torrent_info(lt.bdecode(_file.read()))
    mock_alert = mock.MagicMock()
    # The alert must report the same info-hash the magnet requested so the
    # pending prefetch deferred is matched.
    mock_alert.handle.info_hash = mock.MagicMock(
        return_value='ab570cdd5a17ea1b61e970bb72047de141bce173'
    )
    mock_alert.handle.get_torrent_info = mock.MagicMock(return_value=t_info)

    magnet = 'magnet:?xt=urn:btih:ab570cdd5a17ea1b61e970bb72047de141bce173'
    d = self.tm.prefetch_metadata(magnet, 30)
    self.tm.on_alert_metadata_received(mock_alert)

    # Expected result: the fixture's complete info-dict, re-bencoded.
    expected = (
        'ab570cdd5a17ea1b61e970bb72047de141bce173',
        bencode(
            {
                'piece length': 32768,
                'sha1': (
                    b'2\xce\xb6\xa8"\xd7\xf0\xd4\xbf\xdc^K\xba\x1bh'
                    b'\x9d\xc5\xb7\xac\xdd'
                ),
                'name': 'azcvsupdater_2.6.2.jar',
                'private': 0,
                'pieces': (
                    b'\xdb\x04B\x05\xc3\'\xdab\xb8su97\xa9u'
                    b'\xca<w\\\x1ef\xd4\x9b\x16\xa9}\xc0\x9f:\xfd'
                    b'\x97qv\x83\xa2"\xef\x9d7\x0by!\rl\xe5v\xb7'
                    b'\x18{\xf7/"P\xe9\x8d\x01D\x9e8\xbd\x16\xe3'
                    b'\xfb-\x9d\xaa\xbcM\x11\xba\x92\xfc\x13F\xf0'
                    b'\x1c\x86x+\xc8\xd0S\xa9\x90`\xa1\xe4\x82\xe8'
                    b'\xfc\x08\xf7\xe3\xe5\xf6\x85\x1c%\xe7%\n\xed'
                    b'\xc0\x1f\xa1;\x9a\xea\xcf\x90\x0c/F>\xdf\xdagA'
                    b'\xc42|\xda\x82\xf5\xa6b\xa1\xb8#\x80wI\xd8f'
                    b'\xf8\xbd\xacW\xab\xc3s\xe0\xbbw\xf2K\xbe\xee'
                    b'\xa8rG\xe1W\xe8\xb7\xc2i\xf3\xd8\xaf\x9d\xdc'
                    b'\xd0#\xf4\xc1\x12u\xcd\x0bE?:\xe8\x9c\x1cu'
                    b'\xabb(oj\r^\xd5\xd5A\x83\x88\x9a\xa1J\x1c?'
                    b'\xa1\xd6\x8c\x83\x9e&'
                ),
                'length': 307949,
                'name.utf-8': b'azcvsupdater_2.6.2.jar',
                'ed2k': b'>p\xefl\xfa]\x95K\x1b^\xc2\\;;e\xb7',
            }
        ),
    )
    self.assertEqual(expected, self.successResultOf(d))
def import_torrents(in_dir, out_dir):
    """Convert deluge and vuze torrent state dirs to json format.

    Can run incrementally. New torrent data just replaces old torrent
    data in json dir."""
    torrents = load_torrents(out_dir)
    for f in os.listdir(in_dir):
        if not f.endswith(".torrent"):
            continue
        torrent_path = os.path.join(in_dir, f)
        # 'rb' + with: bencoded data is binary and the original leaked
        # the file handle.
        with open(torrent_path, "rb") as fp:
            data = bencode.bdecode(fp.read())
        torrent_id = hashlib.sha1(bencode.bencode(data["info"])).hexdigest()
        torrent_name = f[:-len(".torrent")]
        # A hash-named torrent file must match its own info-hash.
        if re.match("^[0-9a-f]{40}$", torrent_name):
            check(torrent_name == torrent_id, torrent_name)
        t = torrents.setdefault(torrent_id, OrderedDict())
        t["data"] = data
        t.setdefault("download", OrderedDict()).setdefault("mtime", os.path.getmtime(torrent_path))
    state_file = os.path.join(in_dir, "torrents.state")
    if os.path.exists(state_file):
        with open(state_file, "rb") as fp:
            state = pickle.load(fp)
        check(set(obj_dict(state).keys()) == {"torrents"})
        default_torrent = TorrentState()
        for st in state.torrents:
            torrent_id = st.torrent_id
            t = torrents.setdefault(torrent_id, OrderedDict())
            t["state"] = tstate = sort_dicts(obj_dict(st))
            # Iterate a snapshot: keys are deleted while walking, which
            # would raise on a live dict view.
            for k, v in list(tstate.items()):
                # Drop values equal to the defaults to keep output minimal.
                if v == getattr(default_torrent, k):
                    del tstate[k]
    resume_file = os.path.join(in_dir, "torrents.fastresume")
    if os.path.exists(resume_file):
        with open(resume_file, "rb") as fp:
            resume = bencode.bdecode(fp.read())
        for torrent_id, rt in resume.iteritems():
            t = torrents.setdefault(torrent_id, OrderedDict())
            t["fastresume"] = bencode.bdecode(rt)
    # Sort dictionary keys
    for torrent_id, t in torrents.iteritems():
        sort_keys(t, ("data", "state", "fastresume", "download"))
    save_torrents(torrents, out_dir)
def save(self, torrent_path, progress=None):
    """Creates and saves the torrent file to `path`.

    Args:
        torrent_path (str): Location to save the torrent file.
        progress(func, optional): The function to be called when a piece
            is hashed. The provided function should be in the format
            `func(num_completed, num_pieces)`.

    Raises:
        InvalidPath: If the data_path has not been set.
    """
    if not self.data_path:
        raise InvalidPath('Need to set a data_path!')

    torrent = {'info': {}}

    if self.comment:
        torrent['comment'] = self.comment

    if self.private:
        torrent['info']['private'] = True

    if self.trackers:
        torrent['announce'] = self.trackers[0][0]
        torrent['announce-list'] = self.trackers
    else:
        torrent['announce'] = ''

    if self.webseeds:
        # '.php' seeds use the BEP-17 'httpseeds' key, everything else the
        # BEP-19 'url-list' key.
        httpseeds = []
        webseeds = []
        for w in self.webseeds:
            if w.endswith('.php'):
                httpseeds.append(w)
            else:
                webseeds.append(w)
        if httpseeds:
            torrent['httpseeds'] = httpseeds
        if webseeds:
            torrent['url-list'] = webseeds

    datasize = get_path_size(self.data_path)

    if self.piece_size:
        piece_size = self.piece_size * 1024
    else:
        # We need to calculate a piece size
        piece_size = 16384
        while (datasize // piece_size) > 1024 and piece_size < (8192 * 1024):
            piece_size *= 2

    # Calculate the number of pieces we will require for the data
    num_pieces = datasize // piece_size
    if datasize % piece_size:
        num_pieces += 1

    torrent['info']['piece length'] = piece_size
    torrent['info']['name'] = os.path.split(self.data_path)[1]

    # Create the info
    if os.path.isdir(self.data_path):
        files = []
        padding_count = 0
        # Collect a list of file paths and add padding files if necessary
        for (dirpath, dirnames, filenames) in os.walk(self.data_path):
            for index, filename in enumerate(filenames):
                size = get_path_size(
                    os.path.join(self.data_path, dirpath, filename))
                # Path components relative to data_path.
                p = dirpath[len(self.data_path):]
                p = p.lstrip('/')
                p = p.split('/')
                if p[0]:
                    p += [filename]
                else:
                    p = [filename]
                files.append((size, p))
                # Add a padding file if necessary
                if self.pad_files and (index + 1) < len(filenames):
                    left = size % piece_size
                    if left:
                        p = list(p)
                        p[-1] = '_____padding_file_' + str(padding_count)
                        files.append((piece_size - left, p))
                        padding_count += 1

        # Run the progress function with 0 completed pieces
        if progress:
            progress(0, num_pieces)

        fs = []
        pieces = []
        # Create the piece hashes
        buf = b''
        for size, path in files:
            path = [s.encode('UTF-8') for s in path]
            fs.append({b'length': size, b'path': path})
            if path[-1].startswith(b'_____padding_file_'):
                # Padding files are synthesized zeros, never read from disk.
                buf += b'\0' * size
                pieces.append(sha(buf).digest())
                buf = b''
                fs[-1][b'attr'] = b'p'
            else:
                with open(
                        os.path.join(self.data_path.encode('utf8'), *path),
                        'rb') as _file:
                    r = _file.read(piece_size - len(buf))
                    while r:
                        buf += r
                        if len(buf) == piece_size:
                            pieces.append(sha(buf).digest())
                            # Run the progress function if necessary
                            if progress:
                                progress(len(pieces), num_pieces)
                            buf = b''
                        else:
                            # Partial piece: carry the buffer into the
                            # next file.
                            break
                        r = _file.read(piece_size - len(buf))
        torrent['info']['files'] = fs
        if buf:
            # Hash the final, possibly short, piece.
            pieces.append(sha(buf).digest())
            if progress:
                progress(len(pieces), num_pieces)
            buf = ''
    elif os.path.isfile(self.data_path):
        torrent['info']['length'] = get_path_size(self.data_path)
        pieces = []
        with open(self.data_path, 'rb') as _file:
            r = _file.read(piece_size)
            while r:
                pieces.append(sha(r).digest())
                if progress:
                    progress(len(pieces), num_pieces)
                r = _file.read(piece_size)

    torrent['info']['pieces'] = b''.join(pieces)

    # Write out the torrent file
    with open(torrent_path, 'wb') as _file:
        _file.write(bencode(utf8_encode_structure(torrent)))
def make_meta_file(path, url, piece_length, progress=None, title=None, comment=None,
                   safe=None, content_type=None, target=None, webseeds=None, name=None,
                   private=False, created_by=None, trackers=None):
    """Create a .torrent metafile for ``path`` and write it to disk.

    Args:
        path (str): File or directory to build the torrent from.
        url (str): Tracker announce URL (stored as 'announce').
        piece_length: Piece size value passed through to makeinfo().
        progress (func, optional): Hash-progress callback; when None, a
            RemoteFileProgress is used if an RPC session exists, else a no-op.
        target (str, optional): Output path; defaults to path + '.torrent'.
        webseeds (list, optional): Webseed URLs; '.php' URLs become
            'httpseeds', all others 'url-list'.
        trackers (list, optional): Tiered tracker list for 'announce-list'.
    """
    data = {'creation date': int(gmtime())}
    if url:
        data['announce'] = url.strip()
    a, b = os.path.split(path)
    if not target:
        if b == '':
            f = a + '.torrent'
        else:
            f = os.path.join(a, b + '.torrent')
    else:
        f = target
    if progress is None:
        session_id = component.get("RPCServer").get_session_id()
        if not session_id:
            progress = dummy
        else:
            # Reuse the session id fetched above instead of issuing a
            # second identical RPC call.
            progress = RemoteFileProgress(session_id)
    info = makeinfo(path, piece_length, progress, name, content_type, private)
    data['info'] = info
    if title:
        data['title'] = title.encode("utf8")
    if comment:
        data['comment'] = comment.encode("utf8")
    if safe:
        data['safe'] = safe.encode("utf8")
    httpseeds = []
    url_list = []
    if webseeds:
        for webseed in webseeds:
            if webseed.endswith(".php"):
                httpseeds.append(webseed)
            else:
                url_list.append(webseed)
    if url_list:
        data['url-list'] = url_list
    if httpseeds:
        data['httpseeds'] = httpseeds
    if created_by:
        data['created by'] = created_by.encode("utf8")
    if trackers and (len(trackers[0]) > 1 or len(trackers) > 1):
        data['announce-list'] = trackers
    data["encoding"] = "UTF-8"
    # Open the output only after hashing succeeded; the original used the
    # Python-2-only file() builtin and leaked the handle if bencode() raised.
    with open(f, 'wb') as h:
        h.write(bencode(data))
log.warning("Unable to open %s: %s", filename, e) raise e else: self.__m_filedata = filedump def parse(self, filetree=1): if not self.__m_filedata: log.error("No data to process!") return try: self.__m_metadata = bencode.bdecode(self.__m_filedata) except Exception, e: log.warning("Failed to decode torrent data %s: %s", self.filename if self.filename else "", e) raise e self.__m_info_hash = sha(bencode.bencode(self.__m_metadata["info"])).hexdigest() # Get encoding from torrent file if available self.encoding = None if "encoding" in self.__m_metadata: self.encoding = self.__m_metadata["encoding"] elif "codepage" in self.__m_metadata: self.encoding = str(self.__m_metadata["codepage"]) if not self.encoding: self.encoding = "UTF-8" # Check if 'name.utf-8' is in the torrent and if not try to decode the string # using the encoding found. if "name.utf-8" in self.__m_metadata["info"]: self.__m_name = decode_string(self.__m_metadata["info"]["name.utf-8"]) else:
def __init__(self, filename, filetree=1):
    """Read and parse a .torrent file.

    Populates the info hash, display name, flat file list and the file
    tree (format selected by ``filetree``: 1 = legacy tuples, 2 = dicts).
    """
    # Get the torrent data from the torrent file
    try:
        log.debug('Attempting to open %s.', filename)
        with open(filename, 'rb') as _file:
            self.__m_filedata = _file.read()
    except IOError as ex:
        log.warning('Unable to open %s: %s', filename, ex)
        raise ex

    try:
        self.__m_metadata = bencode.bdecode(self.__m_filedata)
    except bencode.BTFailure as ex:
        log.warning('Failed to decode %s: %s', filename, ex)
        raise ex

    # The info-hash is the SHA1 of the re-bencoded 'info' dict.
    self.__m_info_hash = sha(bencode.bencode(self.__m_metadata['info'])).hexdigest()

    # Get encoding from torrent file if available
    self.encoding = None
    if 'encoding' in self.__m_metadata:
        self.encoding = self.__m_metadata['encoding']
    elif 'codepage' in self.__m_metadata:
        self.encoding = str(self.__m_metadata['codepage'])
    if not self.encoding:
        self.encoding = 'UTF-8'

    # Check if 'name.utf-8' is in the torrent and if not try to decode the string
    # using the encoding found.
    if 'name.utf-8' in self.__m_metadata['info']:
        self.__m_name = decode_bytes(self.__m_metadata['info']['name.utf-8'])
    else:
        self.__m_name = decode_bytes(self.__m_metadata['info']['name'], self.encoding)

    # Get list of files from torrent info
    paths = {}
    dirs = {}
    if 'files' in self.__m_metadata['info']:
        # Multi-file torrents are rooted under the torrent name.
        prefix = ''
        if len(self.__m_metadata['info']['files']) > 1:
            prefix = self.__m_name

        for index, f in enumerate(self.__m_metadata['info']['files']):
            if 'path.utf-8' in f:
                path = decode_bytes(os.path.join(prefix, *f['path.utf-8']))
                del f['path.utf-8']
            else:
                path = os.path.join(prefix, decode_bytes(os.path.join(*f['path']), self.encoding))
            f['path'] = path
            f['index'] = index
            # NOTE(review): str.encode('hex') is Python-2-only; on Python 3
            # these calls raise LookupError — confirm intended interpreter.
            if 'sha1' in f and len(f['sha1']) == 20:
                f['sha1'] = f['sha1'].encode('hex')
            if 'ed2k' in f and len(f['ed2k']) == 16:
                f['ed2k'] = f['ed2k'].encode('hex')
            if 'filehash' in f and len(f['filehash']) == 20:
                f['filehash'] = f['filehash'].encode('hex')

            paths[path] = f
            # Accumulate each ancestor directory's total length.
            dirname = os.path.dirname(path)
            while dirname:
                dirinfo = dirs.setdefault(dirname, {})
                dirinfo['length'] = dirinfo.get('length', 0) + f['length']
                dirname = os.path.dirname(dirname)

        if filetree == 2:
            def walk(path, item):
                if item['type'] == 'dir':
                    item.update(dirs[path])
                else:
                    item.update(paths[path])
                    item['download'] = True

            file_tree = FileTree2(list(paths))
            file_tree.walk(walk)
        else:
            def walk(path, item):
                if isinstance(item, dict):
                    return item
                return [paths[path]['index'], paths[path]['length'], True]

            file_tree = FileTree(paths)
            file_tree.walk(walk)
        self.__m_files_tree = file_tree.get_tree()
    else:
        # Single-file torrent: synthesize a one-entry tree.
        if filetree == 2:
            self.__m_files_tree = {
                'contents': {
                    self.__m_name: {
                        'type': 'file',
                        'index': 0,
                        'length': self.__m_metadata['info']['length'],
                        'download': True
                    }
                }
            }
        else:
            self.__m_files_tree = {
                self.__m_name: (0, self.__m_metadata['info']['length'], True)
            }

    # Flat file list used alongside the tree.
    self.__m_files = []
    if 'files' in self.__m_metadata['info']:
        prefix = ''
        if len(self.__m_metadata['info']['files']) > 1:
            prefix = self.__m_name

        for f in self.__m_metadata['info']['files']:
            self.__m_files.append({
                'path': f['path'],
                'size': f['length'],
                'download': True
            })
    else:
        self.__m_files.append({
            'path': self.__m_name,
            'size': self.__m_metadata['info']['length'],
            'download': True
        })
def save(self, torrent_path, progress=None):
    """Creates and saves the torrent file to `path`.

    Args:
        torrent_path (str): Location to save the torrent file.
        progress(func, optional): The function to be called when a piece
            is hashed. The provided function should be in the format
            `func(num_completed, num_pieces)`.

    Raises:
        InvalidPath: If the data_path has not been set.
    """
    if not self.data_path:
        raise InvalidPath('Need to set a data_path!')

    torrent = {
        'info': {}
    }

    if self.comment:
        torrent['comment'] = self.comment

    if self.private:
        torrent['info']['private'] = True

    if self.trackers:
        torrent['announce'] = self.trackers[0][0]
        torrent['announce-list'] = self.trackers
    else:
        torrent['announce'] = ''

    if self.webseeds:
        # '.php' seeds use 'httpseeds' (BEP-17), the rest 'url-list' (BEP-19).
        httpseeds = []
        webseeds = []
        for w in self.webseeds:
            if w.endswith('.php'):
                httpseeds.append(w)
            else:
                webseeds.append(w)
        if httpseeds:
            torrent['httpseeds'] = httpseeds
        if webseeds:
            torrent['url-list'] = webseeds

    datasize = get_path_size(self.data_path)

    if self.piece_size:
        piece_size = self.piece_size * 1024
    else:
        # We need to calculate a piece size
        piece_size = 16384
        while (datasize // piece_size) > 1024 and piece_size < (8192 * 1024):
            piece_size *= 2

    # Calculate the number of pieces we will require for the data
    num_pieces = datasize // piece_size
    if datasize % piece_size:
        num_pieces += 1

    torrent['info']['piece length'] = piece_size
    torrent['info']['name'] = os.path.split(self.data_path)[1]

    # Create the info
    if os.path.isdir(self.data_path):
        files = []
        padding_count = 0
        # Collect a list of file paths and add padding files if necessary
        for (dirpath, dirnames, filenames) in os.walk(self.data_path):
            for index, filename in enumerate(filenames):
                size = get_path_size(os.path.join(self.data_path, dirpath, filename))
                # Path components relative to data_path.
                p = dirpath[len(self.data_path):]
                p = p.lstrip('/')
                p = p.split('/')
                if p[0]:
                    p += [filename]
                else:
                    p = [filename]
                files.append((size, p))
                # Add a padding file if necessary
                if self.pad_files and (index + 1) < len(filenames):
                    left = size % piece_size
                    if left:
                        p = list(p)
                        p[-1] = '_____padding_file_' + str(padding_count)
                        files.append((piece_size - left, p))
                        padding_count += 1

        # Run the progress function with 0 completed pieces
        if progress:
            progress(0, num_pieces)

        fs = []
        pieces = []
        # Create the piece hashes
        buf = ''
        for size, path in files:
            path = [s.decode(sys.getfilesystemencoding()).encode('UTF-8') for s in path]
            fs.append({'length': size, 'path': path})
            if path[-1].startswith('_____padding_file_'):
                # Padding files are synthesized zeros, never read from disk.
                buf += '\0' * size
                pieces.append(sha(buf).digest())
                buf = ''
                fs[-1]['attr'] = 'p'
            else:
                with open(os.path.join(self.data_path, *path), 'rb') as _file:
                    r = _file.read(piece_size - len(buf))
                    while r:
                        buf += r
                        if len(buf) == piece_size:
                            pieces.append(sha(buf).digest())
                            # Run the progress function if necessary
                            if progress:
                                progress(len(pieces), num_pieces)
                            buf = ''
                        else:
                            # Partial piece: carry buffer into the next file.
                            break
                        r = _file.read(piece_size - len(buf))
        torrent['info']['files'] = fs
        if buf:
            # Hash the final, possibly short, piece.
            pieces.append(sha(buf).digest())
            if progress:
                progress(len(pieces), num_pieces)
            buf = ''
    elif os.path.isfile(self.data_path):
        torrent['info']['length'] = get_path_size(self.data_path)
        pieces = []
        with open(self.data_path, 'rb') as _file:
            r = _file.read(piece_size)
            while r:
                pieces.append(sha(r).digest())
                if progress:
                    progress(len(pieces), num_pieces)
                r = _file.read(piece_size)

    torrent['info']['pieces'] = b''.join(pieces)

    # Write out the torrent file
    with open(torrent_path, 'wb') as _file:
        _file.write(bencode(utf8_encode_structure(torrent)))
def save(self, torrent_path, progress=None):
    """
    Creates and saves the torrent file to `path`.

    :param torrent_path: where to save the torrent file
    :type torrent_path: string
    :param progress: a function to be called when a piece is hashed
    :type progress: function(num_completed, num_pieces)

    :raises InvalidPath: if the data_path has not been set
    """
    if not self.data_path:
        raise InvalidPath("Need to set a data_path!")

    torrent = {"info": {}}

    if self.comment:
        torrent["comment"] = self.comment.encode("UTF-8")

    if self.private:
        torrent["info"]["private"] = True

    if self.trackers:
        torrent["announce"] = self.trackers[0][0]
        torrent["announce-list"] = self.trackers
    else:
        torrent["announce"] = ""

    if self.webseeds:
        # '.php' seeds use 'httpseeds' (BEP-17), the rest 'url-list' (BEP-19).
        httpseeds = []
        webseeds = []
        for w in self.webseeds:
            if w.endswith(".php"):
                httpseeds.append(w)
            else:
                webseeds.append(w)
        if httpseeds:
            torrent["httpseeds"] = httpseeds
        if webseeds:
            torrent["url-list"] = webseeds

    datasize = get_path_size(self.data_path)

    if self.piece_size:
        # BUG FIX: the original read the unassigned local `piece_size`
        # (UnboundLocalError); the configured KiB size lives on self.
        piece_size = self.piece_size * 1024
    else:
        # We need to calculate a piece size
        piece_size = 16384
        while (datasize / piece_size) > 1024 and piece_size < (8192 * 1024):
            piece_size *= 2

    # Calculate the number of pieces we will require for the data
    num_pieces = datasize / piece_size
    if datasize % piece_size:
        num_pieces += 1

    torrent["info"]["piece length"] = piece_size

    # Create the info
    if os.path.isdir(self.data_path):
        torrent["info"]["name"] = os.path.split(self.data_path)[1]
        files = []
        padding_count = 0
        # Collect a list of file paths and add padding files if necessary
        for (dirpath, dirnames, filenames) in os.walk(self.data_path):
            for index, filename in enumerate(filenames):
                size = get_path_size(os.path.join(self.data_path, dirpath, filename))
                # Path components relative to data_path.
                p = dirpath[len(self.data_path):]
                p = p.lstrip("/")
                p = p.split("/")
                if p[0]:
                    p += [filename]
                else:
                    p = [filename]
                files.append((size, p))
                # Add a padding file if necessary
                if self.pad_files and (index + 1) < len(filenames):
                    left = size % piece_size
                    if left:
                        p = list(p)
                        p[-1] = "_____padding_file_" + str(padding_count)
                        files.append((piece_size - left, p))
                        padding_count += 1

        # Run the progress function with 0 completed pieces
        if progress:
            progress(0, num_pieces)

        fs = []
        pieces = []
        # Create the piece hashes
        buf = ""
        for size, path in files:
            path = [s.decode(sys.getfilesystemencoding()).encode("UTF-8") for s in path]
            fs.append({"length": size, "path": path})
            if path[-1].startswith("_____padding_file_"):
                # Padding files are synthesized zeros, never read from disk.
                buf += "\0" * size
                pieces.append(sha(buf).digest())
                buf = ""
                fs[-1]["attr"] = "p"
            else:
                fd = open(os.path.join(self.data_path, *path), "rb")
                try:
                    r = fd.read(piece_size - len(buf))
                    while r:
                        buf += r
                        if len(buf) == piece_size:
                            pieces.append(sha(buf).digest())
                            # Run the progress function if necessary
                            if progress:
                                progress(len(pieces), num_pieces)
                            buf = ""
                        else:
                            # Partial piece: carry buffer into the next file.
                            break
                        r = fd.read(piece_size - len(buf))
                finally:
                    # Always release the handle, even if hashing fails.
                    fd.close()

        if buf:
            # Hash the final, possibly short, piece.
            pieces.append(sha(buf).digest())
            if progress:
                progress(len(pieces), num_pieces)
            buf = ""

        torrent["info"]["pieces"] = "".join(pieces)
        torrent["info"]["files"] = fs

    elif os.path.isfile(self.data_path):
        torrent["info"]["name"] = os.path.split(self.data_path)[1]
        torrent["info"]["length"] = get_path_size(self.data_path)
        pieces = []

        fd = open(self.data_path, "rb")
        try:
            r = fd.read(piece_size)
            while r:
                pieces.append(sha(r).digest())
                if progress:
                    progress(len(pieces), num_pieces)
                r = fd.read(piece_size)
        finally:
            # BUG FIX: the original never closed this handle.
            fd.close()

        torrent["info"]["pieces"] = "".join(pieces)

    # Write out the torrent file; close the handle explicitly instead of
    # relying on finalization of an anonymous file object.
    out = open(torrent_path, "wb")
    try:
        out.write(bencode(torrent))
    finally:
        out.close()
def test_bencode_unicode_value(self):
    """A unicode string value is bencoded as a plain byte string."""
    encoded = bencode.bencode('abc')
    self.assertEqual(b'3:abc', encoded)
def test_bencode_unicode_metainfo(self):
    """Re-bencoding an info dict decoded from disk must not raise."""
    filename = common.get_test_data_file('test.torrent')
    with open(filename, 'rb') as _file:
        raw = _file.read()
    metainfo = bencode.bdecode(raw)[b'info']
    bencode.bencode({b'info': metainfo})
def make_meta_file(
    path,
    url,
    piece_length,
    progress=None,
    title=None,
    comment=None,
    safe=None,
    content_type=None,
    target=None,
    webseeds=None,
    name=None,
    private=False,
    created_by=None,
    trackers=None,
):
    """Build a bencoded .torrent metafile for `path` and write it to disk.

    The output file is `target` when given, otherwise `path` + '.torrent'.
    Webseed URLs ending in '.php' go into 'httpseeds', the rest into
    'url-list'.
    """
    meta = {'creation date': int(gmtime())}
    if url:
        meta['announce'] = url.strip()

    # Derive the output filename unless one was supplied.
    head, tail = os.path.split(path)
    if target:
        out_path = target
    elif tail == '':
        out_path = head + '.torrent'
    else:
        out_path = os.path.join(head, tail + '.torrent')

    if progress is None:
        progress = dummy
        try:
            session_id = component.get('RPCServer').get_session_id()
        except KeyError:
            pass
        else:
            if session_id:
                progress = RemoteFileProgress(session_id)

    meta['info'] = makeinfo(path, piece_length, progress, name, content_type, private)
    # check_info(info)

    # Optional free-text fields, stored UTF-8 encoded when present.
    for key, value in (('title', title), ('comment', comment), ('safe', safe)):
        if value:
            meta[key] = value.encode('utf8')

    if webseeds:
        httpseeds = [seed for seed in webseeds if seed.endswith('.php')]
        url_list = [seed for seed in webseeds if not seed.endswith('.php')]
        if url_list:
            meta['url-list'] = url_list
        if httpseeds:
            meta['httpseeds'] = httpseeds

    if created_by:
        meta['created by'] = created_by.encode('utf8')

    if trackers and (len(trackers[0]) > 1 or len(trackers) > 1):
        meta['announce-list'] = trackers

    meta['encoding'] = 'UTF-8'
    with open(out_path, 'wb') as out_file:
        out_file.write(bencode(utf8_encode_structure(meta)))
raise e else: self.__m_filedata = filedump def parse(self, filetree=1): if not self.__m_filedata: log.error("No data to process!") return try: self.__m_metadata = bencode.bdecode(self.__m_filedata) except Exception, e: log.warning("Failed to decode torrent data %s: %s", self.filename if self.filename else "", e) raise e self.__m_info_hash = sha(bencode.bencode( self.__m_metadata["info"])).hexdigest() # Get encoding from torrent file if available self.encoding = None if "encoding" in self.__m_metadata: self.encoding = self.__m_metadata["encoding"] elif "codepage" in self.__m_metadata: self.encoding = str(self.__m_metadata["codepage"]) if not self.encoding: self.encoding = "UTF-8" # Check if 'name.utf-8' is in the torrent and if not try to decode the string # using the encoding found. if "name.utf-8" in self.__m_metadata["info"]: self.__m_name = decode_string( self.__m_metadata["info"]["name.utf-8"])
def __init__(self, filename='', filetree=1, metainfo=None, metadata=None):
    """Parse torrent metainfo from a file, raw metainfo bytes, or a bare
    bencoded 'info' dict (``metadata``).

    On I/O or decode failure the error is logged and the constructor
    returns early, leaving the object partially initialized.
    """
    # Get the torrent metainfo from the torrent file
    if metadata:
        self._metainfo_dict = {b'info': bencode.bdecode(metadata)}
        self._metainfo = bencode.bencode(self._metainfo_dict)
    else:
        self._metainfo = metainfo
        if filename and not self._metainfo:
            log.debug('Attempting to open %s.', filename)
            try:
                with open(filename, 'rb') as _file:
                    self._metainfo = _file.read()
            except IOError as ex:
                log.warning('Unable to open %s: %s', filename, ex)
                return
        try:
            self._metainfo_dict = bencode.bdecode(self._metainfo)
        except bencode.BTFailure as ex:
            log.warning('Failed to decode %s: %s', filename, ex)
            return

    info_dict = self._metainfo_dict[b'info']
    # The info-hash is the SHA1 of the re-bencoded 'info' dict.
    self._info_hash = sha(bencode.bencode(info_dict)).hexdigest()

    # Get encoding from torrent file if available
    encoding = self._metainfo_dict.get(b'encoding', None)
    codepage = self._metainfo_dict.get(b'codepage', None)
    if not encoding:
        encoding = codepage if codepage else b'UTF-8'

    # Decode 'name' with encoding unless 'name.utf-8' found.
    if b'name.utf-8' in info_dict:
        self._name = decode_bytes(info_dict[b'name.utf-8'])
    else:
        if encoding:
            encoding = encoding.decode()
        self._name = decode_bytes(info_dict[b'name'], encoding)

    # Get list of files from torrent info
    if b'files' in info_dict:
        paths = {}
        dirs = {}
        # Multi-file torrents are rooted under the torrent name.
        prefix = self._name if len(info_dict[b'files']) > 1 else ''

        for index, f in enumerate(info_dict[b'files']):
            if b'path.utf-8' in f:
                path = decode_bytes(os.path.join(*f[b'path.utf-8']))
                del f[b'path.utf-8']
            else:
                path = decode_bytes(os.path.join(*f[b'path']), encoding)
            if prefix:
                path = os.path.join(prefix, path)
            f[b'path'] = path
            f[b'index'] = index
            # NOTE(review): bytes.encode(b'hex') is Python-2-only and fails
            # on Python 3 — confirm intended interpreter.
            if b'sha1' in f and len(f[b'sha1']) == 20:
                f[b'sha1'] = f[b'sha1'].encode(b'hex')
            if b'ed2k' in f and len(f[b'ed2k']) == 16:
                # NOTE(review): the str key f['ed2k'] mismatches the bytes
                # keys used everywhere else here — looks like it should be
                # f[b'ed2k']; verify.
                f[b'ed2k'] = f['ed2k'].encode(b'hex')
            if b'filehash' in f and len(f[b'filehash']) == 20:
                f[b'filehash'] = f[b'filehash'].encode(b'hex')

            paths[path] = f
            # Accumulate each ancestor directory's total length.
            dirname = os.path.dirname(path)
            while dirname:
                dirinfo = dirs.setdefault(dirname, {})
                dirinfo[b'length'] = dirinfo.get(b'length', 0) + f[b'length']
                dirname = os.path.dirname(dirname)

        if filetree == 2:
            def walk(path, item):
                if item['type'] == 'dir':
                    item.update(dirs[path])
                else:
                    item.update(paths[path])
                    item['download'] = True

            file_tree = FileTree2(list(paths))
            file_tree.walk(walk)
        else:
            def walk(path, item):
                if isinstance(item, dict):
                    return item
                return [
                    paths[path][b'index'],
                    paths[path][b'length'],
                    True
                ]

            file_tree = FileTree(paths)
            file_tree.walk(walk)
        self._files_tree = file_tree.get_tree()
    else:
        # Single-file torrent: synthesize a one-entry tree.
        if filetree == 2:
            self._files_tree = {
                'contents': {
                    self._name: {
                        'type': 'file',
                        'index': 0,
                        'length': info_dict[b'length'],
                        'download': True,
                    }
                }
            }
        else:
            self._files_tree = {
                self._name: (0, info_dict[b'length'], True)
            }

    # Flat file list used alongside the tree.
    self._files = []
    if b'files' in info_dict:
        prefix = ''
        if len(info_dict[b'files']) > 1:
            prefix = self._name

        for f in info_dict[b'files']:
            self._files.append({
                'path': f[b'path'],
                'size': f[b'length'],
                'download': True
            })
    else:
        self._files.append({
            'path': self._name,
            'size': info_dict[b'length'],
            'download': True
        })
class TorrentInfo(object):
    """
    Collects information about a torrent file.

    :param filename: The path to the torrent
    :type filename: string
    """
    # NOTE(review): Python-2-only syntax throughout (`except Exception, e`,
    # dict.has_key, str.encode('hex')) — this variant cannot run on Python 3.
    def __init__(self, filename, filetree=1):
        # Get the torrent data from the torrent file
        try:
            log.debug("Attempting to open %s.", filename)
            self.__m_filedata = open(filename, "rb").read()
            self.__m_metadata = bencode.bdecode(self.__m_filedata)
        except Exception, e:
            log.warning("Unable to open %s: %s", filename, e)
            raise e

        # The info-hash is the SHA1 of the re-bencoded 'info' dict.
        self.__m_info_hash = sha(bencode.bencode(self.__m_metadata["info"])).hexdigest()

        # Get encoding from torrent file if available
        self.encoding = None
        if "encoding" in self.__m_metadata:
            self.encoding = self.__m_metadata["encoding"]
        elif "codepage" in self.__m_metadata:
            self.encoding = str(self.__m_metadata["codepage"])
        if not self.encoding:
            self.encoding = "UTF-8"

        # Check if 'name.utf-8' is in the torrent and if not try to decode the string
        # using the encoding found.
        if "name.utf-8" in self.__m_metadata["info"]:
            self.__m_name = utf8_encoded(self.__m_metadata["info"]["name.utf-8"])
        else:
            self.__m_name = utf8_encoded(self.__m_metadata["info"]["name"], self.encoding)

        # Get list of files from torrent info
        paths = {}
        dirs = {}
        if self.__m_metadata["info"].has_key("files"):
            # Multi-file torrents are rooted under the torrent name.
            prefix = ""
            if len(self.__m_metadata["info"]["files"]) > 1:
                prefix = self.__m_name

            for index, f in enumerate(self.__m_metadata["info"]["files"]):
                if "path.utf-8" in f:
                    path = os.path.join(prefix, *f["path.utf-8"])
                    del f["path.utf-8"]
                else:
                    path = utf8_encoded(os.path.join(prefix, utf8_encoded(os.path.join(*f["path"]), self.encoding)), self.encoding)
                f["path"] = path
                f["index"] = index
                if "sha1" in f and len(f["sha1"]) == 20:
                    f["sha1"] = f["sha1"].encode('hex')
                if "ed2k" in f and len(f["ed2k"]) == 16:
                    f["ed2k"] = f["ed2k"].encode('hex')

                paths[path] = f
                # Accumulate each ancestor directory's total length.
                dirname = os.path.dirname(path)
                while dirname:
                    dirinfo = dirs.setdefault(dirname, {})
                    dirinfo["length"] = dirinfo.get("length", 0) + f["length"]
                    dirname = os.path.dirname(dirname)

            if filetree == 2:
                def walk(path, item):
                    if item["type"] == "dir":
                        item.update(dirs[path])
                    else:
                        item.update(paths[path])
                        item["download"] = True

                file_tree = FileTree2(paths.keys())
                file_tree.walk(walk)
            else:
                def walk(path, item):
                    if type(item) is dict:
                        return item
                    return [paths[path]["index"], paths[path]["length"], True]

                file_tree = FileTree(paths)
                file_tree.walk(walk)
            self.__m_files_tree = file_tree.get_tree()
        else:
            # Single-file torrent: synthesize a one-entry tree.
            if filetree == 2:
                self.__m_files_tree = {
                    "contents": {
                        self.__m_name: {
                            "type": "file",
                            "index": 0,
                            "length": self.__m_metadata["info"]["length"],
                            "download": True
                        }
                    }
                }
            else:
                self.__m_files_tree = {
                    self.__m_name: (0, self.__m_metadata["info"]["length"], True)
                }

        # Flat file list used alongside the tree.
        self.__m_files = []
        if self.__m_metadata["info"].has_key("files"):
            prefix = ""
            if len(self.__m_metadata["info"]["files"]) > 1:
                prefix = self.__m_name

            for f in self.__m_metadata["info"]["files"]:
                self.__m_files.append({
                    'path': f["path"],
                    'size': f["length"],
                    'download': True
                })
        else:
            self.__m_files.append({
                "path": self.__m_name,
                "size": self.__m_metadata["info"]["length"],
                "download": True
            })
def __init__(self, filename='', filetree=1, torrent_file=None):
    """Load torrent metainfo either from a file on disk or a pre-decoded dict.

    Args:
        filename (str): Path to a .torrent file to read (used when
            ``torrent_file`` is not given).
        filetree (int): File-tree layout version to build (1 or 2).
        torrent_file (dict): Already-bdecoded metainfo dict; takes
            precedence over ``filename``.

    On any read/decode failure the constructor logs a warning and returns
    early, leaving the instance only partially initialized.
    """
    self._filedata = None
    if torrent_file:
        self._metainfo = torrent_file
    elif filename:
        log.debug('Attempting to open %s.', filename)
        try:
            with open(filename, 'rb') as _file:
                self._filedata = _file.read()
        except IOError as ex:
            log.warning('Unable to open %s: %s', filename, ex)
            return
        try:
            self._metainfo = bencode.bdecode(self._filedata)
        except bencode.BTFailure as ex:
            log.warning('Failed to decode %s: %s', filename, ex)
            return
    else:
        log.warning('Requires valid arguments.')
        return

    # info_dict with keys decoded to unicode (values stay as bytes).
    info_dict = {k.decode(): v for k, v in self._metainfo[b'info'].items()}
    # Info hash: SHA-1 over the re-bencoded info dict.
    self._info_hash = sha(bencode.bencode(info_dict)).hexdigest()

    # Get encoding from torrent file if available.
    # NOTE(review): 'encoding'/'codepage' are looked up inside the *info*
    # dict here, while BEP 3 defines 'encoding' as a top-level metainfo
    # key — confirm this is intentional against the torrents being parsed.
    encoding = info_dict.get('encoding', None)
    codepage = info_dict.get('codepage', None)
    if not encoding:
        encoding = codepage if codepage else b'UTF-8'
    if encoding:
        encoding = encoding.decode()

    # Decode 'name' with encoding unless 'name.utf-8' found.
    if 'name.utf-8' in info_dict:
        self._name = decode_bytes(info_dict['name.utf-8'])
    else:
        self._name = decode_bytes(info_dict['name'], encoding)

    # Get list of files from torrent info
    self._files = []
    if 'files' in info_dict:
        paths = {}
        dirs = {}
        # Multi-file torrents are rooted under the torrent name.
        prefix = self._name if len(info_dict['files']) > 1 else ''

        for index, f in enumerate(info_dict['files']):
            # Decode this file entry's keys to unicode as well.
            f = {k.decode(): v for k, v in f.items()}
            if 'path.utf-8' in f:
                path = decode_bytes(os.path.join(*f['path.utf-8']))
                del f['path.utf-8']
            else:
                path = decode_bytes(os.path.join(*f['path']), encoding)
            if prefix:
                path = os.path.join(prefix, path)

            self._files.append({
                'path': path,
                'size': f['length'],
                'download': True
            })

            f['path'] = path
            f['index'] = index
            # Optional per-file hashes are converted to hex strings.
            if 'sha1' in f and len(f['sha1']) == 20:
                f['sha1'] = hexlify(f['sha1']).decode()
            if 'ed2k' in f and len(f['ed2k']) == 16:
                f['ed2k'] = hexlify(f['ed2k']).decode()
            if 'filehash' in f and len(f['filehash']) == 20:
                f['filehash'] = hexlify(f['filehash']).decode()

            paths[path] = f
            # Accumulate each file's length into every ancestor directory.
            dirname = os.path.dirname(path)
            while dirname:
                dirinfo = dirs.setdefault(dirname, {})
                dirinfo['length'] = dirinfo.get('length', 0) + f['length']
                dirname = os.path.dirname(dirname)

        if filetree == 2:
            def walk(path, item):
                # Directories receive aggregated sizes; files get their own
                # entry plus a default 'download' flag.
                if item['type'] == 'dir':
                    item.update(dirs[path])
                else:
                    item.update(paths[path])
                    item['download'] = True

            file_tree = FileTree2(list(paths))
            file_tree.walk(walk)
        else:
            def walk(path, item):
                if isinstance(item, dict):
                    return item
                return [paths[path]['index'], paths[path]['length'], True]

            file_tree = FileTree(paths)
            file_tree.walk(walk)
        self._files_tree = file_tree.get_tree()
    else:
        # Single-file torrent: one flat entry and a one-node tree.
        self._files.append({
            'path': self._name,
            'size': info_dict['length'],
            'download': True
        })
        if filetree == 2:
            self._files_tree = {
                'contents': {
                    self._name: {
                        'type': 'file',
                        'index': 0,
                        'length': info_dict['length'],
                        'download': True,
                    }
                }
            }
        else:
            self._files_tree = {self._name: (0, info_dict['length'], True)}
def save(self, torrent_path, progress=None):
    """
    Creates and saves the torrent file to `path`.

    :param torrent_path: where to save the torrent file
    :type torrent_path: string
    :param progress: a function to be called when a piece is hashed
    :type progress: function(num_completed, num_pieces)

    :raises InvalidPath: if the data_path has not been set
    """
    if not self.data_path:
        raise InvalidPath("Need to set a data_path!")

    torrent = {
        "info": {}
    }

    if self.comment:
        torrent["comment"] = self.comment.encode("UTF-8")

    if self.private:
        torrent["info"]["private"] = True

    if self.trackers:
        torrent["announce"] = self.trackers[0][0]
        torrent["announce-list"] = self.trackers
    else:
        torrent["announce"] = ""

    if self.webseeds:
        # Split webseeds by type: .php URLs are BEP 17 httpseeds, the
        # rest are BEP 19 url-list entries.
        httpseeds = []
        webseeds = []
        for w in self.webseeds:
            if w.endswith(".php"):
                httpseeds.append(w)
            else:
                webseeds.append(w)
        if httpseeds:
            torrent["httpseeds"] = httpseeds
        if webseeds:
            torrent["url-list"] = webseeds

    datasize = get_path_size(self.data_path)

    if self.piece_size:
        # BUG FIX: the original read the still-unassigned local
        # ('piece_size = piece_size * 1024'), raising UnboundLocalError
        # whenever an explicit piece size was set. piece_size is in KiB.
        piece_size = self.piece_size * 1024
    else:
        # We need to calculate a piece size: grow from 16 KiB up to 8 MiB
        # aiming for at most ~1024 pieces.
        piece_size = 16384
        while (datasize // piece_size) > 1024 and piece_size < (8192 * 1024):
            piece_size *= 2

    # Calculate the number of pieces we will require for the data
    num_pieces = datasize // piece_size
    if datasize % piece_size:
        num_pieces += 1

    torrent["info"]["piece length"] = piece_size

    # Create the info
    if os.path.isdir(self.data_path):
        torrent["info"]["name"] = os.path.split(self.data_path)[1]
        files = []
        padding_count = 0
        # Collect a list of file paths and add padding files if necessary
        for (dirpath, dirnames, filenames) in os.walk(self.data_path):
            for index, filename in enumerate(filenames):
                size = get_path_size(os.path.join(self.data_path, dirpath, filename))
                p = dirpath[len(self.data_path):]
                p = p.lstrip("/")
                p = p.split("/")
                if p[0]:
                    p += [filename]
                else:
                    p = [filename]
                files.append((size, p))
                # Add a padding file if necessary (aligns the next file to a
                # piece boundary; skipped after the last file of a directory).
                if self.pad_files and (index + 1) < len(filenames):
                    left = size % piece_size
                    if left:
                        p = list(p)
                        p[-1] = "_____padding_file_" + str(padding_count)
                        files.append((piece_size - left, p))
                        padding_count += 1

        # Run the progress function with 0 completed pieces
        if progress:
            progress(0, num_pieces)

        fs = []
        pieces = []
        # Create the piece hashes; 'buf' carries partial-piece data across
        # file boundaries so pieces may span files.
        buf = ""
        for size, path in files:
            path = [s.decode(sys.getfilesystemencoding()).encode("UTF-8") for s in path]
            fs.append({"length": size, "path": path})
            if path[-1].startswith("_____padding_file_"):
                # Padding content is zero bytes and always completes a piece.
                buf += "\0" * size
                pieces.append(sha(buf).digest())
                buf = ""
                fs[-1]["attr"] = "p"
            else:
                # 'with' ensures the handle is closed even if hashing fails.
                with open(os.path.join(self.data_path, *path), "rb") as fd:
                    r = fd.read(piece_size - len(buf))
                    while r:
                        buf += r
                        if len(buf) == piece_size:
                            pieces.append(sha(buf).digest())
                            # Run the progress function if necessary
                            if progress:
                                progress(len(pieces), num_pieces)
                            buf = ""
                        else:
                            break
                        r = fd.read(piece_size - len(buf))

        # Flush the final partial piece, if any.
        if buf:
            pieces.append(sha(buf).digest())
            if progress:
                progress(len(pieces), num_pieces)
            buf = ""

        torrent["info"]["pieces"] = "".join(pieces)
        torrent["info"]["files"] = fs

    elif os.path.isfile(self.data_path):
        torrent["info"]["name"] = os.path.split(self.data_path)[1]
        torrent["info"]["length"] = get_path_size(self.data_path)
        pieces = []

        with open(self.data_path, "rb") as fd:
            r = fd.read(piece_size)
            while r:
                pieces.append(sha(r).digest())
                if progress:
                    progress(len(pieces), num_pieces)
                r = fd.read(piece_size)

        torrent["info"]["pieces"] = "".join(pieces)

    # Write out the torrent file (context manager flushes and closes it).
    with open(torrent_path, "wb") as out_file:
        out_file.write(bencode(torrent))
def parse(self, filetree=1):
    """Parse the raw torrent data previously stored on this instance.

    Populates the info hash, encoding, torrent name, flat file list and
    the file tree from ``self.__m_filedata``.

    :param filetree: file-tree layout version to build (1 or 2)
    :type filetree: int

    :raises Exception: re-raises whatever ``bencode.bdecode`` raised if
        the stored data cannot be decoded
    """
    if not self.__m_filedata:
        log.error("No data to process!")
        return

    try:
        self.__m_metadata = bencode.bdecode(self.__m_filedata)
    except Exception as e:
        log.warning("Failed to decode torrent data %s: %s",
                    self.filename if self.filename else "", e)
        raise e

    # Info hash: SHA-1 over the re-bencoded info dict.
    self.__m_info_hash = sha(bencode.bencode(
        self.__m_metadata["info"])).hexdigest()

    # Get encoding from torrent file if available
    self.encoding = None
    if "encoding" in self.__m_metadata:
        self.encoding = self.__m_metadata["encoding"]
    elif "codepage" in self.__m_metadata:
        self.encoding = str(self.__m_metadata["codepage"])
    if not self.encoding:
        self.encoding = "UTF-8"

    # Check if 'name.utf-8' is in the torrent and if not try to decode the
    # string using the encoding found.
    if "name.utf-8" in self.__m_metadata["info"]:
        self.__m_name = decode_string(
            self.__m_metadata["info"]["name.utf-8"])
    else:
        self.__m_name = decode_string(self.__m_metadata["info"]["name"],
                                      self.encoding)

    # Get list of files from torrent info
    paths = {}
    dirs = {}
    if "files" in self.__m_metadata["info"]:
        # Multi-file torrents are rooted under the torrent name.
        prefix = ""
        if len(self.__m_metadata["info"]["files"]) > 1:
            prefix = self.__m_name

        for index, f in enumerate(self.__m_metadata["info"]["files"]):
            if "path.utf-8" in f:
                path = os.path.join(prefix, *f["path.utf-8"])
            else:
                path = decode_string(
                    os.path.join(
                        prefix,
                        decode_string(os.path.join(*f["path"]),
                                      self.encoding)),
                    self.encoding)
            f["index"] = index
            paths[path] = f
            # Accumulate each file's length into every ancestor directory.
            dirname = os.path.dirname(path)
            while dirname:
                dirinfo = dirs.setdefault(dirname, {})
                dirinfo["length"] = dirinfo.get("length", 0) + f["length"]
                dirname = os.path.dirname(dirname)

        if filetree == 2:
            def walk(path, item):
                # Directories receive aggregated sizes; files get their own
                # entry plus a default 'download' flag.
                if item["type"] == "dir":
                    item.update(dirs[path])
                else:
                    item.update(paths[path])
                    item["download"] = True

            file_tree = FileTree2(paths.keys())
            file_tree.walk(walk)
        else:
            def walk(path, item):
                if type(item) is dict:
                    return item
                return [paths[path]["index"], paths[path]["length"], True]

            file_tree = FileTree(paths)
            file_tree.walk(walk)
        self.__m_files_tree = file_tree.get_tree()
    else:
        # Single-file torrent: synthesize a one-entry tree.
        if filetree == 2:
            self.__m_files_tree = {
                "contents": {
                    self.__m_name: {
                        "type": "file",
                        "index": 0,
                        "length": self.__m_metadata["info"]["length"],
                        "download": True
                    }
                }
            }
        else:
            self.__m_files_tree = {
                self.__m_name: (0, self.__m_metadata["info"]["length"], True)
            }

    # Flat file list (path, size, download flag) used alongside the tree.
    self.__m_files = []
    if "files" in self.__m_metadata["info"]:
        prefix = ""
        if len(self.__m_metadata["info"]["files"]) > 1:
            prefix = self.__m_name

        for f in self.__m_metadata["info"]["files"]:
            if "path.utf-8" in f:
                path = os.path.join(prefix, *f["path.utf-8"])
            else:
                path = decode_string(
                    os.path.join(
                        prefix,
                        decode_string(os.path.join(*f["path"]),
                                      self.encoding)),
                    self.encoding)
            self.__m_files.append({
                'path': path,
                'size': f["length"],
                'download': True
            })
    else:
        self.__m_files.append({
            "path": self.__m_name,
            "size": self.__m_metadata["info"]["length"],
            "download": True
        })