Example 1
 def test_not_raise_exception_when_add_hash_fields(self):
     parse_torrent_file(self.FILE, hash_fields={'info_hash': (20, False)})
     with open(self.FILE, 'rb') as f:
         TorrentFileParser(f).hash_field('info_hash').parse()
     with open(self.FILE, 'rb') as f:
         data = f.read()
         decode(data, hash_fields={'info_hash': (20, False)})
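A minimal standalone sketch of the same hash_fields option, assuming a placeholder file 'example.torrent' that carries an extra 20-byte binary field named 'info_hash':

import torrent_parser as tp

# Hypothetical file and field; declaring the field via hash_fields lets the
# parser treat those 20 bytes as a hash (returned as a hex string) instead of
# trying to decode them as text.
data = tp.parse_torrent_file('example.torrent',
                             hash_fields={'info_hash': (20, False)})
print(data.get('info_hash'))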
Example 2
 def test_parse_correctness(self):
     data = parse_torrent_file(self.TEST_FILENAME)
     self.assertIn(['udp://p4p.arenabg.ch:1337/announce'],
                   data['announce-list'])
     self.assertEqual(data['comment'],
                      'Torrent downloaded from https://rarbg.to')
     self.assertEqual(data['creation date'], 1472762993)
Example 3
def query_torrent(title, path, found=False):
    data = tp.parse_torrent_file(path)
    # If the torrent contains more than one file, check whether the wanted file name appears in the list
    if not found and 'files' in data['info']:
        for d in data['info']['files']:
            for f in d['path']:
                if title in f:
                    found = True
    elif title in data['info']['name']:
        found = True
    if found:
        res = {'title': title}
        if 'metadata' in data:
            if 'title' in data['metadata']:
                res['title'] = data['metadata']['title']
            if 'cover url' in data['metadata']:
                res['image'] = data['metadata']['cover url']
            if 'description' in data['metadata']:
                res['details'] = data['metadata']['description']
            if 'taglist' in data['metadata']:
                res['tags'] = [{
                    "name": x
                } for x in data['metadata']['taglist']]

        print(json.dumps(res))
        exit(0)
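A hypothetical call of the helper above; the title and torrent path are placeholders. On a match it prints a JSON result and exits:

query_torrent('Example.S01E01.mkv', 'example.torrent')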
Example 4
def get_torrent_info(file_path):
    res = ''

    try:
        data = tp.parse_torrent_file(file_path)

        url = data['publisher-url']
        name = data['info']['name']
        files = data['info']['files']

        for f in files:
            tf_path = ''
            path_parts = f['path']
            for part in path_parts:
                tf_path += '/' + part

            res += tf_path + '\n'

        res = name + '\n' + url + '\n---------------\n' + res
    except:
        msg = "-- Parse Exception: {0}\n{1}\n{2}"
        msg = msg.format('', sys.exc_info()[0], sys.exc_info()[1])
        print('\n' + msg + '\n')

    return res
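The per-file paths could also be assembled with os.path.join rather than manual '/' concatenation; a small sketch with a placeholder file name:

import os
import torrent_parser as tp

data = tp.parse_torrent_file('example.torrent')
# In a multi-file torrent, each entry's 'path' is a list of path components.
for f in data['info'].get('files', []):
    print(os.path.join(*f['path']))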
Example 5
def find_path(torrent_file, search_path):
    # Get a list of files in the torrent
    torrent = tp.parse_torrent_file(torrent_file)
    torrent_files = []

    if 'files' in torrent['info']:
        for tf in torrent['info']['files']:
            torrent_files.append(tf['path'])
    else:  # If the torrent contains only a single file, the format is a bit different
        torrent_files.append([torrent['info']['name']])

    # Search for a file matching (by filename) any defined in the torrent
    torrent_files_only = []

    # Get a list of just the files in the torrent, ignoring structure
    for f in torrent_files:
        torrent_files_only.append(f[len(f) - 1])

    for path, _, files in os.walk(search_path):
        for f in files:
            if f in torrent_files_only:
                # Strip as many trailing folders off the matched path as the
                # torrent's first file is nested, recovering the folder on disk
                # that corresponds to the torrent's content root.
                folder_depth = len(torrent_files[0]) - 1
                root = path[::-1][path[::-1].replace('\\', 'x', folder_depth -
                                                     1).find('\\') + 1:][::-1]

                if (not strict_mode) or assert_valid(torrent_files, root):
                    return root
                else:
                    return 'partial'
Example 6
    def test_parse_torrent_file_to_ordered_dict(self):
        data = parse_torrent_file(self.REAL_FILE, True)
        self.assertIsInstance(data, collections.OrderedDict)

        with open(self.REAL_FILE, 'rb') as fp:
            data = TorrentFileParser(fp, True).parse()
        self.assertIsInstance(data, collections.OrderedDict)
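A brief sketch of the ordered variant in ordinary use; with the second argument set to True the keys iterate in the order they occur in the file ('example.torrent' is a placeholder):

import torrent_parser as tp

data = tp.parse_torrent_file('example.torrent', True)
for key in data:  # keys come back in file order
    print(key)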
Example 7
    def get_torrent_data(self, filepath):
        self.data = tp.parse_torrent_file(filepath)
        text = ""

        for key in self.data:
            if key == "creation date":
                try:
                    dt = datetime.date.fromtimestamp(self.data[key])
                    text = text + str(key) + " = " + str(dt) + "\n\n"
                    continue
                except OverflowError as of:
                    print(str(of))
                except OSError as ose:
                    print(str(ose))

            if key == "announce-list":
                d = self.data[key]
                text = text + str(key) + " = "
                for da in d:
                    for dat in da:
                        text = text + str(dat) + " , \n"

                text = text + "\n\n"
                continue

            if key == "info":
                for ckey in self.data[key]:
                    if ckey == "files":
                        text = text + "Files = "
                        for d in self.data[key][ckey]:
                            text = text + "Size" + " : " + str(
                                self.human_readable_size(d["length"])) + " , "
                            text = text + "Filename" + " : " + str(
                                d["path"]) + " \n"
                    if "Files =" not in text:
                        if ckey == "name":
                            text = text + " File = " + self.data[key]["name"]
                        if ckey == "length":
                            text = text + "Size : " + str(
                                self.human_readable_size(
                                    self.data[key]["length"]))

                text = text + "\n\n"
                continue

            if key == "nodes":
                d = self.data[key]
                text = text + str(key) + " = "
                for da in d:
                    text = text + str(da[0]) + ":" + str(da[1]) + " , "
                text = text + "\n\n"
                continue

            text = text + str(key) + " = " + str(self.data[key]) + "\n\n"

        return text
Example 8
 def post(self):
     content = json.loads(request.get_json(silent=True))
     url = str(content['url']).format(client_config["auth_key"],
                                      client_config["torrent_pass"])
     name = content['name']
     print(name)
     store_url = client_config["watch_folder"] + str(name) + ".torrent"
     urllib.request.urlretrieve(url, store_url)
     data = tp.parse_torrent_file(store_url)
     return str(data["info"]["name"])
Example 9
def info(hash=None, label=None, mode=None, filename=None):
    if all([filename, label]):
        hashfile = os.path.join(config.GENERAL['torrentfile_dir'], label,
                                filename)
    elif all([hash, label, mode]):
        hashfile = os.path.join(config.GENERAL['torrentfile_dir'], label,
                                hash + '.' + mode)
    elif all([hash, label]):
        searchfolder = os.path.join(config.GENERAL['torrentfile_dir'], label)
        logger.debug('sd: %s' % searchfolder)
        hashfile = None
        for fn in os.listdir(searchfolder):
            if fnmatch.fnmatch(fn, hash + '.*'):
                hashfile = os.path.join(searchfolder, fn)
                logger.debug('hf: %s' % hashfile)
    else:
        hashfile = None
    if hashfile and os.path.exists(hashfile):
        hashtype = 'hash'
        logger.debug("HashFile: %s" % hashfile)
        try:
            hashinfo = json.load(open(hashfile))
        except:
            hashtype = 'unknown'
        if hashtype == 'unknown':
            try:
                hashinfo = torrent_parser.parse_torrent_file(hashfile)
                hashinfo['name'] = hashinfo['info']['name']
            except:
                hashtype = 'nzb'
                hashinfo = {'name': 'Manually Added NZB File'}
        if 'name' not in hashinfo.keys():
            if 'sourceTitle' in hashinfo.keys():
                hashinfo['name'] = hashinfo['sourceTitle']
            elif 'BookName' in hashinfo.keys():
                hashinfo['name'] = hashinfo['BookName']
            elif 'mylar_release_name' in hashinfo.keys():
                hashinfo['name'] = hashinfo['mylar_release_name']
            elif 'mylar_release_nzbname' in hashinfo.keys():
                hashinfo['name'] = hashinfo['mylar_release_nzbname']
            elif 'Title' in hashinfo.keys() and 'AuxInfo' in hashinfo.keys():
                hashinfo['name'] = '%s %s' % (hashinfo['Title'],
                                              hashinfo['AuxInfo'])
            elif 'lidarr_release_title' in hashinfo.keys():
                hashinfo['name'] = hashinfo['lidarr_release_title']
            elif 'radarr_release_title' in hashinfo.keys():
                hashinfo['name'] = hashinfo['radarr_release_title']
            elif 'sonarr_release_title' in hashinfo.keys():
                hashinfo['name'] = hashinfo['sonarr_release_title']
            else:
                hashinfo['name'] = 'Unknown'
        logger.debug("HashInfo: %s" % hashinfo)
        return hashinfo
    else:
        return {'name': 'Hash File Not Found: %s' % hashfile}
Example 10
    def __init__(self, torrent_file):
        self.meta_data = tp.parse_torrent_file(torrent_file)
        self.files = []
        self.base_url = self.meta_data['announce']
        self.total_length = self.meta_data['info']['length']
        self.output_file = self.meta_data['info']['name']

        with open(torrent_file, 'rb') as f:
            meta_info = f.read()
            self.raw_meta = bencoding.Decoder(meta_info).decode()
            info = bencoding.Encoder(self.raw_meta[b'info']).encode()
            self.info_hash = sha1(info).digest()
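For reference, the same info-hash computation as a standalone helper; it reuses the bencoding module from the example above, which belongs to that project rather than to torrent_parser:

from hashlib import sha1
import bencoding  # assumed: the same helper module used in the example above

def compute_info_hash(torrent_file):
    with open(torrent_file, 'rb') as f:
        raw_meta = bencoding.Decoder(f.read()).decode()
    # SHA-1 of the re-encoded 'info' dictionary, as in __init__ above.
    return sha1(bencoding.Encoder(raw_meta[b'info']).encode()).digest()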
Example 11
def add_torrent(torrent_file, dl_path):
    torrent = tp.parse_torrent_file(torrent_file)
    head, tail = os.path.split(dl_path)

    if 'files' in torrent['info']:  # Torrent contains a folder, rather than a single file
        # Ensure that the torrent's root folder name is the same as the local folder's name
        torrent['info']['name'] = tail
        tp.create_torrent_file(torrent_file, torrent)

        # Adjust the DL path to be one folder up, so that it matches up correctly
        dl_path = head

    config = configparser.ConfigParser()
    config.read('config.ini')

    if 'qBittorrent' not in config:
        print('Torrent Loader requires that qBittorrent WebUI is enabled.')
        address = input('Address of WebUI (e.g. http://localhost:8080/): ')
        secured = input('Does WebUI require a login? (y/n) ')

        username = '******'
        password = '******'

        if secured == 'y':
            username = input('Username: '******'Password: '******'qBittorrent'] = {
            'address': address,
            'secured': secured,
            'username': username,
            'password': password
        }

        print()

    with open('config.ini', 'w') as config_file:
        config.write(config_file)

    qb = Client(config['qBittorrent']['address'])
    if config['qBittorrent']['secured'] == 'y':
        qb.login(config['qBittorrent']['username'],
                 config['qBittorrent']['password'])

    try:
        qb.download_from_file(open(torrent_file, 'rb'), savepath=dl_path)
        print('Added "' + torrent_file + '", content found in "' + dl_path +
              '"')
    except:
        print('An error occurred; the torrent probably already exists (' +
              torrent_file + ')')
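A hypothetical invocation of add_torrent; both paths are placeholders, and the content is assumed to already exist under the download path:

add_torrent('Example.Show.S01.torrent', '/downloads/Example.Show.S01')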
Example 12
 def raw_parse_from_file(self, file):
     '''
     Parse the content of a *.torrent file, retrieving all of its values.

     :param file: path to the *.torrent file
     :type file: str
     :return: a dict with all the raw values from the *.torrent file
     :rtype: dict
     '''
     data = {}
     try:
         data = tp.parse_torrent_file(file)
     except Exception as err:
         print(err)
     return data
Example 13
 def parse_torrent(self, torrent_name):
     parsed_torrent = tp.parse_torrent_file(torrent_name)
     print("Torrent File " + torrent_name +
           " was parsed and the announce address is: " + parsed_torrent['announce'])
     tracker_ip_port = parsed_torrent['announce'].split(":")
     self.fileName = parsed_torrent['info']['name']
     self.tracker_ip = tracker_ip_port[0]
     self.tracker_port = tracker_ip_port[1]
     self.num_pieces = math.ceil(int(parsed_torrent['info']['length'])/int(parsed_torrent['info']['piece length']))
     # Note: this hashes repr() of the parsed dict, which is not the same as the
     # canonical info hash (the SHA-1 of the raw bencoded 'info' value).
     hash = hashlib.sha1()
     hash.update(repr(parsed_torrent['info']).encode('utf-8'))
     self.info_hash = hash.hexdigest()
     print(self.info_hash)
     self.external_ip = get('https://api.ipify.org').text
Example 14
    def get_torr_info(self, file_path):
        with open(file_path, 'rb') as f:
            meta_info = f.read()
            torrent = bencode.decode(meta_info)

        self.file = torrent
        self.info_hash = hashlib.sha1(bencode.bencode(
            torrent["info"])).digest()
        self.peer_id = '-PC0001-' + ''.join(
            [str(random.randint(0, 9)) for _ in range(12)])
        self.parsed_torrent = parse_torrent_file(file_path)
        self.torrent_size = self.parsed_torrent['info']['length']
        self.tracker_url = self.parsed_torrent['announce']
        self.piece_hashes += self.parsed_torrent['info']['pieces']
        self.piece_size = self.parsed_torrent['info']['piece length']
Example 15
def main(file_list, url_or_torrent_path):
    for uri in url_or_torrent_path:
        torrent = tp.parse_torrent_file(uri)
        if file_list:
            if 'files' in torrent['info']:
                for file_info in torrent['info']['files']:
                    include_path = len(file_info['path']) > 1
                    file_path = os.path.join(*file_info['path'])
                    file_length = file_info["length"]
                    click.echo(
                        f'{os.path.basename(uri)}\t{sizeof_fmt(file_length)}\t{file_path}'
                    )
        else:
            print(json.dumps(torrent, sort_keys=True, indent=4))
Example 16
def addTracker(btFileName, trackList):
    data = tp.parse_torrent_file(btFileName)
    track = data['announce-list']
    for each in track:
        if each[0] in trackList:
            trackList.remove(each[0])

    if len(trackList) > 0:
        print("add new track", len(trackList))
        for i in range(len(trackList)):
            track.append(trackList[i:i + 1])
            
        tp.create_torrent_file(btFileName, data)

    else:
        print("there is nothing to add",btFileName)
Example 17
def get_info_from_torrent(file):

    data = tp.parse_torrent_file(file)
    info = data['info']
    file_dir = info['name']
    if 'files' in info.keys():
        biggest = 0
        file_path = ''
        files = info['files']
        for file in files:
            # endswith needs a tuple of suffixes; 'mp4' or 'mkv' or ... would
            # only ever test 'mp4'. Also track the largest matching file.
            if file['length'] > biggest and file['path'][0].endswith(
                    ('mp4', 'mkv', 'avi', 'ts', 'mov')):
                biggest = file['length']
                file_path = file['path'][0]
        file_path = file_dir + '\\' + file_path
        return file_dir, file_path
    else:
        return file_dir, file_dir
Example 18
def torrent_check(domain, bucket):
    if bucket.bucket_name is not None:
        try:
            import urllib.request
            tmp_file_name, headers = urllib.request.urlretrieve(
                'http://{}.s3.amazonaws.com/index.html?torrent'.format(
                    bucket.bucket_name))
            import torrent_parser as tp
            torrent_data = tp.parse_torrent_file(tmp_file_name)
            bucket.provider = "aws"
            bucket.bucket_name = torrent_data['info']['x-amz-bucket']
            bucket.certain = True
            print(
                '[!] AWS bucket found in the torrent file (torrent check): {}'.
                format(bucket.bucket_name))
        except Exception as e:
            print('[i] Error when trying to extract name from torrent.')
            pass
Example 19
def retrieve_meta_info(infohash):
    torrent_path = "/data2/torrent/{}.torrent".format(infohash)
    content = tp.parse_torrent_file(torrent_path)
    meta_info = content.pop('info')
    name = meta_info.pop('name')
    result = {
        'name': name,
    }
    if 'length' in meta_info:
        length = meta_info.pop('length')
        result['length'] = length
    else:
        files = meta_info.pop('files')
        new_files = []
        for item in files:
            new_files.append({'length': item['length'], 'path': item['path']})
        result['files'] = new_files
    return result
Example 20
    def __init__(self):
        view = CLI()

        args = self.parse_args()
        base_path = Path(args.path)
        data = tp.parse_torrent_file(args.torrent)

        for file in data["info"]["files"]:
            self.paths.append(Path(base_path, *file["path"]))

        not_in_path = self.check_dir(base_path)

        print("Files not in path: ", len(not_in_path))
        if len(not_in_path) != 0:
            print("Files:")
            view.display_file_list(not_in_path)

            if not args.dry_run and view.ask_file_delete():
                self.delete_files(not_in_path)
Example 21
    def CheckTorrent(self, torrent_file):
        parsed = tp.parse_torrent_file(torrent_file)
        info = parsed['info']
        piece_len = info['piece length']
        pieces = info['pieces']
        file_infos = info['files']
        torrent_name = info['name']

        datadir = pathlib.Path(self._datadir, torrent_name)

        with concurrent.futures.ThreadPoolExecutor(
                max_workers=self._checkers) as executor:
            futures = []
            try:
                for piece_index, piece_sha1, piece_paths, offset in self._CollectPieces(
                        piece_len, pieces, file_infos):
                    if not self._IsWantedDataFile(piece_paths):
                        #logging.debug(
                        #    "Skipping files which matched no data_file_globs: %r",
                        #    piece_paths)
                        continue
                    futures.append(
                        executor.submit(TorrentChecker._Check, self, datadir,
                                        piece_index, piece_sha1, piece_len,
                                        piece_paths, offset))
                for future in tqdm.tqdm(
                        concurrent.futures.as_completed(futures),
                        total=len(futures),
                        unit='piece',
                        dynamic_ncols=True,
                        leave=False):
                    future.result()
            except:
                self._logger.warning("Cancelling pending work")
                for future in futures:
                    future.cancel()
                self._cancelled = True
                raise
Example 22
def main():
    os.chdir("Torrents")
    torrentClassList = []
    for torrent in os.listdir("."):
        print("Getting Metadata for torrent file: " + torrent)
        try:
            data = tp.parse_torrent_file(torrent)
            torrentClassList.append(
                TorrentInfoClass(torrent, analyze_data(data, torrent)))
        except:
            print "Bad Torrent File " + torrent

    os.chdir("..")
    tempDir = glob.glob("./data")
    if len(tempDir) == 0:
        os.mkdir("./data")
    os.chdir("./data")

    for x in torrentClassList:
        x.printSelf()

        # pickle requires a binary-mode file handle
        with open(x.className + "Data", "wb") as f:
            pickle.dump(x, f)
Example 23
 def test_dont_need_dict_outmost(self):
     data = parse_torrent_file(self.STRING_FILE)
     self.assertEqual(data, 'announce')
Example 24
 def test_int_is_negative(self):
     data = parse_torrent_file(self.NEG_FILE)
     self.assertEqual(data['neg'], -1)
Example 25
 def test_parse_correctness(self):
     data = parse_torrent_file(self.REAL_FILE)
     self.assertIn(['udp://tracker.publicbt.com:80/announce'],
                   data['announce-list'])
     self.assertEqual(data['creation date'], 1409254242)
Example 26
 def test_parse_torrent_file_use_shortcut(self):
     parse_torrent_file(self.REAL_FILE)
Example 27
 def parse_torrent(self, file_name):
     path = self.path + "/" + file_name + ".torrent"
     t = torrent_parser.parse_torrent_file(path)
     return t
Example 28
 def __init__(self, torrent_path):
     self.torrent_path = torrent_path
     self.torrent_data = tp.parse_torrent_file(torrent_path)
     self.config = Config()
Example 29
 def test_parse_torrent_file_use_shortcut(self):
     parse_torrent_file(self.TEST_FILENAME)
Example 30
                 f'{part}: Existing file {url_data.file!r} has wrong sha1 hashsum. Disk is {sha1!r}, online is {url_data.sha1!r}.\n'
                 f'SHA1 hash of Humble Bundle Trove items is often unreliable. Therefore this message can be ignored.'
             )
             # needs_download = True  # this seems to be unreliable.
         # end if
     else:
         logger.debug(
             f'{part}: File {url_data.file!r} already exists. Not checking file hashes though.'
         )
     # end if
 elif url_data.type == TYPE_BITTORRENT:
     logger.debug(
         f'{part}: File {url_data.file!r} already exists. Checking being valid torrent file.'
     )
     try:
         torrent_parser.parse_torrent_file(url_data.file)
     except torrent_parser.InvalidTorrentDataException as e:
         logger.warning(
             f'{part}: Could not parse existing torrent file.\n{e!s}',
             exc_info=True)
         needs_download = True
     # end try
 else:  # neither torrent nor direct web download
     logger.fatal(
         f'{part}: Could not check size, md5 or sha1 for file {url_data.file!r} as it is not of type'
         f' {TYPE_WEB!r} or {TYPE_BITTORRENT!r},  but is of type {url_data.type!r}.'
     )
     needs_download = None
 # end if
 if needs_download is None:
     logger.success(