Example #1
def torrent_hash(fname):
    # Read and bdecode the .torrent file, then validate its structure.
    with open(fname, 'rb') as f:
        d = bdecode(f.read())
    check_message(d)
    # The infohash is the SHA-1 of the re-bencoded 'info' dictionary.
    info_hash = hashlib.sha1(bencode(d['info'])).hexdigest().upper()
    fn = os.path.basename(fname)
    return '%s - %s' % (info_hash, fn)
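
The helper above derives the torrent's infohash as the SHA-1 digest of the re-bencoded 'info' dictionary; `bdecode`, `bencode` and `check_message` come from the surrounding BitTorrent codebase and are not shown here. The snippet below is a minimal, self-contained sketch of the same idea in Python 3: the tiny `bencode` and the toy `info` dictionary are assumptions for illustration, not the project's implementations.

import hashlib

def bencode(x):
    # Minimal bencoder sketch: ints, byte strings, lists, dicts with bytes keys.
    if isinstance(x, int):
        return b'i' + str(x).encode() + b'e'
    if isinstance(x, bytes):
        return str(len(x)).encode() + b':' + x
    if isinstance(x, list):
        return b'l' + b''.join(bencode(v) for v in x) + b'e'
    if isinstance(x, dict):
        return b'd' + b''.join(bencode(k) + bencode(v)
                               for k, v in sorted(x.items())) + b'e'
    raise TypeError('cannot bencode %r' % type(x))

# Toy single-file 'info' dictionary; a real one comes from bdecoding a .torrent.
info = {
    b'length': 5,
    b'name': b'example.txt',
    b'piece length': 16384,
    b'pieces': hashlib.sha1(b'hello').digest(),
}
infohash = hashlib.sha1(bencode(info)).hexdigest().upper()
print(infohash)  # 40-character uppercase hex string, like the one returned above
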
Example #3
    for p in to_add:  # then, parse new and changed torrents
        new_file = new_files[p]
        v = new_file[0]
        if new_file[1] in new_parsed:  # duplicate
            if p not in blocked or files[p][0] != v:
                errfunc('**warning** ' + p + ' is a duplicate torrent for ' +
                        new_parsed[new_file[1]]['path'])
            new_blocked[p] = None
            continue

        if NOISY:
            errfunc('adding ' + p)
        try:
            ff = open(p, 'rb')
            d = bdecode(ff.read())
            check_message(d)
            h = sha(bencode(d['info'])).digest()
            new_file[1] = h
            if new_parsed.has_key(h):
                errfunc('**warning** ' + p + ' is a duplicate torrent for ' +
                        new_parsed[h]['path'])
                new_blocked[p] = None
                continue

            a = {}
            a['path'] = p
            f = os.path.basename(p)
            a['file'] = f
            i = d['info']
            l = 0
            nf = 0
Example #4
    for p in to_add:                # then, parse new and changed torrents
        new_file = new_files[p]
        v = new_file[0]
        if new_file[1] in new_parsed:  # duplicate
            if p not in blocked or files[p][0] != v:
                errfunc(_("**warning** %s is a duplicate torrent for %s") %
                        (p, new_parsed[new_file[1]]['path']))
            new_blocked[p] = None
            continue

        if NOISY:
            errfunc('adding '+p)
        try:
            ff = open(p, 'rb')
            d = bdecode(ff.read())
            check_message(d)
            h = sha(bencode(d['info'])).digest()
            new_file[1] = h
            if new_parsed.has_key(h):
                errfunc(_("**warning** %s is a duplicate torrent for %s") %
                        (p, new_parsed[h]['path']))
                new_blocked[p] = None
                continue

            a = {}
            a['path'] = p
            f = os.path.basename(p)
            a['file'] = f
            i = d['info']
            l = 0
            nf = 0
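
Examples #3 and #4 are two revisions of the same directory-scanning loop: each candidate .torrent file is bdecoded, its infohash is computed exactly as in Example #1, and any path whose infohash has already been parsed is reported as a duplicate and blocked. The sketch below shows just that bookkeeping; the `infohash` helper and the hard-coded candidate data are assumptions for illustration (in the real loop the bytes come from `bencode(d['info'])` after decoding the file).

import hashlib

def infohash(bencoded_info):
    # Stand-in for sha(bencode(d['info'])).digest() in the loop above:
    # here the caller supplies already-bencoded info bytes.
    return hashlib.sha1(bencoded_info).digest()

new_parsed = {}   # infohash -> record for torrents accepted this pass
new_blocked = {}  # paths skipped because their infohash was already seen

# Two toy candidates with identical (already bencoded) info dictionaries.
candidates = {
    'a.torrent': b'd6:lengthi5e4:name5:a.txt12:piece lengthi16384ee',
    'b.torrent': b'd6:lengthi5e4:name5:a.txt12:piece lengthi16384ee',
}
for path, info_bytes in sorted(candidates.items()):
    h = infohash(info_bytes)
    if h in new_parsed:
        print('**warning** %s is a duplicate torrent for %s'
              % (path, new_parsed[h]['path']))
        new_blocked[path] = None
        continue
    new_parsed[h] = {'path': path, 'file': path}
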
Example #5
    def __init__(self, metainfo):
        self.bad_torrent_wrongfield = False
        self.bad_torrent_unsolvable = False
        self.bad_torrent_noncharacter = False
        self.bad_conversion = False
        self.bad_windows = False
        self.bad_path = False
        self.reported_errors = False
        self.is_batch = False
        self.orig_files = None
        self.files_fs = None
        self.total_bytes = 0
        self.sizes = []
        self.comment = None

        btformats.check_message(metainfo, check_paths=False)
        info = metainfo['info']
        if info.has_key('length'):
            self.total_bytes = info['length']
            self.sizes.append(self.total_bytes)
        else:
            self.is_batch = True
            r = []
            self.orig_files = []
            self.sizes = []
            i = 0
            for f in info['files']:
                l = f['length']
                self.total_bytes += l
                self.sizes.append(l)
                path = self._get_attr_utf8(f, 'path')
                for x in path:
                    if not btformats.allowed_path_re.match(x):
                        if l > 0:
                            raise BTFailure(_("Bad file path component: ")+x)
                        # BitComet makes bad .torrent files with empty
                        # filename part
                        self.bad_path = True
                        break
                else:
                    p = []
                    for x in path:
                        p.append((self._enforce_utf8(x), x))
                    path = p
                    self.orig_files.append('/'.join([x[0] for x in path]))
                    k = []
                    for u,o in path:
                        tf2 = self._to_fs_2(u)
                        k.append((tf2, u, o))
                    r.append((k,i))
                    i += 1
            # If two or more file/subdirectory names in the same directory
            # would map to the same name after encoding conversions + Windows
            # workarounds, change them. Files are changed as
            # 'a.b.c'->'a.b.0.c', 'a.b.1.c' etc, directories or files without
            # '.' as 'a'->'a.0', 'a.1' etc. If one of the multiple original
            # names was a "clean" conversion, that one is always unchanged
            # and the rest are adjusted.
            r.sort()
            self.files_fs = [None] * len(r)
            prev = [None]
            res = []
            stack = [{}]
            for x in r:
                j = 0
                x, i = x
                while x[j] == prev[j]:
                    j += 1
                del res[j:]
                del stack[j+1:]
                name = x[j][0][1]
                if name in stack[-1]:
                    for name in generate_names(x[j][1], j != len(x) - 1):
                        name = self._to_fs(name)
                        if name not in stack[-1]:
                            break
                stack[-1][name] = None
                res.append(name)
                for j in range(j + 1, len(x)):
                    name = x[j][0][1]
                    stack.append({name: None})
                    res.append(name)
                self.files_fs[i] = os.path.join(*res)
                prev = x

        self.name = self._get_field_utf8(info, 'name')
        self.name_fs = self._to_fs(self.name)
        self.piece_length = info['piece length']
        self.is_trackerless = False
        if metainfo.has_key('announce'):
            self.announce = metainfo['announce']
        elif metainfo.has_key('nodes'):
            self.is_trackerless = True
            self.nodes = metainfo['nodes']

        if metainfo.has_key('comment'):
            self.comment = metainfo['comment']
            
        self.hashes = [info['pieces'][x:x+20] for x in xrange(0,
            len(info['pieces']), 20)]
        self.infohash = sha(bencode(info)).digest()
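
The long comment inside Example #5 describes how colliding names are disambiguated after encoding conversion and Windows workarounds: a counter is inserted before the last extension ('a.b.c' -> 'a.b.0.c', 'a.b.1.c', ...) or appended for directories and extension-less names ('a' -> 'a.0', 'a.1', ...). The project's `generate_names` helper is not included in these excerpts, so the generator below is only an assumed reconstruction of that renaming rule.

import itertools

def candidate_names(name, is_dir):
    # Yield 'a.b.0.c', 'a.b.1.c', ... for files with an extension,
    # or 'a.0', 'a.1', ... for directories and extension-less names (sketch).
    base, dot, ext = name.rpartition('.')
    for i in itertools.count():
        if dot and not is_dir:
            yield '%s.%d.%s' % (base, i, ext)
        else:
            yield '%s.%d' % (name, i)

taken = {'a.b.c': None}  # names already used in the current directory
for candidate in candidate_names('a.b.c', is_dir=False):
    if candidate not in taken:
        break
print(candidate)  # 'a.b.0.c'
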
Example #6
    def __init__(self, metainfo):
        """metainfo is a dict.  When read from a metainfo (i.e.,
           .torrent file), the file must first be bdecoded before
           being passed to ConvertedMetainfo."""
        self.bad_torrent_wrongfield = False
        self.bad_torrent_unsolvable = False
        self.bad_torrent_noncharacter = False
        self.bad_conversion = False
        self.bad_windows = False
        self.bad_path = False
        self.reported_errors = False
        self.is_batch = False
        self.orig_files = None
        self.files_fs = None
        self.total_bytes = 0
        self.sizes = []
        self.comment = None
        self.title = None          # descriptive title text for whole torrent
        self.creation_date = None
        self.metainfo = metainfo
        self.encoding = None
        self.caches = None

        btformats.check_message(metainfo, check_paths=False)
        info = metainfo['info']
        self.is_private = info.has_key("private") and info['private']
        if 'encoding' in metainfo:
            self.encoding = metainfo['encoding']
        elif 'codepage' in metainfo:
            self.encoding = 'cp%s' % metainfo['codepage']
        if self.encoding is not None:
            try:
                for s in u'this is a test', u'these should also work in any encoding: 0123456789\0':
                    assert s.encode(self.encoding).decode(self.encoding) == s
            except:
                self.encoding = 'iso-8859-1'
                self.bad_torrent_unsolvable = True
        if info.has_key('length'):
            self.total_bytes = info['length']
            self.sizes.append(self.total_bytes)
        else:
            self.is_batch = True
            r = []
            self.orig_files = []
            self.sizes = []
            i = 0
            for f in info['files']:
                l = f['length']
                self.total_bytes += l
                self.sizes.append(l)
                path = self._get_attr(f, 'path')
                if len(path[-1]) == 0:
                    if l > 0:
                        raise BTFailure(_("Bad file path component: ") + path[-1])
                    # BitComet makes .torrent files with directories
                    # listed along with the files, which we don't support
                    # yet, in part because some idiot interpreted this as
                    # a bug in BitComet rather than a feature.
                    path.pop(-1)

                for x in path:
                    if not btformats.allowed_path_re.match(x):
                        raise BTFailure(_("Bad file path component: ")+x)

                self.orig_files.append('/'.join(path))
                k = []
                for u in path:
                    tf2 = self._to_fs_2(u)
                    k.append((tf2, u))
                r.append((k,i))
                i += 1
            # If two or more file/subdirectory names in the same directory
            # would map to the same name after encoding conversions + Windows
            # workarounds, change them. Files are changed as
            # 'a.b.c'->'a.b.0.c', 'a.b.1.c' etc, directories or files without
            # '.' as 'a'->'a.0', 'a.1' etc. If one of the multiple original
            # names was a "clean" conversion, that one is always unchanged
            # and the rest are adjusted.
            r.sort()
            self.files_fs = [None] * len(r)
            prev = [None]
            res = []
            stack = [{}]
            for x in r:
                j = 0
                x, i = x
                while x[j] == prev[j]:
                    j += 1
                del res[j:]
                del stack[j+1:]
                name = x[j][0][1]
                if name in stack[-1]:
                    for name in generate_names(x[j][1], j != len(x) - 1):
                        name = self._to_fs(name)
                        if name not in stack[-1]:
                            break
                stack[-1][name] = None
                res.append(name)
                for j in xrange(j + 1, len(x)):
                    name = x[j][0][1]
                    stack.append({name: None})
                    res.append(name)
                self.files_fs[i] = os.path.join(*res)
                prev = x

        self.name = self._get_attr(info, 'name')
        self.name_fs = self._to_fs(self.name)
        self.piece_length = info['piece length']

        self.announce = metainfo.get('announce')
        self.announce_list = metainfo.get('announce-list')
        if 'announce-list' not in metainfo and 'announce' not in metainfo:
            self.is_trackerless = True
        else:
            self.is_trackerless = False

        self.nodes = metainfo.get('nodes')

        self.title = metainfo.get('title')
        self.comment = metainfo.get('comment')
        self.creation_date = metainfo.get('creation date')
        self.locale = metainfo.get('locale')

        self.url_list = metainfo.get('url-list', [])
        if not isinstance(self.url_list, list):
            self.url_list = [self.url_list, ]

        self.caches = metainfo.get('caches')

        self.hashes = [info['pieces'][x:x+20] for x in xrange(0,
            len(info['pieces']), 20)]
        self.infohash = InfoHashType(sha(bencode(info)).digest())
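
Example #6 also validates a declared 'encoding' (or 'codepage') field by round-tripping two test strings through the codec and falling back to iso-8859-1 when that fails. Below is a self-contained sketch of that probe, with 'cp1252' and 'no-such-cp' as purely illustrative inputs.

def usable_encoding(encoding, fallback='iso-8859-1'):
    # Return `encoding` if it round-trips the probe text, else `fallback` (sketch).
    probes = [u'this is a test',
              u'these should also work in any encoding: 0123456789\0']
    try:
        for s in probes:
            if s.encode(encoding).decode(encoding) != s:
                raise ValueError(encoding)
        return encoding
    except (LookupError, ValueError):
        return fallback

print(usable_encoding('cp1252'))      # 'cp1252'
print(usable_encoding('no-such-cp'))  # 'iso-8859-1'
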
Example #8
    def __init__(self, metainfo):
        self.bad_torrent_wrongfield = False
        self.bad_torrent_unsolvable = False
        self.bad_conversion = False
        self.bad_windows = False
        self.reported_errors = False
        self.is_batch = False
        self.orig_files = None
        self.files_fs = None
        self.total_bytes = 0
        self.sizes = []

        btformats.check_message(metainfo)
        info = metainfo['info']
        if info.has_key('length'):
            self.total_bytes = info['length']
            self.sizes.append(self.total_bytes)
        else:
            self.is_batch = True
            r = []
            self.orig_files = []
            self.sizes = []
            for i, f in enumerate(info['files']):
                l = f['length']
                self.total_bytes += l
                self.sizes.append(l)
                path = self._get_attr_utf8(f, 'path')
                path = [(self._enforce_utf8(x), x) for x in path]
                self.orig_files.append('/'.join([x[0] for x in path]))
                r.append(([(self._to_fs_2(u), u, o) for u, o in path], i))
            # If two or more file/subdirectory names in the same directory
            # would map to the same name after encoding conversions + Windows
            # workarounds, change them. Files are changed as
            # 'a.b.c'->'a.b.0.c', 'a.b.1.c' etc, directories or files without
            # '.' as 'a'->'a.0', 'a.1' etc. If one of the multiple original
            # names was a "clean" conversion, that one is always unchanged
            # and the rest are adjusted.
            r.sort()
            self.files_fs = [None] * len(r)
            prev = [None]
            res = []
            stack = [{}]
            for x in r:
                j = 0
                x, i = x
                while x[j] == prev[j]:
                    j += 1
                del res[j:]
                del stack[j + 1:]
                name = x[j][0][1]
                if name in stack[-1]:
                    for name in generate_names(x[j][1], j != len(x) - 1):
                        name = self._to_fs(name)
                        if name not in stack[-1]:
                            break
                stack[-1][name] = None
                res.append(name)
                for j in range(j + 1, len(x)):
                    name = x[j][0][1]
                    stack.append({name: None})
                    res.append(name)
                self.files_fs[i] = os.path.join(*res)
                prev = x

        self.name = self._get_field_utf8(info, 'name')
        self.name_fs = self._to_fs(self.name)
        self.piece_length = info['piece length']
        self.announce = metainfo['announce']
        self.hashes = [
            info['pieces'][x:x + 20]
            for x in xrange(0, len(info['pieces']), 20)
        ]
        self.infohash = sha(bencode(info)).digest()
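
Each variant of `__init__` above finishes the same way: the 'pieces' value is sliced into consecutive 20-byte SHA-1 digests, one per piece, and the infohash is the SHA-1 of the bencoded info dict. The slicing is easy to demonstrate with fake data; the three digests below are arbitrary stand-ins.

import hashlib

# Fake 'pieces' blob: the concatenated SHA-1 digests of three toy pieces.
pieces = b''.join(hashlib.sha1(chunk).digest()
                  for chunk in (b'piece0', b'piece1', b'piece2'))

# Same slicing as self.hashes above (xrange becomes range in Python 3).
hashes = [pieces[x:x + 20] for x in range(0, len(pieces), 20)]
assert len(hashes) == 3 and all(len(h) == 20 for h in hashes)
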