Example No. 1
def collage(self, coll_id, search_terms=None):
    """Simplistic representation of a collage, might be split out later"""
    # Use None instead of a mutable default argument so repeated calls
    # don't share (and keep mutating) the same dict.
    search_terms = search_terms or {}
    search_terms["id"] = coll_id
    req = session.base_get("collages.php", params=search_terms)
    movies = []
    for movie in ptpapi.util.snarf_cover_view_data(req.content):
        movie["Torrents"] = []
        for group in movie["GroupingQualities"]:
            movie["Torrents"].extend(group["Torrents"])
        movies.append(ptpapi.Movie(data=movie))
    return movies
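A minimal usage sketch for collage() (the collage ID is a placeholder; it assumes the object returned by ptpapi.login() exposes this method, and that Movie objects support item access to their data, as in Example No. 8):

import ptpapi

api = ptpapi.login()  # assumes PTP credentials are already configured for ptpapi
for movie in api.collage("1"):  # "1" is a made-up collage ID
    # each Movie carries the flattened torrent list built by collage()
    print(len(movie["Torrents"]))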
Example No. 2
def do_fields(api, args):
    print("Movie:")
    m = ptpapi.Movie(ID=1)
    for values in m.key_finder.values():
        for val in values:
            print("- {0}".format(val))
    print("Torrent:")
    t = ptpapi.Torrent(ID=1)
    for values in t.key_finder.values():
        for val in values:
            print("- {0}".format(val))
Example No. 3
def artist(self, art_id, search_terms=None):
    """Simplistic representation of an artist page, might be split out later"""
    # As in collage(), avoid a shared mutable default argument.
    search_terms = search_terms or {}
    search_terms["id"] = art_id
    req = session.base_get("artist.php", params=search_terms)
    movies = []
    for movie in ptpapi.util.snarf_cover_view_data(
        req.content, key=b"ungroupedCoverViewJsonData"
    ):
        movie["Torrents"] = []
        for group in movie["GroupingQualities"]:
            movie["Torrents"].extend(group["Torrents"])
        movies.append(ptpapi.Movie(data=movie))
    return movies
Example No. 4
def search(self, filters):
    """Perform a movie search"""
    if "name" in filters:
        filters["searchstr"] = filters["name"]
    filters["json"] = "noredirect"
    ret_array = []
    for movie in session.base_get("torrents.php", params=filters).json()["Movies"]:
        if "Directors" not in movie:
            movie["Directors"] = []
        if "ImdbId" not in movie:
            movie["ImdbId"] = "0"
        # Note: HTMLParser.unescape() was removed in Python 3.9;
        # html.unescape() is the modern equivalent.
        movie["Title"] = html_parser.HTMLParser().unescape(movie["Title"])
        ret_array.append(ptpapi.Movie(data=movie))
    return ret_array
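An illustrative call to search() with made-up filter values ('name' is rewritten to 'searchstr' by the method itself; 'year' is assumed to be a valid torrents.php parameter, and Movie item access is assumed as in Example No. 8):

import ptpapi

api = ptpapi.login()
for movie in api.search({"name": "Brazil", "year": "1985"}):
    # "Title" and "ImdbId" are normalized by search() itself
    print(movie["Title"], movie["ImdbId"])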
Example No. 5
def parse_terms(termlist):
    """Takes an array of terms, and sorts them out into 4 categories:
       * torrent URLs
       * movie URLs
       * targets (where to perform the search e.g. collages or bookmarks)
       * all other search parameters
    """
    torrents = []
    movies = []
    terms = {}
    target = 'torrents'

    for arg in termlist:
        url = urlparse(arg)
        url_args = parse_qs(url.query)
        if url.path == '/collages.php':
            target = 'collage'
            terms = url_args
        elif url.path == "/artist.php":
            target = 'artist'
            terms = url_args
        elif url.path == '/torrents.php':
            if 'torrentid' in url_args:
                torrents.append(ptpapi.Torrent(url_args['torrentid'][0]))
            elif 'id' in url_args:
                if 'action' in url_args and url_args['action'][0] == 'download':
                    torrents.append(ptpapi.Torrent(url_args['id'][0]))
                else:
                    movies.append(ptpapi.Movie(url_args['id'][0]))
            else:
                terms = url_args
        else:
            term = arg.partition('=')
            if not term[2]:
                if term[0] == 'bookmarks':
                    target = 'bookmarks'
                else:
                    terms['searchstr'] = term[0]
            else:
                # Provide aliases for commonly used terms
                term_map = {
                    'taglist': ['genre', 'genres', 'tags'],
                    'searchstr': ['name', 'title']
                }
                for key, value in term_map.items():
                    if term[0] in value:
                        term = (key, term[1], term[2])
                terms[term[0]] = term[2]
    return (target, movies, torrents, terms)
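An illustrative call to parse_terms() with made-up IDs, run in the same module as the function (so ptpapi and the urllib helpers are already imported); it shows how URLs and free-form terms land in the four return values:

target, movies, torrents, terms = parse_terms([
    'https://passthepopcorn.me/torrents.php?id=12345',         # becomes a ptpapi.Movie
    'https://passthepopcorn.me/torrents.php?torrentid=67890',  # becomes a ptpapi.Torrent
    'genre=comedy',                                            # alias, stored as terms['taglist']
    'Some Title',                                              # bare term, stored as terms['searchstr']
])
print(target)  # 'torrents'
print(terms)   # {'taglist': 'comedy', 'searchstr': 'Some Title'}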
Example No. 6
def search(self, filters):
    """Perform a movie search"""
    if 'name' in filters:
        filters['searchstr'] = filters['name']
    filters['json'] = 'noredirect'
    ret_array = []
    for movie in session.base_get('torrents.php',
                                  params=filters).json()['Movies']:
        if 'Directors' not in movie:
            movie['Directors'] = []
        if 'ImdbId' not in movie:
            movie['ImdbId'] = '0'
        movie['Title'] = html_parser.HTMLParser().unescape(movie['Title'])
        ret_array.append(ptpapi.Movie(data=movie))
    return ret_array
Example No. 7
def load_parent_data(self):
    # Attach the parent movie, looked up via this torrent's group ID
    self.data['Movie'] = ptpapi.Movie(ID=self['GroupId'])
Example No. 8
def main():
    parser = argparse.ArgumentParser(
        description='Attempt to find torrents to reseed on PTP from other sites'
    )
    parser.add_argument('-i',
                        '--id',
                        help='Only full PTP links for now',
                        nargs='*')
    parser.add_argument('--debug',
                        help='Print lots of debugging statements',
                        action="store_const",
                        dest="loglevel",
                        const=logging.DEBUG,
                        default=logging.WARNING)
    parser.add_argument('-v',
                        '--verbose',
                        help='Be verbose',
                        action="store_const",
                        dest="loglevel",
                        const=logging.INFO)
    parser.add_argument('-l',
                        '--limit',
                        help="Limit need-for-seed results to N movies",
                        default=100,
                        type=int)
    parser.add_argument('-s',
                        '--search',
                        help="Allow filtering the need-for-seed results",
                        default=None)
    parser.add_argument('-r',
                        '--required-remote-seeds',
                        help="The number of seeds required on the remote site",
                        default=1,
                        type=int)
    parser.add_argument(
        '-m',
        '--min-ptp-seeds',
        help="Set the minimum number of seeds before a reseed will happen",
        default=0,
        type=int)
    args = parser.parse_args()

    logging.basicConfig(level=args.loglevel)
    logger = logging.getLogger('reseed-machine')

    logger.info("Logging into PTP")
    ptp = ptpapi.login()
    logger.info("Logging into CG")
    cg = CGAPI()
    logger.info("Logging into KG")
    kg = KGAPI()
    sites = [cg, kg]

    if args.id:
        movies = args.id
    else:
        filters = {}
        if args.search:
            for arg in args.search.split(','):
                filters[arg.split('=')[0]] = arg.split('=')[1]
        movies = [t['Link'] for t in ptp.need_for_seed(filters)][:args.limit]

    for i in movies:
        ptp_movie = None
        if '://passthepopcorn.me' in i:
            parsed_url = parse_qs(urlparse(i).query)
            ptp_movie = ptpapi.Movie(ID=parsed_url['id'][0])

        if ptp_movie is None:
            logger.error("Could not figure out ID '{0}'".format(i))
        else:
            try:
                ptp_movie['ImdbId']
            except KeyError:
                logger.warning("ImdbId not found from '{0}', skipping".format(i))
                continue
            find_match(ptp_movie,
                       sites,
                       min_seeds=args.min_ptp_seeds,
                       remote_seeds=args.required_remote_seeds)
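For reference, a quick sketch of the comma-separated key=value format that the --search handling above expects (the filter names are placeholders):

search = 'year=1995,taglist=comedy'   # hypothetical --search value
filters = {}
for arg in search.split(','):
    filters[arg.split('=')[0]] = arg.split('=')[1]
print(filters)  # {'year': '1995', 'taglist': 'comedy'}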
Example No. 9
def main():
    """The entrypoint"""
    parser = define_parser()
    args = parser.parse_args()
    logger = logging.getLogger('ptp-reseed')

    logging.basicConfig(level=args.loglevel)

    # Load pyroscope
    load_config.ConfigLoader().load()
    proxy = config.engine.open()

    # Futile attempt to impose our loglevel upon pyroscope
    logging.basicConfig(level=args.loglevel)

    # Load PTP API
    ptp = ptpapi.login()

    loaded = []
    would_load = []
    already_loaded = []
    not_found = []

    if args.files == ['-'] or args.files == []:
        filelist = sys.stdin
    else:
        filelist = args.files

    if args.compare_paths:
        logger.debug('Loading existing torrents for pre-matching')
        loaded_paths = find_existing_torrents(proxy)
    else:
        loaded_paths = []

    for filename in filelist:
        match = Match(None)
        filename = filename.strip("\n").decode('utf-8')  # assumes bytes input (e.g. Python 2 or a binary stream)

        logger.info(u'Starting reseed attempt on file {0}'.format(filename))

        if not os.path.exists(filename):
            logger.error(u"File/directory {0} does not exist".format(filename))
            continue

        if args.url:
            parsed_url = parse_qs(urlparse(args.url).query)
            if 'torrentid' in parsed_url:
                match = match_by_torrent(
                    ptpapi.Torrent(ID=parsed_url['torrentid'][0]),
                    filename.encode())
            elif 'id' in parsed_url:
                match = match_by_movie(ptpapi.Movie(ID=parsed_url['id'][0]),
                                       filename.encode())
        elif filename:
            for match_type in ptpapi.config.config.get('Reseed',
                                                       'findBy').split(','):
                try:
                    if match_type == 'filename':
                        if os.path.abspath(filename) in loaded_paths:
                            logger.info(
                                u'Path {0} already in rtorrent, skipping'.
                                format(os.path.abspath(filename)))
                        else:
                            logger.debug(u'Path {0} not in rtorrent'.format(
                                os.path.abspath(filename)))
                            match = match_against_file(ptp, filename,
                                                       args.limit)
                    elif match_type == 'title':
                        match = match_by_guessed_name(ptp, filename,
                                                      args.limit)
                    else:
                        logger.error(
                            u"Match type {0} not recognized for {1}, skipping".
                            format(match_type, filename))
                    if match:
                        break
                except Exception:
                    print(u"Error while attempting to match file '{0}'".format(
                        filename))
                    raise

        # Make sure we have the minimum information required
        if not match:
            not_found.append(filename)
            logger.error(
                u"Could not find an associated torrent for '{0}', cannot reseed"
                .format(filename))
            continue

        if args.create_in_directory:
            create_in = args.create_in_directory
        elif ptpapi.config.config.has_option('Reseed', 'createInDirectory'):
            create_in = ptpapi.config.config.get('Reseed', 'createInDirectory')
        else:
            create_in = None
        create_matched_files(match,
                             directory=create_in,
                             action=args.action,
                             dry_run=args.dry_run)
        logger.info(u"Found match, now loading torrent {0} to path {1}".format(
            match.ID, match.path))
        if args.dry_run:
            would_load.append(filename)
            logger.debug("Dry-run: Stopping before actual load")
            continue
        if load_torrent(proxy, match.ID, match.path):
            loaded.append(filename)
        else:
            already_loaded.append(filename)

    if args.summary:
        print('==> Loaded:')
        print('\n'.join(loaded))
        print('==> Would have loaded:')
        print('\n'.join(would_load))
        print('==> Already loaded:')
        print('\n'.join(already_loaded))
        print('==> Not found:')
        print('\n'.join(not_found))

    exit_code = 0
    if len(not_found) == 1:
        exit_code = 1
    elif len(not_found) > 1:
        exit_code = 2
    elif len(already_loaded) > 0:
        exit_code = 3

    logger.debug("Total session tokens consumed: %s",
                 ptpapi.session.session.consumed_tokens)
    logger.debug("Exiting...")
    sys.exit(exit_code)