# Example 1
def main():
    """Read PTP permalink URLs from a file (argv[1]) and search CG/KG for each.

    Previously-processed PTP ids are tracked in a pickled list ('seen.p')
    so reruns skip movies that were already handled.
    """
    ptp = ptpapi.login()
    cg = cgapi.CGAPI()
    cg.login()
    kg = kgapi.KGAPI()
    kg.login()
    # Load the list of already-seen PTP ids; start fresh on the first run.
    # (Original leaked the file handle and crashed when 'seen.p' was absent.)
    try:
        with open('seen.p', 'rb') as seen_fh:
            seen = pickle.load(seen_fh)
    except FileNotFoundError:
        seen = []
    with open(sys.argv[1], 'r') as fh:
        for url in fh:
            ptp_id = parse_qs(urlparse(url).query)['id'][0]
            if ptp_id in seen:
                logger.error('Already seen movie %s' % ptp_id)
                continue
            findByURL(cg, kg, url)
            seen.append(ptp_id)
            # Persist progress after every movie so an interruption loses nothing.
            with open("seen.p", "wb") as seen_fh:
                pickle.dump(seen, seen_fh)
            sleep(7)  # crude rate limit between lookups
# Example 2
def main():
    """Attempt to find torrents to reseed on PTP from other sites (CG/KG).

    Movies come either from explicit PTP permalinks (--id) or from PTP's
    need-for-seed list, optionally filtered (--search) and capped (--limit).
    """
    parser = argparse.ArgumentParser(
        description='Attempt to find torrents to reseed on PTP from other sites'
    )
    parser.add_argument('-i',
                        '--id',
                        help='Only full PTP links for now',
                        nargs='*')
    parser.add_argument('--debug',
                        help='Print lots of debugging statements',
                        action="store_const",
                        dest="loglevel",
                        const=logging.DEBUG,
                        default=logging.WARNING)
    parser.add_argument('-v',
                        '--verbose',
                        help='Be verbose',
                        action="store_const",
                        dest="loglevel",
                        const=logging.INFO)
    parser.add_argument('-l',
                        '--limit',
                        help="Limit need-for-seed results to N movies",
                        default=100,
                        type=int)
    parser.add_argument('-s',
                        '--search',
                        help="Allow filtering the need-for-seed results",
                        default=None)
    parser.add_argument('-r',
                        '--required-remote-seeds',
                        help="The number of seeds required on the remote site",
                        default=1,
                        type=int)
    parser.add_argument(
        '-m',
        '--min-ptp-seeds',
        help="Set the minimum number of seeds before a reseed will happen",
        default=0,
        type=int)
    args = parser.parse_args()

    logging.basicConfig(level=args.loglevel)
    logger = logging.getLogger('reseed-machine')

    logger.info("Logging into PTP")
    ptp = ptpapi.login()
    logger.info("Logging into CG")
    cg = CGAPI()
    logger.info("Logging into KG")
    kg = KGAPI()
    sites = [cg, kg]

    if args.id:
        movies = args.id
    else:
        # Build the need-for-seed filter dict from 'key=value,key=value'.
        # partition (not split('=')) keeps values that contain '=' intact
        # and tolerates a bare key with no '='.
        filters = {}
        if args.search:
            for term in args.search.split(','):
                key, _, value = term.partition('=')
                filters[key] = value
        movies = [t['Link'] for t in ptp.need_for_seed(filters)][:args.limit]

    for i in movies:
        ptp_movie = None
        if '://passthepopcorn.me' in i:
            parsed_url = parse_qs(urlparse(i).query)
            ptp_movie = ptpapi.Movie(ID=parsed_url['id'][0])

        if ptp_movie is None:
            logger.error("Could not figure out ID '{0}'".format(i))
        else:
            # A movie without an IMDb id cannot be cross-matched; skip it.
            try:
                ptp_movie['ImdbId']
            except KeyError:
                # logger.warn is a deprecated alias for logger.warning
                logger.warning("ImdbId not found from '{0}', skipping".format(i))
                continue
            find_match(ptp_movie,
                       sites,
                       min_seeds=args.min_ptp_seeds,
                       remote_seeds=args.required_remote_seeds)
# Example 3
def main():
    """The entrypoint: try to reseed each given file/path as a PTP torrent.

    Reads paths from the CLI (or stdin when the file list is '-' or empty),
    matches each against PTP by URL, filename, or guessed title, then loads
    the matched torrent into the pyroscope/rtorrent proxy.  Exits with a
    status code that encodes how many files could not be handled.
    """
    parser = define_parser()
    args = parser.parse_args()
    logger = logging.getLogger('ptp-reseed')

    logging.basicConfig(level=args.loglevel)

    # Load pyroscope
    load_config.ConfigLoader().load()
    proxy = config.engine.open()

    # Futile attempt to impose our loglevel upon pyroscope
    logging.basicConfig(level=args.loglevel)

    # Load PTP API
    ptp = ptpapi.login()

    # Buckets for the end-of-run summary and the exit code computed below.
    loaded = []
    would_load = []
    already_loaded = []
    not_found = []

    # '-' (or no file arguments at all) means: read the path list from stdin.
    if args.files == ['-'] or args.files == []:
        filelist = sys.stdin
    else:
        filelist = args.files

    if args.compare_paths:
        logger.debug('Loading existing torrents for pre-matching')
        loaded_paths = find_existing_torrents(proxy)
    else:
        loaded_paths = []

    for filename in filelist:
        # Start each iteration with an empty (falsy) match.
        match = Match(None)
        # NOTE(review): .decode('utf-8') implies filelist yields bytes
        # (py2-era code or a binary stdin); on str input this would raise
        # AttributeError — confirm against define_parser()/the caller.
        filename = filename.strip("\n").decode('utf-8')

        logger.info(u'Starting reseed attempt on file {0}'.format(filename))

        if not os.path.exists(filename):
            logger.error(u"File/directory {0} does not exist".format(filename))
            continue

        if args.url:
            # An explicit permalink wins: match by torrent id if present,
            # otherwise by movie id.
            parsed_url = parse_qs(urlparse(args.url).query)
            if 'torrentid' in parsed_url:
                match = match_by_torrent(
                    ptpapi.Torrent(ID=parsed_url['torrentid'][0]),
                    filename.encode())
            elif 'id' in parsed_url:
                match = match_by_movie(ptpapi.Movie(ID=parsed_url['id'][0]),
                                       filename.encode())
        elif filename:
            # Otherwise try each configured strategy ('filename', 'title')
            # in the order given by the Reseed/findBy config option, and
            # stop at the first one that yields a match.
            for match_type in ptpapi.config.config.get('Reseed',
                                                       'findBy').split(','):
                try:
                    if match_type == 'filename':
                        # Skip paths already known to rtorrent (pre-match).
                        if os.path.abspath(filename) in loaded_paths:
                            logger.info(
                                u'Path {0} already in rtorrent, skipping'.
                                format(os.path.abspath(filename)))
                        else:
                            logger.debug(u'Path {0} not in rtorrent'.format(
                                os.path.abspath(filename)))
                            match = match_against_file(ptp, filename,
                                                       args.limit)
                    elif match_type == 'title':
                        match = match_by_guessed_name(ptp, filename,
                                                      args.limit)
                    else:
                        logger.error(
                            u"Match type {0} not recognized for {1}, skipping".
                            format(match_type, filename))
                    if match:
                        break
                except Exception:
                    # Surface which file broke matching, then re-raise.
                    print(u"Error while attempting to match file '{0}'".format(
                        filename))
                    raise

        # Make sure we have the minimum information required
        if not match:
            not_found.append(filename)
            logger.error(
                u"Could not find an associated torrent for '{0}', cannot reseed"
                .format(filename))
            continue

        # CLI flag overrides the config option for the creation directory.
        if args.create_in_directory:
            create_in = args.create_in_directory
        elif ptpapi.config.config.has_option('Reseed', 'createInDirectory'):
            create_in = ptpapi.config.config.get('Reseed', 'createInDirectory')
        else:
            create_in = None
        create_matched_files(match,
                             directory=create_in,
                             action=args.action,
                             dry_run=args.dry_run)
        logger.info(u"Found match, now loading torrent {0} to path {1}".format(
            match.ID, match.path))
        if args.dry_run:
            would_load.append(filename)
            logger.debug("Dry-run: Stopping before actual load")
            continue
        # load_torrent reports whether the torrent was newly added.
        if load_torrent(proxy, match.ID, match.path):
            loaded.append(filename)
        else:
            already_loaded.append(filename)

    if args.summary:
        print('==> Loaded:')
        print('\n'.join(loaded))
        print('==> Would have loaded:')
        print('\n'.join(would_load))
        print('==> Already loaded:')
        print('\n'.join(already_loaded))
        print('==> Not found:')
        print('\n'.join(not_found))

    # Exit-code protocol: 1 = exactly one file not found, 2 = several not
    # found, 3 = nothing missing but some were already loaded, 0 = all good.
    exit_code = 0
    if len(not_found) == 1:
        exit_code = 1
    elif len(not_found) > 1:
        exit_code = 2
    elif len(already_loaded) > 0:
        exit_code = 3

    logger.debug("Total session tokens consumed: %s",
                 ptpapi.session.session.consumed_tokens)
    logger.debug("Exiting...")
    sys.exit(exit_code)
# Example 4
def main():
    """Entry point for the extensible PTP command line utility.

    Builds an argparse tree of subcommands (search, download, archive,
    inbox, raw, userstats, fields, search-fields, log), logs into PTP,
    and dispatches to the handler bound via set_defaults(func=...).
    """
    logger = logging.getLogger(__name__)
    parser = argparse.ArgumentParser(description='Extensible command line utility for PTP')
    add_verbosity_args(parser)
    subparsers = parser.add_subparsers()

    # Shared parent for 'search' and 'download' so they accept the same flags.
    search_parent = argparse.ArgumentParser()
    add_verbosity_args(search_parent)
    search_parent.add_argument('search_terms', help="""A list of terms in [field]=[text] format.
                               If the '=' is omitted, the field is assumed to be 'name'.""", nargs='+', metavar='term')
    search_parent.add_argument('-n', '--dry-run', help="Don't actually download any torrents", action='store_true')
    search_parent.add_argument('-l', '--limit', help="Limit search results to N movies", default=100, type=int)
    search_parent.add_argument('-f', '--filter', help="Define a filter to download movies with",
                               default=ptpapi.config.config.get('Main', 'filter'))
    search_parent.add_argument('-m', '--movie-format', help="Set the output for movies", default=None)
    search_parent.add_argument('-t', '--torrent-format', help="Set the output for torrents", default=None)
    search_parent.add_argument('-o', '--output-directory', help="Location for any downloaded files", default=None)
    search_parent.add_argument('-p', '--pages', help="The number of pages to download", default=1, type=int)

    search_parser = subparsers.add_parser('search', help='Search for or download movies', add_help=False, parents=[search_parent])
    search_parser.add_argument('-d', '--download', help="Download any movies found", action="store_true")
    search_parser.set_defaults(func=do_search)

    # 'download' is 'search' with --download forced on.
    download_parser = subparsers.add_parser('download', help='An alias for `search -d`', add_help=False, parents=[search_parent])
    download_parser.add_argument('-d', '--download', help="Download any movies found", action="store_true", default=True)
    download_parser.set_defaults(func=do_search)

    # NOTE(review): unlike the other subparsers, 'archive' is not given
    # add_verbosity_args — presumably an oversight; verbosity then falls
    # back to the top-level parser's flags. Confirm before changing.
    archive_parser = subparsers.add_parser('archive', help='Commands related to the archive project.')
    archive_parser.add_argument('-c', '--container-id', help="Specify which container to act on when fetching", type=int)
    archive_parser.add_argument('-d', '--fetch-downloaded', help="Fetch all list of all movies marked as 'Downloaded'", action="store_true")
    archive_parser.set_defaults(func=do_archive)

    inbox_parser = subparsers.add_parser('inbox', help='Reads messages in your inbox')
    add_verbosity_args(inbox_parser)
    inbox_parser.add_argument('-u', '--unread', help="Only show unread messages", action="store_true")
    inbox_parser.add_argument('-m', '--mark-read', help="Mark messages as read", type=lambda s: [int(n) for n in s.split(',')])
    inbox_parser.add_argument('--mark-all-read', help="Scan and mark all messages as read. "
                              "WARNING: If new messages arrive while this is running, the script can get caught in a loop until it reaches the end of the inbox's pages", action="store_true")
    inbox_parser.add_argument('--user', help="Filter messages by the sender")
    inbox_parser.add_argument('-c', '--conversation', help="Get the messages of a specific conversation", type=int)
    inbox_parser.add_argument('-p', '--page', help="Start at a certain page", type=int, default=1)
    inbox_parser.set_defaults(func=do_inbox)

    raw_parser = subparsers.add_parser('raw', help='Fetch the raw HTML of pages')
    add_verbosity_args(raw_parser)
    raw_parser.add_argument('url', help="A list of urls to download", nargs='+')
    raw_parser.set_defaults(func=do_raw)

    userstats_parser = subparsers.add_parser('userstats', help='Gather users\' stats from profile pages')
    add_verbosity_args(userstats_parser)
    userstats_parser.add_argument('-i', '--user-id', help="The user to look at", nargs='?', default=None)
    userstats_parser.add_argument('--hummingbird', help="Imitate Hummingbird's format", action="store_true")
    userstats_parser.set_defaults(func=do_userstats)

    field_parser = subparsers.add_parser('fields', help='List the fields available for each PTPAPI resource')
    add_verbosity_args(field_parser)
    field_parser.set_defaults(func=do_fields)

    search_field_parser = subparsers.add_parser('search-fields', help='List the fields available when searching')
    add_verbosity_args(search_field_parser)
    search_field_parser.set_defaults(func=do_search_fields)

    log_parser = subparsers.add_parser('log', help='Show the log of recent events')
    add_verbosity_args(log_parser)
    log_parser.add_argument('-r', '--reverse', help='Sort in reverse', action='store_true')
    log_parser.add_argument('-f', '--follow', help='Print new entries as they appear', action="store_true")
    log_parser.set_defaults(func=do_log)

    args = parser.parse_args()

    # On Python 3 subparsers are optional by default, so a bare invocation
    # would otherwise crash below with AttributeError on args.func.
    # parser.error() prints usage and exits with status 2.
    if not hasattr(args, 'func'):
        parser.error('a subcommand is required')

    logging.basicConfig(level=args.loglevel)

    api = ptpapi.login()

    args.func(api, args)
    logger.debug("Total session tokens consumed: %s", ptpapi.session.session.consumed_tokens)
    logger.debug("Exiting...")