Example #1
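Multi-api variant: iterates a list of authenticated api objects, buckets each purchase under its author's subscription, then scrapes and merges metadata per content type into per-type .db files. Here `start` is assumed to be the site API class, and helpers such as create_subscription, media_scraper, format_media_types, format_directories, process_legacy_metadata and process_metadata, along with globals like j_directory, metadata_directory_format and json_settings, are assumed to come from the project's own modules.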
import os


def paid_content_scraper(apis: list[start]):
    for api in apis:
        paid_contents = api.get_paid_content()
        authed = api.auth
        if not authed.active:
            continue  # skip inactive auths instead of aborting the remaining apis
        for paid_content in paid_contents:
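            # prefer the "fromUser" field over "author" when both exist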
            author = paid_content.get("author")
            author = paid_content.get("fromUser", author)
            subscription = api.get_subscription(
                check=True, identifier=author["id"])
            if not subscription:
                subscription = create_subscription(author)
                authed.subscriptions.append(subscription)
            if paid_content["responseType"] == "post":
                if paid_content["isArchived"]:
                    print(f"Model: {author['username']}")
                    # print(
                    #     "ERROR, PLEASE REPORT THIS AS AN ISSUE AND TELL ME WHICH MODEL YOU'RE SCRAPIMG, THANKS")
                    # input()
                    # exit()
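            # bucket each purchase by response type, e.g. "post" -> "Posts"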
            api_type = paid_content["responseType"].capitalize()+"s"
            api_media = getattr(subscription.scraped, api_type)
            api_media.append(paid_content)
        max_count = len(authed.subscriptions)
        for count, subscription in enumerate(authed.subscriptions):
            print(f"Scraping - {subscription.username} | {count} / {max_count}")
            subscription.sessions = api.sessions
            username = subscription.username
            site_name = "OnlyFans"
            media_type = format_media_types()
            for api_type, paid_content in subscription.scraped:
                if api_type == "Archived":
                    if any(not x for k, x in paid_content):
                        input(
                            "OPEN AN ISSUE ON GITHUB WITH THE MODEL'S USERNAME AND THIS ERROR, THANKS")
                        exit(0)
                    continue
                formatted_directories = format_directories(
                    j_directory, site_name, username, metadata_directory_format, media_type, api_type)
                metadata_directory = formatted_directories["metadata_directory"]
                metadata_path = os.path.join(
                    metadata_directory, api_type+".db")
                new_metadata = media_scraper(paid_content, api,
                                             formatted_directories, username, api_type)
                new_metadata = new_metadata["content"]
                if new_metadata:
                    api_path = os.path.join(api_type, "")
                    old_metadata, delete_metadatas = process_legacy_metadata(
                        api, new_metadata, formatted_directories, subscription, api_type, api_path, metadata_path, site_name)
                    parent_type = ""
                    new_metadata = new_metadata + old_metadata
                    process_metadata(metadata_path, new_metadata,
                                     site_name, parent_type, api_path,
                                     subscription, delete_metadatas)
Example #2
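Like Example #1, but without the auth-active guard or the legacy-metadata merge: metadata is written per content type as JSON and optionally exported with export_archive, and the "Archived" branch simply skips instead of prompting for a bug report.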
import os


def paid_content_scraper(apis: list[start]):
    for api in apis:
        paid_contents = api.get_paid_content()
        authed = api.auth
        for paid_content in paid_contents:
            author = paid_content.get("author")
            author = paid_content.get("fromUser", author)
            subscription = api.get_subscription(
                check=True, identifier=author["id"])
            if not subscription:
                subscription = create_subscription(author)
                authed.subscriptions.append(subscription)
            if paid_content["responseType"] == "post":
                if paid_content["isArchived"]:
                    print(f"Model: {author['username']}")
                    # print(
                    #     "ERROR, PLEASE REPORT THIS AS AN ISSUE AND TELL ME WHICH MODEL YOU'RE SCRAPIMG, THANKS")
                    # input()
                    # exit()
            api_type = paid_content["responseType"].capitalize()+"s"
            api_media = getattr(subscription.scraped, api_type)
            api_media.append(paid_content)
        max_count = len(authed.subscriptions)
        for count, subscription in enumerate(authed.subscriptions):
            print(f"Scraping - {subscription.username} | {count} / {max_count}")
            subscription.sessions = api.sessions
            username = subscription.username
            site_name = "OnlyFans"
            media_type = format_media_types()
            for api_type, paid_content in subscription.scraped:
                if api_type == "Archived":
                    continue
                formatted_directories = format_directories(
                    j_directory, site_name, username, metadata_directory_format, media_type, api_type)
                metadata_directory = formatted_directories["metadata_directory"]
                metadata_path = os.path.join(
                    metadata_directory, api_type+".json")
                new_metadata = media_scraper(paid_content, api,
                                             formatted_directories, username, api_type)
                if new_metadata:
                    api_path = os.path.join(api_type, "")
                    new_metadata_object = process_metadata(
                        api, new_metadata, formatted_directories, subscription, api_type, api_path, metadata_path, site_name)
                    new_metadata_set = new_metadata_object.convert()
                    if export_metadata:
                        export_archive(new_metadata_set,
                                       metadata_path, json_settings)
Example #3
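Like Example #2, with the archived special cases removed entirely; the scrape/export flow is otherwise the same.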
import os


def paid_content_scraper(apis: list[start]):
    for api in apis:
        paid_contents = api.get_paid_content()
        authed = api.auth
        for paid_content in paid_contents:
            author = paid_content.get("author")
            author = paid_content.get("fromUser", author)
            subscription = api.get_subscription(check=True,
                                                identifier=author["id"])
            if not subscription:
                subscription = create_subscription(author)
                authed.subscriptions.append(subscription)
            api_type = paid_content["responseType"].capitalize() + "s"
            api_media = getattr(subscription.scraped, api_type)
            api_media.append(paid_content)
        max_count = len(authed.subscriptions)
        for count, subscription in enumerate(authed.subscriptions):
            print(f"Scraping - {subscription.username} | {count} / {max_count}")
            subscription.sessions = api.sessions
            username = subscription.username
            site_name = "OnlyFans"
            media_type = format_media_types()
            for api_type, paid_content in subscription.scraped:
                formatted_directories = format_directories(
                    j_directory, site_name, username,
                    metadata_directory_format, media_type, api_type)
                metadata_directory = formatted_directories[
                    "metadata_directory"]
                metadata_path = os.path.join(metadata_directory,
                                             api_type + ".json")
                new_metadata = media_scraper(paid_content, api,
                                             formatted_directories, username,
                                             api_type)
                if new_metadata:
                    api_path = os.path.join(api_type, "")
                    new_metadata_object = process_metadata(
                        api, new_metadata, formatted_directories, subscription,
                        api_type, api_path, metadata_path, site_name)
                    new_metadata_set = new_metadata_object.convert()
                    if export_metadata:
                        export_archive(new_metadata_set, metadata_path,
                                       json_settings)
Example #4
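Single-api variant: builds a fresh subscription for each purchase, scrapes it immediately, diffs the result against any previously exported archive with compare_metadata, then exports the merged metadata and downloads the media. Assumes `jsonpickle` plus project helpers such as main_helper, prepare_metadata, import_archive, metadata_fixer, compare_metadata, export_archive and download_media are available.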
import os

import jsonpickle


def paid_content_scraper(api):
    paid_contents = api.get_paid_content(refresh=False)
    results = []
    for paid_content in paid_contents:
        author = paid_content.get("author")
        author = paid_content.get("fromUser", author)
        subscription = create_subscription(author)
        subscription.sessions = api.sessions
        subscription.download_info["directory"] = j_directory
        username = subscription.username
        model_directory = os.path.join(j_directory, username)
        metadata_folder = os.path.join(model_directory, "Metadata")
        api_type = paid_content["responseType"].capitalize()+"s"
        metadata_path = os.path.join(
            metadata_folder, api_type+".json")
        site_name = "OnlyFans"
        media_type = format_media_types()
        formatted_directories = main_helper.format_directories(
            j_directory, site_name, username, media_type, api_type)
        new_item = media_scraper([paid_content], api,
                                 formatted_directories, username, api_type)
        for directory in new_item["directories"]:
            os.makedirs(directory, exist_ok=True)
        download_metadata = prepare_metadata(new_item).metadata
        subscription.set_scraped(api_type, download_metadata)
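        # jsonpickle encode/decode round-trips the metadata into plain,
        # JSON-compatible structures before comparing with the archive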
        metadata = prepare_metadata(new_item, export=True).metadata
        metadata = jsonpickle.encode(
            metadata, unpicklable=False)
        new_metadata = jsonpickle.decode(metadata)
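        # merge with any metadata previously exported to disk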
        old_metadata = import_archive(metadata_path)
        if old_metadata:
            old_metadata = metadata_fixer(directory=metadata_path.replace(
                ".json", ""), metadata_types=old_metadata)
            unrefined = compare_metadata(
                new_metadata, old_metadata, new_chain=True)
            unrefined = prepare_metadata(unrefined, export=True).metadata
            new_metadata = jsonpickle.encode(
                unrefined, unpicklable=False)
            new_metadata = jsonpickle.decode(new_metadata)
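            # results only collects purchases that already had on-disk metadata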
            results.append(new_metadata)
        os.makedirs(model_directory, exist_ok=True)
        export_archive(new_metadata, metadata_path, json_settings)
        download_media(api, subscription)
    return results
Example #5
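Single-api variant like Example #4, but routed through process_metadata as in Examples #2 and #3; it also records each content type's metadata path in subscription.download_info. Note that `results` is returned but never populated in this version.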
import os


def paid_content_scraper(api):
    paid_contents = api.get_paid_content(refresh=False)
    results = []
    for paid_content in paid_contents:
        metadata_locations = {}
        author = paid_content.get("author")
        author = paid_content.get("fromUser", author)
        subscription = create_subscription(author)
        subscription.sessions = api.sessions
        subscription.download_info["directory"] = j_directory
        username = subscription.username
        model_directory = os.path.join(j_directory, username)
        api_type = paid_content["responseType"].capitalize() + "s"
        subscription.download_info["metadata_locations"] = j_directory
        subscription.download_info["metadata_locations"] = metadata_locations
        site_name = "OnlyFans"
        media_type = format_media_types()
        formatted_directories = format_directories(j_directory, site_name,
                                                   username,
                                                   metadata_directory_format,
                                                   media_type, api_type)
        metadata_directory = formatted_directories["metadata_directory"]
        metadata_path = os.path.join(metadata_directory, api_type + ".json")
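        # remember where this type's metadata lives so the download step
        # can find it via subscription.download_info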
        metadata_locations[api_type] = metadata_path
        new_metadata = media_scraper([paid_content], api,
                                     formatted_directories, username, api_type)
        for directory in new_metadata["directories"]:
            os.makedirs(directory, exist_ok=True)
        api_path = os.path.join(api_type, "")
        new_metadata_object = process_metadata(api, new_metadata,
                                               formatted_directories,
                                               subscription, api_type,
                                               api_path, metadata_path,
                                               site_name)
        new_metadata_set = new_metadata_object.convert()
        if export_metadata:
            export_archive(new_metadata_set, metadata_path, json_settings)
        download_media(api, subscription)
    return results
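
Usage sketch (hypothetical)
A minimal driver for the scrapers above, assuming `start` is the site API
class and that authentication happens through the project's own login flow;
the construction and login steps below are illustrative assumptions, not the
project's confirmed entry point.

api = start()  # assumed: construct the site API class
# ... project-specific authentication of `api` would happen here ...
paid_content_scraper([api])  # list-based variants (Examples #1-#3)
# For the single-api variants (Examples #4-#5):
# paid_content_scraper(api)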