Example #1
def check_profiles():
    # Make sure every supported site has at least one profile with an auth.json
    file_name = "config.json"
    path = os.path.join('.settings', file_name)
    import helpers.main_helper as main_helper
    from apis.onlyfans.onlyfans import auth_details
    json_config, json_config2 = main_helper.get_config(path)
    json_settings = json_config["settings"]
    profile_directories = json_settings["profile_directories"]
    profile_directory = profile_directories[0]
    matches = ["OnlyFans"]
    for match in matches:
        base_directory = os.path.abspath(profile_directory)
        profile_site_directory = os.path.join(base_directory, match)
        if os.path.exists(profile_site_directory):
            auth_filepaths = [
                os.path.join(profile_site_directory, x, "auth.json")
                for x in os.listdir(profile_site_directory)]
            auth_filepaths = [x for x in auth_filepaths if os.path.exists(x)]
            if auth_filepaths:
                # A profile with credentials already exists for this site
                continue
        default_profile_directory = os.path.join(
            profile_site_directory, "default")
        os.makedirs(default_profile_directory, exist_ok=True)
        auth_filepath = os.path.join(default_profile_directory, "auth.json")
        if not os.path.exists(auth_filepath):
            new_item = {}
            new_item["auth"] = auth_details().export()
            main_helper.export_data(new_item, auth_filepath)
            string = f"{auth_filepath} has been created. Fill in the relevant details and then press enter to continue."
            input(string)
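check_profiles() assumes a `.settings/config.json` containing at least `settings.profile_directories`; the snippet below is a minimal, hypothetical bootstrap inferred from the keys the function reads, not the project's full schema:

import json
import os

# Write the bare minimum check_profiles() expects (illustrative values only)
config = {"settings": {"profile_directories": [".profiles"]}}
os.makedirs(".settings", exist_ok=True)
with open(os.path.join(".settings", "config.json"), "w") as f:
    json.dump(config, f, indent=4)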
Example #2
def account_setup(api: start, identifiers: list = [], jobs: dict = {}):
    # Log in and, for performers, refresh the mass-message archive;
    # returns (status, subscriptions)
    status = False
    subscriptions = []
    authed = api.login()
    if isinstance(authed, create_auth):
        profile_directory = json_global_settings["profile_directories"][0]
        profile_directory = os.path.abspath(profile_directory)
        profile_directory = os.path.join(profile_directory, authed.username)
        profile_metadata_directory = os.path.join(
            profile_directory, "Metadata")
        metadata_filepath = os.path.join(
            profile_metadata_directory, "Mass Messages.json")
        if authed.isPerformer:
            imported = import_archive(metadata_filepath)
            if "auth" in imported:
                imported = imported["auth"]
            mass_messages = api.get_mass_messages(resume=imported)
            if mass_messages:
                main_helper.export_data(mass_messages, metadata_filepath)
        # chats = api.get_chats()
        if identifiers or jobs["scrape_names"]:
            subscriptions += manage_subscriptions(
                api, -1, identifiers=identifiers)
        # collection = []
        # for subscription in subscriptions:
        #     delattr(subscription,"download_info")
        #     delattr(subscription,"sessions")
        #     delattr(subscription,"scraped")
        #     delattr(subscription,"is_me")
        #     delattr(subscription,"links")
        #     collection.append(subscription)
        # collection = jsonpickle.encode(
        #     collection, unpicklable=False)
        # collection = jsonpickle.decode(collection)
        # export_archive(collection, metadata_filepath,
        #                 json_settings)
        status = True
    return status, subscriptions
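`import_archive` and `main_helper.export_data` are project helpers; the sketch below approximates, with the stdlib only, the resume-and-re-export round-trip account_setup performs (the path and the empty-archive default are assumptions):

import json
import os

def import_archive(path):
    # Stand-in for the project helper: empty archive when nothing exists yet
    if os.path.exists(path):
        with open(path) as f:
            return json.load(f)
    return {}

metadata_filepath = os.path.join("Metadata", "Mass Messages.json")
imported = import_archive(metadata_filepath)
# The real code passes `imported` as resume= so only new messages are fetched,
# then re-exports the merged result to the same metadata_filepath.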
Example #3
def account_setup(api: start, identifier=""):
    # Earlier variant of the setup above: same login and mass-message refresh,
    # but returns only a status flag
    status = False
    authed = api.login()
    if isinstance(authed, create_auth):
        jobs = json_settings["jobs"]
        profile_directory = json_global_settings["profile_directories"][0]
        profile_directory = os.path.abspath(profile_directory)
        profile_directory = os.path.join(profile_directory, authed.username)
        profile_metadata_directory = os.path.join(profile_directory,
                                                  "Metadata")
        metadata_filepath = os.path.join(profile_metadata_directory,
                                         "Mass Messages.json")
        if authed.isPerformer:
            imported = import_archive(metadata_filepath)
            mass_messages = api.get_mass_messages(resume=imported)
            main_helper.export_data(mass_messages, metadata_filepath)
        # chats = api.get_chats()
        if not identifier and jobs["scrape_names"]:
            # metadata_filepath = os.path.join(
            #     profile_metadata_directory, "Subscriptions.json")
            # imported = import_archive(metadata_filepath)
            subscriptions = api.get_subscriptions()
        # collection = []
        # for subscription in subscriptions:
        #     delattr(subscription,"download_info")
        #     delattr(subscription,"sessions")
        #     delattr(subscription,"scraped")
        #     delattr(subscription,"is_me")
        #     delattr(subscription,"links")
        #     collection.append(subscription)
        # collection = jsonpickle.encode(
        #     collection, unpicklable=False)
        # collection = jsonpickle.decode(collection)
        # export_archive(collection, metadata_filepath,
        #                 json_settings)
        status = True
    return status
Example #4
def start_datascraper(json_config,
                      site_name_lower,
                      apis: list = [],
                      webhooks=True):
    json_settings = json_config["settings"]
    json_webhooks = json_settings["webhooks"]
    json_sites = json_config["supported"]
    domain = json_settings["auto_site_choice"]
    main_helper.assign_vars(json_config)

    json_site_settings = json_sites[site_name_lower]["settings"]

    auto_scrape_names = json_site_settings["auto_scrape_names"]
    if isinstance(auto_scrape_names, str):
        temp_identifiers = auto_scrape_names.split(",")
        identifiers = [x for x in temp_identifiers if x]
    else:
        identifiers = []
    auto_profile_choice = json_site_settings["auto_profile_choice"]
    subscription_array = []
    original_sessions = api_helper.create_session(settings=json_settings)
    original_sessions = [x for x in original_sessions if x]
    if not original_sessions:
        print("Unable to create session")
        return False
    archive_time = timeit.default_timer()
    if site_name_lower == "onlyfans":
        site_name = "OnlyFans"
        original_api = OnlyFans
        module = m_onlyfans
        if not apis:
            session_manager = api_helper.session_manager()
            apis = main_helper.process_profiles(json_settings, session_manager,
                                                site_name, original_api)

        subscription_array = []
        auth_count = -1
        jobs = json_site_settings["jobs"]
        subscription_list = module.format_options(apis, "users")
        if not auto_profile_choice:
            print("Choose Profile")
        apis = choose_option(subscription_list, auto_profile_choice)
        apis = [x.pop(0) for x in apis]
        for api in apis:
            api.session_manager.copy_sessions(original_sessions)
            module.assign_vars(api.auth.auth_details, json_config,
                               json_site_settings, site_name)
            setup = False
            setup, subscriptions = module.account_setup(api, identifiers, jobs)
            if not setup:
                if webhooks:
                    x = main_helper.process_webhooks([api], "auth_webhook",
                                                     "failed")
                auth_details = {}
                auth_details["auth"] = api.auth.auth_details.__dict__
                profile_directory = api.auth.profile_directory
                if profile_directory:
                    user_auth_filepath = os.path.join(
                        api.auth.profile_directory, "auth.json")
                    main_helper.export_data(auth_details, user_auth_filepath)
                continue
            subscription_array += subscriptions
            x = main_helper.process_webhooks([api], "auth_webhook",
                                             "succeeded")
        subscription_list = module.format_options(subscription_array,
                                                  "usernames")
        if jobs["scrape_paid_content"]:
            print("Scraping Paid Content")
            paid_content = module.paid_content_scraper(apis, identifiers)
        if jobs["scrape_names"]:
            print("Scraping Subscriptions")
            names = main_helper.process_names(module, subscription_list,
                                              auto_scrape_names, apis,
                                              json_config, site_name_lower,
                                              site_name)
        x = main_helper.process_downloads(apis, module)
        if webhooks:
            x = main_helper.process_webhooks(apis, "download_webhook",
                                             "succeeded")
    elif site_name_lower == "starsavn":
        site_name = "StarsAVN"
        original_api = StarsAVN
        module = m_starsavn
        apis = main_helper.process_profiles(json_settings, original_sessions,
                                            site_name, original_api)
        auto_profile_choice = json_site_settings["auto_profile_choice"]
        subscription_array = []
        auth_count = -1
        jobs = json_site_settings["jobs"]
        subscription_list = module.format_options(apis, "users")
        apis = choose_option(subscription_list, auto_profile_choice)
        apis = [x.pop(0) for x in apis]
        for api in apis:
            module.assign_vars(api.auth.auth_details, json_config,
                               json_site_settings, site_name)
            identifier = ""
            setup = False
            setup = module.account_setup(api, identifier=identifier)
            if not setup:
                auth_details = api.auth.auth_details.__dict__
                user_auth_filepath = os.path.join(api.auth.profile_directory,
                                                  "auth.json")
                main_helper.export_data(auth_details, user_auth_filepath)
                continue
            if jobs["scrape_names"]:
                array = module.manage_subscriptions(api,
                                                    auth_count,
                                                    identifier=identifier)
                subscription_array += array
        subscription_list = module.format_options(subscription_array,
                                                  "usernames")
        if jobs["scrape_paid_content"]:
            print("Scraping Paid Content")
            paid_content = module.paid_content_scraper(apis)
        if jobs["scrape_names"]:
            print("Scraping Subscriptions")
            names = main_helper.process_names(module, subscription_list,
                                              auto_scrape_names, apis,
                                              json_config, site_name_lower,
                                              site_name)
        x = main_helper.process_downloads(apis, module)
    stop_time = str(int(timeit.default_timer() - archive_time) / 60)[:4]
    print('Archive Completed in ' + stop_time + ' Minutes')
    return apis
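The closing `str(int(...) / 60)[:4]` truncates the minute count to four characters rather than rounding; a standalone sketch of the same timing pattern with more direct formatting:

import time
import timeit

archive_time = timeit.default_timer()
time.sleep(1)  # stand-in for the scraping work
elapsed_minutes = (timeit.default_timer() - archive_time) / 60
print(f"Archive Completed in {elapsed_minutes:.2f} Minutes")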
Example #5
def process_mass_messages(api: start, subscription, metadata_directory,
                          mass_messages) -> list:
    # Match each queued mass message to the chat where it landed, caching
    # chat archives along the way
    def compare_message(queue_id, remote_messages):
        # Return the chat message that originated from the given mass-message queue
        for message in remote_messages:
            if "isFromQueue" in message and message["isFromQueue"]:
                if queue_id == message["queueId"]:
                    return message
    global_found = []
    chats = []
    session = api.sessions[0]
    salt = json_global_settings["random_string"]
    # Fingerprint this session's IP so later runs can detect when it changed
    encoded = f"{session.ip}{salt}".encode('utf-8')
    hashed_ip = hashlib.md5(encoded).hexdigest()
    profile_directory = json_global_settings["profile_directories"][0]
    profile_directory = os.path.abspath(profile_directory)
    profile_directory = os.path.join(profile_directory, subscription.username)
    profile_metadata_directory = os.path.join(profile_directory, "Metadata")
    mass_message_path = os.path.join(
        profile_metadata_directory, "Mass Messages.json")
    chats_path = os.path.join(profile_metadata_directory, "Chats.json")
    if os.path.exists(chats_path):
        chats = import_archive(chats_path)
    date_object = datetime.today()
    date_string = date_object.strftime("%d-%m-%Y %H:%M:%S")
    for mass_message in mass_messages:
        if "status" not in mass_message:
            mass_message["status"] = ""
        if "found" not in mass_message:
            mass_message["found"] = {}
        if "hashed_ip" not in mass_message:
            mass_message["hashed_ip"] = ""
        mass_message["hashed_ip"] = mass_message.get("hashed_ip", hash)
        mass_message["date_hashed"] = mass_message.get(
            "date_hashed", date_string)
        if mass_message["isCanceled"]:
            continue
        queue_id = mass_message["id"]
        text = mass_message["textCropped"]
        text = html.unescape(text)
        mass_found = mass_message["found"]
        media_type = mass_message.get("mediaType")
        media_types = mass_message.get("mediaTypes")
        if mass_found or (not media_type and not media_types):
            continue
        identifier = None
        if chats:
            list_chats = chats
            for chat in list_chats:
                identifier = chat["identifier"]
                messages = chat["messages"]["list"]
                mass_found = compare_message(queue_id, messages)
                if mass_found:
                    mass_message["found"] = mass_found
                    mass_message["status"] = True
                    break
        if not mass_found:
            list_chats = subscription.search_messages(text=text, limit=2)
            if not list_chats:
                continue
            for item in list_chats["list"]:
                user = item["withUser"]
                identifier = user["id"]
                messages = []
                print("Getting Messages")
                keep = ["id", "username"]
                list_chats2 = [
                    x for x in chats if x["identifier"] == identifier]
                if list_chats2:
                    chat2 = list_chats2[0]
                    messages = chat2["messages"]["list"]
                    messages = subscription.get_messages(
                        identifier=identifier, resume=messages)
                    for message in messages:
                        message["withUser"] = {
                            k: item["withUser"][k] for k in keep}
                        message["fromUser"] = {
                            k: message["fromUser"][k] for k in keep}
                    mass_found = compare_message(queue_id, messages)
                    if mass_found:
                        mass_message["found"] = mass_found
                        mass_message["status"] = True
                        break
                else:
                    item2 = {}
                    item2["identifier"] = identifier
                    item2["messages"] = subscription.get_messages(
                        identifier=identifier)
                    chats.append(item2)
                    messages = item2["messages"]["list"]
                    for message in messages:
                        message["withUser"] = {
                            k: item["withUser"][k] for k in keep}
                        message["fromUser"] = {
                            k: message["fromUser"][k] for k in keep}
                    mass_found = compare_message(queue_id, messages)
                    if mass_found:
                        mass_message["found"] = mass_found
                        mass_message["status"] = True
                        break
        if not mass_found:
            mass_message["status"] = False
    main_helper.export_data(chats, chats_path)
    for mass_message in mass_messages:
        found = mass_message["found"]
        if found and found["media"]:
            user = found["withUser"]
            identifier = user["id"]
            date_hashed_object = datetime.strptime(
                mass_message["date_hashed"], "%d-%m-%Y %H:%M:%S")
            next_date_object = date_hashed_object + timedelta(days=1)
            # Refresh the cached message if it was recorded under a different
            # IP or more than a day ago
            if mass_message["hashed_ip"] != hashed_ip or date_object > next_date_object:
                print("Getting Message By ID")
                x = subscription.get_message_by_id(
                    identifier=identifier, identifier2=found["id"], limit=1)
                new_found = x["result"]["list"][0]
                new_found["withUser"] = found["withUser"]
                mass_message["found"] = new_found
                mass_message["hashed_ip"] = hash
                mass_message["date_hashed"] = date_string
            global_found.append(found)
    main_helper.export_data(
        mass_messages, mass_message_path)
    return global_found
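The refresh check above keys on an MD5 fingerprint of the session IP plus a configured salt, redone when the fingerprint changes or a day passes; a self-contained sketch of that technique (the salt, IP, and cached record are illustrative):

import hashlib
from datetime import datetime, timedelta

salt = "configured_random_string"  # really json_global_settings["random_string"]
current_ip = "203.0.113.7"         # really the session's IP
current_hash = hashlib.md5(f"{current_ip}{salt}".encode("utf-8")).hexdigest()

# A previously cached record, shaped like the entries written above
record = {"hashed_ip": "", "date_hashed": "01-01-2021 00:00:00"}
recorded = datetime.strptime(record["date_hashed"], "%d-%m-%Y %H:%M:%S")
# Refresh when the fingerprint changed or the record is older than a day
needs_refresh = (record["hashed_ip"] != current_hash
                 or datetime.today() > recorded + timedelta(days=1))
print(needs_refresh)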
Example #6
def start_datascraper(json_config: dict, site_name_lower: str, api: Optional[OnlyFans.start] = None, webhooks=True):
    json_settings = json_config["settings"]
    json_webhooks = json_settings["webhooks"]
    json_sites = json_config["supported"]
    domain = json_settings["auto_site_choice"]
    main_helper.assign_vars(json_config)

    json_site_settings = json_sites[site_name_lower]["settings"]

    auto_model_choice = json_site_settings["auto_model_choice"]
    if isinstance(auto_model_choice, str):
        temp_identifiers = auto_model_choice.split(",")
        identifiers = [x for x in temp_identifiers if x]
    else:
        identifiers = []
    auto_profile_choice = json_site_settings["auto_profile_choice"]
    subscription_array = []
    original_sessions = api_helper.create_session(settings=json_settings)
    original_sessions = [x for x in original_sessions if x]
    if not original_sessions:
        print("Unable to create session")
        return False
    archive_time = timeit.default_timer()
    if site_name_lower == "onlyfans":
        site_name = "OnlyFans"
        module = m_onlyfans
        if not api:
            api = OnlyFans.start()
            api = main_helper.process_profiles(
                json_settings, original_sessions, site_name, api)

        subscription_array = []
        auth_count = 0
        jobs = json_site_settings["jobs"]
        subscription_list = module.format_options(
            api.auths, "users")
        if not auto_profile_choice:
            print("Choose Profile")
        auths = choose_option(
            subscription_list, auto_profile_choice, True)
        api.auths = [x.pop(0) for x in auths]
        for auth in api.auths:
            if not auth.auth_details:
                continue
            module.assign_vars(auth.auth_details, json_config,
                               json_site_settings, site_name)
            setup = False
            setup, subscriptions = module.account_setup(
                auth, identifiers, jobs, auth_count)
            if not setup:
                if webhooks:
                    x = main_helper.process_webhooks(
                        api, "auth_webhook", "failed")
                auth_details = {}
                auth_details["auth"] = auth.auth_details.__dict__
                profile_directory = auth.profile_directory
                if profile_directory:
                    user_auth_filepath = os.path.join(
                        auth.profile_directory, "auth.json")
                    main_helper.export_data(
                        auth_details, user_auth_filepath)
                continue
            auth_count += 1
            subscription_array += subscriptions
            x = main_helper.process_webhooks(
                api, "auth_webhook", "succeeded")
        subscription_list = module.format_options(
            subscription_array, "usernames")
        if jobs["scrape_paid_content"]:
            print("Scraping Paid Content")
            paid_content = module.paid_content_scraper(api, identifiers)
        if jobs["scrape_names"]:
            print("Scraping Subscriptions")
            names = main_helper.process_names(
                module, subscription_list, auto_model_choice, api, json_config, site_name_lower, site_name)
        x = main_helper.process_downloads(api, module)
        if webhooks:
            x = main_helper.process_webhooks(
                api, "download_webhook", "succeeded")
    elif site_name_lower == "starsavn":
        pass
        # site_name = "StarsAVN"
        # original_api = StarsAVN
        # module = m_starsavn
        # apis = main_helper.process_profiles(
        #     json_settings, original_sessions, site_name, original_api)
        # auto_profile_choice = json_site_settings["auto_profile_choice"]
        # subscription_array = []
        # auth_count = -1
        # jobs = json_site_settings["jobs"]
        # subscription_list = module.format_options(
        #     apis, "users")
        # apis = choose_option(
        #     subscription_list, auto_profile_choice)
        # apis = [x.pop(0) for x in apis]
        # for api in apis:
        #     module.assign_vars(api.auth.auth_details, json_config,
        #                        json_site_settings, site_name)
        #     identifier = ""
        #     setup = False
        #     setup = module.account_setup(api, identifier=identifier)
        #     if not setup:
        #         auth_details = api.auth.auth_details.__dict__
        #         user_auth_filepath = os.path.join(
        #             api.auth.profile_directory, "auth.json")
        #         main_helper.export_data(
        #             auth_details, user_auth_filepath)
        #         continue
        #     if jobs["scrape_names"]:
        #         array = module.manage_subscriptions(
        #             api, auth_count, identifier=identifier)
        #         subscription_array += array
        # subscription_list = module.format_options(
        #     subscription_array, "usernames")
        # if jobs["scrape_paid_content"]:
        #     print("Scraping Paid Content")
        #     paid_content = module.paid_content_scraper(apis)
        # if jobs["scrape_names"]:
        #     print("Scraping Subscriptions")
        #     names = main_helper.process_names(
        #         module, subscription_list, auto_model_choice, apis, json_config, site_name_lower, site_name)
        # x = main_helper.process_downloads(apis, module)
    stop_time = str(
        int(timeit.default_timer() - archive_time) / 60)[:4]
    print('Archive Completed in ' + stop_time + ' Minutes')
    return api
Example #7
def start_datascraper(json_config,
                      site_name_lower,
                      apis: list = [],
                      webhooks=True):
    json_settings = json_config["settings"]
    json_sites = json_config["supported"]
    domain = json_settings["auto_site_choice"]
    main_helper.assign_vars(json_config)

    json_site_settings = json_sites[site_name_lower]["settings"]

    auto_scrape_names = json_site_settings["auto_scrape_names"]
    if isinstance(auto_scrape_names, str):
        temp_identifiers = auto_scrape_names.split(",")
        identifiers = [x for x in temp_identifiers if x]
    else:
        identifiers = []
    auto_profile_choice = json_site_settings["auto_profile_choice"]
    subscription_array = []
    original_sessions = api_helper.create_session(settings=json_settings)
    original_sessions = [x for x in original_sessions if x]
    if not original_sessions:
        print("Unable to create session")
        return False
    session_manager = api_helper.session_manager()
    session_manager.sessions = original_sessions
    session_manager = api_helper.stimulate_sessions(session_manager)

    # offset_array = ["https://checkip.amazonaws.com"]*2
    # pool = api_helper.multiprocessing()
    # count = 60
    # while True:
    #     test_ip2 = pool.starmap(multi, product(
    #         offset_array, [original_sessions[0]]))[0]
    #     if test_ip == test_ip2:
    #         print(f"SAME IP: {test_ip2} - WAITING {count} second(s)")
    #         time.sleep(count)
    #     else:
    #         print(f"NEW IP: {test_ip2} - TIME: {count}")
    #         print
    #     count+=1
    #     print
    archive_time = timeit.default_timer()
    if site_name_lower == "onlyfans":
        site_name = "OnlyFans"
        original_api = OnlyFans
        module = m_onlyfans
        if not apis:
            apis = main_helper.process_profiles(json_settings, session_manager,
                                                site_name, original_api)
        else:
            for api in apis:
                api.session_manager = session_manager
        subscription_array = []
        auth_count = -1
        jobs = json_site_settings["jobs"]
        subscription_list = module.format_options(apis, "users")
        if not auto_profile_choice:
            print("Choose Profile")
        apis = choose_option(subscription_list, auto_profile_choice)
        apis = [x.pop(0) for x in apis]
        for api in apis:
            module.assign_vars(api.auth.auth_details, json_config,
                               json_site_settings, site_name)
            setup = False
            setup, subscriptions = module.account_setup(api, identifiers, jobs)
            if not setup:
                auth_details = {}
                auth_details["auth"] = api.auth.auth_details.__dict__
                profile_directory = api.auth.profile_directory
                if profile_directory:
                    user_auth_filepath = os.path.join(
                        api.auth.profile_directory, "auth.json")
                    main_helper.export_data(auth_details, user_auth_filepath)
                continue
            subscription_array += subscriptions
        subscription_list = module.format_options(subscription_array,
                                                  "usernames")
        if jobs["scrape_paid_content"]:
            print("Scraping Paid Content")
            paid_content = module.paid_content_scraper(apis, identifiers)
        if jobs["scrape_names"]:
            print("Scraping Subscriptions")
            names = main_helper.process_names(module, subscription_list,
                                              auto_scrape_names, apis,
                                              json_config, site_name_lower,
                                              site_name)
        x = main_helper.process_downloads(apis, module)
        if webhooks:
            x = main_helper.process_webhooks(apis)
    elif site_name_lower == "starsavn":
        site_name = "StarsAVN"
        original_api = StarsAVN
        module = m_starsavn
        apis = main_helper.process_profiles(json_settings, original_sessions,
                                            site_name, original_api)
        auto_profile_choice = json_site_settings["auto_profile_choice"]
        subscription_array = []
        auth_count = -1
        jobs = json_site_settings["jobs"]
        subscription_list = module.format_options(apis, "users")
        apis = choose_option(subscription_list, auto_profile_choice)
        apis = [x.pop(0) for x in apis]
        for api in apis:
            module.assign_vars(api.auth.auth_details, json_config,
                               json_site_settings, site_name)
            identifier = ""
            setup = False
            setup = module.account_setup(api, identifier=identifier)
            if not setup:
                auth_details = api.auth.auth_details.__dict__
                user_auth_filepath = os.path.join(api.auth.profile_directory,
                                                  "auth.json")
                main_helper.export_data(auth_details, user_auth_filepath)
                continue
            if jobs["scrape_names"]:
                array = module.manage_subscriptions(api,
                                                    auth_count,
                                                    identifier=identifier)
                subscription_array += array
        subscription_list = module.format_options(subscription_array,
                                                  "usernames")
        if jobs["scrape_paid_content"]:
            print("Scraping Paid Content")
            paid_content = module.paid_content_scraper(apis)
        if jobs["scrape_names"]:
            print("Scraping Subscriptions")
            names = main_helper.process_names(module, subscription_list,
                                              auto_scrape_names, apis,
                                              json_config, site_name_lower,
                                              site_name)
        x = main_helper.process_downloads(apis, module)
    stop_time = str(int(timeit.default_timer() - archive_time) / 60)[:4]
    print('Archive Completed in ' + stop_time + ' Minutes')
    session_manager.kill = True
    return apis
Example #8
async def start_datascraper(
    json_config: dict[Any, Any],
    site_name_lower: str,
    api: Optional[OnlyFans.start | Fansly.start] = None,
    webhooks: bool = True,
) -> Optional[OnlyFans.start | Fansly.start]:
    json_settings = json_config["settings"]
    json_webhooks = json_settings["webhooks"]
    json_sites = json_config["supported"]
    domain = json_settings["auto_site_choice"]
    main_helper.assign_vars(json_config)

    json_site_settings = json_sites[site_name_lower]["settings"]

    auto_model_choice = json_site_settings["auto_model_choice"]
    if isinstance(auto_model_choice, str):
        temp_identifiers = auto_model_choice.split(",")
        identifiers = [x for x in temp_identifiers if x]
    else:
        identifiers = []
    auto_profile_choice = json_site_settings["auto_profile_choice"]
    subscription_array = []
    proxies: list[str] = await api_helper.test_proxies(json_settings["proxies"])
    if json_settings["proxies"] and not proxies:
        print("Unable to create session")
        return None
    archive_time = timeit.default_timer()
    match site_name_lower:
        case "onlyfans":
            site_name = "OnlyFans"
            module = m_onlyfans
            if not api:
                api = OnlyFans.start(max_threads=json_settings["max_threads"])
                api.settings = json_config
                api = main_helper.process_profiles(json_settings, proxies, site_name, api)

            subscription_array = []
            auth_count = 0
            jobs = json_site_settings["jobs"]
            subscription_list = module.format_options(api.auths, "users")
            if not auto_profile_choice:
                print("Choose Profile")
            auths = choose_option(subscription_list, auto_profile_choice, True)
            api.auths = [x.pop(0) for x in auths]
            for auth in api.auths:
                if not auth.auth_details:
                    continue
                module.assign_vars(
                    auth.auth_details, json_config, json_site_settings, site_name
                )
                setup = False
                setup, subscriptions = await module.account_setup(
                    auth, identifiers, jobs, auth_count
                )
                if not setup:
                    if webhooks:
                        await main_helper.process_webhooks(api, "auth_webhook", "failed")
                    auth_details = {}
                    auth_details["auth"] = auth.auth_details.export()
                    profile_directory = auth.profile_directory
                    if profile_directory:
                        user_auth_filepath = os.path.join(
                            auth.profile_directory, "auth.json"
                        )
                        main_helper.export_data(auth_details, user_auth_filepath)
                    continue
                auth_count += 1
                subscription_array += subscriptions
                await main_helper.process_webhooks(api, "auth_webhook", "succeeded")
                # Do stuff with authed user
            subscription_list = module.format_options(
                subscription_array, "usernames", api.auths
            )
            if jobs["scrape_paid_content"] and api.has_active_auths():
                print("Scraping Paid Content")
                await module.paid_content_scraper(api, identifiers)
            if jobs["scrape_names"] and api.has_active_auths():
                print("Scraping Subscriptions")
                await main_helper.process_names(
                    module,
                    subscription_list,
                    auto_model_choice,
                    api,
                    json_config,
                    site_name_lower,
                    site_name,
                )
            await main_helper.process_downloads(api, module)
            if webhooks:
                await main_helper.process_webhooks(api, "download_webhook", "succeeded")
        case "fansly":
            site_name = "Fansly"
            module = m_fansly
            if not api:
                api = Fansly.start(max_threads=json_settings["max_threads"])
                api.settings = json_config
                api = main_helper.process_profiles(json_settings, proxies, site_name, api)

            subscription_array = []
            auth_count = 0
            jobs = json_site_settings["jobs"]
            subscription_list = module.format_options(api.auths, "users")
            if not auto_profile_choice:
                print("Choose Profile")
            auths = choose_option(subscription_list, auto_profile_choice, True)
            api.auths = [x.pop(0) for x in auths]
            for auth in api.auths:
                if not auth.auth_details:
                    continue
                module.assign_vars(
                    auth.auth_details, json_config, json_site_settings, site_name
                )
                setup = False
                setup, subscriptions = await module.account_setup(
                    auth, identifiers, jobs, auth_count
                )
                if not setup:
                    if webhooks:
                        await main_helper.process_webhooks(api, "auth_webhook", "failed")
                    auth_details = {}
                    auth_details["auth"] = auth.auth_details.export()
                    profile_directory = auth.profile_directory
                    if profile_directory:
                        user_auth_filepath = os.path.join(
                            auth.profile_directory, "auth.json"
                        )
                        main_helper.export_data(auth_details, user_auth_filepath)
                    continue
                auth_count += 1
                subscription_array += subscriptions
                await main_helper.process_webhooks(api, "auth_webhook", "succeeded")
                # Do stuff with authed user
            subscription_list = module.format_options(
                subscription_array, "usernames", api.auths
            )
            if jobs["scrape_paid_content"] and api.has_active_auths():
                print("Scraping Paid Content")
                await module.paid_content_scraper(api, identifiers)
            if jobs["scrape_names"] and api.has_active_auths():
                print("Scraping Subscriptions")
                await main_helper.process_names(
                    module,
                    subscription_list,
                    auto_model_choice,
                    api,
                    json_config,
                    site_name_lower,
                    site_name,
                )
            await main_helper.process_downloads(api, module)
            if webhooks:
                await main_helper.process_webhooks(api, "download_webhook", "succeeded")
        case "starsavn":
            pass
            # site_name = "StarsAVN"
            # original_api = StarsAVN
            # module = m_starsavn
            # apis = main_helper.process_profiles(
            #     json_settings, original_sessions, site_name, original_api)
            # auto_profile_choice = json_site_settings["auto_profile_choice"]
            # subscription_array = []
            # auth_count = -1
            # jobs = json_site_settings["jobs"]
            # subscription_list = module.format_options(
            #     apis, "users")
            # apis = choose_option(
            #     subscription_list, auto_profile_choice)
            # apis = [x.pop(0) for x in apis]
            # for api in apis:
            #     module.assign_vars(api.auth.auth_details, json_config,
            #                        json_site_settings, site_name)
            #     identifier = ""
            #     setup = False
            #     setup = module.account_setup(api, identifier=identifier)
            #     if not setup:
            #         auth_details = api.auth.auth_details.__dict__
            #         user_auth_filepath = os.path.join(
            #             api.auth.profile_directory, "auth.json")
            #         main_helper.export_data(
            #             auth_details, user_auth_filepath)
            #         continue
            #     if jobs["scrape_names"]:
            #         array = module.manage_subscriptions(
            #             api, auth_count, identifier=identifier)
            #         subscription_array += array
            # subscription_list = module.format_options(
            #     subscription_array, "usernames")
            # if jobs["scrape_paid_content"]:
            #     print("Scraping Paid Content")
            #     paid_content = module.paid_content_scraper(apis)
            # if jobs["scrape_names"]:
            #     print("Scraping Subscriptions")
            #     names = main_helper.process_names(
            #         module, subscription_list, auto_model_choice, apis, json_config, site_name_lower, site_name)
            # x = main_helper.process_downloads(apis, module)
    stop_time = str(int(timeit.default_timer() - archive_time) / 60)[:4]
    print("Archive Completed in " + stop_time + " Minutes")
    return api
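A hypothetical top-level caller for this async variant; the config path and stdlib loading are illustrative stand-ins for the project's own startup code:

import asyncio
import json

async def main():
    with open(".settings/config.json") as f:
        json_config = json.load(f)
    # Returns the API object, or None when no usable proxies are available
    api = await start_datascraper(json_config, "onlyfans")

asyncio.run(main())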