Example 1
def search_direct(docketnum, court):
    """ Takes in a docket number and a court name as parameters, and returns
        the Docket Alarm search results. You can make calls to the /getdocket
        endpoint with these results to get more detailed information on the
        docket you are looking for.
    """

    user = login.Credentials()
    searchdirect_url = "https://www.docketalarm.com/api/v1/searchdirect/"

    data = {
        'login_token': user.authenticate(),
        'client_matter':"",
        # 'party_name':party_name,
        'docketnum':docketnum,
        'court': court,
        # 'case_type':'CF',

    }
    
    result = requests.post(searchdirect_url, data)

    result_json = result.json()

    return result_json
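A minimal usage sketch, assuming the surrounding module already imports requests and the project's login module; the docket number and court name below are hypothetical placeholders:

# Hypothetical example: look up a docket directly and inspect the raw response.
# The docket number and court name are placeholders, not real identifiers.
results = search_direct("2:20-cv-00001", "California Central District Court")

# search_direct() returns the parsed JSON as a plain dictionary, so we can print it
# (or pull out whatever fields we need for a later call to the /getdocket endpoint).
print(results)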
Example 2
def fetch_updated_court_list():
    """
    Prints all the courts to the console and returns a list of courts
    """

    user = login.Credentials()

    # Sending a GET request to the /searchdirect/ endpoint with only the login_token and
    # client_matter will return a list of courthouses. This list is every court we can search,
    # with each name formatted the way the API expects when we later search for dockets.
    searchdirect_url = "https://www.docketalarm.com/api/v1/searchdirect/"

    data = {
        'login_token': user.authenticate(),
        'client_matter': global_variables.CLIENT_MATTER,
    }

    # Sends the request and returns a Response object containing JSON
    result = requests.get(searchdirect_url, data)

    # We call the .json() method on the response to turn its JSON body into a Python dictionary
    result_json = result.json()

    # The list of courts is stored in the 'courts' key we assign to a variable here
    courts = result_json['courts']

    # Designates the file path where the new list of courts will be stored
    updated_courts_output_file = os.path.join(CURRENT_DIR, "docs",
                                              "updated-courts.txt")

    # Deletes an old copy of this file if one exists
    try:
        os.remove(updated_courts_output_file)
    except:
        pass

    # Opens the output txt file once...
    with open(updated_courts_output_file, "a") as txt:
        # Then loops through every court in the list...
        for court in courts:
            # And writes that court to the file, one per line, until all courts are written.
            txt.write(court + "\n")

    # Opens the new list of courts with its default application (Windows only)
    try:
        os.startfile(updated_courts_output_file)
    except:
        pass

    # The function call returns a list object with all the courts we can search with Docket Alarm
    return courts
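A short usage sketch, assuming the rest of the module (requests, os, login, global_variables, CURRENT_DIR) is set up as in the project; the court name checked below is only an illustrative placeholder:

# Hypothetical example: refresh the court list and check a court name before using it in an input CSV.
# The court name below is a placeholder and may not match Docket Alarm's exact formatting.
courts = fetch_updated_court_list()

if "California Central District Court" in courts:
    print("Court name recognized; it can be used in the input CSV.")
else:
    print("Court name not found; check docs/updated-courts.txt for the exact spelling.")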
Example 3
def search_pacer(docketnum, court):

    user = login.Credentials()

    url = "https://www.docketalarm.com/api/v1/searchpacer/"

    data = {
        'login_token':user.authenticate(),
        'client_matter':"",
        # 'party_name':party_name,
        'docket_num':docketnum,
        'court_region': court,
        # 'case_type':'CF',

    }
    
    result = requests.get(url, data)

    result_json = result.json()

    return result_json
Example 4
def other_options_menu():
    other_options = """
Type in one of the following numbers and press ENTER to specify your choice:

[0] Return to menu.

[1] Fetch updated list of compatible courts.
    (You use these names in your input CSV.                                          )
    (The list is always being updated and changes are not immediately added to Github)

[2] Log Out of Docket Alarm.

Enter your response below.[0/1/2]
    """
    print(other_options)
    userChoice = input()
    if userChoice == "0":
        clear()
        welcome()
    elif userChoice == "1":
        clear()
        print("Fetching updated court list...")
        fetch_updated_court_list.fetch_updated_court_list()
        print("Done.")
        input()
        welcome()
    elif userChoice == "2":
        clear()
        user = login.Credentials()
        print("\nAre you sure you want to log out of Docket Alarm?[Y/n]")
        userChoice = input()
        if userChoice.upper() == "Y":
            user.logout()
            clear()
            welcome()
        else:
            clear()
            other_options_menu()
    else:
        print("Please Enter Valid input (0, 1 or 2)")
Example 5
def download_from_link_list(link_list):
    """
    Downloads PDF documents from the web and saves them in a specified folder.
    Takes in 3 string arguments:
    1. Link to a pdf file
    2. File name we will save as
    3. Name of the folder we will create to store our PDFs.
    Notice how the arguments are the same as what the get_urls() function returns.
    This function Isn't made to be used on its own, but can be.
    """

    # We store a user object we can use to login
    user = login.Credentials()

    # We unpack the tuple, assigning each value to a human-readable variable name.
    link, fileName, folderName, outputPath, CLIENT_MATTER = link_list

    # The directory where we will create the subdirectories within for each individual docket
    outputDirectoryPath = os.path.join(outputPath, folderName)
    # The path we are saving the file to, inside the subdirectory we will create.
    outputFilePath = os.path.join(outputDirectoryPath, f"{fileName}.pdf")

    # We open a lock so threads can't run this block of code simultaneously since that would cause errors
    with lock:
        # If the directory for the docket doesn't yet exist...
        if not os.path.exists(outputDirectoryPath):

            # Then, create it!
            os.makedirs(outputDirectoryPath)

    # We prepare our authentication token to pass as a parameter with our HTTP request for the PDF file. You must be logged in to access the files.
    params = {
        "login_token": user.authenticate(),
        "client_matter": CLIENT_MATTER,
    }

    # We then make an http request to the pdf link and save the result in a variable. We pass the authentication token as a parameter.
    result = requests.get(link, stream=True, params=params)

    try:
        # If the HTTP request failed, we have it throw a detailed error message. This is not immediately shown to the user and we let the download
        # continue for now.
        result.raise_for_status()

    except Exception as a:
        timeNow = datetime.datetime.now().strftime("%I:%M%p %B %d, %Y")
        with lock:
            # We write the error to log/log.txt with a timestamp and detailed information about which case caused the error.
            with open(os.path.join(CURRENT_DIR, 'log', 'log.txt'),
                      'a') as errorlog:
                errorlog.write(f"\n{timeNow}\n")
                errorlog.write(f"{a}")
                errorlog.write(
                    f"\n{link}\n{fileName}\n{folderName}\n{outputPath}\n------------------"
                )

            # We write the error to a csv file that will be stored in the log folder when the download finishes.
            tableErrorLog.append_error_table(f"{a}", folderName, fileName)
            return

    try:
        # Once the folder is created, we can create a file inside it, open it, and...
        with open(outputFilePath, "wb") as e:

            # Write the contents of the PDF to the place we specify.
            e.write(result.content)

    except Exception as a:
        print(a)

    return
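A minimal sketch of driving this function directly, assuming the same five-value layout that the unpacking above expects; the link, file name, folder name, and output path are all hypothetical placeholders (in the project these tuples come from get_urls() and are run through a threaded wrapper):

# Hypothetical example: build one (link, fileName, folderName, outputPath, CLIENT_MATTER)
# tuple and download a single PDF with it. Every value below is a placeholder.
example_link = (
    "https://www.docketalarm.com/some/document.pdf",  # link to the PDF
    "document_1",                                     # file name to save as (without the .pdf extension)
    "Example Case 2-20-cv-00001",                     # folder (docket) the PDF belongs to
    os.path.join(CURRENT_DIR, "pdf-output"),          # directory that will hold the per-docket folders
    "",                                               # client matter, blank if none
)

download_from_link_list(example_link)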
Example 6
def query_to_tables(query, results_limit, output_path, result_order=None):
    """
    Takes in a search query as a sting,
    the amount of results you want returned as a string,
    the path you want to save to as a string,
    and optionally, the order of your results as a string.

    Generates a folder within the folder you specify and
    populates it with 4 spreadsheets containing the docket data
    from your search.
    """

    # We convert the number of results the user wants to an integer so we can work with the number.
    results_limit = int(results_limit)

    def fill_docketInformation(result, docket):
        """
        This nested function populates the docketInformation dataframe.
        """
        if 'info' not in docket:
            return
        # We loop through all the keys present in the dockets info dictionary.
        for key in docket['info']:

            # We create the new row we want to add as a dictionary.
            # Using .get() allows us to specify the key that we want, and specify a default value as the second argument in
            # case the key doesn't exist.
            new_docketInformation_row = {
                'Docket Number':
                result['docket'],
                'Court Name':
                result['court'],
                'Case Title':
                docket['info'].get('title', result.get("title", None)),
                'Case Info Field':
                key,
                'Case Info Values':
                docket['info'][key],
            }

            # We append the global dataframe with the new row represented as a dictionary.
            # ignore_index=True is required when appending a dictionary; it gives the new row the next integer index.
            global docketInformation
            appender = docketInformation.append(new_docketInformation_row,
                                                ignore_index=True)

            # When we append a dataframe, the original is not changed, rather a new version of the dataframe with the added row is generated.
            # We replace the original with the new version so our changes get saved.
            docketInformation = appender

    def fill_docketEntries(result, docket):
        """
        This nested function populates the docketEntries dataframe.
        """

        # We loop through each dictionary within the docket_report list
        if 'docket_report' not in docket:
            print(docket)
            return

        for document in docket['docket_report']:

            # We create the new row we want to add as a dictionary.
            # Using .get() allows us to specify the key that we want, and specify a default value as the second argument in
            # case the key doesn't exist.
            new_docketEntries_row = {
                'Docket Number':
                result['docket'],
                'Court Name':
                result['court'],
                'Case Title':
                docket['info'].get('title', result.get("title", None)),
                'Docket Entry Date':
                document.get('entry_date', None),
                'Docket Entry Numbers':
                document.get('number', None),
                'Docket Entry Contents':
                removehtml(document.get('contents', None)),
            }

            # We append the global dataframe with the new row represented as a dictionary.
            # ignore_index=True is required when appending a dictionary; it gives the new row the next integer index.
            global docketEntries
            appender = docketEntries.append(new_docketEntries_row,
                                            ignore_index=True)

            # When we append a dataframe, the original is not changed, rather a new version of the dataframe with the added row is generated.
            # We replace the original with the new version so our changes get saved.
            docketEntries = appender

    def fill_parties(result, docket):
        """
        This nested function populates the parties dataframe.
        """

        # The parties key is not always present in our response.
        if 'parties' not in docket:
            # If it's not present, we don't add to the dataframe and we exit the function.
            print(docket)
            return

        for party in docket.get('parties', None):

            # We create the new row we want to add as a dictionary.
            # Using .get() allows us to specify the key that we want, and specify a default value as the second argument in
            # case the key doesn't exist.
            new_parties_row = {
                'Docket Number':
                result.get('docket', None),
                'Court Name':
                result.get('court', None),
                'Case Title':
                docket['info'].get('title', result.get("title", None)),
                'Party Name':
                party.get('name_normalized', party.get('name')),
                'Party Type':
                party.get('type', None),
            }

            # We append the global dataframe with the new row represented as a dictionary.
            # ignore_index=True is required when appending a dictionary; it gives the new row the next integer index.
            global parties
            appender = parties.append(new_parties_row, ignore_index=True)

            # When we append a dataframe, the original is not changed, rather a new version of the dataframe with the added row is generated.
            # We replace the original with the new version so our changes get saved.
            parties = appender

    def fill_attorneysAndFirms(result, docket):
        """
        This nested function populates the attorneysAndFirms dataframe.
        """

        # The parties key is not always present in our response.
        if 'parties' not in docket:
            # If it's not present, we don't add to the dataframe and we exit the function.
            return

        # We loop through each dictionary within the parties list of dictionaries.
        for party in docket['parties']:

            # The counsel key will not always be present in the dictionary.
            if 'counsel' not in party:
                # If it's not, we don't write to the dataframe and we exit the function.
                return
            for counsel in party['counsel']:

                # We create the new row we want to add as a dictionary.
                # Using .get() allows us to specify the key that we want, and specify a default value as the second argument in
                # case the key doesn't exist.
                new_attorneysAndFirms_row = {
                    'Docket Number': result.get('docket', None),
                    'Court Name': result.get('court', None),
                    'Attorney Name': counsel.get("name", None),
                    'Attorney Firm': counsel.get("firm", None),
                    'Attorney Email': counsel.get("email", None),
                    'Attorney Phone': counsel.get("phone", None),
                }

                # We append the global dataframe with the new row represented as a dictionary.
                # ignore_index=True is required when appending a dictionary; it gives the new row the next integer index.
                global attorneysAndFirms
                appender = attorneysAndFirms.append(new_attorneysAndFirms_row,
                                                    ignore_index=True)

                # When we append a dataframe, the original is not changed, rather a new version of the dataframe with the added row is generated.
                # We replace the original with the new version so our changes get saved.
                attorneysAndFirms = appender

    # After defining all of our nested functions, this is where the query_to_tables() function begins.

    # First we let the user know to wait, so they don't press any buttons that get entered as the input they will be prompted for when this is done loading.
    print("\n")

    # We create our user object to log in. We can use attributes and methods to access the username, password, and authentication token of our currently signed in user.
    user = login.Credentials()

    print("Querying, please wait...")

    # We run our search, using the query, the number of results, and the order that the user specified in the menu.
    searchResults = user_tools.search_docket_alarm(
        (user.username, user.password),
        query,
        limit=results_limit,
        result_order=result_order)

    searchResults = searchResults[0:results_limit]

    # We let the user know how many results were returned for their search and ask them to confirm to proceed.
    print(
        f"\nThis search query resulted in {len(searchResults)} results. Proceed? [Y/n]"
    )

    # We store their answer in a variable.
    user_proceed_choice = input()

    # If the user says no...
    if user_proceed_choice.lower() == "n":
        # We do not proceed. The user is returned to the menu.
        menus.spreadsheet_generator_menu()
    # If answers something other than y or n (yes or no)...
    elif user_proceed_choice.lower() != "y" and user_proceed_choice.lower(
    ) != "n":
        # We let them know their response was invalid...
        print("Invalid response. Returning to menu.")
        # We pause the script until they press enter, so we know they're aware of whats happening...
        input()
        # And we return them to the menu.
        menus.spreadsheet_generator_menu()
    # If the user answers Y (yes), then the script continues.
    menus.clear()

    # We clear the menu and display ascii art in red.
    print(Fore.RED + menus.msg2)

    # We are about to initialize our progress bar. When we do this, we need to specify the maximum number of loops that the
    # progress bar is tracking. This gets passed as an argument.
    progressbar_maximum = len(searchResults)

    # We initialize our progress bar, specifying the text that will be displayed alongside the bar, and the maximum amount of loops
    # the bar will track.
    bar = Bar('Generating CSVs', max=progressbar_maximum)

    # The search results that are returned are a list of dictionaries. We begin to iterate through them.
    for result in searchResults:

        # We use the get_docket() function to return the docket data for every result in our search query.
        # To pull the docket, we specify the docket number and the court. We specify if the data is cached or uncached, and what the client matter is.
        docket = user_tools.get_docket(
            user.authenticate(),
            result['docket'],
            result['court'],
            cached=global_variables.IS_CACHED,
            client_matter=global_variables.CLIENT_MATTER)

        # Through every iteration over our results, we pass the result data and the docket data for each result to each of the
        # nested functions we defined at the beginning of this function. The dataframes that are declared as global variables at the
        # top of this module are appended with new data with each iteration.
        fill_docketInformation(result, docket)
        fill_docketEntries(result, docket)
        fill_parties(result, docket)
        fill_attorneysAndFirms(result, docket)

        # With each iteration, we move our progress bar forward until it hits its maximum.
        bar.next()

    # We get the current date and time to use in the name of the output folder we will generate. This helps us generate
    # unique folder names each time we run the script.
    timeNow = datetime.datetime.now().strftime("%I%M%p %B %d %Y")

    # The complete name of the folder will be the search entered, followed by the current time.
    # We use the cleanhtml function to remove any characters that are not allowed in file or folder names.
    # cleanhtml() is imported from get_pdfs.py.
    containing_folder_name = f"{cleanhtml(query)} - {timeNow}"

    # We put together the absolute path to the folder we want to create and populate it.
    output_directory = os.path.join(output_path, containing_folder_name)

    # We check to see if the folder already exists...
    if not os.path.exists(output_directory):
        # If it doesn't, we create it.
        os.makedirs(output_directory)

    # We create strings for the absolute paths to each individual csv file we will be creating, with the .csv extension included.
    docketInformation_outputFile = os.path.join(output_directory,
                                                "docketInformation.csv")
    docketEntries_outputFile = os.path.join(output_directory,
                                            "docketEntries.csv")
    parties_outputFile = os.path.join(output_directory, "parties.csv")
    attorneysAndFirms_outputFile = os.path.join(output_directory,
                                                "attorneysAndFirms.csv")

    # We use the .to_csv() method on our dataframe object to save the filled out dataframes to csv files at the paths we specified above.
    # index=False specifies that we do not want to generate a numerical index column.
    docketInformation.to_csv(docketInformation_outputFile, index=False)
    docketEntries.to_csv(docketEntries_outputFile, index=False)
    parties.to_csv(parties_outputFile, index=False)
    attorneysAndFirms.to_csv(attorneysAndFirms_outputFile, index=False)

    # We set the progress bar to its completed state.
    bar.finish()
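A brief usage sketch, assuming generate_spreadsheets has been imported with its global dataframes initialized; the query, result limit, and output path below are placeholders:

# Hypothetical example: run a search and write the four CSVs into a folder on the desktop.
# The query string, result limit, and output path are placeholders.
query_to_tables(
    "patent infringement",       # Docket Alarm search query
    "25",                        # number of results, passed as a string as the docstring describes
    os.path.join(os.path.expanduser("~"), "Desktop"),  # folder the output directory is created inside
    result_order=None,           # None sorts by relevance, matching choice [1] in the menu that calls this
)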
Example 7
def welcome():
    """
    The code that executes at launch. It guides the user through menus and starts other functions from the other folders
    based on the users choices.
    """
    clear()
    print(msg)
    print(Style.RESET_ALL)
    print("\n")
    print("Welcome!")
    input("Press ENTER key to begin!")
    clear()
    print(feesNotice)
    input()
    clear()

    # Checks to see if the account information was stored on the local machine previously
    if not os.path.isfile(
            os.path.join(CURRENT_DIR, "sav", "credentials.pickle")):

        # If the user hasn't successfully logged in before, it takes them to a menu sequence to log in, and saves the info locally
        # for the next time the script is run.
        login.login_interface()

    user = login.Credentials()

    print(f"You are logged in as: {user.username}")
    instructions = """
Instructions:

This program takes in a csv file full of docket numbers and will automatically
populate 2 folders with the raw JSON data and all of the PDF documents associated
with that docket.

Press ENTER to continue.
    """
    print(instructions)
    input()
    clear()

    options = """
Type in one of the following numbers and press ENTER to specify your choice:

[1] Get all JSON files and PDF files.

[2] Get JSON files only.

[3] Get PDF files only.
    ( Only select 3 if you already have a directory full of JSON files. )
    ( The JSON files are needed to extract the download links from.     )

[4] Search for Dockets

[5] More options.

Enter your response below. [1/2/3/4/5]
    """
    print(options)

    def handle_input():
        """
        Prompts the user for a choice and calls the function from the 'modules' folder that corresponds
        with that choice.
        """
        userChoice = input()

        # Choice 1 is downloading all json and pdf files.
        if userChoice == "1":
            clear()
            print(
                "\nUpon pressing ENTER, a file browser will open.\nPlease select your input CSV file."
            )
            input()
            # Opens a graphical file browser and returns the path to the csv file that the user selected.
            csvChoice = file_browser.browseCSVFiles()

            # Assigns the choice to a global variable, so other modules can find the path that the user specified.
            global_variables.CSV_INPUT_PATH = csvChoice
            clear()
            menus.select_paths_menu()
            clear()
            menus.specify_client_matter_menu()
            print(msg)
            get_json_and_pdfs()
        # Choice 2 is downloading only JSON files.
        elif userChoice == "2":
            clear()
            print(
                "\nUpon pressing ENTER, a file browser will open.\nPlease select your input CSV file."
            )
            input()
            # Opens a graphical file browser and returns the path to the csv file that the user selected.
            csvChoice = file_browser.browseCSVFiles()
            # Assigns the choice to a global variable, so other modules can find the path that the user specified.
            global_variables.CSV_INPUT_PATH = csvChoice
            clear()
            menus.select_paths_menu(pdfOption=False)
            menus.specify_client_matter_menu()
            print(msg)
            get_json.thread_download_json()
        # Choice 3 is downloading only PDF files.
        elif userChoice == "3":
            clear()
            menus.select_paths_menu()
            menus.specify_client_matter_menu()
            print(msg)
            link_list = get_pdfs.get_urls("json-output")
            get_pdfs.thread_download_pdfs(link_list)
        elif userChoice == "4":
            spreadsheet_generator_menu()
        elif userChoice == "5":
            clear()
            menus.other_options_menu()

        # If the user enters anything other than a valid choice, then it tells them their choice is invalid and
        # restarts this function, prompting them to make a choice again.
        else:
            print("Please Enter Valid input (1, 2 or 3)")
            return handle_input()

    handle_input()
    try:
        os.startfile(os.path.join(CURRENT_DIR, "log"))
    except:
        pass

    print("\nDone.")
    input()
Example 8
def spreadsheet_generator_menu():
    import generate_spreadsheets
    clear()

    csv_generator_instructions = """
Instructions:

Make a Docket Alarm search query and 4 spreadsheets will be generated containing data from all of the returned results.
You will browse for an output folder to save your spreadsheets to.
These are not the same format as the spreadsheets used as input for downloading PDFs and JSON.

Press ENTER to continue.
    """

    sort_results_msg = """

How would you like to sort your results?

[1] Search by relevance, which considers matching keywords, court level, and recentness.

[2] Oldest first by docket/document filing date.

[3] Newest first by docket/document filing date.

[4] Order dockets by the date of the most recent entry listed on the docket ascending.

[5] Order dockets by the date of the most recent entry listed on the docket descending.

[6] Random order. (good for sampling)

Enter your choice below. [1/2/3/4/5/6]

"""
    print(Fore.RED + msg2)
    print("\nPress ENTER to begin" + Style.RESET_ALL)
    input()
    clear()
    print(csv_generator_instructions)
    input()
    clear()
    print("\nEnter a Docket Alarm search query.\n")
    print(
        "(This is the same query that you would enter on docketalarm.com.\nFull search documentation can be found at https://www.docketalarm.com/posts/2014/6/23/Terms-and-Connectors-Searching-With-Docket-Alarm/)\n\n"
    )
    users_search_query = input()
    clear()
    print("Calculating maximum number of results, please wait...")
    user = login.Credentials()
    amountOfResults = requests.get(
        "https://www.docketalarm.com/api/v1/search/",
        params={
            "login_token": user.authenticate(),
            "q": users_search_query,
            "limit": "1"
        },
        timeout=60).json()['count']
    clear()
    print(f"Maximum number of results: {amountOfResults}")
    print("\nEnter the number of results you want to return\n\n")
    users_number_of_results = int(input())

    clear()
    print(sort_results_msg)
    sort_choice_input = input()
    if sort_choice_input == "1":
        sort_choice = None
    elif sort_choice_input == "2":
        sort_choice = "date_filed"
    elif sort_choice_input == "3":
        sort_choice = "-date_filed"
    elif sort_choice_input == "4":
        sort_choice = "date_last_filing"
    elif sort_choice_input == "5":
        sort_choice = "-date_last_filing"
    elif sort_choice_input == "6":
        sort_choice = "random"
    else:
        print("Invalid choice. Press ENTER to return to menu.")
        input()
        spreadsheet_generator_menu()
        return

    clear()
    print(
        "\nUpon pressing ENTER, a file browser will open. Please browse to the directory where you\nwould like to save your output folder."
    )
    input()
    users_output_path = file_browser.browseDirectories("csv-output")
    generate_spreadsheets.query_to_tables(users_search_query,
                                          users_number_of_results,
                                          users_output_path,
                                          result_order=sort_choice)
Example 9
def other_options_menu():
    other_options = """
Type in one of the following numbers and press ENTER to specify your choice:

[0] Return to menu.

[1] Fetch updated list of compatible courts.
    (You use these names in your input CSV.                                          )
    (The list is always being updated and changes are not immediately added to Github)

[2] Log Out of Docket Alarm.

[3] Uncached Search

Enter your response below.[0/1/2/3]
    """
    print(other_options)
    userChoice = input()
    if userChoice == "0":
        clear()
        welcome()
    elif userChoice == "1":
        clear()
        print("Fetching updated court list...")
        fetch_updated_court_list.fetch_updated_court_list()
        print("Done.")
        input()
        welcome()
    elif userChoice == "2":
        clear()
        user = login.Credentials()
        print("\nAre you sure you want to log out of Docket Alarm?[Y/n]")
        userChoice = input()
        if userChoice.upper() == "Y":
            user.logout()
            clear()
            welcome()
        else:
            clear()
            other_options_menu()
    elif userChoice == "3":
        clear()
        print(
            "\nUncached searches retrieve more up-to-date results but may result in extra charges.\nWould you like to turn uncached search on?[Y/n]\n"
        )
        userChoice = input()
        if userChoice.upper() == "Y":
            clear()
            global_variables.IS_CACHED = False
            print(
                "\nUncached search is ON and will remain ON until the program is closed."
            )
            print("\nPress ENTER to return to the menu.")
            input()
            welcome()
        elif userChoice.upper() == "N":
            clear()
            other_options_menu()
        else:
            print("Invalid choice entered.\nPress ENTER to return to menu.")
            input()
            clear()
            other_options_menu()

    else:
        print("Please Enter Valid input (0, 1, 2, or 3)")
def download_json_from_list_of_tuples(result_tuple):
    """
    This function takes in a tuple with 5 arguments as strings in order:
    The case name,
    The case number,
    The court the case is in,
    The output path for the download,
    The client matter (The reason for making the call, for billing purposes).
    It downloads json data for each case.
    This function is not called on its own; it is wrapped by the
    thread_download_json() function, which allows each call of the function to be done in its
    own thread, speeding up the download.
    """

    # We unpack the tuple and assign all of its values to human-readable variable names.
    caseName, caseNo, caseCourt, JSON_INPUT_OUTPUT_PATH, CLIENT_MATTER = result_tuple

    user = login.Credentials()

    # The endpoint we will be connecting to. Calls to this endpoint return the json data for the docket we want.
    getdocket_url = "https://www.docketalarm.com/api/v1/getdocket/"

    # The parameters we pass to the endpoint. This is how the API knows how to find what we are looking for.
    data = {
        # The token generated after logging in.
        'login_token': user.authenticate(),
        # The reason for use
        'client_matter': CLIENT_MATTER,
        # The court we want to search.
        'court': caseCourt,
        # The docket number we want data for
        'docket':caseNo,
        # A boolean representing whether or not we want the cached version of the data.
        'cached': config.isCached,
        # Cleans up names
        'normalize':True,
    }

    # Makes the api call. We specify the endpoint and the parameters as arguments. The results of the API call are returned and
    # stored to a variable. 
    result = requests.get(getdocket_url, data)

    

    try:
        # if the api call fails, a detailed error is thrown. The script does not stop and the error message is not immediately shown to the user.
        result.raise_for_status() 
    except:
        # Rather, the error is written to log/log.txt with a timestamp and information about which case could not be downloaded.
        result_json = None
        print(result)
        print(caseName)
        print(caseNo)
        timeNow = datetime.datetime.now().strftime("%I:%M%p %B %d, %Y")
        with open(os.path.join(CURRENT_DIR, 'log', 'log.txt'), 'a') as errorlog:
            errorlog.write(f"\n{timeNow}\n")
            errorlog.write("JSON could not be downloaded:\n")
            errorlog.write(f"{result}: {caseName}, {caseNo}, {caseCourt}\n")
            errorlog.write("------------------")
        return

    # We use .json() to convert the JSON results to a Python dictionary we can more easily work with.
    result_json = result.json()

    
    try:
        # Creates the path where our .json file will be saved to
        filePathNameWExt = os.path.join(JSON_INPUT_OUTPUT_PATH, f"{caseName} {caseNo}" + '.json')


        # We use a lock so this code won't be executed by multiple threads simultaneously, this way we don't get errors.
        with lock:
            # When 'opening' a file that doesn't yet exist, we create that file.
            # Here, we create the json file we'll be saving the data to.
            with open(filePathNameWExt, 'w') as fp:

                # Then we write the data to the newly created .json file.
                json.dump(result_json,fp, indent=3)

    # If the api call was successful, but the writing of the data to a file fails, we display the error message to the user.
    except Exception as e:
        print("\nError writing json file.\nReference the documentation for more information\n")
        input()
        print(e)
    return
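A minimal sketch of the threaded pattern the docstring above describes, assuming a list of the five-value tuples has already been built; the helper name and thread count here are hypothetical, not the project's actual thread_download_json() implementation:

# Hypothetical sketch of the threaded wrapper described in the docstring: run one
# download per tuple across a small pool of worker threads.
import concurrent.futures

def thread_download_json_sketch(result_tuples, max_workers=8):
    # result_tuples is assumed to be a list of
    # (caseName, caseNo, caseCourt, output_path, client_matter) tuples.
    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:
        # map() calls download_json_from_list_of_tuples once per tuple, in parallel,
        # and wrapping it in list() waits for every download to finish.
        list(executor.map(download_json_from_list_of_tuples, result_tuples))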
def spreadsheet_generator_menu():
    import generate_spreadsheets
    clear()

    csv_generator_instructions = """
Instructions:

Make a Docket Alarm search query or import a csv full of case numbers and 4 spreadsheets will be generated containing data from all of the returned results.
You will browse for an output folder to save your spreadsheets to.
These are not the same format as the spreadsheets used as input for downloading PDFs and JSON.

Press ENTER to continue.
    """

    input_preference_msg = """
How would you like to select your cases?

[1/blank] Docket Alarm search query

[2] Input CSV

Enter your choice [1/2], and press ENTER to continue.
"""

    sort_results_msg = """

How would you like to sort your results?

[1/blank] Search by relevance, which considers matching keywords, court level, and recentness.

[2] Oldest first by docket/document filing date.

[3] Newest first by docket/document filing date.

[4] Order dockets by the date of the most recent entry listed on the docket ascending.

[5] Order dockets by the date of the most recent entry listed on the docket descending.

[6] Random order. (good for sampling)

Enter your choice below. [1/2/3/4/5/6] [blank=1]

"""
    print(Fore.RED + msg2)
    print("\nPress ENTER to begin" + Style.RESET_ALL)
    input()
    clear()
    print(csv_generator_instructions)

    print(input_preference_msg)
    input_preference_choice = input()

    if input_preference_choice == "1" or not input_preference_choice:
        clear()
        print("\nEnter a Docket Alarm search query.\n")
        print(
            "(This is the same query that you would enter on docketalarm.com.\nFull search documentation can be found at https://www.docketalarm.com/posts/2014/6/23/Terms-and-Connectors-Searching-With-Docket-Alarm/)\n\n"
        )
        users_search_query = input()
        clear()
        print("Calculating maximum number of results, please wait...")
        user = login.Credentials()
        amountOfResults = requests.get(
            "https://www.docketalarm.com/api/v1/search/",
            params={
                "login_token": user.authenticate(),
                "q": users_search_query,
                "limit": "1"
            },
            timeout=60).json()['count']
        clear()
        print(f"Maximum number of results: {amountOfResults}")
        print(
            "\nEnter the number of results you want to return (or blank for all)\n\n"
        )
        result = input()
        users_number_of_results = int(
            result) if result.strip() else amountOfResults
        clear()
        print(sort_results_msg)
        sort_choice_input = input()
        if sort_choice_input == "1" or not sort_choice_input.strip():
            sort_choice = None
        elif sort_choice_input == "2":
            sort_choice = "date_filed"
        elif sort_choice_input == "3":
            sort_choice = "-date_filed"
        elif sort_choice_input == "4":
            sort_choice = "date_last_filing"
        elif sort_choice_input == "5":
            sort_choice = "-date_last_filing"
        elif sort_choice_input == "6":
            sort_choice = "random"
        else:
            print("Invalid choice. Press ENTER to return to menu.")
            input()
            spreadsheet_generator_menu()
            return
    elif input_preference_choice == "2":
        clear()
        print(
            "Please browse to the CSV file you will use as input. Press ENTER to open file browser."
        )
        input()
        input_csv = file_browser.browseCSVFiles()

    clear()

    default_dir = os.path.join(file_dir, 'data')
    try:
        os.mkdir(default_dir)
    except:
        pass
    print("""
Select which directory you would like to save the data:

[1] Default folder:
    %s

[2] Use a file browser to select the directory.""" % default_dir)
    response = input()
    if not response or response == '1':
        users_output_path = default_dir
    else:
        users_output_path = file_browser.browseDirectories("csv-output")

    if input_preference_choice == "1" or not input_preference_choice:
        generate_spreadsheets.query_to_tables(users_search_query,
                                              users_number_of_results,
                                              users_output_path,
                                              result_order=sort_choice)

    elif input_preference_choice == "2":
        generate_spreadsheets.query_to_tables("",
                                              "",
                                              users_output_path,
                                              input_csv=input_csv)