Ejemplo n.º 1
0
def print_entry(entries, entry_count, num_entries):
    """Clear the screen and display one time-log entry.

    Shows the date, employee, task, duration and notes of the entry at
    index ``entry_count``, then a position indicator ("Entry X of Y").
    Returns the ``entries`` list unchanged.
    """
    menus.clear()
    current = entries[entry_count]
    for label, value in (
        ("Date", current.date),
        ("Employee", current.employee),
        ("Task", current.task),
        ("Duration", current.duration),
        ("Notes", current.notes),
    ):
        print(f"{label}: {value}")
    print("\n" + f"Entry {entry_count+1} of {num_entries} ")
    print("\n")
    return entries
Ejemplo n.º 2
0
def main(mode=None):
    '''
        Renders the workflow to the user.

        Parameters:
            mode: one of "configure", "run" or "dryrun". Any other value
                (including the default None) does nothing and returns None.

        KeyboardInterrupt (Ctrl-C) is caught so the program exits with a
        friendly message instead of a traceback.
    '''
    try:
        if mode == "configure":
            configure_the_soc()

        if mode == "run":
            # Fall back to configuration when no local settings exist yet.
            if not localsettings:
                configure_the_soc()
            else:
                run_ops_mode()
                exit(0)

        if mode == "dryrun":
            incident = prepare_security_incident_notification()
            clear()
            p9.main(incident.sin['title'])
            exit(0)
    except KeyboardInterrupt:
        # BUG FIX: was a Python 2 `print` statement (SyntaxError on
        # Python 3, which the rest of this file targets — f-strings etc.).
        print("\n\n[!] Program Interrupted, exiting...\n\n")
Ejemplo n.º 3
0
def run_ops_mode():
    '''
        This calls the script and executes the current workflow.
        The script is not using the elasticsearch bulk apis at this time.
        The bulk apis are being tested and will be upgraded in future versions.

        Flow: build a security incident notification (SIN), create a case on
        the SOC platform via TheHive REST API, then push the SIN's
        observables onto the newly created case.
    '''
    # NOTE(review): localdb is never referenced below — presumably the
    # constructor has a side effect (opening/creating the local DB); confirm.
    localdb = LocalHostDatabase()

    # Build the SIN and attach the deduplicated observables collected by p9.
    sin = prepare_security_incident_notification()
    sin.sin['observables'] = p9.recordman.dedup
    clear()

    # Render the workflow UI, then copy the analyst notes into the description.
    p9.main(sin.sin['title'])
    sin.sin['description'] = sin.sin['observables']['analyst']
    case = prepare_hive_case(sin)

    # Convert the observables into tag data for the case.
    datatags = prepare_observables(sin.sin['observables'])

    # Load the SOC platform URL and API key from the settings database.
    settings = SettingsDatabase()
    restapi = settings.load_settings()

    hiveapi = TheHiveApi(restapi['soc_platform'], restapi['soc_apikey'])

    # Create the case remotely and record the identifiers the platform returns.
    request = hiveapi.create_case(case.hivecase)
    sin.sin['restapi'] = {
        'caseId': request['caseId'],
        'hiveId': request['id'],
        'owner': request['owner'],
        'title': request['title']
    }

    caseid = sin.sin['restapi']['caseId']
    hiveid = sin.sin['restapi']['hiveId']

    # Attach each observable to the newly created case.
    process_observables(hiveapi, caseid, hiveid, sin, case, datatags)

    ##### EMAIL FOR WINDOWS PLATFORMS ONLY
    # LINUX PLATFORMS ARE WIP
    '''
Ejemplo n.º 4
0
def login_interface():
    """
    Called to display menus and options for logging in.

    Prompts for Docket Alarm credentials, POSTs them to the login API
    endpoint, and stores them locally on success.  On failure the prompt
    repeats in a loop (BUG FIX: the original re-invoked this function
    recursively on every failed attempt, so enough bad logins could
    exhaust the call stack).
    """
    # This is the endpoint for logging in to Docket Alarm from the API.
    login_url = "https://www.docketalarm.com/api/v1/login/"

    while True:
        print("\nPlease enter your Docket Alarm username and press ENTER.\n")
        input_username = input()
        menus.clear()
        print(
            "\nPlease enter your Docket Alarm password and press ENTER\n(This will be stored securely on your local machine)\n"
        )
        # Masked password entry so it is not echoed to the terminal.
        input_password = stdiomask.getpass(mask="*", prompt="")
        menus.clear()

        # Credentials sent in the body of the POST request.
        data = {
            'username': input_username,
            'password': input_password,
        }
        # The response is a JSON object carrying our authentication key
        # inside the 'login_token' key.
        result = requests.post(login_url, data=data)
        result_json = result.json()

        if result_json['success'] != True:
            # Not a success: show the API's error and re-prompt.
            print(result_json['error'])
            input()
            continue

        # Success: store the user info locally so the program can log in
        # automatically every time the script is run afterwards.
        store_user_info_locally(input_username, input_password)
        return
Ejemplo n.º 5
0
def query_to_tables(query, results_limit, output_path, result_order=None):
    """
    Takes in a search query as a string,
    the amount of results you want returned as a string,
    the path you want to save to as a string,
    and optionally, the order of your results as a string.

    Generates a folder within the folder you specify and
    populates it with 4 spreadsheets containing the docket data
    from your search (docketInformation, docketEntries, parties and
    attorneysAndFirms), built by appending to the module-level global
    dataframes of the same names.
    """

    # The limit arrives as user input, so coerce it to an int up front.
    results_limit = int(results_limit)

    def fill_docketInformation(result, docket):
        """Append one row per case-info field to the global docketInformation."""
        if 'info' not in docket:
            return
        for key in docket['info']:
            # .get() supplies a fallback for keys that may be absent.
            new_docketInformation_row = {
                'Docket Number': result['docket'],
                'Court Name': result['court'],
                'Case Title': docket['info'].get('title',
                                                 result.get("title", None)),
                'Case Info Field': key,
                'Case Info Values': docket['info'][key],
            }
            # DataFrame.append returns a NEW frame, so rebind the global.
            # NOTE(review): DataFrame.append is deprecated (removed in
            # pandas 2.0); migrate to pd.concat when upgrading pandas.
            global docketInformation
            docketInformation = docketInformation.append(
                new_docketInformation_row, ignore_index=True)

    def fill_docketEntries(result, docket):
        """Append one row per docket-report document to the global docketEntries."""
        if 'docket_report' not in docket:
            print(docket)
            return
        for document in docket['docket_report']:
            new_docketEntries_row = {
                'Docket Number': result['docket'],
                'Court Name': result['court'],
                # BUG FIX: 'info' is not guaranteed to be present (note that
                # fill_docketInformation guards for it); use .get() so a
                # docket without it cannot raise KeyError here.
                'Case Title': docket.get('info', {}).get(
                    'title', result.get("title", None)),
                'Docket Entry Date': document.get('entry_date', None),
                'Docket Entry Numbers': document.get('number', None),
                'Docket Entry Contents': removehtml(
                    document.get('contents', None)),
            }
            global docketEntries
            docketEntries = docketEntries.append(new_docketEntries_row,
                                                 ignore_index=True)

    def fill_parties(result, docket):
        """Append one row per party to the global parties dataframe."""
        # The parties key is not always present in the response.
        if 'parties' not in docket:
            print(docket)
            return
        for party in docket['parties']:
            new_parties_row = {
                'Docket Number': result.get('docket', None),
                'Court Name': result.get('court', None),
                # BUG FIX: guard 'info' with .get() — it may be absent even
                # when 'parties' is present.
                'Case Title': docket.get('info', {}).get(
                    'title', result.get("title", None)),
                'Party Name': party.get('name_normalized', party.get('name')),
                'Party Type': party.get('type', None),
            }
            global parties
            parties = parties.append(new_parties_row, ignore_index=True)

    def fill_attorneysAndFirms(result, docket):
        """Append one row per counsel entry to the global attorneysAndFirms."""
        # The parties key is not always present in the response.
        if 'parties' not in docket:
            return
        for party in docket['parties']:
            # BUG FIX: was `return`, which silently dropped the attorneys of
            # every REMAINING party as soon as one party had no counsel
            # listed.  Skip just this party instead.
            if 'counsel' not in party:
                continue
            for counsel in party['counsel']:
                new_attorneysAndFirms_row = {
                    'Docket Number': result.get('docket', None),
                    'Court Name': result.get('court', None),
                    'Attorney Name': counsel.get("name", None),
                    'Attorney Firm': counsel.get("firm", None),
                    'Attorney Email': counsel.get("email", None),
                    'Attorney Phone': counsel.get("phone", None),
                }
                global attorneysAndFirms
                attorneysAndFirms = attorneysAndFirms.append(
                    new_attorneysAndFirms_row, ignore_index=True)

    # ---- query_to_tables() proper begins here ----

    # Blank line so stray keypresses don't land in the upcoming input().
    print("\n")

    # Credentials object exposing the signed-in user's username/password
    # and authentication token.
    user = login.Credentials()

    print("Querying, please wait...")

    # Run the search with the user's query, limit and ordering.
    searchResults = user_tools.search_docket_alarm(
        (user.username, user.password),
        query,
        limit=results_limit,
        result_order=result_order)

    # Defensively cap the result list at the requested limit.
    searchResults = searchResults[0:results_limit]

    # Tell the user how many results came back and ask to proceed.
    print(
        f"\nThis search query resulted in {len(searchResults)} results. Proceed? [Y/n]"
    )
    user_proceed_choice = input()

    if user_proceed_choice.lower() == "n":
        # User declined: back to the menu.
        menus.spreadsheet_generator_menu()
    elif user_proceed_choice.lower() != "y" and user_proceed_choice.lower(
    ) != "n":
        print("Invalid response. Returning to menu.")
        input()
        # NOTE(review): if spreadsheet_generator_menu() ever returns,
        # execution falls through and the export proceeds anyway — confirm
        # the menu call never returns, or add an explicit return here.
        menus.spreadsheet_generator_menu()
    # The user answered "y": continue with the export.
    menus.clear()

    # Red ASCII-art banner.
    print(Fore.RED + menus.msg2)

    # Progress bar sized to the number of results we will process.
    progressbar_maximum = len(searchResults)
    bar = Bar('Generating CSVs', max=progressbar_maximum)

    for result in searchResults:
        # Pull the full docket for each search hit by docket number + court.
        docket = user_tools.get_docket(
            user.authenticate(),
            result['docket'],
            result['court'],
            cached=global_variables.IS_CACHED,
            client_matter=global_variables.CLIENT_MATTER)

        # Each helper appends rows to its module-level global dataframe.
        fill_docketInformation(result, docket)
        fill_docketEntries(result, docket)
        fill_parties(result, docket)
        fill_attorneysAndFirms(result, docket)

        bar.next()

    # Timestamp makes each output folder name unique per run.
    timeNow = datetime.datetime.now().strftime("%I%M%p %B %d %Y")

    # cleanhtml() (from get_pdfs.py) strips characters that are illegal in
    # file or folder names.
    containing_folder_name = f"{cleanhtml(query)} - {timeNow}"
    output_directory = os.path.join(output_path, containing_folder_name)

    # Create the output folder if it doesn't already exist.
    if not os.path.exists(output_directory):
        os.makedirs(output_directory)

    # Absolute paths for the four CSV outputs.
    docketInformation_outputFile = os.path.join(output_directory,
                                                "docketInformation.csv")
    docketEntries_outputFile = os.path.join(output_directory,
                                            "docketEntries.csv")
    parties_outputFile = os.path.join(output_directory, "parties.csv")
    attorneysAndFirms_outputFile = os.path.join(output_directory,
                                                "attorneysAndFirms.csv")

    # index=False: no numeric index column in the output files.
    docketInformation.to_csv(docketInformation_outputFile, index=False)
    docketEntries.to_csv(docketEntries_outputFile, index=False)
    parties.to_csv(parties_outputFile, index=False)
    attorneysAndFirms.to_csv(attorneysAndFirms_outputFile, index=False)

    # Set the progress bar to its completed state.
    bar.finish()