Example #1
0
def get_expenses(db_exp_data_fpaths: list,
                 db_inc_data_fpaths: list,
                 stor_pair_path: str,
                 stor_exp_data_path: str,
                 budg_path: str,
                 exp_path: str,
                 dont_print_cols=None,
                 bankconfig=None):
    """
    Main driver for importing expense data.

    Loads the expense csv database(s), drops ignorable transactions
    (internal transfers, cash advances, credit payments), makes sure a
    budget exists for every month present in the data, matches each expense
    row to a store/expense category, then prints the result and writes it
    back to the first database path.

    params:
        db_exp_data_fpaths - list of expense csv paths; index 0 is also the
            write-back target
        db_inc_data_fpaths - income csv paths (unused here; kept for
            interface compatibility with callers)
        stor_pair_path - path to the store-name pairing data
        stor_exp_data_path - path to the store/expense relationship data
        budg_path - path to the budget data
        exp_path - path to the expense definitions data
        dont_print_cols - columns to omit when printing the final dataframe
        bankconfig - bank configuration supplying dtypes and the list of
            ignorable transaction substrings
    """
    # NOTE: a dead `bank_json = data_help.read_jsonFile(bankconfig.settings_path)`
    # read was removed here — its result was never used.
    exp_df = data_help.load_csvs(
        db_exp_data_fpaths,
        dtype=bankconfig.exp_dtypes,
        parse_dates=env.pdates_colname
    )  # only using one csv db for now. newest will be last? idk verify later.

    exp_df = data_help.drop_for_substring(
        exp_df, env.BANK_STORENAME, bankconfig.ignorable_transactions,
        "\nRemoving the below expense transactions as they are either an internal bank acct transfer, cash advance or credit payment."
    )
    dates = data_help.extract_months(exp_df[env.DATE], start=False)
    # check for any missing budgets either this month or any month in the data
    expManager.get_budgets(budg_path, exp_path, dates)
    exp_df = expManager.get_expenses_for_rows(exp_df, stor_exp_data_path,
                                              stor_pair_path, budg_path,
                                              bankconfig)
    print("\nFinished gathering your expense data: \n")
    util.print_fulldf(exp_df, dont_print_cols)
    data_help.write_data(exp_df, db_exp_data_fpaths[0])
Example #2
0
def initialize_csvs(list_of_csvs, list_of_cols):
    """
    Create any missing csv files listed in list_of_csvs.

    Each path is paired positionally with its column list from list_of_cols;
    a missing file is created as an empty dataframe with those columns, and
    files that already exist are left untouched.
    """
    for csv_path, cols in zip(list_of_csvs, list_of_cols):
        if os.path.exists(csv_path):
            continue  # never clobber an existing database
        empty_frame = pd.DataFrame(columns=cols)
        data_help.write_data(empty_frame, csv_path)
Example #3
0
def edit_df_entries(df, df_path, column_name, old_entry, new_entry):
    """
    Replace every occurrence of old_entry in df[column_name] with new_entry.

    The dataframe is written back to df_path only when at least one row
    matched; otherwise the data is left untouched and a message is printed.
    """
    match_mask = df[column_name] == old_entry
    if match_mask.any():
        df.loc[match_mask, column_name] = new_entry
        data_help.write_data(df, df_path)
    else:
        print("No records matched in dataframe. Left it alone.")
Example #4
0
def edit_df_entries_given_columns(df, df_path, col_to_change, col_to_match,
                                  match_key, old_entry, new_entry):
    """
    Replace old_entry with new_entry in col_to_change, but only on rows
    whose col_to_match equals match_key.

    Writes the dataframe back to df_path when something matched; otherwise
    prints a notice and leaves the data untouched.
    """
    row_mask = (df[col_to_match] == match_key) & (df[col_to_change] == old_entry)
    if row_mask.any():
        df.loc[row_mask, col_to_change] = new_entry
        data_help.write_data(df, df_path)
    else:
        print("No records matched in dataframe, left it alone.")
Example #5
0
def get_income(db_inc_data_fpaths: list,
               dont_print_cols=None,
               bankconfig=None):
    """
    Main driver for importing income data.

    Loads the income csv database(s), drops ignorable transactions
    (internal transfers, cash advances, credit payments), persists the
    cleaned frame to the first database path and prints it.
    """
    removal_msg = "\nRemoving the below income transactions as they are either an internal bank acct transfer, cash advance or credit payment."
    inc_df = data_help.load_csvs(db_inc_data_fpaths,
                                 dtype=bankconfig.inc_dtypes,
                                 parse_dates=env.pdates_colname)
    inc_df = data_help.drop_for_substring(inc_df, env.BANK_STORENAME,
                                          bankconfig.ignorable_transactions,
                                          removal_msg)
    data_help.write_data(inc_df, db_inc_data_fpaths[0])
    print("\nFinished gathering your income data: \n")
    util.print_fulldf(inc_df, dont_print_cols)
Example #6
0
def edit_cell_in_dfcol(db_data_filepath: str,
                       df,
                       col_name,
                       opt_col=None,
                       opt_dict=None,
                       col_type=None):
    """
    Edits single cells in df[col_name] selected interactively by the user.

    params:
        db_data_filepath - the path to the dataframes csv data
        df - (DataFrame) object
        col_name - the column to set the new value of
        opt_col - the column to grab a key from to search opt_dict for the
            list of options. If None, user can edit the cell manually
        opt_dict - the dictionary containing the pairs between keys and
            options for a key
        col_type - the column's type to check for on user inputs (only
            'float' is handled)
    """
    index_list = df.index.tolist()
    util.print_fulldf(df)
    prompt = f"Select some indices from the above dataframe column '{col_name}' to edit: : "
    indices = util.select_indices_of_list(prompt,
                                          index_list,
                                          return_matches=True,
                                          abortchar='q',
                                          print_lst=False)
    if indices is not None:
        for index in indices:
            # BUGFIX: val must be initialised every iteration — previously,
            # when opt_col was None and col_type was not 'float', the
            # `if val != None` check below raised a NameError.
            val = None
            if opt_col is not None:
                opt_key = df.loc[index, opt_col]
                val = util.select_from_list(
                    opt_dict[opt_key],
                    f"Please select an option for cell [{index}] col '{col_name}' or (q) to quit: ",
                    abortchar='q',
                    ret_match=True)
            elif col_type == 'float':
                val = util.get_float_input(
                    f"Please input ({col_type}) for this entry at row [{index}] col [{col_name}]: ",
                    force_pos=False,
                    roundto=2)

            if val is not None:  # None means the user aborted
                df.at[index, col_name] = val
                data_help.write_data(df, db_data_filepath)
            else:
                break
Example #7
0
def df_swap(prompt=None,
            df_to_move_from=None,
            df_to_move_to=None,
            df_to_move_from_path=None,
            df_to_move_to_path=None,
            rows=None,
            cross_check_df=None,
            cross_check_col=None,
            cross_check_df_path=None):
    """
    Moves rows from one dataframe into another, writing both back to disk.

    params
        prompt - the text shown to the user when asking which rows to move
        df_to_move_from - the dataframe to move rows out of
        df_to_move_to - the dataframe receiving the rows
        df_to_move_from_path / df_to_move_to_path - csv paths for each frame
        rows - (default None) row indices to move; when None the user is
            prompted to pick them
        cross_check_df - dataframe checked for matches in cross_check_col
        cross_check_col - column to perform the cross check on
        cross_check_df_path - csv path for the cross-check dataframe
    """
    if rows is None:
        util.print_fulldf(df_to_move_from)
        rows = util.select_indices_of_list(prompt,
                                           list(df_to_move_from.index),
                                           abortchar='q',
                                           print_lst=False)
    if rows is None:
        # selection returns None when the user aborts with 'q'
        return

    if cross_check_df is not None:
        data_help.check_for_match_in_rows(rows, df_to_move_from, env.AMOUNT,
                                          cross_check_df, cross_check_col,
                                          cross_check_df_path, env.ADJUSTMENT)

    df_to_move_from, df_to_move_to = data_help.locate_and_move_data_between_dfs(
        df_to_move_from, rows, df_to_move_to, cross_check_col)
    data_help.write_data(df_to_move_to, df_to_move_to_path, sortby=env.DATE)
    data_help.write_data(df_to_move_from, df_to_move_from_path, sortby=env.DATE)
Example #8
0
def check_for_data(ndata_filepaths, db_exp_data_fpaths, db_inc_data_fpaths,
                   adata_path, db_exp_data_path, db_inc_data_path,
                   exp_recbin_path, inc_recbin_path, bankconfig):
    """
    Checks the new-data folder and the databases for csv data and imports
    any new transactions into the expense and income databases.

    params:
        ndata_filepaths - csv files found in the new-data folder
        db_exp_data_fpaths / db_inc_data_fpaths - existing database csvs
        adata_path - archive folder the processed files are moved into
        db_exp_data_path / db_inc_data_path - database output folders
        exp_recbin_path / inc_recbin_path - recycle-bin csvs; rows found
            there are excluded from the import
        bankconfig - bank configuration supplying dtypes, strip columns and
            the duplicate-check column list
    returns:
        True when new data was imported, False when there was nothing to do.
    """
    if len(ndata_filepaths) == 0:
        return False  # no new files, nothing to import

    df_new = data_help.load_and_process_csvs(
        file_paths=ndata_filepaths,
        strip_cols=bankconfig.strip_cols,
        data_type=bankconfig.selection)

    if len(db_exp_data_fpaths) != 0 and len(db_inc_data_fpaths) != 0:
        # Existing databases: split the new data, add the tracking columns,
        # then append it to the loaded databases.
        util.print_fulldf(df_new)

        df_inc_new, df_exp_new = data_help.filter_by_amnt(
            df_new,
            col_name=env.AMOUNT,
            col_name2=env.NULL,
            bank_name=bankconfig.selection)
        df_inc_new = data_help.add_columns(
            df_inc_new, [env.ADJUSTMENT, env.INC_UUID, env.EXP_UUID])
        df_exp_new = data_help.add_columns(df_exp_new, [
            env.FILT_STORENAME, env.EXPENSE, env.ADJUSTMENT, env.EXP_UUID,
            env.INC_UUID
        ])

        df_exp = data_help.load_csvs(file_paths=db_exp_data_fpaths,
                                     strip_cols=bankconfig.strip_cols,
                                     dtype=bankconfig.exp_dtypes)
        df_inc = data_help.load_csvs(file_paths=db_inc_data_fpaths,
                                     strip_cols=bankconfig.strip_cols,
                                     dtype=bankconfig.inc_dtypes)
        df_exp = pd.concat([df_exp, df_exp_new])
        df_inc = pd.concat([df_inc, df_inc_new])
    else:
        # No existing databases: the new data becomes the database.
        df_inc, df_exp = data_help.filter_by_amnt(
            df_new,
            col_name=env.AMOUNT,
            col_name2=env.NULL,
            bank_name=bankconfig.selection)
        # BUGFIX: the results of add_columns were previously assigned to
        # unused df_inc_new/df_exp_new variables in this branch, so the
        # uuid/adjustment columns never reached the frames written below.
        df_inc = data_help.add_columns(
            df_inc, [env.ADJUSTMENT, env.INC_UUID, env.EXP_UUID])
        df_exp = data_help.add_columns(df_exp, [
            env.FILT_STORENAME, env.EXPENSE, env.ADJUSTMENT, env.EXP_UUID,
            env.INC_UUID
        ])

    df_exp_recbin = data_help.load_csvs([exp_recbin_path],
                                        dtype=bankconfig.exp_dtypes,
                                        parse_dates=env.pdates_colname)
    df_inc_recbin = data_help.load_csvs([inc_recbin_path],
                                        dtype=bankconfig.inc_dtypes,
                                        parse_dates=env.pdates_colname)
    print("New data loaded locally.\n\n")
    print("INCOME\n\n")
    util.print_fulldf(df_inc)
    print("IGNORED INCOME\n\n")
    util.print_fulldf(df_inc_recbin)
    print("EXPENSES\n\n")
    util.print_fulldf(df_exp)
    print("YOUR IGNORED EXPENSES\n\n")
    util.print_fulldf(df_exp_recbin)

    # Drop duplicate transactions, then remove anything the user previously
    # recycled so it is not silently re-imported.
    df_exp = data_help.drop_dups(df=df_exp,
                                 col_names=bankconfig.check_for_dups_cols,
                                 ignore_index=True)
    df_inc = data_help.drop_dups(df=df_inc,
                                 col_names=bankconfig.check_for_dups_cols,
                                 ignore_index=True)

    df_exp = data_help.remove_subframe(
        df_to_remove_from=df_exp,
        df_to_remove=df_exp_recbin,
        col_names=bankconfig.check_for_dups_cols)
    df_inc = data_help.remove_subframe(
        df_to_remove_from=df_inc,
        df_to_remove=df_inc_recbin,
        col_names=bankconfig.check_for_dups_cols)

    print("INCOME WITHOUT DUPS\n\n")
    util.print_fulldf(df_inc)
    print("EXPENSES WITHOUT DUPS\n\n")
    util.print_fulldf(df_exp)

    # Tag every row with a uuid so expense and income rows can reference
    # each other across databases.
    df_exp = data_help.iterate_df_and_add_uuid_to_col(df_exp, env.EXP_UUID)
    df_inc = data_help.iterate_df_and_add_uuid_to_col(df_inc, env.INC_UUID)

    data_help.write_data(df_exp,
                         os.path.join(db_exp_data_path,
                                      env.OUT_EXP_DATA_TEMPL),
                         sortby=env.DATE,
                         fillna_col=[env.ADJUSTMENT])
    data_help.write_data(df_inc,
                         os.path.join(db_inc_data_path,
                                      env.OUT_INC_DATA_TEMPL),
                         sortby=env.DATE,
                         fillna_col=[env.ADJUSTMENT])
    # Archive the processed files under a timestamped name.
    timestamp = datetime.datetime.now().strftime("%m_%d_%Y__%H_%M_%S") + ".csv"
    data_help.move_files(files=ndata_filepaths,
                         dest=os.path.join(adata_path, timestamp))
    print(
        f"Data imported to {db_inc_data_path} and {db_exp_data_path}. Old files moved to {adata_path}"
    )
    return True
Example #9
0
def edit_df_transaction_price(df_to_edit,
                              df_to_edit_path,
                              col_to_use,
                              df_to_move_reduction_to=None,
                              df_to_move_reduction_to_path=None,
                              df_with_reductions=None,
                              df_with_reductions_path=None,
                              reduction_col=None,
                              uuid_col=None,
                              df_reduct_uuid_col=None,
                              perform_swap=None):
    """
    Applies reduction transactions from one dataframe onto rows of another.

    For each row the user selects in df_to_edit, the user then picks rows
    of df_with_reductions; each picked reduction_col value is added onto
    the selected row's col_to_use, and the reduction row is stamped with
    the edited row's uuid so the link can be traced later.

    params:
        df_to_edit - the dataframe to edit the price on
        df_to_edit_path - csv path for df_to_edit
        col_to_use - the column across all dataframes to be using
        df_with_reductions - the dataframe carrying transaction values that
            can be inserted into df_to_edit
        df_with_reductions_path - csv path for df_with_reductions
        df_to_move_reduction_to - the df that will take the reduction
            transaction from df_with_reductions when perform_swap is truthy
        df_to_move_reduction_to_path - csv path for that dataframe
        reduction_col - the column to grab the reduction value from
        uuid_col - uuid column copied from df_to_edit onto the reduction row
        df_reduct_uuid_col - unused here; kept for interface compatibility
        perform_swap - when truthy, consumed reduction rows are moved via
            df_swap; otherwise df_with_reductions is rewritten in place
    """
    index_list = df_to_edit.index.tolist()
    util.print_fulldf(df_to_edit)
    prompt = f"Select some indices from the above dataframe column '{col_to_use}' to edit: : "
    indices = util.select_indices_of_list(prompt,
                                          index_list,
                                          return_matches=True,
                                          abortchar='q',
                                          print_lst=False)
    if indices is None:  # user aborted
        return None

    for index in indices:
        reductions_index_list = df_with_reductions.index.tolist()
        util.print_fulldf(df_with_reductions)
        prompt = f"Which index contains the transaction you want? "
        reduction_indices = util.select_indices_of_list(prompt,
                                                        reductions_index_list,
                                                        abortchar='q',
                                                        return_matches=False,
                                                        print_lst=False)
        if reduction_indices is None:  # user aborted
            break

        for reduction_index in reduction_indices:
            val = df_with_reductions.at[reduction_index, reduction_col]

            # BUGFIX: the original used '== np.nan', which is always False
            # (NaN never compares equal), so NaN cells were never zeroed
            # and the addition below silently produced NaN.
            if pd.isna(df_to_edit.at[index, col_to_use]):
                df_to_edit.at[index, col_to_use] = 0.0

            # link the reduction row back to the edited row's uuid
            df_with_reductions.at[reduction_index,
                                  uuid_col] = df_to_edit.at[index, uuid_col]
            df_to_edit.at[index, col_to_use] = (
                df_to_edit.at[index, col_to_use] + val)

        if perform_swap:
            df_swap(df_to_move_from=df_with_reductions,
                    df_to_move_to=df_to_move_reduction_to,
                    df_to_move_from_path=df_with_reductions_path,
                    df_to_move_to_path=df_to_move_reduction_to_path,
                    rows=reduction_indices)  # writes changes
        else:
            data_help.write_data(df_with_reductions,
                                 df_with_reductions_path,
                                 sortby=env.DATE)

    data_help.write_data(df_to_edit, df_to_edit_path,
                         sortby=env.DATE)  # writes changes to the edited df.
Example #10
0
def df_editor(df_to_move_from_path,
              df_to_move_to_path=None,
              restorable=False,
              recycle=True,
              df_with_reductions_path=None,
              df_to_move_reduction_to_path=None,
              uuid_col=None,
              df_reduct_uuid_col=None,
              bankconfig=None,
              dtype=None):
    """
    Interactive loop for editing a dataframe csv: recycle/delete rows,
    restore from the recycle bin, adjust a price manually, or reduce a
    transaction by another transaction's amount.

    params:
        df_to_move_from_path - the dataframe file paths to edit
        df_to_move_to_path - the recyclebin path (default None)
        restorable - whether or not the df is restorable, if True, will not recycle
        recycle - whether or not data deleted from a frame will be moved to another or lost
        df_with_reductions_path - the path to the dataframe containing prices to reduce df_to_move_from_path by
        df_to_move_reduction_to_path - path of the frame that receives used reductions
        uuid_col - uuid column of the frame being edited
        df_reduct_uuid_col - uuid column on the reductions frame
        bankconfig - bank configuration supplying the csv dtypes
        dtype - specifies how the csv datatypes should be set up when
            loading; expected values are 'exp' or 'inc'.
    """
    done = False
    # Pick dtypes for each csv based on which side ('exp' or 'inc') is
    # being edited; the reductions frames use the opposite side's dtypes.
    # NOTE(review): any other dtype value leaves dtype_move_from unbound
    # and the first load below raises UnboundLocalError — confirm callers
    # only ever pass 'exp' or 'inc'.
    if dtype == 'exp':
        dtype_move_from = bankconfig.exp_dtypes
        dtype_move_to = bankconfig.exp_dtypes
        dtype_with_red = bankconfig.inc_dtypes
        dtype_move_red_to = bankconfig.inc_dtypes
    elif dtype == 'inc':
        dtype_move_from = bankconfig.inc_dtypes
        dtype_move_to = bankconfig.inc_dtypes
        dtype_with_red = bankconfig.exp_dtypes
        dtype_move_red_to = bankconfig.exp_dtypes

    while not done:
        # Reload from disk every pass so each action sees the data written
        # by the previous one.
        df_to_move_from = data_help.load_csv(df_to_move_from_path,
                                             dtype=dtype_move_from,
                                             parse_dates=env.pdates_colname)

        if df_to_move_to_path is not None:
            df_to_move_to = data_help.load_csv(df_to_move_to_path,
                                               dtype=dtype_move_to,
                                               parse_dates=env.pdates_colname)

        if df_with_reductions_path is not None:
            df_with_reductions = data_help.load_csv(
                df_with_reductions_path,
                dtype=dtype_with_red,
                parse_dates=env.pdates_colname)

        if df_to_move_reduction_to_path is not None:
            df_to_move_reduction_to = data_help.load_csv(
                df_to_move_reduction_to_path,
                dtype=dtype_move_red_to,
                parse_dates=env.pdates_colname)

        if restorable == False:
            prompt = "Would you like to: \n(a) - move transactions to the recycle bin\n(b) - adjust a transaction price manually\n"
            prompt = prompt + "(c) - reduce a transaction by another\n(q) - quit\nType here: "
            input_chars = ['a', 'b', 'c', 'q']
        else:
            prompt = "\n".join([
                "Would you like to: ",
                "(a) - delete a row from the recycle bin",
                "(b) - restore from recycling", "(q) - quit", "Type here: "
            ])
            recycle = False  # if user is in recycle bin, deleting removes permanently
            # NOTE(review): 'c' and 'd' are accepted here but no branch
            # below handles them in restorable mode, so they are no-ops.
            input_chars = ['a', 'b', 'c', 'd', 'q']

        user_in = util.get_user_input_for_chars(prompt, input_chars)

        if user_in == 'a' and recycle == True:  # expenses or income case
            # move selected rows into the recycle-bin frame
            df_swap("Which rows would you like to recycle? (q) to abort? ",
                    df_to_move_from, df_to_move_to, df_to_move_from_path,
                    df_to_move_to_path)

        elif user_in == 'a' and recycle == False:
            # permanent delete (recycle-bin view)
            df_to_move_from = data_help.drop_rows(
                "Which rows would you like to delete? (q) to abort? ",
                df_to_move_from)
            if df_to_move_from is not None:  # none type aborts
                data_help.write_data(df_to_move_from, df_to_move_from_path)

        elif user_in == 'b' and restorable == True:
            # restore rows from the recycle bin, cross-checking against the
            # reductions frame so linked reductions are handled
            df_swap(
                "Which row or rows would you like to restore (q) to abort? ",
                df_to_move_from,
                df_to_move_to,
                df_to_move_from_path,
                df_to_move_to_path,
                cross_check_df=df_with_reductions,
                cross_check_col=df_reduct_uuid_col,
                cross_check_df_path=df_with_reductions_path)

        elif user_in == 'b' and restorable == False:
            # manual price adjustment on the ADJUSTMENT column
            edit_cell_in_dfcol(df_to_move_from_path,
                               df_to_move_from,
                               col_name=env.ADJUSTMENT,
                               col_type='float')

        elif user_in == 'c' and restorable == False:
            prompt = "\n".join([
                "What database is the source of your reductions?",
                f"(a) - {df_with_reductions_path}",
                f"(b) - {df_to_move_reduction_to_path}", f"(q) - abort",
                "Type here: "
            ])
            selection = util.get_user_input_for_chars(prompt, ['a', 'b', 'q'])

            if selection == 'a':
                # reductions come from the live frame; used rows are swapped
                # into the reduction-recycle frame
                edit_df_transaction_price(
                    df_to_edit=df_to_move_from,
                    df_to_edit_path=df_to_move_from_path,
                    col_to_use=env.ADJUSTMENT,
                    df_to_move_reduction_to=df_to_move_reduction_to,
                    df_to_move_reduction_to_path=df_to_move_reduction_to_path,
                    df_with_reductions=df_with_reductions,
                    df_with_reductions_path=df_with_reductions_path,
                    reduction_col=env.AMOUNT,
                    uuid_col=uuid_col,
                    df_reduct_uuid_col=df_reduct_uuid_col)
            elif selection == 'b':
                # reductions come from the already-recycled frame; no swap,
                # the frame is just rewritten in place
                edit_df_transaction_price(
                    df_to_edit=df_to_move_from,
                    df_to_edit_path=df_to_move_from_path,
                    col_to_use=env.ADJUSTMENT,
                    df_with_reductions=df_to_move_reduction_to,
                    df_with_reductions_path=df_to_move_reduction_to_path,
                    reduction_col=env.AMOUNT,
                    uuid_col=uuid_col,
                    df_reduct_uuid_col=df_reduct_uuid_col,
                    perform_swap=False)

        elif user_in == 'q':
            done = True
Example #11
0
        util.print_fulldf(df_to_walk)
        for idx, row in df_to_walk.iterrows():
            print("Curr Transaction:  %-10s | %-10s | %-10s | %-10s" %
                  (row[env.DATE], row[env.AMOUNT], row[env.BANK_STORENAME],
                   row[env.TYPE]))
            month_end_date = util.get_month_from_timestamp(
                row[env.DATE], start=False
            )  # get relevant expenses for that month set by the user.
            selected_exp, exp_stor_data, stor_data, storename = expManager.search_store_relationships(
                new_pairing, exp_stor_data, budg_db[month_end_date],
                exp_stor_data_path, stor_data, stor_pair_path
            )  # take the new pairing and pass into this func to get expense out the other end.
            df.at[idx, env.FILT_STORENAME] = storename
            df.at[idx, env.EXPENSE] = selected_exp

        data_help.write_data(df, df_path)
        util.print_fulldf(df)


def expenses_editor(db_exp_data_fpaths,
                    exp_recbin_path,
                    stor_pair_path,
                    exp_stor_data_path,
                    budg_path,
                    exp_path,
                    bankconfig=None):
    """
    Edits an expense's name across all databases
    """
    done = False
    while not done: