import shelve

import numpy as np
import pandas as pd

# Loaders, DataHandlers, Routes, and ColumnSets are project-local modules; the
# relative import below assumes they live alongside this file (adjust to the
# actual package layout).
from . import ColumnSets, DataHandlers, Loaders, Routes


def save_and_insert_file(file, card_type):
        # Load the uploaded file and the current data as DataFrames.
        # old_tail defaults to the index just past the last row; it marks the
        # earliest row whose derived values (Checking, Savings, Total,
        # Total Income) need to be recalculated.
        uploaded_file = Loaders.save_and_load_file(file)
        data = Loaders.load_data()
        old_tail = data.shape[0]

        # Build a dict that maps the uploaded CSV's columns onto this app's
        # column names; it becomes the new rows' DataFrame below.
        new_dataframe = DataHandlers.construct_new_dataframe_dict(
            uploaded_file, card_type)
        min_date_in_new = min(new_dataframe['Date'])

        # Convert the dict to a DataFrame, concatenate it onto the existing
        # data, sort (primarily by date), and reset the index.
        new_dataframe = pd.DataFrame.from_dict(new_dataframe)
        data = pd.concat([data, new_dataframe])
        data.sort_values(by=['Date', 'Transaction History', 'Cost'],
                         inplace=True)
        data.reset_index(drop=True, inplace=True)

        # Find the first index at which the earliest new date appears. If the
        # new rows land BEFORE existing rows, the derived values (Checking,
        # Savings, Total, etc.) are recalculated from that point forward.
        # Side effect: savings transfers after that point are overwritten and
        # have to be accounted for later.
        for index, row in data.iterrows():
            if (row['Date'] == min_date_in_new):
                old_tail = index
                break

        # Recalculate the derived columns from old_tail onward.
        DataHandlers.recalc_check_sav_tot_from(data, old_tail)

        # Save the updated data back to the pickle file.
        data.to_pickle(Routes.STORAGE_ADDRESS)
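

# A minimal sketch of a vectorized alternative to the index-scanning loops used
# above and in add_entry, assuming the frame is already sorted by 'Date'. The
# helper name is hypothetical and not part of the project's API.
def _first_index_of_date(data, min_date):
    # searchsorted returns the first position at which min_date could be
    # inserted while keeping 'Date' sorted, i.e. the first row whose derived
    # values need recalculation.
    return int(data['Date'].searchsorted(min_date, side='left'))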


def get_line_data(cols=None):
    # Load the stored data and work out which columns become chart series:
    # every column except 'Date' (the x-axis) and 'Transaction History'
    # (used as the point label).
    df = Loaders.load_data(cols)
    series = df.columns
    series = series.delete(series.get_loc('Date'))
    series = series.delete(series.get_loc('Transaction History'))
    # One dataset per series: the column name goes to the 'header' key and
    # 'data' collects the formatted points.
    datasets = [{'header': header, 'data': []} for header in series]
    # Walk the rows and append one point per series, keyed by timestamp.
    for _, row in df.iterrows():
        for count, header in enumerate(series):
            datasets[count]['data'].append({
                'x': row['Date'].timestamp(),
                'y': round(row[header], 2),
                'name': row['Transaction History'],
            })
    return datasets
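

# For reference, each element of the list returned by get_line_data has this
# shape (the values shown are illustrative placeholders, not real data):
#     {'header': 'Checking',
#      'data': [{'x': 1577836800.0, 'y': 1234.56, 'name': 'PAYCHECK'}, ...]}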


def add_entry(body):
    # Load the stored data (data.p) and stage the new row's values in a dict.
    data = Loaders.load_data()
    min_date_in_new = pd.Timestamp(body['date'])
    new_dataframe = {
        'Transaction History': [body['th']],
        'Date': [min_date_in_new],
        'Type': [body['type']] if body['type'] else ['N/A'],
        'Cost': [float(body['cost'])],
        'Checking': [0],
        'Savings': [0],
        'Total': [0],
        'Total Income': [0],
        '401k': [0],
        'HSA Account': [0],
    }
    # Convert the dict to a DataFrame, concatenate the new row onto the
    # existing data, sort by date, and reset the index.
    new_dataframe = pd.DataFrame.from_dict(new_dataframe)
    data = pd.concat([data, new_dataframe])
    data.sort_values(by=['Date', 'Transaction History', 'Cost'],
                     inplace=True)
    data.reset_index(drop=True, inplace=True)
    # Locate the earliest index from which Checking, Savings, Total, etc.
    # need to be recalculated.
    old_tail = data.shape[0]
    for index, row in data.iterrows():
        if row['Date'] == min_date_in_new:
            old_tail = index
            break
    # Recalculate the derived values, overwrite the pickle, report success.
    DataHandlers.recalc_check_sav_tot_from(data, old_tail)
    data.to_pickle(Routes.STORAGE_ADDRESS)
    return True
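

# Example of the request body add_entry expects (the keys come from the lookups
# above; the values are placeholders):
#     {'th': 'COFFEE SHOP', 'date': '2021-03-14', 'type': 'FOOD', 'cost': '-4.50'}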


def reset_from_backup(body):
    # Replace the working data with a previously saved backup, dispatching on
    # the file tag ('p' = pickle, 'csv', 'xlsx').
    tag = body['filetag']
    if tag == 'p':
        df = Loaders.load_pickle_file(body['filename'])
        df.to_pickle(Routes.STORAGE_ADDRESS)
        return True
    elif tag == 'csv':
        df = Loaders.load_csv_file(body['filename'])
        df.to_pickle(Routes.STORAGE_ADDRESS)
        return True
    elif tag == 'xlsx':
        df = Loaders.load_excel_file(body['filename'])
        df.to_pickle(Routes.STORAGE_ADDRESS)
        return True
    else:
        return False


def delete_entry(body):
    # Drop the requested row, then recalculate the derived columns starting
    # from the row just before the deleted index.
    df = Loaders.load_data()
    index = int(body['index'])
    try:
        df.drop(index, inplace=True)
    except KeyError:
        print("This key was out of bounds")
        return False
    else:
        df.reset_index(drop=True, inplace=True)
        DataHandlers.recalc_check_sav_tot_from(df, index - 1)
        df.to_pickle(Routes.STORAGE_ADDRESS)
        return True
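

# Note: deleting row 0 passes -1 to recalc_check_sav_tot_from; this module does
# not show how that helper handles a negative start index. If it expects a
# non-negative index, a defensive call could clamp it, e.g.
#     DataHandlers.recalc_check_sav_tot_from(df, max(index - 1, 0))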


def save_backup(body):
    # Write the current data to a backup file in the format named by the tag.
    df = Loaders.load_data()
    tag = body['filetag']
    if tag == 'p':
        df.to_pickle(Routes.PICKLE + body['filename'])
        return True
    elif tag == 'csv':
        df.to_csv(Routes.CSV + body['filename'])
        return True
    elif tag == 'xlsx':
        df.to_excel(Routes.XL + body['filename'])
        return True
    else:
        return False
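

# Example of the request body save_backup (and reset_from_backup) expects;
# 'filename' is appended verbatim to the matching Routes prefix, so it
# presumably carries the extension (placeholder values):
#     {'filetag': 'csv', 'filename': 'budget_backup.csv'}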


def initialize_table(file):
    # Save and load the uploaded sheet, coerce the money columns to floats,
    # and fill missing values with 'N/A'.
    uploaded_file = Loaders.save_and_load_file(file)
    uploaded_file = uploaded_file.astype({
        'Cost': 'float64',
        'Checking': 'float64',
        'Savings': 'float64',
        'Total': 'float64',
        'Total Income': 'float64'
    })
    uploaded_file.replace(np.nan, 'N/A', regex=True, inplace=True)
    Loaders.initialize_files()
    # Register every transaction category found in the sheet in the user
    # preferences, defaulting new ones to neither spending nor income.
    preferences = shelve.open(Routes.PREFERENCES_ADDRESS)
    prefs = preferences['user']
    for category in np.unique(uploaded_file[['Type']].values):
        if category != 'N/A':
            if category not in prefs['categories']:
                prefs['categories'][category] = {
                    'spending': False,
                    'income': False
                }
    # Remember which categories act as the correction and transfer types.
    for category in prefs['categories']:
        if category.lower() == 'correction':
            prefs['correction_type'] = category
        if category.lower() == 'transfer':
            prefs['transfer_type'] = category
    preferences['user'] = prefs
    preferences.close()
    # Reject the upload if any required column is missing; otherwise sort,
    # reindex, and persist it as the working data.
    for header in ColumnSets.COLUMN_LIST:
        if header not in uploaded_file.columns:
            return False
    uploaded_file.sort_values(by=['Date', 'Transaction History', 'Cost'],
                              inplace=True)
    uploaded_file.reset_index(drop=True, inplace=True)
    uploaded_file.to_pickle(Routes.STORAGE_ADDRESS)
    return True
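

# The required columns are defined by ColumnSets.COLUMN_LIST; based on the
# columns referenced elsewhere in this module they include at least 'Date',
# 'Transaction History', 'Type', 'Cost', 'Checking', 'Savings', 'Total',
# 'Total Income', '401k', and 'HSA Account'.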


def export_file(body):
    # Write the current data to the exports folder in the requested format and
    # return the path it was written to ('' if the tag is unrecognized).
    df = Loaders.load_data()
    tag = body['filetag']
    if tag == 'p':
        address = Routes.EXPORTS + body['filename']
        df.to_pickle(address)
        return address
    elif tag == 'csv':
        address = Routes.EXPORTS + body['filename']
        df.to_csv(address)
        return address
    elif tag == 'xlsx':
        address = Routes.EXPORTS + body['filename']
        df.to_excel(address)
        return address
    else:
        return ""


def get_pie_data(cats, cols=None):
    # Aggregate cost per transaction type for the pie chart.
    df = Loaders.load_data(cols)
    df = df.groupby(['Type']).sum()
    # The adjustments below are personal corrections for errors in my own data;
    # remove them if you reuse this code.
    # df.at['INCOME', 'Cost'] += 10800
    # df.at['TAX', 'Cost'] -= 5703.06
    # df.at['UNTRACKED', 'Cost'] -= 3200
    # Fill the data and label arrays in the order the categories were
    # requested, using 0 for categories absent from the aggregate table.
    dataset = {'data': [], 'labels': []}
    for cat in cats:
        try:
            datum = df.at[cat, 'Cost']
        except KeyError:
            datum = 0
        else:
            if datum < 0:
                datum *= -1
        dataset['data'].append(round(datum, 2))
        dataset['labels'].append(cat)
    return dataset
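

# Illustrative shape of the structure get_pie_data returns (placeholder
# values): {'data': [812.34, 96.10], 'labels': ['FOOD', 'GAS']}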


def get_data(cols=None):
    df = Loaders.load_data(cols)
    df.replace(np.nan, 'N/A', regex=True, inplace=True)
    return df


def update_cell(body):
    df = Loaders.load_data()
    df.at[int(body['index']), body['column']] = body['category']
    df.to_pickle(Routes.STORAGE_ADDRESS)
    return df.to_dict()
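

# Example of the request body update_cell expects (the keys come from the
# lookups above; the values are placeholders):
#     {'index': '17', 'column': 'Type', 'category': 'FOOD'}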