def load_limits() -> 'Dict':
    """Loads a pre-made limits file from the hard drive.
    To be used in the hedging simulation."""
    t = gf.timing()
    try:
        # Windows open path
        with open(sc.PATH_TO_LIMITS_DATA + r'\Limits.csv', mode='r') as limits_file:
            # Mac open path:
            # with open(sc.PATH_TO_LIMITS_DATA + r'/Limits.csv', mode='r') as limits_file:
            reader = csv.reader(limits_file)
            limits = {rows[0]: rows[1] for rows in reader}
        t(str(len(limits)) + ' limits loaded from Limits.csv.')
    except Exception:
        print('Limits loading from Limits.csv failed.')
        # Sentinel value so callers can detect the failed load
        limits = 0
    return limits

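# Usage sketch (not part of the original module): how a caller might consume
# load_limits(). The contents of Limits.csv are an assumption here - each row
# is taken to be a key/value pair, and both fields are read in as strings.
def _demo_load_limits():
    """Hypothetical helper illustrating load_limits(); the name and the
    assumed .csv layout are not from the original code base."""
    limits = load_limits()
    if limits == 0:
        # load_limits() returns 0 as a sentinel when the file could not be read
        print('No limits available.')
        return
    for key, value in limits.items():
        # Values come straight from the .csv, so cast before using them numerically
        print(key, float(value))
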
def set_up_company(company: 'Dict') -> 'Tuple of dicts':
    """'Crank up' a company with sales and procurement invoices.

    The set generated below is a simplified reflection of Norganic in 2017,
    but with fewer invoices, so it is faster to simulate:

    sales = ig.generate_list_of_invoices(2018, 60, 111681000, {'NOK': 100})
    procurement = ig.generate_list_of_invoices(2018, 100, 110211000,
        {'NOK': 28, 'EUR': 43, 'USD': 20, 'SEK': 4, 'DKK': 3, 'GBP': 2})
    """
    # Create the mock-up company
    t = gf.timing()

    # Preload lists of invoices
    sales = {}
    procurement = {}
    net_income = {}
    years_in_company = sorted(int(key) for key in company)
    for year in range(years_in_company[0], years_in_company[-1] + 1):
        # Generate sales and procurement invoices with these values
        sales[year] = ig.generate_list_of_invoices(
            year, company[str(year)]['Amount_of_Invoices'],
            company[str(year)]['Total_Revenue'],
            company[str(year)]['Sales_Currency_Distribution'], 'Revenue')
        procurement[year] = ig.generate_list_of_invoices(
            year, company[str(year)]['Amount_of_Invoices'],
            company[str(year)]['Total_Revenue'] - company[str(year)]['Net_Income'],
            company[str(year)]['Proc_Currency_Distribution'],
            sc.DEFAULT_COSTS_CATEGORY)

        # Add a column for the NOK equivalent of the invoices on invoice date
        procurement[year] = conv.perform_conversion(procurement[year], True)
        sales[year] = conv.perform_conversion(sales[year], True)

        net_income[year] = company[str(year)]['Net_Income']

    t('Invoices generated.')
    return company, sales, procurement, net_income

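# Usage sketch (not part of the original module): the layout of the company
# dict is inferred from the keys read inside set_up_company(). The figures
# echo the 2018 example in the docstring above; the Net_Income value is an
# assumption (revenue minus the procurement total shown there). Note that the
# same 'Amount_of_Invoices' is used for both the sales and procurement side.
def _demo_set_up_company():
    """Hypothetical helper illustrating set_up_company()."""
    company = {
        '2018': {
            'Amount_of_Invoices': 100,
            'Total_Revenue': 111681000,
            'Net_Income': 1470000,  # assumed: 111681000 - 110211000
            'Sales_Currency_Distribution': {'NOK': 100},
            'Proc_Currency_Distribution': {'NOK': 28, 'EUR': 43, 'USD': 20,
                                           'SEK': 4, 'DKK': 3, 'GBP': 2},
        }
    }
    company, sales, procurement, net_income = set_up_company(company)
    print(len(sales[2018]), 'sales invoices generated for 2018')
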
def load_correlations() -> 'Dict':
    """Loads a pre-made correlations file from the hard drive.
    To be used in the hedging simulation."""
    t = gf.timing()
    try:
        # Windows path
        path = r'C:\Apps\Currency Extractor\limit-files-for-python\Correlations.csv'
        # Mac path:
        # path = r'/Users/bjorngyles/Desktop/Currency Extractor/limit-files-for-python/Correlations.csv'
        with open(path, mode='r') as correlations_file:
            reader = csv.reader(correlations_file)
            correlations = {rows[0]: rows[1] for rows in reader}
        t(str(len(correlations)) + ' correlations loaded from Correlations.csv.')
    except Exception:
        print('Correlations loading from Correlations.csv failed.')
        # Sentinel value so callers can detect the failed load
        correlations = 0
    return correlations

def load_rates() -> 'Array, Dictionary':
    """Load all exchange rate .csv files from sc.PATH_TO_RATES_DATA into an
    array of currency crosses and an exchange rates dictionary."""
    # Read data from all available .csv files
    t = gf.timing()
    try:
        # Windows path (what sc.PATH_TO_RATES_DATA typically points to):
        # r'C:\Apps\Currency Extractor\rates-files-for-python'
        # Mac path:
        # r'/Users/bjorngyles/Desktop/Currency Extractor/rates-files-for-python'
        all_files = glob.glob(os.path.join(sc.PATH_TO_RATES_DATA, "*.csv"))
        rates_from_files = pd.concat(
            pd.read_csv(f, thousands=',', decimal='.', quoting=csv.QUOTE_MINIMAL)
            for f in all_files)
        # rates_from_files now contains all rates tables from the .csv files
        note = str(len(rates_from_files)
                   ) + ' historic exchange rates loaded from available .csv files...'
    except Exception:
        # Return sentinel values so callers can detect the failed load
        t('Rates loading from available .csv files failed.')
        return 0, 0

    # Filter the data frame to only contain keys that look like EURNOK_2019-05-19
    rates_from_files = rates_from_files.set_index('ID').filter(
        regex='^[A-Z]{6}[_][0-9]{4}[-][0-9]{2}[-][0-9]{2}$',
        axis=0).reset_index()

    # Also convert the data frame into a dictionary, as it is MUCH faster to access later on
    rates_dictionary = {
        str(k): float(str(v).replace(',', ''))
        for k, v in zip(rates_from_files['ID'], rates_from_files['Price'])
    }

    # Update user
    t(note)
    return rates_from_files.CROSS.unique(), rates_dictionary

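# Usage sketch (not part of the original module): keys in the rates dictionary
# follow the pattern enforced by the filter regex, e.g. 'EURNOK_2019-05-19'.
# The specific cross and date below are examples only and depend on which
# .csv files are actually present on disk.
def _demo_load_rates():
    """Hypothetical helper illustrating load_rates()."""
    crosses, rates = load_rates()
    if rates == 0:
        # load_rates() returns (0, 0) as a sentinel when nothing could be read
        print('No rates available.')
        return
    print('Available crosses:', crosses)
    example_key = 'EURNOK_2019-05-19'  # hypothetical key
    if example_key in rates:
        print(example_key, '=', rates[example_key])
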
def main():
    t = gf.timing()
    results = clean_up_and_sort_results(construct_exposure_matrix())
    t('Exposure determined.')
    return results

def perform_conversion(input_table: 'Dataframe',
                       add_to_original_set=False) -> 'SetOfConvertedData':
    """Takes an input dataframe and generates a converted amount in the
    accounting currency for each row. Uses the invoice date as the reference
    date."""
    t = gf.timing()

    # Build up the result columns row by row
    original_currency = []
    conversion_dates = []
    conversion_amounts = []
    conversion_rates = []
    original_category = []
    converted_data = pd.DataFrame()

    # Loop over all the rows in the input_table (see
    # https://stackoverflow.com/questions/16476924/how-to-iterate-over-rows-in-a-dataframe-in-pandas
    # for how to improve this).
    for i, row in input_table.iterrows():
        amount = gf.convert_excel_amount(row[AMOUNT])
        date = gf.convert_excel_date(row[INVOICE_DATE])
        currency = row[INVOICE_CURRENCY]
        try:
            category = row[CATEGORY]
        except KeyError:
            # No category column in the input: fall back to the default
            category = sc.DEFAULT_COSTS_CATEGORY
        try:
            rate = er.exchange_rate(currency + sc.ACCOUNTING_CURRENCY, date.date())
        except Exception:
            # date may already be a plain date rather than a datetime
            rate = er.exchange_rate(currency + sc.ACCOUNTING_CURRENCY, date)
        if not isinstance(rate, str):
            original_currency.append(currency)
            conversion_dates.append(date)
            conversion_rates.append(float(rate))
            conversion_amounts.append(amount * float(rate))
            original_category.append(category)
        else:
            original_currency.append(np.nan)
            conversion_dates.append(np.nan)
            conversion_rates.append(np.nan)
            conversion_amounts.append(np.nan)
            original_category.append(np.nan)

    # Add the results to the converted_data DataFrame
    converted_data['Original_Currency'] = original_currency
    converted_data['Conversion_Date'] = conversion_dates
    converted_data['Amount_[' + sc.ACCOUNTING_CURRENCY + ']'] = conversion_amounts
    converted_data['Rate'] = conversion_rates
    converted_data['Category'] = original_category

    if add_to_original_set:
        input_table['Amount_[' + sc.ACCOUNTING_CURRENCY + ']'] = conversion_amounts
        input_table['Rate'] = conversion_rates
        converted_data = input_table

    # Print message
    t(str(len(converted_data)) + ' rows of conversion results generated.')

    # Return the converted data set. If run in this module, results will be
    # copied to the clipboard. If called from another module, results will be
    # used further.
    return converted_data

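# Usage sketch (not part of the original module): builds a two-row input table
# using the module's own column-name constants (AMOUNT, INVOICE_DATE,
# INVOICE_CURRENCY, CATEGORY). The amounts, dates and currencies are made up,
# and the accepted cell formats are an assumption - gf.convert_excel_amount()
# and gf.convert_excel_date() decide what is actually valid.
def _demo_perform_conversion():
    """Hypothetical helper illustrating perform_conversion()."""
    input_table = pd.DataFrame({
        AMOUNT: [1000.0, 2500.0],
        INVOICE_DATE: ['2018-01-15', '2018-02-20'],
        INVOICE_CURRENCY: ['EUR', 'USD'],
        CATEGORY: ['Revenue', sc.DEFAULT_COSTS_CATEGORY],
    })
    converted = perform_conversion(input_table, add_to_original_set=True)
    print(converted[['Rate', 'Amount_[' + sc.ACCOUNTING_CURRENCY + ']']])
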