def calcs(self):
    # Build per-account balance comparisons between the GL snapshot and the current
    # ledger, plus a grand-total row, formatted for table rendering.
    self.link_map = lambda x: '/snapshot/glsnapshots/reconcile/%s/%s?date=%s' \
        % (self.snapshot.id, x, self.snapshot.closing_date.isoformat())

    # Use a dedicated query strategy for the snapshot side if one was supplied
    snapshot_strategy = QueryManager(gl_strategy=self.qm_strategy) if self.qm_strategy else self.query_manager
    snap_bals = snapshot_strategy.pd_acct_balances(self.company_id, {'snapshot': self.date}).fillna(DZERO)
    curr_bals = self.query_manager.pd_acct_balances(self.company_id, {'current': self.date}).fillna(DZERO)

    self.column_order = ['snapshot', 'current', 'diff']
    bals = pd.concat([snap_bals, curr_bals], axis=1).fillna(0.0)
    bals['diff'] = bals['current'] - bals['snapshot']
    bals.loc['Total'] = bals.sum(axis=0)

    # Map account IDs to human-readable labels
    accts = api_func('gl', 'account')
    acct_map = dict((a['id'], a['display_name']) for a in accts)
    label_map = lambda x: '%s: %s' % (x, acct_map[x]) if x in acct_map else x

    bals['fmt_tag'] = 'item'
    bals['label'] = bals.index.map(label_map)
    bals['link'] = bals.index.map(self.link_map)
    bals.loc['Total', 'fmt_tag'] = 'major_total'

    # Wrap every balance cell with its reconciliation link
    table_data = bals.to_dict(orient='records')
    for row in table_data:
        if row['fmt_tag'] != 'header':
            for col in self.column_order:
                row[col] = {'text': row[col], 'link': row['link']}
    return table_data

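# Minimal sketch of the balance-comparison core of calcs() above, using made-up account
# IDs and balances. It assumes pd_acct_balances() yields one balance column per call; the
# production method additionally attaches labels, links, and format tags.
def _demo_snapshot_diff():
    import pandas as pd
    snap = pd.DataFrame({'snapshot': {'1001': 100.0, '1200': 250.0}})
    curr = pd.DataFrame({'current': {'1001': 100.0, '1300': 75.0}})
    bals = pd.concat([snap, curr], axis=1).fillna(0.0)  # union of accounts; missing side -> 0
    bals['diff'] = bals['current'] - bals['snapshot']   # movement since the snapshot
    bals.loc['Total'] = bals.sum(axis=0)                # grand-total row
    return bals
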
def run_primecache(*args, **kwargs):
    # Warm the balance cache for all 'ALO' companies: every month-end of the current and
    # previous year, with and without the yearend tag and contra-company exclusions.
    year = datetime.datetime.now().year
    dates_list = utils.end_of_months(year) + utils.end_of_months(year - 1)
    dates = dict((dt.isoformat(), dt) for dt in dates_list)
    company_ids = [c['id'] for c in Company.objects.filter(cmpy_type='ALO').values('id')]
    tags = [None, ['yearend']]
    query_manager = QueryManager()

    for cmpy in company_ids:
        for tag in tags:
            # Prime once with no exclusions, once excluding every other company as contra
            exclude_cps = [None, [c for c in company_ids if c != cmpy]]
            for exclude_cp in exclude_cps:
                logger.info('priming balance cache with %s, %s, %s'
                            % (str(cmpy), str(tag), str(exclude_cp)))
                query_manager.pd_acct_balances(cmpy, dates,
                                               excl_contra=exclude_cp,
                                               excl_tags=tag)

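# Hedged sketch of the month-end dates run_primecache() expects from utils.end_of_months():
# presumably the last calendar day of each month in the given year. This stdlib-only
# stand-in is an assumption for illustration, not the project helper itself.
def _end_of_months_sketch(year):
    import calendar
    import datetime
    return [datetime.date(year, month, calendar.monthrange(year, month)[1])
            for month in range(1, 13)]
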
def download_tranlines(request):
    # Stream all transaction lines for the requesting user's company as a CSV download.
    company_id = utils.get_company(request)
    tranlines = QueryManager().tranlines(company_id)

    response = HttpResponse(content_type='text/csv')
    response['Content-Disposition'] = 'attachment; filename="tranlines.csv"'

    writer = csv.writer(response)
    writer.writerow(['TranLine ID', 'Transaction ID', 'Amount',
                     'Account Name', 'Counterparty Name', 'Counterparty ID'])
    for line in tranlines:
        writer.writerow(line)
    return response

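# Sketch of the same CSV-writing pattern as download_tranlines(), targeting an in-memory
# buffer instead of a Django HttpResponse, e.g. for checking the row layout in isolation.
# The rows passed in are whatever iterable of sequences the caller supplies; in production
# they come from QueryManager().tranlines().
def _tranlines_csv_sketch(rows):
    import csv
    import io
    buf = io.StringIO()
    writer = csv.writer(buf)
    writer.writerow(['TranLine ID', 'Transaction ID', 'Amount',
                     'Account Name', 'Counterparty Name', 'Counterparty ID'])
    for row in rows:
        writer.writerow(row)
    return buf.getvalue()
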
def expense_trends(request):
    # Dynamically generate the month labels spanning the trailing year
    today = datetime.datetime.now().date()
    last_year = datetime.date(today.year - 1, today.month, today.day)
    all_dts = ['%sM%02d' % (x[0], x[1]) for x in list(monthrange(last_year, today))]
    cols = dict(zip(all_dts, all_dts))

    raw_data = QueryManager().path_drilldown('SAV', cols, 'equity.retearnings.opexp',
                                             excl_contra=['4150'])

    # Pull out the top five expense paths and lump everything else into a 'rest' row.
    # Expense balances come through as negatives, so an ascending sort puts the
    # largest expenses first.
    total_exp = raw_data.sum(axis=1).sort_values()
    tbl_data = raw_data.loc[total_exp.index[:5]]
    tbl_data.loc['rest'] = raw_data.loc[total_exp.index[5:]].sum(axis=0)
    tbl_data.index = tbl_data.index.map(display_name)

    data = {'chart_data': {}}
    data['chart_data']['dates'] = list(tbl_data.columns)
    data['chart_data']['values'] = dict((pth, [-int(tbl_data.loc[pth, x])
                                               for x in data['chart_data']['dates']])
                                        for pth in tbl_data.index)
    return HttpResponse(json.dumps(data), content_type='application/json')
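
# Illustrative sketch of the 'top five plus rest' roll-up used in expense_trends(), on a
# synthetic DataFrame of negative expense balances (paths and figures are invented).
def _top5_plus_rest_sketch():
    import pandas as pd
    raw = pd.DataFrame({'2014M01': [-10, -50, -5, -80, -20, -1, -3],
                        '2014M02': [-12, -40, -6, -90, -25, -2, -4]},
                       index=['p%d' % i for i in range(7)])
    totals = raw.sum(axis=1).sort_values()            # most negative (largest spend) first
    top = raw.loc[totals.index[:5]].copy()            # five biggest expense paths
    top.loc['rest'] = raw.loc[totals.index[5:]].sum(axis=0)  # everything else, aggregated
    return top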