def toCsv(inputFile, outputDir, prefix):
    """
    [String] inputFile, [String] outputDir, [String] prefix
        => [String] outputFile

    Side effect: create an output csv file
    """
    logger.info('toCsv(): {0}'.format(inputFile))

    dictToValues = lambda keys, d: map(partial(getitem, d), keys)

    (dateString, holdings, cashEntries) = \
        readJPM(worksheetToLines(open_workbook(inputFile).sheet_by_index(0)))

    (holdingFile, cashFile) = getOutputFilename(dateString, prefix, outputDir)

    headers = [ 'portfolio', 'custodian', 'date', 'geneva_investment_id'
              , 'ISIN', 'bloomberg_figi', 'name', 'currency', 'quantity']
    writeCsv( holdingFile
            , chain([headers], map(partial(dictToValues, headers), holdings))
            , '|')

    headers = ['portfolio', 'custodian', 'date', 'currency', 'balance']
    writeCsv( cashFile
            , chain([headers], map(partial(dictToValues, headers), cashEntries))
            , '|')

    return [holdingFile, cashFile]
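
# Hedged usage sketch of the JPM toCsv() above; the file name, output
# directory and prefix are hypothetical values, not taken from the source:
#
#   holdingFile, cashFile = toCsv('JPM Holdings 20200131.xls', 'output', 'jpm_')
#
# Both output files are pipe-delimited; the first carries the holding headers
# listed above, the second the cash headers.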


def toCsv(inputFile, outputDir, mode):
    """
    [String] inputFile, [String] outputDir
        => [String] outputFile name (including path)

    Side effect: create an output csv file
    """
    headers = [ 'CLIENT A/C NO.', 'REF NO.', 'SEC ID TYPE', 'SEC ID'
              , 'SEC NAME', 'TRAN TYPE', 'TRADE DATE', 'SETT DATE', 'QTY/NOMINAL'
              , 'SEC CCY', 'PRICE', 'GROSS AMT', 'FEE CCY', 'COMMISSION'
              , 'STAMP DUTY', 'TAXES AND OTHER FEES', 'ACCRUED INT', 'NET AMT'
              , 'SETT CCY', 'NET AMT BASE', 'CORRESPONDENT', 'BROKER', 'BROKER A/C'
              , 'CLEARER AGENT', 'CLEARER AGENT A/C', 'INTERMEDIATE AGENT'
              , 'INTERMEDIATE AGENT A/C', 'PSET', 'PLACE OF SAFEKEEPING'
              , 'REMARKS', 'MESSAGE FUNCTION']

    date, positions = readHolding(inputFile)
    outputFile = join(outputDir, 'Trade Blotter ' + date + '_Nomura.csv')

    # For each position, take the value under each header column, or an empty
    # string if the position does not carry that field.
    positionToRow = lambda position: \
        [position[h] if h in position else '' for h in headers]

    writeCsv( outputFile
            , chain( [headers]
                   , map( positionToRow
                        , map(partial(cmbPosition, mode), positions)))
            , quotechar='"'
            , quoting=csv.QUOTE_NONNUMERIC)

    return outputFile
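
# Hedged usage sketch of the trade blotter toCsv() above; the paths are
# assumptions for illustration only:
#
#   outputFile = toCsv('nomura holdings.xls', 'output', mode)
#
# If readHolding() returns the date string '20200131', the output file is
# 'output/Trade Blotter 20200131_Nomura.csv', written with QUOTE_NONNUMERIC
# quoting and '"' as the quote character.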


def toCsv(portId, inputFile, outputDir, prefix):
    """
    [String] portId, [String] inputFile, [String] outputDir, [String] prefix
        => [String] outputFile name (including path)

    Side effect: create an output csv file

    This function is to be called by recon_helper.py from the
    reconciliation_helper package.
    """
    if isHoldingFile(inputFile):
        gPositions = map( partial(genevaPosition, portId, getDateFromFilename(inputFile))
                        , readHolding( open_workbook(inputFile).sheet_by_index(0)
                                     , getStartRow()))
        headers = [ 'portfolio', 'custodian', 'date', 'geneva_investment_id'
                  , 'ISIN', 'bloomberg_figi', 'name', 'currency', 'quantity']
        prefix = prefix + 'holding_'

    elif isCashFile(inputFile):
        gPositions = map( partial(genevaCash, portId, getDateFromFilename(inputFile))
                        , readCash(open_workbook(inputFile).sheet_by_index(0)))
        headers = ['portfolio', 'custodian', 'date', 'currency', 'balance']
        prefix = prefix + 'cash_'

    else:
        raise ValueError('toCsv(): invalid input file {0}'.format(inputFile))

    rows = map(partial(dictToValues, headers), gPositions)
    outputFile = getOutputFileName(inputFile, outputDir, prefix)
    writeCsv(outputFile, chain([headers], rows), '|')
    return outputFile
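
# Hedged usage sketch (portfolio id, file name and prefix are hypothetical):
#
#   outputFile = toCsv('12345', 'Holdings 20200131.xlsx', 'output', 'recon_')
#
# A holding file produces a '<prefix>holding_' csv, a cash file a
# '<prefix>cash_' csv, and any other input raises ValueError.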


def writeTSCF(folder):
    """
    [String] folder => write an output csv file in the folder.
    """
    headRows = [ ['Upload Method', 'INCREMENTAL', '', '', '', '']
               , [ 'Field Id', 'Security Id Type', 'Security Id', 'Account Code'
                 , 'Numeric Value', 'Char Value']
               ]

    writeCsv( join( folder
                  , 'f3321tscf.historical.' + datetime.now().strftime('%Y%m%d') + '.inc')
            , chain(headRows, folderToTSCF(folder)))


def writeTradeFiles(recordGroups, outputDir, portfolio, broker, date):
    """
    [List] recordGroups
    , [String] outputDir
    , [String] portfolio
    , [String] broker
    , [Datetime] date
        => create output csv file(s), one for each group

    The trade file will be uploaded to Bloomberg. It contains the below fields:

    Account: account code in AIM (e.g., 40006)
    Security: Bloomberg Ticker
    Broker: broker code (e.g., IB-QUANT)
    Side: Buy/Cover, Sell/Short
    Quantity:
    Price:
    Trade Date: [String] mm/dd/yy
    Settlement Date: same format as Trade Date

    No header row is required.
    """
    fields = [ 'Account', 'BloombergTicker', 'Broker', 'Side', 'Quantity'
             , 'Price', 'TradeDate', 'SettlementDate', 'Commission Code 1'
             , 'Commission Amt 1', 'Strategy']

    outputFiles = []
    for (index, group) in enumerate(recordGroups):
        file = createTradeFileName(index, date, outputDir, portfolio)
        writeCsv(file, [createCsvRow(fields, portfolio, broker, record) for record in group])
        outputFiles.append(file)

    if outputFiles == []:
        logger.debug('writeTradeFiles(): {0}, {1} no trades were written to output'.format(
                        portfolio, date))

    return outputFiles
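
# Hedged usage sketch: recordGroups is an iterable of trade record groups and
# one csv file is written per group; the values below are made up for
# illustration, not taken from the source.
#
#   files = writeTradeFiles(recordGroups, 'output', '40006', 'IB-QUANT', tradeDate)
#
# The return value is the list of files written, or [] (with a debug log
# entry) when there are no trades.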


def writeCashFile(portfolio, records, outputDir, date):
    """
    [List] cash records => [String] output csv file name

    The cash file is for Geneva reconciliation. It contains the below fields:

    Portfolio: account code in Geneva (e.g., 40006)
    Date: [String] yyyy-mm-dd
    Currency:
    Balance:

    A header row is included.
    """
    fields = ['Portfolio', 'Date', 'Currency', 'Balance']
    file = join(outputDir, toFileName(date, portfolio, 'cash'))
    writeCsv(file, createCsvRows(fields, records, portfolio))
    return file


def writeTrusteeTradeFile(outputDir, portfolio, date, positions):
    """
    [String] outputDir (the directory to write the csv file)
    [String] portfolio
    [String] date (yyyy-mm-dd)
    [Iterator] positions (the positions from the trade file)
        => [String] csv file

    From the date and positions of the Bloomberg AIM trade file, write the
    CL trustee trade file.
    """
    updatePosition = lambda p: \
        mergeDictionary(p, {'Broker Long Name': p['FACC Long Name']})

    headers = [ 'Fund', 'Ticker & Exc', 'ISIN', 'Shrt Name', 'B/S', 'Yield'
              , 'As of Dt', 'Stl Date', 'Amount Pennies', 'Price', 'Bkr Comm'
              , 'Stamp Duty', 'Exch Fee', 'Trans. Levy', 'Misc Fee', 'Crcy'
              , 'Broker Long Name', 'Accr Int', 'Settle Amount', 'L1 Tag Nm']

    positionToValues = lambda position: map(lambda key: position[key], headers)

    def getOutputFileName(portfolio, date, outputDir):
        prefix = 'Order Record of A-HK Equity ' if portfolio == '11490' \
                    else 'Order Record of A-HK Equity_BOC ' if portfolio == '11500' \
                    else 'Order Record of A-MC-P Equity'    # 13006

        return join( outputDir
                   , prefix + datetime.strftime( datetime.strptime(date, '%Y-%m-%d')
                                               , '%y%m%d') + '.csv')

    getOutputRows = lambda portfolio, date, positions: \
        chain( [ [ 'China Life Franklin - CLT-CLI HK BR (CLASS A-HK) TRUST FUND' \
                    if portfolio == '11490' else \
                   'China Life Franklin - CLT-CLI HK BR (CLASS A-HK) TRUST FUND_BOC' \
                    if portfolio == '11500' else \
                   'CLT-CLI Macau BR (Class A-MC) Trust Fund-Par'    # 13006
                 ]
               , [ '{0} Equity FOR {0} ON '.format(portfolio) \
                    + datetime.strftime(datetime.strptime(date, '%Y-%m-%d'), '%m/%d/%y')
                 ]
               , ['']    # an empty row
               , headers
               ]
             , map( positionToValues
                  , map( updatePosition
                       , filter(lambda p: p['Fund'].startswith(portfolio), positions)))
             )

    return writeCsv( getOutputFileName(portfolio, date, outputDir)
                   , getOutputRows(portfolio, date, positions)
                   , delimiter=',')
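
# File name pattern, derivable from getOutputFileName() above (the date is an
# example value): portfolio '11490' with date '2020-01-31' writes
# 'Order Record of A-HK Equity 200131.csv' under outputDir.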


def outputCsv(positions):
    headerRows = \
        [ ['Upload Method', 'INCREMENTAL', '', '', '', '']
        , [ 'Field Id', 'Security Id Type', 'Security Id', 'Account Code'
          , 'Numeric Value', 'Char Value']
        ]

    toCsvRow = lambda p: \
        ['CD012', 4, p['ISIN'], p['Portfolio'], p['AmortizedCost'], p['AmortizedCost']]

    return writeCsv( 'f3321tscf.htm.' + positions[0]['Date'] + '.inc'
                   , chain(headerRows, map(toCsvRow, positions)))


def convertAccumulateExcelToCSV(file):
    """
    [String] file => [String] file

    Read an accumulative trade Excel file and write it out as csv. We need to
    make sure dates are written as yyyy-mm-dd, so that they are consistent
    with the daily addon from the Bloomberg AIM trade file.

    The csv file name is the same as the Excel file, except that its file
    extension is '.csv' instead of '.xlsx'.

    This is a utility function that needs to run only once, to convert the
    Excel version of the accumulative trade file into csv format. After that,
    we just need to add daily trades to that csv file.
    """
    getOutputFileName = lambda fn: \
        fn[0:-4] + 'csv' if fn.endswith('.xlsx') else \
        fn[0:-3] + 'csv' if fn.endswith('.xls') else \
        lognRaise('convertAccumulateExcelToCSV(): invalid input file {0}'.format(fn))

    """
    [List] line => [List] headers

    Note the second header is an empty string, but we need to keep it. All
    other empty strings in the list are ignored.
    """
    getHeaders = compose( list
                        , partial(map, lambda t: t[1])
                        , partial(takewhile, lambda t: t[0] < 2 or t[1] != '')
                        , lambda line: zip(count(), line))

    def toDatetimeString(value):
        if isinstance(value, float):
            return datetime.strftime(fromExcelOrdinal(value), '%Y-%m-%d')
        else:
            try:
                return datetime.strftime(datetime.strptime(value, '%m/%d/%Y'), '%Y-%m-%d')
            except ValueError:
                return datetime.strftime(datetime.strptime(value, '%d/%m/%Y'), '%Y-%m-%d')

    getLineItems = lambda headers, line: compose(
        partial( map
               , lambda t: toDatetimeString(t[1]) \
                    if t[0] in ['Trade Date', 'Settlement Date'] else t[1]
               )
      , lambda headers, line: zip(headers, line)
    )(headers, line)

    return compose(
        lambda rows: writeCsv(getOutputFileName(file), rows, delimiter=',')
      , lambda t: chain([t[0]], map(partial(getLineItems, t[0]), t[1]))
      , lambda lines: (getHeaders(pop(lines)), lines)
      , fileToLines
    )(file)
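
# Behaviour of toDatetimeString() above, derivable from the code (the sample
# values are illustrative): a float cell, i.e. an Excel date ordinal, is
# converted via fromExcelOrdinal(); a text cell such as '03/15/2020' (m/d/Y),
# or failing that '15/03/2020' (d/m/Y), becomes '2020-03-15'.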


def writePositionFile(portfolio, records, outputDir, date):
    """
    [List] position records => [String] output csv file name

    The position file is for Geneva reconciliation. It contains the below fields:

    Portfolio: account code in Geneva (e.g., 40006)
    Custodian: custodian bank ID
    Date: [String] yyyy-mm-dd
    Investment: identifier in Geneva
    Currency:
    Quantity:
    Date: same as Date above

    A header row is included.
    """
    fields = ['Portfolio', 'Date', 'Investment', 'Currency', 'Quantity']
    file = join(outputDir, toFileName(date, portfolio, 'position'))
    writeCsv(file, createCsvRows(fields, records, portfolio))
    return file


def buildLqaRequestOldStyle(name, date, positions):
    """
    [String] name (name of the LQA request, 'masterlist_clo' etc.)
    [String] date (yyyy-mm-dd)
    [Iterable] positions
        => [String] output LQA request file name

    Side effect: create an LQA request file.

    This version builds a csv file that is for human inspection.
    """
    headers = [ 'Id'
              , 'IdType'
              , 'LQA_POSITION_TAG_1'
              , 'LQA_TGT_LIQUIDATION_VOLUME'
              , 'LQA_SOURCE_TGT_LIQUIDATION_COST'
              , 'LQA_FACTOR_TGT_LIQUIDATION_COST'
              , 'LQA_TGT_LIQUIDATION_HORIZON'
              , 'LQA_TGT_COST_CONF_LEVL'
              , 'LQA_MODEL_AS_OF_DATE'
              ]

    lqaPosition = lambda name, date, position: \
        { 'Id': position['Id']
        , 'IdType': position['IdType']
        , 'LQA_POSITION_TAG_1': name
        , 'LQA_TGT_LIQUIDATION_VOLUME': position['Position']
        , 'LQA_SOURCE_TGT_LIQUIDATION_COST': 'PR' if position['IdType'] == 'TICKER' else 'BA'
        , 'LQA_FACTOR_TGT_LIQUIDATION_COST': '20' if position['IdType'] == 'TICKER' else '1'
        , 'LQA_TGT_LIQUIDATION_HORIZON': '1'
        , 'LQA_TGT_COST_CONF_LEVL': '95'
        , 'LQA_MODEL_AS_OF_DATE': date
        }

    lqaFile = 'LQA_request_' + name + '_' + date + '.csv'

    return writeCsv( lqaFile
                   , chain( [headers]
                          , map( lambda p: [p[key] for key in headers]
                               , map(partial(lqaPosition, name, date), positions))))
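
# Hedged usage sketch; 'masterlist_clo' comes from the docstring above, the
# date is an example value:
#
#   buildLqaRequestOldStyle('masterlist_clo', '2020-03-31', positions)
#
# writes 'LQA_request_masterlist_clo_2020-03-31.csv'. TICKER positions get
# liquidation cost source 'PR' with factor '20', all others 'BA' with '1'.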


def writeIdnTypeToFile(file, positions):
    """
    [String] output file name, [Iterator] positions
        => [String] output file name

    Side effect: write a csv file containing the id, idType for the positions.

    A utility function, using which we can convert positions (Geneva or
    Bloomberg) to a file containing two columns (id, idType). The file will
    be used to load Bloomberg information for asset type processing.
    """
    noNeedId = lambda position: \
        any(juxt(isPrivateSecurity, isCash, isMoneyMarket, isRepo, isFxForward)(position))

    return compose(
        lambda idnTypes: writeCsv(file, chain([('ID', 'ID_TYPE')], idnTypes))
      , set
      , partial(map, getIdnType)
      , partial(filterfalse, noNeedId)
    )(positions)
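
# Hedged usage sketch (the output file name is hypothetical): positions that
# are private securities, cash, money market, repo or FX forward are filtered
# out, the rest are de-duplicated via set() before writing.
#
#   writeIdnTypeToFile('idntype.csv', positions)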


def writeComparisonCsv(month, profitLossFile, dailyInterestFile):
    """
    [Int] month, [String] profitLossFile, [String] dailyInterestFile
        => [String] csv output

    Side effect: write a csv output file showing the difference between
    position level interest income and the sum of tax lot level interest
    income.
    """
    toString = lambda x: '0' + str(x) if x < 10 else str(x)

    def getTaxLotInterestWithDefault(d, taxlotId):
        if taxlotId in d:
            return d[taxlotId]
        else:
            print('{0} not found'.format(taxlotId))
            return 0

    sumTaxlotInterestIncome = lambda taxlotMapping, taxLotInterestIncome, investId: \
        compose(
            sum
          , partial(map, partial(getTaxLotInterestWithDefault, taxLotInterestIncome))
          , lambda investId: taxlotMapping[investId]
        )(investId)

    getTaxLotIds = lambda taxlotMapping, investId: \
        ' '.join(taxlotMapping[investId])

    """
    [Tuple] (investId, interest income)
        => [Tuple] (investId, interest income, interest income of all tax lots, tax lot list)
    """
    outcomeRow = lambda taxlotMapping, taxLotInterestIncome, plEntry: \
        ( plEntry[0]
        , plEntry[1]
        , sumTaxlotInterestIncome(taxlotMapping, taxLotInterestIncome, plEntry[0])
        , getTaxLotIds(taxlotMapping, plEntry[0])
        )

    """
    [Tuple] (investId, interest income, interest income of all tax lots, tax lot list)
        => [Tuple] ( investId, interest income, interest income of all tax lots
                   , difference between the two, tax lot list)
    """
    addDelta = lambda t: \
        (t[0], t[1], t[2], t[1] - t[2], t[3])

    return writeCsv(
        'tax lot cross check 2020-' + toString(month) + '.csv'
      , chain( [('InvestId', 'Interest Income', 'Interest Income Tax Lot', 'Difference', 'Tax Lots')]
             , map( addDelta
                  , map( partial( outcomeRow
                                , getInvestIdTaxlotMapping(dailyInterestFile)
                                , getTaxlotInterestIncome(
                                    list(readDailyInterestAccrualDetailTxtReport(
                                        'utf-16', '\t', dailyInterestFile)[0]))
                                )
                       , filterfalse(
                            lambda t: t[0] in ('HK0000241288 HTM', 'XS1376566714 HTM')    # get rid of CERCG
                          , getInterestIncomeFromPL(profitLossFile))))
             )
      , delimiter=','
    )
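
# toString() above zero-pads the month: toString(3) == '03', toString(11) == '11',
# so month 3 produces 'tax lot cross check 2020-03.csv'.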


        partial(groupbyToolz, lambda p: p['SortKey'])
      , partial( filterfalse
               , lambda p: p['SortKey'] in ['Cash and Equivalents', 'FX Forward'])
    )(positions)


getCsvFilename = lambda metaData: \
    'investment types ' + metaData['Portfolio'] + ' ' + metaData['PeriodEndDate'] + '.csv'


"""
    [String] filename, [Dictionary] typeCount => [String] output csv name
"""
writeOutputCsv = lambda filename, typeCount: \
    writeCsv( filename
            , chain( [('InvestmentType', 'UniquePositionCount')]
                   , typeCount.items())
            )


if __name__ == '__main__':
    import logging.config
    logging.config.fileConfig('logging.config', disable_existing_loggers=False)

    import argparse
    parser = argparse.ArgumentParser(description='Give statistics of investment types')
    parser.add_argument('file', metavar='file', type=str, help='investment position report')

    """
    Generate an investment position report, then:


                         , positions)))
        ) if isCashFile(inputFile) else \
        ( '_cmbc_' + date + '_position'
        , chain( [getHoldingHeaders()]
               , map( partial(dictToValues, getHoldingHeaders())
                    , map( partial(holdingPosition, date)
                         , positions)))
        )
    )(*getPositions(inputFile))


outputCsv = lambda inputFile, outputDir: \
    (lambda postfix, outputData: \
        writeCsv( getOutputFileName(inputFile, postfix, outputDir)
                , outputData, delimiter='|')
    )(*toOutputData(inputFile))


if __name__ == '__main__':
    import logging.config
    logging.config.fileConfig('logging.config', disable_existing_loggers=False)

    # inputFile = join(getCurrentDirectory(), 'samples', 'Security Holding 20191209.xls')
    inputFile = join(getCurrentDirectory(), 'samples', 'Security Holding 20200131.xlsx')

    # Show the first raw position
    # for p in getRawPositions(inputFile):
    #     print(p)
    #     break


        return True
    else:
        return False


def toList(tradeTable):
    """
    [Dict] tradeTable => [List] tuple (month, # trades, # trades portfolio)

    Where tradeTable is a dictionary like:

    '2018-01': (5588, 123)
    '2018-02': (6789, 456)
    """
    return [(key, value[0], value[1]) for key, value in tradeTable.items()]


if __name__ == '__main__':
    import logging.config
    logging.config.fileConfig('logging.config', disable_existing_loggers=False)

    """
    To use the program, put Bloomberg XML trade files into a folder (specified
    in the config file), then run:

        $python trade.py
    """
    writeCsv( 'result.csv'
            , sorted(toList(tradeTable(getFiles(getInputDirectory(), True), '40006'))))
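
# Example, derivable from the docstring of toList() above:
#
#   toList({'2018-01': (5588, 123)}) == [('2018-01', 5588, 123)]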