def process_args(api, args):
    """Load an analytics (price) file and upsert it into an analytic store.

    Reads ``args.filename`` as CSV, where the first column identifies the
    instrument.  With ``args.fx`` the scope gets an ``_FX`` suffix and the
    instrument column is prefixed with ``CCY_`` (a placeholder third column is
    added so the rename below fits); otherwise instruments are resolved via
    ``mi.map_instruments``.  With ``args.update`` the analytics are written to
    an existing store; otherwise a store is created first.

    :param api: project API facade (``api.call`` / ``api.models``).
    :param args: parsed CLI arguments (filename, date, scope, fx, update).
    :return: an Either-style result from the API call chain.
    """
    df = lpt.read_csv(args.filename)
    date = lpt.to_date(args.date)
    scope = args.scope
    instr_col = df.columns.values[0]  # First column is the instrument

    if args.fx:
        scope = scope + "_FX"
        df[instr_col] = "CCY_" + df[instr_col]
        # Pad to three columns so the rename below matches; ccy is unused
        # for FX rates.
        df["column3"] = None
    else:
        mi.map_instruments(api, df, instr_col)

    # fix the column names
    df.columns = ["instrument", "price", "ccy"]

    def upsert_analytics(result=None):
        # Build one InstrumentAnalytic per row and push them for the date.
        analytics = [
            api.models.InstrumentAnalytic(row["instrument"], row["price"], row["ccy"])
            for _, row in df.iterrows()  # index is unused; '_' makes that explicit
        ]
        return api.call.set_analytics(
            scope, date.year, date.month, date.day, analytics
        ).bind(lambda r: None)

    if args.update:
        # Store already exists: just upsert the analytics.
        return upsert_analytics()
    # Create the analytic store first, then upsert into it.
    return api.call.create_analytic_store(
        api.models.CreateAnalyticStoreRequest(scope, date)
    ).bind(upsert_analytics)
def transactions(r=None):
    """Load transactions from ``args.transactions`` into one or more portfolios.

    Reads the input file, optionally mapping instruments first.  If
    ``args.portfolio`` starts with ``col:`` the remainder names a column whose
    values are portfolio codes and the file is split into one upsert per
    portfolio; otherwise the whole file is loaded into the single named
    portfolio.  Returns ``Either.Right(None)`` when no transactions file was
    supplied.

    NOTE(review): relies on ``args``, ``api``, ``lpt``, ``mi`` and ``Either``
    from the enclosing scope — presumably a closure inside a CLI handler.

    :param r: ignored; allows use as a ``.bind`` continuation.
    :return: an Either-style result from the API call chain.
    """
    if args.transactions:
        df = lpt.read_input(args.transactions)
        if args.map:
            # Fixed: the original line ended with a stray trailing comma,
            # wrapping the discarded call result in a throwaway 1-tuple.
            mi.map_instruments(api, df, "instrument_uid")

        def load_transactions(portfolio, txns):
            # Upsert one portfolio's transactions via the API.
            return api.call.upsert_transactions(
                args.scope,
                portfolio,
                transactions=api.from_df(txns, api.models.TransactionRequest),
            )

        if args.portfolio.lower().startswith("col:"):
            # Multiple portfolios contained in the file. Read the ID from the columns
            portfolio_column = args.portfolio[4:]

            def load_groups(iterator):
                # Chain one upsert per portfolio group; recursion ends with
                # Right(None) when the group iterator is exhausted.
                try:
                    portfolio, group_df = next(iterator)
                    print("Transactions: {}".format(portfolio))
                    return load_transactions(
                        portfolio, group_df.drop(portfolio_column, axis=1)
                    ).bind(lambda r: load_groups(iterator))
                except StopIteration:
                    return Either.Right(None)

            return load_groups(iter(df.groupby(portfolio_column)))
        else:
            # one-off load. The portfolio id is provided
            return load_transactions(args.portfolio, df)
    return Either.Right(None)