class TCACallerImplGen(TCACaller):
    # Concrete TCACaller used by the Dash GUI: wires up the "calculate" buttons
    # for the detailed / aggregated / compliance TCA screens.

    def __init__(self, app, session_manager, callback_manager, glob_volatile_cache, layout):
        super(TCACallerImplGen, self).__init__(app, session_manager, callback_manager, glob_volatile_cache, layout)

        self._util_func = UtilFunc()

    def calculate_computation_summary(self, tca_type, external_params=None):
        # Returns the callback function triggered by the Dash application (or, if
        # external_params is supplied, invokes it immediately and returns its status string)

        # callback triggered by Dash application
        def callback(*args):
            """Kicks off fetching of data of market data and TCA calculations for a specific currency pair. Caches
            the data in a VolatileCache instance, ready to be read in by the other charts.

            The meaning of *args depends on the enclosing tca_type ('detailed', 'aggregated' or 'compliance');
            each branch below unpacks its own set of GUI field values, always ending with n_clicks.

            Parameters
            ----------
            ticker_val : str
                ticker to be used in TCA calculations

            start_date_val : str
                Start date of TCA analysis

            start_time_val : str
                Start time of TCA analysis

            finish_date_val : str
                Finish date of TCA analysis

            finish_time_val : str
                Finish time of TCA analysis

            venue_val : str
                Venue data to be used

            n_clicks : int
                Number of clicks

            Returns
            -------
            str
                Status message for display in the GUI
            """
            start = time.time()

            tag = tca_type + '-calculation-button'

            old_clicks = self._session_manager.get_session_clicks(tag)

            # make sure none of the other charts/links are plotted till we have completed this!
            self._session_manager.set_session_flag(
                [self._plot_flags['aggregated'], self._plot_flags['detailed'], self._plot_flags['compliance']], False)

            logger = LoggerManager.getLogger(__name__)

            if tca_type == 'detailed':
                ticker_val, start_date_val, start_time_val, finish_date_val, finish_time_val, \
                    broker_val, algo_val, venue_val, market_data_val, metric_val, n_clicks = args

                # Catch cases where users repeatedly click, which can cause misalignment in clicks
                self._session_manager.set_session_clicks(tag, n_clicks, old_clicks=old_clicks)

                logger.debug(self.create_generate_button_msg(old_clicks, n_clicks))

                # Make sure all the parameters have been selected
                # (and only recalculate when the click count has actually advanced)
                if ticker_val != '' and venue_val != '' and start_date_val != '' and start_time_val != '' and \
                        finish_date_val != '' and finish_time_val != '' and market_data_val != '' and broker_val != '' and \
                        algo_val != '' and n_clicks > old_clicks:

                    # Expand tickers/broker fields etc, in case for example 'All' has been specified or any other groups
                    broker_val = self._util_func.populate_field(
                        broker_val, constants.available_brokers_dictionary, exception_fields='All')
                    algo_val = self._util_func.populate_field(
                        algo_val, constants.available_algos_dictionary, exception_fields='All')
                    venue_val = self._util_func.populate_field(
                        venue_val, constants.available_venues_dictionary, exception_fields='All')

                    # Combine the start date/time and finish date/time
                    start_date_val = start_date_val + ' ' + start_time_val
                    finish_date_val = finish_date_val + ' ' + finish_time_val

                    metric_val = metric_val.replace(' ', '_')

                    logger.debug('Calculation click old: ' + str(old_clicks) + " clicks vs new " + str(n_clicks))

                    self._session_manager.set_session_clicks(tag, n_clicks)
                    self._session_manager.set_session_flag('metric', value=metric_val)
                    self._session_manager.set_session_flag('detailed-visualization', value=True)

                    logger.info('Selected ' + ticker_val + " " + start_date_val + " - " + finish_date_val)

                    # Check that dates are less than 1 month apart
                    # (max_plot_days caps how much tick data can be plotted in one go)
                    if pd.Timestamp(finish_date_val) - pd.Timestamp(start_date_val) > pd.Timedelta(
                            days=constants.max_plot_days):
                        return "Status: Cannot plot more than " + str(constants.max_plot_days) + " days!"
                    elif pd.Timestamp(start_date_val) >= pd.Timestamp(finish_date_val):
                        return "Status: Start date must be before the end date"

                    try:
                        # if True:

                        # Clear the cache for the current user
                        self._glob_volatile_cache.clear_key_match(self._session_manager.get_session_id())

                        results_form = [
                            # Calculate the distribute of the metric for trades/orders, broken down by
                            # trade side (buy/sell)
                            DistResultsForm(
                                trade_order_list=['trade_df', 'order_df'], metric_name=metric_val,
                                aggregate_by_field='side', scalar=10000.0,
                                weighting_field='executed_notional_in_reporting_currency'),

                            # Create a table the markout of every trade
                            TableResultsForm(
                                trade_order_list=['trade_df'], metric_name='markout', filter_by='all',
                                replace_text={'markout_': '', 'executed_notional': 'exec not',
                                              'notional_currency': 'exec not cur'},
                                keep_fields=['executed_notional', 'side', 'notional_currency'],
                                scalar={'all': 10000.0, 'exclude': ['executed_notional', 'side']},
                                round_figures_by={'all': 2, 'executed_notional': 0, 'side': 0},
                                weighting_field='executed_notional')
                        ]

                        benchmark_calcs = [
                            # Calculate the arrival prices for every trade/order
                            BenchmarkArrival(trade_order_list=['trade_df', 'order_df']),

                            # Calculate the VWAP for each order
                            BenchmarkVWAP(trade_order_list=['order_df']),

                            # Calculate the TWAP for each order
                            BenchmarkTWAP(trade_order_list=['order_df'])
                        ]

                        metric_calcs = [metric_val, MetricMarkout(trade_order_list=['trade_df'])]

                        # Get from cache, note given that we are in the first part of the chain we should
                        # force it to calculate!
                        sparse_market_trade_df = self.get_cached_computation_analysis(
                            key='sparse_market_trade_df',
                            start_date=start_date_val, finish_date=finish_date_val,
                            ticker=ticker_val, venue=venue_val,
                            market_data=market_data_val, event_type='trade',
                            dummy_market=False,
                            broker=broker_val, algo=algo_val,
                            metric_calcs=metric_calcs,
                            metric_trade_order_list=['trade_df', 'order_df'],
                            benchmark_calcs=benchmark_calcs,
                            tca_type='detailed',
                            tca_engine=self._tca_engine,
                            results_form=results_form,
                            force_calculate=True)

                        # Actual date range of the returned data (may differ from what was requested)
                        calc_start = sparse_market_trade_df.index[0]
                        calc_end = sparse_market_trade_df.index[-1]

                        detailed_title = self.create_status_msg_flags('detailed', ticker_val, calc_start, calc_end)

                    except Exception as e:
                        LoggerManager().getLogger(__name__).exception(e)

                        return "Status: error " + str(e) + ". Check dates?"

                    finish = time.time()

                    return 'Status: calculated ' + str(round(finish - start, 3)) + "s for " + detailed_title

            elif tca_type == 'aggregated':
                ticker_val, start_date_val, finish_date_val, broker_val, algo_val, venue_val, reload_val, market_data_val, \
                    event_type_val, metric_val, n_clicks = args

                # Catch cases where users repeatedly click, which can cause misalignment in clicks
                self._session_manager.set_session_clicks(tag, n_clicks, old_clicks=old_clicks)

                logger.debug(self.create_generate_button_msg(old_clicks, n_clicks))

                # NOTE(review): unlike the 'detailed' branch, this condition does not check
                # broker_val/algo_val/market_data_val for '' — presumably deliberate, but confirm
                if ticker_val != '' and start_date_val != '' and venue_val != '' \
                        and finish_date_val != '' and reload_val != '' and event_type_val != '' and metric_val != '' and \
                        n_clicks > old_clicks:

                    # Expand tickers/broker fields etc, in case for example 'All' has been specified or any other groups
                    ticker_val_list = self._util_func.populate_field(ticker_val, constants.available_tickers_dictionary)
                    broker_val_list = self._util_func.populate_field(broker_val, constants.available_brokers_dictionary)
                    algo_val_list = self._util_func.populate_field(algo_val, constants.available_algos_dictionary)
                    venue_val_list = self._util_func.populate_field(venue_val, constants.available_venues_dictionary)

                    metric_val = metric_val.replace(' ', '_')

                    logger.debug('Calculation click old: ' + str(old_clicks) + " clicks vs new " + str(n_clicks))

                    self._session_manager.set_session_clicks(tag, n_clicks)
                    self._session_manager.set_session_flag('metric', value=metric_val)
                    self._session_manager.set_session_flag('aggregated-visualization', True)

                    try:
                        # if True:

                        # Clear the cache for the current user
                        self._glob_volatile_cache.clear_key_match(self._session_manager.get_session_id())

                        results_form = [
                            # Show the distribution of the selected metric for trades weighted by notional
                            # aggregated by ticker and then by venue
                            DistResultsForm(
                                trade_order_list=['trade_df'], metric_name=metric_val,
                                aggregate_by_field=['ticker', 'venue'],
                                weighting_field='executed_notional_in_reporting_currency'),

                            # Display the timeline of metrics average by day (and weighted by notional)
                            TimelineResultsForm(
                                trade_order_list=['trade_df'], by_date='date', metric_name=metric_val,
                                aggregation_metric='mean', aggregate_by_field='ticker', scalar=10000.0,
                                weighting_field='executed_notional_in_reporting_currency'),

                            # Display a bar chart showing the average metric weighted by notional and aggregated
                            # by ticker venue
                            BarResultsForm(
                                trade_order_list=['trade_df'], metric_name=metric_val,
                                aggregation_metric='mean', aggregate_by_field=['ticker', 'venue'], scalar=10000.0,
                                weighting_field='executed_notional_in_reporting_currency')
                        ]

                        try:
                            # if True:

                            timeline_trade_df_metric_by_ticker = self.get_cached_computation_analysis(
                                key='timeline_trade_df_' + metric_val + '_by_ticker',
                                start_date=start_date_val, finish_date=finish_date_val,
                                event_type=event_type_val,
                                ticker=ticker_val_list, broker=broker_val_list, algo=algo_val_list,
                                venue=venue_val_list,
                                market_data=market_data_val, dummy_market=True,
                                tca_engine=self._tca_engine,
                                tca_type='aggregated',
                                metric_calcs=metric_val,
                                metric_trade_order_list=['trade_df'],
                                results_form=results_form,
                                force_calculate=True, reload_val=reload_val,
                                trade_order_mapping=['trade_df'])

                            calc_start = timeline_trade_df_metric_by_ticker.index[0]
                            calc_end = timeline_trade_df_metric_by_ticker.index[-1]

                            aggregated_title = self.create_status_msg_flags('aggregated', ticker_val, calc_start,
                                                                            calc_end)

                            logger.debug('Plotted aggregated summary plot!')

                            finish = time.time()

                        except Exception as e:
                            LoggerManager().getLogger(__name__).exception(e)

                            return "Status: error - " + str(e) + ". Check data exists for these dates?"

                    except Exception as e:
                        LoggerManager().getLogger(__name__).exception(e)

                        return 'Status: error - ' + str(e) + ". Check data exists for these dates?"

                    return 'Status: calculated ' + str(round(finish - start, 3)) + "s for " + aggregated_title

            elif tca_type == 'compliance':
                ticker_val, start_date_val, finish_date_val, broker_val, algo_val, venue_val, reload_val, market_data_val, \
                    filter_time_of_day_val, start_time_of_day_val, finish_time_of_day_val, slippage_bounds_val, \
                    visualization_val, n_clicks = args

                # Catch cases where users repeatedly click, which can cause misalignment in clicks
                self._session_manager.set_session_clicks(tag, n_clicks, old_clicks=old_clicks)

                logger.debug(self.create_generate_button_msg(old_clicks, n_clicks))

                if ticker_val != '' and start_date_val != '' and broker_val != '' and algo_val != '' and venue_val != '' \
                        and finish_date_val != '' and reload_val != '' and filter_time_of_day_val != '' \
                        and start_time_of_day_val != '' and finish_time_of_day_val != '' and slippage_bounds_val != '' \
                        and n_clicks > old_clicks:

                    # Expand tickers/broker fields etc, in case 'All' (or any other group) has been specified
                    ticker_val_list = self._util_func.populate_field(ticker_val, constants.available_tickers_dictionary)
                    broker_val_list = self._util_func.populate_field(
                        broker_val, constants.available_brokers_dictionary, exception_fields='All')
                    algo_val_list = self._util_func.populate_field(
                        algo_val, constants.available_algos_dictionary, exception_fields='All')
                    venue_val_list = self._util_func.populate_field(
                        venue_val, constants.available_venues_dictionary, exception_fields='All')

                    logger.debug('Calculation click old: ' + str(old_clicks) + " clicks vs new " + str(n_clicks))

                    self._session_manager.set_session_clicks(tag, n_clicks)

                    if visualization_val == 'yes':
                        self._session_manager.set_session_flag('compliance-visualization', True)
                    else:
                        self._session_manager.set_session_flag('compliance-visualization', False)

                    try:
                        # if True:

                        # Clear the cache for the current user
                        self._glob_volatile_cache.clear_key_match(self._session_manager.get_session_id())

                        # 'bid/ask' means use the market's own bid/ask spread as the anomaly boundary;
                        # otherwise slippage_bounds_val is a numeric bound (bp) that overwrites bid/ask
                        slippage_bounds = 0.0
                        overwrite_bid_ask = True

                        if slippage_bounds_val == 'bid/ask':
                            overwrite_bid_ask = False
                        else:
                            slippage_bounds = float(slippage_bounds_val)

                        metric_calcs = [
                            # Calculate slippage for trades
                            MetricSlippage(trade_order_list='trade_df'),
                        ]

                        benchmark_calcs = [
                            # Generate the spread to mid for market data (in certain case artificially create a spread)
                            BenchmarkSpreadToMid(
                                bid_mid_bp=slippage_bounds, ask_mid_bp=slippage_bounds,
                                overwrite_bid_ask=overwrite_bid_ask)
                        ]

                        results_form = [
                            # Display a table of all the anomalous trades by slippage (ie. outside bid/ask)
                            TableResultsForm(
                                # Only display for trades
                                trade_order_list=['trade_df'],

                                # Display slippage
                                metric_name='slippage',

                                # Order by the worst slippage
                                filter_by='worst_all',

                                # Replace text on table to make it look nicer
                                replace_text={'markout_': '', 'executed_notional': 'exec not', '_currency': ' cur',
                                              '_in_reporting': ' in rep', 'slippage_benchmark': 'benchmark',
                                              'slippage_anomalous': 'anomalous', 'broker_id': 'broker ID',
                                              'algo_id': 'algo ID', 'executed_price': 'price'},

                                exclude_fields_from_avg=['slippage_anomalous', 'slippage_benchmark', 'side'],

                                # Only select trades outside bid/ask (ie. where slippage anomalous = 1)
                                tag_value_combinations={'slippage_anomalous': 1.0},

                                # Display several columns
                                keep_fields=['ticker', 'broker_id', 'algo_id', 'notional_currency',
                                             'executed_notional', 'executed_notional_in_reporting_currency', 'side',
                                             'executed_price'],

                                # Multiply slippage field by 10000 (to convert into basis points)
                                scalar={'slippage': 10000.0},

                                # Round figures to make them easier to read
                                round_figures_by={'executed_notional': 0,
                                                  'executed_notional_in_reporting_currency': 0,
                                                  'side': 0, 'slippage': 2, 'slippage_benchmark': 4}),

                            # Get the total notional executed by broker (in reporting currency)
                            BarResultsForm(
                                # Select child orders
                                trade_order_list=['trade_df'],

                                # Aggregate by broker name
                                aggregate_by_field='broker_id',

                                # Select the notional for analysis
                                metric_name='executed_notional_in_reporting_currency',  # analyse notional

                                # Sum all the notionals
                                aggregation_metric='sum',

                                # Round figures
                                round_figures_by=0)
                        ]

                        # Reformat tables for notional by broker
                        # (JoinTables post-processing is currently disabled — kept for reference)
                        join_tables = [
                            # JoinTables(
                            #     tables_dict={'table_name': 'jointables_broker_id_df',
                            #
                            #                  # fetch the following calculated tables
                            #                  'table_list': [
                            #                      'bar_order_df_executed_notional_in_reporting_currency_by_broker_id'],
                            #
                            #                  # append to the columns of each table
                            #                  'column_list': ['notional (rep cur)'],
                            #                  'replace_text': {'broker_id': 'broker ID'}
                            #                  })
                        ]

                        try:
                            # if True:

                            trade_df = self.get_cached_computation_analysis(
                                key='trade_df',
                                start_date=start_date_val, finish_date=finish_date_val,
                                start_time_of_day=start_time_of_day_val,
                                finish_time_of_day=finish_time_of_day_val,
                                filter_time_of_day=filter_time_of_day_val,
                                event_type='trade',
                                ticker=ticker_val_list, broker=broker_val_list, algo=algo_val_list,
                                venue=venue_val_list,
                                dummy_market=True,
                                market_data=market_data_val,
                                tca_engine=self._tca_engine,
                                tca_type='compliance',
                                metric_calcs=metric_calcs,
                                benchmark_calcs=benchmark_calcs,
                                metric_trade_order_list=['trade_df'],
                                results_form=results_form,
                                join_tables=join_tables,
                                force_calculate=True, reload_val=reload_val,
                                trade_order_mapping=['trade_df'])

                            calc_start = trade_df.index[0]
                            calc_end = trade_df.index[-1]

                            compliance_title = self.create_status_msg_flags('compliance', ticker_val, calc_start,
                                                                            calc_end)

                            logger.debug('Generated compliance summary.. awaiting plot callbacks!')

                            finish = time.time()

                        except Exception as e:
                            logger.exception(e)

                            return "Status: error " + str(e) + ". Check data exists for these dates?"

                    except Exception as e:
                        logger.exception(e)

                        return 'Status: error ' + str(e) + ". Check data exists for these dates?"

                    return 'Status: calculated ' + str(round(finish - start, 3)) + "s for " + compliance_title

            # Reached when parameters were incomplete or the click count did not advance
            raise dash.exceptions.PreventUpdate(
                "No data changed - " + tca_type)  # Not very elegant but only way to prevent plots disappearing

            # return "Status: ok"

        # NOTE(review): callback is defined with *args but invoked here with **external_params;
        # this only works if external_params is empty or callback's signature tolerates it — confirm
        if external_params is not None:
            return callback(**external_params)

        return callback
class TCAEngineImpl(TCAEngine):
    """This does the computation for TCA style _calculations for a specific currency pair for the analysis of trades
    and orders over a number of days. It creates a number of different additional DataFrames, which can be used by a
    GUI or dumped to disk.

    - sparse combination of market prices and trades/orders alongside these
    - a markout table for all the trades
    """

    def __init__(self, version=constants.tcapy_version):
        super(TCAEngineImpl, self).__init__(version=version)

        self._util_func = UtilFunc()

    def calculate_tca(self, tca_request):
        """Does a full TCA calculation according to various criteria such as:

        - ticker to be examined (eg. EURUSD - must be a single ticker)
        - start and finish dates for the calculation?
        - what benchmarks to use for comparison for each side of the trade?

        Parameters
        ----------
        tca_request : TCARequest
            Defines the parameters for the TCA calculation

        Returns
        -------
        dict
            Maps table name (eg. 'trade_df', 'market_df') to the calculated DataFrame

        Raises
        ------
        DataMissingException
            If none of the returned tables contains any data
        """
        logger = LoggerManager.getLogger(__name__)

        if tca_request.tca_provider == 'internal_tcapy':
            # Check the inputs of the TCARequest are valid
            ValidateRequest().validate_request(tca_request)

            # For detailed TCA analysis
            # this is specifically only for ONE ticker (*always* return the market data to user)
            if tca_request.tca_type == 'detailed':

                # Only allow one ticker when we are doing detailed analysis
                if len(tca_request.ticker) > 1:
                    logger.info("More than 1 ticker specified for TCA detailed computation. Only working on first")

                if isinstance(tca_request.ticker, list):
                    tca_request.ticker = tca_request.ticker[0]

                # Load market/trade data and compute all the TCA metrics/benchmarks etc.
                market_df_dict, trade_order_results_df_dict = \
                    self._tca_market_trade_loader.load_market_calculate_summarize_metrics(
                        tca_request, dummy_market=tca_request.dummy_market)

                # For 'detailed' the (single) ticker's market data is always returned to the user
                if market_df_dict is not None:
                    if tca_request.ticker in market_df_dict.keys():
                        trade_order_results_df_dict['market_df'] = market_df_dict[tca_request.ticker]

            # If we want aggregated TCA analysis, typically to later calculate many metrics across many trades and
            # _tickers, as opposed to one specific currency pair
            # Or for market-analysis (which involves purely _calculations on market data WITHOUT any trade/order data)
            elif tca_request.tca_type in ('aggregated', 'compliance', 'market-analysis'):

                # Expand any grouped fields (eg. 'All') into explicit ticker/venue lists
                tca_request.ticker = self._util_func.populate_field(
                    tca_request.ticker, constants.available_tickers_dictionary)
                tca_request.venue = self._util_func.populate_field(
                    tca_request.venue, constants.available_venues_dictionary, exception_fields='All')

                # Load market/trade data and compute all the TCA metrics/benchmarks/displays
                market_df_dict, trade_order_results_df_dict = \
                    self._tca_market_trade_loader.load_market_calculate_summarize_metrics(
                        tca_request, dummy_market=tca_request.dummy_market)

                # Add the market data to our dictionary, for further user analysis later, if desired (generally don't
                # do this because the underlying tick data can be very large!
                if market_df_dict is not None:
                    for k in market_df_dict.keys():
                        trade_order_results_df_dict[k + '_df'] = market_df_dict[k]

            else:
                # Previously an unknown tca_type fell through to a NameError below; fail with a clear message instead
                raise ValueError("Unknown tca_type: " + str(tca_request.tca_type))

        else:
            # In the future will support external TCA providers too
            logger.error("TCA provider " + tca_request.tca_provider + " is not implemented yet!")

            return None

        # The calculation produced usable output only if at least one returned table is a non-empty DataFrame.
        # (The previous loop iterated the dict *keys* and tested them against None, which was a no-op:
        # the keys are always strings here — the values are what must be inspected.)
        contains_data = any(
            isinstance(df, pd.DataFrame) and not df.empty
            for df in trade_order_results_df_dict.values())

        if not contains_data:
            raise DataMissingException("No data for " + str(tca_request.ticker) + " between " +
                                       str(tca_request.start_date) + " - " + str(tca_request.finish_date))

        return trade_order_results_df_dict

    def get_engine_description(self):
        # Short identifier for this engine implementation
        return 'tca-engine-impl'