Example #1
def test_write_chunked_market_data_arctic():
    """For very large CSV files we might need to read them in chunks. tcapy supports this and also supports CSVs
    which are sorted in reverse (ie. descending). We need to enable chunking and reverse reading with flags.

    This tests whether chunked data is written correctly to Arctic, comparing it with that read from CSV directly
    """

    if not (run_arctic_tests): return

    market_loader = Mediator.get_tca_market_trade_loader(version=tcapy_version)

    arctic_start_date = '01 Jan 2016'; arctic_finish_date = pd.Timestamp(datetime.datetime.utcnow())

    # load data from CSVs directly (for comparison later)
    market_df_csv_desc = DatabaseSourceCSV(market_data_database_csv=csv_reverse_market_data_store).fetch_market_data(
        start_date=arctic_start_date, finish_date=arctic_finish_date, ticker=ticker)

    market_df_csv_asc = DatabaseSourceCSV(market_data_database_csv=csv_market_data_store).fetch_market_data(
        start_date=arctic_start_date, finish_date=arctic_finish_date, ticker=ticker)

    for a in arctic_lib_type:
        database_source = DatabaseSourceArctic(postfix='testharness', arctic_lib_type=a)

        ### write CSV data to Arctic which is sorted ascending (default!)
        database_source.convert_csv_to_table(csv_market_data_store, ticker,
                                             test_harness_arctic_market_data_table,
                                             if_exists_table='replace',
                                             if_exists_ticker='replace', market_trade_data='market',
                                             csv_read_chunksize=100000, remove_duplicates=False)

        market_request = MarketRequest(start_date=arctic_start_date, finish_date=arctic_finish_date, ticker=ticker,
                                       data_store=test_harness_arctic_market_data_store,
                                       market_data_database_table=test_harness_arctic_market_data_table)

        market_df_load = market_loader.get_market_data(market_request=market_request)

        # compare reading directly from the CSV vs. reading back from arctic
        assert all(market_df_csv_asc['mid'] - market_df_load['mid'] < eps)

        ### write CSV data to Arctic which is sorted descending
        database_source.convert_csv_to_table(csv_reverse_market_data_store, ticker,
                                             test_harness_arctic_market_data_table,
                                             if_exists_table='append',
                                             if_exists_ticker='replace', market_trade_data='market',
                                             csv_read_chunksize=100000, read_in_reverse=True, remove_duplicates=False)

        market_request = MarketRequest(start_date=arctic_start_date, finish_date=arctic_finish_date, ticker=ticker,
                                       data_store=test_harness_arctic_market_data_store,
                                       market_data_database_table=test_harness_arctic_market_data_table)

        market_df_load = market_loader.get_market_data(market_request=market_request)

        # compare reading directly from the CSV vs. reading back from arctic
        assert all(market_df_csv_desc['mid'] - market_df_load['mid'] < eps)
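
The same convert_csv_to_table flags can be exercised outside the test harness; below is a minimal sketch (reusing the ticker, CSV path and table name defined for the test above) of chunked ingestion of a descending-sorted CSV:

from tcapy.data.databasesource import DatabaseSourceArctic

# A minimal sketch, assuming the same test-harness names as above are in scope
database_source = DatabaseSourceArctic(postfix='testharness')

# csv_read_chunksize sets how many CSV rows are read per chunk, while
# read_in_reverse flags that the CSV is sorted descending by time
database_source.convert_csv_to_table(csv_reverse_market_data_store, ticker,
                                     test_harness_arctic_market_data_table,
                                     if_exists_table='replace', if_exists_ticker='replace',
                                     market_trade_data='market',
                                     csv_read_chunksize=100000, read_in_reverse=True,
                                     remove_duplicates=False)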
Example #2
def test_write_trade_data_ms_sql_server():
    """Tests that trade data can be read from CSV and dumped to MS SQL server
    """

    if not (run_ms_sql_server_tests): return

    database_source = DatabaseSourceMSSQLServer()

    for t in trade_order_list:
        # dump trade_df to SQL test harness database and overwrite
        database_source.convert_csv_to_table(csv_trade_order_mapping[t], None, sql_trade_order_mapping[t],
                                             database_name=test_harness_sql_server_trade_data_database_name,
                                             if_exists_table='replace', market_trade_data='trade')

        trade_order_df_sql = database_source.fetch_trade_order_data(
            start_date=start_date, finish_date=finish_date, ticker=ticker, table_name=sql_trade_order_mapping[t],
            database_name=test_harness_sql_server_trade_data_database_name)

        database_source_csv = DatabaseSourceCSV()

        trade_order_df_csv = database_source_csv.fetch_trade_order_data(
            start_date=start_date, finish_date=finish_date, ticker=ticker, table_name=csv_trade_order_mapping[t])

        comp_fields = ['executed_price', 'notional', 'side']

        # check that the data read back from MS SQL Server matches that from the original CSV
        for c in comp_fields:
            if c in trade_order_df_sql.columns and c in trade_order_df_csv.columns:
                exec_sql = trade_order_df_sql[c]  # .dropna()
                exec_csv = trade_order_df_csv[c]  # .dropna()

                exec_diff = exec_sql - exec_csv

                assert all(exec_diff < eps)
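
As an aside, the per-column loop above can also be written as a vectorised tolerance check; a small sketch using numpy (not part of the original test, and it assumes the compared columns are numeric):

import numpy as np

# Hedged alternative to the manual eps loop: compare each shared column in one call,
# using eps as an absolute tolerance
for c in comp_fields:
    if c in trade_order_df_sql.columns and c in trade_order_df_csv.columns:
        assert np.allclose(trade_order_df_sql[c].astype(float).values,
                           trade_order_df_csv[c].astype(float).values,
                           atol=eps)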
Example #3
def dataframe_tca_example():
    """Example for doing detailed TCA analysis on all the trades in a CSV, calculating metrics for slippage,
    transient market impact & permanent market impact. It also calculates benchmarks for arrival price of each trade and
    spread to mid).

    Collects results for slippage into a daily timeline and also average by venue (by default weights by reporting
    currency)
    """
    PLOT = False

    # clear entire cache
    # Mediator.get_volatile_cache(version='pro').clear_cache()

    tca_engine = TCAEngineImpl(version=tca_version)

    trade_order_type = 'trade_df'
    trade_order_list = ['trade_df']

    trade_df = DatabaseSourceCSV(trade_data_database_csv=csv_trade_order_mapping['trade_df']).fetch_trade_order_data()

    data_frame_trade_order_mapping = OrderedDict([('trade_df', trade_df)])

    start_date = trade_df.index[0]; finish_date = trade_df.index[-1]

    ticker_list = FXConv().correct_unique_notation_list(trade_df['ticker'].unique().tolist())

    # Specify the TCA request
    tca_request = TCARequest(start_date=start_date, finish_date=finish_date, ticker=ticker_list,
                             tca_type='aggregated', dummy_market=True,
                             trade_data_store='dataframe', market_data_store=market_data_store,
                             metric_calcs=[MetricSlippage(trade_order_list=trade_order_list),
                                           MetricTransientMarketImpact(transient_market_impact_gap={'ms': 100},
                                                                       trade_order_list=trade_order_list),
                                           MetricPermanentMarketImpact(permanent_market_impact_gap={'h': 1},
                                                                       trade_order_list=trade_order_list)],
                             results_form=[TimelineResultsForm(metric_name='slippage', by_date='date'),
                                           BarResultsForm(metric_name='slippage', aggregate_by_field='venue')],
                             benchmark_calcs=[BenchmarkArrival(), BenchmarkSpreadToMid()],
                             trade_order_mapping=data_frame_trade_order_mapping, use_multithreading=False)

    # Dictionary of dataframes as output from TCA calculation
    dict_of_df = tca_engine.calculate_tca(tca_request)

    print(dict_of_df.keys())

    timeline_df = dict_of_df['timeline_' + trade_order_type + '_slippage_by_all']  # average slippage per day
    metric_df = dict_of_df[trade_order_type]['permanent_market_impact']  # permanent market impact for every trade

    print(metric_df.head(500))

    if PLOT:
        from chartpy import Chart, Style

        # plot slippage by timeline
        Chart(engine='plotly').plot(timeline_df)

        # plot market impact (per trade)
        Chart(engine='plotly').plot(metric_df.head(500))
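
The keys of dict_of_df are composed from the results form type, the trade/order type, the metric and the aggregation field, as the timeline lookup above shows. A short sketch for the venue aggregation follows; the exact key name is inferred from the 'bar_<trade_order>_<metric>_by_<field>' pattern used in the compliance example further below, so treat it as an assumption:

# Average slippage by venue, produced by the BarResultsForm above
# (key name assumed from the naming pattern, e.g. 'bar_trade_df_slippage_by_venue')
venue_slippage_df = dict_of_df['bar_' + trade_order_type + '_slippage_by_venue']

print(venue_slippage_df)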
Example #4
def test_write_market_data_arctic():
    """Tests we can write market data to Arctic
    """
    if not (run_arctic_tests): return

    market_loader = Mediator.get_tca_market_trade_loader(version=tcapy_version)

    ### Test we can read data from CSV and dump to Arctic (and when read back it matches CSV)
    db_start_date = '01 Jan 2016'
    db_finish_date = pd.Timestamp(datetime.datetime.utcnow())

    replace_append = ['replace', 'append']

    # Check first when replacing full table and then appending
    for a in arctic_lib_type:
        for i in replace_append:

            database_source = DatabaseSourceArctic(postfix='testharness',
                                                   arctic_lib_type=a)

            # Write CSV to Arctic
            database_source.convert_csv_to_table(
                csv_market_data_store,
                ticker,
                test_harness_arctic_market_data_table,
                if_exists_table=i,
                if_exists_ticker='replace',
                market_trade_data='market',
                remove_duplicates=False)

            # Fetch data directly from CSV
            database_source_csv = DatabaseSourceCSV(
                market_data_database_csv=csv_market_data_store)

            market_df_csv = database_source_csv.fetch_market_data(
                start_date=db_start_date,
                finish_date=db_finish_date,
                ticker=ticker)

            # Read back data from Arctic and compare with CSV
            market_request = MarketRequest(
                start_date=db_start_date,
                finish_date=db_finish_date,
                ticker=ticker,
                data_store=database_source,  # test_harness_arctic_market_data_store
                market_data_database_table=test_harness_arctic_market_data_table)

            market_df_load = market_loader.get_market_data(
                market_request=market_request)

            diff_df = market_df_csv['mid'] - market_df_load['mid']

            diff_df.to_csv('test' + i + '.csv')
            assert all(diff_df < eps)
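
Note the check above is one-sided; the wildcard CSV test further below compares absolute differences instead. A symmetric variant of the same assertion would be:

# Sketch: symmetric tolerance check on the mid price, mirroring the abs() comparison
# used in test_write_multiple_wildcard_market_data_csvs_arctic below
diff_df = (market_df_csv['mid'] - market_df_load['mid']).abs()

assert (diff_df < eps).all()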
Example #5
def test_write_market_data_db():
    """Tests we can write market data to KDB/Influxdb/PyStore
    """

    database_source_list, test_harness_market_data_table_list, test_harness_data_store_list = \
        _get_db_market_database_source()

    market_loader = Mediator.get_tca_market_trade_loader(version=tcapy_version)

    for i in range(0, len(database_source_list)):

        database_source = database_source_list[i]
        test_harness_market_data_table = test_harness_market_data_table_list[i]
        test_harness_data_store = test_harness_data_store_list[i]

        ### Test we can read data from CSV and dump to InfluxDB/KDB/PyStore (and when read back it matches CSV)
        db_start_date = '01 Jan 2016'
        db_finish_date = pd.Timestamp(datetime.datetime.utcnow())

        replace_append = ['replace', 'append']

        database_source_csv = DatabaseSourceCSV(
            market_data_database_csv=csv_market_data_store)

        market_df_csv = database_source_csv.fetch_market_data(
            start_date=db_start_date,
            finish_date=db_finish_date,
            ticker=ticker)

        # Check first when replacing full table and then appending (will still replace ticker though)
        for if_exists in replace_append:

            database_source.convert_csv_to_table(
                csv_market_data_store,
                ticker,
                test_harness_market_data_table,
                if_exists_table=if_exists,
                if_exists_ticker='replace',
                market_trade_data='market',
                remove_duplicates=False)

            market_request = MarketRequest(
                start_date=db_start_date,
                finish_date=db_finish_date,
                ticker=ticker,
                data_store=test_harness_data_store,
                market_data_database_table=test_harness_market_data_table)

            market_df_load = market_loader.get_market_data(
                market_request=market_request)

            diff_df = market_df_csv['mid'] - market_df_load['mid']

            assert all(diff_df < eps)
Example #6
def test_append_market_data_arctic():
    """Tests we can append market data to arctic (we will have already written data to the test harness database)
    """
    if not (run_arctic_tests): return

    market_loader = Mediator.get_tca_market_trade_loader(version=tcapy_version)

    ### Test we can append (non-overlapping) data to Arctic
    arctic_start_date = '01 Jan 2016'; arctic_finish_date = pd.Timestamp(datetime.datetime.utcnow())

    # use this market request later when reading back from Arctic
    market_request = MarketRequest(start_date=arctic_start_date, finish_date=arctic_finish_date, ticker=ticker,
                                   data_store=test_harness_arctic_market_data_store,
                                   market_data_database_table=test_harness_arctic_market_data_table)

    # load data from CSV for comparison later
    database_source_csv = DatabaseSourceCSV(market_data_database_csv=csv_market_data_store)

    market_df_csv = database_source_csv.fetch_market_data(
        start_date=arctic_start_date, finish_date=arctic_finish_date, ticker=ticker)

    market_df_list = TimeSeriesOps().split_array_chunks(market_df_csv, chunks=2)

    for a in arctic_lib_type:

        database_source = DatabaseSourceArctic(postfix='testharness', arctic_lib_type=a)

        market_df_lower = market_df_list[0]
        market_df_higher = market_df_list[1]

        database_source.append_market_data(market_df_lower, ticker, table_name=test_harness_arctic_market_data_table,
                                           if_exists_table='replace', if_exists_ticker='replace', remove_duplicates=False)

        overlap_error = False

        ## Try to append overlapping data (this will fail!)
        try:
            database_source.append_market_data(market_df_lower, ticker,
                                               table_name=test_harness_arctic_market_data_table,
                                               if_exists_table='append', if_exists_ticker='append', remove_duplicates=False)
        except ErrorWritingOverlapDataException as e:
            overlap_error = True

        assert overlap_error

        # Append non-overlapping data which follows (writing overlapping data into Arctic will mess up the datastore!)
        database_source.append_market_data(market_df_higher, ticker, table_name=test_harness_arctic_market_data_table,
                                           if_exists_table='append', if_exists_ticker='append', remove_duplicates=False)

        market_df_all_read_back = market_loader.get_market_data(market_request=market_request)

        assert all(market_df_all_read_back['mid'] - market_df_csv['mid'] < eps)
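
Since writing overlapping data into Arctic can mess up the datastore, a cheap pre-check before appending is a reasonable guard; a minimal sketch on top of the test above (the index comparison is an assumption, not part of tcapy's API):

# Only append if the new chunk starts strictly after the data already written
if market_df_higher.index[0] > market_df_lower.index[-1]:
    database_source.append_market_data(market_df_higher, ticker,
                                       table_name=test_harness_arctic_market_data_table,
                                       if_exists_table='append', if_exists_ticker='append',
                                       remove_duplicates=False)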
Example #7
def test_write_trade_data_sql():
    """Tests that trade data can be read from CSV and dumped to various SQL dialect
    """

    database_source_list, test_harness_trade_database_list, test_harness_data_store_list = \
        _get_db_trade_database_source()

    for i in range(0, len(database_source_list)):

        database_source = database_source_list[i]

        test_harness_trade_database = test_harness_trade_database_list[i]
        test_harness_data_store = test_harness_data_store_list[i]

        for t in trade_order_list:
            # Dump trade_df to SQL test harness database and overwrite
            database_source.convert_csv_to_table(
                csv_trade_order_mapping[t],
                None, (sql_trade_order_mapping[test_harness_data_store])[t],
                test_harness_trade_database,
                if_exists_table='replace',
                market_trade_data='trade')

            trade_order_df_sql = database_source.fetch_trade_order_data(
                start_date=start_date,
                finish_date=finish_date,
                ticker=ticker,
                table_name=sql_trade_order_mapping[test_harness_data_store][t],
                database_name=test_harness_trade_database)

            database_source_csv = DatabaseSourceCSV()

            trade_order_df_csv = database_source_csv.fetch_trade_order_data(
                start_date=start_date,
                finish_date=finish_date,
                ticker=ticker,
                table_name=csv_trade_order_mapping[t])

            comp_fields = ['executed_price', 'notional', 'side']

            # Check that the data read back from SQL database matches that from the original CSV
            for c in comp_fields:
                if c in trade_order_df_sql.columns and c in trade_order_df_csv.columns:
                    exec_sql = trade_order_df_sql[c]  #.dropna()
                    exec_csv = trade_order_df_csv[c]  #.dropna()

                    exec_diff = exec_sql - exec_csv

                    assert all(exec_diff < eps)
Example #8
def test_randomized_trade_data_generation():
    """Tests randomized trade generation data (and writing to database)
    """
    data_test_creator = DataTestCreator(write_to_db=False)

    # Use Arctic as the database source for market data (or a CSV when use_test_csv is set)
    if use_test_csv:
        data_test_creator._database_source_market = DatabaseSourceCSV(
            market_data_database_csv=market_data_store)
    else:
        data_test_creator._database_source_market = DatabaseSourceArctic(
            postfix=postfix)

    # create randomised trade/order data
    trade_order = data_test_creator.create_test_trade_order(
        ticker, start_date=start_date, finish_date=finish_date)

    # trade_order is a dictionary containing trade_df and order_df

    # make sure the number of trades > number of orders
    assert (len(trade_order['trade_df'].index) > len(
        trade_order['order_df'].index))
Example #9
def test_randomized_trade_data_generation():
    """Tests randomized trade data generation (and writing to the database)
    """
    from tcapy.data.datatestcreator import DataTestCreator

    data_test_creator = DataTestCreator(market_data_postfix=postfix, write_to_db=False,
                                        market_data_database_table=test_harness_arctic_market_data_table,
                                        trade_data_database_name=test_harness_mysql_trade_data_database)

    # Use Arctic as the database source for market data (or a CSV when use_market_data_test_csv is set)
    if use_market_data_test_csv:
        data_test_creator._database_source_market = DatabaseSourceCSV(market_data_database_csv=market_data_store)
        data_test_creator._market_data_source = market_data_store
    else:
        data_test_creator._database_source_market = DatabaseSourceArctic(postfix=postfix)

    # Create randomised trade/order data
    trade_order = data_test_creator.create_test_trade_order(ticker, start_date=start_date, finish_date=finish_date)

    # trade_order is a dictionary containing trade_df and order_df

    # Make sure the number of trades > number of orders
    assert (len(trade_order['trade_df'].index) > len(trade_order['order_df'].index))
Example #10
def test_write_multiple_wildcard_market_data_csvs_arctic():
    """Tests we can write sequential market data CSVs (or HDF5) whose path has been specified by a wildcard (eg. EURUSD*.csv).
    It is assumed that the CSVs are in chronological orders, from their filenames.
    """
    if not (run_arctic_tests): return

    market_loader = Mediator.get_tca_market_trade_loader(version=tcapy_version)

    arctic_start_date = '01 Jan 2016'
    arctic_finish_date = pd.Timestamp(datetime.datetime.utcnow())

    for a in arctic_lib_type:
        database_source = DatabaseSourceArctic(postfix='testharness',
                                               arctic_lib_type=a)

        ### Read CSV data which is sorted ascending (default!)
        database_source.convert_csv_to_table(
            csv_market_data_store,
            ticker,
            test_harness_arctic_market_data_table,
            if_exists_table='replace',
            if_exists_ticker='replace',
            market_trade_data='market',
            csv_read_chunksize=10**6,
            remove_duplicates=False)

        database_source_csv = DatabaseSourceCSV(
            market_data_database_csv=csv_market_data_store)

        market_df_csv = database_source_csv.fetch_market_data(
            start_date=arctic_start_date,
            finish_date=arctic_finish_date,
            ticker=ticker)

        # Prepare the CSV folder first
        csv_folder = os.path.join(constants.test_data_harness_folder,
                                  'csv_arctic_mult')

        # Empty the CSV test harness folder, where we shall dump the mini CSVs
        UtilFunc().forcibly_create_empty_folder(csv_folder)

        # Split the CSV file into several mini CSV files (and also Parquet files)
        market_df_list = TimeSeriesOps().split_array_chunks(market_df_csv,
                                                            chunks=3)

        chunk_no = 0

        for m in market_df_list:
            m.to_csv(
                os.path.join(csv_folder, "EURUSD" + str(chunk_no) + '.csv'))
            UtilFunc().write_dataframe_to_binary(
                m,
                os.path.join(csv_folder,
                             "EURUSD" + str(chunk_no) + '.parquet'),
                format='parquet')

            chunk_no = chunk_no + 1

        file_ext = ['csv', 'parquet']

        for f in file_ext:
            ### Read CSV data from the mini CSVs (using wildcard char) and dump to Arctic
            database_source.convert_csv_to_table(
                os.path.join(csv_folder, "EURUSD*." + f),
                ticker,
                test_harness_arctic_market_data_table,
                if_exists_table='append',
                if_exists_ticker='replace',
                market_trade_data='market',
                csv_read_chunksize=10**6,
                remove_duplicates=False)

            market_request = MarketRequest(
                start_date=arctic_start_date,
                finish_date=arctic_finish_date,
                ticker=ticker,
                data_store=database_source,
                market_data_database_table=test_harness_arctic_market_data_table
            )

            # Read back from Arctic
            market_df_load = market_loader.get_market_data(
                market_request=market_request)

            # Compare reading directly from the original large CSV vs. reading back from arctic (which was dumped from split CSVs)
            diff_df = abs(market_df_load['mid'] - market_df_csv['mid'])

            outside_bounds = diff_df[diff_df >= eps]

            assert len(outside_bounds) == 0
Example #11
        file_extension = 'parquet'  # 'parquet' (recommended) or 'csv' or 'h5' on disk

        # Files dumped by DatabasePopulator look like this
        ## 'AUDUSD_dukascopy_2016-01-03_22_00_01.868000+00_002016-01-31_23_59_57.193000+00_00.parquet'

        csv_file = [x + '_' + data_vendor + '_20*.' + file_extension for x in
                    ticker_mkt]  # assumes that ALL times are in UTC!

        date_format = None
        read_in_reverse = False
        remove_duplicates = False

    ####################################################################################################################

    # Load market data
    data_source_csv = DatabaseSourceCSV()

    # Create market data store for database and associated data vendor
    if market_data_store == 'arctic':
        database_source = DatabaseSourceArctic(postfix=data_vendor)
        market_data_database_table = constants.arctic_market_data_database_table

    if market_data_store == 'pystore':
        database_source = DatabaseSourcePyStore(postfix=data_vendor)
        market_data_database_table = constants.pystore_market_data_database_table

    if market_data_store == 'influxdb':
        database_source = DatabaseSourceInfluxDB(postfix=data_vendor)
        market_data_database_table = constants.influxdb_market_data_database_table

    if market_data_store == 'kdb':
Example #12
def test_fetch_market_trade_data_csv():
    """Tests downloading of market and trade/order data from CSV files
    """

    ### Get market data
    market_loader = Mediator.get_tca_market_trade_loader()

    market_request = MarketRequest(start_date=start_date,
                                   finish_date=finish_date,
                                   ticker=ticker,
                                   data_store=csv_market_data_store)

    market_df = market_loader.get_market_data(market_request)

    assert not(market_df.empty) \
           and market_df.index[0] >= pd.Timestamp(start_date).tz_localize('utc') \
           and market_df.index[-1] <= pd.Timestamp(finish_date).tz_localize('utc')

    # For a high level trade data request, we need to use a TCARequest, because it usually involves some
    # market data download (we assume the market data is being downloaded from our Arctic database),
    # e.g. for converting notionals to the reporting currency
    tca_request = TCARequest(start_date=start_date,
                             finish_date=finish_date,
                             ticker=ticker,
                             trade_data_store='csv',
                             market_data_store=arctic_market_data_store,
                             trade_order_mapping=csv_trade_order_mapping)

    for t in trade_order_list:
        trade_order_df = market_loader.get_trade_order_data(tca_request, t)

        # The loader may return a handle into the volatile cache rather than a DataFrame; resolve it if so
        try:
            trade_order_df = Mediator.get_volatile_cache().get_dataframe_handle(trade_order_df)
        except Exception:
            pass

        assert not trade_order_df.empty \
               and trade_order_df.index[0] >= pd.Timestamp(start_date).tz_localize('utc') \
               and trade_order_df.index[-1] <= pd.Timestamp(finish_date).tz_localize('utc')

    ### Test using DataFactory and DatabaseSource
    from tcapy.data.datafactory import DataFactory

    data_factory = DataFactory()

    for t in trade_order_list:
        ### Test using DataFactory
        trade_request = TradeRequest(
            start_date=start_date,
            finish_date=finish_date,
            ticker=ticker,
            data_store='csv',
            trade_order_mapping=csv_trade_order_mapping,
            trade_order_type=t)

        trade_order_df = data_factory.fetch_table(trade_request)

        assert not trade_order_df.empty \
                          and trade_order_df.index[0] >= pd.Timestamp(start_date).tz_localize('utc') \
                          and trade_order_df.index[-1] <= pd.Timestamp(finish_date).tz_localize('utc')

        ### Test using DatabaseSourceCSV
        from tcapy.data.databasesource import DatabaseSourceCSV

        database_source = DatabaseSourceCSV()

        trade_order_df = database_source.fetch_trade_order_data(
            start_date,
            finish_date,
            ticker,
            table_name=csv_trade_order_mapping[t])

        assert not trade_order_df.empty \
                             and trade_order_df.index[0] >= pd.Timestamp(start_date).tz_localize('utc') \
                             and trade_order_df.index[-1] <= pd.Timestamp(finish_date).tz_localize('utc')
Example #13
def dataframe_compliance_tca_example():
    """Get a DataFrame of trades and apply compliance based TCA to it
    """

    tca_engine = TCAEngineImpl(version=tca_version)

    spread_to_mid_bp = 0.1
    trade_order_list = ['trade_df']

    # Read in CSV file as a DataFrame
    trade_df = DatabaseSourceCSV(
        trade_data_database_csv=csv_trade_order_mapping['trade_df']
    ).fetch_trade_order_data()

    data_frame_trade_order_mapping = OrderedDict([('trade_df', trade_df)])

    ticker_list = FXConv().correct_unique_notation_list(
        trade_df['ticker'].unique().tolist())

    start_date = trade_df.index[0]
    finish_date = trade_df.index[-1]

    # Specify the TCA request
    tca_request = TCARequest(
        start_date=start_date,
        finish_date=finish_date,
        ticker=ticker_list,
        tca_type='aggregated',
        dummy_market=True,
        trade_data_store='dataframe',
        market_data_store=market_data_store,
        metric_calcs=[
            MetricSlippage(trade_order_list=trade_order_list),
            MetricTransientMarketImpact(
                transient_market_impact_gap={'ms': 100},
                trade_order_list=trade_order_list),
            MetricPermanentMarketImpact(permanent_market_impact_gap={'h': 1},
                                        trade_order_list=trade_order_list)
        ],
        benchmark_calcs=[  # add spread to mid fields for every market data spot
            BenchmarkSpreadToMid(bid_mid_bp=spread_to_mid_bp,
                                 ask_mid_bp=spread_to_mid_bp),
        ],
        results_form=[
            # Display a table of all the anomalous trades by slippage (i.e. outside bid/ask)
            TableResultsForm(
                trade_order_list=['trade_df'],
                metric_name='slippage',
                filter_by='worst_all',  # Order by the worst slippage

                # Only flag trades outside bid/ask
                tag_value_combinations={'slippage_anomalous': 1.0},

                # Display only side and executed notionals
                keep_fields=[
                    'executed_notional_in_reporting_currency', 'side'
                ],
                round_figures_by=None),

            # Get the total notional executed by broker (in reporting currency)
            BarResultsForm(
                trade_order_list=['trade_df'],  # trade
                aggregate_by_field='broker_id',  # aggregate by broker name
                # keep_fields=['executed_notional_in_reporting_currency', 'executed_notional', 'side'],
                metric_name='executed_notional_in_reporting_currency',  # analyse notional
                aggregation_metric='sum',  # sum the notional
                scalar=1,  # no need for a multiplier
                round_figures_by=0),  # round to nearest unit

            # Get average slippage per broker (weighted by notional)
            BarResultsForm(
                trade_order_list=['trade_df'],
                aggregate_by_field='broker_id',
                metric_name='slippage',
                aggregation_metric='mean',
                # keep_fields=['executed_notional_in_reporting_currency', 'executed_notional', 'side'],
                weighting_field='executed_notional_in_reporting_currency',  # weight results by notional
                scalar=10000.0,  # convert to basis points
                round_figures_by=2)
        ],

        # Aggregate the results (total notional and slippage) by broker
        # into a single table for easy display to the user
        join_tables=[
            JoinTables(
                tables_dict={
                    'table_name': 'jointables_broker_id',

                    # fetch the following calculated tables
                    'table_list': [
                        'bar_trade_df_executed_notional_in_reporting_currency_by_broker_id',
                        'bar_trade_df_slippage_by_broker_id'
                    ],

                    # append to the columns of each table
                    'column_list': ['notional (rep cur)', 'slippage (bp)']
                })
        ],
        trade_order_mapping=data_frame_trade_order_mapping,
        use_multithreading=False)

    # Dictionary of dataframes as output from TCA calculation
    dict_of_df = tca_engine.calculate_tca(tca_request)

    # print all the output tables
    print(dict_of_df.keys())

    print('All trades')
    print(dict_of_df['trade_df'])

    print('Notional by broker ID')
    print(dict_of_df[
        'bar_trade_df_executed_notional_in_reporting_currency_by_broker_id'])

    print('Notional by broker ID and weighted slippage')
    print(dict_of_df['jointables_broker_id'])

    print('Trades by worst slippage')
    print(dict_of_df['table_trade_df_slippage_by_worst_all'])

    from chartpy import Canvas, Chart, Style

    broker_notional_chart = Chart(
        engine='plotly',
        df=dict_of_df[
            'bar_trade_df_executed_notional_in_reporting_currency_by_broker_id'],
        chart_type='bar',
        style=Style(title='Notional in USD per broker'))

    broker_slippage_chart = Chart(
        engine='plotly',
        df=dict_of_df['bar_trade_df_slippage_by_broker_id'],
        chart_type='bar',
        style=Style(title='Slippage by broker (bp)'))

    # Using plain template
    canvas = Canvas([[broker_notional_chart, broker_slippage_chart]])

    canvas.generate_canvas(silent_display=False, canvas_plotter='plain')
Example #14
from tcapy.conf.constants import Constants
from tcapy.util.utilfunc import UtilFunc

import os

constants = Constants()

if __name__ == '__main__':

    folder = constants.test_data_harness_folder

    csv_market_data_files = ['small_test_market_df.csv.gz', 'small_test_market_df_reverse.csv.gz']

    # Can either dump to Parquet (default) or HDF (optional)
    # format = 'hdf5'; file_ext = 'h5'
    format = 'parquet'; file_ext = 'parquet'

    for csv_market_data in csv_market_data_files:
        csv_market_data = os.path.join(folder, csv_market_data)

        REVERSE_SORT = False

        from tcapy.data.databasesource import DatabaseSourceCSV

        # Read CSV and parse the main field
        df = DatabaseSourceCSV()._fetch_table(csv_market_data)

        if REVERSE_SORT:
            df = df.sort_index(ascending=False)

        h5_market_data = csv_market_data.replace('.csv.gz', '.' + file_ext)
        UtilFunc().write_dataframe_to_binary(df, h5_market_data, format=format)
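
        # Optional sanity check (not in the original script; assumes the Parquet format chosen above):
        # read the file back with pandas and confirm the row count survives the round trip
        if format == 'parquet':
            import pandas as pd

            df_check = pd.read_parquet(h5_market_data)

            assert len(df_check.index) == len(df.index)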