Example #1
    def run_date_sort(self, event_stream, expected, source_ids):
        """
        Take a list of events, their source_ids, and an expected sorting.
        Assert that date_sort's output agrees with expected.
        """
        sort_out = date_sort(event_stream, source_ids)
        for m1, m2 in izip_longest(sort_out, expected):
            assert m1 == m2
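The assertion above leans on izip_longest (from itertools; zip_longest on Python 3): if date_sort drops or adds events, the shorter stream is padded with None and the comparison fails. Below is a standalone sketch of that behavior; the toy streams are assumptions for demonstration, not zipline events.

# Standalone illustration of the izip_longest check used above.
try:
    from itertools import izip_longest                   # Python 2
except ImportError:
    from itertools import zip_longest as izip_longest    # Python 3

sort_out = ['a', 'b', 'c']   # pretend output of date_sort
expected = ['a', 'b']        # expected ordering, one event short

pairs = list(izip_longest(sort_out, expected))
# [('a', 'a'), ('b', 'b'), ('c', None)] -- the final pair breaks m1 == m2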
Example #2
def date_sorted_sources(*sources):
    """
    Takes an iterable of sources, generating namestrings and
    piping their output into date_sort.
    """

    for source in sources:
        assert iter(source), "Source %s not iterable" % source
        assert hasattr(source, 'get_hash'), \
            "Source %s has no get_hash method" % source

    # Get name hashes to pass to date_sort.
    names = [source.get_hash() for source in sources]

    # Convert the list of generators into a flat stream by pulling
    # one element at a time from each.
    stream_in = roundrobin(sources, names)

    # Guarantee the flat stream will be sorted by date, using source_id
    # as a tie-breaker; this is fully deterministic given a deterministic
    # string representation for each source's args/kwargs.

    return date_sort(stream_in, names)
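roundrobin is used here to interleave the sources into a single stream while tagging each event with its source's name hash. The sketch below only illustrates that contract under stated assumptions (events expose a writable source_id attribute, and one event is pulled from each live source per pass); it is not the library's actual implementation.

# Illustrative sketch of a roundrobin(sources, names) helper; assumptions:
# events are attribute-style records and date_sort keys on event.source_id.
def roundrobin(sources, names):
    # Pair each source iterator with the name it should be tagged with.
    iterators = [(iter(source), name) for source, name in zip(sources, names)]
    while iterators:
        remaining = []
        for it, name in iterators:
            try:
                event = next(it)
            except StopIteration:
                continue                  # this source is exhausted; drop it
            event.source_id = name        # assumed attribute, see note above
            remaining.append((it, name))
            yield event
        iterators = remaining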
Example #3
    def test_tracker(self, parameter_comment, days_to_delete):
        """
        @days_to_delete - configures which days in the data set we should
        remove, used to ensure that we still return performance messages
        even when there is no data.
        """
        # This date range covers Columbus Day; note that Columbus Day
        # is not a market holiday.
        #
        #     October 2008
        # Su Mo Tu We Th Fr Sa
        #           1  2  3  4
        #  5  6  7  8  9 10 11
        # 12 13 14 15 16 17 18
        # 19 20 21 22 23 24 25
        # 26 27 28 29 30 31
        start_dt = datetime.datetime(year=2008,
                                     month=10,
                                     day=9,
                                     tzinfo=pytz.utc)
        end_dt = datetime.datetime(year=2008,
                                   month=10,
                                   day=16,
                                   tzinfo=pytz.utc)

        trade_count = 6
        sid = 133
        price = 10.1
        price_list = [price] * trade_count
        volume = [100] * trade_count
        trade_time_increment = datetime.timedelta(days=1)

        benchmark_returns, treasury_curves = \
            factory.load_market_data()

        trading_environment = TradingEnvironment(
            benchmark_returns,
            treasury_curves,
            period_start=start_dt,
            period_end=end_dt
        )

        trade_history = factory.create_trade_history(
            sid,
            price_list,
            volume,
            trade_time_increment,
            trading_environment,
            source_id="factory1"
        )

        sid2 = 134
        price2 = 12.12
        price2_list = [price2] * trade_count
        trade_history2 = factory.create_trade_history(
            sid2,
            price2_list,
            volume,
            trade_time_increment,
            trading_environment,
            source_id="factory2"
        )
        # A 'middle' start index of 3 assumes the data set spans 7 days.
        middle = 3

        # First delete from middle
        if days_to_delete.middle:
            del trade_history[middle:(middle + days_to_delete.middle)]
            del trade_history2[middle:(middle + days_to_delete.middle)]

        # Delete start
        if days_to_delete.start:
            del trade_history[:days_to_delete.start]
            del trade_history2[:days_to_delete.start]

        # Delete from end
        if days_to_delete.end:
            del trade_history[-days_to_delete.end:]
            del trade_history2[-days_to_delete.end:]

        trade_history.extend(trade_history2)

        trading_environment.first_open = \
            trading_environment.calculate_first_open()
        trading_environment.last_close = \
            trading_environment.calculate_last_close()
        trading_environment.capital_base = 1000.0
        trading_environment.frame_index = [
            'sid',
            'volume',
            'dt',
            'price',
            'changed']
        perf_tracker = perf.PerformanceTracker(
            trading_environment
        )

        # date_sort requires 'DONE' messages from each source
        events = itertools.chain(trade_history,
                                 [ndict({
                                        'source_id': 'factory1',
                                        'dt': 'DONE',
                                        'type': DATASOURCE_TYPE.TRADE
                                        }),
                                  ndict({
                                        'source_id': 'factory2',
                                        'dt': 'DONE',
                                        'type': DATASOURCE_TYPE.TRADE
                                        })])
        events = date_sort(events, ('factory1', 'factory2'))
        events = itertools.chain(events,
                                 [ndict({'dt': 'DONE'})])

        events = [self.event_with_txn(event, trade_history[0].dt)
                  for event in events]

        perf_messages = \
            [msg for date, snapshot in
             perf_tracker.transform(
                 itertools.groupby(events, attrgetter('dt')))
             for event in snapshot
             for msg in event.perf_messages]

        # We skip two trades to test the case of a None transaction.
        txn_count = len(trade_history) - 2
        self.assertEqual(perf_tracker.txn_count, txn_count)

        cumulative_pos = perf_tracker.cumulative_performance.positions[sid]
        expected_size = txn_count / 2 * -25
        self.assertEqual(cumulative_pos.amount, expected_size)

        self.assertEqual(perf_tracker.last_close,
                         perf_tracker.cumulative_risk_metrics.end_date)

        self.assertEqual(len(perf_messages),
                         trading_environment.days_in_period)
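The test body reads days_to_delete through .start, .middle, and .end attributes. A hypothetical parameterization (the namedtuple and case names are assumptions for illustration, not taken from the original module) could look like this:

# Hypothetical parameter sketch; field names follow the attribute accesses
# in the test above, everything else is illustrative.
import collections

DaysToDelete = collections.namedtuple('DaysToDelete', ['start', 'middle', 'end'])

cases = [
    ('delete_nothing',    DaysToDelete(start=0, middle=0, end=0)),
    ('delete_first_day',  DaysToDelete(start=1, middle=0, end=0)),
    ('delete_middle_day', DaysToDelete(start=0, middle=1, end=0)),
    ('delete_last_day',   DaysToDelete(start=0, middle=0, end=1)),
]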
Example #4
    def test_tracker(self, start_dt):

        trade_count = 100
        sid = 133
        price = 10.1
        price_list = [price] * trade_count
        volume = [100] * trade_count
        trade_time_increment = datetime.timedelta(days=1)

        trading_environment, start_dt, end_dt = self.create_env(start_dt)

        trade_history = factory.create_trade_history(
            sid,
            price_list,
            volume,
            trade_time_increment,
            trading_environment,
            source_id="factory1"
        )

        sid2 = 134
        price2 = 12.12
        price2_list = [price2] * trade_count
        trade_history2 = factory.create_trade_history(
            sid2,
            price2_list,
            volume,
            trade_time_increment,
            trading_environment,
            source_id="factory2"
        )

        trade_history.extend(trade_history2)

        trading_environment.period_start = trade_history[0].dt
        trading_environment.period_end = trade_history[-1].dt
        trading_environment.first_open = \
            trading_environment.calculate_first_open()
        trading_environment.last_close = \
            trading_environment.calculate_last_close()
        trading_environment.capital_base = 1000.0
        trading_environment.frame_index = [
            'sid',
            'volume',
            'dt',
            'price',
            'changed']
        perf_tracker = perf.PerformanceTracker(
            trading_environment
        )

        # date_sort requires 'DONE' messages from each source
        events = itertools.chain(trade_history,
                                 [ndict({
                                        'source_id': 'factory1',
                                        'dt': 'DONE',
                                        'type': DATASOURCE_TYPE.TRADE
                                        }),
                                  ndict({
                                        'source_id': 'factory2',
                                        'dt': 'DONE',
                                        'type': DATASOURCE_TYPE.TRADE
                                        })])
        events = date_sort(events, ('factory1', 'factory2'))
        events = itertools.chain(events,
                                 [ndict({'dt': 'DONE'})])

        events = [self.event_with_txn(event, trading_environment)
                  for event in events]

        list(perf_tracker.transform(
            itertools.groupby(events, attrgetter('dt'))))

        # We skip two trades to test the case of a None transaction.
        txn_count = len(trade_history) - 2
        self.assertEqual(perf_tracker.txn_count, txn_count)

        cumulative_pos = perf_tracker.cumulative_performance.positions[sid]
        expected_size = txn_count / 2 * -25
        self.assertEqual(cumulative_pos.amount, expected_size)

        self.assertEqual(perf_tracker.last_close,
                         perf_tracker.cumulative_risk_metrics.end_date)
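Both trackers feed the performance transform with itertools.groupby(events, attrgetter('dt')), which only merges consecutive events that share a dt; that is why the stream goes through date_sort first. A small self-contained illustration of that grouping behavior (the Event records here are toy stand-ins, not zipline events):

# Standalone illustration of the groupby-by-dt pattern used above.
import itertools
from collections import namedtuple
from operator import attrgetter

Event = namedtuple('Event', ['dt', 'sid'])
events = [Event('2008-10-09', 133),
          Event('2008-10-09', 134),
          Event('2008-10-10', 133)]

for dt, snapshot in itertools.groupby(events, attrgetter('dt')):
    print(dt, [e.sid for e in snapshot])
# 2008-10-09 [133, 134]
# 2008-10-10 [133]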