Example #1
    def _create_data_generator(self, source_filter):
        """
        Create a merged data generator using the sources and
        transforms attached to this algorithm.

        ::source_filter:: is a method that receives events in date
        sorted order, and returns True for those events that should be
        processed by the zipline, and False for those that should be
        skipped.
        """
        benchmark_return_source = [
            Event({'dt': ret.date,
                   'returns': ret.returns,
                   'type': zipline.protocol.DATASOURCE_TYPE.BENCHMARK,
                   'source_id': 'benchmarks'})
            for ret in trading.environment.benchmark_returns
            if ret.date.date() >= self.sim_params.period_start.date()
            and ret.date.date() <= self.sim_params.period_end.date()
        ]

        date_sorted = date_sorted_sources(*self.sources)

        if source_filter:
            date_sorted = ifilter(source_filter, date_sorted)

        with_tnfms = sequential_transforms(date_sorted,
                                           *self.transforms)
        with_alias_dt = alias_dt(with_tnfms)

        with_benchmarks = date_sorted_sources(benchmark_return_source,
                                              with_alias_dt)

        # Group together events with the same dt field. This depends on the
        # events already being sorted.
        return groupby(with_benchmarks, attrgetter('dt'))
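
The generator returned here yields (dt, snapshot) pairs, one per unique
timestamp. A minimal sketch of draining such a stream (the event class and
the data below are made up for illustration):

    from datetime import datetime
    from itertools import groupby
    from operator import attrgetter

    class FakeEvent(object):
        def __init__(self, dt, source_id):
            self.dt = dt
            self.source_id = source_id

    # The input must already be sorted by dt: groupby only merges
    # *adjacent* equal keys, so unsorted input would split one dt
    # across several groups.
    events = [FakeEvent(datetime(2012, 6, 6), 'a'),
              FakeEvent(datetime(2012, 6, 6), 'b'),
              FakeEvent(datetime(2012, 6, 7), 'a')]

    for dt, snapshot in groupby(events, attrgetter('dt')):
        # Each snapshot is a lazy iterator over the events sharing dt;
        # consume it before advancing to the next group.
        print(dt, [e.source_id for e in snapshot])
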
Example #2
    def _create_data_generator(self, source_filter, sim_params):
        """
        Create a merged data generator using the sources and
        transforms attached to this algorithm.

        ::source_filter:: is a method that receives events in date
        sorted order, and returns True for those events that should be
        processed by the zipline, and False for those that should be
        skipped.
        """
        benchmark_return_source = [
            Event({'dt': ret.date,
                   'returns': ret.returns,
                   'type': zipline.protocol.DATASOURCE_TYPE.BENCHMARK,
                   'source_id': 'benchmarks'})
            for ret in trading.environment.benchmark_returns
            if ret.date.date() >= sim_params.period_start.date()
            and ret.date.date() <= sim_params.period_end.date()
        ]

        date_sorted = date_sorted_sources(*self.sources)

        if source_filter:
            date_sorted = ifilter(source_filter, date_sorted)

        with_tnfms = sequential_transforms(date_sorted,
                                           *self.transforms)
        with_alias_dt = alias_dt(with_tnfms)

        with_benchmarks = date_sorted_sources(benchmark_return_source,
                                              with_alias_dt)

        # Group together events with the same dt field. This depends on the
        # events already being sorted.
        return groupby(with_benchmarks, attrgetter('dt'))
Example #3
    def _create_data_generator(self, source_filter, sim_params=None):
        """
        Create a merged data generator using the sources attached to this
        algorithm.

        ::source_filter:: is a method that receives events in date
        sorted order, and returns True for those events that should be
        processed by the zipline, and False for those that should be
        skipped.
        """
        if sim_params is None:
            sim_params = self.sim_params

        if self.benchmark_return_source is None:
            env = trading.environment
            if sim_params.data_frequency == 'minute' or \
               sim_params.emission_rate == 'minute':

                def update_time(date):
                    return env.get_open_and_close(date)[1]
            else:

                def update_time(date):
                    return date

            benchmark_return_source = [
                Event({
                    'dt': update_time(dt),
                    'returns': ret,
                    'type': zipline.protocol.DATASOURCE_TYPE.BENCHMARK,
                    'source_id': 'benchmarks'
                }) for dt, ret in
                trading.environment.benchmark_returns.iteritems()
                if dt.date() >= sim_params.period_start.date()
                and dt.date() <= sim_params.period_end.date()
            ]
        else:
            benchmark_return_source = self.benchmark_return_source

        date_sorted = date_sorted_sources(*self.sources)

        if source_filter:
            date_sorted = filter(source_filter, date_sorted)

        with_benchmarks = date_sorted_sources(benchmark_return_source,
                                              date_sorted)

        # Group together events with the same dt field. This depends on the
        # events already being sorted.
        return groupby(with_benchmarks, attrgetter('dt'))
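
Under minute-level data or emission, the benchmark event is stamped with its
day's market close rather than midnight, so it sorts after that day's
intraday events. A small sketch of that shift; fake_get_open_and_close is a
hypothetical stand-in for env.get_open_and_close, which the code above
implies returns an (open, close) pair of timestamps:

    from datetime import datetime

    def fake_get_open_and_close(date):
        # Assumed session of 14:31-21:00 UTC (9:31 a.m.-4:00 p.m.
        # Eastern Standard Time).
        return (date.replace(hour=14, minute=31),
                date.replace(hour=21, minute=0))

    def update_time(date):
        # Minute emission: benchmark events land on the close.
        return fake_get_open_and_close(date)[1]

    day = datetime(2008, 10, 9)
    assert update_time(day) == datetime(2008, 10, 9, 21, 0)
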
Example #4
    def _create_data_generator(self, source_filter, sim_params=None):
        """
        Create a merged data generator using the sources attached to this
        algorithm.

        ::source_filter:: is a method that receives events in date
        sorted order, and returns True for those events that should be
        processed by the zipline, and False for those that should be
        skipped.
        """
        if sim_params is None:
            sim_params = self.sim_params

        if self.benchmark_return_source is None:
            if sim_params.data_frequency == "minute" or sim_params.emission_rate == "minute":

                def update_time(date):
                    return self.trading_environment.get_open_and_close(date)[1]

            else:

                def update_time(date):
                    return date

            benchmark_return_source = [
                Event(
                    {
                        "dt": update_time(dt),
                        "returns": ret,
                        "type": zipline.protocol.DATASOURCE_TYPE.BENCHMARK,
                        "source_id": "benchmarks",
                    }
                )
                for dt, ret in self.trading_environment.benchmark_returns.iteritems()
                if dt.date() >= sim_params.period_start.date() and dt.date() <= sim_params.period_end.date()
            ]
        else:
            benchmark_return_source = self.benchmark_return_source

        date_sorted = date_sorted_sources(*self.sources)

        if source_filter:
            date_sorted = filter(source_filter, date_sorted)

        with_benchmarks = date_sorted_sources(benchmark_return_source, date_sorted)

        # Group together events with the same dt field. This depends on the
        # events already being sorted.
        return groupby(with_benchmarks, attrgetter("dt"))
Example #5
    def _create_data_generator(self, source_filter, sim_params=None):
        """
        Create a merged data generator using the sources and
        transforms attached to this algorithm.

        ::source_filter:: is a method that receives events in date
        sorted order, and returns True for those events that should be
        processed by the zipline, and False for those that should be
        skipped.
        """
        if sim_params is None:
            sim_params = self.sim_params

        if self.benchmark_return_source is None:
            env = trading.environment
            if (sim_params.data_frequency == 'minute'
                    or sim_params.emission_rate == 'minute'):
                update_time = lambda date: env.get_open_and_close(date)[1]
            else:
                update_time = lambda date: date
            benchmark_return_source = [
                Event({'dt': update_time(dt),
                       'returns': ret,
                       'type': zipline.protocol.DATASOURCE_TYPE.BENCHMARK,
                       'source_id': 'benchmarks'})
                for dt, ret in
                trading.environment.benchmark_returns.iteritems()
                if dt.date() >= sim_params.period_start.date()
                and dt.date() <= sim_params.period_end.date()
            ]
        else:
            benchmark_return_source = self.benchmark_return_source

        date_sorted = date_sorted_sources(*self.sources)

        if source_filter:
            date_sorted = filter(source_filter, date_sorted)

        with_tnfms = sequential_transforms(date_sorted,
                                           *self.transforms)

        with_benchmarks = date_sorted_sources(benchmark_return_source,
                                              with_tnfms)

        # Group together events with the same dt field. This depends on the
        # events already being sorted.
        return groupby(with_benchmarks, attrgetter('dt'))
Example #6
    def _create_generator(self, environment):
        """
        Create a basic generator setup using the sources and
        transforms attached to this algorithm.
        """

        self.date_sorted = date_sorted_sources(*self.sources)
        self.with_tnfms = sequential_transforms(self.date_sorted,
                                                *self.transforms)
        self.trading_client = tsc(self, environment)

        transact_method = transact_partial(self.slippage, self.commission)
        self.set_transact(transact_method)

        return self.trading_client.simulate(self.with_tnfms)
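
transact_partial appears to bind the slippage and commission models into the
single transact callable installed by set_transact. A rough sketch of that
composition, assuming a functools.partial-based implementation; transact_stub
and the model interfaces (slippage(event, open_orders),
commission.calculate(txn)) are assumptions, not taken from the code above:

    from functools import partial

    def transact_stub(slippage, commission, event, open_orders):
        # Hypothetical composition: the slippage model fills open
        # orders against the trade event, then the commission model
        # prices each resulting transaction.
        for txn, order in slippage(event, open_orders):
            txn.commission = commission.calculate(txn)
            yield txn, order

    def transact_partial(slippage, commission):
        # Freeze both models; callers supply only market data.
        return partial(transact_stub, slippage, commission)
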
Example #7
    def _create_generator(self, environment):
        """
        Create a basic generator setup using the sources and
        transforms attached to this algorithm.
        """

        self.date_sorted = date_sorted_sources(*self.sources)
        self.with_tnfms = sequential_transforms(self.date_sorted,
                                                *self.transforms)
        # Group together events with the same dt field. This depends on the
        # events already being sorted.
        self.grouped_by_date = groupby(self.with_tnfms, attrgetter('dt'))
        self.trading_client = tsc(self, environment)

        transact_method = transact_partial(self.slippage, self.commission)
        self.set_transact(transact_method)

        return self.trading_client.simulate(self.grouped_by_date)
Example #8
    def _create_data_generator(self, source_filter):
        """
        Create a merged data generator using the sources and
        transforms attached to this algorithm.

        ::source_filter:: is a method that receives events in date
        sorted order, and returns True for those events that should be
        processed by the zipline, and False for those that should be
        skipped.
        """

        date_sorted = date_sorted_sources(*self.sources)
        if source_filter:
            date_sorted = ifilter(source_filter, date_sorted)
        with_tnfms = sequential_transforms(date_sorted, *self.transforms)
        with_alias_dt = alias_dt(with_tnfms)

        # Group together events with the same dt field. This depends on the
        # events already being sorted.
        return groupby(with_alias_dt, attrgetter('dt'))
Example #9
    def _create_data_generator(self, source_filter):
        """
        Create a merged data generator using the sources and
        transforms attached to this algorithm.

        ::source_filter:: is a method that receives events in date
        sorted order, and returns True for those events that should be
        processed by the zipline, and False for those that should be
        skipped.
        """

        date_sorted = date_sorted_sources(*self.sources)
        if source_filter:
            date_sorted = ifilter(source_filter, date_sorted)
        with_tnfms = sequential_transforms(date_sorted,
                                           *self.transforms)
        with_alias_dt = alias_dt(with_tnfms)

        # Group together events with the same dt field. This depends on the
        # events already being sorted.
        return groupby(with_alias_dt, attrgetter('dt'))
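
alias_dt is presumably what gives every event the uniform dt attribute the
final groupby keys on. A one-generator sketch under that assumption (not a
verified implementation):

    def alias_dt(stream_in):
        # Assumed behavior: expose the source's 'datetime' field under
        # the 'dt' name that groupby(..., attrgetter('dt')) expects.
        for event in stream_in:
            event.dt = event.datetime
            yield event
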
Example #10
def calculate_results(host, events):

    perf_tracker = perf.PerformanceTracker(host.sim_params)

    events = sorted(events, key=lambda ev: ev.dt)
    all_events = date_sorted_sources(events, host.benchmark_events)

    filtered_events = (filt_event for filt_event in all_events
                       if filt_event.dt <= events[-1].dt)
    grouped_events = itertools.groupby(filtered_events, lambda x: x.dt)
    results = []

    bm_updated = False
    for date, group in grouped_events:
        for event in group:
            perf_tracker.process_event(event)
            if event.type == DATASOURCE_TYPE.BENCHMARK:
                bm_updated = True
        if bm_updated:
            msg = perf_tracker.handle_market_close()
            results.append(msg)
            bm_updated = False
    return results
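
The bm_updated flag gates the daily close: a performance message is emitted
only for dates on which a benchmark return actually arrived. A toy reduction
of that gating, with tuple stand-ins for the real events:

    import itertools

    events = [('2008-10-09', 'TRADE'), ('2008-10-09', 'BENCHMARK'),
              ('2008-10-10', 'TRADE')]  # no benchmark on the 10th

    messages = []
    for date, group in itertools.groupby(events, key=lambda e: e[0]):
        if any(kind == 'BENCHMARK' for _, kind in group):
            messages.append('market close for %s' % date)

    assert messages == ['market close for 2008-10-09']
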
Example #11
    def test_sort_composite(self):

        filter = [1, 2]

        # Set up source a. One hour between events.
        args_a = tuple()
        kwargs_a = {
            'count': 100,
            'sids': [1],
            'start': datetime(2012, 6, 6, 0),
            'delta': timedelta(hours=1),
            'filter': filter
        }
        source_a = SpecificEquityTrades(*args_a, **kwargs_a)

        # Set up source b. One day between events.
        args_b = tuple()
        kwargs_b = {
            'count': 50,
            'sids': [2],
            'start': datetime(2012, 6, 6, 0),
            'delta': timedelta(days=1),
            'filter': filter
        }
        source_b = SpecificEquityTrades(*args_b, **kwargs_b)

        # Set up source c. One minute between events.
        args_c = tuple()
        kwargs_c = {
            'count': 150,
            'sids': [1, 2],
            'start': datetime(2012, 6, 6, 0),
            'delta': timedelta(minutes=1),
            'filter': filter
        }
        source_c = SpecificEquityTrades(*args_c, **kwargs_c)
        # Set up source d. This should produce no events because the
        # internal sids don't match the filter.
        args_d = tuple()
        kwargs_d = {
            'count': 50,
            'sids': [3],
            'start': datetime(2012, 6, 6, 0),
            'delta': timedelta(minutes=1),
            'filter': filter
        }
        source_d = SpecificEquityTrades(*args_d, **kwargs_d)
        sources = [source_a, source_b, source_c, source_d]
        hashes = [source.get_hash() for source in sources]

        sort_out = date_sorted_sources(*sources)

        # Read all the values from the sort and assert that they arrive
        # in the correct order with the expected hash values.
        to_list = list(sort_out)
        copy = to_list[:]

        # We should have 300 events (100 from a, 50 from b, 150 from c).
        assert len(to_list) == 300

        for e in to_list:
            # All events should match one of our expected source_ids.
            assert e.source_id in hashes
            # But none of them should match source_d.
            assert e.source_id != source_d.get_hash()

        # The events should be sorted by dt, with source_id as tiebreaker.
        expected = sorted(copy, key=lambda e: (e.dt, e.source_id))

        assert to_list == expected
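
date_sorted_sources behaves like a k-way merge of streams that are each
already sorted by dt (the source_id tiebreak asserted above is a detail of
the real implementation). For plain iterables, heapq.merge reproduces the
interleaving; the tuples below decorate each timestamp with its sort key
because merge has no key argument before Python 3.5:

    import heapq
    from datetime import datetime, timedelta

    start = datetime(2012, 6, 6)
    hourly = [start + timedelta(hours=i) for i in range(3)]
    daily = [start + timedelta(days=i) for i in range(2)]

    merged = [dt for dt, _ in heapq.merge(
        ((dt, 'hourly') for dt in hourly),
        ((dt, 'daily') for dt in daily))]

    assert merged == sorted(hourly + daily)
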
Example #12
    def transaction_sim(self, **params):
        """ This is a utility method that asserts expected
        results for conversion of orders to transactions given a
        trade history"""

        trade_count = params['trade_count']
        trade_interval = params['trade_interval']
        order_count = params['order_count']
        order_amount = params['order_amount']
        order_interval = params['order_interval']
        expected_txn_count = params['expected_txn_count']
        expected_txn_volume = params['expected_txn_volume']
        # optional parameters
        # ---------------------
        # if present, alternate between long and short sales
        alternate = params.get('alternate')
        # if present, expect transaction amounts to match orders exactly.
        complete_fill = params.get('complete_fill')

        sid = 1
        sim_params = factory.create_simulation_parameters()
        blotter = Blotter()
        price = [10.1] * trade_count
        volume = [100] * trade_count
        start_date = sim_params.first_open

        generated_trades = factory.create_trade_history(
            sid, price, volume, trade_interval, sim_params)

        if alternate:
            alternator = -1
        else:
            alternator = 1

        order_date = start_date
        for i in range(order_count):

            blotter.set_date(order_date)
            blotter.order(sid, order_amount * alternator**i, MarketOrder())

            order_date = order_date + order_interval
            # Move after-market orders (at or past 21:00 UTC) to just
            # after the next market open (14:30 UTC).
            if order_date.hour >= 21:
                order_date = order_date + timedelta(days=1)
                order_date = order_date.replace(hour=14, minute=30)

        # there should now be one open order list stored under the sid
        oo = blotter.open_orders
        self.assertEqual(len(oo), 1)
        self.assertTrue(sid in oo)
        order_list = oo[sid][:]  # make copy
        self.assertEqual(order_count, len(order_list))

        for i in range(order_count):
            order = order_list[i]
            self.assertEqual(order.sid, sid)
            self.assertEqual(order.amount, order_amount * alternator**i)

        tracker = PerformanceTracker(sim_params)

        benchmark_returns = [
            Event({
                'dt': dt,
                'returns': ret,
                'type': zipline.protocol.DATASOURCE_TYPE.BENCHMARK,
                'source_id': 'benchmarks'
            })
            for dt, ret in trading.environment.benchmark_returns.iteritems()
            if dt.date() >= sim_params.period_start.date()
            and dt.date() <= sim_params.period_end.date()
        ]

        generated_events = date_sorted_sources(generated_trades,
                                               benchmark_returns)

        # this approximates the loop inside TradingSimulationClient
        transactions = []
        for dt, events in itertools.groupby(generated_events,
                                            operator.attrgetter('dt')):
            for event in events:
                if event.type == DATASOURCE_TYPE.TRADE:

                    for txn, order in blotter.process_trade(event):
                        transactions.append(txn)
                        tracker.process_transaction(txn)
                elif event.type == DATASOURCE_TYPE.BENCHMARK:
                    tracker.process_benchmark(event)

        if complete_fill:
            self.assertEqual(len(transactions), len(order_list))

        total_volume = 0
        for i in range(len(transactions)):
            txn = transactions[i]
            total_volume += txn.amount
            if complete_fill:
                order = order_list[i]
                self.assertEqual(order.amount, txn.amount)

        self.assertEqual(total_volume, expected_txn_volume)
        self.assertEqual(len(transactions), expected_txn_count)

        cumulative_pos = tracker.cumulative_performance.positions[sid]
        self.assertEqual(total_volume, cumulative_pos.amount)

        # the open orders should not contain sid.
        oo = blotter.open_orders
        self.assertNotIn(sid, oo, "Entry is removed when no open orders")
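
The date roll inside the order loop above keeps simulated orders within
market hours: timestamps at or past 21:00 UTC (after the 4:00 p.m. Eastern
close, in winter time) are re-stamped to 14:30 UTC (9:30 a.m. Eastern) on
the next day. Extracted as a standalone helper for clarity (the function
name is ours):

    from datetime import datetime, timedelta

    def roll_to_next_open(order_date):
        # Same rule as the loop body above.
        if order_date.hour >= 21:
            order_date = order_date + timedelta(days=1)
            order_date = order_date.replace(hour=14, minute=30)
        return order_date

    late = datetime(2006, 1, 3, 22, 15)
    assert roll_to_next_open(late) == datetime(2006, 1, 4, 14, 30)
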
Example #13
def calculate_results(host,
                      trade_events,
                      dividend_events=None,
                      splits=None,
                      txns=None):
    """
    Run the given events through a stripped down version of the loop in
    AlgorithmSimulator.transform.

    IMPORTANT NOTE FOR TEST WRITERS/READERS:

    This loop has some wonky logic for the order of event processing for
    datasource types.  This exists mostly to accommodate existing tests
    that were making assumptions about how events would be sorted.

    In particular:

        - Dividends passed for a given date are processed PRIOR to any events
          for that date.
        - Splits passed for a given date are processed AFTER any events for
          that
          date.

    Tests that use this helper should not be considered useful guarantees of
    the behavior of AlgorithmSimulator on a stream containing the same events
    unless the subgroups have been explicitly re-sorted in this way.
    """

    txns = txns or []
    splits = splits or []

    perf_tracker = perf.PerformanceTracker(host.sim_params)
    if dividend_events is not None:
        dividend_frame = pd.DataFrame(
            [
                event.to_series(index=zp.DIVIDEND_FIELDS)
                for event in dividend_events
            ],
        )
        perf_tracker.update_dividends(dividend_frame)

    # Raw trades
    trade_events = sorted(trade_events, key=lambda ev: (ev.dt, ev.source_id))

    # Add a benchmark event for each date.
    trades_plus_bm = date_sorted_sources(trade_events, host.benchmark_events)

    # Filter out benchmark events that are later than the last trade date.
    filtered_trades_plus_bm = (filt_event for filt_event in trades_plus_bm
                               if filt_event.dt <= trade_events[-1].dt)

    grouped_trades_plus_bm = itertools.groupby(filtered_trades_plus_bm,
                                               lambda x: x.dt)
    results = []

    bm_updated = False
    for date, group in grouped_trades_plus_bm:

        for txn in filter(lambda txn: txn.dt == date, txns):
            # Process txns for this date.
            perf_tracker.process_event(txn)

        for event in group:

            perf_tracker.process_event(event)
            if event.type == zp.DATASOURCE_TYPE.BENCHMARK:
                bm_updated = True

        for split in filter(lambda split: split.dt == date, splits):
            # Process splits for this date.
            perf_tracker.process_event(split)

        if bm_updated:
            msg = perf_tracker.handle_market_close_daily()
            results.append(msg)
            bm_updated = False
    return results
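
The loop body fixes a per-date processing order: transactions first, then
the event group, then splits (dividends were registered up front). A toy
reduction of that ordering, with tuple stand-ins for the real objects:

    import itertools

    events = [('2008-10-09', 'TRADE'), ('2008-10-10', 'TRADE')]
    txns = [('2008-10-09', 'TXN')]
    splits = [('2008-10-09', 'SPLIT')]

    processed = []
    for date, group in itertools.groupby(events, key=lambda e: e[0]):
        processed.extend(t for t in txns if t[0] == date)
        processed.extend(group)
        processed.extend(s for s in splits if s[0] == date)

    assert processed == [('2008-10-09', 'TXN'), ('2008-10-09', 'TRADE'),
                         ('2008-10-09', 'SPLIT'), ('2008-10-10', 'TRADE')]
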
Example #14
    def test_tracker(self, parameter_comment, days_to_delete):
        """
        @days_to_delete - configures which days in the data set we should
        remove, used for ensuring that we still return performance messages
        even when there is no data.
        """
        # This date range covers Columbus Day; however, Columbus Day is
        # not a market holiday.
        #
        #     October 2008
        # Su Mo Tu We Th Fr Sa
        #           1  2  3  4
        #  5  6  7  8  9 10 11
        # 12 13 14 15 16 17 18
        # 19 20 21 22 23 24 25
        # 26 27 28 29 30 31
        start_dt = datetime(year=2008,
                            month=10,
                            day=9,
                            tzinfo=pytz.utc)
        end_dt = datetime(year=2008,
                          month=10,
                          day=16,
                          tzinfo=pytz.utc)

        trade_count = 6
        sid = 133
        price = 10.1
        price_list = [price] * trade_count
        volume = [100] * trade_count
        trade_time_increment = timedelta(days=1)

        sim_params = SimulationParameters(
            period_start=start_dt,
            period_end=end_dt
        )

        benchmark_events = benchmark_events_in_range(sim_params)

        trade_history = factory.create_trade_history(
            sid,
            price_list,
            volume,
            trade_time_increment,
            sim_params,
            source_id="factory1"
        )

        sid2 = 134
        price2 = 12.12
        price2_list = [price2] * trade_count
        trade_history2 = factory.create_trade_history(
            sid2,
            price2_list,
            volume,
            trade_time_increment,
            sim_params,
            source_id="factory2"
        )
        # The 'middle' start index of 3 assumes the period spans 7 days.
        middle = 3

        # First delete from middle
        if days_to_delete.middle:
            del trade_history[middle:(middle + days_to_delete.middle)]
            del trade_history2[middle:(middle + days_to_delete.middle)]

        # Delete start
        if days_to_delete.start:
            del trade_history[:days_to_delete.start]
            del trade_history2[:days_to_delete.start]

        # Delete from end
        if days_to_delete.end:
            del trade_history[-days_to_delete.end:]
            del trade_history2[-days_to_delete.end:]

        sim_params.first_open = \
            sim_params.calculate_first_open()
        sim_params.last_close = \
            sim_params.calculate_last_close()
        sim_params.capital_base = 1000.0
        sim_params.frame_index = [
            'sid',
            'volume',
            'dt',
            'price',
            'changed']
        perf_tracker = perf.PerformanceTracker(
            sim_params
        )

        events = date_sorted_sources(trade_history, trade_history2)

        events = [event for event in
                  self.trades_with_txns(events, trade_history[0].dt)]

        # Extract events with transactions to use for verification.
        txns = [event for event in
                events if event.type == zp.DATASOURCE_TYPE.TRANSACTION]

        orders = [event for event in
                  events if event.type == zp.DATASOURCE_TYPE.ORDER]

        all_events = date_sorted_sources(events, benchmark_events)

        filtered_events = [filt_event for filt_event
                           in all_events if filt_event.dt <= end_dt]
        filtered_events.sort(key=lambda x: x.dt)
        grouped_events = itertools.groupby(filtered_events, lambda x: x.dt)
        perf_messages = []

        for date, group in grouped_events:
            for event in group:
                perf_tracker.process_event(event)
            msg = perf_tracker.handle_market_close_daily()
            perf_messages.append(msg)

        self.assertEqual(perf_tracker.txn_count, len(txns))
        self.assertEqual(perf_tracker.txn_count, len(orders))

        cumulative_pos = perf_tracker.cumulative_performance.positions[sid]
        expected_size = len(txns) / 2 * -25
        self.assertEqual(cumulative_pos.amount, expected_size)

        self.assertEqual(len(perf_messages),
                         sim_params.days_in_period)
Example #15
    def test_tracker(self, parameter_comment, days_to_delete):
        """
        @days_to_delete - configures which days in the data set we should
        remove, used for ensuring that we still return performance messages
        even when there is no data.
        """
        # This date range covers Columbus Day; however, Columbus Day is
        # not a market holiday.
        #
        #     October 2008
        # Su Mo Tu We Th Fr Sa
        #           1  2  3  4
        #  5  6  7  8  9 10 11
        # 12 13 14 15 16 17 18
        # 19 20 21 22 23 24 25
        # 26 27 28 29 30 31
        start_dt = datetime.datetime(year=2008,
                                     month=10,
                                     day=9,
                                     tzinfo=pytz.utc)
        end_dt = datetime.datetime(year=2008,
                                   month=10,
                                   day=16,
                                   tzinfo=pytz.utc)

        trade_count = 6
        sid = 133
        price = 10.1
        price_list = [price] * trade_count
        volume = [100] * trade_count
        trade_time_increment = datetime.timedelta(days=1)

        benchmark_returns, treasury_curves = \
            factory.load_market_data()

        trading_environment = TradingEnvironment(
            benchmark_returns,
            treasury_curves,
            period_start=start_dt,
            period_end=end_dt
        )

        trade_history = factory.create_trade_history(
            sid,
            price_list,
            volume,
            trade_time_increment,
            trading_environment,
            source_id="factory1"
        )

        sid2 = 134
        price2 = 12.12
        price2_list = [price2] * trade_count
        trade_history2 = factory.create_trade_history(
            sid2,
            price2_list,
            volume,
            trade_time_increment,
            trading_environment,
            source_id="factory2"
        )
        # The 'middle' start index of 3 assumes the period spans 7 days.
        middle = 3

        # First delete from middle
        if days_to_delete.middle:
            del trade_history[middle:(middle + days_to_delete.middle)]
            del trade_history2[middle:(middle + days_to_delete.middle)]

        # Delete start
        if days_to_delete.start:
            del trade_history[:days_to_delete.start]
            del trade_history2[:days_to_delete.start]

        # Delete from end
        if days_to_delete.end:
            del trade_history[-days_to_delete.end:]
            del trade_history2[-days_to_delete.end:]

        trading_environment.first_open = \
            trading_environment.calculate_first_open()
        trading_environment.last_close = \
            trading_environment.calculate_last_close()
        trading_environment.capital_base = 1000.0
        trading_environment.frame_index = [
            'sid',
            'volume',
            'dt',
            'price',
            'changed']
        perf_tracker = perf.PerformanceTracker(
            trading_environment
        )

        events = date_sorted_sources(trade_history, trade_history2)

        events = [self.event_with_txn(event, trade_history[0].dt)
                  for event in events]

        # Extract events with transactions to use for verification.
        events_with_txns = [event for event in events if event.TRANSACTION]

        perf_messages = \
            [msg for date, snapshot in
             perf_tracker.transform(
                 itertools.groupby(events, attrgetter('dt')))
             for event in snapshot
             for msg in event.perf_messages]

        end_perf_messages, risk_message = perf_tracker.handle_simulation_end()

        perf_messages.extend(end_perf_messages)

        # We skip two trades to test the case of a None transaction.
        self.assertEqual(perf_tracker.txn_count, len(events_with_txns))

        cumulative_pos = perf_tracker.cumulative_performance.positions[sid]
        expected_size = len(events_with_txns) / 2 * -25
        self.assertEqual(cumulative_pos.amount, expected_size)

        self.assertEqual(perf_tracker.last_close,
                         perf_tracker.cumulative_risk_metrics.end_date)

        self.assertEqual(len(perf_messages),
                         trading_environment.days_in_period)
Example #16
    def transaction_sim(self, **params):
        """ This is a utility method that asserts expected
        results for conversion of orders to transactions given a
        trade history"""

        trade_count = params['trade_count']
        trade_interval = params['trade_interval']
        order_count = params['order_count']
        order_amount = params['order_amount']
        order_interval = params['order_interval']
        expected_txn_count = params['expected_txn_count']
        expected_txn_volume = params['expected_txn_volume']
        # optional parameters
        # ---------------------
        # if present, alternate between long and short sales
        alternate = params.get('alternate')
        # if present, expect transaction amounts to match orders exactly.
        complete_fill = params.get('complete_fill')

        sid = 1
        sim_params = factory.create_simulation_parameters()
        blotter = Blotter()
        price = [10.1] * trade_count
        volume = [100] * trade_count
        start_date = sim_params.first_open

        generated_trades = factory.create_trade_history(
            sid,
            price,
            volume,
            trade_interval,
            sim_params,
            env=self.env,
        )

        if alternate:
            alternator = -1
        else:
            alternator = 1

        order_date = start_date
        for i in range(order_count):

            blotter.set_date(order_date)
            blotter.order(sid, order_amount * alternator ** i, MarketOrder())

            order_date = order_date + order_interval
            # Move after-market orders (at or past 21:00 UTC) to just
            # after the next market open (14:30 UTC).
            if order_date.hour >= 21:
                order_date = order_date + timedelta(days=1)
                order_date = order_date.replace(hour=14, minute=30)

        # there should now be one open order list stored under the sid
        oo = blotter.open_orders
        self.assertEqual(len(oo), 1)
        self.assertTrue(sid in oo)
        order_list = oo[sid][:]  # make copy
        self.assertEqual(order_count, len(order_list))

        for i in range(order_count):
            order = order_list[i]
            self.assertEqual(order.sid, sid)
            self.assertEqual(order.amount, order_amount * alternator ** i)

        tracker = PerformanceTracker(sim_params, env=self.env)

        benchmark_returns = [
            Event({'dt': dt,
                   'returns': ret,
                   'type':
                   zipline.protocol.DATASOURCE_TYPE.BENCHMARK,
                   'source_id': 'benchmarks'})
            for dt, ret in self.env.benchmark_returns.iteritems()
            if dt.date() >= sim_params.period_start.date() and
            dt.date() <= sim_params.period_end.date()
        ]

        generated_events = date_sorted_sources(generated_trades,
                                               benchmark_returns)

        # this approximates the loop inside TradingSimulationClient
        transactions = []
        for dt, events in itertools.groupby(generated_events,
                                            operator.attrgetter('dt')):
            for event in events:
                if event.type == DATASOURCE_TYPE.TRADE:

                    for txn, order in blotter.process_trade(event):
                        transactions.append(txn)
                        tracker.process_transaction(txn)
                elif event.type == DATASOURCE_TYPE.BENCHMARK:
                    tracker.process_benchmark(event)

        if complete_fill:
            self.assertEqual(len(transactions), len(order_list))

        total_volume = 0
        for i in range(len(transactions)):
            txn = transactions[i]
            total_volume += txn.amount
            if complete_fill:
                order = order_list[i]
                self.assertEqual(order.amount, txn.amount)

        self.assertEqual(total_volume, expected_txn_volume)
        self.assertEqual(len(transactions), expected_txn_count)

        cumulative_pos = tracker.cumulative_performance.positions[sid]
        self.assertEqual(total_volume, cumulative_pos.amount)

        # the open orders should not contain sid.
        oo = blotter.open_orders
        self.assertNotIn(sid, oo, "Entry is removed when no open orders")
Example #17
def calculate_results(host,
                      trade_events,
                      dividend_events=None,
                      splits=None,
                      txns=None):
    """
    Run the given events through a stripped down version of the loop in
    AlgorithmSimulator.transform.

    IMPORTANT NOTE FOR TEST WRITERS/READERS:

    This loop has some wonky logic for the order of event processing for
    datasource types.  This exists mostly to accommodate existing tests
    that were making assumptions about how events would be sorted.

    In particular:

        - Dividends passed for a given date are processed PRIOR to any events
          for that date.
        - Splits passed for a given date are processed AFTER any events for
          that
          date.

    Tests that use this helper should not be considered useful guarantees of
    the behavior of AlgorithmSimulator on a stream containing the same events
    unless the subgroups have been explicitly re-sorted in this way.
    """

    txns = txns or []
    splits = splits or []

    perf_tracker = perf.PerformanceTracker(host.sim_params)
    if dividend_events is not None:
        dividend_frame = pd.DataFrame([
            event.to_series(index=zp.DIVIDEND_FIELDS)
            for event in dividend_events
        ])
        perf_tracker.update_dividends(dividend_frame)

    # Raw trades
    trade_events = sorted(trade_events, key=lambda ev: (ev.dt, ev.source_id))

    # Add a benchmark event for each date.
    trades_plus_bm = date_sorted_sources(trade_events, host.benchmark_events)

    # Filter out benchmark events that are later than the last trade date.
    filtered_trades_plus_bm = (filt_event for filt_event in trades_plus_bm
                               if filt_event.dt <= trade_events[-1].dt)

    grouped_trades_plus_bm = itertools.groupby(filtered_trades_plus_bm,
                                               lambda x: x.dt)
    results = []

    bm_updated = False
    for date, group in grouped_trades_plus_bm:

        for txn in filter(lambda txn: txn.dt == date, txns):
            # Process txns for this date.
            perf_tracker.process_event(txn)

        for event in group:

            perf_tracker.process_event(event)
            if event.type == zp.DATASOURCE_TYPE.BENCHMARK:
                bm_updated = True

        for split in filter(lambda split: split.dt == date, splits):
            # Process splits for this date.
            perf_tracker.process_event(split)

        if bm_updated:
            msg = perf_tracker.handle_market_close_daily()
            results.append(msg)
            bm_updated = False
    return results
Example #18
    def test_tracker(self, parameter_comment, days_to_delete):
        """
        @days_to_delete - configures which days in the data set we should
        remove, used for ensuring that we still return performance messages
        even when there is no data.
        """
        # This date range covers Columbus Day; however, Columbus Day is
        # not a market holiday.
        #
        #     October 2008
        # Su Mo Tu We Th Fr Sa
        #           1  2  3  4
        #  5  6  7  8  9 10 11
        # 12 13 14 15 16 17 18
        # 19 20 21 22 23 24 25
        # 26 27 28 29 30 31
        start_dt = datetime.datetime(year=2008,
                                     month=10,
                                     day=9,
                                     tzinfo=pytz.utc)
        end_dt = datetime.datetime(year=2008,
                                   month=10,
                                   day=16,
                                   tzinfo=pytz.utc)

        trade_count = 6
        sid = 133
        price = 10.1
        price_list = [price] * trade_count
        volume = [100] * trade_count
        trade_time_increment = datetime.timedelta(days=1)

        sim_params = SimulationParameters(
            period_start=start_dt,
            period_end=end_dt
        )

        benchmark_events = benchmark_events_in_range(sim_params)

        trade_history = factory.create_trade_history(
            sid,
            price_list,
            volume,
            trade_time_increment,
            sim_params,
            source_id="factory1"
        )

        sid2 = 134
        price2 = 12.12
        price2_list = [price2] * trade_count
        trade_history2 = factory.create_trade_history(
            sid2,
            price2_list,
            volume,
            trade_time_increment,
            sim_params,
            source_id="factory2"
        )
        # The 'middle' start index of 3 assumes the period spans 7 days.
        middle = 3

        # First delete from middle
        if days_to_delete.middle:
            del trade_history[middle:(middle + days_to_delete.middle)]
            del trade_history2[middle:(middle + days_to_delete.middle)]

        # Delete start
        if days_to_delete.start:
            del trade_history[:days_to_delete.start]
            del trade_history2[:days_to_delete.start]

        # Delete from end
        if days_to_delete.end:
            del trade_history[-days_to_delete.end:]
            del trade_history2[-days_to_delete.end:]

        sim_params.first_open = \
            sim_params.calculate_first_open()
        sim_params.last_close = \
            sim_params.calculate_last_close()
        sim_params.capital_base = 1000.0
        sim_params.frame_index = [
            'sid',
            'volume',
            'dt',
            'price',
            'changed']
        perf_tracker = perf.PerformanceTracker(
            sim_params
        )

        events = date_sorted_sources(trade_history, trade_history2)

        events = [event for event in
                  self.trades_with_txns(events, trade_history[0].dt)]

        # Extract events with transactions to use for verification.
        txns = [event for event in
                events if event.type == DATASOURCE_TYPE.TRANSACTION]

        orders = [event for event in
                  events if event.type == DATASOURCE_TYPE.ORDER]

        all_events = date_sorted_sources(events, benchmark_events)

        filtered_events = [filt_event for filt_event
                           in all_events if filt_event.dt <= end_dt]
        filtered_events.sort(key=lambda x: x.dt)
        grouped_events = itertools.groupby(filtered_events, lambda x: x.dt)
        perf_messages = []

        for date, group in grouped_events:
            for event in group:
                perf_tracker.process_event(event)
            msg = perf_tracker.handle_market_close()
            perf_messages.append(msg)

        self.assertEqual(perf_tracker.txn_count, len(txns))
        self.assertEqual(perf_tracker.txn_count, len(orders))

        cumulative_pos = perf_tracker.cumulative_performance.positions[sid]
        expected_size = len(txns) / 2 * -25
        self.assertEqual(cumulative_pos.amount, expected_size)

        self.assertEqual(len(perf_messages),
                         sim_params.days_in_period)
Example #19
    def test_tracker(self, parameter_comment, days_to_delete):
        """
        @days_to_delete - configures which days in the data set we should
        remove, used for ensuring that we still return performance messages
        even when there is no data.
        """
        # This date range covers Columbus Day; however, Columbus Day is
        # not a market holiday.
        #
        #     October 2008
        # Su Mo Tu We Th Fr Sa
        #           1  2  3  4
        #  5  6  7  8  9 10 11
        # 12 13 14 15 16 17 18
        # 19 20 21 22 23 24 25
        # 26 27 28 29 30 31
        start_dt = datetime.datetime(year=2008,
                                     month=10,
                                     day=9,
                                     tzinfo=pytz.utc)
        end_dt = datetime.datetime(year=2008,
                                   month=10,
                                   day=16,
                                   tzinfo=pytz.utc)

        trade_count = 6
        sid = 133
        price = 10.1
        price_list = [price] * trade_count
        volume = [100] * trade_count
        trade_time_increment = datetime.timedelta(days=1)

        sim_params = SimulationParameters(period_start=start_dt,
                                          period_end=end_dt)

        trade_history = factory.create_trade_history(sid,
                                                     price_list,
                                                     volume,
                                                     trade_time_increment,
                                                     sim_params,
                                                     source_id="factory1")

        sid2 = 134
        price2 = 12.12
        price2_list = [price2] * trade_count
        trade_history2 = factory.create_trade_history(sid2,
                                                      price2_list,
                                                      volume,
                                                      trade_time_increment,
                                                      sim_params,
                                                      source_id="factory2")
        # The 'middle' start index of 3 assumes the period spans 7 days.
        middle = 3

        # First delete from middle
        if days_to_delete.middle:
            del trade_history[middle:(middle + days_to_delete.middle)]
            del trade_history2[middle:(middle + days_to_delete.middle)]

        # Delete start
        if days_to_delete.start:
            del trade_history[:days_to_delete.start]
            del trade_history2[:days_to_delete.start]

        # Delete from end
        if days_to_delete.end:
            del trade_history[-days_to_delete.end:]
            del trade_history2[-days_to_delete.end:]

        sim_params.first_open = \
            sim_params.calculate_first_open()
        sim_params.last_close = \
            sim_params.calculate_last_close()
        sim_params.capital_base = 1000.0
        sim_params.frame_index = ['sid', 'volume', 'dt', 'price', 'changed']
        perf_tracker = perf.PerformanceTracker(sim_params)

        events = date_sorted_sources(trade_history, trade_history2)

        events = [
            self.event_with_txn(event, trade_history[0].dt) for event in events
        ]

        # Extract events with transactions to use for verification.
        events_with_txns = [event for event in events if event.TRANSACTION]

        perf_messages = \
            [msg for date, snapshot in
             perf_tracker.transform(
                 itertools.groupby(events, attrgetter('dt')))
             for event in snapshot
             for msg in event.perf_messages]

        end_perf_messages, risk_message = perf_tracker.handle_simulation_end()

        perf_messages.extend(end_perf_messages)

        # We skip two trades to test the case of a None transaction.
        self.assertEqual(perf_tracker.txn_count, len(events_with_txns))

        cumulative_pos = perf_tracker.cumulative_performance.positions[sid]
        expected_size = len(events_with_txns) / 2 * -25
        self.assertEqual(cumulative_pos.amount, expected_size)

        self.assertEqual(perf_tracker.last_close,
                         perf_tracker.cumulative_risk_metrics.end_date)

        self.assertEqual(len(perf_messages), sim_params.days_in_period)