Example #1
    def test_lse_algorithm(self):

        lse = trading.TradingEnvironment(bm_symbol='^FTSE',
                                         exchange_tz='Europe/London')

        with lse:

            sim_params = factory.create_simulation_parameters(
                start=datetime(2012, 5, 1, tzinfo=pytz.utc),
                end=datetime(2012, 6, 30, tzinfo=pytz.utc))
            algo = TestAlgo(self, identifiers=[8229], sim_params=sim_params)
            trade_source = factory.create_daily_trade_source([8229], 200,
                                                             sim_params)
            algo.set_sources([trade_source])

            gen = algo.get_generator()
            results = list(gen)
            self.assertEqual(len(results), 42)
            # May 7, 2012 was an LSE bank holiday, so confirm that the fifth
            # trading day (results[4]) was May 8.
            self.assertEqual(results[4]['daily_perf']['period_open'],
                             datetime(2012, 5, 8, 8, 31, tzinfo=pytz.utc))
Example #2
def create_simulation_parameters(year=2006,
                                 start=None,
                                 end=None,
                                 capital_base=float("1.0e5"),
                                 num_days=None):
    """Construct a complete environment with reasonable defaults"""
    if start is None:
        start = datetime(year, 1, 1, tzinfo=pytz.utc)
    if end is None:
        if num_days:
            trading.environment = trading.TradingEnvironment()
            start_index = trading.environment.trading_days.searchsorted(start)
            end = trading.environment.trading_days[start_index + num_days - 1]
        else:
            end = datetime(year, 12, 31, tzinfo=pytz.utc)
    sim_params = SimulationParameters(
        period_start=start,
        period_end=end,
        capital_base=capital_base,
    )

    return sim_params
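
A hedged usage sketch for the factory above (dates are illustrative, and it assumes the module is importable as zipline.utils.factory):

from datetime import datetime

import pytz
from zipline.utils import factory

# Ten trading days starting 2006-01-03 (UTC); the end date is derived from
# the trading calendar inside create_simulation_parameters.
sim_params = factory.create_simulation_parameters(
    start=datetime(2006, 1, 3, tzinfo=pytz.utc),
    num_days=10,
)
print(sim_params.period_start, sim_params.period_end)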
Example #3
def create_test_panel_source(sim_params=None):
    start = sim_params.first_open \
        if sim_params else pd.datetime(1990, 1, 3, 0, 0, 0, 0, pytz.utc)

    end = sim_params.last_close \
        if sim_params else pd.datetime(1990, 1, 8, 0, 0, 0, 0, pytz.utc)

    if trading.environment is None:
        trading.environment = trading.TradingEnvironment()

    index = trading.environment.days_in_range(start, end)

    price = np.arange(0, len(index))
    volume = np.ones(len(index)) * 1000
    arbitrary = np.ones(len(index))

    df = pd.DataFrame({'price': price,
                       'volume': volume,
                       'arbitrary': arbitrary},
                      index=index)
    panel = pd.Panel.from_dict({0: df})

    return DataPanelSource(panel), panel
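
A hedged usage sketch for the helper above; it assumes DataPanelSource iterates the panel as dt-ordered events that expose price and volume attributes:

# Illustrative only: the defaults give a 1990-01-03 .. 1990-01-08 window.
source, panel = create_test_panel_source()
for event in source:
    print(event.dt, event.price, event.volume)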
Example #4
def create_random_simulation_parameters():
    trading.environment = trading.TradingEnvironment()
    treasury_curves = trading.environment.treasury_curves

    for n in range(100):

        random_index = random.randint(0, len(treasury_curves) - 1)

        start_dt = treasury_curves.index[random_index]
        end_dt = start_dt + timedelta(days=365)

        now = datetime.utcnow().replace(tzinfo=pytz.utc)

        if end_dt <= now:
            break

    assert end_dt <= now, """
failed to find a suitable daterange after 100 attempts. please double
check treasury and benchmark data in findb, and re-run the test."""

    sim_params = SimulationParameters(period_start=start_dt, period_end=end_dt)

    return sim_params, start_dt, end_dt
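
An illustrative call of the helper above; by construction the returned window spans 365 calendar days and ends no later than the current UTC time:

sim_params, start_dt, end_dt = create_random_simulation_parameters()
assert (end_dt - start_dt).days == 365
print(start_dt, end_dt)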
Example #5
    def test_commission_no_position(self):
        """
        Ensure no position-not-found or sid-not-found errors.
        """
        with trading.TradingEnvironment():
            events = factory.create_trade_history(
                1,
                [10, 10, 10, 10, 10],
                [100, 100, 100, 100, 100],
                oneday,
                self.sim_params
            )

            cash_adj_dt = self.sim_params.first_open \
                + datetime.timedelta(hours=3)
            cash_adjustment = factory.create_commission(1, 300.0,
                                                        cash_adj_dt)

            events.insert(0, cash_adjustment)
            results = calculate_results(self, events)
            # Validate that we lost 300 dollars from our cash pool.
            self.assertEqual(results[-1]['cumulative_perf']['ending_cash'],
                             9700)
Example #6
    def test_commission_zero_position(self):
        """
        Ensure no div-by-zero errors.
        """
        with trading.TradingEnvironment():
            events = factory.create_trade_history(1, [10, 10, 10, 10, 10],
                                                  [100, 100, 100, 100, 100],
                                                  oneday, self.sim_params)

            cash_adj_dt = self.sim_params.period_start \
                + datetime.timedelta(hours=3)
            cash_adjustment = factory.create_commission(1, 300.0, cash_adj_dt)

            # Insert a purchase order.
            events.insert(0, create_txn(events[0], 20, 1))

            # Sell that order.
            events.insert(1, create_txn(events[1], 20, -1))

            events.insert(2, cash_adjustment)
            results = calculate_results(self, events)
            # Validate that we lost 300 dollars from our cash pool.
            self.assertEqual(results[-1]['cumulative_perf']['ending_cash'],
                             9700)
Example #7
    def test_minute_buy_and_hold(self):
        with trading.TradingEnvironment():
            start_date = datetime.datetime(year=2006,
                                           month=1,
                                           day=3,
                                           hour=0,
                                           minute=0,
                                           tzinfo=pytz.utc)
            end_date = datetime.datetime(year=2006,
                                         month=1,
                                         day=5,
                                         hour=0,
                                         minute=0,
                                         tzinfo=pytz.utc)

            sim_params = SimulationParameters(period_start=start_date,
                                              period_end=end_date,
                                              emission_rate='daily',
                                              data_frequency='minute')

            algo = BuyAndHoldAlgorithm(sim_params=sim_params,
                                       data_frequency='minute')

            first_date = datetime.datetime(2006, 1, 3, tzinfo=pytz.utc)
            first_open, first_close = \
                trading.environment.get_open_and_close(first_date)

            second_date = datetime.datetime(2006, 1, 4, tzinfo=pytz.utc)
            second_open, second_close = \
                trading.environment.get_open_and_close(second_date)

            third_date = datetime.datetime(2006, 1, 5, tzinfo=pytz.utc)
            third_open, third_close = \
                trading.environment.get_open_and_close(third_date)

            benchmark_data = [
                Event({
                    'returns': 0.1,
                    'dt': first_close,
                    'source_id': 'test-benchmark-source',
                    'type': DATASOURCE_TYPE.BENCHMARK
                }),
                Event({
                    'returns': 0.2,
                    'dt': second_close,
                    'source_id': 'test-benchmark-source',
                    'type': DATASOURCE_TYPE.BENCHMARK
                }),
                Event({
                    'returns': 0.4,
                    'dt': third_close,
                    'source_id': 'test-benchmark-source',
                    'type': DATASOURCE_TYPE.BENCHMARK
                }),
            ]

            trade_bar_data = [
                Event({
                    'open_price': 10,
                    'close_price': 15,
                    'price': 15,
                    'volume': 1000,
                    'sid': 1,
                    'dt': first_open,
                    'source_id': 'test-trade-source',
                    'type': DATASOURCE_TYPE.TRADE
                }),
                Event({
                    'open_price': 10,
                    'close_price': 15,
                    'price': 15,
                    'volume': 1000,
                    'sid': 1,
                    'dt': first_open + datetime.timedelta(minutes=10),
                    'source_id': 'test-trade-source',
                    'type': DATASOURCE_TYPE.TRADE
                }),
                Event({
                    'open_price': 15,
                    'close_price': 20,
                    'price': 20,
                    'volume': 2000,
                    'sid': 1,
                    'dt': second_open,
                    'source_id': 'test-trade-source',
                    'type': DATASOURCE_TYPE.TRADE
                }),
                Event({
                    'open_price': 15,
                    'close_price': 20,
                    'price': 20,
                    'volume': 2000,
                    'sid': 1,
                    'dt': second_open + datetime.timedelta(minutes=10),
                    'source_id': 'test-trade-source',
                    'type': DATASOURCE_TYPE.TRADE
                }),
                Event({
                    'open_price': 20,
                    'close_price': 15,
                    'price': 15,
                    'volume': 1000,
                    'sid': 1,
                    'dt': third_open,
                    'source_id': 'test-trade-source',
                    'type': DATASOURCE_TYPE.TRADE
                }),
                Event({
                    'open_price': 20,
                    'close_price': 15,
                    'price': 15,
                    'volume': 1000,
                    'sid': 1,
                    'dt': third_open + datetime.timedelta(minutes=10),
                    'source_id': 'test-trade-source',
                    'type': DATASOURCE_TYPE.TRADE
                }),
            ]

            algo.benchmark_return_source = benchmark_data
            algo.sources = [trade_bar_data]
            gen = algo._create_generator(sim_params)

            crm = algo.perf_tracker.cumulative_risk_metrics

            first_msg = gen.next()

            self.assertIsNotNone(first_msg,
                                 "There should be a message emitted.")

            # Protects against bug where the positions appeared to be
            # a day late, because benchmarks were triggering
            # calculations before the events for the day were
            # processed.
            self.assertEqual(
                1, len(algo.portfolio.positions), "There should "
                "be one position after the first day.")

            self.assertEquals(
                0, crm.metrics.algorithm_volatility[algo.datetime.date()],
                "On the first day algorithm volatility does not exist.")

            second_msg = gen.next()

            self.assertIsNotNone(second_msg, "There should be a message "
                                 "emitted.")

            self.assertEqual(1, len(algo.portfolio.positions),
                             "Number of positions should stay the same.")

            # TODO: Hand derive. Current value is just a canary to
            # detect changes.
            np.testing.assert_almost_equal(0.050022510129558301,
                                           crm.algorithm_returns[-1],
                                           decimal=6)

            third_msg = gen.next()

            self.assertEqual(1, len(algo.portfolio.positions),
                             "Number of positions should stay the same.")

            self.assertIsNotNone(third_msg, "There should be a message "
                                 "emitted.")

            # TODO: Hand derive. Current value is just a canary to
            # detect changes.
            np.testing.assert_almost_equal(-0.047639464532418657,
                                           crm.algorithm_returns[-1],
                                           decimal=6)
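
A small portability aside, not part of the original test: gen.next() only exists on Python 2; the builtin next() works on both Python 2.6+ and Python 3.

# Minimal illustration with a stand-in generator: next(gen) replaces gen.next().
gen = (msg for msg in ('first', 'second', 'third'))
first_msg = next(gen)
assert first_msg == 'first'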
Example #8
    def test_risk_metrics_returns(self):
        trading.environment = trading.TradingEnvironment()
        # Advance start date to first date in the trading calendar
        if trading.environment.is_trading_day(self.start_date):
            start_date = self.start_date
        else:
            start_date = trading.environment.next_trading_day(self.start_date)

        self.all_benchmark_returns = pd.Series({
            x.date: x.returns
            for x in trading.environment.benchmark_returns
            if x.date >= self.start_date
        })

        start_index = trading.environment.trading_days.searchsorted(start_date)
        end_date = trading.environment.trading_days[start_index + len(RETURNS)]

        sim_params = SimulationParameters(start_date, end_date)

        risk_metrics_refactor = risk.RiskMetricsIterative(sim_params)
        todays_date = start_date

        cur_returns = []
        for i, ret in enumerate(RETURNS):

            todays_return_obj = DailyReturn(todays_date, ret)
            cur_returns.append(todays_return_obj)

            try:
                risk_metrics_original = risk.RiskMetricsBatch(
                    start_date=start_date,
                    end_date=todays_date,
                    returns=cur_returns)
            except Exception as e:
                # Assert that when the original raises an exception, the same
                # exception is raised by risk_metrics_refactor.
                np.testing.assert_raises(
                    type(e), risk_metrics_refactor.update, todays_date,
                    self.all_benchmark_returns[todays_return_obj.date])
                continue

            risk_metrics_refactor.update(
                todays_date, ret,
                self.all_benchmark_returns[todays_return_obj.date])

            # Move forward day counter to next trading day
            todays_date = trading.environment.next_trading_day(todays_date)

            self.assertEqual(risk_metrics_original.start_date,
                             risk_metrics_refactor.start_date)
            self.assertEqual(risk_metrics_original.end_date,
                             risk_metrics_refactor.algorithm_returns.index[-1])
            self.assertEqual(risk_metrics_original.treasury_period_return,
                             risk_metrics_refactor.treasury_period_return)
            np.testing.assert_allclose(risk_metrics_original.benchmark_returns,
                                       risk_metrics_refactor.benchmark_returns,
                                       rtol=0.001)
            np.testing.assert_allclose(risk_metrics_original.algorithm_returns,
                                       risk_metrics_refactor.algorithm_returns,
                                       rtol=0.001)
            risk_original_dict = risk_metrics_original.to_dict()
            risk_refactor_dict = risk_metrics_refactor.to_dict()
            self.assertEqual(set(risk_original_dict.keys()),
                             set(risk_refactor_dict.keys()))

            err_msg_format = """\
"In update step {iter}: {measure} should be {truth} but is {returned}!"""

            for measure in risk_original_dict.iterkeys():
                if measure == 'max_drawdown':
                    np.testing.assert_almost_equal(
                        risk_refactor_dict[measure],
                        risk_original_dict[measure],
                        err_msg=err_msg_format.format(
                            iter=i,
                            measure=measure,
                            truth=risk_original_dict[measure],
                            returned=risk_refactor_dict[measure]))
                else:
                    if isinstance(risk_original_dict[measure], numbers.Real):
                        np.testing.assert_allclose(
                            risk_original_dict[measure],
                            risk_refactor_dict[measure],
                            rtol=0.001,
                            err_msg=err_msg_format.format(
                                iter=i,
                                measure=measure,
                                truth=risk_original_dict[measure],
                                returned=risk_refactor_dict[measure]))
                    else:
                        np.testing.assert_equal(
                            risk_original_dict[measure],
                            risk_refactor_dict[measure],
                            err_msg=err_msg_format.format(
                                iter=i,
                                measure=measure,
                                truth=risk_original_dict[measure],
                                returned=risk_refactor_dict[measure]))
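
A related portability aside for the loop above, not part of the original test: dict.iterkeys() is Python 2 only; on Python 3, iterating the dict directly yields the same keys.

# Minimal sketch with an illustrative dict standing in for risk_original_dict.
risk_dict = {'sharpe': 1.2, 'max_drawdown': -0.05}
for measure in risk_dict:
    print(measure, risk_dict[measure])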
Example #9
    def test_minute_tracker(self):
        """ Tests minute performance tracking."""
        with trading.TradingEnvironment():
            start_dt = trading.environment.exchange_dt_in_utc(
                datetime.datetime(2013, 3, 1, 9, 31))
            end_dt = trading.environment.exchange_dt_in_utc(
                datetime.datetime(2013, 3, 1, 16, 0))

            sim_params = SimulationParameters(
                period_start=start_dt,
                period_end=end_dt,
                emission_rate='minute'
            )
            tracker = perf.PerformanceTracker(sim_params)

            foo_event_1 = factory.create_trade('foo', 10.0, 20, start_dt)
            order_event_1 = Order(sid=foo_event_1.sid,
                                  amount=-25,
                                  dt=foo_event_1.dt)
            bar_event_1 = factory.create_trade('bar', 100.0, 200, start_dt)
            txn_event_1 = Transaction(sid=foo_event_1.sid,
                                      amount=-25,
                                      dt=foo_event_1.dt,
                                      price=10.0,
                                      commission=0.50,
                                      order_id=order_event_1.id)
            benchmark_event_1 = Event({
                'dt': start_dt,
                'returns': 0.01,
                'type': DATASOURCE_TYPE.BENCHMARK
            })

            foo_event_2 = factory.create_trade(
                'foo', 11.0, 20, start_dt + datetime.timedelta(minutes=1))
            bar_event_2 = factory.create_trade(
                'bar', 11.0, 20, start_dt + datetime.timedelta(minutes=1))
            benchmark_event_2 = Event({
                'dt': start_dt + datetime.timedelta(minutes=1),
                'returns': 0.02,
                'type': DATASOURCE_TYPE.BENCHMARK
            })

            events = [
                foo_event_1,
                order_event_1,
                benchmark_event_1,
                txn_event_1,
                bar_event_1,
                foo_event_2,
                benchmark_event_2,
                bar_event_2,
            ]

            grouped_events = itertools.groupby(
                events, operator.attrgetter('dt'))

            messages = {}
            for date, group in grouped_events:
                tracker.set_date(date)
                for event in group:
                    tracker.process_event(event)
                tracker.handle_minute_close(date)
                msg = tracker.to_dict()
                messages[date] = msg

            self.assertEquals(2, len(messages))

            msg_1 = messages[foo_event_1.dt]
            msg_2 = messages[foo_event_2.dt]

            self.assertEquals(1, len(msg_1['minute_perf']['transactions']),
                              "The first message should contain one "
                              "transaction.")
            # Check that transactions aren't emitted for previous events.
            self.assertEquals(0, len(msg_2['minute_perf']['transactions']),
                              "The second message should have no "
                              "transactions.")

            self.assertEquals(1, len(msg_1['minute_perf']['orders']),
                              "The first message should contain one orders.")
            # Check that orders aren't emitted for previous events.
            self.assertEquals(0, len(msg_2['minute_perf']['orders']),
                              "The second message should have no orders.")

            # Ensure that period_close moves through time.
            # Also, ensure that the period_closes are the expected dts.
            self.assertEquals(foo_event_1.dt,
                              msg_1['minute_perf']['period_close'])
            self.assertEquals(foo_event_2.dt,
                              msg_2['minute_perf']['period_close'])

            # Ensure that a Sharpe value for cumulative metrics is being
            # created.
            self.assertIsNotNone(msg_1['cumulative_risk_metrics']['sharpe'])
            self.assertIsNotNone(msg_2['cumulative_risk_metrics']['sharpe'])
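
One caveat about the grouping step in this test, offered as an observation rather than something the original asserts: itertools.groupby only merges consecutive equal keys, so the event list must already be ordered by dt. A standalone sketch:

import itertools
import operator
from collections import namedtuple

Evt = namedtuple('Evt', 'dt name')
events = [Evt(1, 'a'), Evt(1, 'b'), Evt(2, 'c')]

# Sorting by dt first keeps the grouping correct even if events arrive
# out of order.
events.sort(key=operator.attrgetter('dt'))
for dt, group in itertools.groupby(events, operator.attrgetter('dt')):
    print(dt, [e.name for e in group])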
Example #10
    def setUp(self):
        self.env = env = trading.TradingEnvironment()
        self.dates = date_range('2014-01-01',
                                '2014-02-01',
                                freq=trading_day,
                                tz='UTC')
        asset_info = DataFrame.from_records([
            {
                'sid': 1,
                'symbol': 'A',
                'asset_type': 'equity',
                'start_date': self.dates[10],
                'end_date': self.dates[13],
                'exchange': 'TEST',
            },
            {
                'sid': 2,
                'symbol': 'B',
                'asset_type': 'equity',
                'start_date': self.dates[11],
                'end_date': self.dates[14],
                'exchange': 'TEST',
            },
            {
                'sid': 3,
                'symbol': 'C',
                'asset_type': 'equity',
                'start_date': self.dates[12],
                'end_date': self.dates[15],
                'exchange': 'TEST',
            },
        ])
        self.first_asset_start = min(asset_info.start_date)
        self.last_asset_end = max(asset_info.end_date)
        env.write_data(equities_df=asset_info)
        self.asset_finder = finder = env.asset_finder

        sids = (1, 2, 3)
        self.assets = finder.retrieve_all(sids)

        # View of the baseline data.
        self.closes = DataFrame(
            {sid: arange(1, len(self.dates) + 1) * sid for sid in sids},
            index=self.dates,
            dtype=float,
        )

        # Add a split for 'A' on its second date.
        self.split_asset = self.assets[0]
        self.split_date = self.split_asset.start_date + trading_day
        self.split_ratio = 0.5
        self.adjustments = DataFrame.from_records([{
            'sid': self.split_asset.sid,
            'value': self.split_ratio,
            'kind': MULTIPLY,
            'start_date': Timestamp('NaT'),
            'end_date': self.split_date,
            'apply_date': self.split_date,
        }])

        # View of the data on/after the split.
        self.adj_closes = adj_closes = self.closes.copy()
        adj_closes.ix[:self.split_date, self.split_asset] *= self.split_ratio

        self.pipeline_loader = DataFrameLoader(
            column=USEquityPricing.close,
            baseline=self.closes,
            adjustments=self.adjustments,
        )
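
The split adjustment in this fixture uses DataFrame.ix, which has since been removed from pandas. A hedged sketch of the equivalent label-based slice with .loc, on illustrative data rather than the fixture itself:

import pandas as pd

closes = pd.DataFrame({'A': [1.0, 2.0, 3.0, 4.0]},
                      index=pd.date_range('2014-01-01', periods=4))
split_date = pd.Timestamp('2014-01-02')

# Halve all closes up to and including the split date; .loc label slices on a
# DatetimeIndex include both endpoints, matching the old .ix behaviour.
closes.loc[:split_date, 'A'] *= 0.5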
Example #11
    @classmethod
    def setUpClass(cls):
        cls.env = trading.TradingEnvironment()
        cls.env.write_data(equities_identifiers=[8229])
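
A hedged, standalone follow-up to this fixture (it assumes AssetFinder.retrieve_asset and that integer identifiers written via write_data become sids the finder can resolve):

from zipline.finance import trading

env = trading.TradingEnvironment()
env.write_data(equities_identifiers=[8229])
asset = env.asset_finder.retrieve_asset(8229)
print(asset)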
Example #12
    def test_market_hours_calculations(self):
        with trading.TradingEnvironment():
            # DST in US/Eastern began on Sunday, March 14, 2010.
            before = datetime(2010, 3, 12, 14, 31, tzinfo=pytz.utc)
            after = factory.get_next_trading_dt(before, timedelta(days=1))
            self.assertEqual(after.hour, 13)
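
The arithmetic behind the assertion above, as a standalone check: 9:31 America/New_York is 14:31 UTC under EST, but 13:31 UTC once EDT begins on 2010-03-14.

from datetime import datetime

import pytz

eastern = pytz.timezone('US/Eastern')
pre_dst = eastern.localize(datetime(2010, 3, 12, 9, 31)).astimezone(pytz.utc)
post_dst = eastern.localize(datetime(2010, 3, 15, 9, 31)).astimezone(pytz.utc)
assert pre_dst.hour == 14 and post_dst.hour == 13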
Example #13
def _run(handle_data, initialize, before_trading_start, analyze, algofile,
         algotext, defines, data_frequency, capital_base, data, bundle,
         bundle_timestamp, start, end, output, trading_calendar, print_algo,
         metrics_set, local_namespace, environ, blotter):
    """Run a backtest for the given algorithm.

    This is shared between the cli and :func:`zipline.run_algo`.
    """
    if algotext is not None:
        if local_namespace:
            ip = get_ipython()  # noqa
            namespace = ip.user_ns
        else:
            namespace = {}

        for assign in defines:
            try:
                name, value = assign.split('=', 2)
            except ValueError:
                raise ValueError(
                    'invalid define %r, should be of the form name=value' %
                    assign, )
            try:
                # evaluate in the same namespace so names may refer to
                # eachother
                namespace[name] = eval(value, namespace)
            except Exception as e:
                raise ValueError(
                    'failed to execute definition for name %r: %s' %
                    (name, e), )
    elif defines:
        raise _RunAlgoError(
            'cannot pass define without `algotext`',
            "cannot pass '-D' / '--define' without '-t' / '--algotext'",
        )
    else:
        namespace = {}
        if algofile is not None:
            algotext = algofile.read()

    if print_algo:
        if PYGMENTS:
            highlight(
                algotext,
                PythonLexer(),
                TerminalFormatter(),
                outfile=sys.stdout,
            )
        else:
            click.echo(algotext)

    if trading_calendar is None:
        trading_calendar = get_calendar('NYSE')

    # date parameter validation
    if trading_calendar.session_distance(start, end) < 1:
        raise _RunAlgoError(
            'There are no trading days between %s and %s' % (
                start.date(),
                end.date(),
            ), )

    if bundle is not None:
        bundle_data = bundles.load(
            bundle,
            environ,
            bundle_timestamp,
        )

        prefix, connstr = re.split(
            r'sqlite:///',
            str(bundle_data.asset_finder.engine.url),
            maxsplit=1,
        )
        if prefix:
            raise ValueError(
                "invalid url %r, must begin with 'sqlite:///'" %
                str(bundle_data.asset_finder.engine.url), )
        env = trading.TradingEnvironment(
            asset_db_path=connstr,
            environ=environ,
            trading_calendar=trading_calendar,
            trading_day=trading_calendar.day,
            trading_days=trading_calendar.schedule[start:end].index,
        )
        first_trading_day =\
            bundle_data.equity_minute_bar_reader.first_trading_day
        data = DataPortal(
            env.asset_finder,
            trading_calendar=trading_calendar,
            first_trading_day=first_trading_day,
            equity_minute_reader=bundle_data.equity_minute_bar_reader,
            equity_daily_reader=bundle_data.equity_daily_bar_reader,
            adjustment_reader=bundle_data.adjustment_reader,
        )

        pipeline_loader = loaders.USEquityPricingLoader(
            bundle_data.equity_daily_bar_reader,
            bundle_data.adjustment_reader,
        )

        def choose_loader(column):
            if column in USEquityPricing.columns:
                return pipeline_loader
            raise ValueError("No PipelineLoader registered for column %s." %
                             column)
    else:
        env = trading.TradingEnvironment(
            environ=environ,
            trading_calendar=trading_calendar,
            trading_day=trading_calendar.day,
            trading_days=trading_calendar.schedule[start:end].index,
        )
        choose_loader = None

    if isinstance(metrics_set, six.string_types):
        try:
            metrics_set = metrics.load(metrics_set)
        except ValueError as e:
            raise _RunAlgoError(str(e))

    if isinstance(blotter, six.string_types):
        try:
            blotter = load(Blotter, blotter)
        except ValueError as e:
            raise _RunAlgoError(str(e))

    perf = algorithm.TradingAlgorithm(
        namespace=namespace,
        env=env,
        get_pipeline_loader=choose_loader,
        trading_calendar=trading_calendar,
        sim_params=create_simulation_parameters(
            start=start,
            end=end,
            capital_base=capital_base,
            data_frequency=data_frequency,
            trading_calendar=trading_calendar,
        ),
        metrics_set=metrics_set,
        blotter=blotter,
        **{
            'initialize': initialize,
            'handle_data': handle_data,
            'before_trading_start': before_trading_start,
            'analyze': analyze,
        } if algotext is None else {
            'algo_filename': getattr(algofile, 'name', '<algorithm>'),
            'script': algotext,
        }).run(
            data,
            overwrite_sim_params=False,
        )

    if output == '-':
        click.echo(str(perf))
    elif output != os.devnull:  # make the zipline magic not write any data
        perf.to_pickle(output)

    return perf
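
A hedged sketch of driving this machinery through the public wrapper (it assumes zipline.run_algorithm forwards to _run and that a 'quandl' bundle has already been ingested locally):

import pandas as pd
from zipline import run_algorithm


def initialize(context):
    pass  # no orders; this only exercises the plumbing that _run sets up


perf = run_algorithm(
    start=pd.Timestamp('2014-01-02', tz='utc'),
    end=pd.Timestamp('2014-01-31', tz='utc'),
    initialize=initialize,
    capital_base=100000,
    data_frequency='daily',
    bundle='quandl',  # assumed to be ingested beforehand
)
print(perf.portfolio_value.tail())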
Example #14
    def setUp(self):
        self.env = env = trading.TradingEnvironment()
        self.dates = date_range(
            '2014-01-01', '2014-02-01', freq=trading_day, tz='UTC'
        )
        asset_info = DataFrame.from_records([
            {
                'sid': 1,
                'symbol': 'A',
                'start_date': self.dates[10],
                'end_date': self.dates[13],
                'exchange': 'TEST',
            },
            {
                'sid': 2,
                'symbol': 'B',
                'start_date': self.dates[11],
                'end_date': self.dates[14],
                'exchange': 'TEST',
            },
            {
                'sid': 3,
                'symbol': 'C',
                'start_date': self.dates[12],
                'end_date': self.dates[15],
                'exchange': 'TEST',
            },
        ])
        self.first_asset_start = min(asset_info.start_date)
        self.last_asset_end = max(asset_info.end_date)
        env.write_data(equities_df=asset_info)
        self.asset_finder = finder = env.asset_finder

        sids = (1, 2, 3)
        self.assets = finder.retrieve_all(sids)

        # View of the baseline data.
        self.closes = DataFrame(
            {sid: arange(1, len(self.dates) + 1) * sid for sid in sids},
            index=self.dates,
            dtype=float,
        )

        # Create a data portal holding the data in self.closes
        data = {}
        for sid in sids:
            data[sid] = DataFrame({
                "open": self.closes[sid].values,
                "high": self.closes[sid].values,
                "low": self.closes[sid].values,
                "close": self.closes[sid].values,
                "volume": self.closes[sid].values,
                "day": [day.value for day in self.dates]
            })

        path = os.path.join(self.tempdir.path, "testdaily.bcolz")

        DailyBarWriterFromDataFrames(data).write(
            path,
            self.dates,
            data
        )

        daily_bar_reader = BcolzDailyBarReader(path)

        self.data_portal = DataPortal(
            self.env,
            equity_daily_reader=daily_bar_reader,
        )

        # Add a split for 'A' on its second date.
        self.split_asset = self.assets[0]
        self.split_date = self.split_asset.start_date + trading_day
        self.split_ratio = 0.5
        self.adjustments = DataFrame.from_records([
            {
                'sid': self.split_asset.sid,
                'value': self.split_ratio,
                'kind': MULTIPLY,
                'start_date': Timestamp('NaT'),
                'end_date': self.split_date,
                'apply_date': self.split_date,
            }
        ])

        # View of the data on/after the split.
        self.adj_closes = adj_closes = self.closes.copy()
        adj_closes.ix[:self.split_date, self.split_asset] *= self.split_ratio

        self.pipeline_loader = DataFrameLoader(
            column=USEquityPricing.close,
            baseline=self.closes,
            adjustments=self.adjustments,
        )
Example #15
class TestPerformanceTracker(unittest.TestCase):

    NumDaysToDelete = collections.namedtuple(
        'NumDaysToDelete', ('start', 'middle', 'end'))

    @parameterized.expand([
        ("Don't delete any events",
         NumDaysToDelete(start=0, middle=0, end=0)),
        ("Delete first day of events",
         NumDaysToDelete(start=1, middle=0, end=0)),
        ("Delete first two days of events",
         NumDaysToDelete(start=2, middle=0, end=0)),
        ("Delete one day of events from the middle",
         NumDaysToDelete(start=0, middle=1, end=0)),
        ("Delete two events from the middle",
         NumDaysToDelete(start=0, middle=2, end=0)),
        ("Delete last day of events",
         NumDaysToDelete(start=0, middle=0, end=1)),
        ("Delete last two days of events",
         NumDaysToDelete(start=0, middle=0, end=2)),
        ("Delete all but one event.",
         NumDaysToDelete(start=2, middle=1, end=2)),
    ])
    def test_tracker(self, parameter_comment, days_to_delete):
        """
        @days_to_delete - configures which days in the data set we should
        remove, used for ensuring that we still return performance messages
        even when there is no data.
        """
        # This date range covers Columbus Day; however, Columbus Day is not
        # a market holiday.
        #
        #     October 2008
        # Su Mo Tu We Th Fr Sa
        #           1  2  3  4
        #  5  6  7  8  9 10 11
        # 12 13 14 15 16 17 18
        # 19 20 21 22 23 24 25
        # 26 27 28 29 30 31
        start_dt = datetime.datetime(year=2008,
                                     month=10,
                                     day=9,
                                     tzinfo=pytz.utc)
        end_dt = datetime.datetime(year=2008,
                                   month=10,
                                   day=16,
                                   tzinfo=pytz.utc)

        trade_count = 6
        sid = 133
        price = 10.1
        price_list = [price] * trade_count
        volume = [100] * trade_count
        trade_time_increment = datetime.timedelta(days=1)

        sim_params = SimulationParameters(
            period_start=start_dt,
            period_end=end_dt
        )

        trade_history = factory.create_trade_history(
            sid,
            price_list,
            volume,
            trade_time_increment,
            sim_params,
            source_id="factory1"
        )

        sid2 = 134
        price2 = 12.12
        price2_list = [price2] * trade_count
        trade_history2 = factory.create_trade_history(
            sid2,
            price2_list,
            volume,
            trade_time_increment,
            sim_params,
            source_id="factory2"
        )
        # The 'middle' start index of 3 depends on the number of days being 7.
        middle = 3

        # First delete from middle
        if days_to_delete.middle:
            del trade_history[middle:(middle + days_to_delete.middle)]
            del trade_history2[middle:(middle + days_to_delete.middle)]

        # Delete start
        if days_to_delete.start:
            del trade_history[:days_to_delete.start]
            del trade_history2[:days_to_delete.start]

        # Delete from end
        if days_to_delete.end:
            del trade_history[-days_to_delete.end:]
            del trade_history2[-days_to_delete.end:]

        sim_params.first_open = \
            sim_params.calculate_first_open()
        sim_params.last_close = \
            sim_params.calculate_last_close()
        sim_params.capital_base = 1000.0
        sim_params.frame_index = [
            'sid',
            'volume',
            'dt',
            'price',
            'changed']
        perf_tracker = perf.PerformanceTracker(
            sim_params
        )

        events = date_sorted_sources(trade_history, trade_history2)

        events = [self.event_with_txn(event, trade_history[0].dt)
                  for event in events]

        # Extract events with transactions to use for verification.
        events_with_txns = [event for event in events if event.TRANSACTION]

        perf_messages = \
            [msg for date, snapshot in
             perf_tracker.transform(
                 itertools.groupby(events, attrgetter('dt')))
             for event in snapshot
             for msg in event.perf_messages]

        end_perf_messages, risk_message = perf_tracker.handle_simulation_end()

        perf_messages.extend(end_perf_messages)

        # We skip two trades to test the case of a None transaction.
        self.assertEqual(perf_tracker.txn_count, len(events_with_txns))

        cumulative_pos = perf_tracker.cumulative_performance.positions[sid]
        expected_size = len(events_with_txns) / 2 * -25
        self.assertEqual(cumulative_pos.amount, expected_size)

        self.assertEqual(perf_tracker.last_close,
                         perf_tracker.cumulative_risk_metrics.end_date)

        self.assertEqual(len(perf_messages),
                         sim_params.days_in_period)

    def event_with_txn(self, event, no_txn_dt):
        # Create a transaction for all but the first trade in each sid,
        # to simulate a None transaction.
        if event.dt != no_txn_dt:
            txn = Transaction(**{
                'sid': event.sid,
                'amount': -25,
                'dt': event.dt,
                'price': 10.0,
                'commission': 0.50
            })
        else:
            txn = None
        event['TRANSACTION'] = txn

        return event

    @trading.use_environment(trading.TradingEnvironment())
    def test_minute_tracker(self):
        """ Tests minute performance tracking."""
        start_dt = trading.environment.exchange_dt_in_utc(
            datetime.datetime(2013, 3, 1, 9, 30))
        end_dt = trading.environment.exchange_dt_in_utc(
            datetime.datetime(2013, 3, 1, 16, 0))

        sim_params = SimulationParameters(
            period_start=start_dt,
            period_end=end_dt,
            emission_rate='minute'
        )
        tracker = perf.PerformanceTracker(sim_params)

        foo_event_1 = factory.create_trade('foo', 10.0, 20, start_dt)
        bar_event_1 = factory.create_trade('bar', 100.0, 200, start_dt)
        txn = Transaction(sid=foo_event_1.sid,
                          amount=-25,
                          dt=foo_event_1.dt,
                          price=10.0,
                          commission=0.50)
        foo_event_1.TRANSACTION = txn

        foo_event_2 = factory.create_trade(
            'foo', 11.0, 20, start_dt + datetime.timedelta(minutes=1))
        bar_event_2 = factory.create_trade(
            'bar', 11.0, 20, start_dt + datetime.timedelta(minutes=1))

        events = [
            foo_event_1,
            bar_event_1,
            foo_event_2,
            bar_event_2
        ]

        import operator
        messages = {date: snapshot[0].perf_messages[0] for date, snapshot in
                    tracker.transform(
                        itertools.groupby(
                            events,
                            operator.attrgetter('dt')))}

        self.assertEquals(2, len(messages))

        msg_1 = messages[foo_event_1.dt]
        msg_2 = messages[foo_event_2.dt]

        self.assertEquals(1, len(msg_1['intraday_perf']['transactions']),
                          "The first message should contain one transaction.")
        # Check that transactions aren't emitted for previous events.
        self.assertEquals(0, len(msg_2['intraday_perf']['transactions']),
                          "The second message should have no transactions.")

        # Ensure that period_close moves through time.
        # Also, ensure that the period_closes are the expected dts.
        self.assertEquals(foo_event_1.dt,
                          msg_1['intraday_perf']['period_close'])
        self.assertEquals(foo_event_2.dt,
                          msg_2['intraday_perf']['period_close'])