Example #1
0
    def test_benchmark_file(self):
        """Benchmark returns supplied through a CSV file are parsed into
        a tz-aware returns series, with no benchmark sid.
        """
        daily_rows = [
            "date,return",
            "2020-01-03 00:00:00+00:00,-0.1",
            "2020-01-06 00:00:00+00:00,0.333",
            "2020-01-07 00:00:00+00:00,0.167",
            "2020-01-08 00:00:00+00:00,0.143",
            "2020-01-09 00:00:00+00:00,6.375",
        ]
        csv_file_path = self.tmpdir.getpath('b.csv')
        with open(csv_file_path, 'w') as csv_file:
            csv_file.write("\n".join(daily_rows) + "\n")

        spec = BenchmarkSpec.from_cli_params(
            no_benchmark=False,
            benchmark_sid=None,
            benchmark_file=csv_file_path,
        )

        sid, returns = self.resolve_spec(spec)

        self.assertIs(sid, None)

        # The file's dates and values should round-trip unchanged.
        expected_returns = pd.Series(
            data=[-0.1, 0.333, 0.167, 0.143, 6.375],
            index=pd.to_datetime(
                ['2020-01-03', '2020-01-06', '2020-01-07', '2020-01-08',
                 '2020-01-09'],
                utc=True,
            ),
        )

        assert_series_equal(returns, expected_returns, check_names=False)
Example #2
0
def run(ctx, algofile, algotext, define, data_frequency, capital_base, bundle,
        bundle_timestamp, benchmark_file, benchmark_symbol, benchmark_sid,
        no_benchmark, start, end, output, trading_calendar, print_algo,
        metrics_set, local_namespace, blotter):
    """Run a backtest for the given algorithm.
    """
    # Both dates missing is reported as a single combined error so the
    # user is not told about the second flag only after fixing the first.
    if start is None and end is None:
        ctx.fail("must specify dates with '-s' / '--start' and '-e' / '--end'")
    if start is None:
        ctx.fail("must specify a start date with '-s' / '--start'")
    if end is None:
        ctx.fail("must specify an end date with '-e' / '--end'")

    # Exactly one source of algorithm code must be supplied.
    if (algotext is None) == (algofile is None):
        ctx.fail("must specify exactly one of '-f' / '--algofile' or"
                 " '-t' / '--algotext'")

    calendar = get_calendar(trading_calendar)

    spec = BenchmarkSpec.from_cli_params(
        no_benchmark=no_benchmark,
        benchmark_sid=benchmark_sid,
        benchmark_symbol=benchmark_symbol,
        benchmark_file=benchmark_file,
    )

    return _run(
        initialize=None,
        handle_data=None,
        before_trading_start=None,
        analyze=None,
        algofile=algofile,
        algotext=algotext,
        defines=define,
        data_frequency=data_frequency,
        capital_base=capital_base,
        bundle=bundle,
        bundle_timestamp=bundle_timestamp,
        start=start,
        end=end,
        output=output,
        trading_calendar=calendar,
        print_algo=print_algo,
        metrics_set=metrics_set,
        local_namespace=local_namespace,
        environ=os.environ,
        blotter=blotter,
        benchmark_spec=spec,
    )
Example #3
0
    def test_benchmark_sid(self, input_sid):
        """Test resolving a benchmark specified by an explicit sid.

        The sid passed on the command line should be resolved as-is,
        and no returns series should be produced.
        """
        spec = BenchmarkSpec.from_cli_params(
            no_benchmark=False,
            benchmark_sid=input_sid,
            benchmark_file=None,
        )

        sid, returns = self.resolve_spec(spec)

        # The explicit sid wins; returns are left to be looked up later.
        assert_equal(sid, input_sid)
        self.assertIs(returns, None)
Example #4
0
    def test_no_benchmark_explicitly_disabled(self):
        """Explicitly disabling the benchmark yields no sid and an
        all-zero returns series.
        """
        benchmark_spec = BenchmarkSpec.from_cli_params(
            no_benchmark=True,
            benchmark_sid=None,
            benchmark_file=None,
        )

        resolved_sid, resolved_returns = self.resolve_spec(benchmark_spec)

        self.assertIs(resolved_sid, None)
        assert_series_equal(resolved_returns, self.zero_returns)
Example #5
0
    def test_no_benchmark(self):
        """Resolving a spec with no benchmark source configured.

        Neither a benchmark sid nor a returns series should be produced.
        """
        benchmark_spec = BenchmarkSpec.from_cli_params(
            no_benchmark=False,
            benchmark_sid=None,
            benchmark_file=None,
        )

        resolved_sid, resolved_returns = self.resolve_spec(benchmark_spec)

        self.assertIs(resolved_sid, None)
        self.assertIs(resolved_returns, None)
    def test_benchmark_sid(self, input_sid):
        """Test resolving a benchmark specified by an explicit sid.

        The sid passed on the command line should be resolved as-is,
        no returns series should be produced, and no warnings logged.
        """
        spec = BenchmarkSpec.from_cli_params(
            no_benchmark=False,
            benchmark_sid=input_sid,
            benchmark_symbol=None,
            benchmark_file=None,
        )

        sid, returns = self.resolve_spec(spec)

        assert_equal(sid, input_sid)
        assert returns is None

        # A fully specified benchmark source must not trigger any of the
        # missing-benchmark configuration warnings.
        warnings = self.logs_at_level(logbook.WARNING)
        expected = []
        assert_equal(warnings, expected)
    def test_no_benchmark_explicitly_disabled(self):
        """Explicitly disabling the benchmark yields an all-zero returns
        series, no sid, and no warnings.
        """
        benchmark_spec = BenchmarkSpec.from_cli_params(
            no_benchmark=True,
            benchmark_sid=None,
            benchmark_symbol=None,
            benchmark_file=None,
        )

        resolved_sid, resolved_returns = self.resolve_spec(benchmark_spec)

        assert resolved_sid is None
        assert_series_equal(resolved_returns, self.zero_returns)

        # Explicitly opting out of a benchmark is not a warning condition.
        assert_equal(self.logs_at_level(logbook.WARNING), [])
Example #8
0
    def test_benchmark_symbol(self, case):
        """Test resolving a benchmark specified by ticker symbol.

        Each ``(symbol, expected_sid)`` case should resolve to the
        expected sid with no returns series and no warnings.
        """
        symbol, expected_sid = case

        spec = BenchmarkSpec.from_cli_params(
            no_benchmark=False,
            benchmark_sid=None,
            benchmark_symbol=symbol,
            benchmark_file=None,
        )

        sid, returns = self.resolve_spec(spec)

        assert_equal(sid, expected_sid)
        self.assertIs(returns, None)

        # A fully specified benchmark source must not trigger any of the
        # missing-benchmark configuration warnings.
        warnings = self.logs_at_level(logbook.WARNING)
        expected = []
        assert_equal(warnings, expected)
    def test_benchmark_file(self):
        """Benchmark returns supplied through a CSV file are parsed into
        a tz-aware returns series, with no sid and no warnings.
        """
        daily_rows = [
            "date,return",
            "2020-01-03 00:00:00+00:00,-0.1",
            "2020-01-06 00:00:00+00:00,0.333",
            "2020-01-07 00:00:00+00:00,0.167",
            "2020-01-08 00:00:00+00:00,0.143",
            "2020-01-09 00:00:00+00:00,6.375",
        ]
        csv_file_path = self.tmpdir.getpath("b.csv")
        with open(csv_file_path, "w") as csv_file:
            csv_file.write("\n".join(daily_rows) + "\n")

        spec = BenchmarkSpec.from_cli_params(
            no_benchmark=False,
            benchmark_sid=None,
            benchmark_symbol=None,
            benchmark_file=csv_file_path,
        )

        sid, returns = self.resolve_spec(spec)

        assert sid is None

        # The file's dates and values should round-trip unchanged.
        expected_returns = pd.Series(
            data=[-0.1, 0.333, 0.167, 0.143, 6.375],
            index=pd.to_datetime(
                ["2020-01-03", "2020-01-06", "2020-01-07", "2020-01-08",
                 "2020-01-09"],
                utc=True,
            ),
        )

        assert_series_equal(returns, expected_returns, check_names=False)

        # A valid benchmark file is a complete configuration: no warnings.
        assert_equal(self.logs_at_level(logbook.WARNING), [])
Example #10
0
    def test_no_benchmark(self):
        """Resolving a spec with no benchmark source configured.

        Nothing should be resolved, and the user should be warned about
        every way a benchmark can be configured.
        """
        benchmark_spec = BenchmarkSpec.from_cli_params(
            no_benchmark=False,
            benchmark_sid=None,
            benchmark_symbol=None,
            benchmark_file=None,
        )

        resolved_sid, resolved_returns = self.resolve_spec(benchmark_spec)

        self.assertIs(resolved_sid, None)
        self.assertIs(resolved_returns, None)

        expected_warnings = [
            'No benchmark configured. Assuming algorithm calls set_benchmark.',
            'Pass --benchmark-sid, --benchmark-symbol, or --benchmark-file to set a source of benchmark returns.',  # noqa
            "Pass --no-benchmark to use a dummy benchmark of zero returns.",
        ]
        assert_equal(self.logs_at_level(logbook.WARNING), expected_warnings)
Example #11
0
def run(ctx,
        algofile,
        algotext,
        define,
        data_frequency,
        capital_base,
        bundle,
        bundle_timestamp,
        benchmark_file,
        benchmark_symbol,
        benchmark_sid,
        no_benchmark,
        start,
        end,
        output,
        trading_calendar,
        print_algo,
        metrics_set,
        local_namespace,
        blotter,
        broker,
        broker_uri,
        state_file,
        realtime_bar_target,
        list_brokers):
    """Run a backtest (or, when ``broker`` is given, a live trading
    session) for the given algorithm.

    In backtest mode ``start`` and ``end`` are required; in live mode the
    date checks are skipped and a broker object is constructed from the
    ``zipline.gens.brokers`` package.  All validation failures are
    reported through ``ctx.fail``, which aborts the command.
    """

    # --list-brokers short-circuits everything else: print and return.
    if list_brokers:
        click.echo("Supported brokers:")
        for _, name, _ in pkgutil.iter_modules(brokers.__path__):
            if name != 'broker':
                click.echo(name)
        return

    # check that the start and end dates are passed correctly.  Both
    # missing is reported as a single combined error so the user is not
    # told about the second flag only after supplying the first.
    if not broker and start is None and end is None:
        ctx.fail(
            "must specify dates with '-s' / '--start' and '-e' / '--end'",
        )

    if not broker and start is None:
        ctx.fail("must specify a start date with '-s' / '--start'")
    if not broker and end is None:
        ctx.fail("must specify an end date with '-e' / '--end'")

    # Live trading additionally requires a broker connection plus
    # persistence/output targets.
    if broker and broker_uri is None:
        ctx.fail("must specify broker-uri if broker is specified")

    if broker and state_file is None:
        ctx.fail("must specify state-file with live trading")

    if broker and realtime_bar_target is None:
        ctx.fail("must specify realtime-bar-target with live trading")

    brokerobj = None
    if broker:
        # Brokers live in zipline.gens.brokers.<name>_broker as a class
        # named <NAME>Broker.
        mod_name = 'zipline.gens.brokers.%s_broker' % broker.lower()
        try:
            bmod = import_module(mod_name)
        except ImportError:
            ctx.fail("unsupported broker: can't import module %s" % mod_name)

        cl_name = '%sBroker' % broker.upper()
        try:
            bclass = getattr(bmod, cl_name)
        except AttributeError:
            ctx.fail("unsupported broker: can't import class %s from %s" %
                     (cl_name, mod_name))
        brokerobj = bclass(broker_uri)

    if end is None:
        # Only reachable in live mode: a missing end date in backtest
        # mode was rejected above.  Add one extra second to guarantee
        # the session window is strictly greater than one day.
        end = pd.Timestamp.utcnow() + pd.Timedelta(days=1, seconds=1)

    # Exactly one source of algorithm code must be supplied.
    if (algotext is not None) == (algofile is not None):
        ctx.fail(
            "must specify exactly one of '-f' / '--algofile' or"
            " '-t' / '--algotext'",
        )

    trading_calendar = get_calendar(trading_calendar)

    benchmark_spec = BenchmarkSpec.from_cli_params(
        no_benchmark=no_benchmark,
        benchmark_sid=benchmark_sid,
        benchmark_symbol=benchmark_symbol,
        benchmark_file=benchmark_file,
    )

    return _run(
        initialize=None,
        handle_data=None,
        before_trading_start=None,
        analyze=None,
        teardown=None,
        algofile=algofile,
        algotext=algotext,
        defines=define,
        data_frequency=data_frequency,
        capital_base=capital_base,
        bundle=bundle,
        bundle_timestamp=bundle_timestamp,
        start=start,
        end=end,
        output=output,
        trading_calendar=trading_calendar,
        print_algo=print_algo,
        metrics_set=metrics_set,
        local_namespace=local_namespace,
        environ=os.environ,
        blotter=blotter,
        benchmark_spec=benchmark_spec,
        broker=brokerobj,
        state_filename=state_file,
        realtime_bar_target=realtime_bar_target,
        performance_callback=None,
        stop_execution_callback=None,
        execution_id=None
    )