Example #1
def trade(strategies,
          accounts=None,
          review_date=None,
          output="csv",
          filepath_or_buffer=None):
    """
    Run one or more strategies and generate orders.

    Allocations are read from configuration (quantrocket.moonshot.allocations.yml).

    Parameters
    ----------
    strategies : list of str, required
        one or more strategy codes

    accounts : list of str, optional
        limit to these accounts

    review_date : str (YYYY-MM-DD), optional
        generate orders as if it were this date, rather than using today's date

    output : str, required
        the output format (choices are csv or json)

    filepath_or_buffer : str, optional
        the location to write the orders file (omit to write to stdout)

    Returns
    -------
    None
    """
    params = {}
    if strategies:
        params["strategies"] = strategies
    if accounts:
        params["accounts"] = accounts
    if review_date:
        params["review_date"] = review_date

    output = output or "csv"

    if output not in ("csv", "json"):
        raise ValueError(
            "invalid output: {0} (choices are csv or json".format(output))

    response = houston.get("/moonshot/orders.{0}".format(output),
                           params=params,
                           timeout=60 * 5)

    houston.raise_for_status_with_json(response)

    # Don't write a null response to file
    if response.content[:4] == b"null":
        return

    filepath_or_buffer = filepath_or_buffer or sys.stdout
    write_response_to_filepath_or_buffer(filepath_or_buffer, response)
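
A minimal usage sketch (the strategy code "my-strategy" and the output path
are illustrative):

>>> trade(["my-strategy"], filepath_or_buffer="my_strategy_orders.csv")

Example #2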
def download_positions(filepath_or_buffer=None, output="csv",
                       order_refs=None, accounts=None, conids=None):
    """
    Query current positions and write results to file.

    To return positions as a Python list, see list_positions.

    Parameters
    ----------
    filepath_or_buffer : str or file-like object
        filepath to write the data to, or file-like object (defaults to stdout)

    output : str
        output format (json, csv, txt, default is csv)

    order_refs : list of str, optional
        limit to these order refs

    accounts : list of str, optional
        limit to these accounts

    conids : list of int, optional
        limit to these conids

    Returns
    -------
    None
    """
    params = {}
    if order_refs:
        params["order_refs"] = order_refs
    if accounts:
        params["accounts"] = accounts
    if conids:
        params["conids"] = conids

    output = output or "csv"

    if output not in ("csv", "json", "txt"):
        raise ValueError("Invalid ouput: {0}".format(output))

    response = houston.get("/blotter/positions.{0}".format(output), params=params)

    houston.raise_for_status_with_json(response)

    # Don't write a null response to file
    if response.content[:4] == b"null":
        return

    filepath_or_buffer = filepath_or_buffer or sys.stdout

    write_response_to_filepath_or_buffer(filepath_or_buffer, response)
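
A usage sketch, mirroring the docstring examples elsewhere in this collection
(the account "DU12345" is illustrative):

>>> import io
>>> import pandas as pd
>>> f = io.StringIO()
>>> download_positions(f, accounts=["DU12345"])
>>> positions = pd.read_csv(f)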
Example #3
def download_executions(filepath_or_buffer=None,
                        order_refs=None,
                        accounts=None,
                        conids=None,
                        start_date=None,
                        end_date=None):
    """
    Query executions from the executions database.

    Parameters
    ----------
    filepath_or_buffer : str or file-like object
        filepath to write the data to, or file-like object (defaults to stdout)

    order_refs : list of str, optional
        limit to these order refs

    accounts : list of str, optional
        limit to these accounts

    conids : list of int, optional
        limit to these conids

    start_date : str (YYYY-MM-DD), optional
        limit to executions on or after this date

    end_date : str (YYYY-MM-DD), optional
        limit to executions on or before this date

    Returns
    -------
    None
    """
    params = {}
    if order_refs:
        params["order_refs"] = order_refs
    if accounts:
        params["accounts"] = accounts
    if conids:
        params["conids"] = conids
    if start_date:
        params["start_date"] = start_date
    if end_date:
        params["end_date"] = end_date

    response = houston.get("/blotter/executions.csv", params=params)

    houston.raise_for_status_with_json(response)

    filepath_or_buffer = filepath_or_buffer or sys.stdout

    write_response_to_filepath_or_buffer(filepath_or_buffer, response)
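
A usage sketch (the order ref "my-strategy" and the date are illustrative):

>>> import io
>>> import pandas as pd
>>> f = io.StringIO()
>>> download_executions(f, order_refs=["my-strategy"], start_date="2018-01-01")
>>> executions = pd.read_csv(f)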
Example #4
def execute_command(cmd,
                    return_file=None,
                    filepath_or_buffer=None,
                    service="satellite"):
    """
    Execute an arbitrary command on a satellite service and optionally return a file.

    Parameters
    ----------
    cmd : str, required
        the command to run

    return_file : str, optional
        the path of a file to be returned after the command completes

    filepath_or_buffer : str, optional
        the location to write the return_file (omit to write to stdout)

    service : str, optional
        the service name (default 'satellite')

    Returns
    -------
    dict or None
        None if return_file, otherwise status message
    """
    params = {}
    if not service:
        raise ValueError("a service is required")
    if not cmd:
        raise ValueError("a command is required")
    params["cmd"] = cmd
    if return_file:
        params["return_file"] = return_file

    if not service.startswith("satellite"):
        raise ValueError("service must start with 'satellite'")

    response = houston.post("/{0}/commands".format(service),
                            params=params,
                            timeout=60 * 60 * 24)

    houston.raise_for_status_with_json(response)

    if return_file:
        filepath_or_buffer = filepath_or_buffer or sys.stdout
        write_response_to_filepath_or_buffer(filepath_or_buffer, response)
    else:
        return response.json()
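
A usage sketch: run a script on the default satellite service, then fetch a
file it produced (the script and file paths are illustrative):

>>> execute_command("python /codeload/scripts/myscript.py")
>>> execute_command("python /codeload/scripts/myscript.py",
                    return_file="/tmp/results.csv",
                    filepath_or_buffer="results.csv")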
Example #5
def download_history_availability_file(code,
                                       filepath_or_buffer=None,
                                       output="csv"):
    """
    Query historical market data availability from a history database and download to file.

    This function is normally called after running:

        quantrocket history collect mydb --availability

    Parameters
    ----------
    code : str, required
        the code of the database to query

    filepath_or_buffer : str or file-like object
        filepath to write the data to, or file-like object (defaults to stdout)

    output : str
        output format (json, csv, txt, default is csv)

    Returns
    -------
    None

    See Also
    --------
    get_history_availability : load historical availability into Series
    """
    output = output or "csv"

    if output not in ("csv", "json", "txt"):
        raise ValueError("Invalid ouput: {0}".format(output))

    response = houston.get("/history/availability/{0}.{1}".format(
        code, output))

    houston.raise_for_status_with_json(response)
    filepath_or_buffer = filepath_or_buffer or sys.stdout

    write_response_to_filepath_or_buffer(filepath_or_buffer, response)
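
A usage sketch (the database code "mydb" is illustrative):

>>> import io
>>> import pandas as pd
>>> f = io.StringIO()
>>> download_history_availability_file("mydb", f)
>>> availability = pd.read_csv(f)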
Example #6
def create_tearsheet(infilepath_or_buffer, outfilepath_or_buffer=None):
    """
    Create a pyfolio PDF tear sheet from a Zipline backtest result.

    Parameters
    ----------
    infilepath_or_buffer : str, required
        the CSV file from a Zipline backtest (specify '-' to read file from stdin)

    outfilepath_or_buffer : str or file-like, optional
        the location to write the pyfolio tear sheet (write to stdout if omitted)

    Returns
    -------
    None
    """
    url = "/zipline/tearsheets"
    # Pyfolio can take a long time
    timeout = 60 * 60 * 5

    if infilepath_or_buffer == "-":
        infilepath_or_buffer = sys.stdin.buffer if six.PY3 else sys.stdin
        response = houston.post(url,
                                data=infilepath_or_buffer,
                                timeout=timeout)

    elif infilepath_or_buffer and hasattr(infilepath_or_buffer, "read"):
        if infilepath_or_buffer.seekable():
            infilepath_or_buffer.seek(0)
        response = houston.post(url,
                                data=infilepath_or_buffer,
                                timeout=timeout)

    else:
        with open(infilepath_or_buffer, "rb") as f:
            response = houston.post(url, data=f, timeout=timeout)

    houston.raise_for_status_with_json(response)

    outfilepath_or_buffer = outfilepath_or_buffer or sys.stdout
    write_response_to_filepath_or_buffer(outfilepath_or_buffer, response)
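
A usage sketch, assuming a backtest result CSV produced by run_algorithm (see
Example #8; the filenames are illustrative):

>>> create_tearsheet("momentum_pipeline_results.csv",
                     outfilepath_or_buffer="momentum_tearsheet.pdf")

Example #7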
def generate_orders(strategies, accounts=None, json=False, filepath_or_buffer=None):
    """
    Run one or more strategies and generate orders.

    Allocations are read from configuration (quantrocket.moonshot.allocations.yml).

    Parameters
    ----------
    strategies : list of str, required
        one or more strategy codes

    accounts : list of str, optional
        limit to these accounts

    json : bool
        format orders as JSON (default is CSV)

    filepath_or_buffer : str, optional
        the location to write the orders file (omit to write to stdout)

    Returns
    -------
    None
    """
    params = {}
    if strategies:
        params["strategies"] = strategies
    if accounts:
        params["accounts"] = accounts

    output = "json" if json else "csv"

    response = houston.get("/moonshot/orders.{0}".format(output), params=params, timeout=60*5)

    houston.raise_for_status_with_json(response)

    filepath_or_buffer = filepath_or_buffer or sys.stdout
    write_response_to_filepath_or_buffer(filepath_or_buffer, response)
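
A usage sketch (the strategy code "my-strategy" is illustrative):

>>> generate_orders(["my-strategy"], filepath_or_buffer="orders.csv")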
Example #8
def run_algorithm(algofile,
                  data_frequency=None,
                  capital_base=None,
                  bundle=None,
                  bundle_timestamp=None,
                  start=None,
                  end=None,
                  filepath_or_buffer=None,
                  calendar=None):
    """
    Run a Zipline backtest and write the test results to a CSV file.

    The CSV result file contains several DataFrames stacked into one: the Zipline performance
    results, plus the extracted returns, transactions, positions, and benchmark returns from those
    results.

    Parameters
    ----------
    algofile : str, required
        the file that contains the algorithm to run

    data_frequency : str, optional
        the data frequency of the simulation. Possible choices: daily, minute (default is daily)

    capital_base : float, optional
        the starting capital for the simulation (default is 10000000.0)

    bundle : str, required
        the data bundle to use for the simulation

    bundle_timestamp : str, optional
        the date to lookup data on or before (default is <current-time>)

    start : str (YYYY-MM-DD), required
        the start date of the simulation

    end : str (YYYY-MM-DD), required
        the end date of the simulation

    filepath_or_buffer : str, optional
        the location to write the output file (omit to write to stdout)

    calendar : str, optional
        the calendar you want to use e.g. LSE (default is to use the calendar
        associated with the data bundle).

    Returns
    -------
    None

    Examples
    --------
    Run a backtest and save to CSV.

    >>> from quantrocket.zipline import run_algorithm
    >>> run_algorithm("momentum_pipeline.py", bundle="my-bundle",
                      start="2015-02-04", end="2015-12-31",
                      filepath_or_buffer="momentum_pipeline_results.csv")

    Get a pyfolio tear sheet from the results:

    >>> import pyfolio as pf
    >>> pf.from_zipline_csv("momentum_pipeline_results.csv")
    """
    params = {}
    if data_frequency:
        params["data_frequency"] = data_frequency
    if capital_base:
        params["capital_base"] = capital_base
    if not bundle:
        raise ValueError("must specify a bundle")
    params["bundle"] = bundle
    if bundle_timestamp:
        params["bundle_timestamp"] = bundle_timestamp
    if start:
        params["start"] = start
    if end:
        params["end"] = end
    if calendar:
        params["calendar"] = calendar

    response = houston.post("/zipline/backtests/{0}".format(algofile),
                            params=params,
                            timeout=60 * 60 * 3)

    houston.raise_for_status_with_json(response)

    filepath_or_buffer = filepath_or_buffer or sys.stdout
    write_response_to_filepath_or_buffer(filepath_or_buffer, response)
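Example #9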
def download_reuters_estimates(codes, filepath_or_buffer=None, output="csv",
                               start_date=None, end_date=None,
                               universes=None, conids=None,
                               exclude_universes=None, exclude_conids=None,
                               period_types=None, fields=None):
    """
    Query estimates and actuals from the Reuters estimates database and
    download to file.

    You can query one or more indicator codes. Use the `list_reuters_codes`
    function to see available codes.

    Parameters
    ----------
    codes : list of str, required
        the indicator code(s) to query

    filepath_or_buffer : str or file-like object
        filepath to write the data to, or file-like object (defaults to stdout)

    output : str
        output format (json, csv, txt, default is csv)

    start_date : str (YYYY-MM-DD), optional
        limit to estimates and actuals on or after this fiscal period end date

    end_date : str (YYYY-MM-DD), optional
        limit to estimates and actuals on or before this fiscal period end date

    universes : list of str, optional
        limit to these universes

    conids : list of int, optional
        limit to these conids

    exclude_universes : list of str, optional
        exclude these universes

    exclude_conids : list of int, optional
        exclude these conids

    period_types : list of str, optional
        limit to these fiscal period types. Possible choices: A, Q, S, where
        A=Annual, Q=Quarterly, S=Semi-Annual

    fields : list of str, optional
        only return these fields (pass ['?'] or any invalid fieldname to see
        available fields)

    Returns
    -------
    None

    Examples
    --------
    Query EPS estimates and actuals for a universe of Australian stocks. You can use
    StringIO to load the CSV into pandas.

    >>> f = io.StringIO()
    >>> download_reuters_estimates(["EPS"], f, universes=["asx-stk"],
                                    start_date="2014-01-01"
                                    end_date="2017-01-01")
    >>> estimates = pd.read_csv(f, parse_dates=["FiscalPeriodEndDate", "AnnounceDate"])
    """
    params = {}
    if codes:
        params["codes"] = codes
    if start_date:
        params["start_date"] = start_date
    if end_date:
        params["end_date"] = end_date
    if universes:
        params["universes"] = universes
    if conids:
        params["conids"] = conids
    if exclude_universes:
        params["exclude_universes"] = exclude_universes
    if exclude_conids:
        params["exclude_conids"] = exclude_conids
    if period_types:
        params["period_types"] = period_types
    if fields:
        params["fields"] = fields

    output = output or "csv"

    if output not in ("csv", "json", "txt"):
        raise ValueError("Invalid ouput: {0}".format(output))

    response = houston.get("/fundamental/reuters/estimates.{0}".format(output), params=params,
                           timeout=60*5)

    houston.raise_for_status_with_json(response)

    filepath_or_buffer = filepath_or_buffer or sys.stdout

    write_response_to_filepath_or_buffer(filepath_or_buffer, response)
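Example #10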
def download_reuters_financials(codes, filepath_or_buffer=None, output="csv",
                                start_date=None, end_date=None,
                                universes=None, conids=None,
                                exclude_universes=None, exclude_conids=None,
                                interim=False, restatements=False, fields=None):
    """
    Query financial statements from the Reuters financials database and
    download to file.

    You can query one or more COA codes. Use the `list_reuters_codes` function to see
    available codes.

    Annual or interim reports are available. Annual is the default and provides
    deeper history.

    By default restatements are excluded, but they can optionally be included.

    Parameters
    ----------
    codes : list of str, required
        the Chart of Account (COA) code(s) to query

    filepath_or_buffer : str or file-like object
        filepath to write the data to, or file-like object (defaults to stdout)

    output : str
        output format (json, csv, txt, default is csv)

    start_date : str (YYYY-MM-DD), optional
        limit to statements on or after this date (based on the
        fiscal period end date if including restatements, otherwise the
        filing date)

    end_date : str (YYYY-MM-DD), optional
        limit to statements on or before this date (based on the
        fiscal period end date if including restatements, otherwise the
        filing date)

    universes : list of str, optional
        limit to these universes

    conids : list of int, optional
        limit to these conids

    exclude_universes : list of str, optional
        exclude these universes

    exclude_conids : list of int, optional
        exclude these conids

    interim : bool, optional
        return interim reports (default is to return annual reports,
        which provide deeper history)

    restatements : bool, optional
        include restatements (default is to exclude them)

    fields : list of str, optional
        only return these fields (pass ['?'] or any invalid fieldname to see
        available fields)

    Returns
    -------
    None

    Examples
    --------
    Query total revenue (COA code RTLR) for a universe of Australian stocks. You can use
    StringIO to load the CSV into pandas.

    >>> f = io.StringIO()
    >>> download_reuters_financials(["RTLR"], f, universes=["asx-stk"],
                                    start_date="2014-01-01"
                                    end_date="2017-01-01")
    >>> financials = pd.read_csv(f, parse_dates=["StatementDate", "SourceDate", "FiscalPeriodEndDate"])

    Query net income (COA code NINC) from interim reports for two securities
    (identified by conid) and include restatements:

    >>> download_reuters_financials(["NINC"], f, conids=[123456, 234567],
                                    interim=True, restatements=True)

    Query common and preferred shares outstanding (COA codes QTCO and QTPO) and return a
    minimal set of fields (several required fields will always be returned):

    >>> download_reuters_financials(["QTCO", "QTPO"], f, universes=["nyse-stk"],
                                    fields=["Amount"])
    """
    params = {}
    if codes:
        params["codes"] = codes
    if start_date:
        params["start_date"] = start_date
    if end_date:
        params["end_date"] = end_date
    if universes:
        params["universes"] = universes
    if conids:
        params["conids"] = conids
    if exclude_universes:
        params["exclude_universes"] = exclude_universes
    if exclude_conids:
        params["exclude_conids"] = exclude_conids
    if interim:
        params["interim"] = interim
    if restatements:
        params["restatements"] = restatements
    if fields:
        params["fields"] = fields

    output = output or "csv"

    if output not in ("csv", "json", "txt"):
        raise ValueError("Invalid ouput: {0}".format(output))

    response = houston.get("/fundamental/reuters/financials.{0}".format(output), params=params,
                           timeout=60*5)

    houston.raise_for_status_with_json(response)

    filepath_or_buffer = filepath_or_buffer or sys.stdout

    write_response_to_filepath_or_buffer(filepath_or_buffer, response)
Example #11
def create_tearsheet(infilepath_or_buffer,
                     outfilepath_or_buffer=None,
                     simple=None,
                     live_start_date=None,
                     slippage=None,
                     hide_positions=None,
                     bayesian=None,
                     round_trips=None,
                     bootstrap=None):
    """
    Create a pyfolio PDF tear sheet from a Zipline backtest result.

    Parameters
    ----------
    infilepath_or_buffer : str, required
        the CSV file from a Zipline backtest (specify '-' to read file from stdin)

    outfilepath_or_buffer : str or file-like, optional
        the location to write the pyfolio tear sheet (write to stdout if omitted)

    simple : bool
        create a simple tear sheet (default is to create a full tear sheet)

    live_start_date : str (YYYY-MM-DD), optional
        date when the strategy began live trading

    slippage : int or float, optional
        basis points of slippage to apply to returns before generating tear sheet
        stats and plots

    hide_positions : bool
        don't output any symbol names

    bayesian : bool
        include a Bayesian tear sheet

    round_trips : bool
        include a round-trips tear sheet

    bootstrap : bool
        perform bootstrap analysis for the performance metrics (takes a few minutes
        longer)

    Returns
    -------
    None
    """
    params = {}
    if simple:
        params["simple"] = simple
    if live_start_date:
        params["live_start_date"] = live_start_date
    if slippage:
        params["slippage"] = slippage
    if hide_positions:
        params["hide_positions"] = hide_positions
    if bayesian:
        params["bayesian"] = bayesian
    if round_trips:
        params["round_trips"] = round_trips
    if bootstrap:
        params["bootstrap"] = bootstrap

    url = "/zipline/tearsheets"
    # Pyfolio can take a long time, particularly for Bayesian analysis
    timeout = 60 * 60 * 5

    if infilepath_or_buffer == "-":
        infilepath_or_buffer = sys.stdin.buffer if six.PY3 else sys.stdin
        response = houston.post(url,
                                data=infilepath_or_buffer,
                                params=params,
                                timeout=timeout)

    elif infilepath_or_buffer and hasattr(infilepath_or_buffer, "read"):
        if infilepath_or_buffer.seekable():
            infilepath_or_buffer.seek(0)
        response = houston.post(url,
                                data=infilepath_or_buffer,
                                params=params,
                                timeout=timeout)

    else:
        with open(infilepath_or_buffer, "rb") as f:
            response = houston.post(url,
                                    data=f,
                                    params=params,
                                    timeout=timeout)

    houston.raise_for_status_with_json(response)

    outfilepath_or_buffer = outfilepath_or_buffer or sys.stdout
    write_response_to_filepath_or_buffer(outfilepath_or_buffer, response)
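
A usage sketch: generate a full tear sheet, applying 5 basis points of
slippage and marking an out-of-sample start date (the filenames and date are
illustrative):

>>> create_tearsheet("results.csv", "tearsheet.pdf",
                     live_start_date="2018-01-01",
                     slippage=5)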
Example #12
def round_to_tick_sizes(infilepath_or_buffer, round_fields,
                        how=None, append_ticksize=False,
                        outfilepath_or_buffer=None):
    """
    Round prices in a CSV file to valid tick sizes.

    CSV should contain columns `ConId`, `Exchange`, and the columns to be rounded
    (e.g. `LmtPrice`). Additional columns will be ignored and returned unchanged.

    Parameters
    ----------
    infilepath_or_buffer : str or file-like object, required
        CSV file with prices to be rounded (specify '-' to read file from stdin)

    round_fields : list of str, required
        columns to be rounded

    how : str, optional
        which direction to round to. Possible choices: 'up', 'down', 'nearest'
        (default is 'nearest')

    append_ticksize : bool
        append a column of tick sizes for each field to be rounded (default False)

    outfilepath_or_buffer : str or file-like object
        filepath to write the data to, or file-like object (defaults to stdout)

    Returns
    -------
    None
    """

    params = {}
    if round_fields:
        params["round_fields"] = round_fields
    if how:
        params["how"] = how
    if append_ticksize:
        params["append_ticksize"] = append_ticksize

    url = "/master/ticksizes.csv"

    if infilepath_or_buffer == "-":
        # No-op if an empty file is passed on stdin
        f = six.StringIO(sys.stdin.read())
        if not f.getvalue():
            return

        response = houston.get(url, params=params, data=to_bytes(f))

    elif infilepath_or_buffer and hasattr(infilepath_or_buffer, "read"):
        if infilepath_or_buffer.seekable():
            infilepath_or_buffer.seek(0)
        response = houston.get(url, params=params, data=to_bytes(infilepath_or_buffer))

    elif infilepath_or_buffer:
        with open(infilepath_or_buffer, "rb") as f:
            response = houston.get(url, params=params, data=f)
    else:
        raise ValueError("infilepath_or_buffer is required")

    houston.raise_for_status_with_json(response)

    filepath_or_buffer = outfilepath_or_buffer or sys.stdout
    write_response_to_filepath_or_buffer(filepath_or_buffer, response)
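
A usage sketch: round the LmtPrice column in an orders file to the nearest
valid tick size (the filenames are illustrative):

>>> round_to_tick_sizes("orders.csv", round_fields=["LmtPrice"],
                        outfilepath_or_buffer="rounded_orders.csv")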
Example #13
def download_order_statuses(filepath_or_buffer=None,
                            output="csv",
                            order_ids=None,
                            conids=None,
                            order_refs=None,
                            accounts=None,
                            open_orders=None,
                            start_date=None,
                            end_date=None,
                            fields=None):
    """
    Download order statuses.

    Parameters
    ----------
    filepath_or_buffer : str or file-like object
        filepath to write the data to, or file-like object (defaults to stdout)

    output : str
        output format (json or csv, default is csv)

    order_ids : list of str, optional
        limit to these order IDs

    conids : list of int, optional
        limit to orders for these conids

    order_refs : list of str, optional
        limit to orders for these order refs

    accounts : list of str, optional
        limit to orders for these accounts

    open_orders : bool
        limit to open orders

    start_date : str (YYYY-MM-DD), optional
        limit to orders submitted on or after this date

    end_date : str (YYYY-MM-DD), optional
        limit to orders submitted on or before this date

    fields : list of str, optional
        return these fields in addition to the default fields (pass '?' or any invalid
        fieldname to see available fields)

    Returns
    -------
    None

    Examples
    --------
    Download order status by order ID and load into Pandas:

    >>> f = io.StringIO()
    >>> download_order_statuses(f, order_ids=['6001:45','6001:46'])
    >>> order_statuses = pd.read_csv(f)

    Download order status for all open orders and include extra fields in output:

    >>> download_order_statuses(open_orders=True, fields=["LmtPrice", "OcaGroup"])

    Download order status of open orders by conid:

    >>> download_order_statuses(conids=[123456], open_orders=True)

    Download order status of open orders by order ref:

    >>> download_order_statuses(order_refs=['my-strategy'], open_orders=True)
    """
    params = {}
    if order_ids:
        params["order_ids"] = order_ids
    if conids:
        params["conids"] = conids
    if order_refs:
        params["order_refs"] = order_refs
    if accounts:
        params["accounts"] = accounts
    if open_orders:
        params["open_orders"] = open_orders
    if fields:
        params["fields"] = fields
    if start_date:
        params["start_date"] = start_date
    if end_date:
        params["end_date"] = end_date

    output = output or "csv"

    if output not in ("csv", "json"):
        raise ValueError("Invalid ouput: {0}".format(output))

    response = houston.get("/blotter/orders.{0}".format(output), params=params)

    houston.raise_for_status_with_json(response)

    # Don't write a null response to file
    if response.content[:4] == b"null":
        return

    filepath_or_buffer = filepath_or_buffer or sys.stdout

    write_response_to_filepath_or_buffer(filepath_or_buffer, response)
Example #14
def ml_walkforward(strategy,
                   start_date,
                   end_date,
                   train,
                   min_train=None,
                   rolling_train=None,
                   model_filepath=None,
                   force_nonincremental=None,
                   segment=None,
                   allocation=None,
                   nlv=None,
                   params=None,
                   details=None,
                   progress=False,
                   filepath_or_buffer=None):
    """
    Run a walk-forward optimization of a machine learning strategy.

    The date range will be split into segments of `train` size. For each
    segment, the model will be trained with the data, then the trained model will
    be backtested on the following segment.

    By default, uses scikit-learn's StandardScaler+SGDRegressor. Also supports other
    scikit-learn models/pipelines and Keras models. To customize model, instantiate
    the model locally, serialize it to disk, and pass the path of the serialized
    model as `model_filepath`.

    Supports expanding walk-forward optimizations (the default), which use an anchored start date
    for model training, or rolling walk-forward optimizations (by specifying `rolling_train`),
    which use a rolling or non-anchored start date for model training.

    Returns a backtest results CSV and a dump of the machine learning model
    as of the end of the analysis.

    Parameters
    ----------
    strategy : str, required
        the strategy code

    start_date : str (YYYY-MM-DD), required
        the analysis start date (note that model training will start on this date
        but backtesting will not start until after the initial training period)

    end_date : str (YYYY-MM-DD), required
        the analysis end date

    train : str, required
        train model this frequently (use Pandas frequency string, e.g. 'A'
        for annual training or 'Q' for quarterly training)

    min_train : str, optional
        don't backtest until at least this much model training has occurred;
        defaults to the length of `train` if not specified (use Pandas frequency
        string, e.g. '5Y' for 5 years of initial training)

    rolling_train : str, optional
        train model with a rolling window of this length; if omitted, train
        model with an expanding window (use Pandas frequency string, e.g. '3Y' for
        a 3-year rolling training window)

    model_filepath : str, optional
        filepath of serialized model to use, filename must end in ".joblib" or
        ".pkl" (if omitted, default model is scikit-learn's StandardScaler+SGDRegressor)

    force_nonincremental : bool, optional
        force the model to be trained non-incrementally (i.e. load entire training
        data set into memory) even if it supports incremental learning. Must be True
        in order to perform a rolling (as opposed to expanding) walk-forward optimization
        with a model that supports incremental learning. Default False.

    segment : str, optional
        train and backtest in date segments of this size, to reduce memory usage;
        must be smaller than `train`/`min_train` or will have no effect (use Pandas frequency string,
        e.g. 'A' for annual segments or 'Q' for quarterly segments)

    allocation : float, optional
        the allocation for the strategy (default 1.0)

    nlv : dict of CURRENCY:NLV, optional
        the NLV (net liquidation value, i.e. account balance) to assume for
        the backtest, expressed in each currency represented in the backtest (pass
        as {currency:nlv})

    params : dict of PARAM:VALUE, optional
        one or more strategy params to set on the fly before backtesting
        (pass as {param:value})

    details : bool
        return detailed results for all securities instead of aggregating

    progress : bool
        log status and Sharpe ratios of each walk-forward segment during analysis
        (default False)

    filepath_or_buffer : str, optional
        the location to write the ZIP file to; or, if path ends with "*", the
        pattern to use for extracting the zipped files. For example, if the path is
        my_ml*, files will be extracted to my_ml_results.csv and my_ml_trained_model.joblib.

    Returns
    -------
    None

    Examples
    --------
    Run a walk-forward optimization using the default model and retrain the model
    annually, writing the backtest results and trained model to demo_ml_results.csv
    and demo_ml_trained_model.joblib, respectively:

    >>> ml_walkforward(
            "demo-ml",
            "2007-01-01",
            "2018-12-31",
            train="A",
            filepath_or_buffer="demo_ml*")

    Create a scikit-learn model, serialize it with joblib, and use it to
    run the walkforward backtest:

    >>> from sklearn.linear_model import SGDClassifier
    >>> import joblib
    >>> clf = SGDClassifier()
    >>> joblib.dump(clf, "my_model.joblib")
    >>> ml_walkforward(
            "demo-ml",
            "2007-01-01",
            "2018-12-31",
            train="A",
            model_filepath="my_model.joblib",
            filepath_or_buffer="demo_ml*")

    Run a walk-forward optimization using a custom model (serialized with joblib),
    retrain the model annually, don't perform backtesting until after 5 years
    of initial training, and further split the training and backtesting into
    quarterly segments to reduce memory usage:

    >>> ml_walkforward(
            "demo-ml",
            "2007-01-01",
            "2018-12-31",
            model_filepath="my_model.joblib",
            train="A",
            min_train="5Y",
            segment="Q",
            filepath_or_buffer="demo_ml*")

    Create a Keras model, serialize it, and use it to run the walkforward backtest:

    >>> from keras.models import Sequential
    >>> from keras.layers import Dense
    >>> model = Sequential()
    >>> # input_dim should match number of features in training data
    >>> model.add(Dense(units=4, activation='relu', input_dim=5))
    >>> # last layer should have a single unit
    >>> model.add(Dense(units=1, activation='softmax'))
    >>> model.compile(loss='sparse_categorical_crossentropy',
                      optimizer='sgd',
                      metrics=['accuracy'])
    >>> model.save('my_model.keras.h5')
    >>> ml_walkforward(
            "neuralnet-ml",
            "2007-01-01",
            "2018-12-31",
            train="A",
            model_filepath="my_model.keras.h5",
            filepath_or_buffer="neuralnet_ml*")
    """
    _params = {}

    _params["start_date"] = start_date
    _params["end_date"] = end_date
    _params["train"] = train
    if min_train:
        _params["min_train"] = min_train
    if rolling_train:
        _params["rolling_train"] = rolling_train
    if force_nonincremental:
        _params["force_nonincremental"] = force_nonincremental
    if segment:
        _params["segment"] = segment
    if allocation:
        _params["allocation"] = allocation
    if nlv:
        _params["nlv"] = dict_to_dict_strs(nlv)
    if details:
        _params["details"] = details
    if progress:
        _params["progress"] = progress
    if params:
        _params["params"] = dict_to_dict_strs(params)

    url = "/moonshot/ml/walkforward/{0}.zip".format(strategy)

    if model_filepath:
        # Send the filename as a hint how to open it
        _params["model_filename"] = os.path.basename(model_filepath)

        with open(model_filepath, "rb") as f:
            response = houston.post(url,
                                    data=f,
                                    params=_params,
                                    timeout=60 * 60 * 24)
    else:
        response = houston.post(url, params=_params, timeout=60 * 60 * 24)

    houston.raise_for_status_with_json(response)

    filepath_or_buffer = filepath_or_buffer or sys.stdout

    auto_extract = isinstance(
        filepath_or_buffer,
        six.string_types) and filepath_or_buffer.endswith("*")

    if auto_extract:
        base_filepath = filepath_or_buffer[:-1]
        zipfilepath = base_filepath + ".zip"

        write_response_to_filepath_or_buffer(zipfilepath, response)

        with ZipFile(zipfilepath, mode="r") as zfile:

            model_filename = [
                name for name in zfile.namelist() if "model" in name
            ][0]
            model_filepath = base_filepath + "_" + model_filename
            csv_filepath = base_filepath + "_results.csv"

            with open(csv_filepath, "wb") as csvfile:
                csvfile.write(zfile.read("results.csv"))

            with open(model_filepath, "wb") as modelfile:
                modelfile.write(zfile.read(model_filename))

        os.remove(zipfilepath)

    else:
        write_response_to_filepath_or_buffer(filepath_or_buffer, response)
Example #15
def scan_parameters(strategies,
                    start_date=None,
                    end_date=None,
                    segment=None,
                    param1=None,
                    vals1=None,
                    param2=None,
                    vals2=None,
                    allocations=None,
                    nlv=None,
                    params=None,
                    output="csv",
                    csv=None,
                    filepath_or_buffer=None):
    """
    Run a parameter scan for one or more strategies.

    By default returns a CSV of scan results but can also return a PDF tear sheet.

    Parameters
    ----------
    strategies : list of str, required
        one or more strategy codes

    start_date : str (YYYY-MM-DD), optional
        the backtest start date (default is to use all available history)

    end_date : str (YYYY-MM-DD), optional
        the backtest end date (default is to use all available history)

    segment : str, optional
        backtest in date segments of this size, to reduce memory usage
        (use Pandas frequency string, e.g. 'A' for annual segments or 'Q'
        for quarterly segments)

    param1 : str, required
        the name of the parameter to test (a class attribute on the strategy)

    vals1 : list of int/float/str/tuple, required
        parameter values to test (values can be ints, floats, strings, False,
        True, None, 'default' (to test current param value), or lists of
        ints/floats/strings)

    param2 : str, optional
        name of a second parameter to test (for 2-D parameter scans)

    vals2 : list of int/float/str/tuple, optional
        values to test for parameter 2 (values can be ints, floats, strings,
        False, True, None, 'default' (to test current param value), or lists
        of ints/floats/strings)

    allocations : dict of CODE:FLOAT, optional
        the allocation for each strategy, passed as {code:allocation} (default
        allocation is 1.0 / number of strategies)

    nlv : dict of CURRENCY:NLV, optional
        the NLV (net liquidation value, i.e. account balance) to assume for
        the backtest, expressed in each currency represented in the backtest (pass
        as {currency:nlv})

    params : dict of PARAM:VALUE, optional
        one or more strategy params to set on the fly before backtesting
        (pass as {param:value})

    output : str, required
        the output format (choices are csv or pdf)

    csv : bool
        DEPRECATED: this argument will be removed in a future version. This argument
        may be omitted as CSV is the default.

    filepath_or_buffer : str, optional
        the location to write the results file (omit to write to stdout)

    Returns
    -------
    None

    Examples
    --------
    Run a parameter scan for several different moving averages on a strategy
    called trend-friend and return a CSV (which can be rendered with Moonchart):

    >>> scan_parameters("trend-friend",
                        param1="MAVG_WINDOW",
                        vals1=[20, 50, 100],
                        filepath_or_buffer="trend_friend_MAVG_WINDOW.csv")

    Run a 2-D parameter scan for multiple strategies and return a CSV:

    >>> scan_parameters(["strat1", "strat2", "strat3"],
                        param1="MIN_STD",
                        vals1=[1, 1.5, 2],
                        param2="STD_WINDOW",
                        vals2=[20, 50, 100, 200],
                        filepath_or_buffer="strategies_MIN_STD_and_STD_WINDOW.csv")

    Run a parameter scan in 1-year segments to reduce memory usage:

    >>> scan_parameters("big-strategy",
                        start_date="2000-01-01",
                        end_date="2018-01-01",
                        segment="A",
                        param1="MAVG_WINDOW",
                        vals1=[20, 50, 100],
                        filepath_or_buffer="big_strategy_MAVG_WINDOW.csv")
    """
    output = output or "csv"

    if output not in ("csv", "pdf"):
        raise ValueError(
            "invalid output: {0} (choices are csv or pdf".format(output))

    if csv is not None:
        import warnings
        warnings.warn(
            "the `csv` argument is deprecated and will removed in a future version; "
            "this argument may be omitted as csv is the default",
            DeprecationWarning)

    _params = {}
    if strategies:
        _params["strategies"] = strategies
    if start_date:
        _params["start_date"] = start_date
    if end_date:
        _params["end_date"] = end_date
    if segment:
        _params["segment"] = segment
    if param1:
        _params["param1"] = param1
    if vals1:
        _params["vals1"] = [str(v) for v in vals1]
    if param2:
        _params["param2"] = param2
    if vals2:
        _params["vals2"] = [str(v) for v in vals2]
    if allocations:
        _params["allocations"] = dict_to_dict_strs(allocations)
    if nlv:
        _params["nlv"] = dict_to_dict_strs(nlv)
    if params:
        _params["params"] = dict_to_dict_strs(params)

    response = houston.post("/moonshot/paramscans.{0}".format(output),
                            params=_params,
                            timeout=60 * 60 * 24)

    houston.raise_for_status_with_json(response)

    filepath_or_buffer = filepath_or_buffer or sys.stdout
    write_response_to_filepath_or_buffer(filepath_or_buffer, response)
Example #16
def scan_parameters(strategies, start_date=None, end_date=None,
                    param1=None, vals1=None, param2=None, vals2=None,
                    allocations=None, nlv=None, params=None, output="csv",
                    csv=None, filepath_or_buffer=None):
    """
    Run a parameter scan for one or more strategies.

    By default returns a CSV of scan results but can also return a PDF tear sheet.

    Parameters
    ----------
    strategies : list of str, required
        one or more strategy codes

    start_date : str (YYYY-MM-DD), optional
        the backtest start date (default is to use all available history)

    end_date : str (YYYY-MM-DD), optional
        the backtest end date (default is to use all available history)

    param1 : str, required
        the name of the parameter to test (a class attribute on the strategy)

    vals1 : list of int/float/str/tuple, required
        parameter values to test (values can be ints, floats, strings, False,
        True, None, 'default' (to test current param value), or lists of
        ints/floats/strings)

    param2 : str, optional
        name of a second parameter to test (for 2-D parameter scans)

    vals2 : list of int/float/str/tuple, optional
        values to test for parameter 2 (values can be ints, floats, strings,
        False, True, None, 'default' (to test current param value), or lists
        of ints/floats/strings)

    allocations : dict of CODE:FLOAT, optional
        the allocation for each strategy, passed as {code:allocation} (default
        allocation is 1.0 / number of strategies)

    nlv : dict of CURRENCY:NLV, optional
        the NLV (net liquidation value, i.e. account balance) to assume for
        the backtest, expressed in each currency represented in the backtest (pass
        as {currency:nlv})

    params : dict of PARAM:VALUE, optional
        one or more strategy params to set on the fly before backtesting
        (pass as {param:value})

    output : str, required
        the output format (choices are csv or pdf)

    csv : bool
        DEPRECATED: this argument will be removed in a future version. This argument
        may be omitted as CSV is the default.

    filepath_or_buffer : str, optional
        the location to write the results file (omit to write to stdout)

    Returns
    -------
    None
    """
    output = output or "csv"

    if output not in ("csv", "pdf"):
        raise ValueError("invalid output: {0} (choices are csv or pdf".format(output))

    if csv is not None:
        import warnings
        warnings.warn(
            "the `csv` argument is deprecated and will removed in a future version; "
            "this argument may be omitted as csv is the default", DeprecationWarning)

    _params = {}
    if strategies:
        _params["strategies"] = strategies
    if start_date:
        _params["start_date"] = start_date
    if end_date:
        _params["end_date"] = end_date
    if param1:
        _params["param1"] = param1
    if vals1:
        _params["vals1"] = [str(v) for v in vals1]
    if param2:
        _params["param2"] = param2
    if vals2:
        _params["vals2"] = [str(v) for v in vals2]
    if allocations:
        _params["allocations"] = dict_to_dict_strs(allocations)
    if nlv:
        _params["nlv"] = dict_to_dict_strs(nlv)
    if params:
        _params["params"] = dict_to_dict_strs(params)

    response = houston.post("/moonshot/paramscans.{0}".format(output), params=_params, timeout=60*60*24)

    houston.raise_for_status_with_json(response)

    filepath_or_buffer = filepath_or_buffer or sys.stdout
    write_response_to_filepath_or_buffer(filepath_or_buffer, response)
Example #17
def download_pnl(filepath_or_buffer=None,
                 order_refs=None,
                 accounts=None,
                 conids=None,
                 start_date=None,
                 end_date=None,
                 time=None,
                 details=False,
                 output="csv"):
    """
    Query trading performance and return a CSV of results or PDF tearsheet.

    Trading performance is broken down by account and order ref and optionally by
    conid.

    Parameters
    ----------
    filepath_or_buffer : str or file-like object
        filepath to write the data to, or file-like object (defaults to stdout)

    order_refs : list of str, optional
        limit to these order refs

    accounts : list of str, optional
        limit to these accounts

    conids : list of int, optional
        limit to these conids

    start_date : str (YYYY-MM-DD), optional
        limit to pnl on or after this date

    end_date : str (YYYY-MM-DD), optional
        limit to pnl on or before this date

    time : str (HH:MM:SS [TZ]), optional
        time of day (with optional timezone) for which to calculate daily PNL (default is
        11:59:59 UTC)

    details : bool
        return detailed results for all securities instead of aggregating to
        account/order ref level (only supported for a single account and order ref
        at a time)

    output : str, required
        the output format (choices are csv or pdf, default is csv)

    Returns
    -------
    None
    """
    params = {}
    if order_refs:
        params["order_refs"] = order_refs
    if accounts:
        params["accounts"] = accounts
    if conids:
        params["conids"] = conids
    if start_date:
        params["start_date"] = start_date
    if end_date:
        params["end_date"] = end_date
    if time:
        params["time"] = time
    if details:
        params["details"] = details

    output = output or "csv"

    if output not in ("csv", "pdf"):
        raise ValueError(
            "invalid output: {0} (choices are csv or pdf".format(output))

    response = houston.get("/blotter/pnl.{0}".format(output),
                           params=params,
                           timeout=60 * 10)

    houston.raise_for_status_with_json(response)

    filepath_or_buffer = filepath_or_buffer or sys.stdout

    write_response_to_filepath_or_buffer(filepath_or_buffer, response)
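
A usage sketch: download a PDF tear sheet of PNL for one order ref (the order
ref, date, and filename are illustrative):

>>> download_pnl("pnl.pdf", order_refs=["my-strategy"],
                 start_date="2018-01-01", output="pdf")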
Example #18
def run_algorithm(algofile,
                  data_frequency=None,
                  capital_base=None,
                  bundle=None,
                  bundle_timestamp=None,
                  start=None,
                  end=None,
                  filepath_or_buffer=None,
                  calendar=None):
    """
    Run a Zipline backtest and write the test results to a CSV file.

    The CSV result file contains several DataFrames stacked into one: the Zipline performance
    results, plus the extracted returns, transactions, positions, and benchmark returns from those
    results.

    Parameters
    ----------
    algofile : str, required
        the file that contains the algorithm to run

    data_frequency : str, optional
        the data frequency of the simulation. Possible choices: daily, minute (default is daily)

    capital_base : float, optional
        the starting capital for the simulation (default is 10000000.0)

    bundle : str, optional
        the data bundle to use for the simulation (default is quantopian-quandl)

    bundle_timestamp : str, optional
        the date to lookup data on or before (default is <current-time>)

    start : str (YYYY-MM-DD), required
        the start date of the simulation

    end : str (YYYY-MM-DD), required
        the end date of the simulation

    filepath_or_buffer : str, optional
        the location to write the output file (omit to write to stdout)

    calendar : str, optional
        the calendar you want to use e.g. LSE. NYSE is the default.

    Returns
    -------
    None

    Examples
    --------
    Run a backtest and load the results into pandas.

    >>> from quantrocket.zipline import run_algorithm
    >>> import pandas as pd
    >>> import io
    >>> f = io.StringIO()
    >>> run_algorithm("momentum_pipeline.py", bundle="my-bundle", start="2015-02-04", end="2015-12-31", filepath_or_buffer=f)
    >>> results = pd.read_csv(f, index_col=["dataframe", "index", "column"])["value"]

    To use the results with pyfolio, extract and massage the returns, positions,
    transactions, and benchmark returns:

    >>> # Extract returns
    >>> returns = results.loc["returns"].unstack()
    >>> returns.index = returns.index.droplevel(0).tz_localize("UTC")
    >>> returns = returns["returns"].astype(float)
    >>> # Extract positions
    >>> positions = results.loc["positions"].unstack()
    >>> positions.index = positions.index.droplevel(0).tz_localize("UTC")
    >>> positions = positions.astype(float)
    >>> # Extract transactions
    >>> transactions = results.loc["transactions"].unstack()
    >>> transactions.index = transactions.index.droplevel(0).tz_localize("UTC")
    >>> transactions = transactions.apply(pd.to_numeric, errors='ignore')
    >>> # Extract benchmark
    >>> benchmark_returns = results.loc["benchmark"].unstack()
    >>> benchmark_returns.index = benchmark_returns.index.droplevel(0).tz_localize("UTC")
    >>> benchmark_returns = benchmark_returns["benchmark"].astype(float)

    Ready for pyfolio:

    >>> pf.create_full_tear_sheet(returns, positions=positions, transactions=transactions, benchmark_rets=benchmark_returns)
    """
    params = {}
    if data_frequency:
        params["data_frequency"] = data_frequency
    if capital_base:
        params["capital_base"] = capital_base
    if bundle:
        params["bundle"] = bundle
    if bundle_timestamp:
        params["bundle_timestamp"] = bundle_timestamp
    if start:
        params["start"] = start
    if end:
        params["end"] = end
    if calendar:
        params["calendar"] = calendar

    response = houston.post("/zipline/backtests/{0}".format(algofile),
                            params=params,
                            timeout=60 * 60 * 3)

    houston.raise_for_status_with_json(response)

    filepath_or_buffer = filepath_or_buffer or sys.stdout
    write_response_to_filepath_or_buffer(filepath_or_buffer, response)
Example #19
def close_positions(filepath_or_buffer=None,
                    output="csv",
                    order_refs=None,
                    accounts=None,
                    conids=None,
                    params=None):
    """
    Generate orders to close positions.

    Doesn't actually place any orders but returns an orders file that can be placed
    separately. Additional order parameters can be appended with the `params` argument.

    Parameters
    ----------
    filepath_or_buffer : str or file-like object
        filepath to write the data to, or file-like object (defaults to stdout)

    output : str
        output format (json or csv, default is csv)

    order_refs : list of str, optional
        limit to these order refs

    accounts : list of str, optional
        limit to these accounts

    conids : list of int, optional
        limit to these conids

    params : dict of PARAM:VALUE, optional
        additional parameters to append to each row in output (pass as {param:value},
        for example {"OrderType":"MKT"})

    Returns
    -------
    None

    Examples
    --------
    Get orders to close positions, then place the orders:

    >>> from quantrocket.blotter import place_orders, close_positions
    >>> import io
    >>> orders_file = io.StringIO()
    >>> close_positions(orders_file, params={"OrderType":"MKT", "Tif":"DAY", "Exchange":"SMART"})
    >>> place_orders(infilepath_or_buffer=orders_file)
    """
    _params = {}
    if order_refs:
        _params["order_refs"] = order_refs
    if accounts:
        _params["accounts"] = accounts
    if conids:
        _params["conids"] = conids
    if params:
        _params["params"] = dict_to_dict_strs(params)

    output = output or "csv"

    if output not in ("csv", "json"):
        raise ValueError("Invalid ouput: {0}".format(output))

    response = houston.delete("/blotter/positions.{0}".format(output),
                              params=_params)

    houston.raise_for_status_with_json(response)

    # Don't write a null response to file
    if response.content[:4] == b"null":
        return

    filepath_or_buffer = filepath_or_buffer or sys.stdout

    write_response_to_filepath_or_buffer(filepath_or_buffer, response)
Example #20
def download_positions(filepath_or_buffer=None,
                       output="csv",
                       order_refs=None,
                       accounts=None,
                       conids=None,
                       view="blotter",
                       diff=False):
    """
    Query current positions and write results to file.

    To return positions as a Python list, see list_positions.

    There are two ways to view positions: blotter view (default) and broker view.

    The default "blotter view" returns positions by account, conid, and order ref. Positions
    are tracked based on execution records saved to the blotter database.

    "Broker view" (view='broker') returns positions by account and conid (but
    not order ref) as reported directly by IB. Broker view is more authoritative but less
    informative than blotter view. Broker view is typically used to verify the accuracy
    of blotter view.

    Parameters
    ----------
    filepath_or_buffer : str or file-like object
        filepath to write the data to, or file-like object (defaults to stdout)

    output : str
        output format (json or csv, default is csv)

    order_refs : list of str, optional
        limit to these order refs (not supported with broker view)

    accounts : list of str, optional
        limit to these accounts

    conids : list of int, optional
        limit to these conids

    view : str, optional
        whether to return 'broker' view of positions (by account and conid) or
        default 'blotter' view (by account, conid, and order ref). Choices are:
        blotter, broker

    diff : bool
        limit to positions where the blotter quantity and broker quantity disagree
        (requires `view='broker'`)

    Returns
    -------
    None

    See Also
    --------
    list_positions : load positions into Python list
    """
    params = {}
    if order_refs:
        params["order_refs"] = order_refs
    if accounts:
        params["accounts"] = accounts
    if conids:
        params["conids"] = conids
    if view:
        params["view"] = view
    if diff:
        params["diff"] = diff

    output = output or "csv"

    if output not in ("csv", "json"):
        raise ValueError("Invalid ouput: {0}".format(output))

    response = houston.get("/blotter/positions.{0}".format(output),
                           params=params)

    houston.raise_for_status_with_json(response)

    # Don't write a null response to file
    if response.content[:4] == b"null":
        return

    filepath_or_buffer = filepath_or_buffer or sys.stdout

    write_response_to_filepath_or_buffer(filepath_or_buffer, response)
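
A minimal reconciliation sketch, assuming download_positions is importable from quantrocket.blotter (the blotter examples elsewhere in this listing suggest that path). It requests broker view with diff=True so only disagreements between blotter and broker quantities come back, and it relies on the function's documented behavior of writing nothing when the response is null:

import io

import pandas as pd

# assumption: module path for download_positions
from quantrocket.blotter import download_positions

f = io.StringIO()
# diff=True requires view="broker" (see the docstring above)
download_positions(f, view="broker", diff=True)

if not f.getvalue():
    # a null response is never written to the buffer, so empty means agreement
    print("blotter and broker positions agree")
else:
    mismatches = pd.read_csv(f)
    print(mismatches)
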
Example No. 21
def download_history_file(code, filepath_or_buffer=None, output="csv",
                          start_date=None, end_date=None,
                          universes=None, conids=None,
                          exclude_universes=None, exclude_conids=None,
                          times=None, cont_fut=None, fields=None, tz_naive=False):
    """
    Query historical market data from a history database and download to file.

    Parameters
    ----------
    code : str, required
        the code of the database to query

    filepath_or_buffer : str or file-like object
        filepath to write the data to, or file-like object (defaults to stdout)

    output : str
        output format (json, csv, txt, default is csv)

    start_date : str (YYYY-MM-DD), optional
        limit to history on or after this date

    end_date : str (YYYY-MM-DD), optional
        limit to history on or before this date

    universes : list of str, optional
        limit to these universes (default is to return all securities in database)

    conids : list of int, optional
        limit to these conids

    exclude_universes : list of str, optional
        exclude these universes

    exclude_conids : list of int, optional
        exclude these conids

    times : list of str (HH:MM:SS), optional
        limit to these times

    cont_fut : str
        stitch futures into continuous contracts using this method (default is not
        to stitch together). Possible choices: concat

    fields : list of str, optional
        only return these fields (pass ['?'] or any invalid fieldname to see
        available fields)

    tz_naive : bool
        return timestamps without UTC offsets: 2018-02-01T10:00:00 (default is to
        include UTC offsets: 2018-02-01T10:00:00-04:00)

    Returns
    -------
    None

    Examples
    --------
    You can use StringIO to load the CSV into pandas.

    >>> f = io.StringIO()
    >>> download_history_file("my-db", f)
    >>> history = pd.read_csv(f, parse_dates=["Date"])

    See Also
    --------
    quantrocket.get_prices : load prices into a DataFrame
    """
    params = {}
    if start_date:
        params["start_date"] = start_date
    if end_date:
        params["end_date"] = end_date
    if universes:
        params["universes"] = universes
    if conids:
        params["conids"] = conids
    if exclude_universes:
        params["exclude_universes"] = exclude_universes
    if exclude_conids:
        params["exclude_conids"] = exclude_conids
    if times:
        params["times"] = times
    if cont_fut:
        params["cont_fut"] = cont_fut
    if fields:
        params["fields"] = fields
    if tz_naive:
        params["tz_naive"] = tz_naive

    output = output or "csv"

    if output not in ("csv", "json", "txt"):
        raise ValueError("Invalid ouput: {0}".format(output))

    response = houston.get("/history/{0}.{1}".format(code, output), params=params,
                           timeout=60*30)

    try:
        houston.raise_for_status_with_json(response)
    except requests.HTTPError as e:
        # Raise a dedicated exception
        if "no history matches the query parameters" in repr(e).lower():
            raise NoHistoricalData(e)
        raise

    filepath_or_buffer = filepath_or_buffer or sys.stdout

    write_response_to_filepath_or_buffer(filepath_or_buffer, response)
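
Because the query raises a dedicated NoHistoricalData exception when nothing matches, callers can distinguish an empty result from a genuine error. A hedged sketch; the quantrocket.history and quantrocket.exceptions module paths, and the "Close" field name, are assumptions not confirmed by this listing:

import io

import pandas as pd

# assumptions: module paths for the function and the exception
from quantrocket.history import download_history_file
from quantrocket.exceptions import NoHistoricalData

f = io.StringIO()
try:
    # "Close" is an illustrative field name
    download_history_file("my-db", f, start_date="2018-01-01", fields=["Close"])
except NoHistoricalData:
    history = pd.DataFrame()  # no bars matched the query parameters
else:
    history = pd.read_csv(f, parse_dates=["Date"])
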
Example No. 22
def download_master_file(filepath_or_buffer=None, output="csv", exchanges=None, sec_types=None,
                         currencies=None, universes=None, symbols=None, conids=None,
                         exclude_universes=None, exclude_conids=None,
                         sectors=None, industries=None, categories=None,
                         delisted=False, frontmonth=False, fields=None):
    """
    Query security details from the securities master database and download to file.

    Parameters
    ----------
    filepath_or_buffer : str or file-like object
        filepath to write the data to, or file-like object (defaults to stdout)

    output : str
        output format (json, csv, txt, default is csv)

    exchanges : list of str, optional
        limit to these exchanges

    sec_types : list of str, optional
        limit to these security types. Possible choices: STK, ETF, FUT, CASH, IND, OPT, FOP

    currencies : list of str, optional
        limit to these currencies

    universes : list of str, optional
        limit to these universes

    symbols : list of str, optional
        limit to these symbols

    conids : list of int, optional
        limit to these conids

    exclude_universes : list of str, optional
        exclude these universes

    exclude_conids : list of int, optional
        exclude these conids

    sectors : list of str, optional
        limit to these sectors

    industries : list of str, optional
        limit to these industries

    categories : list of str, optional
        limit to these categories

    delisted : bool
        include delisted securities (default False)

    frontmonth : bool
        exclude backmonth and expired futures contracts (default False)

    fields : list of str, optional
        only return these fields (pass ['?'] or any invalid fieldname to see
        available fields)

    Returns
    -------
    None

    Examples
    --------
    You can use StringIO to load the CSV into pandas.

    >>> f = io.StringIO()
    >>> download_master_file(f, universes=["my-universe"])
    >>> securities = pd.read_csv(f)
    """
    params = {}
    if exchanges:
        params["exchanges"] = exchanges
    if sec_types:
        params["sec_types"] = sec_types
    if currencies:
        params["currencies"] = currencies
    if universes:
        params["universes"] = universes
    if symbols:
        params["symbols"] = symbols
    if conids:
        params["conids"] = conids
    if exclude_universes:
        params["exclude_universes"] = exclude_universes
    if exclude_conids:
        params["exclude_conids"] = exclude_conids
    if sectors:
        params["sectors"] = sectors
    if industries:
        params["industries"] = industries
    if categories:
        params["categories"] = categories
    if delisted:
        params["delisted"] = delisted
    if frontmonth:
        params["frontmonth"] = frontmonth
    if fields:
        params["fields"] = fields

    output = output or "csv"

    if output not in ("csv", "json", "txt"):
        raise ValueError("Invalid ouput: {0}".format(output))

    response = houston.get("/master/securities.{0}".format(output), params=params)

    houston.raise_for_status_with_json(response)

    filepath_or_buffer = filepath_or_buffer or sys.stdout

    write_response_to_filepath_or_buffer(filepath_or_buffer, response)
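
Per the fields parameter above, passing ['?'] (or any invalid field name) makes the service reject the query and report the valid field names. Since raise_for_status_with_json surfaces this as a requests.HTTPError, field discovery is a small try/except; a sketch, assuming download_master_file is importable from quantrocket.master:

import requests

# assumption: module path for download_master_file
from quantrocket.master import download_master_file

try:
    # '?' is not a real field, so the service responds with the valid choices
    download_master_file(fields=["?"])
except requests.HTTPError as e:
    print(e)  # the error payload enumerates the available fields
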
Example No. 23
def backtest(strategies,
             start_date=None,
             end_date=None,
             segment=None,
             allocations=None,
             nlv=None,
             params=None,
             details=None,
             output="csv",
             csv=None,
             filepath_or_buffer=None):
    """
    Backtest one or more strategies.

    By default returns a CSV of backtest results but can also return a PDF tear sheet
    of performance charts.

    If testing multiple strategies, each column in the CSV represents a strategy.
    If testing a single strategy and `details=True`, each column in the CSV
    represents a security in the strategy universe.

    Parameters
    ----------
    strategies : list of str, required
        one or more strategy codes

    start_date : str (YYYY-MM-DD), optional
        the backtest start date (default is to use all available history)

    end_date : str (YYYY-MM-DD), optional
        the backtest end date (default is to use all available history)

    segment : str, optional
        backtest in date segments of this size, to reduce memory usage
        (use Pandas frequency string, e.g. 'A' for annual segments or 'Q'
        for quarterly segments)

    allocations : dict of CODE:FLOAT, optional
        the allocation for each strategy, passed as {code:allocation} (default
        allocation is 1.0 / number of strategies)

    nlv : dict of CURRENCY:NLV, optional
        the NLV (net liquidation value, i.e. account balance) to assume for
        the backtest, expressed in each currency represented in the backtest (pass
        as {currency:nlv})

    params : dict of PARAM:VALUE, optional
        one or more strategy params to set on the fly before backtesting
        (pass as {param:value})

    details : bool
        return detailed results for all securities instead of aggregating to
        strategy level (only supported for single-strategy backtests)

    output : str, required
        the output format (choices are csv or pdf)

    csv : bool
       DEPRECATED: this argument will be removed in a future version. This argument
       may be omitted as CSV is the default.

    filepath_or_buffer : str, optional
        the location to write the results file (omit to write to stdout)

    Returns
    -------
    None

    Examples
    --------
    Backtest several HML (High Minus Low) strategies from 2005-2015 and return a
    CSV of results:

    >>> backtest(["hml-us", "hml-eur", "hml-asia"],
                 start_date="2005-01-01",
                 end_date="2015-12-31",
                 filepath_or_buffer="hml_results.csv")

    Run a backtest in 1-year segments to reduce memory usage:

    >>> backtest("big-strategy",
                 start_date="2000-01-01",
                 end_date="2018-01-01",
                 segment="A",
                 filepath_or_buffer="results.csv")

    See Also
    --------
    read_moonshot_csv : load a Moonshot backtest CSV into a DataFrame
    """
    output = output or "csv"

    if output not in ("csv", "pdf"):
        raise ValueError(
            "invalid output: {0} (choices are csv or pdf".format(output))

    if csv is not None:
        import warnings
        warnings.warn(
            "the `csv` argument is deprecated and will removed in a future version; "
            "this argument may be omitted as csv is the default",
            DeprecationWarning)

    _params = {}

    if strategies:
        _params["strategies"] = strategies
    if start_date:
        _params["start_date"] = start_date
    if end_date:
        _params["end_date"] = end_date
    if segment:
        _params["segment"] = segment
    if allocations:
        _params["allocations"] = dict_to_dict_strs(allocations)
    if nlv:
        _params["nlv"] = dict_to_dict_strs(nlv)
    if details:
        _params["details"] = details
    if params:
        _params["params"] = dict_to_dict_strs(params)

    response = houston.post("/moonshot/backtests.{0}".format(output),
                            params=_params,
                            timeout=60 * 60 * 24)

    houston.raise_for_status_with_json(response)

    filepath_or_buffer = filepath_or_buffer or sys.stdout
    write_response_to_filepath_or_buffer(filepath_or_buffer, response)
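
The segment and params arguments compose: a sketch that overrides a strategy parameter on the fly while backtesting in annual segments, then loads the CSV with read_moonshot_csv (named in See Also above). The quantrocket.moonshot module path and the REBALANCE_INTERVAL parameter name are illustrative assumptions:

# assumption: both functions live in quantrocket.moonshot
from quantrocket.moonshot import backtest, read_moonshot_csv

# REBALANCE_INTERVAL is a hypothetical strategy param, shown for illustration
backtest("big-strategy",
         start_date="2000-01-01",
         end_date="2018-01-01",
         segment="A",  # annual segments to cap memory usage
         params={"REBALANCE_INTERVAL": "W"},
         filepath_or_buffer="results.csv")

results = read_moonshot_csv("results.csv")
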
Example No. 24
def download_market_data_file(code, filepath_or_buffer=None, output="csv",
                              start_date=None, end_date=None,
                              universes=None, conids=None,
                              exclude_universes=None, exclude_conids=None,
                              fields=None):
    """
    Query market data from a tick database or aggregate database and download to file.

    Parameters
    ----------
    code : str, required
        the code of the tick database or aggregate database to query

    filepath_or_buffer : str or file-like object
        filepath to write the data to, or file-like object (defaults to stdout)

    output : str
        output format (json, csv, default is csv)

    start_date : str (YYYY-MM-DD HH:MM:SS), optional
        limit to market data on or after this datetime. Can pass a date (YYYY-MM-DD),
        datetime with optional timezone (YYYY-MM-DD HH:MM:SS TZ), or time with
        optional timezone. A time without date will be interpreted as referring to
        today if the time is earlier than now, or yesterday if the time is later than
        now.

    end_date : str (YYYY-MM-DD HH:MM:SS), optional
        limit to market data on or before this datetime. Can pass a date (YYYY-MM-DD),
        datetime with optional timezone (YYYY-MM-DD HH:MM:SS TZ), or time with
        optional timezone.

    universes : list of str, optional
        limit to these universes (default is to return all securities in database)

    conids : list of int, optional
        limit to these conids

    exclude_universes : list of str, optional
        exclude these universes

    exclude_conids : list of int, optional
        exclude these conids

    fields : list of str, optional
        only return these fields (pass '?' or any invalid fieldname to see
        available fields)

    Returns
    -------
    None

    Examples
    --------
    Download a CSV of futures market data since 08:00 AM Chicago time:

    >>> download_market_data_file("globex-fut-taq",
                                 start_date="08:00:00 America/Chicago",
                                 filepath_or_buffer="globex_taq.csv")
    >>> market_data = pd.read_csv("globex_taq.csv", parse_dates=["Date"])

    See Also
    --------
    quantrocket.get_prices : load prices into a DataFrame
    """
    params = {}
    if start_date:
        params["start_date"] = start_date
    if end_date:
        params["end_date"] = end_date
    if universes:
        params["universes"] = universes
    if conids:
        params["conids"] = conids
    if exclude_universes:
        params["exclude_universes"] = exclude_universes
    if exclude_conids:
        params["exclude_conids"] = exclude_conids
    if fields:
        params["fields"] = fields

    output = output or "csv"

    if output not in ("csv", "json"):
        raise ValueError("Invalid ouput: {0}".format(output))

    response = houston.get("/realtime/{0}.{1}".format(code, output), params=params,
                           timeout=60*30)

    try:
        houston.raise_for_status_with_json(response)
    except requests.HTTPError as e:
        # Raise a dedicated exception
        if "no market data matches the query parameters" in repr(e).lower():
            raise NoRealtimeData(e)
        raise

    filepath_or_buffer = filepath_or_buffer or sys.stdout

    write_response_to_filepath_or_buffer(filepath_or_buffer, response)
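
As with the history endpoint, an empty result surfaces as a dedicated NoRealtimeData exception rather than a generic HTTP error. A sketch, assuming the quantrocket.realtime and quantrocket.exceptions module paths:

import io

import pandas as pd

# assumptions: module paths for the function and the exception
from quantrocket.realtime import download_market_data_file
from quantrocket.exceptions import NoRealtimeData

f = io.StringIO()
try:
    download_market_data_file("globex-fut-taq", f,
                              start_date="08:00:00 America/Chicago")
except NoRealtimeData:
    market_data = pd.DataFrame()  # nothing matched the query window
else:
    market_data = pd.read_csv(f, parse_dates=["Date"])
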
Example No. 25
def download_borrow_fees(filepath_or_buffer=None,
                         output="csv",
                         start_date=None,
                         end_date=None,
                         universes=None,
                         conids=None,
                         exclude_universes=None,
                         exclude_conids=None):
    """
    Query borrow fees from the stockloan database and download to file.

    Data timestamps are UTC.

    Parameters
    ----------
    filepath_or_buffer : str or file-like object
        filepath to write the data to, or file-like object (defaults to stdout)

    output : str
        output format (json, csv, default is csv)

    start_date : str (YYYY-MM-DD), optional
        limit to data on or after this date

    end_date : str (YYYY-MM-DD), optional
        limit to data on or before this date

    universes : list of str, optional
        limit to these universes

    conids : list of int, optional
        limit to these conids

    exclude_universes : list of str, optional
        exclude these universes

    exclude_conids : list of int, optional
        exclude these conids

    Returns
    -------
    None

    Examples
    --------
    Query borrow fees for a universe of Australian stocks.

    >>> download_borrow_fees("asx_borrow_fees.csv", universes=["asx-stk"])
    >>> borrow_fees = pd.read_csv("asx_borrow_fees.csv", parse_dates=["Date"])
    """
    params = {}
    if start_date:
        params["start_date"] = start_date
    if end_date:
        params["end_date"] = end_date
    if universes:
        params["universes"] = universes
    if conids:
        params["conids"] = conids
    if exclude_universes:
        params["exclude_universes"] = exclude_universes
    if exclude_conids:
        params["exclude_conids"] = exclude_conids

    output = output or "csv"

    if output not in ("csv", "json"):
        raise ValueError("Invalid ouput: {0}".format(output))

    response = houston.get("/fundamental/stockloan/fees.{0}".format(output),
                           params=params,
                           timeout=60 * 5)

    houston.raise_for_status_with_json(response)

    filepath_or_buffer = filepath_or_buffer or sys.stdout

    write_response_to_filepath_or_buffer(filepath_or_buffer, response)
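
The same query can target an in-memory buffer instead of a temporary CSV on disk; a sketch, assuming download_borrow_fees is importable from quantrocket.fundamental:

import io

import pandas as pd

# assumption: module path for download_borrow_fees
from quantrocket.fundamental import download_borrow_fees

f = io.StringIO()
download_borrow_fees(f, universes=["asx-stk"], start_date="2018-01-01")
# timestamps are UTC per the docstring above
borrow_fees = pd.read_csv(f, parse_dates=["Date"])
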
Example No. 26
def download_exchange_rates(filepath_or_buffer=None,
                            output="csv",
                            start_date=None,
                            end_date=None,
                            latest=False,
                            base_currencies=None,
                            quote_currencies=None):
    """
    Query exchange rates for the base currency.

    The exchange rates in the exchange rate database are sourced from the
    European Central Bank's reference rates, which are updated each day at 4 PM
    CET.

    Parameters
    ----------
    filepath_or_buffer : str or file-like object
        filepath to write the data to, or file-like object (defaults to stdout)

    output : str
        output format (json, csv, txt, default is csv)

    start_date : str (YYYY-MM-DD), optional
        limit to exchange rates on or after this date

    end_date : str (YYYY-MM-DD), optional
        limit to exchange rates on or before this date

    latest : bool
        return the latest exchange rates

    base_currencies : list of str, optional
        limit to these base currencies

    quote_currencies : list of str, optional
        limit to these quote currencies

    Returns
    -------
    None

    Examples
    --------
    Query latest exchange rates. You can use StringIO to load the CSV into pandas.

    >>> f = io.StringIO()
    >>> download_exchange_rates(f, latest=True)
    >>> rates = pd.read_csv(f, parse_dates=["Date"])
    """
    params = {}
    if start_date:
        params["start_date"] = start_date
    if end_date:
        params["end_date"] = end_date
    if latest:
        params["latest"] = latest
    if base_currencies:
        params["base_currencies"] = base_currencies
    if quote_currencies:
        params["quote_currencies"] = quote_currencies

    output = output or "csv"

    if output not in ("csv", "json", "txt"):
        raise ValueError("Invalid ouput: {0}".format(output))

    response = houston.get("/account/rates.{0}".format(output), params=params)

    houston.raise_for_status_with_json(response)

    filepath_or_buffer = filepath_or_buffer or sys.stdout

    write_response_to_filepath_or_buffer(filepath_or_buffer, response)
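
A sketch that turns the latest ECB reference rates into a per-currency lookup. The quantrocket.account module path and the QuoteCurrency/Rate column names are assumptions inferred from the parameter names, not confirmed by this listing:

import io

import pandas as pd

# assumption: module path for download_exchange_rates
from quantrocket.account import download_exchange_rates

f = io.StringIO()
download_exchange_rates(f, latest=True, base_currencies=["USD"])
rates = pd.read_csv(f, parse_dates=["Date"])

# assumption: the CSV exposes QuoteCurrency and Rate columns
usd_rates = rates.set_index("QuoteCurrency")["Rate"]
print(usd_rates.get("EUR"))
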
Example No. 27
def download_master_file(filepath_or_buffer=None, output="csv", exchanges=None, sec_types=None,
                         currencies=None, universes=None, symbols=None, conids=None,
                         exclude_universes=None, exclude_conids=None,
                         sectors=None, industries=None, categories=None,
                         exclude_delisted=None, delisted=True, frontmonth=False, fields=None,
                         domain=None):
    """
    Query security details from the securities master database and download to file.

    Parameters
    ----------
    filepath_or_buffer : str or file-like object
        filepath to write the data to, or file-like object (defaults to stdout)

    output : str
        output format (json, csv, txt, default is csv)

    exchanges : list of str, optional
        limit to these exchanges

    sec_types : list of str, optional
        limit to these security types. Possible choices: STK, ETF, FUT, CASH, IND, OPT, FOP

    currencies : list of str, optional
        limit to these currencies

    universes : list of str, optional
        limit to these universes

    symbols : list of str, optional
        limit to these symbols

    conids : list of int, optional
        limit to these conids

    exclude_universes : list of str, optional
        exclude these universes

    exclude_conids : list of int, optional
        exclude these conids

    sectors : list of str, optional
        limit to these sectors

    industries : list of str, optional
        limit to these industries

    categories : list of str, optional
        limit to these categories

    exclude_delisted : bool
        exclude delisted securities (default is to include them)

    delisted : bool
        [DEPRECATED] include delisted securities; this parameter is deprecated
        and will be removed in a future release. Delisted securities are
        included by default; passing delisted=False is mapped to
        exclude_delisted=True for backwards compatibility.

    frontmonth : bool
        exclude backmonth and expired futures contracts (default False)

    fields : list of str, optional
        only return these fields (pass ['?'] or any invalid fieldname to see
        available fields)

    domain : str, optional
        query against this domain (default is 'main', which runs against
        quantrocket.master.main.sqlite. Possible choices: main, sharadar)

    Returns
    -------
    None

    Examples
    --------
    Download several exchanges to file:

    >>> download_master_file("securities.csv", exchanges=["NYSE","NASDAQ"])

    Download securities for a particular universe to in-memory file and
    load the CSV into pandas.

    >>> f = io.StringIO()
    >>> download_master_file(f, universes=["my-universe"])
    >>> securities = pd.read_csv(f)

    Download Sharadar securities from quantrocket.master.sharadar.sqlite:

    >>> download_master_file("sharadar_securities.csv", domain="sharadar")
    """
    # Handle legacy param "delisted"
    if delisted is False and exclude_delisted is None:
        exclude_delisted = True

    params = {}
    if exchanges:
        params["exchanges"] = exchanges
    if sec_types:
        params["sec_types"] = sec_types
    if currencies:
        params["currencies"] = currencies
    if universes:
        params["universes"] = universes
    if symbols:
        params["symbols"] = symbols
    if conids:
        params["conids"] = conids
    if exclude_universes:
        params["exclude_universes"] = exclude_universes
    if exclude_conids:
        params["exclude_conids"] = exclude_conids
    if sectors:
        params["sectors"] = sectors
    if industries:
        params["industries"] = industries
    if categories:
        params["categories"] = categories
    if exclude_delisted:
        params["exclude_delisted"] = exclude_delisted
    if frontmonth:
        params["frontmonth"] = frontmonth
    if fields:
        params["fields"] = fields

    output = output or "csv"

    url = "/master/{0}securities.{1}".format(
        "{0}/".format(domain) if domain else "",
        output)

    if output not in ("csv", "json", "txt"):
        raise ValueError("Invalid ouput: {0}".format(output))

    response = houston.get(url, params=params)

    houston.raise_for_status_with_json(response)

    filepath_or_buffer = filepath_or_buffer or sys.stdout

    write_response_to_filepath_or_buffer(filepath_or_buffer, response)
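
With the legacy delisted parameter on its way out, new code should filter with exclude_delisted directly. A sketch querying only active Sharadar listings, assuming download_master_file is importable from quantrocket.master:

# assumption: module path for download_master_file
from quantrocket.master import download_master_file

# exclude_delisted=True narrows to active listings; domain="sharadar"
# targets quantrocket.master.sharadar.sqlite per the docstring above
download_master_file("sharadar_active.csv",
                     domain="sharadar",
                     exclude_delisted=True)
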
Example No. 28
def download_account_balances(filepath_or_buffer=None,
                              output="csv",
                              start_date=None,
                              end_date=None,
                              latest=False,
                              accounts=None,
                              below=None,
                              fields=None,
                              no_cache=False):
    """
    Query IB account balances.

    Parameters
    ----------
    filepath_or_buffer : str or file-like object
        filepath to write the data to, or file-like object (defaults to stdout)

    output : str
        output format (json, csv, txt, default is csv)

    start_date : str (YYYY-MM-DD), optional
        limit to account balance snapshots taken on or after this date

    end_date : str (YYYY-MM-DD), optional
        limit to account balance snapshots taken on or before this date

    latest : bool
        return the latest account balance snapshot

    accounts : list of str, optional
        limit to these accounts

    below : dict of FIELD:AMOUNT, optional
        limit to accounts where the specified field is below the specified
        amount (pass as {field:amount}, for example {'Cushion':0.05})

    fields : list of str, optional
        only return these fields (pass ['?'] or any invalid fieldname to see
        available fields)

    no_cache : bool
        fetch account balances directly from IB (default is to query the
        database, which is updated every minute)

    Returns
    -------
    None

    Examples
    --------
    Query latest balances. You can use StringIO to load the CSV into pandas.

    >>> f = io.StringIO()
    >>> download_account_balances(f, latest=True)
    >>> balances = pd.read_csv(f, parse_dates=["LastUpdated"])
    """
    params = {}
    if start_date:
        params["start_date"] = start_date
    if end_date:
        params["end_date"] = end_date
    if latest:
        params["latest"] = latest
    if accounts:
        params["accounts"] = accounts
    if below:
        params["below"] = dict_to_dict_strs(below)
    if fields:
        params["fields"] = fields
    if no_cache:
        params["no_cache"] = no_cache

    output = output or "csv"

    if output not in ("csv", "json", "txt"):
        raise ValueError("Invalid ouput: {0}".format(output))

    response = houston.get("/account/balances.{0}".format(output),
                           params=params)

    houston.raise_for_status_with_json(response)

    # Don't write a null response to file when using below filters
    if below and response.content[:4] == b"null":
        return

    filepath_or_buffer = filepath_or_buffer or sys.stdout

    write_response_to_filepath_or_buffer(filepath_or_buffer, response)
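
The below filter supports simple margin monitoring, and the body's null-response check means nothing is written when no account matches. A sketch, assuming download_account_balances is importable from quantrocket.account:

import io

import pandas as pd

# assumption: module path for download_account_balances
from quantrocket.account import download_account_balances

f = io.StringIO()
download_account_balances(f, latest=True, below={"Cushion": 0.05})

if f.getvalue():
    at_risk = pd.read_csv(f, parse_dates=["LastUpdated"])
    print(at_risk)  # accounts whose Cushion is under 5%
else:
    print("all accounts above the cushion threshold")  # null response
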
Example No. 29
def execute_command(cmd,
                    return_file=None,
                    filepath_or_buffer=None,
                    params=None,
                    service="satellite"):
    """
    Execute a Python function or arbitrary shell command on a satellite service.

    Parameters
    ----------
    cmd : str, required
        the shell command to run, or the Python function in dot notation (must
        start with "codeload." to be interpreted as a Python function).

    return_file : str, optional
        the path of a file to be returned after the command completes

    filepath_or_buffer : str, optional
        the location to write the return_file (omit to write to stdout)

    params : dict of PARAM:VALUE, optional
        one or more params to pass to the Python function (pass as {param:value})

    service : str, optional
        the service name (default 'satellite')

    Returns
    -------
    dict or None
        None if return_file, otherwise status message

    Examples
    --------
    Run a Python function called 'create_calendar_spread' defined in '/codeload/scripts/combos.py'
    and pass it arguments:

    >>> execute_command("codeload.scripts.combos.create_calendar_spread",
                        params={"universe":"cl-fut", "contract_months":[1,2]})

    Run a backtrader backtest and save the performance chart to file:

    >>> execute_command("python /codeload/backtrader/dual_moving_average.py",
                        return_file="/tmp/backtrader-plot.pdf"
                        outfile="backtrader-plot.pdf")
    """
    _params = {}
    if not service:
        raise ValueError("a service is required")
    if not cmd:
        raise ValueError("a command is required")
    _params["cmd"] = cmd
    if params:
        _params["params"] = dict_to_dict_strs(params)
    if return_file:
        _params["return_file"] = return_file

    if not service.startswith("satellite"):
        raise ValueError("service must start with 'satellite'")

    response = houston.post("/{0}/commands".format(service),
                            params=_params,
                            timeout=60 * 60 * 24)

    houston.raise_for_status_with_json(response)

    if return_file:
        filepath_or_buffer = filepath_or_buffer or sys.stdout
        write_response_to_filepath_or_buffer(filepath_or_buffer, response)
    else:
        return response.json()
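
The return value depends on return_file: when it is set, the file is written and None is returned; otherwise the JSON status message comes back to the caller. A sketch of the latter path, assuming execute_command is importable from quantrocket.satellite:

# assumption: module path for execute_command
from quantrocket.satellite import execute_command

# without return_file, the command's JSON status message is returned
status = execute_command(
    "codeload.scripts.combos.create_calendar_spread",
    params={"universe": "cl-fut", "contract_months": [1, 2]})
print(status)  # the exact keys of the status dict are not documented here
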
Example No. 30
def backtest(strategies, start_date=None, end_date=None, allocations=None,
                 nlv=None, params=None, details=None, csv=None, filepath_or_buffer=None):
    """
    Backtest one or more strategies.

    By default returns a PDF tear sheet of performance charts but can also return a CSV of
    backtest results.

    Parameters
    ----------
    strategies : list of str, required
        one or more strategy codes

    start_date : str (YYYY-MM-DD), optional
        the backtest start date (default is to use all available history)

    end_date : str (YYYY-MM-DD), optional
        the backtest end date (default is to use all available history)

    allocations : dict of CODE:FLOAT, optional
        the allocation for each strategy, passed as {code:allocation} (default
        allocation is 1.0 / number of strategies)

    nlv : dict of CURRENCY:NLV, optional
        the NLV (net liquidation value, i.e. account balance) to assume for
        the backtest, expressed in each currency represented in the backtest (pass
        as {currency:nlv})

    params : dict of PARAM:VALUE, optional
        one or more strategy params to set on the fly before backtesting
        (pass as {param:value})

    details : bool
        return detailed results for all securities instead of aggregating to
        strategy level (only supported for single-strategy backtests)

    csv : bool
        return a CSV of performance data (default is to return a PDF
        performance tear sheet)

    filepath_or_buffer : str, optional
        the location to write the results file (omit to write to stdout)

    Returns
    -------
    None
    """
    _params = {}
    if strategies:
        _params["strategies"] = strategies
    if start_date:
        _params["start_date"] = start_date
    if end_date:
        _params["end_date"] = end_date
    if allocations:
        _params["allocations"] = dict_to_dict_strs(allocations)
    if nlv:
        _params["nlv"] = dict_to_dict_strs(nlv)
    if details:
        _params["details"] = details
    if csv:
        _params["csv"] = csv
    if params:
        _params["params"] = dict_to_dict_strs(params)

    response = houston.post("/moonshot/backtests", params=_params, timeout=60*60*24)

    houston.raise_for_status_with_json(response)

    filepath_or_buffer = filepath_or_buffer or sys.stdout
    write_response_to_filepath_or_buffer(filepath_or_buffer, response)
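
This older variant defaults to a PDF tear sheet, so loading results into pandas requires csv=True. A sketch, assuming this version of backtest is importable from quantrocket.moonshot:

import io

import pandas as pd

# assumption: module path for backtest
from quantrocket.moonshot import backtest

f = io.StringIO()
backtest(["hml-us"],
         start_date="2005-01-01",
         end_date="2015-12-31",
         csv=True,
         filepath_or_buffer=f)
results = pd.read_csv(f)
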