Example #1
def plot_history_from_s3():
    """plot_history_from_s3

    Load an algorithm trading history dataset from s3 and plot it
    """

    log.debug('start - plot')

    parser = argparse.ArgumentParser(
        description=('plot an algorithm trading history file from s3'))
    parser.add_argument('-b',
                        help=('s3 bucket with the saved trading history'),
                        required=False,
                        dest='s3_bucket')
    parser.add_argument('-k',
                        help=('s3 key with the saved trading history'),
                        required=False,
                        dest='history_json_file')
    parser.add_argument('-d',
                        help=('debug'),
                        required=False,
                        dest='debug',
                        action='store_true')
    args = parser.parse_args()

    s3_access_key = consts.S3_ACCESS_KEY
    s3_secret_key = consts.S3_SECRET_KEY
    s3_region_name = consts.S3_REGION_NAME
    s3_address = consts.S3_ADDRESS
    s3_secure = consts.S3_SECURE
    compress = True

    s3_bucket = ('bt-spy-williamsr-2018-12-05-22-44-50-714400')
    s3_key = ('-181.55_netgain_9818.45_netvalue_NEGATIVE_'
              '10000.0_startbalance_1710.95_endbalance_'
              '30_shares_270.25_close_3_buys_0_sells_'
              '1_minbuyinds_1_minsellinds_'
              '43.52_seconds_'
              'trade_history-SPY_williamsr_test_'
              '0.73_for_176_of_24000.json')

    debug = False

    if args.s3_bucket:
        s3_bucket = args.s3_bucket
    if args.history_json_file:
        s3_key = args.history_json_file
    if args.debug:
        debug = True

    load_res = load_history.load_history_dataset(s3_enabled=True,
                                                 s3_key=s3_key,
                                                 s3_address=s3_address,
                                                 s3_bucket=s3_bucket,
                                                 s3_access_key=s3_access_key,
                                                 s3_secret_key=s3_secret_key,
                                                 s3_region_name=s3_region_name,
                                                 s3_secure=s3_secure,
                                                 compress=compress)

    algo_config = load_res.get('algo_config_dict', None)
    algo_name = load_res.get('algo_name', None)
    tickers = load_res.get('tickers', [
        'SPY',
    ])
    ticker = tickers[0]

    log.info(f'found algo: {algo_name}')
    log.info(f'config: {consts.ppj(algo_config)}')

    history_df = load_res[ticker]
    history_df['date'] = pd.to_datetime(history_df['date'])
    history_df['minute'] = pd.to_datetime(history_df['minute'])
    ticker = history_df['ticker'].iloc[0]

    log.info('plotting history')

    first_date = history_df['date'].iloc[0]
    end_date = history_df['date'].iloc[-1]
    title = (f'Trading History {ticker}\n'
             f'Backtest dates from {first_date} to {end_date}')
    use_xcol = 'date'
    use_as_date_format = '%d\n%b'
    use_minute = False
    if 'minute' in history_df:
        found_valid_minute = history_df['minute'].iloc[0]
        if found_valid_minute:
            use_minute = True

    if use_minute:
        use_xcol = 'minute'
        use_as_date_format = '%d %H:%M:%S\n%b'
    xlabel = 'Dates vs Algo values'
    ylabel = 'Algo values'
    df_filter = (history_df['close'] > 1.00)

    # set default hloc columns:
    blue = None
    green = None
    orange = None

    red = 'close'
    blue = 'balance'

    if debug:
        for i, r in history_df.iterrows():
            log.info(f'{r["minute"]} - {r["close"]}')
    # end of debug

    show_plot = True
    if show_plot:
        plot_trading_history.plot_trading_history(
            title=title,
            df=history_df,
            red=red,
            blue=blue,
            green=green,
            orange=orange,
            date_col=use_xcol,
            date_format=use_as_date_format,
            xlabel=xlabel,
            ylabel=ylabel,
            df_filter=df_filter,
            show_plot=True,
            dropna_for_all=True)
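
A minimal sketch (not part of the original module) of consuming load_history.load_history_dataset() directly; it assumes the return shape plot_history_from_s3 relies on above, a dict keyed by ticker plus metadata keys like 'algo_name', and the bucket/key names are hypothetical:

def example_consume_history_dataset():
    # hedged sketch: assumes load_history_dataset() returns a dict keyed
    # by ticker symbol plus metadata keys such as 'algo_name' and
    # 'algo_config_dict' (the keys read by plot_history_from_s3 above);
    # the bucket and key names are hypothetical
    load_res = load_history.load_history_dataset(
        s3_enabled=True,
        s3_bucket='algohistory',
        s3_key='trade_history_SPY',
        s3_address=consts.S3_ADDRESS,
        s3_access_key=consts.S3_ACCESS_KEY,
        s3_secret_key=consts.S3_SECRET_KEY,
        s3_region_name=consts.S3_REGION_NAME,
        s3_secure=consts.S3_SECURE,
        compress=True)
    history_df = load_res['SPY']  # per-ticker pandas DataFrame
    print(load_res.get('algo_name'), len(history_df.index))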
def backtest_with_runner():
    """backtest_with_runner

    build and publish a trading history from an algorithm config.

    ::

        backtest_with_runner.py -t TICKER -c ALGO_CONFIG -s START_DATE
        -k S3_KEY -b S3_BUCKET -l
    """

    parser = argparse.ArgumentParser(
        description=('backtest an algorithm and publish '
                     'the trading history'))
    parser.add_argument('-t',
                        help=('ticker symbol'),
                        required=False,
                        dest='ticker')
    parser.add_argument('-k', help=('s3_key'), required=False, dest='s3_key')
    parser.add_argument('-b',
                        help=('s3_bucket'),
                        required=False,
                        dest='s3_bucket')
    parser.add_argument('-s',
                        help=('start date format YYYY-MM-DD'),
                        required=False,
                        dest='start_date')
    parser.add_argument('-c',
                        help=('algo config file'),
                        required=False,
                        dest='algo_config')
    parser.add_argument('-l',
                        help=('run a backtest with the latest '
                              'pricing data'),
                        required=False,
                        dest='latest',
                        action='store_true')
    parser.add_argument('-d',
                        help='debug',
                        required=False,
                        dest='debug',
                        action='store_true')
    args = parser.parse_args()

    ticker = 'SPY'
    s3_bucket = 'algohistory'
    s3_key = f'trade_history_{ticker}'
    start_date = '2019-01-01'
    algo_config = '/opt/sa/cfg/default_algo.json'
    latest = False
    show_plot = True
    debug = False

    if args.ticker:
        ticker = args.ticker.upper()
    if args.s3_key:
        s3_key = args.s3_key
    if args.s3_bucket:
        s3_bucket = args.s3_bucket
    if args.start_date:
        start_date = args.start_date
    if args.algo_config:
        algo_config = args.algo_config
    if args.latest:
        latest = True
        start_date = ae_utils.get_last_close_str()
    if args.debug:
        debug = True

    history_loc = (f's3://{s3_bucket}/{s3_key}')

    log.info(f'building {ticker} trade history '
             f'start_date={start_date} '
             f'config={algo_config} '
             f'history_loc={history_loc}')

    runner = algo_runner.AlgoRunner(ticker=ticker,
                                    start_date=start_date,
                                    history_loc=history_loc,
                                    algo_config=algo_config,
                                    verbose_algo=debug,
                                    verbose_processor=False,
                                    verbose_indicators=False)

    trading_history_df = None
    if latest:
        trading_history_df = runner.latest()
        log.info(f'{ticker} latest:')
        print(trading_history_df[['minute', 'close']].tail(5))
        log.info('Other available columns to plot:')
        print(trading_history_df.columns.values)
        if show_plot:
            plot.plot_trading_history(
                title=(f'{ticker} at '
                       f'${trading_history_df["close"].iloc[-1]} '
                       f'at: '
                       f'{trading_history_df["minute"].iloc[-1]}'),
                df=trading_history_df,
                red='high',
                blue='close')
    else:
        runner.start()

    sys.exit(0)
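
For reference, a hedged sketch (not in the original source) of driving AlgoRunner programmatically with the same calls backtest_with_runner uses above; the history location and config path are illustrative:

def example_algo_runner_usage():
    # hedged sketch built only from the AlgoRunner calls shown above;
    # the history_loc and algo_config values are illustrative
    runner = algo_runner.AlgoRunner(
        ticker='SPY',
        start_date='2019-01-01',
        history_loc='s3://algohistory/trade_history_SPY',
        algo_config='/opt/sa/cfg/default_algo.json',
        verbose_algo=False,
        verbose_processor=False,
        verbose_indicators=False)
    # latest() backtests against the most recent pricing data and
    # returns the trading history DataFrame; start() runs the full
    # backtest and publishes to history_loc instead
    latest_df = runner.latest()
    print(latest_df[['minute', 'close']].tail(5))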
def run_backtest_and_plot_history(config_dict):
    """run_backtest_and_plot_history

    Run a derived algorithm with an algorithm config dictionary

    :param config_dict: algorithm config dictionary
    """

    log.debug('start - sa')

    parser = argparse.ArgumentParser(description=('stock analysis tool'))
    parser.add_argument('-t', help=('ticker'), required=True, dest='ticker')
    parser.add_argument('-e',
                        help=('file path to extract an '
                              'algorithm-ready datasets from redis'),
                        required=False,
                        dest='algo_extract_loc')
    parser.add_argument('-l',
                        help=('show dataset in this file'),
                        required=False,
                        dest='show_from_file')
    parser.add_argument('-H',
                        help=('show trading history dataset in this file'),
                        required=False,
                        dest='show_history_from_file')
    parser.add_argument(
        '-E',
        help=('show trading performance report dataset in this file'),
        required=False,
        dest='show_report_from_file')
    parser.add_argument(
        '-L',
        help=('restore an algorithm-ready dataset file back into redis'),
        required=False,
        dest='restore_algo_file')
    parser.add_argument('-f',
                        help=('save the trading history dataframe '
                              'to this file'),
                        required=False,
                        dest='history_json_file')
    parser.add_argument(
        '-J',
        help=('plot action - after preparing you can use: '
              '-J show to open the image (good for debugging)'),
        required=False,
        dest='plot_action')
    parser.add_argument(
        '-b',
        help=('run a backtest using the dataset in '
              'a file path/s3 key/redis key formats: '
              'file:/opt/sa/tests/datasets/algo/SPY-latest.json or '
              's3://algoready/SPY-latest.json or '
              'redis://SPY-latest'),
        required=False,
        dest='backtest_loc')
    parser.add_argument('-B',
                        help=('optional - broker url for Celery'),
                        required=False,
                        dest='broker_url')
    parser.add_argument('-C',
                        help=('optional - backend url for Celery'),
                        required=False,
                        dest='backend_url')
    parser.add_argument(
        '-w',
        help=('optional - flag for publishing an algorithm job '
              'using Celery to the ae workers'),
        required=False,
        dest='run_on_engine',
        action='store_true')
    parser.add_argument('-k',
                        help=('optional - s3 access key'),
                        required=False,
                        dest='s3_access_key')
    parser.add_argument('-K',
                        help=('optional - s3 secret key'),
                        required=False,
                        dest='s3_secret_key')
    parser.add_argument('-a',
                        help=('optional - s3 address format: <host:port>'),
                        required=False,
                        dest='s3_address')
    parser.add_argument('-Z',
                        help=('optional - s3 secure: default False'),
                        required=False,
                        dest='s3_secure')
    parser.add_argument('-s',
                        help=('optional - start date: YYYY-MM-DD'),
                        required=False,
                        dest='start_date')
    parser.add_argument('-n',
                        help=('optional - end date: YYYY-MM-DD'),
                        required=False,
                        dest='end_date')
    parser.add_argument('-u',
                        help=('optional - s3 bucket name'),
                        required=False,
                        dest='s3_bucket_name')
    parser.add_argument('-G',
                        help=('optional - s3 region name'),
                        required=False,
                        dest='s3_region_name')
    parser.add_argument(
        '-g',
        help=('Path to a custom algorithm module file '
              'on disk. This module must have a single '
              'class that inherits from: '
              'https://github.com/AlgoTraders/stock-analysis-engine/'
              'blob/master/'
              'analysis_engine/algo.py Additionally you '
              'can find the Example-Minute-Algorithm here: '
              'https://github.com/AlgoTraders/stock-analysis-engine/'
              'blob/master/analysis_engine/mocks/'
              'example_algo_minute.py'),
        required=False,
        dest='run_algo_in_file')
    parser.add_argument('-p',
                        help=('optional - s3 bucket/file for trading history'),
                        required=False,
                        dest='algo_history_loc')
    parser.add_argument(
        '-o',
        help=('optional - s3 bucket/file for trading performance report'),
        required=False,
        dest='algo_report_loc')
    parser.add_argument('-r',
                        help=('optional - redis_address format: <host:port>'),
                        required=False,
                        dest='redis_address')
    parser.add_argument('-R',
                        help=('optional - redis and s3 key name'),
                        required=False,
                        dest='keyname')
    parser.add_argument(
        '-m',
        help=('optional - redis database number (0 by default)'),
        required=False,
        dest='redis_db')
    parser.add_argument('-x',
                        help=('optional - redis expiration in seconds'),
                        required=False,
                        dest='redis_expire')
    parser.add_argument(
        '-c',
        help=('optional - algorithm config_file path for setting '
              'up internal algorithm trading strategies and '
              'indicators'),
        required=False,
        dest='config_file')
    parser.add_argument('-v',
                        help=('set the Algorithm to verbose logging'),
                        required=False,
                        dest='verbose_algo',
                        action='store_true')
    parser.add_argument(
        '-P',
        help=('set the Algorithm\'s IndicatorProcessor to verbose logging'),
        required=False,
        dest='verbose_processor',
        action='store_true')
    parser.add_argument(
        '-I',
        help=('set all Algorithm\'s Indicators to verbose logging '
              '(note individual indicators support a \'verbose\' key '
              'that can be set to True to debug just one '
              'indicator)'),
        required=False,
        dest='verbose_indicators',
        action='store_true')
    parser.add_argument(
        '-V',
        help=('inspect the datasets an algorithm is processing - this '
              'will slow down processing to show debugging'),
        required=False,
        dest='inspect_datasets',
        action='store_true')
    parser.add_argument(
        '-j',
        help=('run the algorithm on just this specific date in the datasets '
              '- specify the date in a format: YYYY-MM-DD like: 2018-11-29'),
        required=False,
        dest='run_this_date')
    parser.add_argument('-d',
                        help=('debug'),
                        required=False,
                        dest='debug',
                        action='store_true')
    args = parser.parse_args()

    ticker = ae_consts.TICKER
    use_balance = 10000.0
    use_commission = 6.0
    use_start_date = None
    use_end_date = None
    use_config_file = None
    debug = False
    verbose_algo = None
    verbose_processor = None
    verbose_indicators = None
    inspect_datasets = None
    history_json_file = None
    run_this_date = None

    s3_access_key = ae_consts.S3_ACCESS_KEY
    s3_secret_key = ae_consts.S3_SECRET_KEY
    s3_region_name = ae_consts.S3_REGION_NAME
    s3_address = ae_consts.S3_ADDRESS
    s3_secure = ae_consts.S3_SECURE
    redis_address = ae_consts.REDIS_ADDRESS
    redis_password = ae_consts.REDIS_PASSWORD
    redis_db = ae_consts.REDIS_DB
    redis_expire = ae_consts.REDIS_EXPIRE

    if args.s3_access_key:
        s3_access_key = args.s3_access_key
    if args.s3_secret_key:
        s3_secret_key = args.s3_secret_key
    if args.s3_region_name:
        s3_region_name = args.s3_region_name
    if args.s3_address:
        s3_address = args.s3_address
    if args.s3_secure:
        s3_secure = args.s3_secure
    if args.redis_address:
        redis_address = args.redis_address
    if args.redis_db:
        redis_db = args.redis_db
    if args.redis_expire:
        redis_expire = args.redis_expire
    if args.history_json_file:
        history_json_file = args.history_json_file
    if args.ticker:
        ticker = args.ticker.upper()
    if args.debug:
        debug = True
    if args.verbose_algo:
        verbose_algo = True
    if args.verbose_processor:
        verbose_processor = True
    if args.verbose_indicators:
        verbose_indicators = True
    if args.inspect_datasets:
        inspect_datasets = True
    if args.run_this_date:
        run_this_date = args.run_this_date

    if args.start_date:
        try:
            use_start_date = f'{str(args.start_date)} 00:00:00'
            datetime.datetime.strptime(args.start_date,
                                       ae_consts.COMMON_DATE_FORMAT)
        except Exception as e:
            msg = ('please use a start date formatted as: '
                   f'{ae_consts.COMMON_DATE_FORMAT}\nerror was: {e}')
            log.error(msg)
            sys.exit(1)
        # end of testing for a valid date
    # end of args.start_date
    if args.end_date:
        try:
            use_end_date = f'{str(args.end_date)} 00:00:00'
            datetime.datetime.strptime(args.end_date,
                                       ae_consts.COMMON_DATE_FORMAT)
        except Exception as e:
            msg = ('please use an end date formatted as: '
                   f'{ae_consts.COMMON_DATE_FORMAT}\nerror was: {e}')
            log.error(msg)
            sys.exit(1)
        # end of testing for a valid date
    # end of args.end_date
    if args.config_file:
        use_config_file = args.config_file
        if not os.path.exists(use_config_file):
            log.error(
                f'Failed: unable to find config file: -c {use_config_file}')
            sys.exit(1)

    if args.backtest_loc:
        backtest_loc = args.backtest_loc
        if ('file:/' not in backtest_loc and 's3://' not in backtest_loc
                and 'redis://' not in backtest_loc):
            log.error('invalid -b <backtest dataset file> specified. '
                      f'{backtest_loc} '
                      'please use either: '
                      '-b file:/opt/sa/tests/datasets/algo/SPY-latest.json or '
                      '-b s3://algoready/SPY-latest.json or '
                      '-b redis://SPY-latest')
            sys.exit(1)

        load_from_s3_bucket = None
        load_from_s3_key = None
        load_from_redis_key = None
        load_from_file = None

        if 's3://' in backtest_loc:
            load_from_s3_bucket = backtest_loc.split('/')[-2]
            load_from_s3_key = backtest_loc.split('/')[-1]
        elif 'redis://' in backtest_loc:
            load_from_redis_key = backtest_loc.split('/')[-1]
        elif 'file:/' in backtest_loc:
            load_from_file = backtest_loc.split(':')[-1]
        # end of parsing supported transport - loading an algo-ready

        load_config = build_publish_request.build_publish_request(
            ticker=ticker,
            output_file=load_from_file,
            s3_bucket=load_from_s3_bucket,
            s3_key=load_from_s3_key,
            redis_key=load_from_redis_key,
            redis_address=redis_address,
            redis_db=redis_db,
            redis_password=redis_password,
            redis_expire=redis_expire,
            s3_address=s3_address,
            s3_access_key=s3_access_key,
            s3_secret_key=s3_secret_key,
            s3_region_name=s3_region_name,
            s3_secure=s3_secure,
            verbose=debug,
            label=f'load-{backtest_loc}')
        if load_from_file:
            load_config['output_file'] = load_from_file
        if load_from_redis_key:
            load_config['redis_key'] = load_from_redis_key
            load_config['redis_enabled'] = True
        if load_from_s3_bucket and load_from_s3_key:
            load_config['s3_bucket'] = load_from_s3_bucket
            load_config['s3_key'] = load_from_s3_key
            load_config['s3_enabled'] = True

    if debug:
        log.info('starting algo')

    config_dict['ticker'] = ticker
    config_dict['balance'] = use_balance
    config_dict['commission'] = use_commission

    if verbose_algo:
        config_dict['verbose'] = verbose_algo
    if verbose_processor:
        config_dict['verbose_processor'] = verbose_processor
    if verbose_indicators:
        config_dict['verbose_indicators'] = verbose_indicators
    if inspect_datasets:
        config_dict['inspect_datasets'] = inspect_datasets
    if run_this_date:
        config_dict['run_this_date'] = run_this_date

    algo_obj = ExampleCustomAlgo(ticker=config_dict['ticker'],
                                 config_dict=config_dict)

    algo_res = run_algo.run_algo(ticker=ticker,
                                 algo=algo_obj,
                                 start_date=use_start_date,
                                 end_date=use_end_date,
                                 raise_on_err=True)

    if algo_res['status'] != ae_consts.SUCCESS:
        log.error('failed running algo backtest '
                  f'{algo_obj.get_name()} hit status: '
                  f'{ae_consts.get_status(status=algo_res["status"])} '
                  f'error: {algo_res["err"]}')
        return
    # if not successful

    log.info(f'backtest: {algo_obj.get_name()} '
             f'{ae_consts.get_status(status=algo_res["status"])}')

    trading_history_dict = algo_obj.get_history_dataset()
    history_df = trading_history_dict[ticker]
    if not hasattr(history_df, 'to_json'):
        return

    if history_json_file:
        log.info(f'saving history to: {history_json_file}')
        history_df.to_json(history_json_file,
                           orient='records',
                           date_format='iso')

    log.info('plotting history')

    use_xcol = 'date'
    use_as_date_format = '%d\n%b'
    xlabel = f'Dates vs {trading_history_dict["algo_name"]} values'
    ylabel = f'Algo {trading_history_dict["algo_name"]}\nvalues'
    df_filter = (history_df['close'] > 0.01)
    first_date = history_df[df_filter]['date'].iloc[0]
    end_date = history_df[df_filter]['date'].iloc[-1]
    if config_dict.get('timeseries') == 'minute':
        use_xcol = 'minute'
        use_as_date_format = '%d %H:%M:%S\n%b'
        first_date = history_df[df_filter]['minute'].iloc[0]
        end_date = history_df[df_filter]['minute'].iloc[-1]
    title = (f'Trading History {ticker} for Algo '
             f'{trading_history_dict["algo_name"]}\n'
             f'Backtest dates from {first_date} to {end_date}')

    # set default hloc columns:
    blue = None
    green = None
    orange = None

    red = 'close'
    blue = 'balance'

    if debug:
        for i, r in history_df.iterrows():
            log.debug(f'{r["minute"]} - {r["close"]}')

    plot_trading_history.plot_trading_history(title=title,
                                              df=history_df,
                                              red=red,
                                              blue=blue,
                                              green=green,
                                              orange=orange,
                                              date_col=use_xcol,
                                              date_format=use_as_date_format,
                                              xlabel=xlabel,
                                              ylabel=ylabel,
                                              df_filter=df_filter,
                                              show_plot=True,
                                              dropna_for_all=True)
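
The config_dict argument above is a plain dictionary; below is a minimal hedged sketch restricted to the keys run_backtest_and_plot_history reads or writes (values are illustrative, and derived algorithms typically accept more keys):

# hedged sketch of an algorithm config dictionary; only the keys
# referenced by run_backtest_and_plot_history above are shown
example_config_dict = {
    'ticker': 'SPY',             # overwritten from -t / ae_consts.TICKER
    'balance': 10000.0,          # starting balance applied above
    'commission': 6.0,           # per-trade commission applied above
    'timeseries': 'minute',      # 'minute' switches the plot to minute bars
    'verbose': False,            # set by -v
    'verbose_processor': False,  # set by -P
    'verbose_indicators': False  # set by -I
}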
Example #4
def examine_dataset_in_file(
    path_to_file,
    compress=False,
    encoding='utf-8',
    ticker=None,
    dataset_type=ae_consts.SA_DATASET_TYPE_ALGO_READY,
    serialize_datasets=ae_consts.DEFAULT_SERIALIZED_DATASETS,
):
    """examine_dataset_in_file

    Show the internal dataset dictionary structure in dataset file

    :param path_to_file: path to file
    :param compress: optional - boolean flag for decompressing
        the contents of the ``path_to_file`` if necessary
        (default is ``False`` and algorithms
        use ``zlib`` for compression)
    :param encoding: optional - string for data encoding
    :param ticker: optional - string ticker symbol
        to verify is in the dataset
    :param dataset_type: optional - dataset type
        (default is ``SA_DATASET_TYPE_ALGO_READY``)
    :param serialize_datasets: optional - list of dataset names to
        deserialize in the dataset
    """
    if dataset_type == ae_consts.SA_DATASET_TYPE_ALGO_READY:
        log.info(f'show start - load dataset from file={path_to_file}')
        show_dataset.show_dataset(path_to_file=path_to_file,
                                  compress=compress,
                                  encoding=encoding,
                                  dataset_type=dataset_type,
                                  serialize_datasets=serialize_datasets)
        log.info(f'show done - dataset in file={path_to_file}')
    elif dataset_type == ae_consts.SA_DATASET_TYPE_TRADING_HISTORY:
        log.info(f'load trading history dataset from file={path_to_file}')
        trading_history_dict = load_history.load_history_dataset_from_file(
            path_to_file=path_to_file, compress=compress, encoding=encoding)
        history_df = trading_history_dict[ticker]

        first_date = history_df['date'].iloc[0]
        end_date = history_df['date'].iloc[-1]
        title = (f'Trading History {ticker} for Algo '
                 f'{trading_history_dict["algo_name"]}\n'
                 f'Backtest dates from {first_date} to {end_date}')
        xcol = 'date'
        xlabel = f'Dates vs {trading_history_dict["algo_name"]} values'
        ylabel = (
            f'Algo Values from columns:\n{list(history_df.columns.values)}')
        df_filter = (history_df['close'] > 0.01)

        # set default hloc columns:
        red = 'close'
        blue = 'low'
        green = 'high'
        orange = 'open'

        log.info('available columns to plot in dataset: '
                 f'{ae_consts.ppj(list(history_df.columns.values))}')

        plot_trading_history.plot_trading_history(title=title,
                                                  df=history_df,
                                                  red=red,
                                                  blue=blue,
                                                  green=green,
                                                  orange=orange,
                                                  date_col=xcol,
                                                  xlabel=xlabel,
                                                  ylabel=ylabel,
                                                  df_filter=df_filter,
                                                  show_plot=True,
                                                  dropna_for_all=False)
    elif dataset_type == ae_consts.SA_DATASET_TYPE_TRADING_REPORT:
        log.info('load trading performance report dataset '
                 f'from file={path_to_file}')
        trading_report_dict = load_report.load_report_dataset_from_file(
            path_to_file=path_to_file, compress=compress, encoding=encoding)
        print(trading_report_dict)
    else:
        log.error(f'show unsupported dataset type={dataset_type} for '
                  f'file={path_to_file}')
        return
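
A hedged usage sketch for examine_dataset_in_file; the file path is hypothetical, and ticker is required for trading history files because the loaded dictionary is keyed by ticker:

def example_examine_trading_history():
    # hedged sketch: the path is hypothetical and must point at an
    # existing trading history file created by a previous backtest
    examine_dataset_in_file(
        path_to_file='/opt/sa/tests/datasets/algo/SPY-latest.json',
        compress=False,
        encoding='utf-8',
        ticker='SPY',
        dataset_type=ae_consts.SA_DATASET_TYPE_TRADING_HISTORY)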
def create_column_dnn(predict_feature='close',
                      ticker='',
                      debug=False,
                      use_epochs=10,
                      use_batch_size=10,
                      use_test_size=0.1,
                      use_random_state=1,
                      use_seed=7,
                      use_shuffle=False,
                      model_verbose=True,
                      fit_verbose=True,
                      use_scalers=True,
                      df=None,
                      dnn_config=None,
                      compile_config=None,
                      s3_bucket='',
                      s3_key='',
                      send_plots_to_slack=False):
    """create_column_dnn

    For scaler-normalized datasets this will
    compile numeric columns and ignore string/non-numeric
    columns as training and test feature columns

    :param predict_feature: Column to create DNN with
    :param ticker: Ticker being used
    :param debug: Debug mode
    :param use_epochs: Epochs times to use
    :param use_batch_size: Batch size to use
    :param use_test_size: Test size to use
    :param use_random_state: Random state to train with
    :param use_seed: Seed used to build scaler datasets
    :param use_shuffle: To shuffle the regression estimator or not
    :param model_verbose: To use a verbose Keras regression model or not
    :param fit_verbose: To use a verbose fitting of the regression estimator
    :param use_scalers: To build using scalers or not
    :param df: Ticker dataset
    :param dnn_config: Deep Neural Net keras model json to build the model
    :param compile_config: Deep Neural Net dictionary of compile options
    :param s3_bucket: S3 Bucket
    :param s3_key: S3 Key
    :param send_plots_to_slack: flag to send the plots to slack
    """

    # guard against shared mutable defaults; dnn_config is mutated below
    df = df if df is not None else pd.DataFrame()
    dnn_config = dnn_config if dnn_config is not None else {}
    compile_config = compile_config if compile_config is not None else {}

    df_filter = (df[f'{predict_feature}'] >= 0.1)
    first_date = df[df_filter]['date'].iloc[0]
    end_date = df[df_filter]['date'].iloc[-1]

    if 'minute' in df:
        found_valid_minute = df['minute'].iloc[0]
        if found_valid_minute:
            first_date = df[df_filter]['minute'].iloc[0]
            end_date = df[df_filter]['minute'].iloc[-1]

    num_rows = len(df.index)
    log.info(f'prepared training data from '
             f'history {s3_bucket}@{s3_key} '
             f'rows={num_rows} '
             f'dates: {first_date} to {end_date}')

    if debug:
        for i, r in df.iterrows():
            log.info(f'{r["minute"]} - {r[predict_feature]}')
        # end of for loop

        log.info(f'columns: {df.columns.values}')
        log.info(f'rows: {len(df.index)}')
    # end of debug

    use_all_features = use_scalers
    all_features = []
    train_features = []
    if use_all_features:
        for c in df.columns.values:
            if (pandas_types.is_numeric_dtype(df[c])
                    and c not in train_features):
                if c != predict_feature:
                    train_features.append(c)
                if c not in all_features:
                    all_features.append(c)

        dnn_config['layers'][-1]['activation'] = 'sigmoid'
    else:
        temp_choices = choices[:]
        temp_choices.remove(predict_feature)
        train_features = ['open']
        train_features.extend(temp_choices)
        all_features = [f'{predict_feature}'] + train_features

    num_features = len(train_features)
    features_and_minute = ['minute'] + all_features

    log.info('converting columns to floats')

    timeseries_df = df[df_filter][features_and_minute].fillna(-10000.0)
    converted_df = timeseries_df[all_features].astype('float32')

    train_df = None
    test_df = None
    scaler_predictions = None
    if use_all_features:
        scaler_res = build_scaler_datasets.build_datasets_using_scalers(
            train_features=train_features,
            test_feature=predict_feature,
            df=converted_df,
            test_size=use_test_size,
            seed=use_seed)
        if scaler_res['status'] != ae_consts.SUCCESS:
            log.error('failed to build scaler train and test datasets')
            return
        train_df = scaler_res['scaled_train_df']
        test_df = scaler_res['scaled_test_df']
        x_train = scaler_res['x_train']
        x_test = scaler_res['x_test']
        y_train = scaler_res['y_train']
        y_test = scaler_res['y_test']
        scaler_predictions = scaler_res['scaler_test']
    else:
        log.info('building train and test dfs from subset of features')
        train_df = converted_df[train_features]
        test_df = converted_df[[predict_feature]]

        log.info(f'splitting {num_rows} into test and training '
                 f'size={use_test_size}')

        (x_train, x_test, y_train,
         y_test) = tt_split.train_test_split(train_df,
                                             test_df,
                                             test_size=use_test_size,
                                             random_state=use_random_state)

    log.info(f'split breakdown - '
             f'x_train={len(x_train)} '
             f'x_test={len(x_test)} '
             f'y_train={len(y_train)} '
             f'y_test={len(y_test)}')

    def set_model():
        return build_dnn.build_regression_dnn(num_features=num_features,
                                              compile_config=compile_config,
                                              model_config=dnn_config)

    estimator = keras_scikit.KerasRegressor(build_fn=set_model,
                                            epochs=use_epochs,
                                            batch_size=use_batch_size,
                                            verbose=model_verbose)

    log.info(f'fitting estimator - '
             f'predicting={predict_feature} '
             f'epochs={use_epochs} '
             f'batch={use_batch_size} '
             f'test_size={use_test_size} '
             f'seed={use_seed}')

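    # note: validation_data reuses the training split below; pass
    # (x_test, y_test) instead to monitor held-out error during fit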
    history = estimator.fit(x_train,
                            y_train,
                            validation_data=(x_train, y_train),
                            epochs=use_epochs,
                            batch_size=use_batch_size,
                            shuffle=use_shuffle,
                            verbose=fit_verbose)

    created_on = (datetime.datetime.now().strftime(
        ae_consts.COMMON_TICK_DATE_FORMAT))
    plot_fit_history.plot_dnn_fit_history(
        df=history.history,
        title=(f'DNN Errors Over Training Epochs\n'
               f'Training Data: s3://{s3_bucket}/{s3_key}\n'
               f'Created: {created_on}'),
        red='mean_squared_error',
        blue='mean_absolute_error',
        green='acc',
        orange='cosine_proximity',
        send_plots_to_slack=send_plots_to_slack)

    # on production use newly fetched pricing data
    # not the training data
    predict_records = []
    if use_all_features:
        prediction_res = build_scaler_df.build_scaler_dataset_from_df(
            df=converted_df[train_features])
        if prediction_res['status'] == ae_consts.SUCCESS:
            predict_records = prediction_res['df']
    else:
        predict_records = converted_df[train_features]

    log.info(f'making predictions: {len(predict_records)}')

    predictions = estimator.model.predict(predict_records, verbose=True)

    # print full arrays; newer numpy versions reject threshold=np.nan
    np.set_printoptions(threshold=sys.maxsize)
    indexes = tf.argmax(predictions, axis=1)
    data = {}
    data['indexes'] = indexes
    price_predictions = []
    if use_all_features and scaler_predictions:
        price_predictions = [
            ae_consts.to_f(x) for x in scaler_predictions.inverse_transform(
                predictions.reshape(-1, 1)).reshape(-1)
        ]
    else:
        price_predictions = [ae_consts.to_f(x[0]) for x in predictions]

    timeseries_df[f'predicted_{predict_feature}'] = price_predictions
    timeseries_df['error'] = (timeseries_df[f'{predict_feature}'] -
                              timeseries_df[f'predicted_{predict_feature}'])

    output_features = [
        'minute', f'{predict_feature}', f'predicted_{predict_feature}', 'error'
    ]

    date_str = (f'Dates: {timeseries_df["minute"].iloc[0]} '
                f'to '
                f'{timeseries_df["minute"].iloc[-1]}')

    log.info(f'historical {predict_feature} with predicted {predict_feature}: '
             f'{timeseries_df[output_features]}')
    log.info(date_str)
    log.info(f'Columns: {output_features}')

    average_error = ae_consts.to_f(timeseries_df['error'].sum() /
                                   len(timeseries_df.index))

    log.info(f'Average historical {predict_feature} '
             f'vs predicted {predict_feature} error: '
             f'{average_error}')

    log.info(
        f'plotting historical {predict_feature} vs predicted {predict_feature}'
        f' from training with columns={num_features}')

    ts_filter = (timeseries_df[f'{predict_feature}'] > 0.1)
    latest_feature = (timeseries_df[ts_filter][f'{predict_feature}'].iloc[-1])
    latest_predicted_feature = (
        timeseries_df[ts_filter][f'predicted_{predict_feature}'].iloc[-1])

    log.info(f'{end_date} {predict_feature}={latest_feature} '
             f'with '
             f'predicted_{predict_feature}={latest_predicted_feature}')

    plot_trading_history.plot_trading_history(
        title=(f'{ticker} - Historical {predict_feature.title()} vs '
               f'Predicted {predict_feature.title()}\n'
               f'Number of Training Features: {num_features}\n'
               f'{date_str}'),
        df=timeseries_df,
        red=f'{predict_feature}',
        blue=f'predicted_{predict_feature}',
        green=None,
        orange=None,
        date_col='minute',
        date_format='%d %H:%M:%S\n%b',
        xlabel='minute',
        ylabel=(f'Historical {predict_feature.title()} vs '
                f'Predicted {predict_feature.title()}'),
        df_filter=ts_filter,
        width=8.0,
        height=8.0,
        show_plot=True,
        dropna_for_all=False,
        send_plots_to_slack=send_plots_to_slack)
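
The dnn_config and compile_config dictionaries are passed straight through to build_dnn.build_regression_dnn, so their exact schema belongs to that helper and the sketch below is a hedged assumption; the only key create_column_dnn itself touches is dnn_config['layers'][-1]['activation']:

# hedged sketch of the assumed config shapes; the real schema is owned
# by build_dnn.build_regression_dnn and may differ. The metric names
# match the series plotted by plot_dnn_fit_history above.
example_dnn_config = {
    'layers': [
        {'num_neurons': 200, 'activation': 'relu'},
        # the last layer's 'activation' is rewritten to 'sigmoid'
        # above when scaler-normalized datasets are used
        {'num_neurons': 1, 'activation': 'linear'}
    ]
}
example_compile_config = {
    'loss': 'mse',
    'optimizer': 'adam',
    'metrics': [
        'mean_squared_error',
        'mean_absolute_error',
        'acc',
        'cosine_proximity'
    ]
}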
def plot_local_history_file():
    """plot_local_history_file

    Plot an algorithm trading history file from local disk
    """

    log.debug('start - plot')

    parser = argparse.ArgumentParser(
        description=('plot a local algorithm trading history file'))
    parser.add_argument('-f',
                        help=('plot this trading history dataframe '
                              'saved in this file'),
                        required=False,
                        dest='history_json_file')
    parser.add_argument('-d',
                        help=('debug'),
                        required=False,
                        dest='debug',
                        action='store_true')
    args = parser.parse_args()

    history_json_file = None
    debug = False

    if args.history_json_file:
        history_json_file = args.history_json_file
    if args.debug:
        debug = True

    if not history_json_file:
        log.error('usage error - please run with: '
                  '-f <path to local trading history file>')
        return
    elif not os.path.exists(history_json_file):
        log.error(
            'did not find trading history file={}'.format(history_json_file))
        return
    # end of checking the file arg is set and exists on disk

    log.info('plotting history to: {}'.format(history_json_file))
    history_df = pd.read_json(history_json_file, orient='records')

    history_df['date'] = pd.to_datetime(history_df['date'])
    history_df['minute'] = pd.to_datetime(history_df['minute'])
    ticker = history_df['ticker'].iloc[0]

    log.info('plotting history')

    first_date = history_df['date'].iloc[0]
    end_date = history_df['date'].iloc[-1]
    title = ('Trading History {}\n'
             'Backtest dates from {} to {}'.format(ticker, first_date,
                                                   end_date))
    use_xcol = 'date'
    use_as_date_format = '%d\n%b'
    use_minute = False
    if 'minute' in history_df:
        found_valid_minute = history_df['minute'].iloc[0]
        if found_valid_minute:
            use_minute = True

    if use_minute:
        use_xcol = 'minute'
        use_as_date_format = '%d %H:%M:%S\n%b'
    xlabel = 'Dates vs Algo values'
    ylabel = 'Algo values'
    df_filter = (history_df['close'] > 1.00)

    # set default hloc columns:
    blue = None
    green = None
    orange = None

    red = 'close'
    blue = 'balance'

    if debug:
        for i, r in history_df.iterrows():
            log.info('{} - {}'.format(r['minute'], r['close']))
    # end of debug

    show_plot = True
    if show_plot:
        plot_trading_history.plot_trading_history(
            title=title,
            df=history_df,
            red=red,
            blue=blue,
            green=green,
            orange=orange,
            date_col=use_xcol,
            date_format=use_as_date_format,
            xlabel=xlabel,
            ylabel=ylabel,
            df_filter=df_filter,
            show_plot=True,
            dropna_for_all=True)
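
plot_local_history_file expects a records-oriented JSON file with at least the date, minute, ticker, close and balance columns it reads above; here is a hedged sketch of writing a compatible file with pandas (the path and values are illustrative):

def example_write_history_json(path='/tmp/trade_history_SPY.json'):
    # hedged sketch: builds a tiny records-oriented JSON file with the
    # columns plot_local_history_file reads (date, minute, ticker,
    # close, balance); the path and the values are illustrative
    df = pd.DataFrame([
        {'ticker': 'SPY', 'date': '2019-01-02',
         'minute': '2019-01-02 09:30:00', 'close': 250.18,
         'balance': 10000.0},
        {'ticker': 'SPY', 'date': '2019-01-02',
         'minute': '2019-01-02 09:31:00', 'close': 250.42,
         'balance': 10003.1},
    ])
    df.to_json(path, orient='records', date_format='iso')
    return path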
def run_backtest_and_plot_history(config_dict):
    """run_backtest_and_plot_history

    Run a derived algorithm with an algorithm config dictionary

    :param config_dict: algorithm config dictionary
    """

    log.debug('start - sa')

    parser = argparse.ArgumentParser(description=('stock analysis tool'))
    parser.add_argument('-t', help=('ticker'), required=False, dest='ticker')
    parser.add_argument('-e',
                        help=('file path to extract an '
                              'algorithm-ready datasets from redis'),
                        required=False,
                        dest='algo_extract_loc')
    parser.add_argument('-l',
                        help=('show dataset in this file'),
                        required=False,
                        dest='show_from_file')
    parser.add_argument('-H',
                        help=('show trading history dataset in this file'),
                        required=False,
                        dest='show_history_from_file')
    parser.add_argument(
        '-E',
        help=('show trading performance report dataset in this file'),
        required=False,
        dest='show_report_from_file')
    parser.add_argument(
        '-L',
        help=('restore an algorithm-ready dataset file back into redis'),
        required=False,
        dest='restore_algo_file')
    parser.add_argument('-f',
                        help=('save the trading history dataframe '
                              'to this file'),
                        required=False,
                        dest='history_json_file')
    parser.add_argument(
        '-J',
        help=('plot action - after preparing you can use: '
              '-J show to open the image (good for debugging)'),
        required=False,
        dest='plot_action')
    parser.add_argument(
        '-b',
        help=('run a backtest using the dataset in '
              'a file path/s3 key/redis key formats: '
              'file:/opt/sa/tests/datasets/algo/SPY-latest.json or '
              's3://algoready/SPY-latest.json or '
              'redis://SPY-latest'),
        required=False,
        dest='backtest_loc')
    parser.add_argument('-B',
                        help=('optional - broker url for Celery'),
                        required=False,
                        dest='broker_url')
    parser.add_argument('-C',
                        help=('optional - backend url for Celery'),
                        required=False,
                        dest='backend_url')
    parser.add_argument(
        '-w',
        help=('optional - flag for publishing an algorithm job '
              'using Celery to the analysis_engine workers'),
        required=False,
        dest='run_on_engine',
        action='store_true')
    parser.add_argument('-k',
                        help=('optional - s3 access key'),
                        required=False,
                        dest='s3_access_key')
    parser.add_argument('-K',
                        help=('optional - s3 secret key'),
                        required=False,
                        dest='s3_secret_key')
    parser.add_argument('-a',
                        help=('optional - s3 address format: <host:port>'),
                        required=False,
                        dest='s3_address')
    parser.add_argument('-Z',
                        help=('optional - s3 secure: default False'),
                        required=False,
                        dest='s3_secure')
    parser.add_argument('-s',
                        help=('optional - start date: YYYY-MM-DD'),
                        required=False,
                        dest='start_date')
    parser.add_argument('-n',
                        help=('optional - end date: YYYY-MM-DD'),
                        required=False,
                        dest='end_date')
    parser.add_argument('-u',
                        help=('optional - s3 bucket name'),
                        required=False,
                        dest='s3_bucket_name')
    parser.add_argument('-G',
                        help=('optional - s3 region name'),
                        required=False,
                        dest='s3_region_name')
    parser.add_argument('-g',
                        help=('Path to a custom algorithm module file '
                              'on disk. This module must have a single '
                              'class that inherits from: '
                              'https://github.com/AlgoTraders/'
                              'stock-analysis-engine/blob/master/'
                              'analysis_engine/algo.py Additionally you '
                              'can find the Example-Minute-Algorithm here: '
                              'https://github.com/AlgoTraders/'
                              'stock-analysis-engine/blob/master/'
                              'analysis_engine/mocks/'
                              'example_algo_minute.py'),
                        required=False,
                        dest='run_algo_in_file')
    parser.add_argument('-p',
                        help=('optional - s3 bucket/file for trading history'),
                        required=False,
                        dest='algo_history_loc')
    parser.add_argument(
        '-o',
        help=('optional - s3 bucket/file for trading performance report'),
        required=False,
        dest='algo_report_loc')
    parser.add_argument('-r',
                        help=('optional - redis_address format: <host:port>'),
                        required=False,
                        dest='redis_address')
    parser.add_argument('-R',
                        help=('optional - redis and s3 key name'),
                        required=False,
                        dest='keyname')
    parser.add_argument(
        '-m',
        help=('optional - redis database number (0 by default)'),
        required=False,
        dest='redis_db')
    parser.add_argument('-x',
                        help=('optional - redis expiration in seconds'),
                        required=False,
                        dest='redis_expire')
    parser.add_argument(
        '-c',
        help=('optional - algorithm config_file path for setting '
              'up internal algorithm trading strategies and '
              'indicators'),
        required=False,
        dest='config_file')
    parser.add_argument('-v',
                        help=('set the Algorithm to verbose logging'),
                        required=False,
                        dest='verbose_algo',
                        action='store_true')
    parser.add_argument(
        '-P',
        help=('set the Algorithm\'s IndicatorProcessor to verbose logging'),
        required=False,
        dest='verbose_processor',
        action='store_true')
    parser.add_argument(
        '-I',
        help=('set all Algorithm\'s Indicators to verbose logging '
              '(note individual indicators support a \'verbose\' key '
              'that can be set to True to debug just one '
              'indicator)'),
        required=False,
        dest='verbose_indicators',
        action='store_true')
    parser.add_argument(
        '-V',
        help=('inspect the datasets an algorithm is processing - this '
              'will slow down processing to show debugging'),
        required=False,
        dest='inspect_datasets',
        action='store_true')
    parser.add_argument(
        '-j',
        help=('run the algorithm on just this specific date in the datasets '
              '- specify the date in a format: YYYY-MM-DD like: 2018-11-29'),
        required=False,
        dest='run_this_date')
    parser.add_argument('-d',
                        help=('debug'),
                        required=False,
                        dest='debug',
                        action='store_true')
    args = parser.parse_args()

    ticker = None
    use_balance = 10000.0
    use_commission = 6.0
    use_start_date = None
    use_end_date = None
    use_config_file = None
    debug = False
    verbose_algo = None
    verbose_processor = None
    verbose_indicators = None
    inspect_datasets = None
    history_json_file = None
    run_this_date = None
    algo_obj = None
    algo_history_loc = 's3://algohistory'
    algo_report_loc = 's3://algoreport'
    algo_extract_loc = 's3://algoready'
    backtest_loc = None

    ssl_options = ae_consts.SSL_OPTIONS
    transport_options = ae_consts.TRANSPORT_OPTIONS
    broker_url = ae_consts.WORKER_BROKER_URL
    backend_url = ae_consts.WORKER_BACKEND_URL
    path_to_config_module = ae_consts.WORKER_CELERY_CONFIG_MODULE
    include_tasks = ae_consts.INCLUDE_TASKS
    load_from_s3_bucket = None
    load_from_s3_key = None
    load_from_redis_key = None
    load_from_file = None
    load_compress = False
    load_publish = True
    load_config = None
    report_redis_key = None
    report_s3_bucket = None
    report_s3_key = None
    report_file = None
    report_compress = False
    report_publish = True
    report_config = None
    history_redis_key = None
    history_s3_bucket = None
    history_s3_key = None
    history_file = None
    history_compress = False
    history_publish = True
    history_config = None
    extract_redis_key = None
    extract_s3_bucket = None
    extract_s3_key = None
    extract_file = None
    extract_save_dir = None
    extract_compress = False
    extract_publish = True
    extract_config = None
    s3_enabled = True
    s3_access_key = ae_consts.S3_ACCESS_KEY
    s3_secret_key = ae_consts.S3_SECRET_KEY
    s3_region_name = ae_consts.S3_REGION_NAME
    s3_address = ae_consts.S3_ADDRESS
    s3_bucket_name = ae_consts.S3_BUCKET
    s3_key = None
    s3_secure = ae_consts.S3_SECURE
    redis_enabled = True
    redis_address = ae_consts.REDIS_ADDRESS
    redis_key = None
    redis_password = ae_consts.REDIS_PASSWORD
    redis_db = ae_consts.REDIS_DB
    redis_expire = ae_consts.REDIS_EXPIRE
    redis_serializer = 'json'
    redis_encoding = 'utf-8'
    publish_to_s3 = True
    publish_to_redis = True
    publish_to_slack = True
    slack_enabled = False
    slack_code_block = False
    slack_full_width = False

    dataset_type = ae_consts.SA_DATASET_TYPE_ALGO_READY
    serialize_datasets = ae_consts.DEFAULT_SERIALIZED_DATASETS
    compress = False
    encoding = 'utf-8'
    debug = False
    run_on_engine = False

    auto_fill = True
    timeseries = 'minute'
    trade_strategy = 'count'

    if args.s3_access_key:
        s3_access_key = args.s3_access_key
    if args.s3_secret_key:
        s3_secret_key = args.s3_secret_key
    if args.s3_region_name:
        s3_region_name = args.s3_region_name
    if args.s3_address:
        s3_address = args.s3_address
    if args.s3_secure:
        s3_secure = args.s3_secure
    if args.redis_address:
        redis_address = args.redis_address
    if args.redis_db:
        redis_db = args.redis_db
    if args.redis_expire:
        redis_expire = args.redis_expire
    if args.history_json_file:
        history_json_file = args.history_json_file
    if args.ticker:
        ticker = args.ticker.upper()
    if args.debug:
        debug = True
    if args.verbose_algo:
        verbose_algo = True
    if args.verbose_processor:
        verbose_processor = True
    if args.verbose_indicators:
        verbose_indicators = True
    if args.inspect_datasets:
        inspect_datasets = True
    if args.run_this_date:
        run_this_date = args.run_this_date
    if args.start_date:
        try:
            use_start_date = '{} 00:00:00'.format(str(args.start_date))
            datetime.datetime.strptime(args.start_date,
                                       ae_consts.COMMON_DATE_FORMAT)
        except Exception as e:
            msg = ('please use a start date formatted as: {}'
                   '\n'
                   'error was: {}'.format(ae_consts.COMMON_DATE_FORMAT, e))
            log.error(msg)
            sys.exit(1)
        # end of testing for a valid date
    # end of args.start_date
    if args.end_date:
        try:
            use_end_date = '{} 00:00:00'.format(str(args.end_date))
            datetime.datetime.strptime(args.end_date,
                                       ae_consts.COMMON_DATE_FORMAT)
        except Exception as e:
            msg = ('please use an end date formatted as: {}'
                   '\n'
                   'error was: {}'.format(ae_consts.COMMON_DATE_FORMAT, e))
            log.error(msg)
            sys.exit(1)
        # end of testing for a valid date
    # end of args.end_date
    algo_mod_path = None
    if args.run_algo_in_file:
        if not os.path.exists(args.run_algo_in_file):
            log.error('missing algorithm module file: {}'.format(
                args.run_algo_in_file))
            sys.exit(1)
        algo_mod_path = args.run_algo_in_file
    if args.config_file:
        use_config_file = args.config_file
        if not os.path.exists(use_config_file):
            log.error('Failed: unable to find config file: -c {}'.format(
                use_config_file))
            sys.exit(1)
        with open(use_config_file) as config_fh:
            config_dict = json.loads(config_fh.read())
        algo_mod_path = config_dict.get('algo_path', algo_mod_path)
        if not algo_mod_path or not os.path.exists(algo_mod_path):
            log.error('missing algorithm module file from config: {}'.format(
                algo_mod_path))
            sys.exit(1)
    """
    Finalize the algo config
    """
    if config_dict:
        use_balance = float(config_dict.get('balance', use_balance))
        use_commission = float(config_dict.get('commission', use_commission))
        ticker = str(config_dict.get('ticker', ticker)).upper()

        config_dict['ticker'] = ticker
        config_dict['balance'] = use_balance
        config_dict['commission'] = use_commission
    else:
        # config_dict can be None when no -c config file is given
        config_dict = config_dict or {}
        if not ticker:
            ticker = str(config_dict.get('ticker', ae_consts.TICKER)).upper()
    if not ticker:
        log.error('usage error: please set a ticker with -t <TICKER>')
        sys.exit(1)

    if verbose_algo:
        config_dict['verbose'] = verbose_algo
    if verbose_processor:
        config_dict['verbose_processor'] = verbose_processor
    if verbose_indicators:
        config_dict['verbose_indicators'] = verbose_indicators
    if inspect_datasets:
        config_dict['inspect_datasets'] = inspect_datasets
    if run_this_date:
        config_dict['run_this_date'] = run_this_date
    """
    Run a custom algo module from disk
    """
    if algo_mod_path:

        if args.backtest_loc:
            backtest_loc = args.backtest_loc
            if ('file:/' not in backtest_loc and 's3://' not in backtest_loc
                    and 'redis://' not in backtest_loc):
                log.error(
                    'invalid -b <backtest dataset file> specified. '
                    '{} '
                    'please use either: '
                    '-b file:/opt/sa/tests/datasets/algo/SPY-latest.json or '
                    '-b s3://algoready/SPY-latest.json or '
                    '-b redis://SPY-latest'.format(backtest_loc))
                sys.exit(1)
            if 's3://' in backtest_loc:
                load_from_s3_bucket = backtest_loc.split('/')[-2]
                load_from_s3_key = backtest_loc.split('/')[-1]
            elif 'redis://' in backtest_loc:
                load_from_redis_key = backtest_loc.split('/')[-1]
            elif 'file:/' in backtest_loc:
                load_from_file = backtest_loc.split(':')[-1]
            load_publish = True
        # end of parsing supported transport - loading an algo-ready

        if args.algo_history_loc:
            algo_history_loc = args.algo_history_loc
            if ('file:/' not in algo_history_loc
                    and 's3://' not in algo_history_loc
                    and 'redis://' not in algo_history_loc):
                log.error(
                    'invalid -p <algo history location> specified. '
                    '{} '
                    'please use either: '
                    '-p file:/opt/sa/tests/datasets/algo/SPY-latest.json or '
                    '-p s3://algoready/SPY-latest.json or '
                    '-p redis://SPY-latest'.format(algo_history_loc))
                sys.exit(1)
            if 's3://' in algo_history_loc:
                history_s3_bucket = algo_history_loc.split('/')[-2]
                history_s3_key = algo_history_loc.split('/')[-1]
            elif 'redis://' in algo_history_loc:
                history_redis_key = algo_history_loc.split('/')[-1]
            elif 'file:/' in algo_history_loc:
                history_file = algo_history_loc.split(':')[-1]
            history_publish = True
        # end of parsing supported transport - trading history

        if args.algo_report_loc:
            algo_report_loc = args.algo_report_loc
            if ('file:/' not in algo_report_loc
                    and 's3://' not in algo_report_loc
                    and 'redis://' not in algo_report_loc):
                log.error(
                    'invalid -o <algo report location> specified. '
                    '{} '
                    'please use either: '
                    '-o file:/opt/sa/tests/datasets/algo/SPY-latest.json or '
                    '-o s3://algoready/SPY-latest.json or '
                    '-o redis://SPY-latest'.format(algo_report_loc))
                sys.exit(1)
            if 's3://' in algo_report_loc:
                report_s3_bucket = algo_report_loc.split('/')[-2]
                report_s3_key = algo_report_loc.split('/')[-1]
            elif 'redis://' in algo_report_loc:
                report_redis_key = algo_report_loc.split('/')[-1]
            elif 'file:/' in algo_report_loc:
                report_file = algo_report_loc.split(':')[-1]
            report_publish = True
        # end of parsing supported transport - trading performance report

        if args.algo_extract_loc:
            algo_extract_loc = args.algo_extract_loc
            if ('file:/' not in algo_extract_loc
                    and 's3://' not in algo_extract_loc
                    and 'redis://' not in algo_extract_loc):
                log.error(
                    'invalid -e <algo extract location> specified. '
                    '{} '
                    'please use either: '
                    '-e file:/opt/sa/tests/datasets/algo/SPY-latest.json or '
                    '-e s3://algoready/SPY-latest.json or '
                    '-e redis://SPY-latest'.format(algo_extract_loc))
                sys.exit(1)
            if 's3://' in algo_extract_loc:
                extract_s3_bucket = algo_extract_loc.split('/')[-2]
                extract_s3_key = algo_extract_loc.split('/')[-1]
            elif 'redis://' in algo_extract_loc:
                extract_redis_key = algo_extract_loc.split('/')[-1]
            elif 'file:/' in algo_extract_loc:
                extract_file = algo_extract_loc.split(':')[-1]
            extract_publish = True
        # end of parsing supported transport - extract algorithm-ready
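
        # each parsed location also flips its publish flag; these feed
        # the dataset_publish_* arguments passed to run_custom_algo below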

        if args.run_on_engine:
            run_on_engine = True
            if verbose_algo:
                log.info('starting algo on the engine')
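
        # run_on_engine appears to hand the backtest to a distributed
        # engine; the auth_url/backend_url/ssl_options/transport_options
        # arguments below look like standard celery connection settings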

        use_name = config_dict.get('name', 'missing-algo-name')
        auto_fill = config_dict.get('auto_fill', auto_fill)
        timeseries = config_dict.get('timeseries', timeseries)
        trade_strategy = config_dict.get('trade_strategy', trade_strategy)

        algo_res = run_custom_algo.run_custom_algo(
            mod_path=algo_mod_path,
            ticker=config_dict['ticker'],
            balance=config_dict['balance'],
            commission=config_dict['commission'],
            name=use_name,
            start_date=use_start_date,
            end_date=use_end_date,
            auto_fill=auto_fill,
            config_dict=config_dict,
            load_from_s3_bucket=load_from_s3_bucket,
            load_from_s3_key=load_from_s3_key,
            load_from_redis_key=load_from_redis_key,
            load_from_file=load_from_file,
            load_compress=load_compress,
            load_publish=load_publish,
            load_config=load_config,
            report_redis_key=report_redis_key,
            report_s3_bucket=report_s3_bucket,
            report_s3_key=report_s3_key,
            report_file=report_file,
            report_compress=report_compress,
            report_publish=report_publish,
            report_config=report_config,
            history_redis_key=history_redis_key,
            history_s3_bucket=history_s3_bucket,
            history_s3_key=history_s3_key,
            history_file=history_file,
            history_compress=history_compress,
            history_publish=history_publish,
            history_config=history_config,
            extract_redis_key=extract_redis_key,
            extract_s3_bucket=extract_s3_bucket,
            extract_s3_key=extract_s3_key,
            extract_file=extract_file,
            extract_save_dir=extract_save_dir,
            extract_compress=extract_compress,
            extract_publish=extract_publish,
            extract_config=extract_config,
            publish_to_slack=publish_to_slack,
            publish_to_s3=publish_to_s3,
            publish_to_redis=publish_to_redis,
            dataset_type=dataset_type,
            serialize_datasets=serialize_datasets,
            compress=compress,
            encoding=encoding,
            redis_enabled=redis_enabled,
            redis_key=redis_key,
            redis_address=redis_address,
            redis_db=redis_db,
            redis_password=redis_password,
            redis_expire=redis_expire,
            redis_serializer=redis_serializer,
            redis_encoding=redis_encoding,
            s3_enabled=s3_enabled,
            s3_key=s3_key,
            s3_address=s3_address,
            s3_bucket=s3_bucket_name,
            s3_access_key=s3_access_key,
            s3_secret_key=s3_secret_key,
            s3_region_name=s3_region_name,
            s3_secure=s3_secure,
            slack_enabled=slack_enabled,
            slack_code_block=slack_code_block,
            slack_full_width=slack_full_width,
            dataset_publish_extract=extract_publish,
            dataset_publish_history=history_publish,
            dataset_publish_report=report_publish,
            run_on_engine=run_on_engine,
            auth_url=broker_url,
            backend_url=backend_url,
            include_tasks=include_tasks,
            ssl_options=ssl_options,
            transport_options=transport_options,
            path_to_config_module=path_to_config_module,
            timeseries=timeseries,
            trade_strategy=trade_strategy,
            verbose=verbose_algo)
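
        # algo_res is expected to be a dict with at least 'status',
        # 'err' and 'algo' keys; local (non-engine) runs also include
        # 'rec' with the trade history records inspected below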

        show_label = 'algo.name={}'.format(use_name)
        show_extract = '{}'.format(algo_extract_loc)
        show_history = '{}'.format(algo_history_loc)
        show_report = '{}'.format(algo_report_loc)
        base_label = ('load={} '
                      'extract={} '
                      'history={} '
                      'report={}'.format(args.run_algo_in_file, show_extract,
                                         show_history, show_report))
        algo_obj = algo_res.get('algo', None)
        if not algo_obj:
            log.error(
                '{} - failed creating algorithm object'.format(show_label))
            sys.exit(1)
        if not run_on_engine:
            algo_trade_history_recs = algo_res['rec'].get('history', [])
            show_label = ('{} algo.name={} {} trade_history_len={}'.format(
                ticker, use_name, base_label, len(algo_trade_history_recs)))
        if args.debug:
            log.info('algo_res={}'.format(algo_res))
        if algo_res['status'] == ae_consts.SUCCESS:
            log.info('{} - done running {}'.format(
                ae_consts.get_status(status=algo_res['status']),
                show_label))
        elif args.debug:
            # in debug mode log the failure but keep going so the run
            # details can still be inspected
            log.error('{} - done running {}'.format(
                ae_consts.get_status(status=algo_res['status']),
                show_label))
        else:
            log.error('run_custom_algo returned error: {}'.format(
                algo_res['err']))
            sys.exit(1)
        # end of running the custom algo handler

    # end if running a custom algorithm module
    else:
        if args.backtest_loc:
            backtest_loc = args.backtest_loc
            if ('file:/' not in backtest_loc and 's3://' not in backtest_loc
                    and 'redis://' not in backtest_loc):
                log.error(
                    'invalid -b <backtest dataset file> specified. '
                    '{} '
                    'please use either: '
                    '-b file:/opt/sa/tests/datasets/algo/SPY-latest.json or '
                    '-b s3://algoready/SPY-latest.json or '
                    '-b redis://SPY-latest'.format(backtest_loc))
                sys.exit(1)
            load_from_s3_bucket = None
            load_from_s3_key = None
            load_from_redis_key = None
            load_from_file = None

            if 's3://' in backtest_loc:
                load_from_s3_bucket = backtest_loc.split('/')[-2]
                load_from_s3_key = backtest_loc.split('/')[-1]
            elif 'redis://' in backtest_loc:
                load_from_redis_key = backtest_loc.split('/')[-1]
            elif 'file:/' in backtest_loc:
                load_from_file = backtest_loc.split(':')[-1]
            # end of parsing supported transport - loading an algo-ready
        # end of backtest_loc
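
        # with no custom module, fall back to the bundled
        # ExampleCustomAlgo: first build a load request describing where
        # the algo-ready dataset lives (file, redis or s3)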

        load_config = build_publish_request.build_publish_request(
            ticker=ticker,
            output_file=load_from_file,
            s3_bucket=load_from_s3_bucket,
            s3_key=load_from_s3_key,
            redis_key=load_from_redis_key,
            redis_address=redis_address,
            redis_db=redis_db,
            redis_password=redis_password,
            redis_expire=redis_expire,
            s3_address=s3_address,
            s3_access_key=s3_access_key,
            s3_secret_key=s3_secret_key,
            s3_region_name=s3_region_name,
            s3_secure=s3_secure,
            verbose=debug,
            label='load-{}'.format(backtest_loc))
        if load_from_file:
            load_config['output_file'] = load_from_file
        if load_from_redis_key:
            load_config['redis_key'] = load_from_redis_key
            load_config['redis_enabled'] = True
        if load_from_s3_bucket and load_from_s3_key:
            load_config['s3_bucket'] = load_from_s3_bucket
            load_config['s3_key'] = load_from_s3_key
            load_config['s3_enabled'] = True
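
        # the *_enabled flags select which transport the loader uses;
        # each is only set when a matching location was parsed above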

        log.info('starting algo')

        # ticker was already validated and synced with the config above,
        # so use it directly instead of indexing a possibly-None config
        algo_obj = ExampleCustomAlgo(ticker=ticker,
                                     config_dict=config_dict)

        algo_res = run_algo.run_algo(ticker=ticker,
                                     algo=algo_obj,
                                     start_date=use_start_date,
                                     end_date=use_end_date,
                                     raise_on_err=True)
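
        # raise_on_err=True presumably re-raises exceptions from the
        # backtest instead of only recording them in algo_res['err']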

        if algo_res['status'] != ae_consts.SUCCESS:
            log.error('failed running algo backtest '
                      '{} hit status: {} error: {}'.format(
                          algo_obj.get_name(),
                          ae_consts.get_status(status=algo_res['status']),
                          algo_res['err']))
            return
        # if not successful

        log.info('backtest: {} {}'.format(
            algo_obj.get_name(),
            ae_consts.get_status(status=algo_res['status'])))
    # end of use custom algo or not
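
    # both branches converge here: optionally persist the trade history
    # to json, then render it with plot_trading_history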

    if algo_obj:

        trading_history_dict = algo_obj.get_history_dataset()
        history_df = trading_history_dict[ticker]
        if not hasattr(history_df, 'to_json'):
            # nothing to save or plot if the loaded history is not a
            # dataframe
            log.error('no trade history dataframe found for: {}'.format(
                ticker))
            return

        if history_json_file:
            log.info('saving history to: {}'.format(history_json_file))
            history_df.to_json(history_json_file,
                               orient='records',
                               date_format='iso')

        log.info('plotting history')

        first_date = history_df['date'].iloc[0]
        end_date = history_df['date'].iloc[-1]
        title = ('Trading History {} for Algo {}\n'
                 'Backtest dates from {} to {}'.format(
                     ticker, trading_history_dict['algo_name'], first_date,
                     end_date))
        use_xcol = 'date'
        use_as_date_format = '%d\n%b'
        if config_dict.get('timeseries', '') == 'minute':
            use_xcol = 'minute'
            use_as_date_format = '%d %H:%M:%S\n%b'
        xlabel = 'Dates vs {} values'.format(trading_history_dict['algo_name'])
        ylabel = 'Algo {}\nvalues'.format(trading_history_dict['algo_name'])
        df_filter = (history_df['close'] > 0.01)
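
        # df_filter keeps only rows with a real closing price
        # (close > 0.01) so empty rows do not distort the plot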

        # set default hloc columns - each maps a plot color to a
        # dataframe column; None leaves that color unused:
        red = 'close'
        blue = 'balance'
        green = None
        orange = None

        if debug:
            for i, r in history_df.iterrows():
                # use the active x-axis column so daily histories
                # without a 'minute' column do not raise a KeyError
                log.debug('{} - {}'.format(r[use_xcol], r['close']))

        plot_trading_history.plot_trading_history(
            title=title,
            df=history_df,
            red=red,
            blue=blue,
            green=green,
            orange=orange,
            date_col=use_xcol,
            date_format=use_as_date_format,
            xlabel=xlabel,
            ylabel=ylabel,
            df_filter=df_filter,
            show_plot=True,
            dropna_for_all=True)
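
        # example CLI usage (script name is illustrative; flags match
        # the argparse errors above):
        #   python run_backtest_and_plot_history.py \
        #       -t SPY \
        #       -c /path/to/config.json \
        #       -b s3://algoready/SPY-latest.json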