Example #1
    def test_build_colorized_logger_with_splunk_user_and_password(self):
        """test_build_colorized_logger"""
        log = build_colorized_logger(
            name='build_colorized_logger_with_splunk',
            splunk_user='******',
            splunk_password='******',
            splunk_sleep_interval=0)
        self.assertIsNotNone(log)
Example #2
# imports needed by the code below (the excerpt omits its import block);
# wait_for_exit is also required but its import path is not shown here
import uuid
from spylunking.log.setup_logging import build_colorized_logger


def run_main():
    """run_main"""

    log = build_colorized_logger(
        name='helloworld',
        splunk_user='******',
        splunk_password='******',
        # handler_name='simple',
        # handler_name='not-real',
        # splunk_address='localhost:8088',
        # splunk_token='55df5127-cb0e-4182-932e-c71c454699b8',
        splunk_debug=False)

    max_recs = 1
    msg_sent = 0
    not_done = True
    while not_done:

        log.debug('testing DEBUG message_id={}'.format(str(uuid.uuid4())))
        log.info('testing INFO message_id={}'.format(str(uuid.uuid4())))
        log.error('testing ERROR message_id={}'.format(str(uuid.uuid4())))
        log.critical('testing CRITICAL message_id={}'.format(str(
            uuid.uuid4())))
        log.warning('testing WARNING message_id={}'.format(str(uuid.uuid4())))

        try:
            raise Exception('Throw for testing exceptions')
        except Exception as e:
            log.error(('Testing EXCEPTION with ex={} message_id={}').format(
                e, str(uuid.uuid4())))
        # end of try/ex

        msg_sent += 6
        if msg_sent >= max_recs:
            not_done = False
    # end of while
    """
    The threaded and multiprocessing Splunk Publishers
    require an exit sleep to prevent message loss when
    the parent process exits. The other handler(s) do not.
    """
    wait_for_exit.wait_for_exit(log)
Example #3
import glob
import copy
import random
import pandas as pd
from spylunking.log.setup_logging import build_colorized_logger
from antinex_utils.consts import VALID
from antinex_utils.consts import INVALID
from antinex_utils.utils import ev
from antinex_utils.utils import ppj
from antinex_utils.utils import rnow

log = build_colorized_logger(name='prepare_dataset_tools')


def find_all_headers(use_log_id=None, pipeline_files=[], label_rules=None):
    """find_all_headers

    :param use_log_id: label for debugging in logs
    :param pipeline_files: list of files to prep
    :param label_rules: dict of rules to apply
    """

    log_id = ""
    if use_log_id:
        log_id = use_log_id

    log.info(("{} find_all_headers - START").format(log_id))

    headers = ["src_file"]
    headers_dict = {"src_file": None}
Example #4
    export SHARED_LOG_CFG=/opt/sa/analysis_engine/log/debug-logging.json

"""

import redis
import pandas as pd
import analysis_engine.build_result as build_result
import analysis_engine.get_data_from_redis_key as redis_get
from spylunking.log.setup_logging import build_colorized_logger
from analysis_engine.consts import SUCCESS
from analysis_engine.consts import NOT_RUN
from analysis_engine.consts import ERR
from analysis_engine.consts import ev
from analysis_engine.consts import ppj

log = build_colorized_logger(name=__name__)


def build_df_from_redis(label=None,
                        client=None,
                        address=None,
                        host=None,
                        port=None,
                        password=None,
                        db=None,
                        key=None,
                        expire=None,
                        serializer='json',
                        encoding='utf-8',
                        orient='records'):
    """build_df_from_redis
Example #5
    .. note:: This command pushes data to the webapp
        in the other terminal listening on port ``3434``

    ::

        vprof -c cm ./analysis_engine/perf/profile_algo_runner.py
"""

import datetime
import vprof.runner as perf_runner
import analysis_engine.consts as ae_consts
import analysis_engine.algo_runner as algo_runner
import spylunking.log.setup_logging as log_utils

log = log_utils.build_colorized_logger(name='profile-algo')


def start():
    """start"""

    back_a_few_days = (datetime.datetime.now() - datetime.timedelta(days=3))
    start_date = back_a_few_days.strftime(ae_consts.COMMON_DATE_FORMAT)

    ticker = 'SPY'
    s3_bucket = 'perftests'
    s3_key = (f'{ticker}_{start_date}')

    algo_config = (f'./cfg/default_algo.json')
    history_loc = (f's3://{s3_bucket}/{s3_key}')
Example #6
::

    backtest_with_runner.py -l -t TICKER -b S3_BUCKET -k S3_KEY -c ALGO_CONFIG

Debug by adding ``-d`` as an argument
"""

import sys
import argparse
import analysis_engine.utils as ae_utils
import analysis_engine.algo_runner as algo_runner
import analysis_engine.plot_trading_history as plot
import spylunking.log.setup_logging as log_utils

log = log_utils.build_colorized_logger(name='algo-runner')


def backtest_with_runner():
    """backtest_with_runner

    build and publish a trading history from an algorithm config.

    ::

        backtest_with_runner.py -t TICKER -c ALGO_CONFIG -s START_DATE
        -k S3_KEY -b S3_BUCKET -l
    """

    parser = argparse.ArgumentParser(
        description=('backtest an algorithm and publish '
Example #7
import analysis_engine.iex.consts as iex_consts
import analysis_engine.api_requests as api_requests
import analysis_engine.work_tasks.get_new_pricing_data as task_pricing
import analysis_engine.work_tasks.task_screener_analysis as screener_utils
import analysis_engine.utils as ae_utils
import spylunking.log.setup_logging as log_utils


# Disable celery log hijacking
# https://github.com/celery/celery/issues/2509
@celery.signals.setup_logging.connect
def setup_celery_logging(**kwargs):
    pass


log = log_utils.build_colorized_logger(
    name='fetch', log_config_path=ae_consts.LOG_CONFIG_PATH)


def start_screener_analysis(req):
    """start_screener_analysis

    Start screener-driven analysis with a simple workflow
    (a hedged request sketch follows this list):

    1) Convert FinViz screeners into a list of tickers
       and ``pandas.DataFrame`` objects from each ticker's html row
    2) Build unique list of tickers
    3) Pull datasets for each ticker
    4) Run sell-side processing - coming soon
    5) Run buy-side processing - coming soon
    6) Issue alerts to slack - coming soon
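
A minimal sketch of kicking off the workflow described above; the request keys (label, screener_urls, tickers) are assumptions for illustration and are not taken from the source:

# hypothetical request dictionary - field names are assumed, not from the source
req = {
    'label': 'screener-analysis',
    'screener_urls': [
        'https://finviz.com/screener.ashx?v=111&f=exch_nyse'],
    'tickers': []}
start_screener_analysis(req=req)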
Example #8
a file in s3 from running the backtester with
the save to file option enabled:

::

    run_backtest_and_plot_history.py -t SPY -f <SAVE_HISTORY_TO_THIS_FILE>
"""

import argparse
import pandas as pd
import analysis_engine.consts as consts
import analysis_engine.plot_trading_history as plot_trading_history
import analysis_engine.load_history_dataset as load_history
import spylunking.log.setup_logging as log_utils

log = log_utils.build_colorized_logger(name='view-history-in-s3')


def plot_history_from_s3():
    """plot_history_from_s3

    Run a derived algorithm with an algorithm config dictionary

    :param config_dict: algorithm config dictionary
    """

    log.debug('start - plot')

    parser = argparse.ArgumentParser(
        description=('plot a local algorithm trading history file'))
    parser.add_argument('-b',
Example #9
import json
import pandas as pd
from celery import Celery
from spylunking.log.setup_logging import build_colorized_logger
from antinex_utils.consts import SUCCESS
from antinex_utils.consts import ERROR
from antinex_utils.consts import FAILED


log = build_colorized_logger(
    name='send_results')


def send_results_to_broker(
        loc,
        final_results):
    """send_results_to_broker

    :param loc: api-generated dictionary for where to send the results
    :param final_results: prediction results from the worker
    """

    log.info((
        'sending back={}').format(
            loc))

    status = ERROR
    org_model = None
    org_rounded = None
    org_train_scaler = None
    org_test_scaler = None
Example #10
import os
import json
import numpy as np
import pandas as pd
from spylunking.log.setup_logging import build_colorized_logger
from antinex_utils.utils import ev
from antinex_utils.consts import VALID
from antinex_utils.consts import INVALID
from antinex_utils.consts import ERROR
from sklearn.model_selection import train_test_split

log = build_colorized_logger(name='build_training_request')


def build_training_request(
        csv_file=ev("CSV_FILE", "/tmp/cleaned_attack_scans.csv"),
        meta_file=ev("CSV_META_FILE", "/tmp/cleaned_metadata.json"),
        predict_feature=ev("PREDICT_FEATURE", "label_value"),
        ignore_features=[
            "label_name",
            "ip_src",  # need to make this an int
            "ip_dst",  # need to make this an int
            "eth_src",  # need to make this an int
            "eth_dst"  # need to make this an int
        ],
        seed=None,
        test_size=float(ev("TEST_SIZE", "0.20")),
        preproc_rules=None):
    """build_training_request

    :param csv_file: csv file built with prepare_dataset.py
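
A usage sketch based on the defaults shown above; the call assumes the default CSV and metadata files exist:

# hypothetical call mirroring the documented defaults
train_req = build_training_request(
    csv_file='/tmp/cleaned_attack_scans.csv',
    meta_file='/tmp/cleaned_metadata.json',
    predict_feature='label_value',
    test_size=0.2)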
Example #11
from analysis_engine.consts import REDIS_PASSWORD
from analysis_engine.consts import REDIS_DB
from analysis_engine.consts import REDIS_EXPIRE
from analysis_engine.consts import get_status
from analysis_engine.consts import ppj
from analysis_engine.consts import is_celery_disabled


# Disable celery log hijacking
# https://github.com/celery/celery/issues/2509
@signals.setup_logging.connect
def setup_celery_logging(**kwargs):
    pass


log = build_colorized_logger(name='pub-tic-agg-s3-to-redis',
                             log_config_path=LOG_CONFIG_PATH)


def publish_ticker_aggregate_from_s3():
    """publish_ticker_aggregate_from_s3

    Download all ticker data from S3 and publish its contents
    to Redis and back to S3

    """

    log.info('start - publish_ticker_aggregate_from_s3')

    parser = argparse.ArgumentParser(
        description=('Download and aggregate all ticker data, '
                     'and store it in S3 and Redis. '))
Example #12
#!/usr/bin/env python

import celery
import analysis_engine.work_tasks.get_celery_app as get_celery_app
import analysis_engine.consts as consts
import spylunking.log.setup_logging as log_utils


# Disable celery log hijacking
# https://github.com/celery/celery/issues/2509
@celery.signals.setup_logging.connect
def setup_celery_logging(**kwargs):
    pass


log = log_utils.build_colorized_logger(name=consts.APP_NAME,
                                       log_config_path=consts.LOG_CONFIG_PATH)

log.info('start - {}'.format(consts.APP_NAME))

log.info('broker={} backend={} '
         'config={} include_tasks={}'.format(
             consts.WORKER_BROKER_URL, consts.WORKER_BACKEND_URL,
             consts.WORKER_CELERY_CONFIG_MODULE, consts.WORKER_TASKS))

# Get the Celery app from the project's get_celery_app module
app = get_celery_app.get_celery_app(
    name=consts.APP_NAME,
    path_to_config_module=consts.WORKER_CELERY_CONFIG_MODULE,
    auth_url=consts.WORKER_BROKER_URL,
    backend_url=consts.WORKER_BACKEND_URL,
    include_tasks=consts.INCLUDE_TASKS)
Example #13
    def test_build_colorized_logger_without_splunk(self):
        """test_build_colorized_logger"""
        log = build_colorized_logger(
            name='build_colorized_logger_without_splunk',
            splunk_sleep_interval=0)
        self.assertIsNotNone(log)
Example #14
import datetime
import json
import pandas as pd
import antinex_utils.make_predictions
from spylunking.log.setup_logging import build_colorized_logger
from antinex_utils.utils import ppj
from antinex_utils.consts import SUCCESS
from antinex_core.send_results_to_broker import send_results_to_broker

log = build_colorized_logger(name='processor')


class AntiNexProcessor:
    """
    AntiNexProcessor handles messages found in the subscribed queues.

    Conceptually ``Workers use a Processor`` to handle messages.

    This one is responsible for processing ``Prediction`` and ``Training``
    messages. It also manages a dictionary (``self.models``) of pre-trained
    deep neural networks that are reused by the ``label`` name inside the
    consumed JSON dictionary message.

    """
    def __init__(self, name="prc", max_msgs=100, max_models=100):
        """__init__

        :param name: log label
        :param max_msgs: num msgs to save for replay debugging (FIFO)
        :param max_models: num pre-trained models to keep in memory (FIFO)
        """
Example #15
a locally saved file from running the backtester with
the save to file option enabled:

::

    run_backtest_and_plot_history.py -t SPY -f <SAVE_HISTORY_TO_THIS_FILE>
"""

import os
import argparse
import pandas as pd
import analysis_engine.consts as ae_consts
import analysis_engine.plot_trading_history as plot_trading_history
import spylunking.log.setup_logging as log_utils

log = log_utils.build_colorized_logger(
    name='plot-history', log_config_path=ae_consts.LOG_CONFIG_PATH)


def plot_local_history_file():
    """plot_local_history_file

    Run a derived algorithm with an algorithm config dictionary

    :param config_dict: algorithm config dictionary
    """

    log.debug('start - plot')

    parser = argparse.ArgumentParser(
        description=('plot a local algorithm trading history file'))
    parser.add_argument('-f',
Example #16
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from keras.models import Sequential
from keras.layers import Dense
from keras.wrappers.scikit_learn import KerasClassifier
from sklearn.model_selection import cross_val_score
from sklearn.model_selection import StratifiedKFold
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
from sklearn.pipeline import Pipeline
from sklearn.metrics import confusion_matrix
from sklearn.model_selection import train_test_split
from spylunking.log.setup_logging import build_colorized_logger

log = build_colorized_logger(name='standalone_scaler_django')


def build_model(num_features, loss, optimizer, metrics):
    """build_model

    Build the Keras Deep Neural Network Model

    :param num_features: number of features
    :param loss: loss function to apply
    :param optimizer: optimizer to use
    :param metrics: list of metrics
    """
    log.info("building model")
    model = Sequential()
    model.add(
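
The excerpt above is cut off mid-call; here is a hedged sketch of the kind of network build_model might assemble. The layer sizes are illustrative and not taken from the source:

def build_model_sketch(num_features, loss, optimizer, metrics):
    """illustrative only - not the project's actual build_model body"""
    model = Sequential()
    model.add(Dense(8, input_dim=num_features, activation='relu'))
    model.add(Dense(1, activation='sigmoid'))
    model.compile(loss=loss, optimizer=optimizer, metrics=metrics)
    return model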
Example #17
from celery.task import task
from spylunking.log.setup_logging import build_colorized_logger
from celery_loaders.work_tasks.custom_task import CustomTask

log = build_colorized_logger(name='always_fails_tasks')


@task(bind=True, base=CustomTask, queue="always_fails")
def always_fails(self, work_dict):
    """always_fails

    :param work_dict: dictionary for key/values
    """

    label = "always_fails"

    log.info(("task - {} - start " "work_dict={}").format(label, work_dict))

    raise Exception(work_dict.get("test_failure", "simulating a failure"))

    log.info(("task - {} - done").format(label))

    return True


# end of always_fails
Example #18
#!/usr/bin/env python

import os
import sys
import datetime
import argparse
import json
import pandas as pd
from spylunking.log.setup_logging import build_colorized_logger
from celery_connectors.publisher import Publisher

log = build_colorized_logger(name='publish_regression_predict')


def publish_regression_prediction_to_broker():
    """publish_regression_prediction_to_broker

    Publish a Regression Prediction message to the
    Celery Worker's broker queue. This message
    is a JSON Dictionary.

    Default Broker: ``redis://localhost:6379/6``
    Default Exchange: ``webapp.predict.requests``
    Default Routing Key: ``webapp.predict.requests``
    Default Queue: ``webapp.predict.requests``

    """
    parser = argparse.ArgumentParser(description=("Launch a Predict Dataset "
                                                  "Request into the AntiNex "
                                                  "core"))
    parser.add_argument("-f",
Example #19
import pandas as pd
from sklearn.preprocessing import MinMaxScaler

from spylunking.log.setup_logging import build_colorized_logger
from antinex_utils.consts import SUCCESS
from antinex_utils.consts import ERR
from antinex_utils.consts import NOTRUN

log = build_colorized_logger(name='build_scaler_dataset')


def build_scaler_dataset_from_records(record_list,
                                      label="build-scaled-dataset",
                                      min_feature=-1,
                                      max_feature=1,
                                      cast_to_type="float32"):
    """build_scaler_dataset_from_records
    :param record_list: list of json records to scale between min/max
    :param label: log label for tracking
    :param min_feature: min feature range for scale normalization
    :param max_feature: max feature range for scale normalization
    :param cast_to_type: cast all of the dataframe to this datatype
    """

    status = NOTRUN
    last_step = "not-run"
    df = None
    scaler = None
    dataset = None

    res = {
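
The excerpt above is truncated; a usage sketch based on the signature shown, with illustrative records:

# hypothetical records scaled into the [-1, 1] feature range
scaled = build_scaler_dataset_from_records(
    record_list=[
        {'f1': 0.1, 'f2': 10.0},
        {'f1': 0.4, 'f2': 7.5}],
    label='demo-scale',
    min_feature=-1,
    max_feature=1)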
Example #20
import celery
from spylunking.log.setup_logging import build_colorized_logger


log = build_colorized_logger(
    name='custom-task')


class CustomTask(celery.Task):
    """CustomTask"""

    log_label = "custom_task"

    def on_success(self, retval, task_id, args, kwargs):
        """on_success

        http://docs.celeryproject.org/en/latest/reference/celery.app.task.html

        :param retval: return value
        :param task_id: celery task id
        :param args: arguments passed into task
        :param kwargs: keyword arguments passed into task
        """

        log.info(("{} SUCCESS - retval={} task_id={} "
                  "args={} kwargs={}")
                 .format(
                     self.log_label,
                     retval,
                     task_id,
                     args,
Example #21
#!/usr/bin/env python

import os
import uuid
from spylunking.log.setup_logging import build_colorized_logger
from celery_connectors.redis.redis_json_application import RedisJSONApplication

port = 6379
host = os.getenv("ENV_REDIS_HOST", "localhost").strip().lstrip()
db = int(os.getenv("ENV_REDIS_DB_ID", 0))
# if set to empty string use password=None
redis_pw = os.getenv("ENV_REDIS_PASSWORD", "")
queue_name = os.getenv("Q_1", "reporting.accounts").strip().lstrip()
name = "redis-producer"

log = build_colorized_logger(
    name='redis-publish')

log.info("START - {} - Sending messages to redis={}:{}/{} queue={}".format(name,
                                                                           host,
                                                                           port,
                                                                           db,
                                                                           queue_name))

if str(redis_pw) == "":
    redis_pw = None

app = RedisJSONApplication(name, redis_address=host, redis_port=port, redis_queue=queue_name, logger=log)
app.connect()

max_msgs = 1000
msgs = []
Example #22
    def __init__(self,
                 config_dict,
                 path_to_module=None,
                 name=None,
                 verbose=False):
        """__init__

        Base class for building your own indicators to work
        within an ``IndicatorProcessor``.

        Please override the ``self.process()`` method as needed

        .. tip:: any keys passed in with ``config_dict`` will
            become class member variables that can be accessed
            and used as normal member variables within the
            derived Indicator class

        :param config_dict: dictionary for this indicator
        :param name: name of the indicator
        :param path_to_module: work in progress -
            this will allow loading indicators from
            outside the repo like the derived algorithm
            classes
        :param verbose: optional - bool for toggling more logs
        """
        self.name = name
        self.log = log_utils.build_colorized_logger(name=name)

        self.config = config_dict
        self.path_to_module = path_to_module
        self.verbose = verbose

        if not self.config:
            raise Exception('please provide a config_dict for loading '
                            'the buy and sell rules for this indicator')

        if not self.verbose:
            self.verbose = self.config.get('verbose', False)

        if not self.name:
            self.name = 'ind_{}'.format(str(uuid.uuid4()).replace('-', ''))

        self.starter_dict = None
        self.previous_df = self.config.get('previous_df', None)
        self.name_of_df = self.config.get('uses_data', None)
        self.uses_data = self.name_of_df
        self.report = self.config.get('report', {})
        self.ind_id = self.report.get('id', self.name)
        self.metrics = self.report.get('metrics', {})
        self.ind_type = self.metrics.get('type',
                                         ae_consts.INDICATOR_TYPE_UNKNOWN)
        self.ind_category = self.metrics.get(
            'category', ae_consts.INDICATOR_CATEGORY_UNKNOWN)
        self.ind_uses_data = self.metrics.get(
            'ind_uses_data', ae_consts.INDICATOR_USES_DATA_ANY)
        self.dataset_df_str = self.config.get('dataset_df', None)

        self.report_key_prefix = self.report.get('report_key_prefix',
                                                 self.name)

        # this should be mostly numeric values
        # to allow converting to an AI-ready dataset
        # once the algorithm finishes
        self.report_dict = {
            'type': self.ind_type,
            'category': self.ind_category,
            'uses_data': self.ind_uses_data
        }

        self.report_ignore_keys = self.config.get(
            'report_ignore_keys', ae_consts.INDICATOR_IGNORED_CONIGURABLE_KEYS)
        self.use_df = pd.DataFrame(ae_consts.EMPTY_DF_LIST)
        self.configurables = self.config
        self.ind_confs = []
        self.convert_config_keys_to_members()
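
An illustrative, self-contained sketch of the behavior described in the docstring above (config_dict keys surfacing as member variables); it is a stand-in and does not use the project's real base class:

class ExampleIndicator:
    """illustrative stand-in, not the project's Indicator base class"""
    def __init__(self, config_dict, name=None, verbose=False):
        self.name = name or 'example'
        self.config = config_dict
        # mimic convert_config_keys_to_members(): each config key becomes a member
        for key, value in config_dict.items():
            setattr(self, key, value)


ind = ExampleIndicator({'num_points': 15, 'buy_below': 20}, name='willr')
print(ind.num_points, ind.buy_below)  # 15 20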
Example #23
#!/usr/bin/env python

import os
import json
from celery import Celery
from django.conf import settings
from spylunking.log.setup_logging import build_colorized_logger
from antinex_utils.utils import ppj


name = 'send-worker-publish-to-core'
log = build_colorized_logger(
    name=name)

os.environ.setdefault(
    "DJANGO_SETTINGS_MODULE",
    "drf_network_pipeline.settings")

log.info("creating celery app")
app = Celery("test-app")

app.config_from_object(
    "django.conf:settings",
    namespace="CELERY")

app.autodiscover_tasks(
    lambda: settings.INSTALLED_APPS)

datafile = "./drf_network_pipeline/tests/pubsub/publish-to-core.json"
data = {}
with open(datafile, "r") as f:
Example #24
import json
import pandas as pd
from spylunking.log.setup_logging import build_colorized_logger
from antinex_utils.consts import SUCCESS
from antinex_utils.consts import ERR
from antinex_utils.consts import NOTRUN

log = build_colorized_logger(name='merge_inverse_data_into_original')


def merge_inverse_data_into_original(req,
                                     sort_on_index=None,
                                     ordered_columns=None):
    """merge_inverse_data_into_original

    :param req: managed dictionary
    :param sort_on_index: sort the dataframe on this column name
    :param ordered_columns: column list to rename the inverse transform
    """
    label = req.get("label", "")
    last_step = "not-run"
    status = NOTRUN
    org_df = None
    predict_df = None
    merge_df = None
    res = {
        "status": status,
        "err": last_step,
        "sorted_org_df": org_df,
        "predict_df": predict_df,
        "merge_df": merge_df
Example #25
                    examine
                    IEX Cloud company data, calls = examine Tradier calls
                    data,
                    puts = examine Tradier puts data, and comma delimited is
                    supported as well
    -s START_DATE  start date format YYYY-MM-DD (default is 2019-01-01)
"""

import datetime
import argparse
import analysis_engine.consts as ae_consts
import analysis_engine.utils as ae_utils
import analysis_engine.extract as ae_extract
import spylunking.log.setup_logging as log_utils

log = log_utils.build_colorized_logger(name='inspect-redis-data',
                                       handler_name='no_date_colors')


def inspect_datasets(ticker=None, start_date=None, datasets=None):
    """inspect_datasets

    Loop over all cached data in redis, going sequentially per date,
    and examine the latest ``date`` value in the cache to
    check if it matches the redis key's date.

    For IEX Cloud minute data errors, running this function will print out
    commands to fix any issues (if possible):

    ::

        fetch -t TICKER -g iex_min -F DATE_TO_FIX
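
A usage sketch based on the arguments described above; whether datasets takes a list or a comma-delimited string inside the function is an assumption:

# hypothetical call examining cached Tradier calls and puts data for SPY
inspect_datasets(
    ticker='SPY',
    start_date='2019-01-01',
    datasets=['calls', 'puts'])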
Example #26
from celery import Celery
from celery import signals
from spylunking.log.setup_logging import build_colorized_logger
from antinex_utils.utils import ev
from celery_connectors.subscriber import Subscriber
from antinex_core.antinex_processor import AntiNexProcessor


# Disable celery log hijacking
# https://github.com/celery/celery/issues/2509
@signals.setup_logging.connect
def setup_celery_logging(**kwargs):
    pass


log = build_colorized_logger(name='worker')


class AntiNexCore:
    """
    AntiNex Celery Worker Core (core)

    This is a Celery Worker that connects to a message broker
    (``BROKER_URL=redis://localhost:6379/6`` by default) and consumes
    messages off the following queues:

    ``TRAIN_QUEUE`` - ``webapp.train.requests``

    ``PREDICT_QUEUE`` - ``webapp.predict.requests``

    The core trains and manages pre-trained deep neural networks (dnn) by
Example #27
#!/usr/bin/env python

import os
import sys
import json
import argparse
import pandas as pd
from spylunking.log.setup_logging import build_colorized_logger
from antinex_utils.utils import ppj

log = build_colorized_logger(name='convert_bottom_rows_to_json')


def convert_bottom_rows_to_json():
    """convert_bottom_rows_to_json

    Convert the last few rows in a dataset to JSON
    """

    parser = argparse.ArgumentParser(description=("Convert the last few "
                                                  "rows in a dataset to JSON"))
    parser.add_argument(
        "-f",
        help=(
            "dataset to use default /opt/antinex/antinex-datasets/v1/webapps/"
            "django/training-ready/v1_django_cleaned.csv"),
        required=False,
        dest="dataset")
    parser.add_argument("-b",
                        help=("rows from bottom of dataset"),
                        required=False,
Example #28
import tensorflow as tf
import analysis_engine.consts as ae_consts
import analysis_engine.load_history_dataset as load_history
import analysis_engine.ai.build_regression_dnn as build_dnn
import analysis_engine.ai.build_datasets_using_scalers as build_scaler_datasets
import analysis_engine.ai.build_scaler_dataset_from_df as build_scaler_df
import analysis_engine.ai.plot_dnn_fit_history as plot_fit_history
import analysis_engine.plot_trading_history as plot_trading_history
import spylunking.log.setup_logging as log_utils
from copy import deepcopy

# ensure reproducible results
# machinelearningmastery.com/reproducible-results-neural-networks-keras/
np_random.seed(1)

log = log_utils.build_colorized_logger(name='train-dnn-from-history')

choices = ['close', 'high', 'low']


def train_and_predict_from_history_in_s3():
    """train_and_predict_from_history_in_s3

    Run a derived algorithm with an algorithm config dictionary

    :param config_dict: algorithm config dictionary
    """

    log.debug('start - plot')

    parser = argparse.ArgumentParser(
Example #29
"""
Helper for creating a buy order
"""

import analysis_engine.consts as ae_consts
import analysis_engine.utils as ae_utils
import spylunking.log.setup_logging as log_utils

log = log_utils.build_colorized_logger(name=__name__)


def build_buy_order(ticker,
                    num_owned,
                    close,
                    balance,
                    commission,
                    date,
                    details,
                    use_key,
                    minute=None,
                    shares=None,
                    version=1,
                    auto_fill=True,
                    is_live_trading=False,
                    backtest_shares_default=10,
                    reason=None):
    """build_buy_order

    Create an algorithm buy order as a dictionary

    .. note:: setting the ``minute`` is required to build
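
A hedged usage sketch built from the build_buy_order signature above; every value is illustrative:

# hypothetical backtest buy order - values are made up for illustration
order = build_buy_order(
    ticker='SPY',
    num_owned=0,
    close=280.0,
    balance=1000.0,
    commission=6.0,
    date='2018-11-02',
    minute='2018-11-02 15:59:00',
    details={'indicator': 'example-signal'},
    use_key='SPY_2018-11-02',
    shares=10,
    is_live_trading=False,
    reason='example buy signal')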
Example #30
from antinex_utils.build_training_request import build_training_request
from antinex_utils.make_predictions import make_predictions
from drf_network_pipeline.users.db_lookup_user import \
    db_lookup_user
from drf_network_pipeline.pipeline.create_ml_prepare_record import \
    create_ml_prepare_record
from drf_network_pipeline.pipeline.process_worker_results import \
    process_worker_results
from drf_network_pipeline.job_utils.build_task_response import \
    build_task_response
from kombu import Connection
from kombu import Producer
from kombu import Exchange
from kombu import Queue

log = build_colorized_logger(name='ml.tasks')


# allow tasks to be sent straight to the worker
@shared_task(name=("drf_network_pipeline.pipeline.tasks."
                   "task_ml_prepare"),
             queue=("drf_network_pipeline.pipeline.tasks."
                    "task_ml_prepare"),
             bind=True)
def task_ml_prepare(self=None, req_node=None):
    """task_ml_prepare

    :param self: parent task object for bind=True
    :param req_node: job utils dictionary for passing a dictionary
    """