Example #1
    async def task(task_name: str):
        def inject_extra(record):
            record.extra['task_name'] = task_name
            record.extra['task_id'] = id(asyncio.current_task())

        with Handler(bubble=True).contextbound():
            with Processor(inject_extra).contextbound():
                logger.info('I am the task')
                await asyncio.sleep(0)
                await util()
                logger.info('I am still the task')
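A self-contained driver sketch for the pattern above (the logger, handler and format string here are assumptions, and contextbound() requires a logbook release with contextvars support): running two tasks concurrently shows that each task's injected task_name stays local to its own asyncio context even though both tasks share one thread.

import asyncio
import sys

from logbook import Logger, Processor, StreamHandler

logger = Logger('tasks')


async def task(task_name: str):
    def inject_extra(record):
        record.extra['task_name'] = task_name

    # contextbound() keeps the processor local to this asyncio task,
    # even though both tasks below run on the same thread.
    with Processor(inject_extra).contextbound():
        logger.info('I am the task')
        await asyncio.sleep(0)
        logger.info('I am still the task')


async def main():
    # The injected extra is rendered through the handler's format string.
    handler = StreamHandler(
        sys.stdout,
        format_string='{record.message} [task_name={record.extra[task_name]}]')
    with handler.applicationbound():
        await asyncio.gather(task('task-a'), task('task-b'))


asyncio.run(main())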
Example #2
    def __init__(self,
                 algo,
                 sim_params,
                 data_portal,
                 clock,
                 benchmark_source,
                 restrictions,
                 universe_func,
                 instant_fill=False):

        # ==============
        # Simulation
        # Param Setup
        # ==============
        self.sim_params = sim_params
        self.env = algo.trading_environment
        self.data_portal = data_portal
        self.restrictions = restrictions
        self.instant_fill = instant_fill

        # ==============
        # Algo Setup
        # ==============
        self.algo = algo

        # ==============
        # Snapshot Setup
        # ==============

        # This object is the way that user algorithms interact with OHLCV data,
        # fetcher data, and some API methods like `data.can_trade`.
        self.current_data = self._create_bar_data(universe_func)

        # We don't have a datetime for the current snapshot until we
        # receive a message.
        self.simulation_dt = None

        self.clock = clock

        self.benchmark_source = benchmark_source

        # =============
        # Logging Setup
        # =============

        # Processor function for injecting the algo_dt into
        # user prints/logs.
        def inject_algo_dt(record):
            if 'algo_dt' not in record.extra:
                record.extra['algo_dt'] = self.simulation_dt

        self.processor = Processor(inject_algo_dt)
Example #3
def test_mdc_works():
    def inject_extra(record):
        record.extra['ip'] = '127.0.0.1'
        record.extra['username'] = 'Andrey'

    with TestHandler() as handler:
        handler.formatter = text_formatter
        logger = logging.getLogger('Dummy')
        with redirected_logging():
            with Processor(inject_extra):
                logger.info('hello world')
        assert len(handler.formatted_records) == 1
        assert 'INFO: Dummy: hello world <ip=127.0.0.1, username=Andrey>' in handler.formatted_records[0]
Example #4
    def __init__(self, blotter, perf_tracker, algo, algo_start):

        # ==========
        # Algo Setup
        # ==========

        # We extract the order book from the txn client so that
        # the algo can place new orders.
        self.blotter = blotter
        self.perf_tracker = perf_tracker

        self.perf_key = self.EMISSION_TO_PERF_KEY_MAP[
            perf_tracker.emission_rate]

        self.algo = algo
        self.algo_start = algo_start.replace(hour=0,
                                             minute=0,
                                             second=0,
                                             microsecond=0)

        # Monkey patch the user algorithm to place orders in the
        # TransactionSimulator's order book and use our logger.
        self.algo.set_order(self.order)

        # ==============
        # Snapshot Setup
        # ==============

        # The algorithm's universe as of our most recent event.
        # We want an ndict that will have empty objects as default
        # values on missing keys.
        self.universe = ndict(internal=defaultdict(SIDData))

        # We don't have a datetime for the current snapshot until we
        # receive a message.
        self.simulation_dt = None
        self.snapshot_dt = None

        # =============
        # Logging Setup
        # =============

        # Processor function for injecting the algo_dt into
        # user prints/logs.
        def inject_algo_dt(record):
            if 'algo_dt' not in record.extra:
                record.extra['algo_dt'] = self.snapshot_dt

        self.processor = Processor(inject_algo_dt)
Example #5
def transact_stub(slippage, commission, event, open_orders):
    """
    This is intended to be wrapped in a partial, so that the
    slippage and commission models can be enclosed.
    """
    def inject_algo_dt(record):
        if 'algo_dt' not in record.extra:
            record.extra['algo_dt'] = event['dt']

    with Processor(inject_algo_dt).threadbound():

        transaction = slippage.simulate(event, open_orders)
        if transaction and transaction.amount != 0:
            direction = abs(transaction.amount) / transaction.amount
            per_share, total_commission = commission.calculate(transaction)
            transaction.price = transaction.price + (per_share * direction)
            transaction.commission = total_commission
        return transaction
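As the docstring says, the intended call pattern is to freeze the two models with functools.partial; a minimal sketch with toy stand-in models (the class names below are placeholders, not from the original code base):

from functools import partial


class NoSlippage:
    """Toy stand-in exposing the .simulate() interface used above."""
    def simulate(self, event, open_orders):
        return None  # no fills


class NoCommission:
    """Toy stand-in exposing the .calculate() interface used above."""
    def calculate(self, transaction):
        return 0.0, 0.0  # (per_share, total_commission)


# Enclose the models once; the per-event arguments are supplied at call time:
transact = partial(transact_stub, NoSlippage(), NoCommission())
# transaction = transact(event, open_orders)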
Example #6
def test_json_formatting_works():
    def inject_extra(record):
        record.extra['ip'] = '127.0.0.1'
        record.extra['username'] = 'Andrey'

    with TestHandler() as handler:
        handler.formatter = json_formatter
        logger = logging.getLogger('Dummy')
        with redirected_logging():
            with Processor(inject_extra):
                logger.info('hello world')
        assert len(handler.formatted_records) == 1
        record = json.loads(handler.formatted_records[0])
        assert record['level'] == 'INFO'
        assert record['name'] == 'Dummy'
        assert record['message'] == 'hello world'
        assert record['ip'] == '127.0.0.1'
        assert record['username'] == 'Andrey'
Example #7
    def __init__(self, algo, sim_params):

        # ==============
        # Simulation
        # Param Setup
        # ==============
        self.sim_params = sim_params

        # ==============
        # Algo Setup
        # ==============
        self.algo = algo
        self.algo_start = self.sim_params.first_open
        self.algo_start = self.algo_start.replace(hour=0,
                                                  minute=0,
                                                  second=0,
                                                  microsecond=0)

        # ==============
        # Snapshot Setup
        # ==============

        # The algorithm's data as of our most recent event.
        # We want an object that will have empty objects as default
        # values on missing keys.
        self.current_data = BarData()

        # We don't have a datetime for the current snapshot until we
        # receive a message.
        self.simulation_dt = None

        # =============
        # Logging Setup
        # =============

        # Processor function for injecting the algo_dt into
        # user prints/logs.
        def inject_algo_dt(record):
            if 'algo_dt' not in record.extra:
                record.extra['algo_dt'] = self.simulation_dt

        self.processor = Processor(inject_algo_dt)
Example #8
def _bootstrap():
    """Get the Inyoka version and store it."""
    global INYOKA_REVISION

    # the path to the contents of the Inyoka module
    conts = os.environ.setdefault('INYOKA_MODULE',
                                  realpath(join(dirname(__file__))))
    # the path to the Inyoka instance folder
    os.environ['INYOKA_INSTANCE'] = realpath(join(conts, pardir))
    os.environ['CELERY_LOADER'] = 'inyoka.core.celery_support.CeleryLoader'

    # get the `INYOKA_REVISION` using the mercurial python api
    try:
        ui = hgui.ui()
        repository = localrepository(ui, join(conts, '..'))
        ctx = repository['tip']
        INYOKA_REVISION = ('%(num)s:%(id)s' % {
            'num': ctx.rev(),
            'id': shorthex(ctx.node())
        })
    except TypeError:
        # fail silently
        pass

    # This value defines the timeout for sockets in seconds.  By default Python
    # sockets never time out, which leaves us with blocking workers.
    # Socket timeouts are set globally for the whole application.
    # The value *must* be a floating point value.
    socket.setdefaulttimeout(10.0)

    #: bind the context
    ctx = ApplicationContext()
    ctx.bind()

    # setup components
    ctx.load_packages(ctx.cfg['activated_components'])
    if ctx.cfg['testing']:
        logger.level_name = 'ERROR'

    # makes INYOKA_REVISION visible in the extra dict of every log record
    proc = Processor(lambda x: x.extra.update(INYOKA_REVISION=INYOKA_REVISION))
    proc.push_application()
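The pushed processor stays installed until it is explicitly popped; a minimal sketch of the paired teardown (hypothetical, the original simply leaves it pushed for the lifetime of the application, and run_application() below is a placeholder):

# push_application()/pop_application() come in pairs; for a scoped setup the
# applicationbound() context manager does both automatically.
proc = Processor(lambda record: record.extra.update(INYOKA_REVISION=INYOKA_REVISION))
proc.push_application()
try:
    run_application()  # placeholder for the actual application entry point
finally:
    proc.pop_application()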
Example #9
def transact_stub(slippage, commission, event, open_orders):
    """
    This is intended to be wrapped in a partial, so that the
    slippage and commission models can be enclosed.
    """
    def inject_algo_dt(record):
        if 'algo_dt' not in record.extra:
            record.extra['algo_dt'] = event['dt']

    with Processor(inject_algo_dt).threadbound():

        transactions = slippage.simulate(event, open_orders)

        for transaction in transactions:
            if (transaction
                    and not zp_math.tolerant_equals(transaction.amount, 0)):
                direction = math.copysign(1, transaction.amount)
                per_share, total_commission = commission.calculate(transaction)
                transaction.price = transaction.price + (per_share * direction)
                transaction.commission = total_commission
        return transactions
Example #10
    def run(self):
        def inject_worker_id(record):
            record.extra['worker_id'] = self.id

        with Processor(inject_worker_id).threadbound():
            iterations_left = NUM_ITERATIONS
            num_fails_in_sequence = 0

            while iterations_left > 0 and not self.exit_event.is_set():
                # acquire the locks
                locked = []
                for data_id in range(NUM_DATA_SLOTS):
                    key = 'key-%d' % data_id
                    if self.lock(key):
                        locked.append(key)
                    else:
                        break

                if len(locked) == NUM_DATA_SLOTS:
                    num_fails_in_sequence = 0
                    for data_id in range(NUM_DATA_SLOTS):
                        self.d[data_id].append(self.id)
                    iterations_left -= 1
                    logger.info('decrement: locked=%r' % (locked, ))
                else:
                    num_fails_in_sequence += 1
                    if num_fails_in_sequence > NUM_ITERATIONS * NUM_DATA_SLOTS * NUM_WORKERS:
                        # the algorithm seems to be stuck somewhere,
                        # so exit from the worker
                        logger.warning('seems that algorithm stuck somewhere')
                        break

                # release the locks
                for key in reversed(locked):
                    self.unlock(key)

                time.sleep(random.random())

            logger.debug('exit from the worker, locals=%r' % (locals(), ))
Example #11
    def __init__(self, algo, sim_params):

        # ==============
        # Simulation
        # Param Setup
        # ==============
        self.sim_params = sim_params

        # ==============
        # Algo Setup
        # ==============
        self.algo = algo
        self.algo_start = normalize_date(self.sim_params.first_open)
        self.env = algo.trading_environment

        # ==============
        # Snapshot Setup
        # ==============

        # The algorithm's data as of our most recent event.
        # We want an object that will have empty objects as default
        # values on missing keys.
        self.current_data = BarData()

        # We don't have a datetime for the current snapshot until we
        # receive a message.
        self.simulation_dt = None
        self.previous_dt = self.algo_start

        # =============
        # Logging Setup
        # =============

        # Processor function for injecting the algo_dt into
        # user prints/logs.
        def inject_algo_dt(record):
            if 'algo_dt' not in record.extra:
                record.extra['algo_dt'] = self.simulation_dt
        self.processor = Processor(inject_algo_dt)
Example #12
    def g(*args, **kargs):
        with Processor(partial(inject_func, f.__name__, args,
                               kargs)).threadbound():
            return f(*args, **kargs)
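The fragment above is the inner wrapper of a logging decorator; a hedged reconstruction of the missing pieces (the decorator name log_call and the inject_func signature are assumptions, since the original only shows g):

from functools import partial, wraps

from logbook import Processor


def inject_func(name, args, kargs, record):
    # Processor calls the partial with the record as its final argument,
    # so the wrapped call's name and arguments end up in record.extra.
    record.extra.update(func=name, args=repr(args), kargs=repr(kargs))


def log_call(f):
    @wraps(f)
    def g(*args, **kargs):
        with Processor(partial(inject_func, f.__name__, args,
                               kargs)).threadbound():
            return f(*args, **kargs)
    return g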
Example #13
"""

import os
import sys
from logbook import Processor, StreamHandler, DEBUG, Logger, FileHandler

my_handler = FileHandler("test.log", encoding="utf-8", level=DEBUG)
# my_handler = StreamHandler(sys.stdout, level=DEBUG)


def log_other_info(record):
    """
    a) `with Processor(...)` lets every log record emitted inside the block share
    common logic, much like an aspect-oriented injection point.  The example here
    attaches additional information to every record by adding entries to the
    record's extra dict attribute, so each record carries the values added below.
    b) Open question: how is this extra information actually used, e.g. how can
    it be written to the log file together with the message?
    c) For the available record attributes, see logrecord.py
    """
    record.extra['myname'] = 'kute'
    record.extra['mycwd'] = os.getcwd()
    # update the myname property
    record.extra.update(myname="lisa")
    print(record.to_dict())


if __name__ == "__main__":
    with my_handler.applicationbound():
        with Processor(log_other_info).applicationbound():
            mylog = Logger("processor")
            mylog.notice("notice msg.")
Example #14
File: log.py  Project: mnms/LTCLI
        ' : ',
        '{record.message}',
    ]),
}


def inject_extra(record):
    record.extra['basename'] = os.path.basename(record.filename)
    record.extra['level_color'] = get_log_color(record.level)
    record.extra['clear_color'] = color.ENDC


logger = Logger('root')

# extra info
processor = Processor(inject_extra)
processor.push_application()

# for screen log
screen_level = INFO
stream_handler = StreamHandler(sys.stdout, level=screen_level, bubble=True)
stream_handler.format_string = formatter['screen']
stream_handler.push_application()

# for rolling file log
p = os.environ['FBPATH']
if not os.path.isdir(p):
    os.system('mkdir -p {}'.format(p))
file_path = os.path.expanduser(os.path.join(p, 'logs'))
if os.path.isdir(file_path):
    backup_count = 7
Example #15
    def __init__(self, algo, sim_params):

        # ==============
        # Simulation
        # Param Setup
        # ==============
        self.sim_params = sim_params

        # ==============
        # Algo Setup
        # ==============
        self.algo = algo
        self.algo_start = normalize_date(self.sim_params.first_open)
        self.env = algo.trading_environment

        # ==============
        # Snapshot Setup
        # ==============

        _day = timedelta(days=1)

        def _get_removal_date(sid,
                              finder=self.env.asset_finder,
                              default=self.sim_params.last_close + _day):
            """
            Get the date of the morning on which we should remove an asset from
            data.

            If we don't have an auto_close_date, this is just the end of the
            simulation.

            If we have an auto_close_date, then we remove assets from data on
            max(asset.auto_close_date, asset.end_date + timedelta(days=1))

            We hold assets at least until auto_close_date because up until that
            date the user might still hold positions or have open orders in an
            expired asset.

            We hold assets at least until end_date + 1, because an asset
            continues trading until the **end** of its end_date.  Even if an
            asset auto-closed before the end_date (say, because Interactive
            Brokers clears futures positions prior the actual notice or
            expiration), there may still be trades arriving that represent
            signals for other assets that are still tradeable. (Particularly in
            the futures case, trading in the final days of a contract are
            likely relevant for trading the next contract on the same future
            chain.)
            """
            try:
                asset = finder.retrieve_asset(sid)
            except ValueError:
                # Handle sid not an int, such as from a custom source.
                # So that they don't compare equal to other sids, and we'd
                # blow up comparing strings to ints, let's give them unique
                # close dates.
                return default + timedelta(microseconds=id(sid))
            except SidsNotFound:
                return default

            auto_close_date = asset.auto_close_date
            if auto_close_date is None:
                # If we don't have an auto_close_date, we never remove an asset
                # from the user's portfolio.
                return default

            end_date = asset.end_date
            if end_date is None:
                # If we have an auto_close_date but not an end_date, clear the
                # asset from data when we clear positions/orders.
                return auto_close_date

            # If we have both, make close once we're on or after the
            # auto_close_date, and strictly after the end_date.
            # See docstring above for an explanation of this logic.
            return max(auto_close_date, end_date + _day)

        self._get_removal_date = _get_removal_date

        # The algorithm's data as of our most recent event.
        # Maintain sids in order by asset close date, so that we can more
        # efficiently remove them when their times come...
        self.current_data = BarData(SortedDict(self._get_removal_date))
        # We don't have a datetime for the current snapshot until we
        # receive a message.
        self.simulation_dt = None

        # =============
        # Logging Setup
        # =============

        # Processor function for injecting the algo_dt into
        # user prints/logs.
        def inject_algo_dt(record):
            if 'algo_dt' not in record.extra:
                record.extra['algo_dt'] = self.simulation_dt

        self.processor = Processor(inject_algo_dt)
Example #16
from utils import get_ip


def inject_information(record):
    record.extra['ip'] = get_ip()

log_format = u'[{record.time:%Y-%m-%d %H:%M}] {record.channel} - {record.level_name}: {record.message} \t({record.extra[ip]})'

# a nested handler setup can be used to configure more complex setups
setup = NestedSetup([
    #StderrHandler(format_string=u'[{record.time:%Y-%m-%d %H:%M}] {record.channel} - {record.level_name}: {record.message} \t({record.extra[ip]})'),
    StreamHandler(sys.stdout, format_string=log_format),
    # then write messages that are at least warnings to a logfile
    FileHandler(os.environ['QTRADE_LOG'], level='WARNING'),
    Processor(inject_information)
])

color_setup = NestedSetup([
    StreamHandler(sys.stdout, format_string=log_format),
    ColorizedStderrHandler(format_string=log_format, level='NOTICE'),
    Processor(inject_information)
])

remote_setup = NestedSetup([
    ZeroMQHandler('tcp://127.0.0.1:5540'),
    Processor(inject_information)
])

log = Logger('Trade Labo')
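NestedSetup pushes all of its members as one unit; a minimal usage sketch for the setups defined above (the bound block is an assumption, not part of the original):

# Binding the setup installs the stream handler, the WARNING-level file
# handler and the processor together, and removes them all again on exit.
with setup.threadbound():
    log.info('goes to stdout with the ip injected by the processor')
    log.warning('written to the WARNING-level logfile')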
Example #17
    def __init__(self, algo, sim_params):

        # ==============
        # Simulation
        # Param Setup
        # ==============
        self.sim_params = sim_params

        # ==============
        # Algo Setup
        # ==============
        self.algo = algo
        self.algo_start = normalize_date(self.sim_params.first_open)
        self.env = algo.trading_environment

        # ==============
        # Snapshot Setup
        # ==============

        _day = timedelta(days=1)

        def _get_removal_date(sid,
                              finder=self.env.asset_finder,
                              default=self.sim_params.last_close + _day):
            """
            Get the date of the morning on which we should remove an asset from
            data.

            If we don't have an auto_close_date, this is just the end of the
            simulation.

            If we have an auto_close_date, then we remove assets from data on
            max(asset.auto_close_date, asset.end_date + timedelta(days=1))

            We hold assets at least until auto_close_date because up until that
            date the user might still hold positions or have open orders in an
            expired asset.

            We hold assets at least until end_date + 1, because an asset
            continues trading until the **end** of its end_date.  Even if an
            asset auto-closed before the end_date (say, because Interactive
            Brokers clears futures positions prior the actual notice or
            expiration), there may still be trades arriving that represent
            signals for other assets that are still tradeable. (Particularly in
            the futures case, trading in the final days of a contract are
            likely relevant for trading the next contract on the same future
            chain.)
            """
            try:
                asset = finder.retrieve_asset(sid)
            except ValueError:
                # Handle sid not an int, such as from a custom source.
                # So that they don't compare equal to other sids, and we'd
                # blow up comparing strings to ints, let's give them unique
                # close dates.
                return default + timedelta(microseconds=id(sid))
            except SidsNotFound:
                return default

            auto_close_date = asset.auto_close_date
            if auto_close_date is None:
                # If we don't have an auto_close_date, we never remove an asset
                # from the user's portfolio.
                return default

            end_date = asset.end_date
            if end_date is None:
                # If we have an auto_close_date but not an end_date, clear the
                # asset from data when we clear positions/orders.
                return auto_close_date

            # If we have both, make close once we're on or after the
            # auto_close_date, and strictly after the end_date.
            # See docstring above for an explanation of this logic.
            return max(auto_close_date, end_date + _day)

        self._get_removal_date = _get_removal_date

        # The algorithm's data as of our most recent event.
        # Maintain sids in order by asset close date, so that we can more
        # efficiently remove them when their times come...
        self.current_data = BarData(SortedDict(self._get_removal_date))

        # We don't have a datetime for the current snapshot until we
        # receive a message.
        self.simulation_dt = None

        # =============
        # Logging Setup
        # =============

        # Processor function for injecting the algo_dt into
        # user prints/logs.
        def inject_algo_dt(record):
            if 'algo_dt' not in record.extra:
                record.extra['algo_dt'] = self.simulation_dt
        self.processor = Processor(inject_algo_dt)
Example #18
# GMail mailbox handler
ghandler = GMailHandler(
    account_id="*****@*****.**",
    password="******",
    recipients=["*****@*****.**"],
    format_string=formatstr
)


def main():
    mylog = Logger("MailHandler-APP")
    mylog.info("mailhandler message")


def inject_other_info(record):
    record.extra['myscret'] = "do not tell you"
    record.extra.update(
        # some other info
        pp="pp info",
        ip="127.0.0.1",
        url="http://logbook.readthedocs.io/en/stable/",
        method="GET",
        myscret="do not tell you yet"
    )


if __name__ == "__main__":
    with ghandler.threadbound():
        with Processor(callback=inject_other_info).threadbound():
            main()
Example #19
from logbook import warn, StreamHandler
import sys

from termcc.cc import cc

my_handler = StreamHandler(sys.stdout)
my_handler.push_application()
warn(cc(':red: :yin_yang: This is a warning :reset:'))

import os
from logbook import Processor


def inject_cwd(record):
    record.extra['cwd'] = os.getcwd()


with my_handler.applicationbound():
    with Processor(inject_cwd).applicationbound():
        warn(cc(':blue: :yin_yang: This is a warning'))
Example #20
    def __init__(self, algo, sim_params):

        # ==============
        # Simulation
        # Param Setup
        # ==============
        self.sim_params = sim_params

        # ==============
        # Algo Setup
        # ==============
        self.algo = algo
        self.algo_start = normalize_date(self.sim_params.first_open)
        self.env = algo.trading_environment

        # ==============
        # Snapshot Setup
        # ==============

        def _get_asset_close_date(sid,
                                  finder=self.env.asset_finder,
                                  default=self.sim_params.last_close +
                                  timedelta(days=1)):
            try:
                asset = finder.retrieve_asset(sid)
            except ValueError:
                # Handle sid not an int, such as from a custom source.
                # So that they don't compare equal to other sids, and we'd
                # blow up comparing strings to ints, let's give them unique
                # close dates.
                return default + timedelta(microseconds=id(sid))
            except SidsNotFound:
                return default
            # Default is used when the asset has no auto close date,
            # and is set to a time after the simulation ends, so that the
            # relevant asset isn't removed from the universe at all
            # (at least not for this reason).
            return asset.auto_close_date or default

        self._get_asset_close = _get_asset_close_date

        # The algorithm's data as of our most recent event.
        # Maintain sids in order by asset close date, so that we can more
        # efficiently remove them when their times come...
        self.current_data = BarData(SortedDict(self._get_asset_close))

        # We don't have a datetime for the current snapshot until we
        # receive a message.
        self.simulation_dt = None

        # =============
        # Logging Setup
        # =============

        # Processor function for injecting the algo_dt into
        # user prints/logs.
        def inject_algo_dt(record):
            if 'algo_dt' not in record.extra:
                record.extra['algo_dt'] = self.simulation_dt

        self.processor = Processor(inject_algo_dt)
Example #21
import sys
from kute.easylog.easylog import geteasylog
from logbook import StreamHandler, DEBUG, Processor, Logger

easylog = geteasylog()
StreamHandler(sys.stdout, level=DEBUG).push_application()


def printrecorddetail(record):
    easylog.info(record.channel)  # name of the logger
    easylog.info(record.dispatcher.name)
    easylog.info(record.exception_message)
    easylog.info(record.exception_name)
    easylog.info(record.extra)
    easylog.info(record.filename)
    easylog.info(record.level)
    easylog.info(record.level_name)
    easylog.info(record.message)
    easylog.info(record.msg)
    easylog.info(record.thread_name)
    easylog.info(record.time)

    easylog.info(record.to_dict(True))  # overview of all log record attributes


if __name__ == "__main__":
    with Processor(printrecorddetail).applicationbound():
        mylog = Logger("log-record-app-name")
        mylog.info("log record detail")