Example #1
def test_logger_custom_extras_context():
    lazy_context = logger_context(extras=('extra1', 'extra2'))
    logger = lazy_context({
        'extra1': 'value1',
        'extra2': 'value2',
        'key': 'value'
    })
    logger.info('test log message')
    assert logger.extra == {'extra1': 'value1', 'extra2': 'value2'}

    # extras given as a dict: a value of `context` means "take it from the
    # context", anything else is used as a fixed value
    lazy_context = logger_context(extras={
        'extra1': context,
        'extra2': 'fixed_value'
    })
    logger = lazy_context({
        'extra1': 'value1',
        'extra2': 'value2',
        'key': 'value'
    })
    assert logger.extra == {'extra1': 'value1', 'extra2': 'fixed_value'}
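For reference, here is a minimal sketch of how a `logger_context`-style factory could be built on top of the standard `logging.LoggerAdapter`. The names, default logger name, and sentinel below are assumptions inferred from the test above, not the library's actual implementation: the factory filters the incoming context dict down to the requested extras and attaches them to the adapter.

import logging

# sentinel playing the role of `context` in the test above (an assumption,
# not the library's actual object)
TAKE_FROM_CONTEXT = object()


def logger_context_sketch(logger_name='pipe', extras=()):
    # hypothetical stand-in for logger_context(): returns a lazy factory that
    # builds a logging.LoggerAdapter from a runtime context dict
    def lazy_context(context):
        if isinstance(extras, dict):
            # a dict maps each extra either to a fixed value or to the
            # sentinel meaning "take the value from the context"
            extra = {name: context[name] if value is TAKE_FROM_CONTEXT else value
                     for name, value in extras.items()}
        else:
            # a tuple simply selects those keys from the context
            extra = {name: context[name] for name in extras if name in context}
        return logging.LoggerAdapter(logging.getLogger(logger_name), extra)

    return lazy_context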
Example #2
def test_logger_context():
    lazy_context = logger_context(logger_name='test')
    logger = lazy_context({
        'program_id': 'program_id1',
        'processor_id': 'processor_id1',
        'key': 'value'
    })
    assert isinstance(logger, LoggerAdapter)
    logger.info('test log message')
    assert logger.extra == {
        'program_id': 'program_id1',
        'processor_id': 'processor_id1'
    }
    assert logger.logger.name == 'test'
Example #3
# we use the pagination contextmanager as a context factory in this example


@pagination
@pipe_processor
def log_page(logger, page=0, updated=False):
    # the pagination contextmanager creates a `page` context for the processor (page=0 by default);
    # the processor also expects an `updated` context, but nothing generates it yet
    logger.info('Current page: %s, updated: %s', page, updated)
    return {}


# Here we create another processor based on the previous one,
# but redefine the `page` context generated by `pagination` and add the new `updated` context.
# We also redefine the infrastructure `logger` context with a new custom logger factory.
new_logger_factory = logger_context('custom')
log_fixed_page = define(page=10, updated=True,
                        logger=new_logger_factory)(log_page)


# In the same way you may redefine the default config, but it is more convenient
# to use the special context manager `define_config` for this
@pipe_processor
def log_config(logger, config):
    logger.info('Config: key1=%s, key2=%s', config.key1, config.key2)
    return {}


log_replaced_config = define_config({'key1': 'UPDATED'})(log_config)
log_merged_config = define_config({'key2': {
    'key2_1': 'MERGED'
}})(log_config)
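The `define`/`define_config` calls above override parts of the context a processor receives. A minimal sketch of that idea, assuming purely for illustration that a processor takes its context as a single dict, could look like the following; `define_sketch` is a hypothetical name, not the library's API.

def define_sketch(**overrides):
    # hypothetical stand-in for define(): the wrapped processor always sees
    # `overrides` applied on top of whatever context the pipeline provides
    def decorator(processor):
        def wrapper(context):
            merged = dict(context)
            merged.update(overrides)
            return processor(merged)
        return wrapper
    return decorator


# e.g. force page=10 and updated=True no matter what the pipeline generates:
# log_fixed_page_sketch = define_sketch(page=10, updated=True)(log_page)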
Example #4
                'error_retry_delay': 10,
            },
            'redis': {
                'host': 'localhost',
                'port': '6379'
            },
            'memcached': {
                'servers': ['localhost:11211']
            }
        }, from_environ('VNEXT')))

init_pipe_log('example_celery.log')

context = {
    'config': config,
    'logger': logger_context(),
    'lock': redis_lock_pool,
    'counter': redis_counter_pool,
    'storage': redis_storage_pool,
    'cursor_storage': versioned_cursor_storage_context
}

infrastructure = CeleryInf(context)
infrastructure.load(program)

# start a celery worker in a separate terminal:
# celery -A example_celery worker -c 4 -l DEBUG
app = infrastructure.app


class VersionCommand(Command):
Example #5
                      'start': Event.on_start >> Message.log('Program started'),
                      'job1': job1_pipeline,
                      'job2': job2_pipeline,
                  },
                  message_mapping={'job_id': message._job,
                                   'job_arg1': message.job_arg1,
                                   'job_arg2': message.job_arg2})

init_logging(default=pipe_logger(
    '%(levelname)s %(program_id)s.%(processor_id)s:[%(job_id)s]%(message)s',
    loglevel=logging.DEBUG))

config = Config({'celery': {'app': {'broker': 'redis://localhost/0'}}})
context = {
    'config': config,
    'logger': logger_context(extras=('program_id', 'processor_id', 'job_id')),
    'lock': local_redis_lock_pool,
    'storage': local_redis_storage_pool,
}

infrastructure = CeleryInf(context)
infrastructure.load(program)

# start a celery worker in a separate terminal:
# celery -A example_job worker -c 4 -l DEBUG
app = infrastructure.app

if __name__ == '__main__':
    with handle_command_error(True):
        run_command(infrastructure,
                    commands={'job': JobCommand(job1, job2)})
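The format string passed to pipe_logger above references %(program_id)s, %(processor_id)s and %(job_id)s, which is why the logger context is created with exactly those extras. Below is a plain-stdlib sketch of the same mechanism; the handler setup and the id values are illustrative only, not the library's code.

import logging

# fields like %(program_id)s in the format string must be present on every
# log record, which is what a LoggerAdapter's `extra` dict supplies
handler = logging.StreamHandler()
handler.setFormatter(logging.Formatter(
    '%(levelname)s %(program_id)s.%(processor_id)s:[%(job_id)s] %(message)s'))

base = logging.getLogger('example_job')
base.addHandler(handler)
base.setLevel(logging.DEBUG)

adapter = logging.LoggerAdapter(base, {'program_id': 'program_id1',
                                       'processor_id': 'processor_id1',
                                       'job_id': 'job1'})
adapter.debug('hello')  # -> DEBUG program_id1.processor_id1:[job1] hello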
Example #6
@pipe_processor
def processor_with_rate_limit(message, logger, counter):
    # the counters should stay roughly the same for different messages,
    # because the rate-limit guard limits messages with id=1 to only 5 per 5 seconds
    counter.msg.increment(message.id)
    logger.info('Processed message %s', message.id)
    logger.info('Message balance: %s, %s',
                counter.msg.increment(1, value=0), counter.msg.increment(2, value=0))


pipeline = Scheduler.start_period(seconds=1) >> emit_messages >> processor_with_rate_limit


init_logging()

program = Program(name='test1',
                  pipelines={'rate_example': pipeline})

if __name__ == '__main__':
    services = {'logger': logger_context(),
                'rate_counter': memory_rate_pool,
                'counter': memory_counter_pool,  # count messages
                'lock': memory_lock_pool}

    infrastructure = GeventInf(services)
    infrastructure.load(program)
    infrastructure.start(program)

    # run gevent worker
    infrastructure.run_worker()
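The rate-limit guard referred to in the processor's comment ("only 5 per 5 seconds" for a given message id) can be pictured as a fixed-window counter. Here is a self-contained sketch of that idea with hypothetical names; it is not the library's `rate_counter` implementation.

import time
from collections import defaultdict


class FixedWindowRateLimit:
    # hypothetical illustration of a "5 per 5 seconds" guard
    def __init__(self, limit=5, period=5.0):
        self.limit = limit
        self.period = period
        self.windows = defaultdict(lambda: (0.0, 0))  # key -> (window_start, count)

    def allow(self, key):
        now = time.monotonic()
        start, count = self.windows[key]
        if now - start >= self.period:
            start, count = now, 0            # the previous window expired
        if count >= self.limit:
            self.windows[key] = (start, count)
            return False                     # over the limit, skip the message
        self.windows[key] = (start, count + 1)
        return True


# limiter = FixedWindowRateLimit(limit=5, period=5.0)
# limiter.allow(message_id)  # True for the first 5 calls in any 5-second window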