Example #1
def test_configure_api_client_factory_custom_config(load_spec_mock):
    load_spec_mock.return_value = API_SPEC
    metrics = Mock()

    operation_cache_ttl_map = {'api.health': 60}
    config = Config({
        'api': {
            'test': {
                'url': 'http://url.com',
                'max_retries': 5,
                'operation_cache_ttl': 100,
                'operation_cache_ttl_map': operation_cache_ttl_map
            }
        }
    })
    cache = Mock()

    client = configure_api_client_factory('test',
                                          config,
                                          cache=cache,
                                          metrics=metrics)
    assert client
    assert isinstance(client, SwaggerClient)
    load_spec_mock.assert_called_once_with('http://url.com/swagger.json')

    http_client = client.swagger_spec.http_client.session
    assert http_client.api_name == 'test'
    assert http_client._metrics == metrics
    assert http_client._cache == cache.api
    assert http_client._operation_cache_ttl_map == operation_cache_ttl_map
    assert http_client._max_retries == 5
Example #2
def test_create_http_client_configured():
    metrics = Mock()
    cache = Mock()
    config = Config({
        'requests': {
            'test': {
                'api_name': 'custom_name',
                'cache': {
                    'enabled': True,
                    'ttl': 100
                },
                'max_retries': 5
            }
        }
    })

    client = create_http_client(http_client_name='test',
                                metrics=metrics,
                                cache=cache,
                                config=config)

    assert client
    assert isinstance(client, HttpClient)
    assert client.api_name == 'custom_name'
    assert client._metrics == metrics
    assert client._cache == cache.http
    assert client._cache_ttl == 100
    assert client._max_retries == 5
Example #3
def config(self):
    """
    Return configuration
    :return: configuration
    :rtype: pypipes.config.Config
    """
    return self.context.get('config') or Config()  # use an empty config by default
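The fallback works because dict.get returns None for a missing key, and None or Config() evaluates to the fresh empty Config. A minimal standalone sketch of that behavior (assuming Config is importable from pypipes.config, as in the other examples):

from pypipes.config import Config

context = {}  # no 'config' key here
config = context.get('config') or Config()  # falls back to a fresh empty Config
assert isinstance(config, Config)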
Example #4
def test_default_quote_pool(memory_rate_counter):
    config = Config({})
    rate_counter_pool = ContextPool(memory_rate_counter)
    pool = QuotaPool(config, rate_counter_pool)

    # quota is unlimited if there is no quota configuration
    assert isinstance(pool.default, UnlimitedQuota)
    assert isinstance(pool.any_name, UnlimitedQuota)
Example #5
def test_configure_api_client_factory(load_spec_mock):
    load_spec_mock.return_value = API_SPEC
    metrics = Mock()
    config = Config({'api': {'test': {'url': 'http://url.com'}}})
    cache = Mock()

    client = configure_api_client_factory('test',
                                          config,
                                          cache=cache,
                                          metrics=metrics)
    assert client
    assert isinstance(client, SwaggerClient)
    load_spec_mock.assert_called_once_with('http://url.com/swagger.json')
Example #6
def test_create_http_client_default():
    metrics = Mock()
    config = Config()
    cache = Mock()

    client = create_http_client(http_client_name='test',
                                metrics=metrics,
                                cache=cache,
                                config=config)

    assert client
    assert isinstance(client, HttpClient)
    assert client.api_name == 'test'
    assert client._metrics == metrics
    assert client._cache is None
    assert client._cache_ttl == 60
    assert client._max_retries == 0
Example #7
def test_quota_pool(memory_rate_counter):
    config = Config({
        'quota': {
            'default': {
                'limit': 10,
                'threshold': 60
            },
            'day_quota': {
                'limit': 10,
                'days': 2
            },
            'hour_quota': {
                'limit': 10,
                'hours': 3
            },
            'month_quota': {
                'limit': 10,
                'months': 4
            },
            'unlimited': {
                'limit': 0
            }
        }
    })

    rate_counter_pool = ContextPool(memory_rate_counter)
    pool = QuotaPool(config, rate_counter_pool)

    assert isinstance(pool.default, Quota)
    pool.default.consume('key1')

    assert isinstance(pool.day_quota, DayQuota)
    pool.day_quota.consume('key1')

    assert isinstance(pool.hour_quota, HourQuota)
    pool.hour_quota.consume('key1')

    assert isinstance(pool.month_quota, MonthQuota)
    pool.month_quota.consume('key1')

    assert isinstance(pool.unlimited, UnlimitedQuota)
    pool.unlimited.consume('key1')

    # if a quota is not configured, the default configuration is used
    assert isinstance(pool.not_configured, Quota)
    pool.not_configured.consume('key1')
Example #8
    _page=1) >> log_page >> log_fixed_page
config_pipeline = Event.on_start >> log_config >> log_replaced_config >> log_merged_config

program = Program(name='test',
                  pipelines={
                      'paging': paging_pipeline,
                      'config': config_pipeline
                  })

default_config = {
    'key1': 'value1',
    'key2': {
        'key2_1': 'value2.1',
        'key2_2': 'value2.2'
    }
}

context = {
    'logger': logger_context('default'),
    'config': Config(default_config)
}
infrastructure = RunInline(context)
infrastructure.load(program)

logging.basicConfig(level=logging.INFO,
                    format='Logger: %(name)s > %(message)s')

if __name__ == '__main__':
    # The program runs immediately, since only the start event is sent
    infrastructure.start(program)
Example #9
                      'error_pipeline': error_pipeline
                  },
                  message_mapping={'upper_level_page': message.total_page})

config = Config(
    merge(
        {
            'celery': {
                'queue_name': {
                    'processor': '{program_name}.{pipeline}',
                    'scheduler': '{program_name}.scheduler',
                    'error': '{program_name}.error.{processor_id}.{error_type}'
                },
                'app': {
                    'broker': 'amqp://*****:*****@localhost:5672'
                },
                'max_error_retries': 2,
                'error_retry_delay': 10,
            },
            'redis': {
                'host': 'localhost',
                'port': '6379'
            },
            'memcached': {
                'servers': ['localhost:11211']
            }
        }, from_environ('VNEXT')))

init_pipe_log('example_celery.log')

context = {
Example #10
from pypipes.context import LazyContextCollection

logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# This is an example of http_client_context usage.
# http_client_context is a lazy context meant to be used
# in an infrastructure setup for pipeline processing.

# It can use the metrics and cache contexts if they are defined,
# so let's create a context collection that initializes the context for us,
# the way a pipeline infrastructure does.

enable_cache = Config(http_client={
    'cache': {'enabled': True}  # enable caching in http_client context
})

context = LazyContextCollection(
    http=http_client_context,
    cache=memory_cache_pool,  # we want to cache responses
    metrics=log_metrics_context,  # we want to collect http_client metrics
    config=Config(enable_cache),
    logger=logger)  # the logger is used by log_metrics_context to output metrics


# let's initialize our context
http_client = context['http']
print('HTTP client is:', http_client)

# http_client inherits all features of requests.Session
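Since http_client inherits from requests.Session, standard session calls work unchanged. A minimal sketch; the URL is illustrative, and whether the response is served from cache depends on the config above:

response = http_client.get('https://httpbin.org/get')  # plain requests.Session API
print('Status:', response.status_code)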
Example #11
from __future__ import print_function

from pypipes.config import Config
from pypipes.service.storage import redis_storage_pool

from pypipes.context import LazyContextCollection

# Most pipe processors and content managers expect the storage to be an IContentPool;
# see service/example_pool.py for a pool usage example.
# There are several predefined storage pools that you may use in your application.
# This example demonstrates the usage of redis_storage_pool.

# Let's specify a context for lazy storage initialization.
config = Config({'redis': {
    'host': 'localhost',
    'port': 6379,
}})

context = {'config': config, 'storage': redis_storage_pool}

# redis_storage_pool is a lazy context;
# let's initialize the storage the way a pipeline application does.
storage = LazyContextCollection(context)['storage']

# storage is now a ContextPoolFactory of RedisStorage objects
print('Storage:', storage, type(storage.default))

# All the storages in the pool use the same Redis client because the configuration is the same,
# but each has a different key prefix, which is the name of the storage.

print('Client is the same:', storage.default.redis == storage.cursor.redis)
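To illustrate the prefixing, the same key should not collide across pool members, because each named storage writes under its own prefix. A hypothetical sketch; the save/get method names are illustrative assumptions, not confirmed pypipes API:

# hypothetical method names; only the key-prefix behavior is taken from the comments above
storage.default.save('item1', 'a')  # stored under the 'default' prefix
storage.cursor.save('item1', 'b')   # same key, 'cursor' prefix, no collision
print(storage.default.get('item1'), storage.cursor.get('item1'))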
Example #12
            'limit': 5,
            'hours': 2
        },  # 5 per 2 full hours, starting from the current one
        'day_quota': {
            'limit': 5,
            'days': 1
        },  # 5 / 1 day
        'month_quota': {
            'limit': 5,
            'months': 1
        },  # 5 / 1 month
    }
}
context = LazyContextCollection(
    quota=quota_pool,
    config=Config(config),
    rate_counter=memory_rate_pool,
)

# let's initialize a quota pool the way the infrastructure does.
pool = context['quota']
print('Quota pool:', pool)

# we have several quotas configured
print('Get rate_quota:', pool.rate_quota)
print('Get hour_quota:', pool.hour_quota)

# UnlimitedQuota is returned if you try to get a quota that is not configured.
# An UnlimitedQuota will never expire.
print('Get unknown_quota:', pool.unknown_quota)
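For reference, the truncated top of this config presumably defines the rate_quota fetched above. A sketch of the likely shape, by analogy with the 'default' entry in Example #7; the exact values are assumptions:

config = {
    'quota': {
        'rate_quota': {
            'limit': 5,      # assumed by analogy with Example #7's limit/threshold pair
            'threshold': 10
        },
        # ... the hour/day/month quotas shown above follow here
    }
}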
Example #13
program = Program(name='celery_example',
                  pipelines={
                      'start': Event.on_start >> Message.log('Program started'),
                      'job1': job1_pipeline,
                      'job2': job2_pipeline,
                  },
                  message_mapping={'job_id': message._job,
                                   'job_arg1': message.job_arg1,
                                   'job_arg2': message.job_arg2})

init_logging(default=pipe_logger(
    '%(levelname)s %(program_id)s.%(processor_id)s:[%(job_id)s]%(message)s',
    loglevel=logging.DEBUG))

config = Config({'celery': {'app': {'broker': 'redis://localhost/0'}}})
context = {
    'config': config,
    'logger': logger_context(extras=('program_id', 'processor_id', 'job_id')),
    'lock': local_redis_lock_pool,
    'storage': local_redis_storage_pool,
}

infrastructure = CeleryInf(context)
infrastructure.load(program)

# Start a Celery worker in a separate terminal:
# celery -A example_job worker -c 4 -l DEBUG
app = infrastructure.app

if __name__ == '__main__':
Example #14
def test_init_logging_config(logging_mock):
    logging_config = {'version': '1.0'}
    config = Config({'logging': logging_config})
    init_logging(config)
    logging_mock.config.dictConfig.assert_called_once_with(logging_config)
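The test passes the 'logging' section straight through to dictConfig. For a real (non-mocked) run, the section must follow the standard library's dictConfig schema, which requires the integer version 1. A minimal sketch with illustrative handler choices:

logging_config = {
    'version': 1,  # logging.config.dictConfig requires schema version 1
    'handlers': {
        'console': {'class': 'logging.StreamHandler', 'level': 'INFO'}
    },
    'root': {'handlers': ['console'], 'level': 'INFO'}
}
init_logging(Config({'logging': logging_config}))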
Example #15
def _define_config(config=None):
    # merge, new_config and merge_configs come from the enclosing scope (not shown here)
    if merge and config:
        _config = merge_configs(dict(config), new_config)
    else:
        _config = new_config
    yield {'config': Config(_config)}
Example #16
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)

# This is an example of api_client_context usage.
# api_client_context is a lazy context meant to be used
# in an infrastructure setup for pipeline processing.

# It can use the metrics and cache contexts if they are defined,
# so let's create a context collection that initializes the context for us,
# the way a pipeline infrastructure does.

API_SERVICE_URL = "http://localhost:8000/api/v1/doc/"
api_config = Config(api={
    'incidents': {
        'url': API_SERVICE_URL
    }
})

context = LazyContextCollection(
    api=api_client_pool,
    cache=memory_cache_pool,  # we plan to cache some responses
    metrics=log_metrics_context,  # we want to collect request metrics
    config=Config(api_config),
    logger=logger)  # logger is used by log_metrics_context to output metrics


# let's initialize our context
api = context['api']
print('API client is:', api)
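From here, operations defined in the Swagger spec can be invoked through the client. A hypothetical sketch assuming a bravado-style SwaggerClient (as in Example #1) and a list_incidents operation; the resource and operation names depend entirely on the loaded spec:

client = api.incidents  # the pool is assumed to expose one client per configured API name
result = client.incidents.list_incidents().result()  # hypothetical resource/operation
print('Incidents:', result)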