Example #1
def _make_phylesystem_cache_region(**kwargs):
    """Only intended to be called by the Phylesystem singleton.
    """
    global _CACHE_REGION_CONFIGURED, _REGION
    if _CACHE_REGION_CONFIGURED:
        return _REGION
    _CACHE_REGION_CONFIGURED = True
    try:
        from dogpile.cache import make_region
    except ImportError:
        _LOG.debug("dogpile.cache not available")
        return
    region = None
    trial_key = "test_key"
    trial_val = {"test_val": [4, 3]}
    trying_redis = True
    if trying_redis:
        try:
            a = {
                "host": "localhost",
                "port": 6379,
                "db": 0,  # default is 0
                "redis_expiration_time": 60 * 60 * 24 * 2,  # 2 days
                "distributed_lock": False,  # True if multiple processes will use redis
            }
            region = make_region().configure("dogpile.cache.redis", arguments=a)
            _LOG.debug("cache region set up with cache.redis.")
            _LOG.debug("testing redis caching...")
            region.set(trial_key, trial_val)
            assert trial_val == region.get(trial_key)
            _LOG.debug("redis caching works")
            region.delete(trial_key)
            _REGION = region
            return region
        except Exception:
            _LOG.debug("redis cache set up failed.")
            region = None
    trying_file_dbm = False
    if trying_file_dbm:
        _LOG.debug("Going to try dogpile.cache.dbm ...")
        first_par = _get_phylesystem_parent(**kwargs)[0]
        cache_db_dir = os.path.split(first_par)[0]
        cache_db = os.path.join(cache_db_dir, "phylesystem-cachefile.dbm")
        _LOG.debug('dogpile.cache region using "{}"'.format(cache_db))
        try:
            a = {"filename": cache_db}
            region = make_region().configure("dogpile.cache.dbm", expiration_time=36000, arguments=a)
            _LOG.debug("cache region set up with cache.dbm.")
            _LOG.debug("testing anydbm caching...")
            region.set(trial_key, trial_val)
            assert trial_val == region.get(trial_key)
            _LOG.debug("anydbm caching works")
            region.delete(trial_key)
            _REGION = region
            return region
        except Exception:
            _LOG.debug("anydbm cache set up failed")
            _LOG.debug("exception in the configuration of the cache.")
    _LOG.debug("Phylesystem will not use caching")
    return None
Example #2
    def _regions(self):
        from dogpile.cache import make_region

        my_regions = {
            "short": make_region().configure("dogpile.cache.memory", expiration_time=1),
            "long": make_region().configure("dogpile.cache.memory", expiration_time=60),
            "myregion": make_region().configure("dogpile.cache.memory", expiration_time=60),
        }

        return my_regions
Example #3
def create_app(db_url, cache_config=None):
    if cache_config is None:
        cache_config = {"backend": "dogpile.cache.null"}
    cache = make_region().configure(**cache_config)
    app = Flask(__name__)
    app.config["DB"] = sessionmaker(bind=create_engine(db_url))
    app.config["Cache"] = cache
    app.jinja_env.undefined = StrictUndefined
    app.jinja_env.filters["md"] = markdown_filter
    app.jinja_env.filters["format_rpm_name"] = format_rpm_name

    def _add_route(url, func):
        @functools.wraps(func)
        def decorated(*args, **kwargs):
            creator = functools.partial(func, *args, **kwargs)
            key = json.dumps({"url": url, "args": args, "kwargs": kwargs}, sort_keys=True)
            print(key)
            return cache.get_or_create(key, creator)

        app.route(url)(decorated)

    _add_route("/", hello)
    _add_route("/pkg/<pkg>/", package)
    _add_route("/grp/<grp>/", group)
    _add_route("/graph/", lambda: render_template("graph.html"))
    _add_route("/graph/portingdb.json", graph_json)

    return app
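`_add_route` caches whole responses through `CacheRegion.get_or_create`, which invokes the creator only on a cache miss and returns the stored value afterwards. The mechanism in isolation (memory backend; `make_page` is a stand-in for a view function, not from the source):

from dogpile.cache import make_region

region = make_region().configure("dogpile.cache.memory", expiration_time=60)
calls = []

def make_page():
    calls.append(1)          # only runs when the key is absent
    return "<html>ok</html>"

assert region.get_or_create("page", make_page) == "<html>ok</html>"
assert region.get_or_create("page", make_page) == "<html>ok</html>"
assert len(calls) == 1       # second lookup was served from the cache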
Example #4
def get_dogpile_region(
    name='default',
    regions={},
    expiration_time=300,
    **arguments
) -> CacheRegion:
    """
    Get a memoized dogpile cache region, configuring it on first use.

    Args:
        name: name to memoize the CacheRegion under
        regions: a dict-like object used to memoize CacheRegions

    Returns: A cache region object
    """

    REDIS_URL = getattr(settings, 'REDIS_URL', None)

    region = regions.get(name)
    if not region:
        arguments.setdefault('url', REDIS_URL)
        region = regions.setdefault(
            name,
            make_region().configure(
                'dogpile.cache.' + ('redis' if REDIS_URL else 'memory'),
                expiration_time=expiration_time,
                arguments=arguments,
            )
        )

    return region
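Note that the mutable default `regions={}` is deliberate here: the dict persists across calls, acting as a module-level memo so that repeated lookups under the same name reuse one region instead of building and configuring a new one each call. A brief sketch, assuming no `REDIS_URL` in `settings` so the memory backend is selected:

r1 = get_dogpile_region('default')
r2 = get_dogpile_region('default')
assert r1 is r2  # second call hits the memoized region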
Example #5
def create_app(db_url, cache_config=None):
    if cache_config is None:
        cache_config = {'backend': 'dogpile.cache.null'}
    cache = make_region().configure(**cache_config)
    app = Flask(__name__)
    app.config['DB'] = sessionmaker(bind=create_engine(db_url))
    app.config['Cache'] = cache
    app.jinja_env.undefined = StrictUndefined
    app.jinja_env.filters['md'] = markdown_filter
    app.jinja_env.filters['format_rpm_name'] = format_rpm_name

    @app.context_processor
    def add_cache_tag():
        return {'cache_tag': uuid.uuid4()}

    def _add_route(url, func):
        @functools.wraps(func)
        def decorated(*args, **kwargs):
            creator = functools.partial(func, *args, **kwargs)
            key = json.dumps({'url': url, 'args': args, 'kwargs': kwargs},
                             sort_keys=True)
            print(key)
            return cache.get_or_create(key, creator)
        app.route(url)(decorated)

    _add_route("/", hello)
    _add_route("/pkg/<pkg>/", package)
    _add_route("/grp/<grp>/", group)
    _add_route("/graph/", graph)
    _add_route("/graph/portingdb.json", graph_json)
    _add_route("/piechart.svg", piechart_svg)
    _add_route("/grp/<grp>/piechart.svg", piechart_grp)
    _add_route("/pkg/<pkg>/piechart.svg", piechart_pkg)

    return app
Example #6
def create_app(db_url, cache_config=None):
    if cache_config is None:
        cache_config = {'backend': 'dogpile.cache.null'}
    cache = make_region().configure(**cache_config)
    app = Flask(__name__)
    app.config['DB'] = sessionmaker(bind=create_engine(db_url))
    db = app.config['DB']()
    app.config['Cache'] = cache
    app.config['CONFIG'] = {c.key: json.loads(c.value)
                            for c in db.query(tables.Config)}
    app.jinja_env.undefined = StrictUndefined
    app.jinja_env.filters['md'] = markdown_filter
    app.jinja_env.filters['format_rpm_name'] = format_rpm_name
    app.jinja_env.filters['format_quantity'] = format_quantity
    app.jinja_env.filters['format_percent'] = format_percent
    app.jinja_env.filters['format_time_ago'] = format_time_ago

    @app.context_processor
    def add_template_globals():
        return {
            'cache_tag': uuid.uuid4(),
            'len': len,
            'log': math.log,
            'config': app.config['CONFIG'],
        }

    def _add_route(url, func, get_keys=()):
        @functools.wraps(func)
        def decorated(*args, **kwargs):
            creator = functools.partial(func, *args, **kwargs)
            key_dict = {'url': url,
                        'args': args,
                        'kwargs': kwargs,
                        'get': {k: request.args.get(k) for k in get_keys}}
            key = json.dumps(key_dict, sort_keys=True)
            print(key)
            return cache.get_or_create(key, creator)
        app.route(url)(decorated)

    _add_route("/", hello)
    _add_route("/stats.json", jsonstats)
    _add_route("/pkg/<pkg>/", package)
    _add_route("/grp/<grp>/", group)
    _add_route("/graph/", graph)
    _add_route("/graph/portingdb.json", graph_json)
    _add_route("/piechart.svg", piechart_svg)
    _add_route("/grp/<grp>/piechart.svg", piechart_grp)
    _add_route("/pkg/<pkg>/piechart.svg", piechart_pkg)
    _add_route("/grp/<grp>/graph/", graph_grp)
    _add_route("/grp/<grp>/graph/data.json", graph_json_grp)
    _add_route("/pkg/<pkg>/graph/", graph_pkg)
    _add_route("/pkg/<pkg>/graph/data.json", graph_json_pkg)
    _add_route("/by_loc/", by_loc, get_keys={'sort', 'reverse'})
    _add_route("/by_loc/grp/<grp>/", group_by_loc, get_keys={'sort', 'reverse'})
    _add_route("/mispackaged/", mispackaged, get_keys={'requested'})
    _add_route("/history/", history, get_keys={'expand'})
    _add_route("/history/data.csv", history_csv)
    _add_route("/howto/", howto)

    return app
Example #7
    def test_datetime_expiration_time(self):
        my_region = make_region()
        my_region.configure(
            backend='mock',
            expiration_time=datetime.timedelta(days=1, hours=8)
        )
        eq_(my_region.expiration_time, 32 * 60 * 60)
Example #8
    def test_registration(self):
        import urllib.parse
        from dogpile.cache import make_region
        tokens = make_region()
        tokens.configure_from_config(settings, 'cache.registration.')
        app = self._makeApp(**settings)

        app = webtest.TestApp(app)

        res = app.get('/register')
        email = '*****@*****.**'
        res.form['email'] = email
        res = res.form.submit('register')

        token = tokens.get(email)
        assert res.location == 'http://localhost/'
        params = {'t': token,
                  'e': email}
        res = app.get('/activate?' + urllib.parse.urlencode(params))
        res.form['username'] = '******'
        res.form['password'] = '******'
        res = res.form.submit('activate')
        assert res.location == 'http://localhost/mypage'

        app.get(res.location)
Example #9
    def test_custom_host_routes_failure(self):
        subnet = dict(
            cidr='192.168.89.0/24',
            gateway_ip='192.168.89.1',
            dhcp_enabled=True,
            dns_nameservers=[],
            host_routes=[{
                'destination': '192.240.128.0/20',
                'nexthop': '192.168.89.2'
            }]
        )
        network = dict(
            network_id='netid',
            interface=dict(ifname='ge0', addresses=['fe80::2']),
            subnets=[subnet]
        )
        c = models.Configuration({'networks': [network]})

        cache = make_region().configure('dogpile.cache.memory')
        with mock.patch.object(self.mgr, 'sudo') as sudo:

            sudo.side_effect = RuntimeError("Kaboom!")

            self.mgr.update_host_routes(c, cache)
            sudo.assert_called_once_with(
                '-4', 'route', 'add', '192.240.128.0/20', 'via',
                '192.168.89.2', 'dev', 'eth0'
            )
            self.assertEqual(len(cache.get('host_routes')), 0)
Example #10
def includeme(config):
    add_mako_renderer(config, '.txt')
    reg = config.registry
    region = make_region()
    region.configure_from_config(config.registry.settings,
                                 'cache.registration.')
    store = DogPileTokenStore(region)
    message_factory = RegistrationMessageFactory(
        sender=config.registry.settings['registration.mail.sender'])

    reg.utilities.register([], ITokenStore,
                           "",
                           store)
    reg.utilities.register([], ITokenGenerator,
                           "",
                           generate_uuid_token)
    reg.utilities.register([], IMessageFactory,
                           "",
                           message_factory)
    reg.utilities.register([], IUserFactory,
                           "",
                           user_factory)
    reg.adapters.register([IMailer, IMessageFactory,
                           ITokenStore, ITokenGenerator,
                           IUserFactory,
                           ],
                          IRegistration,
                          "",
                          Registration)
Example #11
def get_null_region():
    """Region for a "NULL" cache that doesn't really cache at all.

    Returns:
       (CacheRegion) object
    """
    return make_region().configure('dogpile.cache.null')
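The null backend accepts writes but stores nothing, so every read is a miss; that makes it a safe stand-in when caching is disabled, and `get_or_create` simply calls the creator on every lookup. A quick sketch of the contract:

from dogpile.cache.api import NO_VALUE

region = get_null_region()
region.set("k", 42)                   # accepted, silently discarded
assert region.get("k") is NO_VALUE    # reads always miss
assert region.get_or_create("k", lambda: 42) == 42  # creator runs each time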
Example #12
def create_app(db_url, cache_config=None):
    if cache_config is None:
        cache_config = {'backend': 'dogpile.cache.null'}
    cache = make_region().configure(**cache_config)
    app = Flask(__name__)
    app.config['DB'] = sessionmaker(bind=create_engine(db_url))
    app.config['Cache'] = cache
    app.jinja_env.undefined = StrictUndefined
    app.jinja_env.filters['md'] = markdown_filter

    def _add_route(url, func):
        @functools.wraps(func)
        def decorated(*args, **kwargs):
            creator = functools.partial(func, *args, **kwargs)
            key = json.dumps({'url': url, 'args': args, 'kwargs': kwargs},
                             sort_keys=True)
            print(key)
            return cache.get_or_create(key, creator)
        app.route(url)(decorated)

    _add_route("/", hello)
    _add_route("/pkg/<pkg>/", package)
    _add_route("/grp/<grp>/", group)

    return app
Example #13
    def __init__(self, auth_url, user_id, password, project_id,
                 keystoneclient=None):
        '''Initialize Keystone wrapper.

        @param string auth_url   auth_url for keystoneclient
        @param string user_id    user_id for keystoneclient
        @param string password   password for keystoneclient
        @param string project_id project_id for keystoneclient
        @param object keystoneclient optional keystoneclient implementation.
                                     Uses keystoneclient.v3 if unspecified.
        '''
        self.keystoneclient = keystoneclient or ks_keystoneclient
        self.auth_url = auth_url
        self.user_id = user_id
        self.password = password
        self.project_id = project_id
        self._client = None
        if CONF.keystone.cache_dir:
            if not os.path.isdir(CONF.keystone.cache_dir):
                os.makedirs(CONF.keystone.cache_dir, mode=0o700)

            dbm_path = os.path.join(CONF.keystone.cache_dir, 'keystone.db')
            self.cache = cache.make_region().configure(
                'dogpile.cache.dbm',
                expiration_time=CONF.keystone.cache_ttl,
                arguments={"filename": dbm_path})
        else:
            self.cache = None
Example #14
    def __init__(
        self, region=None, kwargs_handlers=None, http_status_codes=None
    ):
        if not region:
            dbm_path = os.path.dirname(DEFAULT_DBM_PATH)
            if not os.path.exists(dbm_path):
                os.makedirs(dbm_path)

            region = make_region().configure(
                'dogpile.cache.dbm',
                expiration_time=DEFAULT_TIMEOUT,
                arguments={'filename': DEFAULT_DBM_PATH}
            )
        self.region = region

        if not kwargs_handlers:
            kwargs_handlers = [kwarg_range_header_handler]
        self.kwargs_handlers = kwargs_handlers

        if not http_status_codes:
            http_status_codes = (
                requests.codes.ok,  # 200
                requests.codes.not_found,  # 404
                requests.codes.method_not_allowed,  # 405
                requests.codes.request_entity_too_large,  # 413
            )
        self.http_status_codes = http_status_codes
Example #15
def configure_cache(settings, name=None):
    prefix = 'netprofile.cache.'
    if name is not None:
        prefix = ''.join((prefix, name, '.'))
    else:
        name = 'MAIN'
    return make_region(name=name).configure_from_config(settings, prefix)
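`configure_from_config` reads the backend name, expiration time, and backend arguments out of a flat mapping, keyed by the given prefix. For the `'netprofile.cache.'` prefix above, a settings dict of roughly this shape (values illustrative) would be consumed:

settings = {
    'netprofile.cache.backend': 'dogpile.cache.redis',
    'netprofile.cache.expiration_time': 3600,
    'netprofile.cache.arguments.host': 'localhost',
    'netprofile.cache.arguments.port': 6379,
}
region = configure_cache(settings)  # picks up the 'netprofile.cache.' keys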
Example #16
    def test_reject_invalid_expiration_time(self):
        my_region = make_region()

        assert_raises_message(
            exception.ValidationError,
            "expiration_time is not a number or timedelta.",
            my_region.configure, 'mock', 'one hour'
        )
Example #17
def make_cache_region(store=None):
    from dogpile.cache import make_region

    if store is None:
        store = {}
    return make_region().configure(
        "dogpile.cache.memory", arguments={"cache_dict": store}
    )
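Because `cache_dict` hands the memory backend the very dictionary it stores into, tests can inspect or flush cached entries directly. A short sketch:

store = {}
region = make_cache_region(store)
region.set("answer", 42)
assert region.get("answer") == 42
assert "answer" in store  # the backend wrote into our dict
store.clear()             # clearing the dict empties the cache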
Example #18
def includeme(config):  # pragma: no cover
    cache_region = make_region(key_mangler=key_mangler)
    cache_region.configure_from_config(config.registry.settings, "dogpile.")

    def cache_region_factory(context, request):
        return cache_region

    config.register_service_factory(cache_region_factory, name="cache")
Example #19
def main(global_config, **settings):
    """ This function returns a Pyramid WSGI application.
    """

    try:
        settings["api.token"] = os.environ["SEMETRIC_API"]
        settings["memcache.password"] = os.environ["MEMCACHIER_PASSWORD"]
        settings["memcache.servers"] = os.environ["MEMCACHIER_SERVERS"]
        settings["memcache.user"] = os.environ["MEMCACHIER_USERNAME"]
        settings["deezer.key"] = os.environ["DEEZER_KEY"]
    except KeyError as exc:
        raise ConfigurationError("Failed to load config from env: {0}".format(exc))

    config = Configurator(settings=settings)
    #config.add_route('mongo_chart', '/chart/{id}')
    config.add_route('augment_chart', '/chart/{id}')
    config.add_route('proxy', '/*url')
    config.scan()

    config.include("hairyoctobear.api")

    registry = config.registry

    registry._cache_region = make_region().configure(
        'dogpile.cache.bmemcached',
        expiration_time=None,
        arguments={
            'url': settings["memcache.servers"].split(),
            'username': settings["memcache.user"],
            'password': settings["memcache.password"]
        }
    )


    registry.get_or_create = registry._cache_region.get_or_create

    mongo_uri = os.environ["MONGOHQ_URL"]
    mongo_db = mongo_uri.rsplit('/', 1)[-1]

    mongo = MongoClient(mongo_uri)
    db = mongo[mongo_db]
    registry.charts = db['charts']

    def get_chart(request, chart_id):
        city = request.params.get('city', 'London')
        key = "{0}-chart-{1}".format(chart_id, city.lower())
        res = registry.charts.find_one({"key": key})
        if res:
            return json.loads(res['value'])

    config.add_request_method(get_chart)

    return CORS(config.make_wsgi_app(),
                headers="*",
                methods="*",
                maxage="180",
                origin="copy",
                credentials="true")
Example #20
def get_cache():
    global region
    if region is None:
        settings = get_settings()
        region = make_region().configure_from_config(settings, 'cache.')
        if settings['cache.backend'] == 'dogpile.cache.dbm':
            os.chmod(settings['cache.arguments.filename'], 0o700)

    return region
Example #21
    def setUp(self):
        self.app = flask.Flask('system_test')
        self.app.register_blueprint(v1.system.blueprint)
        self.test_app = self.app.test_client()
        # Replace the default cache with an in-memory version.
        self._old_cache = v1.system._cache
        v1.system._cache = make_region().configure(
            'dogpile.cache.memory',
        )
Example #22
def cached_node_function_more_args():
    test_cache = make_region(
        function_key_generator=cache_key_generator_for_node_argument
    ).configure('dogpile.cache.memory')

    @test_cache.cache_on_arguments()
    def function_with_node_arg_and_more(node, arg1, arg2):
        return node.id, arg1, arg2

    return function_with_node_arg_and_more
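`cache_key_generator_for_node_argument` is not shown in this excerpt. A plausible sketch, assuming dogpile's `function_key_generator` contract (called with a namespace and the decorated function, returning a key builder) and nodes that carry an `id` attribute:

def cache_key_generator_for_node_argument(namespace, fn):
    prefix = fn.__name__ + ('|' + namespace if namespace else '')

    def generate_key(node, *args):
        # key on the node's id rather than its unstable default repr
        return '|'.join([prefix, str(node.id)] + [str(a) for a in args])

    return generate_key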
Example #23
    def _cache(cls):
        if not cls.__cache and any(['cache.connectors.' in k for k in config]):
            cls.__cache = make_region(
                function_key_generator=cache_key_generator,
                key_mangler=cache_key_mangler,
            )
            cls.__cache.configure_from_config(config, 'cache.connectors.')

        return cls.__cache
Example #24
    def _new_region(self):
        from dogpile import cache as dc
        self._cache_region = dc.make_region()
        config = dict(pymor.core.dogpile_backends.DEFAULT_DISK_CONFIG)
        if self.filename:
            config['arguments.filename'] = os.path.expanduser(self.filename)
        if self.max_size:
            config['arguments.max_size'] = self.max_size
        self._cache_region.configure_from_config(config, '')
Example #25
    def __init__(self, path=DEFAULT_CONFIG_PATH):
        #: The config file's location.
        #: Default: ~/.config/mytardisclient/mytardisclient.cfg
        self.path = path

        #: The logging config path.
        #: Default: ~/.config/mytardisclient/logging.cfg
        self.logging_config_path = LOGGING_CONFIG_PATH
        if not os.path.exists(os.path.dirname(self.logging_config_path)):
            os.makedirs(os.path.dirname(self.logging_config_path))
        if not os.path.exists(self.logging_config_path):
            with open(self.logging_config_path, 'w') as logging_config:
                logging_config.write(DEFAULT_LOGGING_CONF)
        self.logfile_path = LOGFILE_PATH

        #: The MyTardis URL, e.g. 'http://mytardisdemo.erc.monash.edu.au'
        self.url = ""

        #: The MyTardis username, e.g. 'demofacility'
        self.username = ""

        #: The MyTardis API key, e.g. '644be179cc6773c30fc471bad61b50c90897146c'
        self.apikey = ""

        #: Path for caching results of frequently used queries.
        #: Default: ~/.cache/mytardisclient/mytardisclient.cache
        self.cache_path = CACHE_PATH

        #: Cache backend.
        #: Default: 'dogpile.cache.dbm'.
        #: Set to 'dogpile.cache.null' to disable caching.
        self.cache_backend = 'dogpile.cache.dbm'

        #: Cache expiry time.
        #: Default: 30 seconds.
        self.cache_expiry = 30

        def key_generator(namespace, function):
            # pylint: disable=unused-argument
            def generate_key(*args, **kwargs):
                return "%s(%s,%s)" % \
                    (function.__name__, str(args), str(kwargs))
            return generate_key
        if not os.path.exists(os.path.dirname(self.cache_path)):
            os.makedirs(os.path.dirname(self.cache_path))
        self.region = \
            make_region(function_key_generator=key_generator) \
                .configure(
                    self.cache_backend,
                    expiration_time=self.cache_expiry,
                    arguments={
                        "filename": self.cache_path
                    })

        if path:
            self.load()
Example #26
def _get_cache():
    global _cache
    if _cache is None:
        _cache = make_region().configure(
            'dogpile.cache.dbm',
            arguments={
                "filename": "/etc/akanda-state"
            }
        )
    return _cache
Example #27
def initialize_cache(args):
    global MEMOIZE
    global CACHE
    if args is None:
        MEMOIZE = lambda func: func
        CACHE = MockCache()
        return
    region = make_region().configure(**args)
    MEMOIZE = cache_responses(region)
    CACHE = CommonCache(region)
Example #28
    def __init__(self, key_uri_supplier):
        """Constructs an instance of JwksSupplier.

        Args:
          key_uri_supplier: a KeyUriSupplier instance that returns the `jwks_uri`
            based on the given issuer.
        """
        self._key_uri_supplier = key_uri_supplier
        self._jwks_cache = cache.make_region().configure(
            u"dogpile.cache.memory", expiration_time=datetime.timedelta(minutes=5))
Example #29
    def _cache(cls):
        if not cls.__cache and any(['cache.connectors.' in k for k in config]):
            cls.__cache = make_region(
                function_key_generator=cache_key_generator,
                key_mangler=lambda key: hashlib.sha1(key).hexdigest(),
                async_creation_runner=async_creation_runner,
            )
            cls.__cache.configure_from_config(config, 'cache.connectors.')

        return cls.__cache
Example #30
def init_cache():
    # TODO: make env configurable, for testing. Will likely require us to set an ENV when running tests.
    # default
    CACHE_REGION = make_region().configure(
        CONFIG.ProdSettings.DEFAULT_CACHE_BACKEND,
        expiration_time=CONFIG.ProdSettings.DEFAULT_CACHE_EXPIRE_TIME,
        arguments={
            'distributed_lock': CONFIG.ProdSettings.DEFAULT_CACHE_ARGUMENTS['distributed_lock']
        }, wrap=[CacheLoggingProxy]
    )
    return CACHE_REGION
Example #31
    def __init__(cls, name, bases, attrs):
        cls.cache = make_region().configure(
            'dogpile.cache.memory',
            expiration_time=getattr(cls, 'cache_lifetime',
                                    default_cache_lifetime),
        )
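Since this `__init__` runs on a metaclass, every class built with it receives its own memory-backed region, with the lifetime taken from an optional `cache_lifetime` class attribute. A usage sketch under assumed names (`CachedMeta` and the module-level `default_cache_lifetime` are illustrative, not from the source):

from dogpile.cache import make_region

default_cache_lifetime = 300  # assumed module-level fallback

class CachedMeta(type):
    def __init__(cls, name, bases, attrs):
        # one independent region per class created with this metaclass
        cls.cache = make_region().configure(
            'dogpile.cache.memory',
            expiration_time=getattr(cls, 'cache_lifetime',
                                    default_cache_lifetime),
        )

class PriceFeed(metaclass=CachedMeta):
    cache_lifetime = 60  # entries on this class expire after a minute

PriceFeed.cache.set('btc', 42000)
assert PriceFeed.cache.get('btc') == 42000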
Example #32
        '''
        generate_key
        '''
        return str(rse)

    return generate_key


if rsemanager.CLIENT_MODE:  # pylint:disable=no-member
    setattr(rsemanager, '__request_rse_info', get_rse_client)
    setattr(rsemanager, '__get_signed_url', get_signed_url_client)

    # Preparing region for dogpile.cache
    RSE_REGION = make_region(
        function_key_generator=rse_key_generator).configure(
            'dogpile.cache.memory', expiration_time=3600)
    setattr(rsemanager, 'RSE_REGION', RSE_REGION)

if rsemanager.SERVER_MODE:  # pylint:disable=no-member
    from rucio.core.rse import get_rse_protocols, get_rse_id
    from rucio.core.credential import get_signed_url

    def tmp_rse_info(rse, session=None):
        rse_id = get_rse_id(rse=rse)
        return get_rse_protocols(rse_id=rse_id, session=session)

    setattr(rsemanager, '__request_rse_info', tmp_rse_info)
    setattr(rsemanager, '__get_signed_url', get_signed_url)
    RSE_REGION = make_region(
        function_key_generator=rse_key_generator).configure(
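The definition of `rse_key_generator` is truncated above (only the tail of its inner `generate_key`, returning `str(rse)`, survives, and the final `make_region` call is cut off mid-statement). A plausible reconstruction consistent with that tail:

def rse_key_generator(namespace, function):
    """Generate a cache key from the RSE name alone."""
    def generate_key(rse, session=None):
        return str(rse)
    return generate_key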
Example #33
"""

try:
    from cmdb.client import Client as cmdb_client
except Exception:
    cmdb_client = None
from dogpile.cache import make_region

from zmon_worker_monitor.adapters.ifunctionfactory_plugin import IFunctionFactoryPlugin, propartial

import json
import redis
import time

HOSTS_CACHE_EXPIRATION_TIME = 600  # 10 minutes
memory_cache = make_region().configure(
    'dogpile.cache.memory', expiration_time=HOSTS_CACHE_EXPIRATION_TIME)


class JoblocksFactory(IFunctionFactoryPlugin):
    def __init__(self):
        super(JoblocksFactory, self).__init__()
        # fields from configuration
        self.cmdb_url = None

    def configure(self, conf):
        """
        Called after plugin is loaded to pass the [configuration] section in their plugin info file
        :param conf: configuration dictionary
        """
        self.cmdb_url = conf['cmdb_url']
Example #34
from app.resources import polkascan

# Database connection
engine = create_engine(DB_CONNECTION,
                       echo=DEBUG,
                       isolation_level="READ_UNCOMMITTED",
                       pool_pre_ping=True)
session_factory = sessionmaker(bind=engine, autoflush=False, autocommit=False)

# Define cache region
cache_region = make_region().configure(
    'dogpile.cache.redis',
    arguments={
        'host': DOGPILE_CACHE_SETTINGS['host'],
        'port': DOGPILE_CACHE_SETTINGS['port'],
        'db': DOGPILE_CACHE_SETTINGS['db'],
        'redis_expiration_time': 60 * 60 * 2,  # 2 hours
        'distributed_lock': True
    })

# Define application
app = falcon.API(middleware=[
    ContextMiddleware(),
    SQLAlchemySessionManager(session_factory),
    CacheMiddleware(cache_region)
])

# Application routes
app.add_route('/block', polkascan.BlockListResource())
app.add_route('/block/{block_id}', polkascan.BlockDetailsResource())
Example #35
    config.get('hdx.caching.redis_host', 'gisredis') or 'gisredis',
    'cache.redis.arguments.port':
    int(config.get('hdx.caching.redis_port', '6379') or 6379),
    'cache.redis.arguments.db':
    int(config.get('hdx.caching.redis_db', '3') or 3),
    'cache.redis.arguments.redis_expiration_time':
    60 * 60 * 24 * 3,  # 3 days - higher than the expiration time
    'cache.redis.arguments.distributed_lock':
    True
}
dogpile_config = {
    'cache.redis.expiration_time': 60 * 60 * 24,
}
dogpile_config.update(dogpile_standard_config)

dogpile_requests_region = make_region(
    key_mangler=lambda key: 'requests-' + key)
dogpile_requests_region.configure_from_config(dogpile_config,
                                              dogpile_config_filter)


class HDXRedisInvalidationStrategy(RegionInvalidationStrategy):
    def __init__(self, dogpile_region):
        '''

        :param dogpile_region:
        :type dogpile_region: CacheRegion
        '''
        self.dogpile_region = dogpile_region

    def invalidate(self, hard=None):
        mangler, redis = self._find_backend_info()
Example #36
# -*- coding: utf-8 -*-
import datetime

from dogpile.cache import make_region

#: Expiration time for show caching
SHOW_EXPIRATION_TIME = datetime.timedelta(weeks=3).total_seconds()

#: Expiration time for episode caching
EPISODE_EXPIRATION_TIME = datetime.timedelta(days=3).total_seconds()

#: Expiration time for scraper searches
REFINER_EXPIRATION_TIME = datetime.timedelta(weeks=1).total_seconds()


region = make_region()
Example #37
def initialize_dogpile():

    global REGIONS

    REGIONS["region_redis"] = make_region().configure(
        "dogpile.cache.redis",
        expiration_time=3600,
        arguments={
            "host": REDIS_HOST,
            "port": REDIS_PORT,
            "db": 0,
            "redis_expiration_time": 3600,
        },
    )

    REGIONS["region_redis_local"] = make_region().configure(
        "dogpile.cache.redis",
        expiration_time=3600,
        arguments={
            "host": REDIS_HOST,
            "port": REDIS_PORT,
            "db": 0,
            "redis_expiration_time": None,
        },
    )

    REGIONS["region_msgpack"] = make_region().configure(
        "dogpile_backend_redis_advanced",
        expiration_time=3600,
        arguments={
            "host": REDIS_HOST,
            "port": REDIS_PORT,
            "db": 0,
            "loads": msgpack_loads,
            "dumps": msgpack.packb,
            "redis_expiration_time": 3600,
        },
    )

    REGIONS["region_msgpack_local"] = make_region().configure(
        "dogpile_backend_redis_advanced",
        expiration_time=3600,
        arguments={
            "host": REDIS_HOST,
            "port": REDIS_PORT,
            "db": 0,
            "loads": msgpack_loads,
            "dumps": msgpack.packb,
            "redis_expiration_time": None,
        },
    )

    REGIONS["region_msgpack_local_int"] = make_region().configure(
        "dogpile_backend_redis_advanced",
        expiration_time=3600,
        arguments={
            "host": REDIS_HOST,
            "port": REDIS_PORT,
            "db": 0,
            "loads": SerializerMsgpackInt.loads,
            "dumps": SerializerMsgpackInt.dumps,
            "redis_expiration_time": None,
        },
    )

    REGIONS["region_msgpack_raw"] = make_region().configure(
        "dogpile_backend_redis_advanced",
        expiration_time=3600,
        arguments={
            "host": REDIS_HOST,
            "port": REDIS_PORT,
            "db": 0,
            "loads": SerializerMsgpackRaw.loads,
            "dumps": SerializerMsgpackRaw.dumps,
            "redis_expiration_time": 3600,
        },
    )

    REGIONS["region_msgpack_raw_local"] = make_region().configure(
        "dogpile_backend_redis_advanced",
        expiration_time=3600,
        arguments={
            "host": REDIS_HOST,
            "port": REDIS_PORT,
            "db": 0,
            "loads": SerializerMsgpackRaw.loads,
            "dumps": SerializerMsgpackRaw.dumps,
        },
    )

    REGIONS["region_msgpack_raw_hash"] = make_region().configure(
        "dogpile_backend_redis_advanced_hstore",
        expiration_time=3600,
        arguments={
            "host": REDIS_HOST,
            "port": REDIS_PORT,
            "db": 0,
            "loads": SerializerMsgpackRaw.loads,
            "dumps": SerializerMsgpackRaw.dumps,
            "redis_expiration_time": 3600,
            "redis_expiration_time_hash": None,
        },
    )

    REGIONS["region_json"] = make_region().configure(
        "dogpile_backend_redis_advanced",
        expiration_time=3600,
        arguments={
            "host": REDIS_HOST,
            "port": REDIS_PORT,
            "db": 0,
            "loads": SerializerJson.loads,
            "dumps": SerializerJson.dumps,
            "redis_expiration_time": 3600,
        },
    )

    REGIONS["region_json_local"] = make_region().configure(
        "dogpile_backend_redis_advanced",
        expiration_time=3600,
        arguments={
            "host": REDIS_HOST,
            "port": REDIS_PORT,
            "db": 0,
            "loads": SerializerJson.loads,
            "dumps": SerializerJson.dumps,
            "redis_expiration_time": None,
        },
    )

    REGIONS["region_json_local_int"] = make_region().configure(
        "dogpile_backend_redis_advanced",
        expiration_time=3600,
        arguments={
            "host": REDIS_HOST,
            "port": REDIS_PORT,
            "db": 0,
            "loads": SerializerJsonInt.loads,
            "dumps": SerializerJsonInt.dumps,
            "redis_expiration_time": None,
        },
    )

    REGIONS["region_json_raw"] = make_region().configure(
        "dogpile_backend_redis_advanced",
        expiration_time=3600,
        arguments={
            "host": REDIS_HOST,
            "port": REDIS_PORT,
            "db": 0,
            "loads": SerializerJsonRaw.loads,
            "dumps": SerializerJsonRaw.dumps,
            "redis_expiration_time": 3600,
        },
    )

    REGIONS["region_json_raw_local"] = make_region().configure(
        "dogpile_backend_redis_advanced",
        expiration_time=3600,
        arguments={
            "host": REDIS_HOST,
            "port": REDIS_PORT,
            "db": 0,
            "loads": SerializerJsonRaw.loads,
            "dumps": SerializerJsonRaw.dumps,
            "redis_expiration_time": None,
        },
    )

    REGIONS["region_json_raw_hash"] = make_region().configure(
        "dogpile_backend_redis_advanced_hstore",
        expiration_time=3600,
        arguments={
            "host": REDIS_HOST,
            "port": REDIS_PORT,
            "db": 0,
            "loads": SerializerJsonRaw.loads,
            "dumps": SerializerJsonRaw.dumps,
            "redis_expiration_time": 3600,
            "redis_expiration_time_hash": None,
        },
    )
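The `Serializer*` classes referenced above bundle the paired `loads`/`dumps` callables that `dogpile_backend_redis_advanced` accepts. As an illustration of the expected shape only (the source's actual implementations may differ), a JSON pair has to round-trip dogpile's `CachedValue` tuples, roughly like so:

import json
from dogpile.cache.api import CachedValue

class SerializerJson(object):
    @staticmethod
    def dumps(value):
        # `value` is a dogpile CachedValue: (payload, metadata)
        return json.dumps(value)

    @staticmethod
    def loads(value):
        payload, metadata = json.loads(value)
        return CachedValue(payload, metadata)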
Example #38
    def test_datetime_expiration_time(self):
        my_region = make_region()
        my_region.configure(backend="mock",
                            expiration_time=datetime.timedelta(days=1,
                                                               hours=8))
        eq_(my_region.expiration_time, 32 * 60 * 60)
Example #39
    def test_to_repair_a_rule_with_only_1_rse_whose_site_is_blocklisted(self):
        """ JUDGE REPAIRER: Test to repair a rule with only 1 rse whose site is blocklisted"""

        rse = rse_name_generator()
        rse_id = add_rse(rse, **self.vo)
        set_local_account_limit(self.jdoe, rse_id, -1)
        rule_repairer(once=True)  # Clean out the repairer

        region = make_region().configure(
            'dogpile.cache.memcached',
            expiration_time=900,
            arguments={
                'url': config_get('cache', 'url', False, '127.0.0.1:11211'),
                'distributed_lock': True
            })

        def change_availability(new_value):
            update_rse(rse_id, {'availability_write': new_value})
            # clear cache
            region.delete(sha256(rse.encode()).hexdigest())

        for grouping, ignore_availability in itertools.product(
            ["NONE", "DATASET", "ALL"], [True, False]):
            scope = InternalScope('mock', **self.vo)
            files = create_files(1, scope, self.rse4_id, bytes_=100)
            dataset = 'dataset_' + str(uuid())
            add_did(scope, dataset, DIDType.DATASET, self.jdoe)
            attach_dids(scope, dataset, files, self.jdoe)

            if ignore_availability:
                change_availability(False)
                rule_id = add_rule(dids=[{
                    'scope': scope,
                    'name': dataset
                }],
                                   account=self.jdoe,
                                   copies=1,
                                   rse_expression=rse,
                                   grouping=grouping,
                                   weight=None,
                                   lifetime=None,
                                   locked=False,
                                   subscription_id=None,
                                   ignore_availability=ignore_availability,
                                   activity='DebugJudge')[0]
                assert (RuleState.STUCK == get_rule(rule_id)['state'])

                rule_repairer(once=True)
                assert (RuleState.REPLICATING == get_rule(rule_id)['state'])

                change_availability(True)
            else:
                rule_id = add_rule(dids=[{
                    'scope': scope,
                    'name': dataset
                }],
                                   account=self.jdoe,
                                   copies=1,
                                   rse_expression=rse,
                                   grouping=grouping,
                                   weight=None,
                                   lifetime=None,
                                   locked=False,
                                   subscription_id=None,
                                   ignore_availability=ignore_availability,
                                   activity='DebugJudge')[0]
                failed_transfer(scope=scope,
                                name=files[0]['name'],
                                rse_id=get_replica_locks(
                                    scope=files[0]['scope'],
                                    name=files[0]['name'])[0].rse_id)
                change_availability(False)
                assert (RuleState.STUCK == get_rule(rule_id)['state'])

                rule_repairer(once=True)
                assert (RuleState.STUCK == get_rule(rule_id)['state'])

                change_availability(True)
                rule_repairer(once=True)
                assert (RuleState.REPLICATING == get_rule(rule_id)['state'])
Example #40
# There are two separate caches being created.
#
# The "static" cache contains items that are unlikely to change while the BLL
# is run and whose number of entries is not going to continue to grow.  These
# items are not specific to the logged in user.
#
# The "session" cache contains items that are tied to keystone sessions,
# and the region's timeout is set to match keystone's (4 hours).   After
# the expiration time is hit, dogpile will not return the value from the cache,
# but will trigger the function to run and re-obtain its values.  The
# in-memory backends supplied by dogpile do not actually delete expired
# entries from the cache, so a separate thread is spawned to periodically
# clean these up to avoid runaway memory usage.
#
static = make_region(
    key_mangler=sha1_mangle_key).configure('dogpile.cache.memory')

cache = {}
cache_expiration = get_conf('cache_expiration', 14400)  # 4 hours
# session = make_region(key_mangler=sha1_mangle_key).configure(
session_cache = make_region().configure('dogpile.cache.memory',
                                        expiration_time=cache_expiration,
                                        arguments={"cache_dict": cache})

start_cache_cleaner(cache, cache_expiration, "SessionCacheCleaner")


def login(username, password, domain='Default'):
    """
    Perform the initial login to the BLL, using the given credentials.  This
    uses the "normal" keystone workflow of:
Example #41
        return cache_dict

register_backend("sqlalchemy.session", __name__, "ScopedSessionBackend")


if __name__ == '__main__':
    from .environment import Session, regions
    from .caching_query import FromCache
    from dogpile.cache import make_region

    # set up a region based on the ScopedSessionBackend,
    # pointing to the scoped_session declared in the example
    # environment.
    regions['local_session'] = make_region().configure(
        'sqlalchemy.session',
        arguments={
            "scoped_session": Session
        }
    )

    from .model import Person

    # query to load Person by name, with criterion
    # of "person 10"
    q = Session.query(Person).\
        options(FromCache("local_session")).\
        filter(Person.name == "person 10")

    # load from DB
    person10 = q.one()

    # next call, the query is cached.
Example #42
    def test_custom_host_routes(self):
        subnet = dict(cidr='192.168.89.0/24',
                      gateway_ip='192.168.89.1',
                      dhcp_enabled=True,
                      dns_nameservers=[],
                      host_routes=[{
                          'destination': '192.240.128.0/20',
                          'nexthop': '192.168.89.2'
                      }])
        network = dict(network_id='netid',
                       interface=dict(ifname='ge0', addresses=['fe80::2']),
                       subnets=[subnet])
        c = models.Configuration({'networks': [network]})

        cache = make_region().configure('dogpile.cache.memory')
        with mock.patch.object(self.mgr, 'sudo') as sudo:

            # ...so let's add one!
            self.mgr.update_host_routes(c, cache)
            sudo.assert_called_once_with('-4', 'route', 'add',
                                         '192.240.128.0/20', 'via',
                                         '192.168.89.2', 'dev', 'eth0')

            # db[subnet.cidr] should contain the above route
            expected = set()
            expected.add((netaddr.IPNetwork('192.240.138.0/20'),
                          netaddr.IPAddress('192.168.89.2')))
            self.assertEqual(len(cache.get('host_routes')), 1)
            self.assertEqual(
                cache.get('host_routes')[subnet['cidr']] - expected, set())

            # Empty the host_routes list
            sudo.reset_mock()
            subnet['host_routes'] = []
            c = models.Configuration({'networks': [network]})
            self.mgr.update_host_routes(c, cache)
            sudo.assert_called_once_with('-4', 'route', 'del',
                                         '192.240.128.0/20', 'via',
                                         '192.168.89.2', 'dev', 'eth0')
            self.assertEqual(len(cache.get('host_routes')), 0)

            # ...this time, let's add multiple routes and ensure they're added
            sudo.reset_mock()
            subnet['host_routes'] = [{
                'destination': '192.240.128.0/20',
                'nexthop': '192.168.89.2'
            }, {
                'destination': '192.220.128.0/20',
                'nexthop': '192.168.89.3'
            }]
            c = models.Configuration({'networks': [network]})
            self.mgr.update_host_routes(c, cache)
            self.assertEqual(sudo.call_args_list, [
                mock.call('-4', 'route', 'add', '192.240.128.0/20', 'via',
                          '192.168.89.2', 'dev', 'eth0'),
                mock.call('-4', 'route', 'add', '192.220.128.0/20', 'via',
                          '192.168.89.3', 'dev', 'eth0'),
            ])

            # ...let's remove one and add another...
            sudo.reset_mock()
            subnet['host_routes'] = [{
                'destination': '192.240.128.0/20',
                'nexthop': '192.168.89.2'
            }, {
                'destination': '192.185.128.0/20',
                'nexthop': '192.168.89.4'
            }]
            c = models.Configuration({'networks': [network]})
            self.mgr.update_host_routes(c, cache)
            self.assertEqual(sudo.call_args_list, [
                mock.call('-4', 'route', 'del', '192.220.128.0/20', 'via',
                          '192.168.89.3', 'dev', 'eth0'),
                mock.call('-4', 'route', 'add', '192.185.128.0/20', 'via',
                          '192.168.89.4', 'dev', 'eth0')
            ])

            # ...let's add another subnet...
            self.assertEqual(len(cache.get('host_routes')), 1)
            sudo.reset_mock()
            network['subnets'].append(
                dict(cidr='192.168.90.0/24',
                     gateway_ip='192.168.90.1',
                     dhcp_enabled=True,
                     dns_nameservers=[],
                     host_routes=[{
                         'destination': '192.240.128.0/20',
                         'nexthop': '192.168.90.1'
                     }]))
            c = models.Configuration({'networks': [network]})
            self.mgr.update_host_routes(c, cache)
            self.assertEqual(sudo.call_args_list, [
                mock.call('-4', 'route', 'add', '192.240.128.0/20', 'via',
                          '192.168.90.1', 'dev', 'eth0')
            ])
            self.assertEqual(len(cache.get('host_routes')), 2)

            # ...and finally, delete all custom host_routes...
            sudo.reset_mock()
            network['subnets'][0]['host_routes'] = []
            network['subnets'][1]['host_routes'] = []
            c = models.Configuration({'networks': [network]})
            self.mgr.update_host_routes(c, cache)
            self.assertEqual(sudo.call_args_list, [
                mock.call('-4', 'route', 'del', '192.185.128.0/20', 'via',
                          '192.168.89.4', 'dev', 'eth0'),
                mock.call('-4', 'route', 'del', '192.240.128.0/20', 'via',
                          '192.168.89.2', 'dev', 'eth0'),
                mock.call('-4', 'route', 'del', '192.240.128.0/20', 'via',
                          '192.168.90.1', 'dev', 'eth0'),
            ])
            self.assertEqual(len(cache.get('host_routes')), 0)
Example #43
from database import TableHistory, TableHistoryMovie, TableBlacklist, TableBlacklistMovie, TableShowsRootfolder, \
    TableMoviesRootfolder
from event_handler import event_stream
from get_languages import language_from_alpha2, alpha3_from_alpha2
from helper import path_mappings
from list_subtitles import store_subtitles, store_subtitles_movie
from subliminal_patch.subtitle import Subtitle
from subliminal_patch.core import get_subtitle_path
from subzero.language import Language
from subliminal import region as subliminal_cache_region
from deep_translator import GoogleTranslator
from dogpile.cache import make_region
import datetime
import glob

region = make_region().configure('dogpile.cache.memory')
headers = {"User-Agent": os.environ["SZ_USER_AGENT"]}


class BinaryNotFound(Exception):
    pass


def history_log(action,
                sonarr_series_id,
                sonarr_episode_id,
                description,
                video_path=None,
                language=None,
                provider=None,
                score=None,
Example #44
    This provides a cache backend for dogpile.cache which is designed
    to work in a thread-safe manner using cherrypy.request, a thread local
    storage that only lasts for the duration of a request.
    """
    def __init__(self, arguments):
        pass

    @property
    def _cache(self):
        if not hasattr(cherrypy.request, '_girderCache'):
            cherrypy.request._girderCache = {}

        return cherrypy.request._girderCache


register_backend('cherrypy_request', 'girderformindlogger.utility._cache',
                 'CherrypyRequestBackend')

# These caches must be configured with the null backend upon creation due to the fact
# that user-based configuration of the regions doesn't happen until server start, which
# doesn't occur when using Girder as a library.
cache = make_region(name='girderformindlogger.cache').configure(
    backend='dogpile.cache.null')
requestCache = make_region(name='girderformindlogger.request').configure(
    backend='dogpile.cache.null')

# This cache is not configurable by the user, and will always be configured when the server is.
# It holds data for rate limiting, which is ephemeral, but must be persisted (i.e. it's not optional
# or best-effort).
rateLimitBuffer = make_region(name='girderformindlogger.rate_limit')
Example #45
            raise ValueError('Keyword arguments not supported')
        if has_self:
            args = args[1:]
        return namespace + '|' + ' '.join(map(to_str, args))

    return generate_key


class MutexLock(AbstractFileLock):
    """:class:`MutexLock` is a thread-based rw lock based on :class:`dogpile.core.ReadWriteMutex`"""
    def __init__(self, filename):
        self.mutex = ReadWriteMutex()

    def acquire_read_lock(self, wait):
        ret = self.mutex.acquire_read_lock(wait)
        return wait or ret

    def acquire_write_lock(self, wait):
        ret = self.mutex.acquire_write_lock(wait)
        return wait or ret

    def release_read_lock(self):
        return self.mutex.release_read_lock()

    def release_write_lock(self):
        return self.mutex.release_write_lock()


#: The dogpile.cache region
region = make_region(function_key_generator=subliminal_key_generator)
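Only the tail of `subliminal_key_generator` survives in this excerpt. Its visible fragment (kwargs rejected, `self` stripped, args joined via a `to_str` helper) matches the shape of dogpile's own `function_key_generator`; a plausible reconstruction along those lines:

import inspect

def subliminal_key_generator(namespace, fn, to_str=str):
    if namespace is None:
        namespace = '%s:%s' % (fn.__module__, fn.__name__)
    else:
        namespace = '%s:%s|%s' % (fn.__module__, fn.__name__, namespace)

    spec = inspect.getfullargspec(fn)
    has_self = spec.args and spec.args[0] in ('self', 'cls')

    def generate_key(*args, **kwargs):
        if kwargs:
            raise ValueError('Keyword arguments not supported')
        if has_self:
            args = args[1:]
        return namespace + '|' + ' '.join(map(to_str, args))

    return generate_key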
Example #46
"""

import json
import os
import logging
import sys

from functools import wraps

from ConfigParser import NoOptionError, NoSectionError
from dogpile.cache import make_region
from dogpile.cache.api import NoValue

from rucio.common.config import config_get

REGION = make_region().configure('dogpile.cache.memory', expiration_time=1800)

logging.basicConfig(
    stream=sys.stdout,
    level=getattr(
        logging,
        config_get('common',
                   'loglevel',
                   raise_exception=False,
                   default='DEBUG').upper()),
    format='%(asctime)s\t%(process)d\t%(levelname)s\t%(message)s')


def get_policy():
    policy = REGION.get('policy')
    if isinstance(policy, NoValue):
Example #47
from rucio.core import config as config_core
from rucio.core.rse import get_rse_id, get_rse_transfer_limits

queue_mode = config_get('conveyor', 'queue_mode', False, 'default')
if queue_mode.upper() == 'STRICT':
    queue_mode = 'strict'

config_memcache = config_get('conveyor', 'using_memcache', False, 'False')
if config_memcache.upper() == 'TRUE':
    using_memcache = True
else:
    using_memcache = False

cache_time = int(config_get('conveyor', 'cache_time', False, 600))

REGION_SHORT = make_region().configure('dogpile.cache.memory',
                                       expiration_time=cache_time)


def get_transfer_limits(activity, rse_id):
    """
    Get RSE transfer limits.

    :param activity:  The activity.
    :param rse_id:    The RSE id.

    :returns: max_transfers if exists else None.
    """
    try:
        if queue_mode == 'strict':
            threshold = get_config_limit(activity, rse_id)
            if threshold:
Example #48
    def __init__(self,
                 role,
                 name,
                 client_name,
                 client,
                 *,
                 type=None,
                 monitoring=None,
                 notification=None,
                 venv_path=None,
                 witness_id=None,
                 signing_key=None,
                 **kwargs):
        self.role = role
        if type is not None:
            self._type = type
        self.name = name
        self.witness_signing_key = None
        self.monitoring = to_list(monitoring)
        self.notification = to_list(notification)
        self.client_name = client_name
        data_dir = client.get('data_dir')
        if data_dir:
            data_dir = expanduser(data_dir)

            try:
                log.info('Loading RPC config for %s from %s (client = %s)' %
                         (self.name, data_dir, client_name))
                config = configparser.ConfigParser()
                config_str = '[bts]\n' + open(
                    expanduser(join(data_dir, 'config.ini'))).read()
                # config parser can't handle duplicate values, and we don't need seed nodes
                config_lines = [
                    l for l in config_str.splitlines()
                    if not l.startswith('seed-node')
                ]
                config.read_string('\n'.join(config_lines))
                rpc = {
                }  # FIXME: need it to get the rpc user and rpc password, if necessary
                try:
                    cfg_port = int(config['bts']['rpc-endpoint'].split(':')[1])
                except KeyError:
                    cfg_port = 0
                try:
                    if self.affiliation() == 'steem':
                        self.witness_signing_key = config['bts']['private-key']
                    else:
                        self.witness_signing_key = json.loads(
                            config['bts']['private-key'])[0]
                except KeyError:
                    self.witness_signing_key = None
                log.debug('signing key: {}'.format(self.witness_signing_key))

            except Exception as e:
                log.warning('Cannot read RPC config from %s' % data_dir)
                log.exception(e)
                rpc = {}
                cfg_port = None
        else:
            rpc = {}
            cfg_port = None

        self.witness_host = client.get('witness_host')
        self.witness_port = client.get('witness_port')
        self.witness_user = client.get('witness_user')
        self.witness_password = client.get('witness_password')
        self.wallet_host = client.get('wallet_host')
        self.wallet_port = client.get('wallet_port') or cfg_port or 0
        self.wallet_user = client.get('wallet_user')
        self.wallet_password = client.get('wallet_password')
        self.proxy_host = client.get('proxy_host')
        self.proxy_port = client.get('proxy_port')
        self.proxy_user = client.get('proxy_user')
        self.proxy_password = client.get('proxy_password')

        self.rpc_id = (self.wallet_host, self.wallet_port)
        self.ws_rpc_id = (self.witness_host, self.witness_port)
        self.venv_path = venv_path
        self.witness_id = witness_id
        self.witness_signing_key = signing_key or self.witness_signing_key

        # direct json-rpc call
        def direct_call(funcname, *args):
            # we want to avoid connecting to the client and block because
            # it is in a stopped state (eg: in gdb after having crashed)
            if self.is_localhost() and not bts_binary_running(self):
                raise RPCError(
                    'Connection aborted: {} binary does not seem to be running'
                    .format(self.type()))

            if self.proxy_host is not None and self.proxy_port is not None:
                return rpc_call(self.proxy_host,
                                self.proxy_port,
                                None,
                                None,
                                funcname,
                                *args,
                                rpc_args=dict(
                                    proxy_user=self.proxy_user,
                                    proxy_password=self.proxy_password,
                                    wallet_port=self.wallet_port))

            return rpc_call(self.wallet_host, self.wallet_port,
                            self.wallet_user, self.wallet_password, funcname,
                            *args)

        self._rpc_call = direct_call

        if core.config.get('profile', False):
            self._rpc_call = core.profile(self._rpc_call)

        self.opts = kwargs
        if self.opts:
            log.debug('Additional opts for node {} - {}'.format(
                self.name, self.opts))

        # get a special "smart" cache for slots as it is a very expensive call
        self._slot_cache = make_region().configure('dogpile.cache.memory')

        # caches for committee member and witness names
        self._witness_names = {}
        self._committee_member_names = {}
Example #49
# coding: utf-8
import os
import datetime

from pyramid.settings import aslist

from dogpile.cache import make_region

from analytics import utils
from analytics import choices

cache_region = make_region(name='control_manager')


def current_url(current_url, data):

    params = {
        'collection': data.get('selected_collection_code', None),
        'journal': data.get('selected_journal_code', None),
        'py_range': '-'.join(data.get('py_range', None)),
        'document': data.get('selected_document_code', None),
        'range_start': data.get('range_start', None),
        'range_end': data.get('range_end', None)
    }

    # scope: thematic areas
    sa_scope = '&'.join(['sa_scope=%s' % i for i in data.get('sa_scope', [])])

    # scope: document publication languages
    la_scope = '&'.join(['la_scope=%s' % i for i in data.get('la_scope', [])])
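
Note that cache_region above is created with a name but no backend; in a Pyramid app such a region is typically wired up later from deployment settings. A minimal sketch using dogpile's configure_from_config() convention (the settings dict is illustrative):

from dogpile.cache import make_region

cache_region = make_region(name='control_manager')

# hypothetical settings; keys follow the "<prefix><option>" convention
settings = {
    'control_manager.backend': 'dogpile.cache.memory',
    'control_manager.expiration_time': 3600,
}
cache_region.configure_from_config(settings, 'control_manager.')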
Beispiel #50
import logging
import sys
import threading

from dogpile.cache import make_region

from rucio.common.config import config_get

logging.basicConfig(
    stream=sys.stdout,
    level=getattr(
        logging,
        config_get('common',
                   'loglevel',
                   raise_exception=False,
                   default='DEBUG').upper()),
    format='%(asctime)s\t%(process)d\t%(levelname)s\t%(message)s')

GRACEFUL_STOP = threading.Event()

REGION = make_region().configure('dogpile.cache.memcached',
                                 expiration_time=600,
                                 arguments={
                                     'url': "127.0.0.1:11211",
                                     'distributed_lock': True
                                 })


def delete_from_storage(replicas, prot, rse_info, staging_areas, prepend_str):
    deleted_files = []
    rse_name = rse_info['rse']
    rse_id = rse_info['id']
    try:
        prot.connect()
        for replica in replicas:
            # Physical deletion
            try:
                deletion_dict = {
                    'scope': replica['scope'],
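
For regions like REGION above, reads are usually guarded with NO_VALUE rather than None, since None can itself be a legitimately cached value. A minimal sketch of that pattern (the key and fallback are illustrative; the memory backend stands in for memcached):

from dogpile.cache import make_region
from dogpile.cache.api import NO_VALUE

region = make_region().configure('dogpile.cache.memory', expiration_time=600)

cached = region.get('rse_settings:MOCK')
if cached is NO_VALUE:
    cached = {'deterministic': True}  # stand-in for the real lookup
    region.set('rse_settings:MOCK', cached)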
Beispiel #51
from dogpile.cache import make_region

from rucio.db.sqla import models
from rucio.db.sqla.constants import IdentityType
from rucio.db.sqla.session import read_session, transactional_session


def token_key_generator(namespace, fni, **kwargs):
    """ :returns: generate key function """
    def generate_key(token, session=None):
        """ :returns: token """
        return token

    return generate_key


TOKENREGION = make_region(
    function_key_generator=token_key_generator).configure(
        'dogpile.cache.memory', expiration_time=3600)


@read_session
def exist_identity_account(identity, type, account, session=None):
    """
    Check if an identity is mapped to an account.

    :param identity: The user identity as string.
    :param type: The type of identity as a string, e.g. userpass, x509, gss...
    :param account: The account identifier as a string.
    :param session: The database session in use.

    :returns: True if identity is mapped to account, otherwise False
    """
Beispiel #52
from typing import TYPE_CHECKING

import geoip2.database
import requests
from dogpile.cache import make_region
from dogpile.cache.api import NO_VALUE

from rucio.common import utils
from rucio.common.config import config_get, config_get_bool
from rucio.common.exception import InvalidRSEExpression
from rucio.core.rse_expression_parser import parse_expression

if TYPE_CHECKING:
    from typing import Dict, List, Optional

REGION = make_region(function_key_generator=utils.my_key_generator).configure(
    'dogpile.cache.memory',
    expiration_time=30 * 86400,
)


def __download_geoip_db(directory, filename):
    download_url = config_get('core',
                              'geoip_download_url',
                              raise_exception=False,
                              default=None)
    verify_tls = config_get_bool('core',
                                 'geoip_download_verify_tls',
                                 raise_exception=False,
                                 default=True)
    if not download_url:
        licence_key = config_get('core',
                                 'geoip_licence_key',
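
An alternative to the explicit get/set pattern for a long-lived region like the one above is get_or_create, which takes dogpile's lock so concurrent callers do not regenerate an expired value in parallel. A minimal sketch (the key and creator are illustrative, not rucio's API):

from dogpile.cache import make_region

region = make_region().configure('dogpile.cache.memory',
                                 expiration_time=30 * 86400)

def compute_distance():
    # stand-in for an expensive GeoIP-based computation
    return 42.0

# computed once on a miss; later calls within 30 days hit the cache
distance = region.get_or_create('distance:SITE_A:SITE_B', compute_distance)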
Beispiel #53
# coding: utf-8
import requests

from pyramid.view import view_config

from dogpile.cache import make_region

from analytics.control_manager import base_data_manager
from analytics.custom_queries import custom_query

cache_region = make_region(name='views_website_cache')


@view_config(route_name='bibliometrics_journal_web',
             renderer='templates/website/bibliometrics_journal.mako')
@base_data_manager
def bibliometrics_journal(request):

    data = request.data_manager
    data['page'] = 'bibliometrics'
    titles = request.GET.get('titles', None)

    titles = titles.split('||') if titles else []

    if data['selected_journal_code']:
        journal = request.stats.articlemeta.journal(
            code=data['selected_journal_code'])
        titles.append(journal.title)
        titles.append(journal.abbreviated_title)
        titles.extend(x['title'] for x in custom_query.load(
            data['selected_journal_code']).get('should', [])
Beispiel #54
from traceback import format_exc

from dogpile.cache import make_region
from dogpile.cache.api import NO_VALUE

from rucio.common.exception import Duplicate, RucioException, InvalidObject
from rucio.common.config import config_get
from rucio.db.sqla import models
from rucio.db.sqla.constants import KeyType
from rucio.db.sqla.session import read_session, transactional_session

REGION = make_region().configure('dogpile.cache.memcached',
                                 expiration_time=3600,
                                 arguments={
                                     'url': config_get('cache', 'url', False,
                                                       '127.0.0.1:11211'),
                                     'distributed_lock': True
                                 })


@transactional_session
def add_naming_convention(scope, regexp, convention_type, session=None):
    """
    add a naming convention for a given scope

    :param scope: the name for the scope.
    :param regexp: the regular expression to validate the name.
    :param convention_type: the did_type on which the regexp should apply.
    :param session: The database session in use.
Beispiel #55
import re

from appdirs import user_cache_dir
from lxml import etree, objectify
from dogpile.cache import make_region

from . import namespaces
from .version import __version__

region = make_region().configure(
    'dogpile.cache.dbm',
    expiration_time=1209600,  # 14 days
    arguments={
        "filename": "{dir}/{version}.dbm".format(
            dir=user_cache_dir('anentropic', 'dirtyebay'),
            version='dirtyebay_{}'.format(__version__))
    })

VERSION_COMMENT = re.compile(r'\s*Version\s*(\d+)\s*')

NS_MAP = {
    'wsdl': namespaces.WSDL,
    'ebay': namespaces.EBAY,
    'xs': namespaces.XSD,
}


class VersionNotFound(Exception):
    pass
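
Because the region above is file-backed, cached entries survive process restarts, which suits rarely-changing artefacts such as downloaded WSDL documents. A self-contained sketch of the same idea (the path and function are illustrative):

import os
import tempfile

from dogpile.cache import make_region

region = make_region().configure(
    'dogpile.cache.dbm',
    expiration_time=1209600,  # 14 days
    arguments={'filename': os.path.join(tempfile.gettempdir(),
                                        'wsdl-cache.dbm')})

@region.cache_on_arguments()
def fetch_wsdl(url):
    # stand-in for an HTTP GET; the body would be cached on disk for 14 days
    return '<definitions/>'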
Beispiel #56
    def test_set_name(self):
        # eq_ is an equality-assertion helper from the surrounding test suite
        my_region = make_region(name="my-name")
        eq_(my_region.name, "my-name")
Beispiel #57
from dogpile.cache import make_region

HOURS = 60 * 60

cache = make_region(key_mangler='backslash:cache:{}'.format).configure(
    'dogpile.cache.redis',
    arguments={
        'host': 'localhost',
        'port': 6379,
        'db': 0,
        'redis_expiration_time': 2 * HOURS,
        'distributed_lock': True
    }
)
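
The key_mangler here is simply str.format bound to a template, so every key lands under a 'backslash:cache:' prefix and cannot collide with other users of the same redis db. A quick illustration with the memory backend and an inspectable cache_dict (both illustrative stand-ins for redis):

from dogpile.cache import make_region

store = {}
cache = make_region(key_mangler='backslash:cache:{}'.format).configure(
    'dogpile.cache.memory', arguments={'cache_dict': store})

cache.set('user:42', {'name': 'alice'})
print(list(store))  # ['backslash:cache:user:42']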

Beispiel #58
import logging

import fedmsg.meta

from dogpile.cache import make_region
from fedora.client.fas2 import AccountSystem

log = logging.getLogger(__name__)

try:
    import re2 as re
except ImportError:
    log.warning("Couldn't import the 're2' module.")
    import re

# We cache fancy stuff here from pkgdb, etc.. stuff that we want to expire.
_cache = make_region()
_FAS = None

# This doesn't need any expiration.  Cache forever.
# We do this because the compilation step for python-re2 is 16x slower than
# stdlib, but the match is 10x faster.  So, cache the slow part once and use
# the fast part at the tightest part of the loop.
_regex_cache = {}


def compile_regex(pattern):
    if pattern not in _regex_cache:
        # This is expensive with python-re2, so we cache it.  Forever.
        _regex_cache[pattern] = re.compile(pattern)
    return _regex_cache[pattern]
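
Usage is transparent: the first call pays the (re2) compilation cost and every later call with the same pattern reuses the compiled object from _regex_cache:

matcher = compile_regex(r'^org\.fedoraproject\.prod\.')
print(bool(matcher.match('org.fedoraproject.prod.git.receive')))  # True
assert matcher is compile_regex(r'^org\.fedoraproject\.prod\.')  # cached object reused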
Beispiel #59
    def generate_key(rse, vo='def', session=None):
        '''
        generate_key
        '''
        return '{}:{}'.format(rse, vo)

    return generate_key


if rsemanager.CLIENT_MODE:  # pylint:disable=no-member
    setattr(rsemanager, '__request_rse_info', get_rse_client)
    setattr(rsemanager, '__get_signed_url', get_signed_url_client)

    # Preparing region for dogpile.cache
    RSE_REGION = make_region(
        function_key_generator=rse_key_generator).configure(
            'dogpile.cache.memory', expiration_time=900)
    setattr(rsemanager, 'RSE_REGION', RSE_REGION)

if rsemanager.SERVER_MODE:  # pylint:disable=no-member
    from rucio.core.rse import get_rse_protocols, get_rse_id
    from rucio.core.vo import map_vo

    def tmp_rse_info(rse=None, vo='def', rse_id=None, session=None):
        if rse_id is None:
            # This can be called directly by client tools if they're co-located on a server,
            # e.g. when running the rucio CLI on a server or during the test suite.
            # We have to map to the VO name here for these situations, despite this
            # nominally not being a client interface.
            rse_id = get_rse_id(rse=rse, vo=map_vo(vo))
        return get_rse_protocols(rse_id=rse_id, session=session)
Beispiel #60
from dogpile.cache import make_region

KEY_PREFIX = 'c2corg_ui_main'  # module-level prefix (value taken from the comment below)


def create_region(name):
    return make_region(
        # prefix all keys (e.g. returns 'c2corg_ui_main:detail:3575-1-c796286')
        key_mangler=lambda key: '{0}:{1}:{2}'.format(KEY_PREFIX, name, key))
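
A short usage sketch: every region shares KEY_PREFIX but gets its own name segment, so keys from different page types cannot collide (the page name and value are illustrative):

detail_region = create_region('detail').configure('dogpile.cache.memory')
detail_region.set('3575-1-c796286', {'title': 'some page'})
# stored internally under 'c2corg_ui_main:detail:3575-1-c796286'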