Example #1
# Stdlib imports used below; LRUCache, HTTPNotFound, FileServeApp and
# INVALID_PATH_PARTS come from the surrounding project.
from os.path import isfile, join, normcase, normpath

class StaticsMiddleware(object):
    def _adapt_path(self, path):
        return normcase(normpath(path))

    def __init__(self, app, root_dir, cache_max_age=3600):
        self.app = app
        self.cache_max_age = cache_max_age
        self.doc_root = self._adapt_path(root_dir)
        self.paths_cache = LRUCache(1024)

    def __call__(self, environ, start_response):
        full_path = environ['PATH_INFO']
        filepath = self.paths_cache.get(full_path)

        if filepath is None:
            path = full_path.split('/')
            if INVALID_PATH_PARTS(path):
                return HTTPNotFound('Out of bounds: %s' %
                                    environ['PATH_INFO'])(environ,
                                                          start_response)
            filepath = self._adapt_path(join(self.doc_root, *path))
            self.paths_cache.put(full_path, filepath)

        if isfile(filepath):
            return FileServeApp(filepath, self.cache_max_age)(environ,
                                                              start_response)

        return self.app(environ, start_response)
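A minimal usage sketch (not from the source project), assuming StaticsMiddleware and a repoze.lru-style LRUCache are importable; the fallback WSGI app here is hypothetical:

def fallback_app(environ, start_response):
    start_response('200 OK', [('Content-Type', 'text/plain')])
    return [b'not a static file']

# Serve files from /var/www/static, falling through to fallback_app otherwise.
app = StaticsMiddleware(fallback_app, '/var/www/static', cache_max_age=86400)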
Example #2
class AdapterRegistry(object):
    """ Registry of adapters"""

    _sentinel = object()

    def __init__(self):
        self.underlying = adapter.AdapterRegistry()
        self.cache = LRUCache(500)

    def lookup_adapter(self, typ):
        """ Lookup adapter for ``typ``"""
        adapter = self.cache.get(typ, self._sentinel)
        if adapter is self._sentinel:
            adapter = self.underlying.lookup([typ], IJSONSerializeable, "")
            self.cache.put(typ, adapter)
        return adapter

    def register_adapter(self, typ, adapter=None):
        """ Register ``adapter`` for type ``typ``

        If no ``adapter`` is supplied, this method returns a decorator.
        """
        if adapter is None:

            def decorator(adapter):
                self.register_adapter_impl(typ, adapter)
                return adapter

            return decorator
        return self.register_adapter_impl(typ, adapter)

    def register_adapter_impl(self, typ, adapter):
        self.underlying.register([implementedBy(typ)], IJSONSerializeable, "",
                                 adapter)
        self.cache.clear()
Example #3
class LRUCache(StorageInterface):
    """In memory LRU cache"""

    def __init__(self, max_size=1024):
        if max_size < 1:
            raise ValueError("max_size must be a positive integer")
        self.max_size = max_size
        self.engine = LRUCacheEngine(max_size)

    def __getitem__(self, key):
        value = self.engine.get(key)
        if value is None:
            raise KeyError
        return value

    def __setitem__(self, key, value):
        self.engine.put(key, value)

    def __contains__(self, key):
        return self.engine.get(key) is not None

    def __len__(self):
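        # Note: this reports the configured capacity, not the number of
        # items currently stored.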
        return self.max_size
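A hedged usage sketch of the dict-style wrapper above, assuming LRUCacheEngine evicts the least-recently-used entry once max_size is exceeded. A stored value of None would be indistinguishable from a miss:

cache = LRUCache(max_size=2)
cache['a'] = 1
cache['b'] = 2
assert 'a' in cache and cache['a'] == 1   # touching 'a' makes 'b' the LRU entry
cache['c'] = 3                            # evicts 'b'
assert 'b' not in cache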
Example #4
File: registry.py Project: jean/reg
 def __init__(self, key_lookup, component_cache_size, all_cache_size,
              fallback_cache_size):
     self.key_lookup = key_lookup
     self.predicate_key = key_lookup.predicate_key
     self.key_dict_to_predicate_key = key_lookup.key_dict_to_predicate_key
     self.component_cache = LRUCache(component_cache_size)
     self.all_cache = LRUCache(all_cache_size)
     self.fallback_cache = LRUCache(fallback_cache_size)
Example #5
 def __init__(self, dict=None, **kwargs):
     self.data = {}
     self._lkpcache = LRUCache(1000)
     if dict is not None:
         self.update(dict)
     if len(kwargs):
         self.update(kwargs)
     self.listener_registered = False  # at least one listener registered
Example #6
 def __init__(self,
              namespace=BASE_NAMESPACE,
              base_url=None,
              long_timeout=600):
     self.namespace = namespace
     self._client = TimeoutClient(version="1.15",
                                  base_url=base_url,
                                  long_timeout=long_timeout)
     self._image_cache = LRUCache(100)
Example #7
 def __init__(self,
              namespace=BASE_NAMESPACE,
              base_url=None,
              long_timeout=600):
     self.namespace = namespace
     self._client = dockerpy_client(
         version="1.15",
         base_url=base_url,
         long_timeout=timedelta(seconds=long_timeout),
     )
     self._image_cache = LRUCache(100)
Example #8
 def __init__(self, app, transport):
     self.app = app
     self.transport = transport
     self.privkey = decode_hex(app.config['node']['privkey_hex'])
     self.pubkey = crypto.privtopub(self.privkey)
     self.nodes = LRUCache(2048)   # nodeid->Node,  fixme should be loaded
     self.this_node = Node(self.pubkey, self.transport.address)
     self.kademlia = KademliaProtocolAdapter(self.this_node, wire=self)
     this_enode = utils.host_port_pubkey_to_uri(self.app.config['discovery']['listen_host'],
                                                self.app.config['discovery']['listen_port'],
                                                self.pubkey)
     log.info('starting discovery proto', this_enode=this_enode)
Example #9
 def __init__(self, xom):
     from queue import Empty, PriorityQueue
     self.Empty = Empty
     self.xom = xom
     self.queue = PriorityQueue()
     self.error_queue = PriorityQueue()
     self.deleted = LRUCache(100)
     self.index_types = LRUCache(1000)
     self.errors = ReplicationErrors()
     self.importer = ImportFileReplica(self.xom, self.errors)
     self._replica_in_sync_cv = threading.Condition()
     self.last_added = None
     self.last_errored = None
     self.last_processed = None
Example #10
    def __init__(self):

        self.cli = False
        self.args = {}

        self.debug = False
        self.debug2 = False

        self.quiet = False

        self.profile = False

        self.components = OrderedDict()

        self.start_item = OrderedDict()
        self.start_nodes = []
        self.config_files = []
        self.included_files = []

        self.props = {}
        self.properties = self.props

        self.var = {}

        self.working_dir = os.getcwd()
        self.library_path = os.path.dirname(
            os.path.realpath(__file__)) + "/../../library"

        self.comp = Components(self)

        self._functions = {
            "text": functions,
            "xml": xmlfunctions,
            "datetime": datetime,
            "dt": datetime,
            "re": re,
            "sys": sys,
            "urllib": urllib,
            "random": random.Random()
        }
        self._globals = self._functions

        class Functions():
            pass

        self.f = Functions()
        for k, v in self._functions.items():
            setattr(self.f, k, v)

        self._compiled = LRUCache(512)  # TODO: Configurable
Example #11
File: keyfs.py Project: t-8ch/devpi
 def __init__(self, basedir, notify_on_commit):
     self.basedir = basedir
     self._notify_on_commit = notify_on_commit
     self._changelog_cache = LRUCache(1000)  # is thread safe
     with self.get_sqlconn() as conn:
         row = conn.execute("select max(serial) from changelog").fetchone()
         serial = row[0]
         if serial is None:
             self.next_serial = 0
         else:
             self.next_serial = serial + 1
             # perform some crash recovery
             data = self.get_raw_changelog_entry(serial)
             changes, rel_renames = loads(data)
             check_pending_renames(str(self.basedir), rel_renames)
Example #12
    def __init__(self, blocklen=128, deg=1, *args, **kwargs):
        # block length in bits, term size.
        self.blocklen = blocklen

        # term degree
        self.deg = deg

        # evaluated base terms of deg=1
        self.base = []
        self.cur_tv_size = None
        self.cur_evals = None
        self.last_base_size = None

        # caches
        self.sim_norm_cache = LRUCache(64)
Example #13
 def __init__(self, basedir, notify_on_commit, cache_size):
     self.basedir = basedir
     self.sqlpath = self.basedir.join(".sqlite")
     self._notify_on_commit = notify_on_commit
     self._changelog_cache = LRUCache(cache_size)  # is thread safe
     self.last_commit_timestamp = time.time()
     self.ensure_tables_exist()
Example #14
    def __init__(self, name, server=None, create=False):
        self.server = server or Server()
        self.session = self.server.session  # use the default Server() when none was passed
        self.name = name
        self.database = server.host + "/" + name

        self.cache = LRUCache(100)

        if create:
            self.create()
        else:
            response = self.session.head(self.database)
            if not response.ok:
                if response.status_code == 404:
                    raise excepts.DBNotExists
                raise Exception(response.status_code)
Example #15
 def __init__(self, request):
     self.request = request
     self.rasters = self.request.registry.settings["raster"]
     global _rasters
     if _rasters is None:
         cache_size = self.rasters.get('cache_size', 10)
         log.debug('initialize LRUCache with size %d' % cache_size)
         _rasters = LRUCache(cache_size)
Example #16
class Collection(object):
    def __init__(self, directory, cache_size=128):
        self.dir = directory
        self.index = susi_native.load(self.dir)
        self._cache = LRUCache(cache_size)

    def lookup(self, pattern, k=10, snippet_size=1000000):
        key = (pattern, k, snippet_size)
        cached_result = self._cache.get(key)
        if cached_result is not None:
            return cached_result

        if len(pattern) < 3:
            return []
        res = susi_native.search(self.index, pattern.encode('latin1'), k,
                                 snippet_size)
        self._cache.put(key, res)
        return res
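A hedged usage sketch (susi_native and an on-disk index are assumed): lookups are memoized on the full (pattern, k, snippet_size) argument tuple, so only the first call with a given combination hits the native index:

coll = Collection('/path/to/index', cache_size=64)
first = coll.lookup('needle', k=5)    # runs susi_native.search
again = coll.lookup('needle', k=5)    # answered from self._cache
assert first is again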
Example #17
class ClusterCache(object):
    def __init__(self, cache_size):
        self.lru = LRUCache(cache_size)
        self.hits = 0
        self.misses = 0

    def get(self, file_buffer, ptr):
        v = self.lru.get((file_buffer, ptr))
        if v is not None:
            self.hits += 1
            return v
        v = ClusterData(file_buffer, ptr)
        self.lru.put((file_buffer, ptr), v)
        self.misses += 1
        return v

    def clear(self):
        logger.debug("CACHE HITS " + str(self.hits) + " VS MISSES " + str(self.misses))
        self.lru.clear()
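A hedged usage sketch (ClusterData and a suitable file_buffer are assumed): the cache memoizes ClusterData construction per (file_buffer, ptr) key and counts hits and misses:

cache = ClusterCache(cache_size=32)
first = cache.get(file_buffer, 0)     # miss: constructs ClusterData
again = cache.get(file_buffer, 0)     # hit: returns the same object
assert first is again and cache.hits == 1 and cache.misses == 1
cache.clear()                         # logs the hit/miss counts, then empties the LRU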
Example #18
    def __init__(self):

        self.cli = False
        self.args = {}

        self.debug = False
        self.debug2 = False

        self.quiet = False

        self.profile = False

        self.config_files = []

        self.start_node = None
        self.start_message = {}  # or Bunch()?  # TODO: Review if this is definitive, compare performance

        self.props = {}
        self.properties = self.props

        self.var = {}

        self.working_dir = os.getcwd()
        self.library_path = os.path.dirname(
            os.path.realpath(__file__)) + "/../../library"

        self._globals = {
            "text": functions,
            "xml": xmlfunctions,
            "cubetl": cubetl,
            "datetime": datetime,
            "re": re,
            "sys": sys,
            "urllib": urllib,
            "random": random.Random()
        }

        self._compiled = LRUCache(512)  # TODO: Configurable

        self.comp = Components(self)
Example #19
def partly_distinct(iterable):
    """
    Filters items from iterable and **tries to return only distincts**.
    Keeps order.

    :param Iterable iterable: Something iterable we have to filter.

    >>> list(partly_distinct([1, 2, 3, 2, 1, 2, 3, 4]))
    [1, 2, 3, 4]

    .. note::
        Unlike :py:func:`distinct` it does not guarantee that all yielded
        elements will be distinct. But if the stream has a reasonably small
        cardinality, it works well.

    .. note::
        The current implementation tracks the 10000 most recently seen
        distinct values; if your cardinality is bigger, some duplicates may
        slip through.
    """
    cache = LRUCache(10000)
    for item in iterable:
        if not cache.get(item):
            cache.put(item, True)
            yield item
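A sketch of the eviction caveat from the note above, using a deliberately tiny cache in place of the real LRUCache(10000):

def partly_distinct_small(iterable, size=2):
    cache = LRUCache(size)
    for item in iterable:
        if not cache.get(item):
            cache.put(item, True)
            yield item

# 1 has been evicted by the time it reappears, so it is yielded twice:
# list(partly_distinct_small([1, 2, 3, 1])) == [1, 2, 3, 1]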
Example #20
class EDBag(Counter):
    def __init__(self):
        super(EDBag, self).__init__()
        self.cache1 = LRUCache(256) # values where distance=1
        self.cache2 = LRUCache(256) # values where distance>1

    def add(self, x):
        if x not in self:
            self.cache2.clear()
        self[x] += 1

    def closest_by_edit_distance(self, x):
        if x in self:
            # Optimization: if x is in multiset, then closest
            # edit dist = 0. Nothing can be any closer.
            return (x, 0)

        # Optimization: if we've looked up this value before,
        # return the previously computed answer.
        cached_answer = self.cache1.get(x)
        if cached_answer:
            return cached_answer
        cached_answer = self.cache2.get(x)
        if cached_answer:
            return cached_answer

        closest = None
        closest_dist = None
        for y, _ in self.most_common():
            d = editdistance.eval(x, y)
            if closest_dist is None or d < closest_dist:
                closest = y
                closest_dist = d
                if d == 1:
                    # Optimization: nothing can be any closer, as
                    # we know there's nothing at edit distance 0 (x is not
                    # in the multiset).
                    self.cache1.put(x, (closest, closest_dist))
                    return (closest, closest_dist)

        self.cache2.put(x, (closest, closest_dist))
        return (closest, closest_dist)
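A hedged usage sketch (requires the editdistance package): queries return the nearest stored value and its edit distance, and repeated queries are answered from cache1 or cache2:

bag = EDBag()
for word in ('cat', 'car', 'dog'):
    bag.add(word)

print(bag.closest_by_edit_distance('cat'))   # ('cat', 0): exact member, no scan
print(bag.closest_by_edit_distance('cot'))   # e.g. ('cat', 1), cached in cache1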
Example #21
File: main.py Project: vytas7/devpi
 def __init__(self, basedir, notify_on_commit, cache_size, settings=None):
     if settings is None:
         settings = {}
     for key in ("database", "host", "port", "unix_sock", "user", "password"):
         if key in settings:
             setattr(self, key, settings[key])
     self.basedir = basedir
     self._notify_on_commit = notify_on_commit
     self._changelog_cache = LRUCache(cache_size)  # is thread safe
     self.last_commit_timestamp = time.time()
     self.ensure_tables_exist()
     with self.get_connection() as conn:
         c = conn._sqlconn.cursor()
         c.execute("select max(serial) from changelog")
         row = c.fetchone()
         c.close()
         serial = row[0]
         if serial is None:
             self.next_serial = 0
         else:
             self.next_serial = serial + 1
Example #22
    def __init__(self):

        self.args = {}

        self.debug = False
        self.debug2 = False

        self.quiet = False

        self.config_files = []

        self.start_node = None
        self.start_message = {}

        self.props = {}

        self._globals = {"text": text, "cubetl": cubetl}

        self._compiled = LRUCache(512)  # TODO: Configurable

        self.comp = Components(self)
Example #23
class LRUShelf(Shelf):
    """An in-memory Least-Recently Used shelf up to `maxsize`.."""
    def __init__(self, maxsize=1000):
        self.store = LRUCache(int(maxsize))

    def getitem(self, key):
        value = self.store.get(key, UNSET)
        if value is UNSET:
            raise KeyError(key)
        return value

    def setitem(self, key, value):
        self.store.put(key, value)

    def delitem(self, key):
        self.store.invalidate(key)

    def clear(self):
        self.store.clear()
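A hedged usage sketch, assuming UNSET is a module-level sentinel (UNSET = object()) and LRUCache is repoze.lru's, whose invalidate() tolerates missing keys:

shelf = LRUShelf(maxsize=100)
shelf.setitem('k', 'v')
assert shelf.getitem('k') == 'v'
shelf.delitem('k')
shelf.getitem('k')    # raises KeyError('k')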
Example #24
    def __init__(self, basedir, notify_on_commit, cache_size, settings=None):
        if settings is None:
            settings = {}
        for key in ("database", "host", "port", "unix_sock", "user",
                    "password"):
            if key in settings:
                setattr(self, key, settings[key])

        if any(key in settings for key in self.SSL_OPT_KEYS):
            self.ssl_context = ssl_context = ssl.create_default_context(
                cafile=settings.get('ssl_ca_certs'))

            if 'ssl_certfile' in settings:
                ssl_context.load_cert_chain(
                    settings['ssl_certfile'],
                    keyfile=settings.get('ssl_keyfile'))

            check_hostname = settings.get('ssl_check_hostname')
            if check_hostname is not None and not ensure_boolean(
                    check_hostname):
                ssl_context.check_hostname = False

        self.basedir = basedir
        self._notify_on_commit = notify_on_commit
        self._changelog_cache = LRUCache(cache_size)  # is thread safe
        self.last_commit_timestamp = time.time()
        self.ensure_tables_exist()
        with self.get_connection() as conn:
            c = conn._sqlconn.cursor()
            c.execute("select max(serial) from changelog")
            row = c.fetchone()
            c.close()
            serial = row[0]
            if serial is None:
                self.next_serial = 0
            else:
                self.next_serial = serial + 1
Example #25
class RMemorySessionStore(Singleton):
    def __init__(self, config):
        self._cache = LRUCache(config.session_cache_size)

    def push(self, token_id, data):
        self._cache.put(token_id, data)

    def get(self, token):
        return self._cache.get(token, None)

    def remove(self, token_id):
        try:
            self._cache.put(token_id, None)
        except KeyError:
            pass

    def contains(self, session_id):
        return self._cache.get(session_id) is not None
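A hedged usage sketch (config is assumed to expose session_cache_size): note that remove() works by overwriting the entry with None, so contains() reports False afterwards even though the key still occupies a cache slot:

store = RMemorySessionStore(config)
store.push('token-1', {'user': 'alice'})
assert store.contains('token-1')
store.remove('token-1')
assert not store.contains('token-1')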
Example #26
class VladCache(object):
    def __init__(self):
        self.word_cache = LRUCache(1)
        self.vlad_cache = LRUCache(1000)

    def load_words(self, data):
        words = self.word_cache.get('words')
        if words is None:
            words, _ = bow.load_vlad_words_and_frequencies(data.config)
            self.word_cache.put('words', words)
        return words

    def vlad_histogram(self, image, features, words):
        vlad = self.vlad_cache.get(image)
        if vlad is None:
            vlad = unnormalized_vlad(features, words)
            vlad = signed_square_root_normalize(vlad)
            self.vlad_cache.put(image, vlad)
        return vlad
Example #27
class RMemorySessionStore(RUtils.singleton.Singleton):
    def __init__(self):
        if hasattr(self, '_init'):
            return
        self._init = True
        self.config = RUtils.config.RConfig()
        self._cache = LRUCache(self.config.session_cache_size)

    def push(self, token_id, data):
        self._cache.put(token_id, data)

    def get(self, token):
        return self._cache.get(token, None)

    def remove(self, session_id):
        try:
            self._cache.put(session_id, None)
        except KeyError:
            pass

    def contains(self, session_id):
        return self._cache.get(session_id) is not None
Example #28
def trim(flist, flowmaxbytes, trimmed_extension, preserve_times, post_process):
    cache = LRUCache(10000)
    trimmed_bytes = 0
    for pcap_file in flist:
        trimmed_file = pcap_file + trimmed_extension
        with open(pcap_file, "rb") as f:
            try:
                if pcap_file.endswith("pcapng"):
                    pcap = dpkt.pcapng.Reader(f)
                else:
                    pcap = dpkt.pcap.Reader(f)
                with open(trimmed_file, "wb") as trimmed:
                    if pcap_file.endswith("pcapng"):
                        pcap_out = dpkt.pcapng.Writer(trimmed)
                    else:
                        pcap_out = dpkt.pcap.Writer(trimmed)
                    for ts, buf in pcap:
                        fivetuple = get_fivetuple(buf, pcap, pcap_file)
                        flow_bytes = len(buf)  # avoid shadowing the builtin 'bytes'
                        cached = cache.get(fivetuple)
                        if cached is not None:
                            flow_bytes += cached
                        cache.put(fivetuple, flow_bytes)
                        if flow_bytes < flowmaxbytes:
                            pcap_out.writepkt(buf, ts)
                        else:
                            trimmed_bytes += len(buf)
            except dpkt.dpkt.NeedData:
                pass
            except ValueError:
                pass
        if os.path.exists(trimmed_file):
            if preserve_times:
                stat = os.stat(pcap_file)
                os.utime(trimmed_file, (stat.st_atime, stat.st_mtime))
            if post_process:
                post_process(pcap_file, trimmed_file)
    return trimmed_bytes
Example #29
class ResolveScheduler(object):
    """ A class that can resolve multiple pages in a potentially
        multi-threaded way.
    """
    PAGE_REGISTRY_SIZE = 256

    def __init__(self, wiki, page_urls, registry_size=None):
        self.wiki = wiki
        self.page_urls = page_urls

        self._cache = LRUCache(registry_size or self.PAGE_REGISTRY_SIZE)
        self._pages_meta = None

        self._queue = None
        self._results = None
        self._pool = None
        self._done = False

    def getPage(self, url):
        page = self._cache.get(url)
        if page is None:
            logger.debug("Caching page in scheduler registry: %s" % url)
            fields = ['url', 'title', 'path', 'formatted_text', 'local_meta',
                      'local_links']
            page = self.wiki.db.getPage(url, fields=fields)
            self._cache.put(url, page)
        return page

    def getPagesMeta(self):
        if self._pages_meta is None:
            fields = ['url', 'title', 'local_meta']
            self._pages_meta = list(self.wiki.db.getPages(fields=fields))
        return self._pages_meta

    def run(self, num_workers=1):
        logger.info("Running resolve scheduler (%d workers)" % num_workers)

        if num_workers > 1:
            # Multi-threaded resolving.
            logger.debug("Main thread is %d" % threading.get_ident())

            self._done = False
            self._queue = Queue()
            self._results = Queue()

            self.getPagesMeta()

            job_count = 0
            for url in self.page_urls:
                self._queue.put_nowait(JobDesc(url))
                job_count += 1

            self._pool = []
            for i in range(num_workers):
                ctx = JobContext(self)
                self._pool.append(JobWorker(i, ctx))

            for thread in self._pool:
                thread.start()

            while job_count > 0:
                try:
                    url, page, exc = self._results.get(True, 10)
                except Empty:
                    logger.error("Resolve workers timed out, still have %d "
                                 "jobs to go." % job_count)
                    return

                job_count -= 1
                if page:
                    self.wiki.db.cachePage(page)
                if exc:
                    logger.error("Error resolving page: %s" % url)
                    logger.exception(exc)

            logger.debug("Queue is empty... terminating workers.")
            self._done = True

            for thread in self._pool:
                thread.join()
                logger.debug("Worker [%d] ended." % thread.wid)
        else:
            # Single-threaded resolving.
            for url in self.page_urls:
                page = self.getPage(url)
                r = PageResolver(
                        page,
                        page_getter=self.getPage,
                        pages_meta_getter=self.getPagesMeta)
                runner = PageResolverRunner(page, r)
                runner.run(raise_on_failure=True)
                self.wiki.db.cachePage(page)
Example #30
class DockerClient(object):
    """
    Talk to the real Docker server directly.

    Some operations can take a while (e.g. stopping a container), so we
    use a thread pool. See https://clusterhq.atlassian.net/browse/FLOC-718
    for using a custom thread pool.

    :ivar unicode namespace: A namespace prefix to add to container names
        so we don't clobber other applications interacting with Docker.
    :ivar str base_url: URL for connection to the Docker server.
    :ivar int long_timeout: Maximum time in seconds to wait for
        long-running operations, particularly pulling an image.
    :ivar LRUCache _image_cache: Mapped cache of image IDs to their data.
    """
    def __init__(
            self, namespace=BASE_NAMESPACE, base_url=None,
            long_timeout=600):
        self.namespace = namespace
        self._client = dockerpy_client(
            version="1.15", base_url=base_url,
            long_timeout=timedelta(seconds=long_timeout),
        )
        self._image_cache = LRUCache(100)

    def _to_container_name(self, unit_name):
        """
        Add the namespace to the container name.

        :param unicode unit_name: The unit's name.

        :return unicode: The container's name.
        """
        return self.namespace + unit_name

    def _parse_container_ports(self, data):
        """
        Parse the ports from a data structure representing the Ports
        configuration of a Docker container in the format returned by
        ``self._client.inspect_container`` and return a list containing
        ``PortMap`` instances mapped to the container and host exposed ports.

        :param dict data: The data structure for the representation of
            container and host port mappings in a single container.
            This takes the form of the ``NetworkSettings.Ports`` portion
            of a container's state and configuration as returned by inspecting
            the container. This is a dictionary mapping container ports to a
            list of host bindings, e.g.
            "3306/tcp": [{"HostIp": "0.0.0.0","HostPort": "53306"},
                         {"HostIp": "0.0.0.0","HostPort": "53307"}]

        :return list: A list that is either empty or contains ``PortMap``
            instances.
        """
        ports = []
        for internal, hostmap in data.items():
            internal_map = internal.split(u'/')
            internal_port = internal_map[0]
            internal_port = int(internal_port)
            if hostmap:
                for host in hostmap:
                    external_port = host[u"HostPort"]
                    external_port = int(external_port)
                    portmap = PortMap(internal_port=internal_port,
                                      external_port=external_port)
                    ports.append(portmap)
        return ports

    def _parse_restart_policy(self, data):
        """
        Parse the restart policy from the configuration of a Docker container
        in the format returned by ``self._client.inspect_container`` and return
        an ``IRestartPolicy``.

        :param dict data: The data structure representing the restart policy of
            a container, e.g.

            {"Name": "policy-name", "MaximumRetryCount": 0}

        :return IRestartPolicy: The model of the restart policy.

        :raises ValueError: if an unknown policy is passed.
        """
        POLICIES = {
            u"": lambda data:
                RestartNever(),
            u"always": lambda data:
                RestartAlways(),
            u"on-failure": lambda data:
                RestartOnFailure(
                    maximum_retry_count=data[u"MaximumRetryCount"] or None)
        }
        try:
            # Docker will treat an unknown policy as "never".
            # We error out here, in case new policies are added.
            return POLICIES[data[u"Name"]](data)
        except KeyError:
            raise ValueError("Unknown restart policy: %r" % (data[u"Name"],))

    def _serialize_restart_policy(self, restart_policy):
        """
        Serialize the restart policy from an ``IRestartPolicy`` to the format
        expected by the docker API.

        :param IRestartPolicy restart_policy: The model of the restart policy.

        :returns: A dictionary suitable to pass to docker

        :raises ValueError: if an unknown policy is passed.
        """
        SERIALIZERS = {
            RestartNever: lambda policy:
                {u"Name": u""},
            RestartAlways: lambda policy:
                {u"Name": u"always"},
            RestartOnFailure: lambda policy:
                {u"Name": u"on-failure",
                 u"MaximumRetryCount": policy.maximum_retry_count or 0},
        }
        try:
            return SERIALIZERS[restart_policy.__class__](restart_policy)
        except KeyError:
            raise ValueError("Unknown restart policy: %r" % (restart_policy,))

    def _image_not_found(self, apierror):
        """
        Inspect a ``docker.errors.APIError`` to determine if it represents a
        failure to start a container because the container's image wasn't
        found.

        :return: ``True`` if this is the case, ``False`` if the error has
            another cause.
        :rtype: ``bool``
        """
        return apierror.response.status_code == NOT_FOUND

    def _address_in_use(self, apierror):
        """
        Inspect a ``docker.errors.APIError`` to determine if it represents a
        failure to start a container because the container is configured to use
        ports that are already in use on the system.

        :return: If this is the reason, an exception to raise describing the
            problem.  Otherwise, ``None``.
        """
        # Recognize an error (without newline) like:
        #
        # Cannot start container <name>: Error starting userland proxy:
        # listen tcp <ip>:<port>: bind: address already in use
        #
        # Or (without newline) like:
        #
        # Cannot start container <name>: Bind for <ip>:<port> failed:
        # port is already allocated
        #
        # because Docker can't make up its mind about which format to use.
        parts = apierror.explanation.split(b": ")
        if parts[-1] == b"address already in use":
            ip, port = parts[-3].split()[-1].split(b":")
        elif parts[-1] == b"port is already allocated":
            ip, port = parts[-2].split()[2].split(b":")
        else:
            return None
        return AddressInUse(address=(ip, int(port)), apierror=apierror)

    def _image_data(self, image):
        """
        Supply data about an image, by either inspecting it or returning
        cached data if available.

        :param unicode image: The ID of the image.

        :return: ``dict`` representing data about the image properties.
        """
        cached_image = self._image_cache.get(image)
        if cached_image is not None:
            LOG_CACHED_IMAGE(image=image).write()
            return cached_image
        try:
            image_data = self._client.inspect_image(image)
            Message.new(
                message_type="flocker:node:docker:image_inspected",
                image=image
            ).write()
        except APIError as e:
            if e.response.status_code == NOT_FOUND:
                # Image has been deleted, so just fill in some
                # stub data so we can return *something*. This
                # should happen only for stopped containers so
                # some inaccuracy is acceptable.
                # We won't cache stub data though.
                Message.new(
                    message_type="flocker:node:docker:image_not_found",
                    image=image
                ).write()
                image_data = {u"Config": {u"Env": [], u"Cmd": []}}
            else:
                raise
        cached_data = ImageDataCache(
            command=image_data[u"Config"][u"Cmd"],
            environment=image_data[u"Config"][u"Env"]
        )
        self._image_cache.put(image, cached_data)
        Message.new(
            message_type="flocker:node:docker:image_data_cached",
            image=image
        ).write()
        return cached_data

    def add(self, unit_name, image_name, ports=None, environment=None,
            volumes=(), mem_limit=None, cpu_shares=None,
            restart_policy=RestartNever(), command_line=None):
        container_name = self._to_container_name(unit_name)

        if environment is not None:
            environment = environment.to_dict()
        if ports is None:
            ports = []

        restart_policy_dict = self._serialize_restart_policy(restart_policy)

        def _create():
            binds = list(
                # The "Z" mode tells Docker to "relabel file objects" on the
                # volume.  This makes things work when SELinux is enabled, at
                # least in the default configuration on CentOS 7.  See
                # <https://docs.docker.com/reference/commandline/run/>, in the
                # `--volumes-from` section (or just search for SELinux).
                u"{}:{}:Z".format(
                    volume.node_path.path, volume.container_path.path
                )
                for volume in volumes
            )
            port_bindings = {
                p.internal_port: p.external_port
                for p in ports
            }
            host_config = self._client.create_host_config(
                binds=binds,
                port_bindings=port_bindings,
                restart_policy=restart_policy_dict,
            )
            # We're likely to get e.g. pvector, so make sure we're passing
            # in something JSON serializable:
            command_line_values = command_line
            if command_line_values is not None:
                command_line_values = list(command_line_values)

            self._client.create_container(
                name=container_name,
                image=image_name,
                command=command_line_values,
                environment=environment,
                ports=[p.internal_port for p in ports],
                mem_limit=mem_limit,
                cpu_shares=cpu_shares,
                host_config=host_config,
            )

        def _add():
            try:
                _create()
            except APIError as e:
                if self._image_not_found(e):
                    # Pull it and try again
                    self._client.pull(image_name)
                    _create()
                else:
                    # Unrecognized, just raise it.
                    raise

            # Just because we got a response doesn't mean Docker has
            # actually updated any internal state yet! So if e.g. we did a
            # start on this container Docker might well complain it knows
            # not the container of which we speak. To prevent this we poll
            # until it does exist.
            while True:
                try:
                    self._client.start(container_name)
                except NotFound:
                    sleep(0.01)
                else:
                    break

        d = deferToThread(_add)

        def _extract_error(failure):
            failure.trap(APIError)
            code = failure.value.response.status_code
            if code == 409:
                raise AlreadyExists(unit_name)

            in_use = self._address_in_use(failure.value)
            if in_use is not None:
                # We likely can't start the container because its
                # configuration conflicts with something else happening on
                # the system.  Reflect this failure condition in a more
                # easily recognized way.
                raise in_use

            return failure
        d.addErrback(_extract_error)
        return d

    def _blocking_exists(self, container_name):
        """
        Blocking API to check if container exists.

        :param unicode container_name: The name of the container whose
            existence we're checking.

        :return: ``True`` if unit exists, otherwise ``False``.
        """
        try:
            self._client.inspect_container(container_name)
            return True
        except APIError:
            return False

    def exists(self, unit_name):
        container_name = self._to_container_name(unit_name)
        return deferToThread(self._blocking_exists, container_name)

    def _stop_container(self, container_name):
        """Attempt to stop the given container.

        There is a race condition between a process dying and
        Docker noticing that fact:

        https://github.com/docker/docker/issues/5165#issuecomment-65753753

        If we get an error indicating that this race condition happened,
        return False. This means the caller should try again. If we *do*
        successfully stop the container, return True.

        :raise APIError: If the container failed to stop for some unknown
            reason.
        :return: True if we stopped the container, False otherwise.

        """
        try:
            with start_action(
                action_type='flocker:docker:container_stop',
                container=container_name
            ):
                self._client.stop(container_name)
        except APIError as e:
            if e.response.status_code == NOT_FOUND:
                # If the container doesn't exist, we swallow the error,
                # since this method is supposed to be idempotent.
                return True
            elif e.response.status_code == INTERNAL_SERVER_ERROR:
                # Docker returns this if the process had died, but
                # hasn't noticed it yet.
                return False
            else:
                raise
        return True

    def _remove_container(self, container_name):
        """
        Attempt to remove a container.

        Assumes the given container has already been stopped.

        :param unicode container_name: The fully-namespaced name of the
            container.
        :return: True if we removed the container, False otherwise.
        """
        try:
            # The ``docker.Client.stop`` method sometimes returns a
            # 404 error, even though the container exists.
            # See https://github.com/docker/docker/issues/13088
            # Wait until the container has actually stopped running
            # before attempting to remove it.  Otherwise we are
            # likely to see: 'docker.errors.APIError: 409 Client
            # Error: Conflict ("Conflict, You cannot remove a
            # running container. Stop the container before
            # attempting removal or use -f")'
            # This code should probably be removed once the above
            # issue has been resolved. See [FLOC-1850]
            self._client.wait(container_name)

            with start_action(
                action_type='flocker:docker:container_remove',
                container=container_name
            ):
                self._client.remove_container(container_name)
        except APIError as e:
            if e.response.status_code == NOT_FOUND:
                # If the container doesn't exist, we swallow the error,
                # since this method is supposed to be idempotent.
                return True
            elif e.response.status_code == INTERNAL_SERVER_ERROR:
                # Failure to remove container - see FLOC-3262 for an example.
                return False
            else:
                raise
        return True

    def remove(self, unit_name):
        container_name = self._to_container_name(unit_name)

        def _remove():
            # Previously, this looped forever and didn't pause between loops.
            # We've arbitrarily chosen a wait interval of 0.001 seconds and
            # 1000 retries (i.e. a second of polling). These values may need
            # tuning.
            poll_until(
                partial(self._stop_container, container_name),
                repeat(0.001, 1000))

            # Previously, the container remove was only tried once. Again,
            # these parameters may need tuning.
            poll_until(
                partial(self._remove_container, container_name),
                repeat(0.001, 1000))

        d = deferToThread(_remove)
        return d

    def list(self):
        def _list():
            result = set()
            ids = [d[u"Id"] for d in
                   self._client.containers(quiet=True, all=True)]
            for i in ids:

                try:
                    data = self._client.inspect_container(i)
                except APIError as e:
                    # The container ID returned by the list API call above, may
                    # have been removed in another thread.
                    if e.response.status_code == NOT_FOUND:
                        continue
                    else:
                        raise

                state = (u"active" if data[u"State"][u"Running"]
                         else u"inactive")
                name = data[u"Name"]
                # Since tags (e.g. "busybox") aren't stable, ensure we're
                # looking at the actual image by using the hash:
                image = data[u"Image"]
                image_tag = data[u"Config"][u"Image"]
                command = data[u"Config"][u"Cmd"]
                with start_action(
                    action_type=u"flocker:node:docker:inspect_image",
                    container=i,
                    running=data[u"State"][u"Running"]
                ):
                    image_data = self._image_data(image)
                if image_data.command == command:
                    command = None
                port_bindings = data[u"NetworkSettings"][u"Ports"]
                if port_bindings is not None:
                    ports = self._parse_container_ports(port_bindings)
                else:
                    ports = list()
                volumes = []
                binds = data[u"HostConfig"]['Binds']
                if binds is not None:
                    for bind_config in binds:
                        parts = bind_config.split(':', 2)
                        node_path, container_path = parts[:2]
                        volumes.append(
                            Volume(container_path=FilePath(container_path),
                                   node_path=FilePath(node_path))
                        )
                if name.startswith(u"/" + self.namespace):
                    name = name[1 + len(self.namespace):]
                else:
                    continue
                # Retrieve environment variables for this container,
                # disregarding any environment variables that are part
                # of the image, rather than supplied in the configuration.
                unit_environment = []
                container_environment = data[u"Config"][u"Env"]
                if image_data.environment is None:
                    image_environment = []
                else:
                    image_environment = image_data.environment
                if container_environment is not None:
                    for environment in container_environment:
                        if environment not in image_environment:
                            env_key, env_value = environment.split('=', 1)
                            unit_environment.append((env_key, env_value))
                unit_environment = (
                    Environment(variables=frozenset(unit_environment))
                    if unit_environment else None
                )
                # Our Unit model counts None as the value for cpu_shares and
                # mem_limit in containers without specified limits, however
                # Docker returns the values in these cases as zero, so we
                # manually convert.
                cpu_shares = data[u"Config"][u"CpuShares"]
                cpu_shares = None if cpu_shares == 0 else cpu_shares
                mem_limit = data[u"Config"][u"Memory"]
                mem_limit = None if mem_limit == 0 else mem_limit
                restart_policy = self._parse_restart_policy(
                    data[U"HostConfig"][u"RestartPolicy"])
                result.add(Unit(
                    name=name,
                    container_name=self._to_container_name(name),
                    activation_state=state,
                    container_image=image_tag,
                    ports=frozenset(ports),
                    volumes=frozenset(volumes),
                    environment=unit_environment,
                    mem_limit=mem_limit,
                    cpu_shares=cpu_shares,
                    restart_policy=restart_policy,
                    command_line=command)
                )
            return result
        return deferToThread(_list)
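A hedged sketch exercising _parse_container_ports on its own, using the NetworkSettings.Ports shape from its docstring (client stands for an already-constructed DockerClient):

ports_data = {u"3306/tcp": [{u"HostIp": u"0.0.0.0", u"HostPort": u"53306"},
                            {u"HostIp": u"0.0.0.0", u"HostPort": u"53307"}],
              u"8080/tcp": None}
port_maps = client._parse_container_ports(ports_data)
# -> [PortMap(internal_port=3306, external_port=53306),
#     PortMap(internal_port=3306, external_port=53307)]; unbound ports yield nothing.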
Example #31
class FeatureLoader(object):
    def __init__(self):
        self.points_cache = LRUCache(1000)
        self.colors_cache = LRUCache(1000)
        self.features_cache = LRUCache(200)
        self.words_cache = LRUCache(200)
        self.masks_cache = LRUCache(1000)
        self.index_cache = LRUCache(200)
        self.masked_index_cache = LRUCache(200)

    def clear_cache(self):
        self.points_cache.clear()
        self.colors_cache.clear()
        self.features_cache.clear()
        self.words_cache.clear()
        self.masks_cache.clear()

    def load_mask(self, data, image, points=None):
        masks = self.masks_cache.get(image)
        if masks is None:
            if points is None:
                points, _ = self.load_points_colors(data, image, masked=False)
            masks = data.load_features_mask(image, points[:, :2])
            self.masks_cache.put(image, masks)
        return masks

    def load_points_colors(self, data, image, masked=False):
        points = self.points_cache.get(image)
        colors = self.colors_cache.get(image)
        if points is None or colors is None:
            points, _, colors = self._load_features_nocache(data, image)
            self.points_cache.put(image, points)
            self.colors_cache.put(image, colors)
        if masked:
            mask = self.load_mask(data, image, points)
            if mask is not None:
                points = points[mask]
                colors = colors[mask]
        return points, colors

    def load_points_features_colors(self, data, image, masked=False):
        points = self.points_cache.get(image)
        features = self.features_cache.get(image)
        colors = self.colors_cache.get(image)
        if points is None or features is None or colors is None:
            points, features, colors = self._load_features_nocache(data, image)
            self.points_cache.put(image, points)
            self.features_cache.put(image, features)
            self.colors_cache.put(image, colors)
        if masked:
            mask = self.load_mask(data, image, points)
            if mask is not None:
                points = points[mask]
                features = features[mask]
                colors = colors[mask]
        return points, features, colors

    def load_features_index(self, data, image, masked=False):
        cache = self.masked_index_cache if masked else self.index_cache
        cached = cache.get(image)
        if cached is None:
            _, features, _ = self.load_points_features_colors(data, image,
                                                              masked)
            index = ft.build_flann_index(features, data.config)
            cache.put(image, (features, index))
        else:
            features, index = cached
        return index

    def load_words(self, data, image, masked):
        words = self.words_cache.get(image)
        if words is None:
            words = data.load_words(image)
            self.words_cache.put(image, words)
        if masked and words is not None:
            mask = self.load_mask(data, image)
            if mask is not None:
                words = words[mask]
        return words

    def _load_features_nocache(self, data, image):
        points, features, colors = data.load_features(image)
        if points is None:
            logger.error('Could not load features for image {}'.format(image))
        else:
            points = np.array(points[:, :3], dtype=float)
        return points, features, colors
Example #46
0
 def __init__(self, name_cache_size=512):
     ServiceBase.__init__(self)
     self._sublist_lock = Lock()
     self._twitch = TwitchEngine()
     self._channel_name_cache = LRUCache(name_cache_size)
Example #47
0
class Service(ServiceBase):
    CMD = 'twitch'
    SUB_FILE = 'twitch_sublist.p'
    CHECK_PERIOD = 300

    def __init__(self, name_cache_size=512):
        ServiceBase.__init__(self)
        self._sublist_lock = Lock()
        self._twitch = TwitchEngine()
        self._channel_name_cache = LRUCache(name_cache_size)

    def _setup_argument(self, cmd_group):
        cmd_group.add_argument(
            '-subscribe',
            nargs='+',
            func=self._subscribe,
            help=
            'Subscribe channels and receive notification when channel goes live.\n'
            'ex: {} -subscribe kaydada'.format(self.CMD))
        cmd_group.add_argument('-unsubscribe',
                               nargs='+',
                               func=self._unsubscribe,
                               help='Unsubscribe channels.\n'
                               'ex: {} -unsubscribe kaydada'.format(self.CMD))
        cmd_group.add_argument(
            '-unsuball',
            action='store_true',
            func=self._unsub_all,
            help=
            "Unsubscribe all channels in Linot. I won't send any notification to you anymore."
        )
        cmd_group.add_argument('-listchannel',
                               action='store_true',
                               func=self._list_channel,
                               help="List channels you've subscribed.")
        cmd_group.add_argument(
            '-import',
            nargs=1,
            func=self._import,
            help='Import the following list of a twitch user.\n'
            'ex: {} -import kaydada'.format(self.CMD))

        # below, admin only
        cmd_group.add_argument('-refresh',
                               action='store_true',
                               func=self._refresh,
                               help=argparse.SUPPRESS)
        cmd_group.add_argument('-listusers',
                               nargs='*',
                               func=self._list_users,
                               help=argparse.SUPPRESS)
        cmd_group.add_direct_command(self._sub_by_url,
                                     r'twitch\.tv/(\w+)[\s\t,]*', re.IGNORECASE)

    def _start(self):
        # Load subscribe list
        try:
            logger.debug('Loading subscribe list from file')
            self._sublist = pickle.load(open(self.SUB_FILE, 'rb'))
            self._calculate_channel_sub_count()
        except IOError:
            logger.debug('Subscribe list file not found, create empty.')
            self._sublist = defaultdict(list)
            self._channel_sub_count = defaultdict(int)
        self._check_thread = Checker(self.CHECK_PERIOD, self._twitch,
                                     self.get_sublist)
        self._check_thread.start()

    def _stop(self):
        self._check_thread.stop()

    def get_sublist(self):
        self._sublist_lock.acquire(True)
        local_sublist = copy.copy(self._sublist)
        self._sublist_lock.release()
        return local_sublist

    def _sub_by_url(self, match_iter, cmd, sender):
        logger.debug('sub by url: ' + str(match_iter))
        logger.debug('sub by url, direct cmd: ' + cmd)
        self._subscribe(match_iter, sender)

    def _calculate_channel_sub_count(self):
        self._channel_sub_count = defaultdict(int)
        for subr in self._sublist:
            for ch in self._sublist[subr]:
                self._channel_sub_count[ch] += 1

    def _import(self, twitch_user, sender):
        # get the following list of twitch_user and subscribe them for sender
        user = twitch_user[0]
        followed_channels = self._twitch.get_followed_channels(user)
        if followed_channels is None:
            sender.send_message('Twitch user: {} not found'.format(user))
        else:
            if len(followed_channels) > 8:
                sender.send_message(
                    'Number of followed channels is more than 8. It may take a while to process.'
                )
            self._subscribe(followed_channels, sender)

    def _unsub_all(self, value, sender):
        # unsubscribe all channels for sender
        # we cannot pass self._sublist directly, since unsubscribing
        # mutates self._sublist
        user_sub = copy.copy(self._sublist[sender])
        self._unsubscribe(user_sub, sender)

    def _subscribe(self, chs, sender):
        # Handles user request for subscribing channels
        # We actually let the LinotServant follow these channels
        # so that we can check if they are online using the streams/followed API

        # prompt a message to let the user know I am still alive...
        sender.send_message('Processing ...')
        msg = io.BytesIO()

        not_found = []
        for ch in chs:
            check_name = ch.lower()
            # reduce api invocation
            if check_name in self._sublist[sender]:  # pragma: no cover
                continue
            ch_disp_name, stat = self._twitch.follow_channel(ch)
            if stat is False:
                not_found.append(ch)
            else:
                self._sublist_lock.acquire(True)
                self._sublist[sender].append(check_name)
                self._sublist_lock.release()
                self._channel_sub_count[check_name] += 1
                self._channel_name_cache.put(ch_disp_name.lower(),
                                             ch_disp_name)
                pickle.dump(self._sublist, open(self.SUB_FILE, 'wb+'))

        if len(not_found) > 0:
            print('Channel not found: ' + ' '.join(not_found), file=msg)
        print('Done', file=msg)
        sender.send_message(msg.getvalue())
        return

    def _unsubscribe(self, chs, sender):
        # prompt a message to let the user know I am still alive...
        sender.send_message('Processing ...')
        msg = io.BytesIO()

        # Handles user request for unsubscribing channels
        not_found = []
        for ch in chs:
            check_name = ch.lower()
            self._sublist_lock.acquire(True)
            try:
                self._sublist[sender].remove(check_name)
            except ValueError:
                not_found.append(ch)
                self._sublist_lock.release()
                continue
            self._sublist_lock.release()
            self._channel_sub_count[check_name] -= 1
            if self._channel_sub_count[check_name] <= 0:
                # maybe we can try to not unfollow, so that we don't keep
                # generating follow message to the caster
                # self._twitch.unfollow_channel(ch)
                self._channel_sub_count.pop(check_name, None)

        if len(self._sublist[sender]) == 0:
            self._sublist_lock.acquire(True)
            self._sublist.pop(sender)
            self._sublist_lock.release()

        pickle.dump(self._sublist, open(self.SUB_FILE, 'wb+'))
        if len(not_found) > 0:
            print('Channel not found: ' + ' '.join(not_found), file=msg)
        print('Done', file=msg)
        sender.send_message(msg.getvalue())
        return

    def _list_channel(self, value, sender):
        msg = io.BytesIO()
        print('Your subscribed channels are:', file=msg)
        live_channels = self._check_thread.get_live_channels()
        for ch in self._sublist[sender]:
            if ch in [x.lower() for x in live_channels]:
                stat = '[LIVE]'
            else:
                stat = '[OFF]'
            display_name = self._channel_name_cache.get(ch)
            if display_name is None:
                display_name = self._twitch.get_channel_info(
                    ch)['display_name']
                self._channel_name_cache.put(ch, display_name)
            print('{}\t{}'.format(stat, display_name), file=msg)
        sender.send_message(msg.getvalue())

    def _refresh(self, value, sender):
        # <Admin only>
        if sender.code == config['interface'][
                sender.interface_name]['admin_id']:
            self._check_thread.refresh()
            sender.send_message('Done')

    def _list_users(self, check_users, sender):
        # List all user who has subscription
        # <Admin only>
        if sender.code != config['interface'][
                sender.interface_name]['admin_id']:
            return

        user_list = self._sublist.keys()
        msg = io.StringIO()
        if len(check_users) == 0:
            # if no check_users list is given, list all users with sub count
            for user_index, user in enumerate(user_list):
                print(u'#{}) {}'.format(user_index, unicode(user)), file=msg)
                print(u'Subscribed count: {}'.format(len(self._sublist[user])),
                      file=msg)
                print(u'----------------------------', file=msg)
        else:
            # list users sub channel list
            not_found = []
            for user_index in check_users:
                try:
                    index = int(user_index)
                    user = user_list[index]
                except (ValueError, IndexError):
                    not_found.append(user_index)
                    continue

                if user not in self._sublist:
                    not_found.append(user_index)
                    continue

                print(u'#{}) {}'.format(user_index, unicode(user)), file=msg)
                print(u'- Subscribed Channels: ', file=msg)
                for ch in self._sublist[user]:
                    print(unicode(ch), end=u', ', file=msg)
                print(u'', file=msg)
                print(u'- Total Count: {}'.format(len(self._sublist[user])),
                      file=msg)
                print(u'----------------------------', file=msg)

            if len(not_found) > 0:
                print(u'Not found: ', end=u'', file=msg)
                for na in not_found:
                    print(unicode(na), end=u', ', file=msg)
                print(u'', file=msg)

        print(u'Done', file=msg)  # make sure we are sending something to user
        sender.send_message(msg.getvalue())
        return
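
get_sublist above pairs acquire/release by hand; since threading.Lock is a context manager, the same method can be written so the lock is released even if the copy raises. A sketch of the equivalent method (not the project's code):

    def get_sublist(self):
        # The with-statement releases the lock on any exit path.
        with self._sublist_lock:
            return copy.copy(self._sublist)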
Example #48
0
        See ``IArgumentType`` for argument and return type documentation.
        """

        value = BytesIO()
        for counter in count(0):
            chunk = strings.get("%s.%d" % (name, counter))
            if chunk is None:
                break
            value.write(chunk)
        strings[name] = value.getvalue()
        self.another_argument.fromBox(name, strings, objects, proto)


# The configuration and state can get pretty big, so don't want too many:
_wire_encode_cache = LRUCache(50)


def caching_wire_encode(obj):
    """
    Encode an object to bytes using ``wire_encode`` and cache the result,
    or return cached result if available.

    This relies on cached objects being immutable, or at least not being
    modified. Given our usage patterns that is currently the case and
    should continue to be, but worth keeping in mind.

    :param obj: Object to encode.
    :return: Resulting ``bytes``.
    """
    result = _wire_encode_cache.get(obj)
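
Example #48 is cut off after the cache probe. Based on the docstring, a plausible completion of caching_wire_encode would be (wire_encode is the underlying encoder the docstring names; the original body past this point is not shown here):

def caching_wire_encode(obj):
    result = _wire_encode_cache.get(obj)
    if result is None:
        # Assumed completion: encode on a miss and memoize the bytes.
        result = wire_encode(obj)
        _wire_encode_cache.put(obj, result)
    return result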
Example #49
0
 def __init__(self, max_size=1024):
     if max_size < 1:
         raise ValueError("max_size must be a positive integer greater than 0")
     self.max_size = max_size
     self.engine = LRUCacheEngine(max_size)
Example #50
0
File: registry.py Project: jean/reg
class CachingKeyLookup(object):
    """
    A key lookup that caches.

    Implements the read-only API of :class:`Registry`, using
    a cache to speed up access.

    The cache is LRU.

    :param key_lookup: the :class:`Registry` to cache.
    :param component_cache_size: how many cache entries to store for
      the :meth:`component` method. This is also used by dispatch
      calls.
    :param all_cache_size: how many cache entries to store for the
      :meth:`all` method.
    :param fallback_cache_size: how many cache entries to store for
      the :meth:`fallback` method.
    """
    def __init__(self, key_lookup, component_cache_size, all_cache_size,
                 fallback_cache_size):
        self.key_lookup = key_lookup
        self.predicate_key = key_lookup.predicate_key
        self.key_dict_to_predicate_key = key_lookup.key_dict_to_predicate_key
        self.component_cache = LRUCache(component_cache_size)
        self.all_cache = LRUCache(all_cache_size)
        self.fallback_cache = LRUCache(fallback_cache_size)

    def component(self, key, predicate_key):
        """Lookup value in registry based on predicate_key.

        If value for predicate_key cannot be found, looks up first
        permutation of predicate_key for which there is a value. Permutations
        are made according to the predicates registered for the key.

        :param key: an immutable for which to look up the predicate_key.
        :param predicate_key: an immutable predicate key, constructed
          for predicates given for this key.
        :returns: a registered value, or ``None``.
        """
        result = self.component_cache.get((key, predicate_key), NOT_FOUND)
        if result is not NOT_FOUND:
            return result
        result = self.key_lookup.component(key, predicate_key)
        self.component_cache.put((key, predicate_key), result)
        return result

    def fallback(self, key, predicate_key):
        """Lookup fallback based on predicate_key.

        This finds the fallback for the most specific predicate
        that fails to match.

        :param key: an immutable for which to look up the predicate_key.
        :param predicate_key: an immutable predicate key, constructed
          for predicates given for this key.
        :returns: the fallback value for the most specific predicate
          that failed to match.
        """
        result = self.fallback_cache.get((key, predicate_key), NOT_FOUND)
        if result is not NOT_FOUND:
            return result
        result = self.key_lookup.fallback(key, predicate_key)
        self.fallback_cache.put((key, predicate_key), result)
        return result

    def all(self, key, predicate_key):
        """Lookup iterable of values registered for predicate_key.

        Looks up values registered for all permutations of
        predicate_key, the most specific first.

        :param key: an immutable for which to look up the values.
        :param predicate_key: an immutable predicate key, constructed for
          the predicates given for this key.
        :returns: An iterable of registered values.
        """
        result = self.all_cache.get((key, predicate_key), NOT_FOUND)
        if result is not NOT_FOUND:
            return result
        result = list(self.key_lookup.all(key, predicate_key))
        self.all_cache.put((key, predicate_key), result)
        return result

    def lookup(self):
        """A :class:`Lookup` for this registry.
        """
        return Lookup(self)
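
All three lookup methods rely on the same sentinel trick: because None (or an empty list) can be a legitimately cached result, a miss is detected by identity against a unique NOT_FOUND object rather than by truthiness. The pattern in isolation (names hypothetical):

NOT_FOUND = object()  # unique sentinel; only identical to itself

def cached_lookup(cache, compute, key):
    result = cache.get(key, NOT_FOUND)
    if result is NOT_FOUND:
        result = compute(key)
        cache.put(key, result)  # None results are cached too
    return result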
Example #51
0
 def __init__(self, cache_size):
     self.lru = LRUCache(cache_size)
     self.hits = 0
     self.misses = 0
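
The hits/misses counters suggest this wrapper tracks cache effectiveness; a plausible get method for such a class (hypothetical, not taken from the original project) would bump the counters around a sentinel probe:

_MISSING = object()

class CountingCache(object):
    def __init__(self, cache_size):
        self.lru = LRUCache(cache_size)
        self.hits = 0
        self.misses = 0

    def get(self, key, default=None):
        # Count each probe as a hit or a miss, then behave like a
        # plain dict-style get.
        value = self.lru.get(key, _MISSING)
        if value is _MISSING:
            self.misses += 1
            return default
        self.hits += 1
        return value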
Example #52
0
 def __init__(self):
     if hasattr(self, '_init'):
         return
     self._init = True
     self.config = RUtils.config.RConfig()
     self._cache = LRUCache(self.config.session_cache_size)
Example #53
0
class DockerClient(object):
    """
    Talk to the real Docker server directly.

    Some operations can take a while (e.g. stopping a container), so we
    use a thread pool. See https://clusterhq.atlassian.net/browse/FLOC-718
    for using a custom thread pool.

    :ivar unicode namespace: A namespace prefix to add to container names
        so we don't clobber other applications interacting with Docker.
    :ivar str base_url: URL for connection to the Docker server.
    :ivar int long_timeout: Maximum time in seconds to wait for
        long-running operations, particularly pulling an image.
    :ivar LRUCache _image_cache: Mapped cache of image IDs to their data.
    """
    def __init__(self,
                 namespace=BASE_NAMESPACE,
                 base_url=None,
                 long_timeout=600):
        self.namespace = namespace
        self._client = dockerpy_client(
            version="1.15",
            base_url=base_url,
            long_timeout=timedelta(seconds=long_timeout),
        )
        self._image_cache = LRUCache(100)

    def _to_container_name(self, unit_name):
        """
        Add the namespace to the container name.

        :param unicode unit_name: The unit's name.

        :return unicode: The container's name.
        """
        return self.namespace + unit_name

    def _parse_container_ports(self, data):
        """
        Parse the ports from a data structure representing the Ports
        configuration of a Docker container in the format returned by
        ``self._client.inspect_container`` and return a list containing
        ``PortMap`` instances mapped to the container and host exposed ports.

        :param dict data: The data structure for the representation of
            container and host port mappings in a single container.
            This takes the form of the ``NetworkSettings.Ports`` portion
            of a container's state and configuration as returned by inspecting
            the container. This is a dictionary mapping container ports to a
            list of host bindings, e.g.
            "3306/tcp": [{"HostIp": "0.0.0.0","HostPort": "53306"},
                         {"HostIp": "0.0.0.0","HostPort": "53307"}]

        :return list: A list that is either empty or contains ``PortMap``
            instances.
        """
        ports = []
        for internal, hostmap in data.items():
            internal_map = internal.split(u'/')
            internal_port = internal_map[0]
            internal_port = int(internal_port)
            if hostmap:
                for host in hostmap:
                    external_port = host[u"HostPort"]
                    external_port = int(external_port)
                    portmap = PortMap(internal_port=internal_port,
                                      external_port=external_port)
                    ports.append(portmap)
        return ports

    def _parse_restart_policy(self, data):
        """
        Parse the restart policy from the configuration of a Docker container
        in the format returned by ``self._client.inspect_container`` and return
        an ``IRestartPolicy``.

        :param dict data: The data structure representing the restart policy of
            a container, e.g.

            {"Name": "policy-name", "MaximumRetryCount": 0}

        :return IRestartPolicy: The model of the restart policy.

        :raises ValueError: if an unknown policy is passed.
        """
        POLICIES = {
            u"":
            lambda data: RestartNever(),
            u"always":
            lambda data: RestartAlways(),
            u"on-failure":
            lambda data: RestartOnFailure(maximum_retry_count=data[
                u"MaximumRetryCount"] or None)
        }
        try:
            # docker will treat an unknown policy as "never".
            # We error out here, in case new policies are added.
            return POLICIES[data[u"Name"]](data)
        except KeyError:
            raise ValueError("Unknown restart policy: %r" % (data[u"Name"], ))

    def _serialize_restart_policy(self, restart_policy):
        """
        Serialize the restart policy from an ``IRestartPolicy`` to the format
        expected by the docker API.

        :param IRestartPolicy restart_policy: The model of the restart policy.

        :returns: A dictionary suitable to pass to docker

        :raises ValueError: if an unknown policy is passed.
        """
        SERIALIZERS = {
            RestartNever: lambda policy: {
                u"Name": u""
            },
            RestartAlways: lambda policy: {
                u"Name": u"always"
            },
            RestartOnFailure: lambda policy: {
                u"Name": u"on-failure",
                u"MaximumRetryCount": policy.maximum_retry_count or 0
            },
        }
        try:
            return SERIALIZERS[restart_policy.__class__](restart_policy)
        except KeyError:
            raise ValueError("Unknown restart policy: %r" % (restart_policy, ))

    def _image_not_found(self, apierror):
        """
        Inspect a ``docker.errors.APIError`` to determine if it represents a
        failure to start a container because the container's image wasn't
        found.

        :return: ``True`` if this is the case, ``False`` if the error has
            another cause.
        :rtype: ``bool``
        """
        return apierror.response.status_code == NOT_FOUND

    def _address_in_use(self, apierror):
        """
        Inspect a ``docker.errors.APIError`` to determine if it represents a
        failure to start a container because the container is configured to use
        ports that are already in use on the system.

        :return: If this is the reason, an exception to raise describing the
            problem.  Otherwise, ``None``.
        """
        # Recognize an error (without newline) like:
        #
        # Cannot start container <name>: Error starting userland proxy:
        # listen tcp <ip>:<port>: bind: address already in use
        #
        # Or (without newline) like:
        #
        # Cannot start container <name>: Bind for <ip>:<port> failed:
        # port is already allocated
        #
        # because Docker can't make up its mind about which format to use.
        parts = apierror.explanation.split(b": ")
        if parts[-1] == b"address already in use":
            ip, port = parts[-3].split()[-1].split(b":")
        elif parts[-1] == b"port is already allocated":
            ip, port = parts[-2].split()[2].split(b":")
        else:
            return None
        return AddressInUse(address=(ip, int(port)), apierror=apierror)

    def _image_data(self, image):
        """
        Supply data about an image, by either inspecting it or returning
        cached data if available.

        :param unicode image: The ID of the image.

        :return: ``dict`` representing data about the image properties.
        """
        cached_image = self._image_cache.get(image)
        if cached_image is not None:
            LOG_CACHED_IMAGE(image=image).write()
            return cached_image
        try:
            image_data = self._client.inspect_image(image)
            Message.new(message_type="flocker:node:docker:image_inspected",
                        image=image).write()
        except APIError as e:
            if e.response.status_code == NOT_FOUND:
                # Image has been deleted, so just fill in some
                # stub data so we can return *something*. This
                # should happen only for stopped containers so
                # some inaccuracy is acceptable.
                # We won't cache stub data though.
                Message.new(message_type="flocker:node:docker:image_not_found",
                            image=image).write()
                image_data = {u"Config": {u"Env": [], u"Cmd": []}}
            else:
                raise
        cached_data = ImageDataCache(command=image_data[u"Config"][u"Cmd"],
                                     environment=image_data[u"Config"][u"Env"])
        self._image_cache.put(image, cached_data)
        Message.new(message_type="flocker:node:docker:image_data_cached",
                    image=image).write()
        return cached_data

    def add(self,
            unit_name,
            image_name,
            ports=None,
            environment=None,
            volumes=(),
            mem_limit=None,
            cpu_shares=None,
            restart_policy=RestartNever(),
            command_line=None,
            swappiness=0):
        container_name = self._to_container_name(unit_name)

        if environment is not None:
            environment = environment.to_dict()
        if ports is None:
            ports = []

        restart_policy_dict = self._serialize_restart_policy(restart_policy)

        def _create():
            binds = list(
                # The "Z" mode tells Docker to "relabel file objects" on the
                # volume.  This makes things work when SELinux is enabled, at
                # least in the default configuration on CentOS 7.  See
                # <https://docs.docker.com/reference/commandline/run/>, in the
                # `--volumes-from` section (or just search for SELinux).
                u"{}:{}:Z".format(volume.node_path.path,
                                  volume.container_path.path)
                for volume in volumes)
            port_bindings = {p.internal_port: p.external_port for p in ports}
            host_config = self._client.create_host_config(
                binds=binds,
                port_bindings=port_bindings,
                restart_policy=restart_policy_dict,
            )
            # We're likely to get e.g. pvector, so make sure we're passing
            # in something JSON serializable:
            command_line_values = command_line
            if command_line_values is not None:
                command_line_values = list(command_line_values)

            memswap_limit = -1
            if swappiness != 0:
                memswap_limit = mem_limit + mem_limit * swappiness

            self._client.create_container(
                name=container_name,
                image=image_name,
                command=command_line_values,
                environment=environment,
                ports=[p.internal_port for p in ports],
                mem_limit=mem_limit,
                cpu_shares=cpu_shares,
                host_config=host_config,
                memswap_limit=memswap_limit,
            )

        def _add():
            try:
                _create()
            except APIError as e:
                if self._image_not_found(e):
                    # Pull it and try again
                    self._client.pull(image_name)
                    _create()
                else:
                    # Unrecognized, just raise it.
                    raise

            # Just because we got a response doesn't mean Docker has
            # actually updated any internal state yet! So if e.g. we did a
            # start on this container Docker might well complain it knows
            # not the container of which we speak. To prevent this we poll
            # until it does exist.
            while True:
                try:
                    self._client.start(container_name)
                except NotFound:
                    sleep(0.01)
                else:
                    break

        d = deferToThread(_add)

        def _extract_error(failure):
            failure.trap(APIError)
            code = failure.value.response.status_code
            if code == 409:
                raise AlreadyExists(unit_name)

            in_use = self._address_in_use(failure.value)
            if in_use is not None:
                # We likely can't start the container because its
                # configuration conflicts with something else happening on
                # the system.  Reflect this failure condition in a more
                # easily recognized way.
                raise in_use

            return failure

        d.addErrback(_extract_error)
        return d

    def _blocking_exists(self, container_name):
        """
        Blocking API to check if container exists.

        :param unicode container_name: The name of the container whose
            existence we're checking.

        :return: ``True`` if unit exists, otherwise ``False``.
        """
        try:
            self._client.inspect_container(container_name)
            return True
        except APIError:
            return False

    def exists(self, unit_name):
        container_name = self._to_container_name(unit_name)
        return deferToThread(self._blocking_exists, container_name)

    def _stop_container(self, container_name):
        """Attempt to stop the given container.

        There is a race condition between a process dying and
        Docker noticing that fact:

        https://github.com/docker/docker/issues/5165#issuecomment-65753753

        If we get an error indicating that this race condition happened,
        return False. This means the caller should try again. If we *do*
        successfully stop the container, return True.

        :raise APIError: If the container failed to stop for some unknown
            reason.
        :return: True if we stopped the container, False otherwise.

        """
        try:
            with start_action(action_type='flocker:docker:container_stop',
                              container=container_name):
                self._client.stop(container_name)
        except APIError as e:
            if e.response.status_code == NOT_FOUND:
                # If the container doesn't exist, we swallow the error,
                # since this method is supposed to be idempotent.
                return True
            elif e.response.status_code == INTERNAL_SERVER_ERROR:
                # Docker returns this if the process had died, but
                # hasn't noticed it yet.
                return False
            else:
                raise
        return True

    def _remove_container(self, container_name):
        """
        Attempt to remove a container.

        Assumes the given container has already been stopped.

        :param unicode container_name: The fully-namespaced name of the
            container.
        :return: True if we removed the container, False otherwise.
        """
        try:
            # The ``docker.Client.stop`` method sometimes returns a
            # 404 error, even though the container exists.
            # See https://github.com/docker/docker/issues/13088
            # Wait until the container has actually stopped running
            # before attempting to remove it.  Otherwise we are
            # likely to see: 'docker.errors.APIError: 409 Client
            # Error: Conflict ("Conflict, You cannot remove a
            # running container. Stop the container before
            # attempting removal or use -f")'
            # This code should probably be removed once the above
            # issue has been resolved. See [FLOC-1850]
            self._client.wait(container_name)

            with start_action(action_type='flocker:docker:container_remove',
                              container=container_name):
                self._client.remove_container(container_name)
        except APIError as e:
            if e.response.status_code == NOT_FOUND:
                # If the container doesn't exist, we swallow the error,
                # since this method is supposed to be idempotent.
                return True
            elif e.response.status_code == INTERNAL_SERVER_ERROR:
                # Failure to remove container - see FLOC-3262 for an example.
                return False
            else:
                raise
        return True

    def remove(self, unit_name):
        container_name = self._to_container_name(unit_name)

        def _remove():
            # Previously, this looped forever and didn't pause between loops.
            # We've arbitrarily chosen a wait interval of 0.001 seconds and
            # 1000 retries (i.e. a second of polling). These values may need
            # tuning.
            poll_until(partial(self._stop_container, container_name),
                       repeat(0.001, 1000))

            # Previously, the container remove was only tried once. Again,
            # these parameters may need tuning.
            poll_until(partial(self._remove_container, container_name),
                       repeat(0.001, 1000))

        d = deferToThread(_remove)
        return d

    def list(self):
        def _list():
            result = set()
            ids = [
                d[u"Id"] for d in self._client.containers(quiet=True, all=True)
            ]
            for i in ids:
                try:
                    data = self._client.inspect_container(i)
                    data = self._client.inspect_container(i)
                except APIError as e:
                    # The container ID returned by the list API call above, may
                    # have been removed in another thread.
                    if e.response.status_code == NOT_FOUND:
                        continue
                    else:
                        raise

                state = (u"active"
                         if data[u"State"][u"Running"] else u"inactive")
                name = data[u"Name"]
                # Since tags (e.g. "busybox") aren't stable, ensure we're
                # looking at the actual image by using the hash:
                image = data[u"Image"]
                image_tag = data[u"Config"][u"Image"]
                command = data[u"Config"][u"Cmd"]
                with start_action(
                        action_type=u"flocker:node:docker:inspect_image",
                        container=i,
                        running=data[u"State"][u"Running"]):
                    image_data = self._image_data(image)
                if image_data.command == command:
                    command = None
                port_bindings = data[u"NetworkSettings"][u"Ports"]
                if port_bindings is not None:
                    ports = self._parse_container_ports(port_bindings)
                else:
                    ports = list()
                volumes = []
                binds = data[u"HostConfig"]['Binds']
                if binds is not None:
                    for bind_config in binds:
                        parts = bind_config.split(':', 2)
                        node_path, container_path = parts[:2]
                        volumes.append(
                            Volume(container_path=FilePath(container_path),
                                   node_path=FilePath(node_path)))
                if name.startswith(u"/" + self.namespace):
                    name = name[1 + len(self.namespace):]
                else:
                    continue
                # Retrieve environment variables for this container,
                # disregarding any environment variables that are part
                # of the image, rather than supplied in the configuration.
                unit_environment = []
                container_environment = data[u"Config"][u"Env"]
                if image_data.environment is None:
                    image_environment = []
                else:
                    image_environment = image_data.environment
                if container_environment is not None:
                    for environment in container_environment:
                        if environment not in image_environment:
                            env_key, env_value = environment.split('=', 1)
                            unit_environment.append((env_key, env_value))
                unit_environment = (Environment(
                    variables=frozenset(unit_environment))
                                    if unit_environment else None)
                # Our Unit model counts None as the value for cpu_shares and
                # mem_limit in containers without specified limits, however
                # Docker returns the values in these cases as zero, so we
                # manually convert.
                cpu_shares = data[u"Config"][u"CpuShares"]
                cpu_shares = None if cpu_shares == 0 else cpu_shares
                mem_limit = data[u"Config"][u"Memory"]
                mem_limit = None if mem_limit == 0 else mem_limit
                restart_policy = self._parse_restart_policy(
                    data[u"HostConfig"][u"RestartPolicy"])
                result.add(
                    Unit(name=name,
                         container_name=self._to_container_name(name),
                         activation_state=state,
                         container_image=image_tag,
                         ports=frozenset(ports),
                         volumes=frozenset(volumes),
                         environment=unit_environment,
                         mem_limit=mem_limit,
                         cpu_shares=cpu_shares,
                         restart_policy=restart_policy,
                         command_line=command))
            return result

        return deferToThread(_list)
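
_address_in_use above works purely on the error text, so the parsing is easy to exercise on its own. A trimmed sketch of the same logic (Python 2 byte strings, as in the original; AddressInUse replaced by a plain tuple):

def parse_address_in_use(explanation):
    # Recognize the two known "port taken" message formats and pull
    # out the ip:port pair; return None for anything else.
    parts = explanation.split(b": ")
    if parts[-1] == b"address already in use":
        ip, port = parts[-3].split()[-1].split(b":")
    elif parts[-1] == b"port is already allocated":
        ip, port = parts[-2].split()[2].split(b":")
    else:
        return None
    return (ip, int(port))

parse_address_in_use(b"Cannot start container c1: Error starting userland proxy: "
                     b"listen tcp 127.0.0.1:8080: bind: address already in use")
# -> ('127.0.0.1', 8080)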
Example #54
0
 def __init__(self):
     super(EDBag, self).__init__()
     self.cache1 = LRUCache(256) # values where distance=1
     self.cache2 = LRUCache(256) # values where distance>1
Example #55
0
 def __init__(self):
     if hasattr(self, '_init'):
         return
     self._init = True
     config = RConfig()
     self._cache = LRUCache(config.session_cache_size)
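
Examples #52 and #55 guard __init__ with an _init flag. That pattern usually accompanies a singleton whose __new__ returns a shared instance: __init__ still runs on every construction, so the guard makes repeat initialization a no-op. A minimal sketch of that pairing (hypothetical; the original class's __new__ is not shown in these snippets):

class Session(object):
    _instance = None

    def __new__(cls):
        # Every Session() call returns the same shared object.
        if cls._instance is None:
            cls._instance = super(Session, cls).__new__(cls)
        return cls._instance

    def __init__(self):
        # __init__ runs on every Session() call; bail out after the first.
        if hasattr(self, '_init'):
            return
        self._init = True
        self._cache = LRUCache(128)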
Example #56
0
class Mapper(SubMapperParent):
    """Mapper handles URL generation and URL recognition in a web
    application.
    
    Mapper is built around handling dictionaries. It is assumed that the web
    application will handle the dictionary returned by URL recognition
    to dispatch appropriately.
    
    URL generation is done by passing keyword parameters into the
    generate function; a URL is then returned.
    
    """
    def __init__(self, controller_scan=controller_scan, directory=None, 
                 always_scan=False, register=True, explicit=True):
        """Create a new Mapper instance
        
        All keyword arguments are optional.
        
        ``controller_scan``
            Function reference that will be used to return a list of
            valid controllers used during URL matching. If
            ``directory`` keyword arg is present, it will be passed
            into the function during its call. This option defaults to
            a function that will scan a directory for controllers.
            
            Alternatively, a list of controllers or None can be passed
            in, which is assumed to be the definitive list of
            controller names valid when matching 'controller'.
        
        ``directory``
            Passed into controller_scan for the directory to scan. It
            should be an absolute path if using the default 
            ``controller_scan`` function.
        
        ``always_scan``
            Whether or not the ``controller_scan`` function should be
            run during every URL match. This is typically a good idea
            during development so the server won't need to be restarted
            anytime a controller is added.
        
        ``register``
            Boolean used to determine if the Mapper should use 
            ``request_config`` to register itself as the mapper. Since
            it's done on a thread-local basis, this is typically best
            used during testing though it won't hurt in other cases.
        
        ``explicit``
            Boolean used to determine if routes should be connected
            with implicit defaults of::
                
                {'controller':'content','action':'index','id':None}
            
            When set to True, these defaults will not be added to route
            connections and ``url_for`` will not use Route memory.
                
        Additional attributes that may be set after mapper
        initialization (i.e., map.ATTRIBUTE = 'something'):
        
        ``encoding``
            Used to indicate alternative encoding/decoding systems to
            use with both incoming URLs, and during Route generation
            when passed a Unicode string. Defaults to 'utf-8'.
        
        ``decode_errors``
            How to handle errors in the encoding, generally ignoring
            any chars that don't convert should be sufficient. Defaults
            to 'ignore'.
        
        ``minimization``
            Boolean used to indicate whether or not Routes should
            minimize URLs and the generated URLs, or require every
            part where it appears in the path. Defaults to True.
        
        ``hardcode_names``
            Whether or not Named Routes result in the default options
            for the route being used *or* if they actually force url
            generation to use the route. Defaults to False.
        
        """
        self.matchlist = []
        self.maxkeys = {}
        self.minkeys = {}
        self.urlcache = LRUCache(1600)
        self._created_regs = False
        self._created_gens = False
        self._master_regexp = None
        self.prefix = None
        self.req_data = threading.local()
        self.directory = directory
        self.always_scan = always_scan
        self.controller_scan = controller_scan
        self._regprefix = None
        self._routenames = {}
        self.debug = False
        self.append_slash = False
        self.sub_domains = False
        self.sub_domains_ignore = []
        self.domain_match = r'[^\.\/]+?\.[^\.\/]+'
        self.explicit = explicit
        self.encoding = 'utf-8'
        self.decode_errors = 'ignore'
        self.hardcode_names = True
        self.minimization = False
        self.create_regs_lock = threading.Lock()
        if register:
            config = request_config()
            config.mapper = self
    
    def __str__(self):
        """Generates a tabular string representation."""
        def format_methods(r):
            if r.conditions:
                method = r.conditions.get('method', '')
                return type(method) is str and method or ', '.join(method)
            else:
                return ''

        table = [('Route name', 'Methods', 'Path')] + \
                [(r.name or '', format_methods(r), r.routepath or '')
                 for r in self.matchlist]
            
        widths = [max(len(row[col]) for row in table)
                  for col in range(len(table[0]))]
        
        return '\n'.join(
            ' '.join(row[col].ljust(widths[col])
                     for col in range(len(widths)))
            for row in table)

    def _envget(self):
        try:
            return self.req_data.environ
        except AttributeError:
            return None
    def _envset(self, env):
        self.req_data.environ = env
    def _envdel(self):
        del self.req_data.environ
    environ = property(_envget, _envset, _envdel)
    
    def extend(self, routes, path_prefix=''):
        """Extends the mapper routes with a list of Route objects
        
        If a path_prefix is provided, all the routes will have their
        path prepended with the path_prefix.
        
        Example::
            
            >>> map = Mapper(controller_scan=None)
            >>> map.connect('home', '/', controller='home', action='splash')
            >>> map.matchlist[0].name == 'home'
            True
            >>> routes = [Route('index', '/index.htm', controller='home',
            ...                 action='index')]
            >>> map.extend(routes)
            >>> len(map.matchlist) == 2
            True
            >>> map.extend(routes, path_prefix='/subapp')
            >>> len(map.matchlist) == 3
            True
            >>> map.matchlist[2].routepath == '/subapp/index.htm'
            True
        
        .. note::
            
            This function does not merely extend the mapper with the
            given list of routes, it actually creates new routes with
            identical calling arguments.
        
        """
        for route in routes:
            if path_prefix and route.minimization:
                routepath = '/'.join([path_prefix, route.routepath])
            elif path_prefix:
                routepath = path_prefix + route.routepath
            else:
                routepath = route.routepath
            self.connect(route.name, routepath, **route._kargs)
                
    def connect(self, *args, **kargs):
        """Create and connect a new Route to the Mapper.
        
        Usage:
        
        .. code-block:: python
        
            m = Mapper()
            m.connect(':controller/:action/:id')
            m.connect('date/:year/:month/:day', controller="blog", action="view")
            m.connect('archives/:page', controller="blog", action="by_page",
            requirements = { 'page':'\d{1,2}' })
            m.connect('category_list', 'archives/category/:section', controller='blog', action='category',
            section='home', type='list')
            m.connect('home', '', controller='blog', action='view', section='home')
        
        """
        routename = None
        if len(args) > 1:
            routename = args[0]
        else:
            args = (None,) + args
        if '_explicit' not in kargs:
            kargs['_explicit'] = self.explicit
        if '_minimize' not in kargs:
            kargs['_minimize'] = self.minimization
        route = Route(*args, **kargs)
                
        # Apply encoding and errors if its not the defaults and the route 
        # didn't have one passed in.
        if (self.encoding != 'utf-8' or self.decode_errors != 'ignore') and \
           '_encoding' not in kargs:
            route.encoding = self.encoding
            route.decode_errors = self.decode_errors
        
        if not route.static:
            self.matchlist.append(route)
        
        if routename:
            self._routenames[routename] = route
            route.name = routename
        if route.static:
            return
        exists = False
        for key in self.maxkeys:
            if key == route.maxkeys:
                self.maxkeys[key].append(route)
                exists = True
                break
        if not exists:
            self.maxkeys[route.maxkeys] = [route]
        self._created_gens = False
    
    def _create_gens(self):
        """Create the generation hashes for route lookups"""
        # Use keys temporarily to assemble the list to avoid excessive
        # list iteration testing with "in"
        controllerlist = {}
        actionlist = {}
        
        # Assemble all the hardcoded/defaulted actions/controllers used
        for route in self.matchlist:
            if route.static:
                continue
            if route.defaults.has_key('controller'):
                controllerlist[route.defaults['controller']] = True
            if route.defaults.has_key('action'):
                actionlist[route.defaults['action']] = True
        
        # Setup the lists of all controllers/actions we'll add each route
        # to. We include the '*' in the case that a generate contains a
        # controller/action that has no hardcodes
        controllerlist = controllerlist.keys() + ['*']
        actionlist = actionlist.keys() + ['*']
        
        # Go through our list again, assemble the controllers/actions we'll
        # add each route to. If its hardcoded, we only add it to that dict key.
        # Otherwise we add it to every hardcode since it can be changed.
        gendict = {} # Our generated two-deep hash
        for route in self.matchlist:
            if route.static:
                continue
            clist = controllerlist
            alist = actionlist
            if 'controller' in route.hardcoded:
                clist = [route.defaults['controller']]
            if 'action' in route.hardcoded:
                alist = [unicode(route.defaults['action'])]
            for controller in clist:
                for action in alist:
                    actiondict = gendict.setdefault(controller, {})
                    actiondict.setdefault(action, ([], {}))[0].append(route)
        self._gendict = gendict
        self._created_gens = True

    def create_regs(self, *args, **kwargs):
        """Atomically creates regular expressions for all connected
        routes
        """
        self.create_regs_lock.acquire()
        try:
            self._create_regs(*args, **kwargs)
        finally:
            self.create_regs_lock.release()
    
    def _create_regs(self, clist=None):
        """Creates regular expressions for all connected routes"""
        if clist is None:
            if self.directory:
                clist = self.controller_scan(self.directory)
            elif callable(self.controller_scan):
                clist = self.controller_scan()
            elif not self.controller_scan:
                clist = []
            else:
                clist = self.controller_scan
        
        for key, val in self.maxkeys.iteritems():
            for route in val:
                route.makeregexp(clist)
        
        regexps = []
        routematches = []
        for route in self.matchlist:
            if not route.static:
                routematches.append(route)
                regexps.append(route.makeregexp(clist, include_names=False))
        self._routematches = routematches
        
        # Create our regexp to strip the prefix
        if self.prefix:
            self._regprefix = re.compile(self.prefix + '(.*)')
        
        # Save the master regexp
        regexp = '|'.join(['(?:%s)' % x for x in regexps])
        self._master_reg = regexp
        self._master_regexp = re.compile(regexp)
        self._created_regs = True
    
    def _match(self, url, environ):
        """Internal Route matcher
        
        Matches a URL against a route, and returns a tuple of the match
        dict and the route object if a match is successful, otherwise
        it returns empty.
        
        For internal use only.
        
        """
        if not self._created_regs and self.controller_scan:
            self.create_regs()
        elif not self._created_regs:
            raise RoutesException("You must generate the regular expressions"
                                 " before matching.")
        
        if self.always_scan:
            self.create_regs()
        
        matchlog = []
        if self.prefix:
            if re.match(self._regprefix, url):
                url = re.sub(self._regprefix, r'\1', url)
                if not url:
                    url = '/'
            else:
                return (None, None, matchlog)
                
        environ = environ or self.environ
        sub_domains = self.sub_domains
        sub_domains_ignore = self.sub_domains_ignore
        domain_match = self.domain_match
        debug = self.debug
        
        # Check to see if its a valid url against the main regexp
        # Done for faster invalid URL elimination
        valid_url = re.match(self._master_regexp, url)
        if not valid_url:
            return (None, None, matchlog)
        
        for route in self.matchlist:
            if route.static:
                if debug:
                    matchlog.append(dict(route=route, static=True))
                continue
            match = route.match(url, environ, sub_domains, sub_domains_ignore,
                                domain_match)
            if debug:
                matchlog.append(dict(route=route, regexp=bool(match)))
            if isinstance(match, dict) or match:
                return (match, route, matchlog)
        return (None, None, matchlog)
    
    def match(self, url=None, environ=None):
        """Match a URL against against one of the routes contained.
        
        Will return None if no valid match is found.
        
        .. code-block:: python
            
            resultdict = m.match('/joe/sixpack')
        
        """
        if not url and not environ:
            raise RoutesException('URL or environ must be provided')
        
        if not url:
            url = environ['PATH_INFO']
                
        result = self._match(url, environ)
        if self.debug:
            return result[0], result[1], result[2]
        if isinstance(result[0], dict) or result[0]:
            return result[0]
        return None
    
    def routematch(self, url=None, environ=None):
        """Match a URL against against one of the routes contained.
        
        Will return None if no valid match is found, otherwise a
        result dict and a route object are returned.
        
        .. code-block:: python
        
            resultdict, route_obj = m.match('/joe/sixpack')
        
        """
        if not url and not environ:
            raise RoutesException('URL or environ must be provided')
        
        if not url:
            url = environ['PATH_INFO']
        result = self._match(url, environ)
        if self.debug:
            return result[0], result[1], result[2]
        if isinstance(result[0], dict) or result[0]:
            return result[0], result[1]
        return None
    
    def generate(self, *args, **kargs):
        """Generate a route from a set of keywords
        
        Returns the url text, or None if no URL could be generated.
        
        .. code-block:: python
            
            m.generate(controller='content',action='view',id=10)
        
        """
        # Generate ourself if we haven't already
        if not self._created_gens:
            self._create_gens()
        
        if self.append_slash:
            kargs['_append_slash'] = True
        
        if not self.explicit:
            if 'controller' not in kargs:
                kargs['controller'] = 'content'
            if 'action' not in kargs:
                kargs['action'] = 'index'
        
        environ = kargs.pop('_environ', self.environ)
        controller = kargs.get('controller', None)
        action = kargs.get('action', None)

        # Cache the URL keyed by SCRIPT_NAME and kargs

        cache_key = unicode(args).encode('utf8') + \
            unicode(kargs).encode('utf8')

        if self.environ:
            cache_key = '{0}:{1}'.format(self.environ.get('SCRIPT_NAME', '@&?NO_SCRIPT_NAME?&@'), cache_key)
        else:
            cache_key = '@&?NO_ENVIRON?&@:' + cache_key
        
        if self.urlcache is not None:
            # Check the url cache to see if it exists, use it if it does
            val = self.urlcache.get(cache_key)
            if val is not None:
                return val
        
        actionlist = self._gendict.get(controller) or self._gendict.get('*', {})
        if not actionlist and not args:
            return None
        (keylist, sortcache) = actionlist.get(action) or \
                               actionlist.get('*', (None, {}))
        if not keylist and not args:
            return None

        keys = frozenset(kargs.keys())
        cacheset = False
        cachekey = unicode(keys)
        cachelist = sortcache.get(cachekey)
        if args:
            keylist = args
        elif cachelist:
            keylist = cachelist
        else:
            cacheset = True
            newlist = []
            for route in keylist:
                if len(route.minkeys - route.dotkeys - keys) == 0:
                    newlist.append(route)
            keylist = newlist
            
            def keysort(a, b):
                """Sorts two sets of sets, to order them ideally for
                matching."""
                am = a.minkeys
                a = a.maxkeys
                b = b.maxkeys
                
                lendiffa = len(keys^a)
                lendiffb = len(keys^b)
                # If they both match, don't switch them
                if lendiffa == 0 and lendiffb == 0:
                    return 0
                
                # First, if a matches exactly, use it
                if lendiffa == 0:
                    return -1
                
                # Or b matches exactly, use it
                if lendiffb == 0:
                    return 1
                
                # Neither matches exactly, return the one with the most in 
                # common
                if cmp(lendiffa, lendiffb) != 0:
                    return cmp(lendiffa, lendiffb)
                
                # Neither matches exactly, but if they both have just as much 
                # in common
                if len(keys&b) == len(keys&a):
                    # Then we return the shortest of the two
                    return cmp(len(a), len(b))
                
                # Otherwise, we return the one that has the most in common
                else:
                    return cmp(len(keys&b), len(keys&a))
            
            keylist.sort(keysort)
            if cacheset:
                sortcache[cachekey] = keylist
                
        # Iterate through the keylist of sorted routes (or a single route if
        # it was passed in explicitly for hardcoded named routes)
        for route in keylist:
            fail = False
            for key in route.hardcoded:
                kval = kargs.get(key)
                if not kval:
                    continue
                if isinstance(kval, str):
                    kval = kval.decode(self.encoding)
                else:
                    kval = unicode(kval)
                if kval != route.defaults[key] and not callable(route.defaults[key]):
                    fail = True
                    break
            if fail:
                continue
            path = route.generate(**kargs)
            if path:
                if self.prefix:
                    path = self.prefix + path

                external_static = route.static and route.external
                if environ and environ.get('SCRIPT_NAME', '') != ''\
                        and not route.absolute and not external_static:
                    path = environ['SCRIPT_NAME'] + path

                if self.urlcache is not None:
                    self.urlcache.put(cache_key, str(path))
                return str(path)
            else:
                continue
        return None
    
    def resource(self, member_name, collection_name, **kwargs):
        """Generate routes for a controller resource
        
        The member_name should be the appropriate singular form of the
        resource for your locale, and is used when referring to
        individual members of the collection. The collection_name will
        be used to refer to the resource collection methods and should
        be the plural form of the member_name argument. By default, the
        collection_name is also assumed to map to a controller you
        create.
        
        The concept of a web resource maps somewhat directly to 'CRUD'
        operations. The overriding thing to keep in mind is that
        mapping a resource is about handling the creation, viewing, and
        editing of that resource.
        
        All keyword arguments are optional.
        
        ``controller``
            If specified in the keyword args, the controller will be
            the actual controller used, but the rest of the naming
            conventions used for the route names and URL paths are
            unchanged.
        
        ``collection``
            Additional action mappings used to manipulate/view the
            entire set of resources provided by the controller.
            
            Example::
                
                map.resource('message', 'messages', collection={'rss':'GET'})
                # GET /messages/rss (maps to the rss action)
                # also adds named route "rss_message"
        
        ``member``
            Additional action mappings used to access an individual
            'member' of this controller's resources.
            
            Example::
                
                map.resource('message', 'messages', member={'mark':'POST'})
                # POST /messages/1/mark (maps to the mark action)
                # also adds named route "mark_message"
        
        ``new``
            Action mappings for dealing with a new member of the
            controller's resources.
            
            Example::
                
                map.resource('message', 'messages', new={'preview':'POST'})
                # POST /messages/new/preview (maps to the preview action)
                # also adds named route "preview_new_message"
        
        ``path_prefix``
            Prepends the given path_prefix to the URL path for the
            Route. This is most useful for cases where you want to mix
            resources or relations between resources.
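
            Example (an illustrative sketch)::

                map.resource('message', 'messages',
                             path_prefix='/category/:category_id')
                # GET /category/7/messages/1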
        
        ``name_prefix``
            Prepends the route names that are generated with the
            name_prefix given. Combined with the path_prefix option,
            it's easy to generate route names and paths that represent
            resources that are in relations.
            
            Example::
                
                map.resource('message', 'messages', controller='categories', 
                    path_prefix='/category/:category_id', 
                    name_prefix="category_")
                # GET /category/7/messages/1
                # has named route "category_message"
                
        ``parent_resource`` 
            A ``dict`` containing information about the parent
            resource, for creating a nested resource. It should contain
            the ``member_name`` and ``collection_name`` of the parent
            resource. This ``dict`` will 
            be available via the associated ``Route`` object which can
            be accessed during a request via
            ``request.environ['routes.route']``
 
            If ``parent_resource`` is supplied and ``path_prefix``
            isn't, ``path_prefix`` will be generated from
            ``parent_resource`` as
            "<parent collection name>/:<parent member name>_id". 

            If ``parent_resource`` is supplied and ``name_prefix``
            isn't, ``name_prefix`` will be generated from
            ``parent_resource`` as  "<parent member name>_". 
 
            Example:: 
 
                >>> from routes.util import url_for 
                >>> m = Mapper() 
                >>> m.resource('location', 'locations', 
                ...            parent_resource=dict(member_name='region', 
                ...                                 collection_name='regions'))
                >>> # path_prefix is "regions/:region_id" 
                >>> # name prefix is "region_"  
                >>> url_for('region_locations', region_id=13) 
                '/regions/13/locations'
                >>> url_for('region_new_location', region_id=13) 
                '/regions/13/locations/new'
                >>> url_for('region_location', region_id=13, id=60) 
                '/regions/13/locations/60'
                >>> url_for('region_edit_location', region_id=13, id=60) 
                '/regions/13/locations/60/edit'

            Overriding generated ``path_prefix``::

                >>> m = Mapper()
                >>> m.resource('location', 'locations',
                ...            parent_resource=dict(member_name='region',
                ...                                 collection_name='regions'),
                ...            path_prefix='areas/:area_id')
                >>> # name prefix is "region_"
                >>> url_for('region_locations', area_id=51)
                '/areas/51/locations'

            Overriding generated ``name_prefix``::

                >>> m = Mapper()
                >>> m.resource('location', 'locations',
                ...            parent_resource=dict(member_name='region',
                ...                                 collection_name='regions'),
                ...            name_prefix='')
                >>> # path_prefix is "regions/:region_id" 
                >>> url_for('locations', region_id=51)
                '/regions/51/locations'

        """
        collection = kwargs.pop('collection', {})
        member = kwargs.pop('member', {})
        new = kwargs.pop('new', {})
        path_prefix = kwargs.pop('path_prefix', None)
        name_prefix = kwargs.pop('name_prefix', None)
        parent_resource = kwargs.pop('parent_resource', None)
        
        # Generate ``path_prefix`` if ``path_prefix`` wasn't specified and 
        # ``parent_resource`` was. Likewise for ``name_prefix``. Make sure
        # that ``path_prefix`` and ``name_prefix`` *always* take precedence if
        # they are specified--in particular, we need to be careful when they
        # are explicitly set to "".
        if parent_resource is not None: 
            if path_prefix is None: 
                path_prefix = '%s/:%s_id' % (parent_resource['collection_name'], 
                                             parent_resource['member_name']) 
            if name_prefix is None:
                name_prefix = '%s_' % parent_resource['member_name']
        else:
            if path_prefix is None: path_prefix = ''
            if name_prefix is None: name_prefix = ''
        
        # Ensure the 'edit' and 'new' actions are present and mapped to GET
        member['edit'] = 'GET'
        new.update({'new': 'GET'})
        
        # Make new dicts based on the old ones: the old values become keys,
        # and the old keys become items in a list as the value
        def swap(dct, newdct):
            """Swap the keys and values in the dict, and uppercase the values
            from the dict during the swap."""
            for key, val in dct.iteritems():
                newdct.setdefault(val.upper(), []).append(key)
            return newdct
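        # For instance, using the docstring's values above:
        # swap({'rss': 'GET'}, {}) returns {'GET': ['rss']} and
        # swap({'mark': 'POST'}, {}) returns {'POST': ['mark']}.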
        collection_methods = swap(collection, {})
        member_methods = swap(member, {})
        new_methods = swap(new, {})
        
        # Insert create, update, and destroy methods
        collection_methods.setdefault('POST', []).insert(0, 'create')
        member_methods.setdefault('PUT', []).insert(0, 'update')
        member_methods.setdefault('DELETE', []).insert(0, 'delete')
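        # e.g. with the docstring's collection={'rss': 'GET'},
        # collection_methods is now {'GET': ['rss'], 'POST': ['create']}.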
        
        # If there's a path prefix option, use it with the controller
        controller = strip_slashes(collection_name)
        path_prefix = strip_slashes(path_prefix)
        path_prefix = '/' + path_prefix
        if path_prefix and path_prefix != '/':
            path = path_prefix + '/' + controller
        else:
            path = '/' + controller
        collection_path = path
        new_path = path + "/new"
        member_path = path + "/:(id)"
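        # e.g. map.resource('message', 'messages') with no prefix yields
        # collection_path '/messages', new_path '/messages/new', and
        # member_path '/messages/:(id)'.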
        
        options = { 
            'controller': kwargs.get('controller', controller),
            '_member_name': member_name,
            '_collection_name': collection_name,
            '_parent_resource': parent_resource,
            '_filter': kwargs.get('_filter')
        }
        
        def requirements_for(meth):
            """Returns a new dict to be used for all route creation as the
            route options"""
            opts = options.copy()
            if meth != 'any':  # compare the parameter, not a leaked loop name
                opts['conditions'] = {'method':[meth.upper()]}
            return opts
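        # e.g. requirements_for('get') returns a copy of options plus
        # {'conditions': {'method': ['GET']}}.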
        
        # Add the routes for handling collection methods
        for method, lst in collection_methods.iteritems():
            primary = (method != 'GET' and lst.pop(0)) or None
            route_options = requirements_for(method)
            for action in lst:
                route_options['action'] = action
                route_name = "%s%s_%s" % (name_prefix, action, collection_name)
                self.connect("formatted_" + route_name, "%s/%s.:(format)" % \
                             (collection_path, action), **route_options)
                self.connect(route_name, "%s/%s" % (collection_path, action),
                                                    **route_options)
            if primary:
                route_options['action'] = primary
                self.connect("%s.:(format)" % collection_path, **route_options)
                self.connect(collection_path, **route_options)
        
        # Specifically add in the built-in 'index' collection method and its 
        # formatted version
        self.connect("formatted_" + name_prefix + collection_name, 
            collection_path + ".:(format)", action='index', 
            conditions={'method':['GET']}, **options)
        self.connect(name_prefix + collection_name, collection_path, 
                     action='index', conditions={'method':['GET']}, **options)
        
        # Add the routes that deal with new resource methods
        for method, lst in new_methods.iteritems():
            route_options = requirements_for(method)
            for action in lst:
                path = (action == 'new' and new_path) or "%s/%s" % (new_path, 
                                                                    action)
                name = "new_" + member_name
                if action != 'new':
                    name = action + "_" + name
                route_options['action'] = action
                formatted_path = (action == 'new' and new_path + '.:(format)') or \
                    "%s/%s.:(format)" % (new_path, action)
                self.connect("formatted_" + name_prefix + name, formatted_path, 
                             **route_options)
                self.connect(name_prefix + name, path, **route_options)

        requirements_regexp = r'[^\/]+(?<!\\)'

        # Add the routes that deal with member methods of a resource
        for method, lst in member_methods.iteritems():
            route_options = requirements_for(method)
            route_options['requirements'] = {'id':requirements_regexp}
            if method not in ['POST', 'GET', 'any']:
                primary = lst.pop(0)
            else:
                primary = None
            for action in lst:
                route_options['action'] = action
                self.connect("formatted_%s%s_%s" % (name_prefix, action, 
                                                    member_name),
                    "%s/%s.:(format)" % (member_path, action), **route_options)
                self.connect("%s%s_%s" % (name_prefix, action, member_name),
                    "%s/%s" % (member_path, action), **route_options)
            if primary:
                route_options['action'] = primary
                self.connect("%s.:(format)" % member_path, **route_options)
                self.connect(member_path, **route_options)
        
        # Specifically add the member 'show' method
        route_options = requirements_for('GET')
        route_options['action'] = 'show'
        route_options['requirements'] = {'id':requirements_regexp}
        self.connect("formatted_" + name_prefix + member_name, 
                     member_path + ".:(format)", **route_options)
        self.connect(name_prefix + member_name, member_path, **route_options)
    
    def redirect(self, match_path, destination_path, *args, **kwargs):
        """Add a redirect route to the mapper
        
        Redirect routes bypass the wrapped WSGI application and instead
        result in a redirect being issued by the RoutesMiddleware. As
        such, this method is only meaningful when using
        RoutesMiddleware.
        
        By default, a 302 Found status code is used; this can be
        changed by providing a ``_redirect_code`` keyword argument.
        Note that the entire status code string needs to be present.
        
        When using keyword arguments, all arguments that apply to
        matching will be used for the match, while generation specific
        options will be used during generation. Thus all options
        normally available to connected Routes may be used with
        redirect routes as well.
        
        Example::
            
            map = Mapper()
            map.redirect('/legacyapp/archives/{url:.*}', '/archives/{url}')
            map.redirect('/home/index', '/', _redirect_code='301 Moved Permanently')
        
        """
        both_args = ['_encoding', '_explicit', '_minimize']
        gen_args = ['_filter']
        
        status_code = kwargs.pop('_redirect_code', '302 Found')
        gen_dict, match_dict = {}, {}
        
        # Create the dict of args for the generation route
        for key in both_args + gen_args:
            if key in kwargs:
                gen_dict[key] = kwargs[key]
        gen_dict['_static'] = True
        
        # Create the dict of args for the matching route
        for key in kwargs:
            if key not in gen_args:
                match_dict[key] = kwargs[key]
        
        self.connect(match_path, **match_dict)
        match_route = self.matchlist[-1]
        
        self.connect('_redirect_%s' % id(match_route), destination_path,
                     **gen_dict)
        match_route.redirect = True
        match_route.redirect_status = status_code
Example #57
0
 def __init__(self, controller_scan=controller_scan, directory=None, 
              always_scan=False, register=True, explicit=True):
     """Create a new Mapper instance
     
     All keyword arguments are optional.
     
     ``controller_scan``
         Function reference that will be used to return a list of
         valid controllers used during URL matching. If
         ``directory`` keyword arg is present, it will be passed
         into the function during its call. This option defaults to
         a function that will scan a directory for controllers.
         
          Alternatively, a list of controllers (or None) can be passed
          in; it is assumed to be the definitive list of controller
          names valid when matching the 'controller' part of a URL.
     
     ``directory``
         Passed into controller_scan for the directory to scan. It
         should be an absolute path if using the default 
         ``controller_scan`` function.
     
     ``always_scan``
         Whether or not the ``controller_scan`` function should be
         run during every URL match. This is typically a good idea
         during development so the server won't need to be restarted
         anytime a controller is added.
     
     ``register``
         Boolean used to determine if the Mapper should use 
         ``request_config`` to register itself as the mapper. Since
         it's done on a thread-local basis, this is typically best
         used during testing though it won't hurt in other cases.
     
     ``explicit``
         Boolean used to determine if routes should be connected
         with implicit defaults of::
             
             {'controller':'content','action':'index','id':None}
         
         When set to True, these defaults will not be added to route
         connections and ``url_for`` will not use Route memory.
             
      Additional attributes that may be set after mapper
      initialization (i.e., ``map.ATTRIBUTE = 'something'``):
     
     ``encoding``
          Used to indicate an alternative encoding/decoding system to
          use with incoming URLs, and during route generation when
          passed a unicode string. Defaults to 'utf-8'.
     
     ``decode_errors``
          How to handle errors in the encoding; generally, ignoring
          any chars that don't convert should be sufficient. Defaults
          to 'ignore'.
     
     ``minimization``
          Boolean used to indicate whether or not Routes should
          minimize URLs during both matching and generation, or require
          every part to be present where it appears in the path.
          Defaults to False.
     
     ``hardcode_names``
          Whether or not named routes merely supply the default options
          for the route *or* actually force URL generation to use that
          route. Defaults to True.
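
      Example (an illustrative sketch of setting these attributes)::

          map = Mapper(register=False)
          map.encoding = 'utf-8'
          map.minimization = True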
     
     """
     self.matchlist = []
     self.maxkeys = {}
     self.minkeys = {}
     self.urlcache = LRUCache(1600)
     self._created_regs = False
     self._created_gens = False
     self._master_regexp = None
     self.prefix = None
     self.req_data = threading.local()
     self.directory = directory
     self.always_scan = always_scan
     self.controller_scan = controller_scan
     self._regprefix = None
     self._routenames = {}
     self.debug = False
     self.append_slash = False
     self.sub_domains = False
     self.sub_domains_ignore = []
      self.domain_match = r'[^\.\/]+?\.[^\.\/]+'
     self.explicit = explicit
     self.encoding = 'utf-8'
     self.decode_errors = 'ignore'
     self.hardcode_names = True
     self.minimization = False
     self.create_regs_lock = threading.Lock()
     if register:
         config = request_config()
         config.mapper = self
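
 # A minimal usage sketch (hypothetical route name and controller), using
 # only the API demonstrated in this example:
 #
 #     m = Mapper(controller_scan=None, register=False, explicit=True)
 #     m.connect('home', '/', controller='main', action='index')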
Example #58
0
 def __init__(self):
     self.underlying = adapter.AdapterRegistry()
     self.cache = LRUCache(500)