Example #1
def find_injections(pcap_file):
    _cache = LRUCache(10000)  # 10,000 entries - should last at least 100 msec on a 100% utilized gigabit network
    _hitset = set()
    with open(pcap_file, "rb") as f:
        pcap = dpkt.pcap.Reader(f)
        try:
            for ts, buf in pcap:
                ip = get_ip_packet(buf, pcap)
                try:
                    if(ip is not None and ip.p == dpkt.ip.IP_PROTO_TCP):
                        tcp = ip.data
                        if((tcp.sport in PORT_SET or tcp.dport in PORT_SET) and len(tcp.data) > 1):
                            key = get_key(ip, tcp)
                            # ip.len    : 16-bit field, but <= 1500 (11 bits) at standard MTU
                            # ip.ttl    : 8 bits
                            # tcp.flags : 8 bits (normally)
                            value = (ip.ttl << 24) ^ (tcp.flags << 16) ^ ip.len
                            if(_cache.get(key) is None):
                                _cache.put(key, value)
                            else:
                                if(_cache.get(key) != value):
                                    _hitset.add(key)
                except Exception: pass

        except dpkt.dpkt.NeedData: pass
    injection_count = 0
    if(len(_hitset) > 0):
        _cache = LRUCache(1024)
        with open(pcap_file, "rb") as f:
            pcap = dpkt.pcap.Reader(f)
            try:
                for ts, buf in pcap:
                    ip = get_ip_packet(buf, pcap)
                    if(ip is not None and ip.p == dpkt.ip.IP_PROTO_TCP and (ip.data.sport in PORT_SET or ip.data.dport in PORT_SET)):
                        key = get_key(ip, ip.data)
                        if(key in _hitset and len(ip.data.data) > 1):
                            tcp = ip.data
                            _cached_tcp_data = _cache.get(key)
                            if(_cached_tcp_data is None):
                                _cache.put(key, tcp.data)
                            else:
                                if(tcp.data != _cached_tcp_data):
                                    if(len(tcp.data) > len(_cached_tcp_data)):
                                        #new data is longer, store that
                                        if(tcp.data[:len(_cached_tcp_data)] != _cached_tcp_data):
                                            injection_found(ip, tcp, _cached_tcp_data)
                                            injection_count+=1
                                        _cache.put(key, tcp.data)
                                    elif(len(tcp.data) < len(_cached_tcp_data)):
                                        if(tcp.data != _cached_tcp_data[:len(tcp.data)]):
                                            injection_found(ip, tcp, _cached_tcp_data)
                                            injection_count+=1
                                    else:
                                        injection_found(ip, tcp, _cached_tcp_data)
                                        injection_count+=1
            except dpkt.dpkt.NeedData:
                pass
    if(injection_count == 0):
        print(" - no injections")
Example #2
 def __init__(self, xom):
     from queue import Empty, PriorityQueue
     self.Empty = Empty
     self.xom = xom
     self.queue = PriorityQueue()
     self.error_queue = PriorityQueue()
     self.deleted = LRUCache(100)
     self.index_types = LRUCache(1000)
     self.errors = ReplicationErrors()
     self.importer = ImportFileReplica(self.xom, self.errors)
     self._replica_in_sync_cv = threading.Condition()
     self.last_added = None
     self.last_errored = None
     self.last_processed = None
Example #3
 def __init__(self, basedir, notify_on_commit, cache_size):
     self.basedir = basedir
     self.sqlpath = self.basedir.join(".sqlite")
     self._notify_on_commit = notify_on_commit
     self._changelog_cache = LRUCache(cache_size)  # is thread safe
     self.last_commit_timestamp = time.time()
     self.ensure_tables_exist()
Example #4
 def __init__(self, dict=None, **kwargs):
     self.data = {}
     self._lkpcache = LRUCache(1000)
     if dict is not None:
         self.update(dict)
     if len(kwargs):
         self.update(kwargs)
     self.listener_registered = False  # at least one listener registered
Example #5
 def __init__(self, request):
     self.request = request
     self.rasters = self.request.registry.settings["raster"]
     global _rasters
     if _rasters is None:
         cache_size = self.rasters.get('cache_size', 10)
         log.debug('initialize LRUCache with size %d' % cache_size)
         _rasters = LRUCache(cache_size)
Example #6
 def __init__(self,
              namespace=BASE_NAMESPACE,
              base_url=None,
              long_timeout=600):
     self.namespace = namespace
     self._client = TimeoutClient(version="1.15",
                                  base_url=base_url,
                                  long_timeout=long_timeout)
     self._image_cache = LRUCache(100)
Example #7
 def __init__(self):
     self.points_cache = LRUCache(1000)
     self.colors_cache = LRUCache(1000)
     self.features_cache = LRUCache(200)
     self.words_cache = LRUCache(200)
     self.masks_cache = LRUCache(1000)
     self.index_cache = LRUCache(200)
     self.masked_index_cache = LRUCache(200)
Example #8
 def __init__(self,
              namespace=BASE_NAMESPACE,
              base_url=None,
              long_timeout=600):
     self.namespace = namespace
     self._client = dockerpy_client(
         version="1.15",
         base_url=base_url,
         long_timeout=timedelta(seconds=long_timeout),
     )
     self._image_cache = LRUCache(100)
Example #9
 def __init__(self, app, transport):
     self.app = app
     self.transport = transport
     self.privkey = decode_hex(app.config['node']['privkey_hex'])
     self.pubkey = crypto.privtopub(self.privkey)
     self.nodes = LRUCache(2048)   # nodeid->Node,  fixme should be loaded
     self.this_node = Node(self.pubkey, self.transport.address)
     self.kademlia = KademliaProtocolAdapter(self.this_node, wire=self)
     this_enode = utils.host_port_pubkey_to_uri(self.app.config['discovery']['listen_host'],
                                                self.app.config['discovery']['listen_port'],
                                                self.pubkey)
     log.info('starting discovery proto', this_enode=this_enode)
Example #10
    def __init__(self):

        self.cli = False
        self.args = {}

        self.debug = False
        self.debug2 = False

        self.quiet = False

        self.profile = False

        self.components = OrderedDict()

        self.start_item = OrderedDict()
        self.start_nodes = []
        self.config_files = []
        self.included_files = []

        self.props = {}
        self.properties = self.props

        self.var = {}

        self.working_dir = os.getcwd()
        self.library_path = os.path.dirname(
            os.path.realpath(__file__)) + "/../../library"

        self.comp = Components(self)

        self._functions = {
            "text": functions,
            "xml": xmlfunctions,
            "datetime": datetime,
            "dt": datetime,
            "re": re,
            "sys": sys,
            "urllib": urllib,
            "random": random.Random()
        }
        self._globals = self._functions

        class Functions():
            pass

        self.f = Functions()
        for k, v in self._functions.items():
            setattr(self.f, k, v)

        self._compiled = LRUCache(512)  # TODO: Configurable
Example #11
 def __init__(self, basedir, notify_on_commit):
     self.basedir = basedir
     self._notify_on_commit = notify_on_commit
     self._changelog_cache = LRUCache(1000)  # is thread safe
     with self.get_sqlconn() as conn:
         row = conn.execute("select max(serial) from changelog").fetchone()
         serial = row[0]
         if serial is None:
             self.next_serial = 0
         else:
             self.next_serial = serial + 1
             # perform some crash recovery
             data = self.get_raw_changelog_entry(serial)
             changes, rel_renames = loads(data)
             check_pending_renames(str(self.basedir), rel_renames)
Example #12
    def __init__(self, blocklen=128, deg=1, *args, **kwargs):
        # block length in bits, term size.
        self.blocklen = blocklen

        # term degree
        self.deg = deg

        # evaluated base terms of deg=1
        self.base = []
        self.cur_tv_size = None
        self.cur_evals = None
        self.last_base_size = None

        # caches
        self.sim_norm_cache = LRUCache(64)
Example #13
    def __init__(self, name, server=None, create=False):
        self.server = server or Server()
        self.session = self.server.session
        self.name = name
        self.database = self.server.host + "/" + name

        self.cache = LRUCache(100)

        if create:
            self.create()
        else:
            response = self.session.head(self.database)
            if not response.ok:
                if response.status_code == 404:
                    raise excepts.DBNotExists
                raise Exception(response.status_code)
Example #14
    def __init__(self):

        self.cli = False
        self.args = {}

        self.debug = False
        self.debug2 = False

        self.quiet = False

        self.profile = False

        self.config_files = []

        self.start_node = None
        self.start_message = {
        }  # Bunch()  # {}   # TODO: Review if this is definitive, compare performance

        self.props = {}
        self.properties = self.props

        self.var = {}

        self.working_dir = os.getcwd()
        self.library_path = os.path.dirname(
            os.path.realpath(__file__)) + "/../../library"

        self._globals = {
            "text": functions,
            "xml": xmlfunctions,
            "cubetl": cubetl,
            "datetime": datetime,
            "re": re,
            "sys": sys,
            "urllib": urllib,
            "random": random.Random()
        }

        self._compiled = LRUCache(512)  # TODO: Configurable

        self.comp = Components(self)
Example #15
 def __init__(self, basedir, notify_on_commit, cache_size, settings=None):
     if settings is None:
         settings = {}
     for key in ("database", "host", "port", "unix_sock", "user", "password"):
         if key in settings:
             setattr(self, key, settings[key])
     self.basedir = basedir
     self._notify_on_commit = notify_on_commit
     self._changelog_cache = LRUCache(cache_size)  # is thread safe
     self.last_commit_timestamp = time.time()
     self.ensure_tables_exist()
     with self.get_connection() as conn:
         c = conn._sqlconn.cursor()
         c.execute("select max(serial) from changelog")
         row = c.fetchone()
         c.close()
         serial = row[0]
         if serial is None:
             self.next_serial = 0
         else:
             self.next_serial = serial + 1
Example #16
    def __init__(self, basedir, notify_on_commit, cache_size, settings=None):
        if settings is None:
            settings = {}
        for key in ("database", "host", "port", "unix_sock", "user",
                    "password"):
            if key in settings:
                setattr(self, key, settings[key])

        if any(key in settings for key in self.SSL_OPT_KEYS):
            self.ssl_context = ssl_context = ssl.create_default_context(
                cafile=settings.get('ssl_ca_certs'))

            if 'ssl_certfile' in settings:
                ssl_context.load_cert_chain(
                    settings['ssl_certfile'],
                    keyfile=settings.get('ssl_keyfile'))

            check_hostname = settings.get('ssl_check_hostname')
            if check_hostname is not None and not ensure_boolean(
                    check_hostname):
                ssl_context.check_hostname = False

        self.basedir = basedir
        self._notify_on_commit = notify_on_commit
        self._changelog_cache = LRUCache(cache_size)  # is thread safe
        self.last_commit_timestamp = time.time()
        self.ensure_tables_exist()
        with self.get_connection() as conn:
            c = conn._sqlconn.cursor()
            c.execute("select max(serial) from changelog")
            row = c.fetchone()
            c.close()
            serial = row[0]
            if serial is None:
                self.next_serial = 0
            else:
                self.next_serial = serial + 1
Example #17
def trim(flist, flowmaxbytes, trimmed_extension, preserve_times, post_process):
    cache = LRUCache(10000)
    trimmed_bytes = 0
    for pcap_file in flist:
        trimmed_file = pcap_file + trimmed_extension
        with open(pcap_file, "rb") as f:
            try:
                if pcap_file.endswith("pcapng"):
                    pcap = dpkt.pcapng.Reader(f)
                else:
                    pcap = dpkt.pcap.Reader(f)
                with open(trimmed_file, "wb") as trimmed:
                    if pcap_file.endswith("pcapng"):
                        pcap_out = dpkt.pcapng.Writer(trimmed)
                    else:
                        pcap_out = dpkt.pcap.Writer(trimmed)
                    for ts, buf in pcap:
                        fivetuple = get_fivetuple(buf, pcap, pcap_file)
                        flow_bytes = len(buf)  # avoid shadowing the builtin 'bytes'
                        prev = cache.get(fivetuple)
                        if prev is not None:
                            flow_bytes += prev
                        cache.put(fivetuple, flow_bytes)
                        if flow_bytes < flowmaxbytes:
                            pcap_out.writepkt(buf, ts)
                        else:
                            trimmed_bytes += len(buf)
            except dpkt.dpkt.NeedData:
                pass
            except ValueError:
                pass
        if os.path.exists(trimmed_file):
            if preserve_times:
                stat = os.stat(pcap_file)
                os.utime(trimmed_file, (stat.st_atime, stat.st_mtime))
            if post_process:
                post_process(pcap_file, trimmed_file)
    return trimmed_bytes
Example #18
def partly_distinct(iterable):
    """
    Filters items from iterable and **tries to return only distincts**.
    Keeps order.

    :param Iterable iterable: Something iterable we have to filter.

    >>> list(partly_distinct([1, 2, 3, 2, 1, 2, 3, 4]))
    [1, 2, 3, 4]

    .. note::
        Unlike :py:func:`distinct` it won't guarantee that all elements would
        be distinct. But if the stream has a rather small cardinality,
        this works well.

    .. note::
        The current implementation tracks up to 10000 distinct values.
        If your cardinality is bigger, there may be some duplicates.
    """
    cache = LRUCache(10000)
    for item in iterable:
        if not cache.get(item):
            cache.put(item, True)
            yield item
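For illustration, the caveat in the second note can be demonstrated by shrinking the cache. A minimal sketch, assuming a repoze.lru-style LRUCache (the tiny cache size and input are hypothetical):

def partly_distinct_small(iterable, cache_size=2):
    # Same algorithm as above, but with a tiny cache so eviction is visible.
    cache = LRUCache(cache_size)
    for item in iterable:
        if not cache.get(item):
            cache.put(item, True)
            yield item

print(list(partly_distinct_small([1, 2, 3, 1])))  # [1, 2, 3, 1] - the first 1 was evicted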
Example #19
 def __init__(self, app, root_dir, cache_max_age=3600):
     self.app = app
     self.cache_max_age = cache_max_age
     self.doc_root = self._adapt_path(root_dir)
     self.paths_cache = LRUCache(1024)
Example #20
    def initialize(self):

        if self._cache is None:

            self._cache = LRUCache(512)  # 512 max length
Example #21
class Block(object):
    index_key_lru = LRUCache(config.get().CACHE_WRITE_INDEX_KEY)

    def __init__(self, master, connection, n):
        self.master = master
        self.connection = connection
        self.item = next(self.master.query(n__eq=n, consistent=True))
        self.dp_writer = self.data_points_table = self.index_table = None
        # noinspection PyBroadException
        try:
            self.bind()
        except:
            pass  # TODO log

    def bind(self):
        """Bind to existing tables.
        """
        if self.data_points_name and self.index_name:
            data_points_table = Table(self.data_points_name,
                                      connection=self.connection)
            try:
                s1 = data_points_table.describe()['Table']['TableStatus']
            except:
                raise
            else:
                self.data_points_table = data_points_table
                self.dp_writer = TimedBatchTable(
                    self.data_points_table.batch_write())

            index_table = Table(self.index_name, connection=self.connection)
            try:
                s2 = index_table.describe()['Table']['TableStatus']
            except:
                raise
            else:
                self.index_table = index_table

            if s1 == s2:
                self.item['state'] = s1
            else:
                self.item['state'] = 'UNDEFINED'

        return self.state

    def create_tables(self):
        """Create tables.
        """
        if self.data_points_table and self.index_table:
            return self.state

        self.item['data_points_name'] = '%s_%s' % (config.table_name('dp'),
                                                   self.tbase)
        self.item['index_name'] = '%s_%s' % (config.table_name('dp_index'),
                                             self.tbase)

        try:
            self.bind()
        except:
            if not self.data_points_table:
                Table.create(self.data_points_name,
                             schema=[
                                 HashKey('domain_metric_tbase_tags'),
                                 RangeKey('toffset', data_type=NUMBER)
                             ],
                             throughput={
                                 'read':
                                 config.get().TP_READ_DATAPOINTS / BLOCKS,
                                 'write': config.get().TP_WRITE_DATAPOINTS
                             },
                             connection=self.connection)
            if not self.index_table:
                Table.create(
                    self.index_name,
                    schema=[HashKey('domain_metric'),
                            RangeKey('tbase_tags')],
                    throughput={
                        'read': config.get().TP_READ_INDEX_KEY / BLOCKS,
                        'write': config.get().TP_WRITE_INDEX_KEY
                    },
                    connection=self.connection)

            self.item['state'] = self.bind()

        self.item.save(overwrite=True)
        return self.state

    def replace(self, new_timestamp):
        """Replace this block with new block.
        """
        if block_pos(new_timestamp) != self.n:
            raise ValueError(
                'time %s (pos=%s) is not valid for block (pos=%s)' %
                (new_timestamp, block_pos(new_timestamp), self.n))
        if base_time(new_timestamp) == self.tbase:
            return self
        self.delete_tables(new_timestamp)
        return self

    def delete_tables(self, new_timestamp=None):
        """Delete the tables for this block.
        """
        if not new_timestamp:
            new_timestamp = self.tbase

        if self.data_points_table:
            # noinspection PyBroadException
            try:
                self.data_points_table.delete()
            except:
                pass
            self.data_points_table = None
            self.dp_writer = None
        if self.index_table:
            try:
                self.index_table.delete()
            except:
                pass
            self.index_table = None

        try:
            self.item.delete()
        except:
            pass

        self.item = Item(self.master, data=dict(self.item.items()))
        self.item['state'] = 'INITIAL'
        self.item['tbase'] = base_time(new_timestamp)
        self.item.save(overwrite=True)

        return self.state

    def turndown_tables(self):
        """Reduce write throughput for this block.
        """
        try:
            self.dp_writer.flush()
        except:
            pass
        self.dp_writer = None
        if self.data_points_table:
            self.data_points_table.update({
                'read':
                config.get().TP_READ_DATAPOINTS / BLOCKS,
                'write':
                1
            })
        if self.index_table:
            self.index_table.update({
                'read': config.get().TP_READ_INDEX_KEY / BLOCKS,
                'write': 1
            })

    @property
    def n(self):
        return self.item['n']

    @property
    def tbase(self):
        return self.item['tbase']

    @property
    def data_points_name(self):
        return self.item['data_points_name']

    @property
    def index_name(self):
        return self.item['index_name']

    @property
    def state(self):
        state = self.item['state']
        if state == 'INITIAL':
            return state
        s1 = self._calc_state(self.data_points_table.describe())
        s2 = self._calc_state(self.index_table.describe())
        if s1 != s2:
            return 'UNDEFINED'
        return s1

    def store_datapoint(self, timestamp, metric, tags, value, domain):
        """Store index key and datapoint value in tables.
        """
        #TODO: exception
        if not self.dp_writer:
            return

        key = util.hdata_points_key(domain, metric, timestamp, tags)
        self._store_index(key, timestamp, metric, tags, domain)
        return self.dp_writer.put_item(
            data={
                'domain_metric_tbase_tags': key,
                'toffset': util.offset_time(timestamp),
                'value': value
            })

    def query_index(self, domain, metric, start_time, end_time):
        """Query index for keys.
        """
        if not self.index_table:
            return []

        key = util.index_hash_key(domain, metric)
        time_range = [str(util.base_time(start_time)),
                      str(util.base_time(end_time) + 1)]
        return [
            IndexKey(k)
            for k in self.index_table.query(consistent=False,
                                            domain_metric__eq=key,
                                            tbase_tags__between=time_range)
        ]

    def query_datapoints(self,
                         index_key,
                         start_time,
                         end_time,
                         attributes=tuple(['value'])):
        """Query datapoints.
        """
        if not self.data_points_table: return []

        key = index_key.to_data_points_key()
        time_range = util.offset_range(index_key, start_time, end_time)
        attributes_ = ['toffset']
        attributes_.extend(attributes)
        return [
            value for value in self.data_points_table.query(
                consistent=False,
                reverse=True,
                attributes=attributes_,
                domain_metric_tbase_tags__eq=key,
                toffset__between=time_range)
        ]

    # noinspection PyMethodMayBeStatic
    def _calc_state(self, desc):
        desc = desc['Table']
        state = desc['TableStatus']
        if state == 'ACTIVE' and desc['ProvisionedThroughput'][
                'WriteCapacityUnits'] == 1:
            state = 'TURNED_DOWN'
        return state

    # noinspection PyMethodMayBeStatic
    def _store_cache(self, key, cache, table, data):
        if not cache.get(key):
            table.put_item(data=data(), overwrite=True)
            cache.put(key, 1)

    def _store_index(self, key, timestamp, metric, tags, domain):
        """Store an index key if not yet stored.
        """
        self._store_cache(
            key, Block.index_key_lru, self.index_table, lambda: {
                'domain_metric': util.index_hash_key(domain, metric),
                'tbase_tags': util.index_range_key(timestamp, tags)
            })

    def __str__(self):
        return str((self.n, self.state, self.tbase, self.data_points_name,
                    self.index_name))

    def __repr__(self):
        return str(self)
Example #22
    def initialize(self):

        if self._cache is None:

        self._cache = LRUCache(512)  # 512 max length
Example #23
 def __init__(self):
     self.word_cache = LRUCache(1)
     self.vlad_cache = LRUCache(1000)
Example #24
from twisted.internet.defer import (
    inlineCallbacks,
    returnValue,
)
from twisted.internet.error import (ConnectError, ConnectionRefusedError,
                                    UserError)
from twisted.python import log
from twisted.web.client import FileBodyProducer

from autopush.protocol import IgnoreBody
from autopush.router.interface import (
    RouterException,
    RouterResponse,
)

dead_cache = LRUCache(150)


def node_key(node_id):
    """Generate a node key for the dead node cache"""
    return node_id + "-%s" % int(time.time() / 3600)
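Illustrative use of the hourly key (the node id is hypothetical; the expiry behavior follows from the time bucket in node_key):

# Because node_key buckets by hour, a dead-node entry naturally stops
# matching once the hour rolls over - no explicit TTL is needed.
dead_cache.put(node_key("some-node-id"), True)       # mark node as dead
is_dead = dead_cache.get(node_key("some-node-id"))   # truthy within the same hour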


class SimpleRouter(object):
    """Implements :class:`autopush.router.interface.IRouter` for internal
    routing to an Autopush node"""
    def __init__(self, ap_settings, router_conf):
        """Create a new SimpleRouter"""
        self.ap_settings = ap_settings
        self.metrics = ap_settings.metrics
        self.conf = router_conf
Example #25
                            lastDoc.put(key, row)
                            row = row.copy()
                            row['t'] = now
                            coll.insert(row)

    influx.write_points(points, time_precision='ms')
    sock.setsockopt(bluez.SOL_HCI, bluez.HCI_FILTER, old_filter)


if __name__ == '__main__':
    mongoHost, myLocation = sys.argv[1:]

    influx = InfluxDBClient(mongoHost, 9060, 'root', 'root', 'beacon')
    client = MongoClient(mongoHost)
    coll = client['beacon']['data']

    dev_id = 0
    sock = bluez.hci_open_dev(dev_id)
    old_filter = sock.getsockopt(bluez.SOL_HCI, bluez.HCI_FILTER, 14)  # saved so it can be restored

    hci_enable_le_scan(sock)

    lastDoc = LRUCache(1000)  # (addr, evt_type) : data row
    while True:
        parse_events(sock,
                     10,
                     source=myLocation,
                     coll=coll,
                     influx=influx,
                     lastDoc=lastDoc)
Example #26
        See ``IArgumentType`` for argument and return type documentation.
        """

        value = BytesIO()
        for counter in count(0):
            chunk = strings.get("%s.%d" % (name, counter))
            if chunk is None:
                break
            value.write(chunk)
        strings[name] = value.getvalue()
        self.another_argument.fromBox(name, strings, objects, proto)


# The configuration and state can get pretty big, so don't want too many:
_wire_encode_cache = LRUCache(50)


def caching_wire_encode(obj):
    """
    Encode an object to bytes using ``wire_encode`` and cache the result,
    or return cached result if available.

    This relies on cached objects being immutable, or at least not being
    modified. Given our usage patterns that is currently the case and
    should continue to be, but worth keeping in mind.

    :param obj: Object to encode.
    :return: Resulting ``bytes``.
    """
    result = _wire_encode_cache.get(obj)
    if result is None:
        # Cache miss: encode and store (assumes wire_encode is defined
        # elsewhere in this module; the snippet was truncated here).
        result = wire_encode(obj)
        _wire_encode_cache.put(obj, result)
    return result
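A hedged usage sketch (the config object is illustrative; per the docstring it only needs to be hashable and effectively immutable):

encoded = caching_wire_encode(deployment_config)        # first call encodes and caches
encoded_again = caching_wire_encode(deployment_config)  # second call returns the cached bytes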
Example #27
 def __init__(self, name_cache_size=512):
     ServiceBase.__init__(self)
     self._sublist_lock = Lock()
     self._twitch = TwitchEngine()
     self._channel_name_cache = LRUCache(name_cache_size)
Example #28
    def __init__(self,
                 controller_scan=controller_scan,
                 directory=None,
                 always_scan=False,
                 register=True,
                 explicit=True):
        """Create a new Mapper instance

        All keyword arguments are optional.

        ``controller_scan``
            Function reference that will be used to return a list of
            valid controllers used during URL matching. If
            ``directory`` keyword arg is present, it will be passed
            into the function during its call. This option defaults to
            a function that will scan a directory for controllers.

            Alternatively, a list of controllers or None can be passed
            in which are assumed to be the definitive list of
            controller names valid when matching 'controller'.

        ``directory``
            Passed into controller_scan for the directory to scan. It
            should be an absolute path if using the default
            ``controller_scan`` function.

        ``always_scan``
            Whether or not the ``controller_scan`` function should be
            run during every URL match. This is typically a good idea
            during development so the server won't need to be restarted
            anytime a controller is added.

        ``register``
            Boolean used to determine if the Mapper should use
            ``request_config`` to register itself as the mapper. Since
            it's done on a thread-local basis, this is typically best
            used during testing though it won't hurt in other cases.

        ``explicit``
            Boolean used to determine if routes should be connected
            with implicit defaults of::

                {'controller':'content','action':'index','id':None}

            When set to True, these defaults will not be added to route
            connections and ``url_for`` will not use Route memory.

        Additional attributes that may be set after mapper
        initialization (ie, map.ATTRIBUTE = 'something'):

        ``encoding``
            Used to indicate alternative encoding/decoding systems to
            use with both incoming URLs, and during Route generation
            when passed a Unicode string. Defaults to 'utf-8'.

        ``decode_errors``
            How to handle errors in the encoding, generally ignoring
            any chars that don't convert should be sufficient. Defaults
            to 'ignore'.

        ``minimization``
            Boolean used to indicate whether or not Routes should
            minimize URLs and the generated URLs, or require every
            part where it appears in the path. Defaults to False.

        ``hardcode_names``
            Whether or not Named Routes result in the default options
            for the route being used *or* if they actually force url
            generation to use the route. Defaults to False.

        """
        self.matchlist = []
        self.maxkeys = {}
        self.minkeys = {}
        self.urlcache = LRUCache(1600)
        self._created_regs = False
        self._created_gens = False
        self._master_regexp = None
        self.prefix = None
        self.req_data = threading.local()
        self.directory = directory
        self.always_scan = always_scan
        self.controller_scan = controller_scan
        self._regprefix = None
        self._routenames = {}
        self.debug = False
        self.append_slash = False
        self.sub_domains = False
        self.sub_domains_ignore = []
        self.domain_match = r'[^\.\/]+?\.[^\.\/]+'
        self.explicit = explicit
        self.encoding = 'utf-8'
        self.decode_errors = 'ignore'
        self.hardcode_names = True
        self.minimization = False
        self.create_regs_lock = threading.Lock()
        if register:
            config = request_config()
            config.mapper = self
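A minimal usage sketch of the options described above, against the public routes Mapper API (route names and paths are illustrative; assumes a recent routes release):

from routes import Mapper

m = Mapper(explicit=True, register=False)
m.connect('home', '/', controller='main', action='index')
m.connect('entry', '/entry/{id}', controller='entry', action='show')

print(m.match('/entry/42'))  # {'controller': 'entry', 'action': 'show', 'id': '42'}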
Example #29
 def __init__(self):
     if hasattr(self, '_init'):
         return
     self._init = True
     self.config = RUtils.config.RConfig()
     self._cache = LRUCache(self.config.session_cache_size)
Example #30
 def __init__(self, cache_size):
     self.lru = LRUCache(cache_size)
     self.hits = 0
     self.misses = 0
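The hit/miss counters suggest a lookup wrapper around the cache. A plausible companion method (hypothetical, not part of the original snippet), assuming a repoze.lru-style get() that returns None on a miss:

 def get(self, key):
     value = self.lru.get(key)
     if value is None:
         self.misses += 1   # not cached (or the cached value was None)
     else:
         self.hits += 1
     return value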