Example #1
    def test05TimeBasedCache(self):
        original_time = time.time

        # Mock the time.time function
        time.time = lambda: 100

        key = "key"

        tested_cache = utils.TimeBasedCache(max_age=50)

        # Stop the housekeeper thread - we test it explicitly here
        tested_cache.exit = True
        tested_cache.Put(key, "hello")

        self.assertEqual(tested_cache.Get(key), "hello")

        # Fast forward time
        time.time = lambda: 160

        # Force the housekeeper to run
        tested_cache.house_keeper_thread.target()

        # This should now be expired
        self.assertRaises(KeyError, tested_cache.Get, key)

        # Restore the original time.time
        time.time = original_time
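
Patching time.time by hand like this is fragile: if an assertion above fails, the real time.time is never restored and later tests see a frozen clock. The test_lib.FakeTime helper used in Example #7 avoids this; below is a minimal sketch of such a context manager (an approximation, not GRR's actual implementation):

import contextlib
import time


@contextlib.contextmanager
def FakeTime(fake_seconds):
    """Temporarily replaces time.time with a constant clock."""
    original_time = time.time
    time.time = lambda: fake_seconds
    try:
        yield
    finally:
        # Restored even if the test body raises.
        time.time = original_time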
Example #2
    def testWeakRefSet(self):

        c1 = utils.TimeBasedCache()
        c2 = utils.TimeBasedCache()

        self.assertIn(c1, utils.TimeBasedCache.active_caches)
        self.assertIn(c2, utils.TimeBasedCache.active_caches)

        cache_count = len(utils.TimeBasedCache.active_caches)

        del c1

        # This should work even though the weak ref to c1 should be gone.
        utils.TimeBasedCache.house_keeper_thread.target()

        # Make sure it's actually gone.
        self.assertLess(len(utils.TimeBasedCache.active_caches), cache_count)
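
The assertions work because active_caches holds only weak references: once the last strong reference to c1 is deleted, the cache can drop out of the registry while c2 survives. The same behavior can be reproduced with weakref.WeakSet; a minimal sketch of the pattern (GRR's actual bookkeeping may differ):

import weakref


class Cache(object):
    # Class-level registry that does not keep instances alive.
    active_caches = weakref.WeakSet()

    def __init__(self):
        Cache.active_caches.add(self)


c1 = Cache()
c2 = Cache()
assert len(Cache.active_caches) == 2

del c1  # Drop the only strong reference.
# In CPython, refcounting frees c1 immediately, shrinking the registry.
assert len(Cache.active_caches) == 1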
Example #3
  def __init__(self, **kwargs):
    if ContainerFileTable.content_cache is None:
      ContainerFileTable.content_cache = utils.TimeBasedCache()

    super(ContainerFileTable, self).__init__(**kwargs)

    self.AddColumn(semantic.RDFValueColumn(
        "Icon", renderer=semantic.IconRenderer, width="40px"))
    self.AddColumn(semantic.AttributeColumn("subject", width="100%"))
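
As Example #1 shows, TimeBasedCache.Get raises KeyError on a miss or an expired entry, so code reading content_cache typically recomputes on failure. A hypothetical consumer sketch (FetchRows is an invented placeholder for the expensive work being cached):

  def GetRows(self, key):
    try:
      return ContainerFileTable.content_cache.Get(key)
    except KeyError:
      rows = self.FetchRows(key)  # Hypothetical expensive computation.
      ContainerFileTable.content_cache.Put(key, rows)
      return rows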
Example #4
  def __init__(self, **kwargs):
    if EventTable.content_cache is None:
      EventTable.content_cache = utils.TimeBasedCache()
    super(EventTable, self).__init__(**kwargs)
    self.AddColumn(semantic.AttributeColumn("event.id"))
    self.AddColumn(semantic.AttributeColumn("timestamp"))
    self.AddColumn(semantic.AttributeColumn("subject"))
    self.AddColumn(semantic.RDFValueColumn(
        "Message", renderer=EventMessageRenderer, width="100%"))
Example #5
    def __init__(self, certificate=None, private_key=None):
        """Creates a communicator.

        Args:
          certificate: Our own certificate in string form (as PEM).
          private_key: Our own private key in string form (as PEM).
        """
        # A cache of cipher objects.
        self.cipher_cache = utils.TimeBasedCache(max_age=24 * 3600)
        self.private_key = private_key
        self.certificate = certificate

        # A cache for encrypted ciphers
        self.encrypted_cipher_cache = utils.FastStore(max_size=50000)
Example #6
    def __init__(self,
                 queues=queues_config.WORKER_LIST,
                 threadpool_prefix="grr_threadpool",
                 threadpool_size=None,
                 token=None):
        """Constructor.

        Args:
          queues: The queues we use to fetch new messages from.
          threadpool_prefix: A name for the thread pool used by this worker.
          threadpool_size: The number of workers to start in this thread pool.
          token: The token to use for the worker.

        Raises:
          RuntimeError: If the token is not provided.
        """
        logging.info("started worker with queues: " + str(queues))
        self.queues = queues

        # self.queued_flows is a timed cache of locked flows. If this worker
        # encounters a lock failure on a flow, it will not attempt to grab this flow
        # until the timeout.
        self.queued_flows = utils.TimeBasedCache(max_size=10, max_age=60)

        if token is None:
            raise RuntimeError("A valid ACLToken is required.")

        # Make the thread pool a global so it can be reused for all workers.
        if self.__class__.thread_pool is None:
            if threadpool_size is None:
                threadpool_size = config.CONFIG["Threadpool.size"]

            self.__class__.thread_pool = threadpool.ThreadPool.Factory(
                threadpool_prefix, min_threads=2, max_threads=threadpool_size)

            self.__class__.thread_pool.Start()

        self.token = token
        self.last_active = 0

        # Well known flows are just instantiated.
        self.well_known_flows = flow.WellKnownFlow.GetAllWellKnownFlows(
            token=token)
        self.flow_lease_time = config.CONFIG["Worker.flow_lease_time"]
        self.well_known_flow_lease_time = config.CONFIG[
            "Worker.well_known_flow_lease_time"]
Example #7
    def test05TimeBasedCache(self):

        key = "key"
        tested_cache = utils.TimeBasedCache(max_age=50)
        with test_lib.FakeTime(100):

            # Stop the housekeeper thread - we test it explicitly here
            tested_cache.exit = True
            tested_cache.Put(key, "hello")

            self.assertEqual(tested_cache.Get(key), "hello")

        with test_lib.FakeTime(160):

            # Force the housekeeper to run
            tested_cache.house_keeper_thread.target()

            # This should now be expired
            self.assertRaises(KeyError, tested_cache.Get, key)
Example #8
    def __init__(self,
                 queue=None,
                 threadpool_prefix="grr_threadpool",
                 threadpool_size=None,
                 token=None):
        """Constructor.

        Args:
          queue: The queue we use to fetch new messages from.
          threadpool_prefix: A name for the thread pool used by this worker.
          threadpool_size: The number of workers to start in this thread pool.
          token: The token to use for the worker.

        Raises:
          RuntimeError: If the token is not provided.
        """
        self.queue = queue
        self.queued_flows = utils.TimeBasedCache(max_size=10, max_age=60)

        if token is None:
            raise RuntimeError("A valid ACLToken is required.")

        # Make the thread pool a global so it can be reused for all workers.
        if GRRWorker.thread_pool is None:
            if threadpool_size is None:
                threadpool_size = config_lib.CONFIG["Threadpool.size"]

            GRRWorker.thread_pool = threadpool.ThreadPool.Factory(
                threadpool_prefix, min_threads=2, max_threads=threadpool_size)

            GRRWorker.thread_pool.Start()

        self.token = token
        self.last_active = 0

        # Well known flows are just instantiated.
        self.well_known_flows = flow.WellKnownFlow.GetAllWellKnownFlows(
            token=token)
        self.lease_time = config_lib.CONFIG["Worker.flow_lease_time"]
Example #9
    def testTimeBasedCacheSingleThread(self):

        utils.TimeBasedCache()
        num_threads = threading.active_count()
        utils.TimeBasedCache()
        self.assertEqual(threading.active_count(), num_threads)
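
The test passes because all TimeBasedCache instances share one class-level housekeeper thread: the first constructor starts it, and later constructors reuse it (which is also why Example #2 has the housekeeper sweep active_caches). A minimal sketch of that design, assuming a class attribute guards the thread (a simplification, not GRR's actual code):

import threading
import time


class Cache(object):
    house_keeper_thread = None

    def __init__(self):
        if Cache.house_keeper_thread is None:
            # Started once; every later instance reuses the same thread.
            thread = threading.Thread(target=Cache._HouseKeeper)
            thread.daemon = True
            thread.start()
            Cache.house_keeper_thread = thread

    @staticmethod
    def _HouseKeeper():
        while True:
            time.sleep(1)
            # Expire stale entries in every registered cache here.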
Example #10
import logging
import os
import platform
import re
import sys
import threading

from grr.client import client_utils
from grr.client import vfs
from grr.lib import utils
from grr.lib.rdfvalues import client
from grr.lib.rdfvalues import paths

# File handles are cached here. They expire after five minutes (max_age=300
# seconds) so we don't keep files locked on the client.
FILE_HANDLE_CACHE = utils.TimeBasedCache(max_age=300)


class LockedFileHandle(object):
    """An object which encapsulates access to a file."""
    def __init__(self, filename, mode="rb"):
        self.lock = threading.RLock()
        self.fd = open(filename, mode)
        self.filename = filename

    def Seek(self, offset, whence=0):
        self.fd.seek(offset, whence)

    def Read(self, length):
        return self.fd.read(length)
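
A plausible way this module could use FILE_HANDLE_CACHE is to share one open handle per filename, relying on the RLock to serialize the seek/read pair across threads. A sketch under those assumptions (ReadCached is an invented helper, not this module's API):

def ReadCached(filename, offset, length):
    """Illustrative helper (not part of this module): cached, locked reads."""
    try:
        fh = FILE_HANDLE_CACHE.Get(filename)
    except KeyError:
        fh = LockedFileHandle(filename, mode="rb")
        # The entry expires after max_age=300 seconds, releasing the lock.
        FILE_HANDLE_CACHE.Put(filename, fh)

    # Several threads may share one cached handle; the lock keeps the
    # seek/read pair atomic.
    with fh.lock:
        fh.Seek(offset)
        return fh.Read(length)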
Example #11
            ("/bin/rm", ["-f", "/tmp/ss.dat"]),
            ("/usr/sbin/system_profiler", ["-xml", "SPHardwareDataType"]),
            ("/usr/bin/who", []),
            ("/usr/bin/last", []),
        ]
    else:
        whitelist = []

    for (allowed_cmd, allowed_args) in whitelist:
        if cmd == allowed_cmd and args == allowed_args:
            return True

    return False


LOG_THROTTLE_CACHE = utils.TimeBasedCache(max_size=10, max_age=60 * 60)


def ErrorOnceAnHour(msg, *args, **kwargs):
    """Logging helper function mirroring logging but reduces spam. Read notes.

  Args:
    msg: The message.
    *args: Passthrough to logging function.
    **kwargs: Passthrough to logging function.

  Note:
    The same msg will only be logged once per hour. Note that args will be
    ignored so the following will only output one line.
      ThrottledLog(logging.WARN, "oh no %s", "joe")
      ThrottledLog(logging.WARN, "oh no %s", "bob")
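
A body consistent with this docstring keys LOG_THROTTLE_CACHE on the message itself: the first call logs and records msg, and repeats hit the cache until the one-hour max_age expires. A sketch of the mechanism (the real body may differ in details):

def ErrorOnceAnHour(msg, *args, **kwargs):
    try:
        # A hit means msg was already logged within the last hour.
        LOG_THROTTLE_CACHE.Get(msg)
        return
    except KeyError:
        logging.error(msg, *args, **kwargs)
        LOG_THROTTLE_CACHE.Put(msg, 1)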
Example #12
#!/usr/bin/env python
"""This file implements a VFS abstraction on the client."""


from grr.client import client_utils
from grr.lib import config_lib
from grr.lib import registry
from grr.lib import utils
from grr.lib.rdfvalues import paths as rdf_paths

# A central cache for VFS handlers. This can be used to keep objects alive
# for a limited time.
DEVICE_CACHE = utils.TimeBasedCache()


class VFSHandler(object):
  """Base class for handling objects in the VFS."""
  supported_pathtype = -1

  # Should this handler be auto-registered?
  auto_register = False

  size = 0
  offset = 0

  # This is the VFS path to this specific handler.
  path = "/"

  # This will be set by the VFSOpen factory to the pathspec of the final
  # destination of this handler. This pathspec will be case corrected and
  # updated to reflect any potential recursion.
Example #13
  def __init__(self, **kwargs):
    super(AbstractFileTable, self).__init__(**kwargs)

    if AbstractFileTable.content_cache is None:
      AbstractFileTable.content_cache = utils.TimeBasedCache()