def test_multiple_endpoints(self):
        raise SkipTest("This test doesn't apply to Server Density.")
        config = {
            "endpoints": {
                "https://app.datadoghq.com": ['api_key'],
                "https://app.example.com":  ['api_key']
            },
            "dd_url": "https://app.datadoghq.com",
            "api_key": 'api_key',
            "use_sd": True
        }
        app = Application()
        app.skip_ssl_validation = False
        app._agentConfig = config
        app.use_simple_http_client = True
        trManager = TransactionManager(timedelta(seconds=0), MAX_QUEUE_SIZE,
                                       THROTTLING_DELAY, max_endpoint_errors=100)
        trManager._flush_without_ioloop = True  # Use blocking API to emulate tornado ioloop
        MetricTransaction._trManager = trManager
        MetricTransaction.set_application(app)
        MetricTransaction.set_endpoints(config['endpoints'])

        MetricTransaction({}, {})
        # 2 endpoints = 2 transactions
        self.assertEqual(len(trManager._transactions), 2)
        self.assertEqual(trManager._transactions[0]._endpoint, 'https://app.datadoghq.com')
        self.assertEqual(trManager._transactions[1]._endpoint, 'https://app.example.com')
Example #2
    def testCustomEndpoint(self):
        MetricTransaction._endpoints = []

        config = {
            "endpoints": {"https://foo.bar.com": ["foo"]},
            "dd_url": "https://foo.bar.com",
            "api_key": "foo",
            "use_dd": True
        }

        app = Application()
        app.skip_ssl_validation = False
        app._agentConfig = config
        app.use_simple_http_client = True

        trManager = TransactionManager(timedelta(seconds=0), MAX_QUEUE_SIZE,
                                       THROTTLING_DELAY, max_endpoint_errors=100)
        trManager._flush_without_ioloop = True  # Use blocking API to emulate tornado ioloop
        MetricTransaction._trManager = trManager
        MetricTransaction.set_application(app)
        MetricTransaction.set_endpoints(config['endpoints'])

        transaction = MetricTransaction(None, {}, "msgtype")
        endpoints = []
        for endpoint in transaction._endpoints:
            for api_key in transaction._endpoints[endpoint]:
                endpoints.append(transaction.get_url(endpoint, api_key))
        expected = ['https://foo.bar.com/intake/msgtype?api_key=foo']
        self.assertEqual(endpoints, expected, (endpoints, expected))
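The URL built by get_url above is simple enough to sketch. This is a hypothetical reconstruction (the real method lives on MetricTransaction; _msg_type is an assumed attribute holding the "msgtype" constructor argument):

    def get_url(self, endpoint, api_key):
        # e.g. https://foo.bar.com/intake/msgtype?api_key=foo
        return "{0}/intake/{1}?api_key={2}".format(endpoint, self._msg_type, api_key)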
Example #3
    def test_endpoint_error(self):
        trManager = TransactionManager(timedelta(seconds=0), MAX_QUEUE_SIZE,
                                       timedelta(seconds=0), max_endpoint_errors=2)

        step = 10
        oneTrSize = (MAX_QUEUE_SIZE / step) - 1
        for i in xrange(step):
            trManager.append(memTransaction(oneTrSize, trManager))

        trManager.flush()

        # There should be exactly step transaction in the list,
        # and only 2 of them with a flush count of 1
        self.assertEqual(len(trManager._transactions), step)
        flush_count = 0
        for tr in trManager._transactions:
            flush_count += tr._flush_count
        self.assertEqual(flush_count, 2)

        # If we retry to flush, two OTHER transactions should be tried
        trManager.flush()

        self.assertEqual(len(trManager._transactions), step)
        flush_count = 0
        for tr in trManager._transactions:
            flush_count += tr._flush_count
            self.assertIn(tr._flush_count, [0, 1])
        self.assertEqual(flush_count, 4)

        # Finally when it's possible to flush, everything should go smoothly
        for tr in trManager._transactions:
            tr.is_flushable = True

        trManager.flush()
        self.assertEqual(len(trManager._transactions), 0)
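The memTransaction helper used above is not part of this listing. A minimal sketch of what such a test double could look like, assuming the agent's Transaction base class and the manager's tr_success/tr_error/flush_next callbacks (a hypothetical reconstruction):

class memTransaction(Transaction):
    """Test double: a fixed-size transaction whose flushability is toggled by the test."""

    def __init__(self, size, manager):
        Transaction.__init__(self)
        self._trManager = manager
        self._size = size
        self._flush_count = 0
        self.is_flushable = False

    def flush(self):
        # Count every flush attempt; succeed only once the test allows it.
        self._flush_count += 1
        if self.is_flushable:
            self._trManager.tr_success(self)
        else:
            self._trManager.tr_error(self)
        self._trManager.flush_next()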
Example #4
    def __init__(self,
                 port,
                 agent_config,
                 watchdog=True,
                 skip_ssl_validation=False,
                 use_simple_http_client=False):
        self._port = int(port)
        self._agentConfig = agent_config
        self._metrics = {}
        MetricTransaction.set_application(self)
        MetricTransaction.set_endpoints(MonAPI(agent_config['Api']))
        self._tr_manager = TransactionManager(MAX_WAIT_FOR_REPLAY,
                                              MAX_QUEUE_SIZE, THROTTLING_DELAY)
        MetricTransaction.set_tr_manager(self._tr_manager)

        self._watchdog = None
        self.skip_ssl_validation = skip_ssl_validation or agent_config.get(
            'skip_ssl_validation', False)
        self.use_simple_http_client = use_simple_http_client
        if self.skip_ssl_validation:
            log.info(
                "Skipping SSL hostname validation, useful when using a transparent proxy"
            )

        if watchdog:
            watchdog_timeout = TRANSACTION_FLUSH_INTERVAL * WATCHDOG_INTERVAL_MULTIPLIER
            self._watchdog = Watchdog(watchdog_timeout,
                                      max_mem_mb=agent_config.get(
                                          'limit_memory_consumption', None))
Example #5
@contextmanager  # from contextlib; this generator yields once and is entered as a "with" context
def new_revision(
    session: Session,
    tm: TransactionManager,
    content: Content,
    force_create_new_revision: bool = False,
) -> Content:
    """
    Prepare context to update a Content. It will add a new updatable revision
    to the content.
    :param session: Database _session
    :param tm: TransactionManager
    :param content: Content instance to update
    :param force_create_new_revision: Decide if new_rev should or should not
    be forced.
    :return:
    """
    with session.no_autoflush:
        try:
            if force_create_new_revision \
                    or inspect(content.revision).has_identity:
                content.new_revision()
            RevisionsIntegrity.add_to_updatable(content.revision)
            yield content
        except Exception as e:
            # INFO - GM - 14-11-2018 - roll back the session and renew the
            # transaction when an error happens. This avoids bad session
            # data, like a new "temporary" revision, being added when a
            # problem occurs.
            session.rollback()
            tm.abort()
            tm.begin()
            raise e
        finally:
            RevisionsIntegrity.remove_from_updatable(content.revision)
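A minimal usage sketch, assuming new_revision is entered as a context manager (session, tm and content are placeholders created elsewhere):

with new_revision(session=session, tm=tm, content=content) as updated:
    # Changes made here land on the freshly created updatable revision.
    updated.label = "renamed document"
session.flush()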
Example #6
    def test_multiple_endpoints(self):
        config = {
            "endpoints": {
                "https://app.datadoghq.com": ['api_key'],
                "https://app.example.com": ['api_key']
            },
            "dd_url": "https://app.datadoghq.com",
            "api_key": 'api_key',
            "use_dd": True
        }
        app = Application()
        app.skip_ssl_validation = False
        app._agentConfig = config
        app.use_simple_http_client = True
        trManager = TransactionManager(timedelta(seconds=0),
                                       MAX_QUEUE_SIZE,
                                       THROTTLING_DELAY,
                                       max_endpoint_errors=100)
        trManager._flush_without_ioloop = True  # Use blocking API to emulate tornado ioloop
        MetricTransaction._trManager = trManager
        MetricTransaction.set_application(app)
        MetricTransaction.set_endpoints(config['endpoints'])

        MetricTransaction({}, {})
        # 2 endpoints = 2 transactions
        self.assertEqual(len(trManager._transactions), 2)
        self.assertEqual(trManager._transactions[0]._endpoint,
                         'https://app.datadoghq.com')
        self.assertEqual(trManager._transactions[1]._endpoint,
                         'https://app.example.com')
Example #7
    def test_proxy(self):
        config = {
            "endpoints": {"https://app.datadoghq.com": ["foo"]},
            "proxy_settings": {
                "host": "localhost",
                "port": PROXY_PORT,
                "user": None,
                "password": None
            }
        }

        app = Application()
        app.skip_ssl_validation = True
        app._agentConfig = config

        trManager = TransactionManager(MAX_WAIT_FOR_REPLAY, MAX_QUEUE_SIZE, THROTTLING_DELAY)
        trManager._flush_without_ioloop = True  # Use blocking API to emulate tornado ioloop
        CustomAgentTransaction.set_tr_manager(trManager)
        app.use_simple_http_client = False # We need proxy capabilities
        app.agent_dns_caching = False
        # _test is the instance of this class. It is needed to call the method stop() and deal with the asynchronous
        # calls as described here : http://www.tornadoweb.org/en/stable/testing.html
        CustomAgentTransaction._test = self
        CustomAgentTransaction.set_application(app)
        CustomAgentTransaction.set_endpoints(config['endpoints'])

        CustomAgentTransaction('body', {}, "") # Create and flush the transaction
        self.wait()
        del CustomAgentTransaction._test
        access_log = self.docker_client.exec_start(
            self.docker_client.exec_create(CONTAINER_NAME, 'cat /var/log/squid/access.log')['Id'])
        self.assertTrue("CONNECT" in access_log) # There should be an entry in the proxy access log
        self.assertEqual(len(trManager._endpoints_errors), 1)  # There should be an error since we gave a bogus api_key
Example #8
    def testCustomEndpoint(self):
        MetricTransaction._endpoints = []

        config = {
            "sd_url": "https://foo.bar.com",
            "agent_key": "foo",
            "use_dd": True
        }

        app = Application()
        app.skip_ssl_validation = False
        app._agentConfig = config
        app.use_simple_http_client = True

        trManager = TransactionManager(timedelta(seconds=0), MAX_QUEUE_SIZE, THROTTLING_DELAY)
        trManager._flush_without_ioloop = True  # Use blocking API to emulate tornado ioloop
        MetricTransaction._trManager = trManager
        MetricTransaction.set_application(app)
        MetricTransaction.set_endpoints()

        transaction = MetricTransaction(None, {}, "msgtype")
        endpoints = [transaction.get_url(e) for e in transaction._endpoints]
        # Direct metric submission is not being enabled.
        #expected = ['https://foo.bar.com/intake/msgtype?agent_key=foo']
        expected = []
        self.assertEqual(endpoints, expected, (endpoints, expected))
Example #9
    def test_proxy(self):
        config = {
            "endpoints": {"https://app.datadoghq.com": ["foo"]},
            "proxy_settings": {
                "host": "localhost",
                "port": PROXY_PORT,
                "user": None,
                "password": None
            }
        }

        app = Application()
        app.skip_ssl_validation = True
        app._agentConfig = config

        trManager = TransactionManager(MAX_WAIT_FOR_REPLAY, MAX_QUEUE_SIZE, THROTTLING_DELAY)
        trManager._flush_without_ioloop = True  # Use blocking API to emulate tornado ioloop
        CustomAgentTransaction.set_tr_manager(trManager)
        app.use_simple_http_client = False # We need proxy capabilities
        app.agent_dns_caching = False
        # _test is the instance of this class. It is needed to call the method stop() and deal with the asynchronous
        # calls as described here : http://www.tornadoweb.org/en/stable/testing.html
        CustomAgentTransaction._test = self
        CustomAgentTransaction.set_application(app)
        CustomAgentTransaction.set_endpoints(config['endpoints'])

        CustomAgentTransaction('body', {}, "") # Create and flush the transaction
        self.wait()
        del CustomAgentTransaction._test
        access_log = self.docker_client.exec_start(
            self.docker_client.exec_create(CONTAINER_NAME, 'cat /var/log/squid/access.log')['Id'])
        self.assertTrue("CONNECT" in access_log) # There should be an entry in the proxy access log
        self.assertEqual(len(trManager._endpoints_errors), 1)  # There should be an error since we gave a bogus api_key
Example #10
    def testCustomEndpoint(self):
        MetricTransaction._endpoints = []

        config = {
            "dd_url": "https://foo.bar.com",
            "api_key": "foo",
            "use_dd": True
        }

        app = Application()
        app.skip_ssl_validation = False
        app._agentConfig = config
        app.use_simple_http_client = True

        trManager = TransactionManager(timedelta(seconds=0), MAX_QUEUE_SIZE,
                                       THROTTLING_DELAY)
        trManager._flush_without_ioloop = True  # Use blocking API to emulate tornado ioloop
        MetricTransaction._trManager = trManager
        MetricTransaction.set_application(app)
        MetricTransaction.set_endpoints()

        transaction = MetricTransaction(None, {}, "msgtype")
        endpoints = [transaction.get_url(e) for e in transaction._endpoints]
        expected = ['https://foo.bar.com/intake/msgtype?api_key=foo']
        self.assertEqual(endpoints, expected, (endpoints, expected))
Example #11
    def checkResolve(self, resolvable=True):
        db = DB(self._storage)

        t1 = TransactionManager()
        c1 = db.open(t1)
        o1 = c1.root()['p'] = (PCounter if resolvable else PCounter2)()
        o1.inc()
        t1.commit()

        t2 = TransactionManager()
        c2 = db.open(t2)
        o2 = c2.root()['p']
        o2.inc(2)
        t2.commit()

        o1.inc(3)
        try:
            t1.commit()
        except ConflictError as err:
            self.assertIn(".PCounter2,", str(err))
            self.assertEqual(o1._value, 3)
        else:
            self.assertTrue(resolvable, "Expected ConflictError")
            self.assertEqual(o1._value, 6)

        t2.begin()
        self.assertEqual(o2._value, o1._value)

        db.close()
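For context, the PCounter/PCounter2 fixtures exercised above look roughly like this, based on the ZODB conflict-resolution tests: PCounter merges concurrent increments in _p_resolveConflict, while PCounter2 refuses to resolve, which is why the test expects a ConflictError naming it.

from persistent import Persistent
from ZODB.POSException import ConflictError

class PCounter(Persistent):
    _value = 0

    def inc(self, n=1):
        self._value += n

    def _p_resolveConflict(self, oldState, savedState, newState):
        # Apply both concurrent increments relative to the common ancestor.
        savedDiff = savedState['_value'] - oldState['_value']
        newDiff = newState['_value'] - oldState['_value']
        oldState['_value'] += savedDiff + newDiff
        return oldState

class PCounter2(PCounter):
    def _p_resolveConflict(self, oldState, savedState, newState):
        raise ConflictError()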
Example #12
    def testCustomEndpoint(self):
        MetricTransaction._endpoints = []

        config = {
            "endpoints": {
                "https://foo.bar.com": ["foo"]
            },
            "dd_url": "https://foo.bar.com",
            "api_key": "foo",
            "use_dd": True
        }

        app = Application()
        app.skip_ssl_validation = False
        app.agent_dns_caching = False
        app._agentConfig = config
        app.use_simple_http_client = True

        trManager = TransactionManager(timedelta(seconds=0),
                                       MAX_QUEUE_SIZE,
                                       THROTTLING_DELAY,
                                       max_endpoint_errors=100)
        trManager._flush_without_ioloop = True  # Use blocking API to emulate tornado ioloop
        MetricTransaction._trManager = trManager
        MetricTransaction.set_application(app)
        MetricTransaction.set_endpoints(config['endpoints'])

        transaction = MetricTransaction(None, {}, "msgtype")
        endpoints = []
        for endpoint in transaction._endpoints:
            for api_key in transaction._endpoints[endpoint]:
                endpoints.append(transaction.get_url(endpoint, api_key))
        expected = ['https://foo.bar.com/intake/msgtype?api_key=foo']
        self.assertEqual(endpoints, expected, (endpoints, expected))
Example #13
    def exec_eager(self, *args, **kwargs):
        """Run transaction aware task in eager mode."""
        # We are run in a post-commit hook, so there is no transaction manager available
        tm = TransactionManager()
        # Do not attempt any transaction retries in eager mode
        tm.retry_attempt_count = 1
        self.request.update(request=self.make_faux_request(tm=tm))
        return self.run(*args, **kwargs)
Example #14
    def test_endpoint_error(self):
        trManager = TransactionManager(timedelta(seconds=0), MAX_QUEUE_SIZE,
                                       timedelta(seconds=0), max_endpoint_errors=2)

        step = 10
        oneTrSize = (MAX_QUEUE_SIZE / step) - 1
        for i in xrange(step):
            trManager.append(memTransaction(oneTrSize, trManager))

        trManager.flush()

        # There should be exactly step transaction in the list,
        # and only 2 of them with a flush count of 1
        self.assertEqual(len(trManager._transactions), step)
        flush_count = 0
        for tr in trManager._transactions:
            flush_count += tr._flush_count
        self.assertEqual(flush_count, 2)

        # If we retry to flush, two OTHER transactions should be tried
        trManager.flush()

        self.assertEqual(len(trManager._transactions), step)
        flush_count = 0
        for tr in trManager._transactions:
            flush_count += tr._flush_count
            self.assertIn(tr._flush_count, [0, 1])
        self.assertEqual(flush_count, 4)

        # Finally when it's possible to flush, everything should go smoothly
        for tr in trManager._transactions:
            tr.is_flushable = True

        trManager.flush()
        self.assertEqual(len(trManager._transactions), 0)
Example #15
def test_zbigarray_vs_cache_invalidation():
    root = testdb.dbopen()
    conn = root._p_jar
    db   = conn.db()
    conn.close()
    del root, conn

    tm1 = TransactionManager()
    tm2 = TransactionManager()

    conn1 = db.open(transaction_manager=tm1)
    root1 = conn1.root()

    # setup zarray
    root1['zarray3'] = a1 = ZBigArray((10,), uint8)
    tm1.commit()

    # set zarray initial data
    a1[0:1] = [1]           # XXX -> [0] = 1  after BigArray can
    tm1.commit()


    # read zarray in conn2
    conn2 = db.open(transaction_manager=tm2)
    root2 = conn2.root()

    a2 = root2['zarray3']
    assert a2[0:1] == [1]   # read data in conn2 + make sure read correctly
                            # XXX -> [0] == 1  after BigArray can

    # now zarray content is both in ZODB.Connection cache and in _ZBigFileH
    # cache for each conn1 and conn2. Modify data in conn1 and make sure it
    # fully propagate to conn2.

    a1[0:1] = [2]           # XXX -> [0] = 2  after BigArray can
    tm1.commit()

    # still should be read as old value in conn2
    assert a2[0:1]  == [1]
    # and even after virtmem pages reclaim
    # ( verifies that _p_invalidate() in ZBlk.loadblkdata() does not lead to
    #   reloading data as updated )
    ram_reclaim_all()
    assert a2[0:1]  == [1]

    tm2.commit()            # transaction boundary for t2

    # data from tm1 should propagate -> ZODB -> ram pages for _ZBigFileH in conn2
    assert a2[0] == 2

    conn2.close()
    del conn2, root2
    dbclose(root1)
Example #16
    def testEndpoints(self):
        """Tests that the logic behind the agent version specific endpoints is ok.
        Also tests that these endpoints actually exist.
        """
        MetricTransaction._endpoints = []

        config = {
            "dd_url": "https://app.datadoghq.com",
            "api_key": "foo",
            "use_dd": True
        }

        app = Application()
        app.skip_ssl_validation = False
        app._agentConfig = config
        app.use_simple_http_client = True

        trManager = TransactionManager(timedelta(seconds=0), MAX_QUEUE_SIZE,
                                       THROTTLING_DELAY)
        trManager._flush_without_ioloop = True  # Use blocking API to emulate tornado ioloop
        MetricTransaction._trManager = trManager
        MetricTransaction.set_application(app)
        MetricTransaction.set_endpoints()

        transaction = MetricTransaction(None, {})
        endpoints = [transaction.get_url(e) for e in transaction._endpoints]
        expected = [
            'https://{0}-app.agent.datadoghq.com/intake?api_key=foo'.format(
                get_version().replace(".", "-"))
        ]
        self.assertEqual(endpoints, expected, (endpoints, expected))

        for url in endpoints:
            r = requests.post(url,
                              data=json.dumps({"foo": "bar"}),
                              headers={'Content-Type': "application/json"})
            r.raise_for_status()

        transaction = APIMetricTransaction(None, {})
        endpoints = [transaction.get_url(e) for e in transaction._endpoints]
        expected = [
            'https://{0}-app.agent.datadoghq.com/api/v1/series/?api_key=foo'.
            format(get_version().replace(".", "-"))
        ]
        self.assertEqual(endpoints, expected, (endpoints, expected))

        for url in endpoints:
            r = requests.post(url,
                              data=json.dumps({"foo": "bar"}),
                              headers={'Content-Type': "application/json"})
            r.raise_for_status()
Example #17
    def __init__(self, port, agentConfig, watchdog=True):
        self._port = int(port)
        self._agentConfig = agentConfig
        self._metrics = {}
        MetricTransaction.set_application(self)
        MetricTransaction.set_endpoints()
        self._tr_manager = TransactionManager(MAX_WAIT_FOR_REPLAY,
                                              MAX_QUEUE_SIZE, THROTTLING_DELAY)
        MetricTransaction.set_tr_manager(self._tr_manager)

        self._watchdog = None
        if watchdog:
            watchdog_timeout = TRANSACTION_FLUSH_INTERVAL * WATCHDOG_INTERVAL_MULTIPLIER
            self._watchdog = Watchdog(watchdog_timeout)
Example #18
    def test_no_parallelism(self):
        step = 2
        trManager = TransactionManager(timedelta(seconds=0), MAX_QUEUE_SIZE,
                                       timedelta(seconds=0), max_parallelism=1,
                                       max_endpoint_errors=100)
        for i in xrange(step):
            trManager.append(SleepingTransaction(trManager, delay=1))
        trManager.flush()
        # Flushes should be sequential
        for i in xrange(step):
            self.assertEqual(trManager._running_flushes, 1)
            self.assertEqual(trManager._finished_flushes, i)
            self.assertEqual(len(trManager._trs_to_flush), step - (i + 1))
            time.sleep(1)
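SleepingTransaction is another helper that this listing does not show. One plausible shape, assuming the same tr_success/flush_next callbacks as the memTransaction sketch and a timer thread to finish the flush asynchronously:

import threading

class SleepingTransaction(Transaction):
    """Test double whose flush completes after `delay` seconds."""

    def __init__(self, trManager, delay=0.5):
        Transaction.__init__(self)
        self._trManager = trManager
        self._delay = delay

    def flush(self):
        # Report success later, so the test can observe _running_flushes
        # and _finished_flushes while flushes are still in flight.
        def done():
            self._trManager.tr_success(self)
            self._trManager.flush_next()
        threading.Timer(self._delay, done).start()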
Example #19
    def exec_eager(self, *args, **kwargs):
        """Run transaction aware task in eager mode."""

        # We are run in a post-commit hook, so there is no transaction manager available
        tm = TransactionManager()

        # Do not attempt any transaction retries in eager mode
        tm.retry_attempt_count = 1

        self.request.update(request=self.make_faux_request(
            transaction_manager=tm))

        with tm:
            # This doesn't do transaction retry attempts, but should be good enough for eager
            return self.run(*args, **kwargs)
Example #20
    def __init__(self,
                 port,
                 agentConfig,
                 watchdog=True,
                 skip_ssl_validation=False,
                 use_simple_http_client=False):
        self._port = int(port)
        self._agentConfig = agentConfig
        self._metrics = {}
        self._dns_cache = None
        AgentTransaction.set_application(self)
        AgentTransaction.set_endpoints(agentConfig['endpoints'])
        if agentConfig['endpoints'] == {}:
            log.warning(
                u"No valid endpoint found. Forwarder will drop all incoming payloads."
            )
        AgentTransaction.set_request_timeout(agentConfig['forwarder_timeout'])

        max_parallelism = self.NO_PARALLELISM
        # Multiple endpoints => enable parallelism
        if len(agentConfig['endpoints']) > 1:
            max_parallelism = self.DEFAULT_PARALLELISM

        self._tr_manager = TransactionManager(MAX_WAIT_FOR_REPLAY,
                                              MAX_QUEUE_SIZE,
                                              THROTTLING_DELAY,
                                              max_parallelism=max_parallelism)
        AgentTransaction.set_tr_manager(self._tr_manager)

        self._watchdog = None
        self.skip_ssl_validation = skip_ssl_validation or _is_affirmative(
            agentConfig.get('skip_ssl_validation'))
        self.agent_dns_caching = _is_affirmative(
            agentConfig.get('dns_caching'))
        self.agent_dns_ttl = int(agentConfig.get('dns_ttl', DEFAULT_DNS_TTL))
        if self.agent_dns_caching:
            self._dns_cache = DNSCache(ttl=self.agent_dns_ttl)
        self.use_simple_http_client = use_simple_http_client
        if self.skip_ssl_validation:
            log.info(
                "Skipping SSL hostname validation, useful when using a transparent proxy"
            )

        # Monitor activity
        if watchdog:
            watchdog_timeout = TRANSACTION_FLUSH_INTERVAL * WATCHDOG_INTERVAL_MULTIPLIER / 1000
            self._watchdog = Watchdog.create(
                watchdog_timeout, max_resets=WATCHDOG_HIGH_ACTIVITY_THRESHOLD)
Example #21
    def __init__(self, port, agentConfig, watchdog=True,
                 skip_ssl_validation=False, use_simple_http_client=False):
        self._port = int(port)
        self._agentConfig = agentConfig
        self._metrics = {}
        AgentTransaction.set_application(self)
        AgentTransaction.set_endpoints(agentConfig['endpoints'])
        AgentTransaction.set_request_timeout(agentConfig['forwarder_timeout'])

        max_parallelism = self.NO_PARALLELISM
        # Multiple endpoints => enable parallelism
        if len(agentConfig['endpoints']) > 1:
            max_parallelism = self.DEFAULT_PARALLELISM

        self._tr_manager = TransactionManager(MAX_WAIT_FOR_REPLAY,
                                              MAX_QUEUE_SIZE, THROTTLING_DELAY,
                                              max_parallelism=max_parallelism)
        AgentTransaction.set_tr_manager(self._tr_manager)

        self._watchdog = None
        self.skip_ssl_validation = skip_ssl_validation or agentConfig.get('skip_ssl_validation', False)
        self.use_simple_http_client = use_simple_http_client
        if self.skip_ssl_validation:
            log.info("Skipping SSL hostname validation, useful when using a transparent proxy")

        # Monitor activity
        if watchdog:
            watchdog_timeout = TRANSACTION_FLUSH_INTERVAL * WATCHDOG_INTERVAL_MULTIPLIER / 1000
            self._watchdog = Watchdog(
                watchdog_timeout,
                max_mem_mb=agentConfig.get('limit_memory_consumption', None),
                max_resets=WATCHDOG_HIGH_ACTIVITY_THRESHOLD
            )
Example #22
    def __init__(self,
                 port,
                 agentConfig,
                 watchmonitor=True,
                 skip_ssl_validation=False,
                 use_simple_http_client=False):
        self.ip = get_ip(agentConfig, log)
        self._port = int(port)
        self._agentConfig = agentConfig
        self._metrics = {}
        AgentTransaction.set_application(self)
        AgentTransaction.set_endpoints()
        self._tr_manager = TransactionManager(MAX_WAIT_FOR_REPLAY,
                                              MAX_QUEUE_SIZE, THROTTLING_DELAY)
        AgentTransaction.set_tr_manager(self._tr_manager)

        self._watchmonitor = None
        self.skip_ssl_validation = skip_ssl_validation or agentConfig.get(
            'skip_ssl_validation', False)
        self.use_simple_http_client = use_simple_http_client
        self._send_controler = 0  # ensures self.postAgentInfoToServer runs only once per minute
        if self.skip_ssl_validation:
            log.info(
                "Skipping SSL hostname validation, useful when using a transparent proxy"
            )

        if watchmonitor:
            watchmonitor_timeout = TRANSACTION_FLUSH_INTERVAL * WATCHmonitor_INTERVAL_MULTIPLIER / 1000
            self._watchmonitor = Watchmonitor(watchmonitor_timeout,
                                              max_mem_mb=agentConfig.get(
                                                  'limit_memory_consumption',
                                                  None))
Example #23
def make_routable_request(dbsession: Optional[Session],
                          registry: Registry,
                          path="/") -> IRequest:
    """Creates a dummy request that has route_url and other routing methods.

    As this request does not get HTTP hostname and such stuff from WSGI environment, a configuration variable ``websauna.site_url`` is passed as the base URL.

    See also :func:`make_dummy_request`.

    :param dbsession: Use existing dbsession or set to ``None`` to generate a new dbsession and transaction manager. Note that this TM is not the thread-local transaction manager in ``transaction.manager``.
    """

    base_url = registry.settings.get("websauna.site_url", None)

    # TODO: Honour request_factory here
    request = Request.blank(path, base_url=base_url)
    # apply_request_extensions()?
    request.registry = registry
    request.user = None

    if dbsession:
        request.dbsession = dbsession
    else:
        tm = TransactionManager()
        dbsession = create_dbsession(request.registry, tm)
        request.dbsession = dbsession
        request.tm = request.transaction_manager = tm

        def terminate_session(request):
            # Close db session at the end of the request and return the db connection back to the pool
            dbsession.close()

        request.add_finished_callback(terminate_session)

    return request
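A short usage sketch; registry is a configured registry obtained elsewhere, and the route name is a placeholder:

request = make_routable_request(dbsession=None, registry=registry)
with request.tm:
    # Work against the request-scoped dbsession; the transaction manager
    # commits (or aborts on error) when the block exits.
    pass
print(request.route_url("home"))  # "home" is a hypothetical route name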
Example #24
def test_zbigarray_invalidate_shape():
    root = testdb.dbopen()
    conn = root._p_jar
    db   = conn.db()
    conn.close()
    del root, conn

    print
    tm1 = TransactionManager()
    tm2 = TransactionManager()

    conn1 = db.open(transaction_manager=tm1)
    root1 = conn1.root()

    # setup zarray
    root1['zarray4'] = a1 = ZBigArray((10,), uint8)
    tm1.commit()

    # set zarray initial data
    a1[0:1] = [1]           # XXX -> [0] = 1  after BigArray can
    tm1.commit()

    # read zarray in conn2
    conn2 = db.open(transaction_manager=tm2)
    root2 = conn2.root()

    a2 = root2['zarray4']
    assert a2[0:1] == [1]   # read data in conn2 + make sure read correctly
                            # XXX -> [0] == 1  after BigArray can

    # append to a1 which changes both RAM pages and a1.shape
    assert a1.shape == (10,)
    a1.append([123])
    assert a1.shape == (11,)
    assert a1[10:11] == [123]   # XXX -> [10] = 123  after BigArray can
    tm1.commit()
    tm2.commit()            # just transaction boundary for t2

    # data from tm1 should propagate to tm2
    assert a2.shape == (11,)
    assert a2[10:11] == [123]   # XXX -> [10] = 123  after BigArray can


    conn2.close()
    del conn2, root2, a2
    dbclose(root1)
Example #25
    def __init__(self, port, agentConfig):
        self._port = port
        self._agentConfig = agentConfig
        self._metrics = {}
        self._watchdog = Watchdog(TRANSACTION_FLUSH_INTERVAL * WATCHDOG_INTERVAL_MULTIPLIER)
        MetricTransaction.set_application(self)
        self._tr_manager = TransactionManager(MAX_WAIT_FOR_REPLAY,
                                              MAX_QUEUE_SIZE, THROTTLING_DELAY)
        MetricTransaction.set_tr_manager(self._tr_manager)
Example #26
    def exec_eager(self, *args, **kwargs):
        """Run transaction aware task in eager mode."""

        # We are run in a post-commit hook, so there is no transaction manager available
        tm = TransactionManager()

        self.request.update(request=self.make_faux_request(
            transaction_manager=tm))
        return self.run(*args, **kwargs)
Example #27
    def __init__(self):
        if not hasattr(self, '_conf'):
            raise Exception('Unconfigured Context')
        self.log = self._conf.log
        self.log.debug('Initializing')
        self._constructed_attrs = OrderedDict()
        self._active = True
        self.tx_manager = TransactionManager()
        for callback in self._conf._create_callbacks:
            callback(self)
Example #28
    def test_parallelism(self):
        step = 4
        trManager = TransactionManager(timedelta(seconds=0), MAX_QUEUE_SIZE,
                                       timedelta(seconds=0), max_parallelism=step,
                                       max_endpoint_errors=100)
        for i in xrange(step):
            trManager.append(SleepingTransaction(trManager))

        trManager.flush()
        self.assertEqual(trManager._running_flushes, step)
        self.assertEqual(trManager._finished_flushes, 0)
        # If _trs_to_flush != None, it means that it's still running as it should be
        self.assertEqual(trManager._trs_to_flush, [])
        time.sleep(1)

        # It should be finished
        self.assertEqual(trManager._running_flushes, 0)
        self.assertEqual(trManager._finished_flushes, step)
        self.assertIs(trManager._trs_to_flush, None)
Example #29
def _create_session(transaction_manager: TransactionManager, engine: Engine) -> Session:
    """Create a new database session with Zope transaction manager attached.

    The attached transaction manager takes care of committing the transaction at the end of the request.
    """
    dbsession = Session(bind=engine)
    transaction_manager.retry_attempt_count = 3  # TODO: Hardcoded for now
    zope.sqlalchemy.register(dbsession, transaction_manager=transaction_manager)
    dbsession.transaction_manager = transaction_manager
    return dbsession
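A short usage sketch (the SQLite URL is a placeholder; any SQLAlchemy engine works):

from sqlalchemy import create_engine
from transaction import TransactionManager

engine = create_engine("sqlite://")
tm = TransactionManager()
dbsession = _create_session(tm, engine)

with tm:
    # ORM work here is committed, or aborted on error, by the attached
    # Zope transaction manager when the block exits.
    pass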
Example #30
class Replica(ServiceBase):
    
    transaction_manager = TransactionManager()

    @rpc(String)
    def set_server(ctx, server):
        logging.info(' set_server()')
        Replica.transaction_manager.set_server(server)

    @rpc(String)
    def add_replica(ctx, replica):
        logging.info(' add_replica()')
        Replica.transaction_manager.add_replica(replica)

    @srpc(String, _returns=Boolean)    
    def tpc_vote_replica(msg):
        logging.info(' tpc_vote_replica()')
        return Replica.transaction_manager.tpc_vote_replica(msg)

    @srpc(String, _returns=Boolean)
    def tpc_commit_replica(msg):
        logging.info(' tpc_commit_replica()')
        return Replica.transaction_manager.tpc_commit_replica(msg)

    @srpc(String, _returns=Boolean)
    def tpc_abort_replica(msg):
        logging.info(' tpc_abort_replica()')
        return Replica.transaction_manager.tpc_abort_replica(msg)
    
    @rpc(String)
    def tpc_ack(ctx, msg):
        logging.info(' tpc_ack()')
        Replica.transaction_manager.tpc_ack(msg)

    @rpc(String, String)
    def put(ctx, key, value):
        logging.info(' put()')
        return Replica.transaction_manager.put(key, value)

    @srpc(String, _returns=Boolean)      
    def delete(key):
        logging.info(' delete()')
        return Replica.transaction_manager.delete(key)

    @srpc(String, _returns=String)
    def get(key):
        logging.info(' get()')
        return Replica.transaction_manager.get(key)
    
    @srpc(String, _returns=String)
    def get_value_replica(key):
        logging.info(' get_value_replica()')
        return Replica.transaction_manager.get_value_replica(key)
Example #31
    def __init__(self, cache_backend=None, keyhandler=None, keygen=None):
        self.__dict__ = self.__shared_state
        self.prefix = settings.MIDDLEWARE_KEY_PREFIX
        if keyhandler:
            self.kh_class = keyhandler
        if keygen:
            self.kg_class = keygen
        if not cache_backend and not hasattr(self, 'cache_backend'):
            cache_backend = settings._get_backend()

        if not keygen and not hasattr(self, 'kg_class'):
            self.kg_class = KeyGen
        if keyhandler is None and not hasattr(self, 'kh_class'):
            self.kh_class = KeyHandler

        if cache_backend:
            self.cache_backend = TransactionManager(cache_backend,
                                                    self.kg_class)
            self.keyhandler = self.kh_class(self.cache_backend, self.kg_class,
                                            self.prefix)
        self._patched = getattr(self, '_patched', False)
Example #32
    def __init__(self,
                 port,
                 agentConfig,
                 watchdog=True,
                 skip_ssl_validation=False,
                 use_simple_http_client=False):
        self._port = int(port)
        self._agentConfig = agentConfig
        self._metrics = {}
        AgentTransaction.set_application(self)
        AgentTransaction.set_endpoints(agentConfig['endpoints'])
        AgentTransaction.set_request_timeout(agentConfig['forwarder_timeout'])

        max_parallelism = self.NO_PARALLELISM
        # Multiple endpoints => enable parallelism
        if len(agentConfig['endpoints']) > 1:
            max_parallelism = self.DEFAULT_PARALLELISM

        self._tr_manager = TransactionManager(MAX_WAIT_FOR_REPLAY,
                                              MAX_QUEUE_SIZE,
                                              THROTTLING_DELAY,
                                              max_parallelism=max_parallelism)
        AgentTransaction.set_tr_manager(self._tr_manager)

        self._watchdog = None
        self.skip_ssl_validation = skip_ssl_validation or agentConfig.get(
            'skip_ssl_validation', False)
        self.use_simple_http_client = use_simple_http_client
        if self.skip_ssl_validation:
            log.info(
                "Skipping SSL hostname validation, useful when using a transparent proxy"
            )

        # Monitor activity
        if watchdog:
            watchdog_timeout = TRANSACTION_FLUSH_INTERVAL * WATCHDOG_INTERVAL_MULTIPLIER / 1000
            self._watchdog = Watchdog(
                watchdog_timeout,
                max_mem_mb=agentConfig.get('limit_memory_consumption', None),
                max_resets=WATCHDOG_HIGH_ACTIVITY_THRESHOLD)
Example #33
    def __init__(self, port, agentConfig, watchdog=True):
        self._port = int(port)
        self._agentConfig = agentConfig
        self._metrics = {}
        MetricTransaction.set_application(self)
        MetricTransaction.set_endpoints()
        self._tr_manager = TransactionManager(MAX_WAIT_FOR_REPLAY, MAX_QUEUE_SIZE, THROTTLING_DELAY)
        MetricTransaction.set_tr_manager(self._tr_manager)

        self._watchdog = None
        if watchdog:
            watchdog_timeout = TRANSACTION_FLUSH_INTERVAL * WATCHDOG_INTERVAL_MULTIPLIER
            self._watchdog = Watchdog(watchdog_timeout)
Example #34
    def test_drop_repeated_error(self):
        trManager = TransactionManager(timedelta(seconds=0),
                                       MAX_QUEUE_SIZE,
                                       timedelta(seconds=0),
                                       max_endpoint_errors=1)

        # Fail it once
        oneTrSize = 10
        trManager.append(memTransaction(oneTrSize, trManager))

        # It should still be there after flush
        trManager.flush()
        self.assertEqual(len(trManager._transactions), 1)

        # Try again, now it should be gone
        trManager.flush()
        self.assertEqual(len(trManager._transactions), 0)
Example #35
def update_schema_manager(event):
    # Use a local transaction manager to sidestep any issues
    # with an active transaction or explicit mode.
    txm = TransactionManager(explicit=True)
    txm.begin()
    with contextlib.closing(event.database.open(txm)) as conn:
        state = conn.root().get(PersistentWebhookSchemaManager.key, State())
        txm.abort()

    schema = get_schema_manager()
    schema.compareSubscriptionsAndComputeGeneration(state)
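TransactionManager(explicit=True) disables implicit transactions: every unit of work must be bracketed by begin() and commit() or abort(), exactly as done above. The general pattern:

from transaction import TransactionManager

txm = TransactionManager(explicit=True)
txm.begin()
try:
    # read or write through resources joined to txm
    txm.commit()
except Exception:
    txm.abort()
    raise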
Example #36
    def __init__(self, cache_backend=None, keyhandler=None, keygen=None):
        self.__dict__ = self.__shared_state
        self.prefix = settings.MIDDLEWARE_KEY_PREFIX
        if keyhandler: self.kh_class = keyhandler
        if keygen: self.kg_class = keygen
        if not cache_backend and not hasattr(self, 'cache_backend'):
            cache_backend = settings._get_backend()

        if not keygen and not hasattr(self, 'kg_class'):
            self.kg_class = KeyGen
        if keyhandler is None and not hasattr(self, 'kh_class'):
            self.kh_class = KeyHandler

        if cache_backend:
            self.cache_backend = TransactionManager(cache_backend, self.kg_class)
            self.keyhandler = self.kh_class(self.cache_backend, self.kg_class, self.prefix)
        self._patched = getattr(self, '_patched', False)
Example #37
def make_routable_request(dbsession: t.Optional[Session] = None,
                          registry: t.Optional[Registry] = None,
                          path='/') -> IRequest:
    """Creates a dummy request that has route_url and other routing methods.

    As this request does not get HTTP hostname and such stuff from WSGI environment, a configuration variable ``websauna.site_url`` is passed as the base URL.

    See also :func:`make_dummy_request`.

    TODO: Split this into two different functions: one for an existing dbsession and one where a new dbsession is created.

    :param dbsession: Use existing dbsession or set to ``None`` to generate a new dbsession and transaction manager. Note that this TM is not the thread-local transaction manager in ``transaction.manager``.
    :param registry: Configuration registry
    :param path: Path being requested.
    :return: Current request.
    """
    base_url = registry.settings.get("websauna.site_url", None)
    # TODO: Honour request_factory here
    request = Request.blank(path, base_url=base_url)
    request.registry = registry
    request.user = None
    request.view_name = ''

    # This will create request.tm, others
    apply_request_extensions(request)

    if dbsession:
        # Use the provided dbsession for this request
        request.dbsession = dbsession
        if hasattr(dbsession, "transaction_manager"):
            request.tm = request.transaction_manager = dbsession.transaction_manager
    else:
        # Create a new dbsession and transaction manager for this request
        tm = TransactionManager()
        dbsession = create_dbsession(request.registry, tm)
        request.dbsession = dbsession
        request.tm = request.transaction_manager = tm

        def terminate_session(request):
            # Close db session at the end of the request and return the db connection back to the pool
            dbsession.close()

        request.add_finished_callback(terminate_session)

    return request
Example #38
    def __init__(self, port, agentConfig, watchdog=True, skip_ssl_validation=False):
        self._port = int(port)
        self._agentConfig = agentConfig
        self._metrics = {}
        MetricTransaction.set_application(self)
        MetricTransaction.set_endpoints()
        self._tr_manager = TransactionManager(MAX_WAIT_FOR_REPLAY,
            MAX_QUEUE_SIZE, THROTTLING_DELAY)
        MetricTransaction.set_tr_manager(self._tr_manager)

        self._watchdog = None
        self.skip_ssl_validation = skip_ssl_validation or agentConfig.get('skip_ssl_validation', False)
        if self.skip_ssl_validation:
            log.info("Skipping SSL hostname validation, useful when using a transparent proxy")

        if watchdog:
            watchdog_timeout = TRANSACTION_FLUSH_INTERVAL * WATCHDOG_INTERVAL_MULTIPLIER
            self._watchdog = Watchdog(watchdog_timeout,
                max_mem_mb=agentConfig.get('limit_memory_consumption', None))
Example #39
    def testThrottling(self):
        """Test throttling while flushing"""

        # No throttling, no delay for replay
        trManager = TransactionManager(timedelta(seconds=0), MAX_QUEUE_SIZE, THROTTLING_DELAY)
        trManager._flush_without_ioloop = True  # Use blocking API to emulate tornado ioloop

        # Add 3 transactions, make sure no memory limit is in the way
        oneTrSize = MAX_QUEUE_SIZE / 10
        for i in xrange(3):
            tr = memTransaction(oneTrSize, trManager)
            trManager.append(tr)

        # Try to flush them, time it
        before = datetime.now()
        trManager.flush()
        after = datetime.now()
        self.assertTrue((after - before) > 3 * THROTTLING_DELAY)
Example #40
    def test_no_parallelism(self):
        step = 2
        trManager = TransactionManager(timedelta(seconds=0),
                                       MAX_QUEUE_SIZE,
                                       timedelta(seconds=0),
                                       max_parallelism=1,
                                       max_endpoint_errors=100)
        for i in xrange(step):
            trManager.append(SleepingTransaction(trManager, delay=1))
        trManager.flush()
        # Flushes should be sequential
        for i in xrange(step):
            self.assertEqual(trManager._running_flushes, 1)
            self.assertEqual(trManager._finished_flushes, i)
            self.assertEqual(len(trManager._trs_to_flush), step - (i + 1))
            time.sleep(1)
Example #41
    def test_notify_transaction_late_comers(self):
        # If a datamanager registers for synchronization after a
        # transaction has started, we should call newTransaction so it
        # can do necessary setup.
        import mock
        from .. import TransactionManager
        manager = TransactionManager()
        sync1 = mock.MagicMock()
        manager.registerSynch(sync1)
        sync1.newTransaction.assert_not_called()
        t = manager.begin()
        sync1.newTransaction.assert_called_with(t)
        sync2 = mock.MagicMock()
        manager.registerSynch(sync2)
        sync2.newTransaction.assert_called_with(t)

        # for, um, completeness
        t.commit()
        for s in sync1, sync2:
            s.beforeCompletion.assert_called_with(t)
            s.afterCompletion.assert_called_with(t)
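Beyond MagicMock, any object providing the three ISynchronizer methods can be passed to registerSynch. A minimal sketch:

class PrintingSynch(object):
    """Bare-bones synchronizer compatible with registerSynch."""

    def newTransaction(self, txn):
        print("transaction started:", txn)

    def beforeCompletion(self, txn):
        print("about to commit/abort:", txn)

    def afterCompletion(self, txn):
        print("commit/abort finished:", txn)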
Example #42
    def __init__(self, port, agentConfig, watchdog=True,
                 skip_ssl_validation=False, use_simple_http_client=False):
        self._port = int(port)
        self._agentConfig = agentConfig
        self._metrics = {}
        self._dns_cache = None
        AgentTransaction.set_application(self)
        AgentTransaction.set_endpoints(agentConfig['endpoints'])
        if agentConfig['endpoints'] == {}:
            log.warning(u"No valid endpoint found. Forwarder will drop all incoming payloads.")
        AgentTransaction.set_request_timeout(agentConfig['forwarder_timeout'])

        max_parallelism = self.NO_PARALLELISM
        # Multiple endpoints => enable parallelism
        if len(agentConfig['endpoints']) > 1:
            max_parallelism = self.DEFAULT_PARALLELISM

        self._tr_manager = TransactionManager(MAX_WAIT_FOR_REPLAY,
                                              MAX_QUEUE_SIZE, THROTTLING_DELAY,
                                              max_parallelism=max_parallelism)
        AgentTransaction.set_tr_manager(self._tr_manager)

        self._watchdog = None
        self.skip_ssl_validation = skip_ssl_validation or _is_affirmative(agentConfig.get('skip_ssl_validation'))
        self.agent_dns_caching = _is_affirmative(agentConfig.get('dns_caching'))
        self.agent_dns_ttl = int(agentConfig.get('dns_ttl', DEFAULT_DNS_TTL))
        if self.agent_dns_caching:
            self._dns_cache = DNSCache(ttl=self.agent_dns_ttl)
        self.use_simple_http_client = use_simple_http_client
        if self.skip_ssl_validation:
            log.info("Skipping SSL hostname validation, useful when using a transparent proxy")

        # Monitor activity
        if watchdog:
            watchdog_timeout = TRANSACTION_FLUSH_INTERVAL * WATCHDOG_INTERVAL_MULTIPLIER / 1000
            self._watchdog = Watchdog.create(watchdog_timeout,
                                             max_resets=WATCHDOG_HIGH_ACTIVITY_THRESHOLD)
Example #43
    def test_parallelism(self):
        step = 4
        trManager = TransactionManager(timedelta(seconds=0), MAX_QUEUE_SIZE,
                                       timedelta(seconds=0), max_parallelism=step,
                                       max_endpoint_errors=100)
        for i in xrange(step):
            trManager.append(SleepingTransaction(trManager))

        trManager.flush()
        self.assertEqual(trManager._running_flushes, step)
        self.assertEqual(trManager._finished_flushes, 0)
        # If _trs_to_flush != None, it means that it's still running as it should be
        self.assertEqual(trManager._trs_to_flush, [])
        time.sleep(1)

        # It should be finished
        self.assertEqual(trManager._running_flushes, 0)
        self.assertEqual(trManager._finished_flushes, step)
        self.assertIs(trManager._trs_to_flush, None)
Example #44
    def test_notify_transaction_late_comers(self):
        # If a datamanager registers for synchronization after a
        # transaction has started, we should call newTransaction so it
        # can do necessary setup.
        import mock
        from .. import TransactionManager
        manager = TransactionManager()
        sync1 = mock.MagicMock()
        manager.registerSynch(sync1)
        sync1.newTransaction.assert_not_called()
        t = manager.begin()
        sync1.newTransaction.assert_called_with(t)
        sync2 = mock.MagicMock()
        manager.registerSynch(sync2)
        sync2.newTransaction.assert_called_with(t)

        # for, um, completeness
        t.commit()
        for s in sync1, sync2:
            s.beforeCompletion.assert_called_with(t)
            s.afterCompletion.assert_called_with(t)
Example #45
class QueryCacheBackend(object):
    """This class is the engine behind the query cache. It reads the queries
    going through the django Query and returns from the cache using
    the generation keys, or on a miss from the database and caches the results.
    Each time a model is updated the table keys for that model are re-created,
    invalidating all cached querysets for that model.

    There are different QueryCacheBackend's for different versions of django;
    call ``johnny.cache.get_backend`` to automatically get the proper class.
    """
    __shared_state = {}

    def __init__(self, cache_backend=None, keyhandler=None, keygen=None):
        self.__dict__ = self.__shared_state
        self.prefix = settings.MIDDLEWARE_KEY_PREFIX
        if keyhandler:
            self.kh_class = keyhandler
        if keygen:
            self.kg_class = keygen
        if not cache_backend and not hasattr(self, 'cache_backend'):
            cache_backend = settings._get_backend()

        if not keygen and not hasattr(self, 'kg_class'):
            self.kg_class = KeyGen
        if keyhandler is None and not hasattr(self, 'kh_class'):
            self.kh_class = KeyHandler

        if cache_backend:
            self.cache_backend = TransactionManager(cache_backend,
                                                    self.kg_class)
            self.keyhandler = self.kh_class(self.cache_backend,
                                            self.kg_class, self.prefix)
        self._patched = getattr(self, '_patched', False)

    def _monkey_select(self, original):
        from django.db.models.sql.constants import MULTI
        from django.db.models.sql.datastructures import EmptyResultSet

        @wraps(original, assigned=available_attrs(original))
        def newfun(cls, *args, **kwargs):
            if args:
                result_type = args[0]
            else:
                result_type = kwargs.get('result_type', MULTI)

            if any([isinstance(cls, c) for c in self._write_compilers]):
                return original(cls, *args, **kwargs)
            try:
                sql, params = cls.as_sql()
                if not sql:
                    raise EmptyResultSet
            except EmptyResultSet:
                if result_type == MULTI:
                    # this was moved in 1.2 to compiler
                    return empty_iter()
                else:
                    return

            db = getattr(cls, 'using', 'default')
            key, val = None, NotInCache()
            # check the blacklist for any of the involved tables;  if it's not
            # there, then look for the value in the cache.
            tables = get_tables_for_query(cls.query)
            # if the tables are blacklisted, send a qc_skip signal
            blacklisted = disallowed_table(*tables)
            if blacklisted:
                signals.qc_skip.send(sender=cls, tables=tables,
                    query=(sql, params, cls.ordering_aliases),
                    key=key)
            if tables and not blacklisted:
                gen_key = self.keyhandler.get_generation(*tables, **{'db': db})
                key = self.keyhandler.sql_key(gen_key, sql, params,
                                              cls.get_ordering(),
                                              result_type, db)
                val = self.cache_backend.get(key, NotInCache(), db)

            if not isinstance(val, NotInCache):
                if val == no_result_sentinel:
                    val = []

                signals.qc_hit.send(sender=cls, tables=tables,
                        query=(sql, params, cls.ordering_aliases),
                        size=len(val), key=key)
                return val

            if not blacklisted:
                signals.qc_miss.send(sender=cls, tables=tables,
                    query=(sql, params, cls.ordering_aliases),
                    key=key)

            val = original(cls, *args, **kwargs)

            if hasattr(val, '__iter__'):
                # Can't permanently cache lazy iterables without creating
                # a cacheable data structure. Note that this makes them
                # no longer lazy...
                # TODO: create a smart iterable wrapper
                val = list(val)
            if key is not None:
                if not val:
                    self.cache_backend.set(key, no_result_sentinel, settings.MIDDLEWARE_SECONDS, db)
                else:
                    self.cache_backend.set(key, val, settings.MIDDLEWARE_SECONDS, db)
            return val
        return newfun

    def _monkey_write(self, original):
        @wraps(original, assigned=available_attrs(original))
        def newfun(cls, *args, **kwargs):
            db = getattr(cls, 'using', 'default')
            from django.db.models.sql import compiler
            # we have to do this before we check the tables, since the tables
            # are actually being set in the original function
            ret = original(cls, *args, **kwargs)

            if isinstance(cls, compiler.SQLInsertCompiler):
                # Inserts are a special case where cls.tables
                # are not populated.
                tables = [cls.query.model._meta.db_table]
            else:
                #if cls.query.tables != list(cls.query.table_map):
                #    pass
                #tables = list(cls.query.table_map)
                tables = cls.query.tables
            for table in tables:
                if not disallowed_table(table):
                    self.keyhandler.invalidate_table(table, db)
            return ret
        return newfun

    def patch(self):
        """
        monkey patches django.db.models.sql.compiler.SQL*Compiler series
        """
        from django.db.models.sql import compiler

        self._read_compilers = (
            compiler.SQLCompiler,
            compiler.SQLAggregateCompiler,
            compiler.SQLDateCompiler,
        )
        self._write_compilers = (
            compiler.SQLInsertCompiler,
            compiler.SQLDeleteCompiler,
            compiler.SQLUpdateCompiler,
        )
        if not self._patched:
            self._original = {}
            for reader in self._read_compilers:
                self._original[reader] = reader.execute_sql
                reader.execute_sql = self._monkey_select(reader.execute_sql)
            for updater in self._write_compilers:
                self._original[updater] = updater.execute_sql
                updater.execute_sql = self._monkey_write(updater.execute_sql)
            self._patched = True
            self.cache_backend.patch()
            self._handle_signals()

    def unpatch(self):
        """un-applies this patch."""
        if not self._patched:
            return
        for func in self._read_compilers + self._write_compilers:
            func.execute_sql = self._original[func]
        self.cache_backend.unpatch()
        self._patched = False

    def invalidate_m2m(self, instance, **kwargs):
        if self._patched:
            table = resolve_table(instance)
            if not disallowed_table(table):
                self.keyhandler.invalidate_table(instance)

    def invalidate(self, instance, **kwargs):
        if self._patched:
            table = resolve_table(instance)
            if not disallowed_table(table):
                self.keyhandler.invalidate_table(table)

            tables = set()
            tables.add(table)

            try:
                instance._meta._related_objects_cache
            except AttributeError:
                instance._meta._fill_related_objects_cache()

            for obj in instance._meta._related_objects_cache.keys():
                obj_table = obj.model._meta.db_table
                if obj_table not in tables:
                    tables.add(obj_table)
                    if not disallowed_table(obj_table):
                        self.keyhandler.invalidate_table(obj_table)

    def _handle_signals(self):
        post_save.connect(self.invalidate, sender=None)
        post_delete.connect(self.invalidate, sender=None)
        # FIXME: only needed in 1.1?
        signals.qc_m2m_change.connect(self.invalidate_m2m, sender=None)

    def flush_query_cache(self):
        from django.db import connection
        tables = connection.introspection.table_names()
        for table in tables:
            # We want this to just work, so invalidate even blacklisted tables.
            self.keyhandler.invalidate_table(table)
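
# A minimal usage sketch for the patcher above. The enclosing class name and
# its constructor are assumptions (only the methods are shown here);
# `QueryCacheMiddleware` and `SomeModel` are hypothetical stand-ins.
qc = QueryCacheMiddleware()
qc.patch()                         # wrap the SQL*Compiler execute_sql methods
try:
    list(SomeModel.objects.all())  # reads are now served from the cache
    SomeModel.objects.create()     # writes invalidate the per-table keys
finally:
    qc.unpatch()                   # restore the original execute_sql methods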
Example #46
class Context:
    """
    Base class for Contexts of a ConfiguredCtxModule. Do not use this class
    directly, use the :attr:`Context <.ConfiguredCtxModule.Context>` member of a
    ConfiguredCtxModule instead.

    Every Context object needs to be destroyed manually by calling its
    :meth:`.destroy` method. Although this method is also called from the
    destructor of this class, that may already be too late, which is why the
    preferred way to use this class is within a `with` statement:

    >>> with ctx_conf.Context() as ctx:
    ...     ctx.logout_user()
    ...
    """

    def __init__(self):
        if not hasattr(self, '_conf'):
            raise Exception('Unconfigured Context')
        self.log = self._conf.log
        self.log.debug('Initializing')
        self._constructed_attrs = OrderedDict()
        self._active = True
        self.tx_manager = TransactionManager()
        for callback in self._conf._create_callbacks:
            callback(self)

    def __del__(self):
        if self._active:
            self.destroy()

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.destroy(value)

    def __hasattr__(self, attr):
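        # NOTE: __hasattr__ is not a Python special method; hasattr() is
        # implemented in terms of getattr(), so attribute probes actually go
        # through __getattr__ below and this method is never called implicitly.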
        if self._active:
            return attr in self._conf.registrations
        return attr in self.__dict__

    def __getattr__(self, attr):
        if '_active' in self.__dict__ and '_conf' in self.__dict__ and \
                self._active and attr in self._conf.registrations:
            value = self._conf.registrations[attr].constructor(self)
            if self._conf.registrations[attr].cached:
                self._constructed_attrs[attr] = value
                self.__dict__[attr] = value
            else:
                self._constructed_attrs[attr] = None
            self.log.debug('Creating member %s' % attr)
            return value
        raise AttributeError(attr)

    def __setattr__(self, attr, value):
        try:
            # Call the registered constructor, if there is one, so the
            # destructor gets called with this new value at destroy time.
            getattr(self, attr)
        except AttributeError:
            pass
        self.__dict__[attr] = value

    def __delattr__(self, attr):
        if attr in self._conf.registrations:
            if attr in self._constructed_attrs:
                self.__delattr(attr, None)
            elif attr in self.__dict__:
                del self.__dict__[attr]
        else:
            del self.__dict__[attr]

    def __delattr(self, attr, exception):
        """
        Deletes a previously constructed *attr*. Its destructor will receive the
        given *exception*.

        Note: this function assumes that the *attr* is indeed a registered
        context member. It will behave unexpectedly when called with an *attr*
        that has no registration.
        """
        constructor_value = self._constructed_attrs[attr]
        del self._constructed_attrs[attr]
        self.log.debug('Deleting member %s' % attr)
        destructor = self._conf.registrations[attr].destructor
        if destructor:
            self.log.debug('Calling destructor of %s' % attr)
            if self._conf.registrations[attr].cached:
                destructor(self, constructor_value, None)
            else:
                destructor(self, None)
        try:
            del self.__dict__[attr]
        except KeyError:
            # destructor might have deleted self.attr already
            pass

    def destroy(self, exception=None):
        """
        Cleans up this context and makes it unusable.

        After calling this function, this object will lose all its magic and
        behave like an empty class.

        The optional *exception*, that is the cause of this method call, will be
        passed to the destructors of every :term:`context member`.
        """
        if not self._active:
            return
        self.log.debug('Destroying')
        self._active = False
        for attr in reversed(list(self._constructed_attrs.keys())):
            self.__delattr(attr, exception)
        for callback in self._conf._destroy_callbacks:
            callback(self, exception)
        tx = self.tx_manager.get()
        if exception or tx.isDoomed():
            tx.abort()
        else:
            tx.commit()
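
# A sketch of registering and consuming a context member. The `register`
# call and `open_connection` helper are hypothetical; only `registrations`,
# `constructor`, `destructor` and `cached` are visible in the code above.
ctx_conf.register(
    'db',
    constructor=lambda ctx: open_connection(),
    destructor=lambda ctx, conn, exc: conn.close(),
    cached=True)

with ctx_conf.Context() as ctx:
    ctx.db  # constructed lazily on first access, cached for later use
# On __exit__, destructors run in reverse construction order and the
# context's transaction is committed (or aborted on error / doomed tx).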
Example #47
class Application(tornado.web.Application):

    NO_PARALLELISM = 1
    DEFAULT_PARALLELISM = 5

    def __init__(self,
                 port,
                 agentConfig,
                 watchdog=True,
                 skip_ssl_validation=False,
                 use_simple_http_client=False):
        self._port = int(port)
        self._agentConfig = agentConfig
        self._metrics = {}
        self._dns_cache = None
        AgentTransaction.set_application(self)
        AgentTransaction.set_endpoints(agentConfig['endpoints'])
        if agentConfig['endpoints'] == {}:
            log.warning(
                u"No valid endpoint found. Forwarder will drop all incoming payloads."
            )
        AgentTransaction.set_request_timeout(agentConfig['forwarder_timeout'])

        max_parallelism = self.NO_PARALLELISM
        # Multiple endpoints => enable parallelism
        if len(agentConfig['endpoints']) > 1:
            max_parallelism = self.DEFAULT_PARALLELISM

        self._tr_manager = TransactionManager(MAX_WAIT_FOR_REPLAY,
                                              MAX_QUEUE_SIZE,
                                              THROTTLING_DELAY,
                                              max_parallelism=max_parallelism)
        AgentTransaction.set_tr_manager(self._tr_manager)

        self._watchdog = None
        self.skip_ssl_validation = skip_ssl_validation or _is_affirmative(
            agentConfig.get('skip_ssl_validation'))
        self.agent_dns_caching = _is_affirmative(
            agentConfig.get('dns_caching'))
        self.agent_dns_ttl = int(agentConfig.get('dns_ttl', DEFAULT_DNS_TTL))
        if self.agent_dns_caching:
            self._dns_cache = DNSCache(ttl=self.agent_dns_ttl)
        self.use_simple_http_client = use_simple_http_client
        if self.skip_ssl_validation:
            log.info(
                "Skipping SSL hostname validation, useful when using a transparent proxy"
            )

        # Monitor activity
        if watchdog:
            watchdog_timeout = TRANSACTION_FLUSH_INTERVAL * WATCHDOG_INTERVAL_MULTIPLIER / 1000
            self._watchdog = Watchdog.create(
                watchdog_timeout, max_resets=WATCHDOG_HIGH_ACTIVITY_THRESHOLD)

    def get_from_dns_cache(self, url):
        if not self.agent_dns_caching:
            log.debug('Caching disabled, not resolving.')
            return url

        location = urlparse(url)
        resolve = self._dns_cache.resolve(location.netloc)
        return "{scheme}://{ip}".format(scheme=location.scheme, ip=resolve)

    def log_request(self, handler):
        """ Override the tornado logging method.
        If everything goes well, log level is DEBUG.
        Otherwise it's WARNING or ERROR depending on the response code. """
        if handler.get_status() < 400:
            log_method = log.debug
        elif handler.get_status() < 500:
            log_method = log.warning
        else:
            log_method = log.error

        request_time = 1000.0 * handler.request.request_time()
        log_method(u"%d %s %.2fms", handler.get_status(),
                   handler._request_summary(), request_time)

    def appendMetric(self, prefix, name, host, device, ts, value):

        if prefix in self._metrics:
            metrics = self._metrics[prefix]
        else:
            metrics = {}
            self._metrics[prefix] = metrics

        if name in metrics:
            metrics[name].append([host, device, ts, value])
        else:
            metrics[name] = [[host, device, ts, value]]
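
    # The accumulated structure is a two-level dict keyed by prefix, then by
    # metric name, with one [host, device, ts, value] row per data point, e.g.
    # after appendMetric('series', 'cpu.user', 'web-1', None, ts, 0.4):
    #     self._metrics == {'series': {'cpu.user': [['web-1', None, ts, 0.4]]}}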

    def _postMetrics(self):

        if len(self._metrics) > 0:
            self._metrics['uuid'] = get_uuid()
            self._metrics['internalHostname'] = get_hostname(self._agentConfig)
            self._metrics['apiKey'] = self._agentConfig['api_key']
            MetricTransaction(json.dumps(self._metrics),
                              headers={'Content-Type': 'application/json'})
            self._metrics = {}

    def run(self):
        handlers = [
            (r"/intake/?", AgentInputHandler),
            (r"/intake/metrics?", MetricsAgentInputHandler),
            (r"/intake/metadata?", MetadataAgentInputHandler),
            (r"/api/v1/series/?", ApiInputHandler),
            (r"/api/v1/check_run/?", ApiCheckRunHandler),
            (r"/status/?", StatusHandler),
        ]

        settings = dict(
            cookie_secret="12oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=",
            xsrf_cookies=False,
            debug=False,
            log_function=self.log_request)

        non_local_traffic = self._agentConfig.get("non_local_traffic", False)

        tornado.web.Application.__init__(self, handlers, **settings)
        http_server = tornado.httpserver.HTTPServer(self)

        try:
            # non_local_traffic must be == True to match, not just some non-false value
            if non_local_traffic is True:
                http_server.listen(self._port)
            else:
                # localhost in lieu of 127.0.0.1 to support IPv6
                try:
                    http_server.listen(self._port,
                                       address=self._agentConfig['bind_host'])
                except gaierror:
                    log.warning(
                        "localhost seems undefined in your host file, using 127.0.0.1 instead"
                    )
                    http_server.listen(self._port, address="127.0.0.1")
                except socket_error as e:
                    if "Errno 99" in str(e):
                        log.warning(
                            "IPv6 doesn't seem to be fully supported. Falling back to IPv4"
                        )
                        http_server.listen(self._port, address="127.0.0.1")
                    else:
                        raise
        except socket_error as e:
            log.exception(
                "Socket error %s. Is another application listening on the same port ? Exiting",
                e)
            sys.exit(1)
        except Exception:
            log.exception("Uncaught exception. Forwarder is exiting.")
            sys.exit(1)

        log.info("Listening on port %d" % self._port)

        # Register callbacks
        self.mloop = tornado.ioloop.IOLoop.current()

        logging.getLogger().setLevel(get_logging_config()['log_level']
                                     or logging.INFO)

        def flush_trs():
            if self._watchdog:
                self._watchdog.reset()
            self._postMetrics()
            self._tr_manager.flush()

        tr_sched = tornado.ioloop.PeriodicCallback(flush_trs,
                                                   TRANSACTION_FLUSH_INTERVAL,
                                                   io_loop=self.mloop)

        # Register optional Graphite listener
        gport = self._agentConfig.get("graphite_listen_port", None)
        if gport is not None:
            log.info("Starting graphite listener on port %s" % gport)
            from graphite import GraphiteServer
            gs = GraphiteServer(self,
                                get_hostname(self._agentConfig),
                                io_loop=self.mloop)
            if non_local_traffic is True:
                gs.listen(gport)
            else:
                gs.listen(gport, address="localhost")

        # Start everything
        if self._watchdog:
            self._watchdog.reset()
        tr_sched.start()

        self.mloop.start()
        log.info("Stopped")

    def stop(self):
        self.mloop.stop()
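
# A minimal sketch of standing up this forwarder. The config values are
# illustrative only; the real agent assembles agentConfig from its config file.
config = {
    'endpoints': {'https://app.datadoghq.com': ['my_api_key']},
    'api_key': 'my_api_key',
    'forwarder_timeout': 20,
    'bind_host': 'localhost',
}
app = Application(17123, config, watchdog=False)
try:
    app.run()   # blocks on the tornado IOLoop until stopped
except KeyboardInterrupt:
    app.stop()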
    def testMemoryLimit(self):
        """Test memory limit as well as simple flush"""

        # No throttling, no delay for replay
        trManager = TransactionManager(timedelta(seconds=0), MAX_QUEUE_SIZE,
                                       timedelta(seconds=0))

        step = 10
        oneTrSize = (MAX_QUEUE_SIZE / step) - 1
        for i in xrange(step):
            tr = memTransaction(oneTrSize, trManager)
            trManager.append(tr)

        trManager.flush()

        # There should be exactly step transaction in the list, with
        # a flush count of 1
        self.assertEqual(len(trManager._transactions), step)
        for tr in trManager._transactions:
            self.assertEqual(tr._flush_count, 1)

        # Try to add one more
        tr = memTransaction(oneTrSize + 10, trManager)
        trManager.append(tr)

        # At this point, transaction one (the oldest) should have been removed from the list
        self.assertEqual(len(trManager._transactions), step)
        for tr in trManager._transactions:
            self.assertNotEqual(tr._id, 1)

        trManager.flush()
        self.assertEqual(len(trManager._transactions), step)
        # Check and allow transactions to be flushed
        for tr in trManager._transactions:
            tr.is_flushable = True
            # Last transaction has been flushed only once
            if tr._id == step + 1:
                self.assertEqual(tr._flush_count, 1)
            else:
                self.assertEqual(tr._flush_count, 2)

        trManager.flush()
        self.assertEqual(len(trManager._transactions), 0)
Example #49
class Application(tornado.web.Application):

    def __init__(self, port, agentConfig, watchdog=True):
        self._port = int(port)
        self._agentConfig = agentConfig
        self._metrics = {}
        MetricTransaction.set_application(self)
        MetricTransaction.set_endpoints()
        self._tr_manager = TransactionManager(MAX_WAIT_FOR_REPLAY,
            MAX_QUEUE_SIZE, THROTTLING_DELAY)
        MetricTransaction.set_tr_manager(self._tr_manager)

        self._watchdog = None
        if watchdog:
            watchdog_timeout = TRANSACTION_FLUSH_INTERVAL * WATCHDOG_INTERVAL_MULTIPLIER
            self._watchdog = Watchdog(watchdog_timeout)

    def appendMetric(self, prefix, name, host, device, ts, value):

        if prefix in self._metrics:
            metrics = self._metrics[prefix]
        else:
            metrics = {}
            self._metrics[prefix] = metrics

        if name in metrics:
            metrics[name].append([host, device, ts, value])
        else:
            metrics[name] = [[host, device, ts, value]]

    def _postMetrics(self):

        if len(self._metrics) > 0:
            self._metrics['uuid'] = get_uuid()
            self._metrics['internalHostname'] = gethostname(self._agentConfig)
            self._metrics['apiKey'] = self._agentConfig['api_key']
            MetricTransaction(self._metrics, {})
            self._metrics = {}

    def run(self):
        handlers = [
            (r"/intake/?", AgentInputHandler),
            (r"/api/v1/series/?", ApiInputHandler),
            (r"/status/?", StatusHandler),
        ]

        settings = dict(
            cookie_secret="12oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=",
            xsrf_cookies=False,
            debug=False,
        )

        non_local_traffic = self._agentConfig.get("non_local_traffic", False)

        tornado.web.Application.__init__(self, handlers, **settings)
        http_server = tornado.httpserver.HTTPServer(self)

        # set the root logger to warn so tornado is less chatty
        logging.getLogger().setLevel(logging.WARNING)

        # but keep the forwarder logger at the original level
        forwarder_logger = logging.getLogger('forwarder')
        log_config = get_logging_config()
        forwarder_logger.setLevel(log_config['log_level'] or logging.INFO)

        # non_local_traffic must be == True to match, not just some non-false value
        if non_local_traffic is True:
            http_server.listen(self._port)
        else:
            # localhost in lieu of 127.0.0.1 to support IPv6
            try:
                http_server.listen(self._port, address = "localhost")
            except gaierror:
                log.warning("Warning localhost seems undefined in your host file, using 127.0.0.1 instead")
                http_server.listen(self._port, address = "127.0.0.1")

        log.info("Listening on port %d" % self._port)

        # Register callbacks
        self.mloop = tornado.ioloop.IOLoop.instance()

        def flush_trs():
            if self._watchdog:
                self._watchdog.reset()
            self._postMetrics()
            self._tr_manager.flush()

        tr_sched = tornado.ioloop.PeriodicCallback(flush_trs, TRANSACTION_FLUSH_INTERVAL,
                                                   io_loop=self.mloop)

        # Register optional Graphite listener
        gport = self._agentConfig.get("graphite_listen_port", None)
        if gport is not None:
            log.info("Starting graphite listener on port %s" % gport)
            from graphite import GraphiteServer
            gs = GraphiteServer(self, gethostname(self._agentConfig), io_loop=self.mloop)
            if non_local_traffic is True:
                gs.listen(gport)
            else:
                gs.listen(gport, address = "localhost")

        # Start everything
        if self._watchdog:
            self._watchdog.reset()
        tr_sched.start()

        self.mloop.start()
        log.info("Stopped")

    def stop(self):
        self.mloop.stop()
Example #50
class Application(tornado.web.Application):

    def __init__(self, port, agentConfig, watchdog=True, skip_ssl_validation=False):
        self._port = int(port)
        self._agentConfig = agentConfig
        self._metrics = {}
        MetricTransaction.set_application(self)
        MetricTransaction.set_endpoints()
        self._tr_manager = TransactionManager(MAX_WAIT_FOR_REPLAY,
            MAX_QUEUE_SIZE, THROTTLING_DELAY)
        MetricTransaction.set_tr_manager(self._tr_manager)

        self._watchdog = None
        self.skip_ssl_validation = skip_ssl_validation or agentConfig.get('skip_ssl_validation', False)
        if self.skip_ssl_validation:
            log.info("Skipping SSL hostname validation, useful when using a transparent proxy")

        if watchdog:
            watchdog_timeout = TRANSACTION_FLUSH_INTERVAL * WATCHDOG_INTERVAL_MULTIPLIER
            self._watchdog = Watchdog(watchdog_timeout,
                max_mem_mb=agentConfig.get('limit_memory_consumption', None))

    def log_request(self, handler):
        """ Override the tornado logging method.
        If everything goes well, log level is DEBUG.
        Otherwise it's WARNING or ERROR depending on the response code. """
        if handler.get_status() < 400:
            log_method = log.debug
        elif handler.get_status() < 500:
            log_method = log.warning
        else:
            log_method = log.error
        request_time = 1000.0 * handler.request.request_time()
        log_method("%d %s %.2fms", handler.get_status(),
                   handler._request_summary(), request_time)

    def appendMetric(self, prefix, name, host, device, ts, value):

        if prefix in self._metrics:
            metrics = self._metrics[prefix]
        else:
            metrics = {}
            self._metrics[prefix] = metrics

        if name in metrics:
            metrics[name].append([host, device, ts, value])
        else:
            metrics[name] = [[host, device, ts, value]]

    def _postMetrics(self):

        if len(self._metrics) > 0:
            self._metrics['uuid'] = get_uuid()
            self._metrics['internalHostname'] = get_hostname(self._agentConfig)
            self._metrics['apiKey'] = self._agentConfig['api_key']
            MetricTransaction(json.dumps(self._metrics),
                headers={'Content-Type': 'application/json'})
            self._metrics = {}

    def run(self):
        handlers = [
            (r"/intake/?", AgentInputHandler),
            (r"/api/v1/series/?", ApiInputHandler),
            (r"/status/?", StatusHandler),
        ]

        settings = dict(
            cookie_secret="12oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=",
            xsrf_cookies=False,
            debug=False,
            log_function=self.log_request
        )

        non_local_traffic = self._agentConfig.get("non_local_traffic", False)

        tornado.web.Application.__init__(self, handlers, **settings)
        http_server = tornado.httpserver.HTTPServer(self)

        # non_local_traffic must be == True to match, not just some non-false value
        if non_local_traffic is True:
            http_server.listen(self._port)
        else:
            # localhost in lieu of 127.0.0.1 to support IPv6
            try:
                http_server.listen(self._port, address = "localhost")
            except gaierror:
                log.warning("Warning localhost seems undefined in your host file, using 127.0.0.1 instead")
                http_server.listen(self._port, address = "127.0.0.1")

        log.info("Listening on port %d" % self._port)

        # Register callbacks
        self.mloop = get_tornado_ioloop()

        logging.getLogger().setLevel(get_logging_config()['log_level'] or logging.INFO)

        def flush_trs():
            if self._watchdog:
                self._watchdog.reset()
            self._postMetrics()
            self._tr_manager.flush()

        tr_sched = tornado.ioloop.PeriodicCallback(flush_trs, TRANSACTION_FLUSH_INTERVAL,
                                                   io_loop=self.mloop)

        # Register optional Graphite listener
        gport = self._agentConfig.get("graphite_listen_port", None)
        if gport is not None:
            log.info("Starting graphite listener on port %s" % gport)
            from graphite import GraphiteServer
            gs = GraphiteServer(self, get_hostname(self._agentConfig), io_loop=self.mloop)
            if non_local_traffic is True:
                gs.listen(gport)
            else:
                gs.listen(gport, address = "localhost")

        # Start everything
        if self._watchdog:
            self._watchdog.reset()
        tr_sched.start()

        self.mloop.start()
        log.info("Stopped")

    def stop(self):
        self.mloop.stop()
Example #51
class Forwarder(tornado.web.Application):

    def __init__(self, port, agent_config, watchdog=True, skip_ssl_validation=False,
                 use_simple_http_client=False):
        self._port = int(port)
        self._agentConfig = agent_config
        self._metrics = {}
        MetricTransaction.set_application(self)
        MetricTransaction.set_endpoints(MonAPI(agent_config['Api']))
        self._tr_manager = TransactionManager(MAX_WAIT_FOR_REPLAY, MAX_QUEUE_SIZE, THROTTLING_DELAY)
        MetricTransaction.set_tr_manager(self._tr_manager)

        self._watchdog = None
        self.skip_ssl_validation = skip_ssl_validation or agent_config.get(
            'skip_ssl_validation', False)
        self.use_simple_http_client = use_simple_http_client
        if self.skip_ssl_validation:
            log.info("Skipping SSL hostname validation, useful when using a transparent proxy")

        if watchdog:
            watchdog_timeout = TRANSACTION_FLUSH_INTERVAL * WATCHDOG_INTERVAL_MULTIPLIER
            self._watchdog = Watchdog(
                watchdog_timeout, max_mem_mb=agent_config.get('limit_memory_consumption', None))

    def _post_metrics(self):

        if len(self._metrics) > 0:
            MetricTransaction(self._metrics, headers={'Content-Type': 'application/json'})
            self._metrics = {}

    # TODO: why is the tornado logging method overridden? Perhaps ditch this.
    def log_request(self, handler):
        """ Override the tornado logging method.
        If everything goes well, log level is DEBUG.
        Otherwise it's WARNING or ERROR depending on the response code. """
        if handler.get_status() < 400:
            log_method = log.debug
        elif handler.get_status() < 500:
            log_method = log.warning
        else:
            log_method = log.error
        request_time = 1000.0 * handler.request.request_time()
        log_method("%d %s %.2fms", handler.get_status(),
                   handler._request_summary(), request_time)

    def run(self):
        handlers = [
            (r"/intake/?", AgentInputHandler),
            (r"/api/v1/series/?", AgentInputHandler),
            (r"/status/?", StatusHandler),
        ]

        settings = dict(
            cookie_secret="12oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=",
            xsrf_cookies=False,
            debug=False,
            log_function=self.log_request
        )

        non_local_traffic = self._agentConfig.get("non_local_traffic", False)

        tornado.web.Application.__init__(self, handlers, **settings)
        http_server = tornado.httpserver.HTTPServer(self)

        try:
            # non_local_traffic must be == True to match, not just some non-false value
            if non_local_traffic is True:
                http_server.listen(self._port)
            else:
                # localhost in lieu of 127.0.0.1 to support IPv6
                try:
                    http_server.listen(self._port, address="localhost")
                except gaierror:
                    log.warning(
                        "localhost seems undefined in your host file, using 127.0.0.1 instead")
                    http_server.listen(self._port, address="127.0.0.1")
                except socket_error as e:
                    if "Errno 99" in str(e):
                        log.warning("IPv6 doesn't seem to be fully supported. Falling back to IPv4")
                        http_server.listen(self._port, address="127.0.0.1")
                    else:
                        raise
        except socket_error as e:
            log.exception(
                "Socket error %s. Is another application listening on the same port ? Exiting", e)
            sys.exit(1)
        except Exception:
            log.exception("Uncaught exception. Forwarder is exiting.")
            sys.exit(1)

        log.info("Listening on port %d" % self._port)

        # Register callbacks
        self.mloop = get_tornado_ioloop()

        logging.getLogger().setLevel(get_logging_config()['log_level'] or logging.INFO)

        def flush_trs():
            if self._watchdog:
                self._watchdog.reset()
            self._post_metrics()
            self._tr_manager.flush()

        tr_sched = tornado.ioloop.PeriodicCallback(
            flush_trs, TRANSACTION_FLUSH_INTERVAL, io_loop=self.mloop)

        # Start everything
        if self._watchdog:
            self._watchdog.reset()
        tr_sched.start()

        self.mloop.start()
        log.info("Stopped")

    def stop(self):
        self.mloop.stop()
Example #52
class Application(tornado.web.Application):
    def __init__(self, port, agentConfig, watchdog=True):
        self._port = int(port)
        self._agentConfig = agentConfig
        self._metrics = {}
        MetricTransaction.set_application(self)
        MetricTransaction.set_endpoints()
        self._tr_manager = TransactionManager(MAX_WAIT_FOR_REPLAY, MAX_QUEUE_SIZE, THROTTLING_DELAY)
        MetricTransaction.set_tr_manager(self._tr_manager)

        self._watchdog = None
        if watchdog:
            watchdog_timeout = TRANSACTION_FLUSH_INTERVAL * WATCHDOG_INTERVAL_MULTIPLIER
            self._watchdog = Watchdog(watchdog_timeout)

    def appendMetric(self, prefix, name, host, device, ts, value):

        if prefix in self._metrics:
            metrics = self._metrics[prefix]
        else:
            metrics = {}
            self._metrics[prefix] = metrics

        if name in metrics:
            metrics[name].append([host, device, ts, value])
        else:
            metrics[name] = [[host, device, ts, value]]

    def _postMetrics(self):

        if len(self._metrics) > 0:
            self._metrics["uuid"] = getUuid()
            self._metrics["internalHostname"] = gethostname(self._agentConfig)
            self._metrics["apiKey"] = self._agentConfig["api_key"]
            MetricTransaction(self._metrics, {})
            self._metrics = {}

    def run(self):

        handlers = [
            (r"/intake/?", AgentInputHandler),
            (r"/api/v1/series/?", ApiInputHandler),
            (r"/status/?", StatusHandler),
        ]

        settings = dict(
            cookie_secret="12oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=",
            xsrf_cookies=False,
            debug=True,
        )

        tornado.web.Application.__init__(self, handlers, **settings)
        http_server = tornado.httpserver.HTTPServer(self)
        http_server.listen(self._port)
        logging.info("Listening on port %d" % self._port)

        # Register callbacks
        self.mloop = tornado.ioloop.IOLoop.instance()

        def flush_trs():
            if self._watchdog:
                self._watchdog.reset()
            self._postMetrics()
            self._tr_manager.flush()

        tr_sched = tornado.ioloop.PeriodicCallback(flush_trs, TRANSACTION_FLUSH_INTERVAL, io_loop=self.mloop)

        # Register optional Graphite listener
        gport = self._agentConfig.get("graphite_listen_port", None)
        if gport is not None:
            logging.info("Starting graphite listener on port %s" % gport)
            from graphite import GraphiteServer

            gs = GraphiteServer(self, gethostname(self._agentConfig), io_loop=self.mloop)
            gs.listen(gport)

        # Start everything
        if self._watchdog:
            self._watchdog.reset()
        tr_sched.start()
        self.mloop.start()

    def stop(self):
        self.mloop.stop()
    def evolve(self):
        """Perform a requested evolution

           This method needs to use the component architecture, so
           we'll set it up:

             >>> from zope.component.testing import setUp, tearDown
             >>> setUp()

           We also need a test request:

             >>> from zope.publisher.browser import TestRequest
             >>> request = TestRequest()

           We also need to give it a publication with a database:

             >>> class Publication(object):
             ...     pass

             >>> request.setPublication(Publication())
             >>> from ZODB.tests.util import DB
             >>> db = DB()
             >>> request.publication.db = db

           We need to define some schema managers.  We'll define two
           using the demo package:

             >>> from zope.generations.generations import SchemaManager
             >>> from zope import component as ztapi
             >>> app1 = SchemaManager(0, 1, 'zope.generations.demo')
             >>> ztapi.provideUtility(app1, ISchemaManager, 'foo.app1')
             >>> app2 = SchemaManager(0, 0, 'zope.generations.demo')
             >>> ztapi.provideUtility(app2, ISchemaManager, 'foo.app2')

           And we need to record some data for them in the database.

             >>> from zope.generations.generations import evolve
             >>> evolve(db)

           This sets up the data and actually evolves app1:

             >>> conn = db.open()
             >>> conn.root()[generations_key]['foo.app1']
             1
             >>> conn.root()[generations_key]['foo.app2']
             0

           To evolve a database schema, the user clicks on a submit
           button. If they click on the button for app1, an item will
           be added to the request, which we simulate:

             >>> request.form['evolve-app-foo.app1'] = 'evolve'

           We'll also increase the generation of app1:

             >>> app1.generation = 2

           Now we can create our view:

             >>> view = Managers(None, request)

           Now, if we call its `evolve` method, it should see that the
           app1 evolve button was pressed and evolve app1 to the next
           generation.

             >>> status = view.evolve()
             >>> conn.sync()
             >>> conn.root()[generations_key]['foo.app1']
             2

           The demo evolver just writes the generation to a database key:

             >>> from zope.generations.demo import key
             >>> conn.root()[key]
             ('installed', 'installed', 2)

           Note that, because the demo package has an install script,
           we have entries for that script.

           The returned status should reflect this evolution:

             >>> status['app']
             u'foo.app1'
             >>> status['to']
             2

           Now, given that the database is at the maximum generation
           for app1, we can't evolve it further.  Calling evolve again
           won't evolve anything:

             >>> status = view.evolve()
             >>> conn.sync()
             >>> conn.root()[generations_key]['foo.app1']
             2
             >>> conn.root()[key]
             ('installed', 'installed', 2)

           as the status will indicate by returning a 'to' generation
           of 0:

             >>> status['app']
             u'foo.app1'
             >>> status['to']
             0

           If the request doesn't have the key:

             >>> request.form.clear()

           Then calling evolve does nothing:

             >>> view.evolve()
             >>> conn.sync()
             >>> conn.root()[generations_key]['foo.app1']
             2
             >>> conn.root()[key]
             ('installed', 'installed', 2)

           We'd better clean up:

             >>> db.close()
             >>> tearDown()
           """

        self.managers = managers = dict(
            zope.component.getUtilitiesFor(ISchemaManager))
        db = self._getdb()
        transaction_manager = TransactionManager()
        conn = db.open(transaction_manager=transaction_manager)
        transaction_manager.begin()
        try:
            generations = conn.root().get(generations_key, ())
            request = self.request
            for key in generations:
                generation = generations[key]
                rkey = request_key_format % key
                if rkey in request:
                    manager = managers[key]
                    if generation >= manager.generation:
                        return {'app': key, 'to': 0}

                    context = Context()
                    context.connection = conn
                    generation += 1
                    manager.evolve(context, generation)
                    generations[key] = generation
                    transaction_manager.commit()

                    return {'app': key, 'to': generation}
        finally:
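            # abort() is safe here even after a successful commit(): it merely
            # aborts the fresh (empty) transaction the manager opened after the
            # commit, while rolling everything back on any error path.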
            transaction_manager.abort()
            conn.close()
    def applications(self):
        """Get information about database-generation status

           This method needs to use the component architecture, so
           we'll set it up:

             >>> from zope.component.testing import setUp, tearDown
             >>> setUp()

           We also need a test request:

             >>> from zope.publisher.browser import TestRequest
             >>> request = TestRequest()

           We also need to give it a publication with a database:

             >>> class Publication(object):
             ...     pass

             >>> request.setPublication(Publication())
             >>> from ZODB.tests.util import DB
             >>> db = DB()
             >>> request.publication.db = db

           We need to define some schema managers.  We'll define two
           using the demo package:

             >>> from zope.generations.generations import SchemaManager
             >>> from zope import component as ztapi
             >>> app1 = SchemaManager(0, 1, 'zope.generations.demo')
             >>> ztapi.provideUtility(app1, ISchemaManager, 'foo.app1')
             >>> app2 = SchemaManager(0, 0, 'zope.generations.demo')
             >>> ztapi.provideUtility(app2, ISchemaManager, 'foo.app2')

           And we need to record some data for them in the database.

             >>> from zope.generations.generations import evolve
             >>> evolve(db)

           This sets up the data and actually evolves app1:

             >>> conn = db.open()
             >>> conn.root()[generations_key]['foo.app1']
             1
             >>> conn.root()[generations_key]['foo.app2']
             0

           Now, let's increment app1's generation:

             >>> app1.generation += 1

           so we can evolve it.

           Now we can create our view:

             >>> view = Managers(None, request)

           We call its applications method to get data about
           application generations. We are required to call evolve
           first:

             >>> view.evolve()
             >>> data = list(view.applications())
             >>> data.sort(key=lambda d1: d1['id'])

             >>> for info in data:
             ...     print(info['id'])
             ...     print(info['min'], info['max'], info['generation'])
             ...     print('evolve?', info['evolve'] or None)
             foo.app1
             0 2 1
             evolve? evolve-app-foo.app1
             foo.app2
             0 0 0
             evolve? None

           We'd better clean up:

             >>> db.close()
             >>> tearDown()

        """
        result = []

        db = self._getdb()
        transaction_manager = TransactionManager()
        conn = db.open(transaction_manager=transaction_manager)
        transaction_manager.begin()
        try:
            managers = self.managers
            generations = conn.root().get(generations_key, ())
            for key, generation in generations.items():
                manager = managers.get(key)
                if manager is None: # pragma: no cover
                    continue

                result.append({
                    'id': key,
                    'min': manager.minimum_generation,
                    'max': manager.generation,
                    'generation': generation,
                    'evolve': (generation < manager.generation
                               and request_key_format % key
                               or ''
                               ),
                    })

            return result
        finally:
            transaction_manager.abort()
            conn.close()
Example #55
class Application(tornado.web.Application):

    NO_PARALLELISM = 1
    DEFAULT_PARALLELISM = 5

    def __init__(self, port, agentConfig, watchdog=True,
                 skip_ssl_validation=False, use_simple_http_client=False):
        self._port = int(port)
        self._agentConfig = agentConfig
        self._metrics = {}
        AgentTransaction.set_application(self)
        AgentTransaction.set_endpoints(agentConfig['endpoints'])
        AgentTransaction.set_request_timeout(agentConfig['forwarder_timeout'])

        max_parallelism = self.NO_PARALLELISM
        # Multiple endpoints => enable parallelism
        if len(agentConfig['endpoints']) > 1:
            max_parallelism = self.DEFAULT_PARALLELISM

        self._tr_manager = TransactionManager(MAX_WAIT_FOR_REPLAY,
                                              MAX_QUEUE_SIZE, THROTTLING_DELAY,
                                              max_parallelism=max_parallelism)
        AgentTransaction.set_tr_manager(self._tr_manager)

        self._watchdog = None
        self.skip_ssl_validation = skip_ssl_validation or agentConfig.get('skip_ssl_validation', False)
        self.use_simple_http_client = use_simple_http_client
        if self.skip_ssl_validation:
            log.info("Skipping SSL hostname validation, useful when using a transparent proxy")

        # Monitor activity
        if watchdog:
            watchdog_timeout = TRANSACTION_FLUSH_INTERVAL * WATCHDOG_INTERVAL_MULTIPLIER / 1000
            self._watchdog = Watchdog(
                watchdog_timeout,
                max_mem_mb=agentConfig.get('limit_memory_consumption', None),
                max_resets=WATCHDOG_HIGH_ACTIVITY_THRESHOLD
            )

    def log_request(self, handler):
        """ Override the tornado logging method.
        If everything goes well, log level is DEBUG.
        Otherwise it's WARNING or ERROR depending on the response code. """
        if handler.get_status() < 400:
            log_method = log.debug
        elif handler.get_status() < 500:
            log_method = log.warning
        else:
            log_method = log.error

        request_time = 1000.0 * handler.request.request_time()
        log_method(
            u"%d %s %.2fms",
            handler.get_status(),
            handler._request_summary(), request_time
        )

    def appendMetric(self, prefix, name, host, device, ts, value):

        if prefix in self._metrics:
            metrics = self._metrics[prefix]
        else:
            metrics = {}
            self._metrics[prefix] = metrics

        if name in metrics:
            metrics[name].append([host, device, ts, value])
        else:
            metrics[name] = [[host, device, ts, value]]

    def _postMetrics(self):

        if len(self._metrics) > 0:
            self._metrics['uuid'] = get_uuid()
            self._metrics['internalHostname'] = get_hostname(self._agentConfig)
            self._metrics['apiKey'] = self._agentConfig['api_key']
            MetricTransaction(json.dumps(self._metrics),
                              headers={'Content-Type': 'application/json'})
            self._metrics = {}

    def run(self):
        handlers = [
            (r"/intake/?", AgentInputHandler),
            (r"/intake/metrics?", MetricsAgentInputHandler),
            (r"/intake/metadata?", MetadataAgentInputHandler),
            (r"/api/v1/series/?", ApiInputHandler),
            (r"/api/v1/check_run/?", ApiCheckRunHandler),
            (r"/status/?", StatusHandler),
        ]

        settings = dict(
            cookie_secret="12oETzKXQAGaYdkL5gEmGeJJFuYh7EQnp2XdTP1o/Vo=",
            xsrf_cookies=False,
            debug=False,
            log_function=self.log_request
        )

        non_local_traffic = self._agentConfig.get("non_local_traffic", False)

        tornado.web.Application.__init__(self, handlers, **settings)
        http_server = tornado.httpserver.HTTPServer(self)

        try:
            # non_local_traffic must be == True to match, not just some non-false value
            if non_local_traffic is True:
                http_server.listen(self._port)
            else:
                # localhost in lieu of 127.0.0.1 to support IPv6
                try:
                    http_server.listen(self._port, address=self._agentConfig['bind_host'])
                except gaierror:
                    log.warning("localhost seems undefined in your host file, using 127.0.0.1 instead")
                    http_server.listen(self._port, address="127.0.0.1")
                except socket_error as e:
                    if "Errno 99" in str(e):
                        log.warning("IPv6 doesn't seem to be fully supported. Falling back to IPv4")
                        http_server.listen(self._port, address="127.0.0.1")
                    else:
                        raise
        except socket_error as e:
            log.exception("Socket error %s. Is another application listening on the same port ? Exiting", e)
            sys.exit(1)
        except Exception:
            log.exception("Uncaught exception. Forwarder is exiting.")
            sys.exit(1)

        log.info("Listening on port %d" % self._port)

        # Register callbacks
        self.mloop = get_tornado_ioloop()

        logging.getLogger().setLevel(get_logging_config()['log_level'] or logging.INFO)

        def flush_trs():
            if self._watchdog:
                self._watchdog.reset()
            self._postMetrics()
            self._tr_manager.flush()

        tr_sched = tornado.ioloop.PeriodicCallback(flush_trs, TRANSACTION_FLUSH_INTERVAL,
                                                   io_loop=self.mloop)

        # Register optional Graphite listener
        gport = self._agentConfig.get("graphite_listen_port", None)
        if gport is not None:
            log.info("Starting graphite listener on port %s" % gport)
            from graphite import GraphiteServer
            gs = GraphiteServer(self, get_hostname(self._agentConfig), io_loop=self.mloop)
            if non_local_traffic is True:
                gs.listen(gport)
            else:
                gs.listen(gport, address="localhost")

        # Start everything
        if self._watchdog:
            self._watchdog.reset()
        tr_sched.start()

        self.mloop.start()
        log.info("Stopped")

    def stop(self):
        self.mloop.stop()
Example #56
    def testEndpoints(self):
        """
        Tests that the logic behind the agent version specific endpoints is ok.
        Also tests that these endpoints actually exist.
        """
        raise SkipTest("This test doesn't apply to Server Density.")
        MetricTransaction._endpoints = []
        api_key = "a" * 32
        config = {
            "sd_url": "https://agent.serverdensity.io",
            "api_key": api_key,
            "use_dd": True
        }

        app = Application()
        app.skip_ssl_validation = False
        app._agentConfig = config
        app.use_simple_http_client = True

        trManager = TransactionManager(timedelta(seconds=0), MAX_QUEUE_SIZE, THROTTLING_DELAY)
        trManager._flush_without_ioloop = True  # Use blocking API to emulate tornado ioloop
        MetricTransaction._trManager = trManager
        MetricTransaction.set_application(app)
        MetricTransaction.set_endpoints()

        transaction = MetricTransaction(None, {}, "")
        endpoints = [transaction.get_url(e) for e in transaction._endpoints]
        expected = ['https://{0}-app.agent.datadoghq.com/intake/?api_key={1}'.format(
            get_version().replace(".", "-"), api_key)]
        self.assertEqual(endpoints, expected, (endpoints, expected))

        for url in endpoints:
            r = requests.post(url, data=json.dumps({"foo": "bar"}),
                              headers={'Content-Type': "application/json"})
            r.raise_for_status()

        # API Metric Transaction
        transaction = APIMetricTransaction(None, {})
        endpoints = [transaction.get_url(e) for e in transaction._endpoints]
        expected = ['https://{0}-app.agent.datadoghq.com/api/v1/series/?api_key={1}'.format(
            get_version().replace(".", "-"), api_key)]
        self.assertEqual(endpoints, expected, (endpoints, expected))

        for url in endpoints:
            r = requests.post(url, data=json.dumps({"foo": "bar"}),
                              headers={'Content-Type': "application/json"})
            r.raise_for_status()

        # API Service Check Transaction
        APIServiceCheckTransaction._trManager = trManager
        APIServiceCheckTransaction.set_application(app)
        APIServiceCheckTransaction.set_endpoints()

        transaction = APIServiceCheckTransaction(None, {})
        endpoints = [transaction.get_url(e) for e in transaction._endpoints]
        expected = ['https://{0}-app.agent.datadoghq.com/api/v1/check_run/?api_key={1}'.format(
            get_version().replace(".", "-"), api_key)]
        self.assertEqual(endpoints, expected, (endpoints, expected))

        for url in endpoints:
            r = requests.post(url, data=json.dumps({'check': 'test', 'status': 0}),
                              headers={'Content-Type': "application/json"})
            r.raise_for_status()
Example #57
    def test_explicit_mode(self):
        from .. import TransactionManager
        from ..interfaces import AlreadyInTransaction, NoTransaction

        tm = TransactionManager()
        self.assertFalse(tm.explicit)

        tm = TransactionManager(explicit=True)
        self.assertTrue(tm.explicit)
        for name in 'get', 'commit', 'abort', 'doom', 'isDoomed', 'savepoint':
            with self.assertRaises(NoTransaction):
                getattr(tm, name)()

        t = tm.begin()
        with self.assertRaises(AlreadyInTransaction):
            tm.begin()

        self.assertTrue(t is tm.get())

        self.assertFalse(tm.isDoomed())
        tm.doom()
        self.assertTrue(tm.isDoomed())
        tm.abort()

        for name in 'get', 'commit', 'abort', 'doom', 'isDoomed', 'savepoint':
            with self.assertRaises(NoTransaction):
                getattr(tm, name)()

        t = tm.begin()
        self.assertFalse(tm.isDoomed())
        with self.assertRaises(AlreadyInTransaction):
            tm.begin()
        tm.savepoint()
        tm.commit()
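
# A minimal sketch of explicit mode as exercised above (assuming the
# `transaction` package API): every transaction must be opened with begin()
# before get()/commit()/abort()/savepoint() may be used.
tm = TransactionManager(explicit=True)
t = tm.begin()
# ... do work; tm.doom() marks the transaction as unable to commit ...
if tm.isDoomed():
    tm.abort()
else:
    tm.commit()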
Example #58
    def testMemoryLimit(self):
        """Test memory limit as well as simple flush"""

        # No throttling, no delay for replay
        trManager = TransactionManager(timedelta(seconds=0), MAX_QUEUE_SIZE, timedelta(seconds=0))

        step = 10
        oneTrSize = (MAX_QUEUE_SIZE / step) - 1
        for i in xrange(step):
            tr = memTransaction(oneTrSize, trManager)
            trManager.append(tr)

        trManager.flush()

        # There should be exactly step transaction in the list, with
        # a flush count of 1
        self.assertEqual(len(trManager._transactions), step)
        for tr in trManager._transactions:
            self.assertEqual(tr._flush_count, 1)

        # Try to add one more
        tr = memTransaction(oneTrSize + 10, trManager)
        trManager.append(tr)

        # At this point, transaction one (the oldest) should have been removed from the list
        self.assertEqual(len(trManager._transactions), step)
        for tr in trManager._transactions:
            self.assertNotEqual(tr._id, 1)

        trManager.flush()
        self.assertEqual(len(trManager._transactions), step)
        # Check and allow transactions to be flushed
        for tr in trManager._transactions:
            tr.is_flushable = True
            # Last transaction has been flushed only once
            if tr._id == step + 1:
                self.assertEqual(tr._flush_count, 1)
            else:
                self.assertEqual(tr._flush_count, 2)

        trManager.flush()
        self.assertEqual(len(trManager._transactions), 0)
    def __init__(self, *args, **kwargs):
        self.made_seekable = 0
        self.tm = TransactionManager()
        super(DummyRequest, self).__init__(*args, **kwargs)