class LoggingTestCase(BaseTestCase):
    """Ensure that proper debug logging is done."""

    @defer.inlineCallbacks
    def setUp(self):
        """Initialize this testcase."""
        yield super(LoggingTestCase, self).setUp()
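        # MementoHandler keeps every log record it handles in memory, so the
        # tests below can assert on emitted messages via check_debug() et al.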
        self.memento = MementoHandler()
        restful.logger.addHandler(self.memento)
        restful.logger.setLevel(logging.DEBUG)
        self.addCleanup(restful.logger.removeHandler, self.memento)

        self.rc = restful.RestfulClient(SAMPLE_SERVICE_IRI)
        self.addCleanup(self.rc.shutdown)

    @defer.inlineCallbacks
    def test_log_rest_call(self):
        """Check that proper DEBUG is made for every REST call."""
        yield self.rc.restcall(SAMPLE_OPERATION, **SAMPLE_ARGS)

        expected_msgs = (
            SAMPLE_SERVICE_IRI + SAMPLE_NAMESPACE,
        )
        self.assertTrue(self.memento.check_debug(*expected_msgs))

    @defer.inlineCallbacks
    def test_log_json_loads_exception(self):
        """Check that json load errors are properly logged."""
        invalid_json = 'NOTAVALIDJSON'
        self.patch(self.wc, 'return_value', invalid_json)
        yield self.assertFailure(self.rc.restcall(SAMPLE_OPERATION),
                                 ValueError)

        self.memento.debug = True
        expected_msgs = (
            ValueError,
            'Can not load json from REST request response',
            invalid_json
        )
        self.assertTrue(self.memento.check_exception(*expected_msgs))
class ClientDummyAuthTests(AuthenticationBaseTestCase):
    """Client authentication tests using the dummy auth provider."""

    auth_provider_class = DummyAuthProvider

    @defer.inlineCallbacks
    def setUp(self):
        yield super(ClientDummyAuthTests, self).setUp()
        self.creds = "open sesame"
        self.bad_creds = "not my secret"
        self.handler = MementoHandler()
        logger = logging.getLogger("storage.server")
        logger.addHandler(self.handler)
        self.addCleanup(logger.removeHandler, self.handler)
        self.handler.setLevel(logging.DEBUG)

    def assert_auth_ok_logging(self):
        self.assertTrue(self.handler.check_debug("authenticated user", "OK", self.usr0.username))
        self.assertFalse(self.handler.check_warning("missing user"))

    def assert_auth_ok_missing_user(self):
        self.assertTrue(self.handler.check_debug("missing user", "(id=%s)" % self.usr0.id))
        self.assertFalse(self.handler.check_info("authenticated user"))

    @defer.inlineCallbacks
    def test_auth_ok_user_ok(self):
        """Correct authentication must succeed."""
        yield self.callback_test(self.do_auth, credentials=self.creds, add_default_callbacks=True)
        self.assert_auth_ok_logging()

    @defer.inlineCallbacks
    def test_auth_ok_bad_user(self):
        """Non existing user must fail authentication."""
        # make the user getter fail
        self.patch(self.service.factory.content, "get_user_by_id", lambda *a, **k: defer.fail(DoesNotExist()))

        d = self.callback_test(self.do_auth, credentials=self.creds, add_default_callbacks=True)
        yield self.assertFailure(d, protocol_errors.AuthenticationFailedError)

        self.assert_auth_ok_missing_user()

    @defer.inlineCallbacks
    def test_auth_ok_with_session_id(self):
        """Correct authentication must succeed and include the session_id."""
        auth_request = yield self.callback_test(self.do_auth, credentials=self.creds, add_default_callbacks=True)

        protocol = self.service.factory.protocols[0]
        self.assertEqual(auth_request.session_id, str(protocol.session_id))

    @defer.inlineCallbacks
    def test_auth_ok_with_metadata(self):
        """Correct authentication must succeed and include metadata."""
        m_called = []
        self.service.factory.metrics.meter = lambda *a: m_called.append(a)

        metadata = {u"platform": u"linux2", u"version": u"1.0", u"foo": u"bar"}
        yield self.callback_test(self.do_auth, credentials=self.creds, metadata=metadata, add_default_callbacks=True)

        self.assertTrue(self.handler.check_info("Client metadata: %s" % metadata))
        self.assertIn(("client.platform.linux2", 1), m_called)
        self.assertIn(("client.version.1_0", 1), m_called)
        self.assertNotIn(("client.foo.bar", 1), m_called)

    def test_auth_fail(self):
        """Wrong secret must fail."""

        def test(client, **kwargs):
            d = self.do_auth(client, credentials=self.bad_creds)
            d.addCallbacks(
                lambda _: client.test_fail(Exception("Should not succeed.")), lambda _: client.test_done("ok")
            )

        return self.callback_test(test)

    def test_get_root(self):
        """Must receive the root after authentication."""

        @defer.inlineCallbacks
        def test(client, **kwargs):
            yield self.do_auth(client, credentials=self.creds)
            root_id = yield client.get_root()
            self.assertIsNotNone(root_id)

        return self.callback_test(test, add_default_callbacks=True)

    def test_get_root_twice(self):
        """Get root must keep the root id."""

        @defer.inlineCallbacks
        def test(client, **kwargs):
            yield self.do_auth(client, credentials=self.creds)
            root_id1 = yield client.get_root()
            root_id2 = yield client.get_root()
            self.assertEqual(root_id1, root_id2)

        return self.callback_test(test, add_default_callbacks=True)

    def test_user_becomes_inactive(self):
        """After StorageUser authentication ok it becomes inactive."""

        @defer.inlineCallbacks
        def test(client):
            """Test."""
            yield self.do_auth(client, credentials=self.creds)
            root_id = yield client.get_root()

            # create one file, should be ok
            yield client.make_file(request.ROOT, root_id, "f1")

            # cancel user subscription, so it needs
            # to get it again from the DB
            self.usr0.update(subscription=False)

            # create second file, should NOT be ok
            try:
                yield client.make_file(request.ROOT, root_id, "f2")
            except protocol_errors.DoesNotExistError:
                pass  # failed as we expected
            else:
                client.test_fail("It should have failed!")

        return self.callback_test(test, add_default_callbacks=True)
class HeartbeatListenerTestCase(TestCase):
    """Tests for HeartbeatListener class."""

    def setUp(self):
        super(HeartbeatListenerTestCase, self).setUp()
        self.stdin = StringIO()
        self.stdout = StringIO()
        self.stderr = StringIO()
        self.mocker = Mocker()
        self.rpc = self.mocker.mock()
        self.listener = HeartbeatListener(1, 10, ['foo'], [], self.rpc,
                                          stdin=self.stdin, stdout=self.stdout,
                                          stderr=self.stderr)
        self.next_fail = {}
        self.handler = MementoHandler()
        self.listener.logger.addHandler(self.handler)
        self.listener.logger.setLevel(logging.DEBUG)
        self.handler.setLevel(logging.DEBUG)
        self.listener.logger.propagate = False
        self.processes = [dict(name="heartbeat", group="heartbeat", pid="101",
                               state=RUNNING)]
        self.handler.debug = True

    def tearDown(self):
        self.listener.logger.removeHandler(self.handler)
        self.handler.close()
        self.next_fail = None
        self.handler = None
        self.listener = None
        super(HeartbeatListenerTestCase, self).tearDown()

    def fail_next_stop(self, pname):
        """Make next stopProcess to fail."""
        expect(self.rpc.supervisor.stopProcess(pname)).throw(
            xmlrpclib.Fault(42, "Failed to stop the process."))

    def fail_next_start(self, pname):
        """Make next startProcess to fail."""
        expect(self.rpc.supervisor.startProcess(pname)).throw(
            xmlrpclib.Fault(42, "Failed to start the process."))

    def test_restart(self):
        """Test the restart method."""
        expect(self.rpc.supervisor.stopProcess("foo"))
        expect(self.rpc.supervisor.startProcess("foo"))
        with self.mocker:
            self.listener.restart("foo", "testing")
        self.assertTrue(self.handler.check_info("Restarting foo (last "
                                                "hearbeat: testing)"))

    def test_restart_fail_stop(self):
        """Test the restart method failing to stop the process."""
        self.fail_next_stop("foo")
        last = time.time()
        with self.mocker:
            try:
                self.listener.restart("foo", last)
            except xmlrpclib.Fault:
                msg = ("Failed to stop process %s (last heartbeat: %s), "
                       "exiting: %s") % \
                    ("foo", last, "<Fault 42: 'Failed to stop the process.'>")
                self.assertTrue(self.handler.check_error(msg))
            else:
                self.fail("Should get an xmlrpclib.Fault")

    def test_restart_fail_start(self):
        """Test the restart method failing to start the process."""
        expect(self.rpc.supervisor.stopProcess("foo"))
        self.fail_next_start("foo")
        last = time.time()
        with self.mocker:
            try:
                self.listener.restart("foo", last)
            except xmlrpclib.Fault:
                msg = (
                    'Failed to start process %s after stopping it, exiting: %s'
                ) % ("foo", "<Fault 42: 'Failed to start the process.'>")
                self.assertTrue(self.handler.check_error(msg))
            else:
                self.fail("Should get an xmlrpclib.Fault")

    def test_check_processes(self):
        """Test the check_processes method."""
        # add the fake process to the process list
        self.processes.append(dict(name="foo", group="foo", pid="42",
                                   state=RUNNING))
        self.processes.append(dict(name="bar", group="bar", pid="43",
                                   state=RUNNING))
        self.listener.processes = ['bar']
        # 2 processes to restart
        self.listener.data['foo'] = {
            'time': time.time() - (self.listener.timeout + 2)}
        self.listener.data['bar'] = {
            'time': time.time() - (self.listener.timeout + 3)}
        self.listener.data['p-1'] = {
            'time': time.time() - (self.listener.timeout - 1)}
        expect(self.rpc.supervisor.getAllProcessInfo()).result(self.processes)
        expect(self.rpc.supervisor.stopProcess("foo:"))
        expect(self.rpc.supervisor.startProcess("foo:"))
        expect(self.rpc.supervisor.stopProcess("bar:bar"))
        expect(self.rpc.supervisor.startProcess("bar:bar"))
        with self.mocker:
            self.listener.check_processes()

    def test_check_processes_no_data(self):
        """Test the check_processes method with no data of a process."""
        # add the fake process to the process list
        self.processes.append(dict(name="foo", group="foo", pid="42",
                                   state=RUNNING))
        self.processes.append(dict(name="bar", group="bar", pid="43",
                                   state=RUNNING))
        self.listener.processes = ['bar']
        expect(self.rpc.supervisor.getAllProcessInfo()).result(self.processes)
        expect(self.rpc.supervisor.stopProcess("foo:"))
        expect(self.rpc.supervisor.startProcess("foo:"))
        expect(self.rpc.supervisor.stopProcess("bar:bar"))
        expect(self.rpc.supervisor.startProcess("bar:bar"))
        with self.mocker:
            # two processes to restart
            self.listener.check_processes()
        self.assertTrue(self.handler.check_warning(
            "Restarting process foo:foo (42), as we never received a hearbeat"
            " event from it"))
        self.assertTrue(self.handler.check_warning(
            "Restarting process bar:bar (43), as we never received a hearbeat"
            " event from it"))

    def test_check_processes_untracked(self):
        """Test the check_processes method with a untracked proccess."""
        # add the fake process to the process list
        self.processes.append(dict(name="foo-untracked", group="untracked",
                                   pid="43", state=RUNNING))
        # add a new tracked process from an untracked group
        self.processes.append(dict(name="bar-untracked", group="bar", pid="44",
                                   state=RUNNING))
        self.listener.processes = ['bar']
        expect(self.rpc.supervisor.getAllProcessInfo()).result(self.processes)
        with self.mocker:
            self.listener.check_processes()
        self.assertTrue(self.handler.check_info(
            "Ignoring untracked:foo-untracked (43) as isn't tracked."))
        self.assertTrue(self.handler.check_info(
            "Ignoring bar:bar-untracked (44) as isn't tracked."))

    def test_check_processes_not_running(self):
        """Test the check_processes method if the proccess isn't running."""
        # add the fake process to the process list
        self.processes.append(dict(name="foo", group="foo", pid="42",
                                   state=states.ProcessStates.STARTING))
        # add a new tracked process from an untracked group
        self.processes.append(dict(name="bar", group="bar", pid="43",
                                   state=states.ProcessStates.STARTING))
        self.listener.processes = ['bar']
        # 2 processes to restart
        self.listener.data['foo'] = {
            'time': time.time() - (self.listener.timeout + 2)}
        self.listener.data['bar'] = {
            'time': time.time() - (self.listener.timeout + 2)}
        expect(self.rpc.supervisor.getAllProcessInfo()).result(self.processes)
        with self.mocker:
            self.listener.check_processes()
        self.assertTrue(self.handler.check_info(
            "Ignoring foo:foo (42) as isn't running."))
        self.assertTrue(self.handler.check_info(
            "Ignoring bar:bar (43) as isn't running."))

    def test_handle_heartbeat(self):
        """Test handle_heartbeat method."""
        payload = {"time": time.time()}
        self.listener.handle_heartbeat('process_name', 'group_name',
                                       '42', payload)
        info = {"pid": "42", "time": payload["time"],
                "received": self.listener.data["process_name"]["received"]}
        self.assertEqual({"process_name": info}, self.listener.data)

    def test_handle_event(self):
        """Test handle_event method."""
        # patch handle_heartbeat
        called = []

        def handle_heartbeat(process_name, group_name, pid, payload):
            """Fake handle_heartbeat."""
            called.append((process_name, group_name, pid, payload))

        self.listener.handle_heartbeat = handle_heartbeat
        payload_dict = {u"time": time.time(), "type": "heartbeat"}
        raw_data = ("processname:ticker groupname:ticker pid:42\n" +
                    json.dumps(payload_dict))
        raw_header = ("ver:3.0 server:supervisor serial:1 pool:listener "
                      "poolserial:10 eventname:PROCESS_COMMUNICATION_STDOUT"
                      " len:%s\n" % len(raw_data))
        self.stdin.write(raw_header + raw_data)
        self.stdin.seek(0)
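        # supervisor's childutils.get_headers() parses the "key:value" tokens
        # of the header line into a dict; 'eventname' is checked further below.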
        headers = childutils.get_headers(raw_header)
        self.listener._handle_event()
        # check
        self.assertEqual(1, len(called))
        del payload_dict['type']
        self.assertEqual(('ticker', 'ticker', '42', payload_dict), called[0])
        self.assertTrue(self.handler.check_debug(
            "Event '%s' received: %r" % (headers['eventname'], raw_data)))
        # check the stdout info
        self.assertEqual(["READY", "RESULT 2", "OK"],
                         self.stdout.getvalue().split("\n"))

    def test_invalid_event_type(self):
        """Test with an invalid type."""
        payload_dict = {u"time": time.time(), "type": "ping"}
        raw_data = 'processname:ticker groupname:ticker pid:42\n' + \
            json.dumps(payload_dict)
        raw_header = ("ver:3.0 server:supervisor serial:1 pool:listener "
                      "poolserial:10 eventname:PROCESS_COMMUNICATION_STDOUT"
                      " len:%s\n" % len(raw_data))
        self.stdin.write(raw_header + raw_data)
        self.stdin.seek(0)
        self.listener._handle_event()
        # check
        self.assertTrue(self.handler.check_error(
            "Unable to handle event type '%s' - %r" % ('ping', raw_data)))

    def test_invalid_payload(self):
        """Test with an invalid payload."""
        payload_dict = {u"time": time.time(), "type": "ping"}
        raw_data = 'processname:ticker groupname:ticker pid:42\n' + \
            json.dumps(payload_dict) + "<!foo>"
        raw_header = ("ver:3.0 server:supervisor serial:1 pool:listener "
                      "poolserial:10 eventname:PROCESS_COMMUNICATION_STDOUT"
                      " len:%s\n" % len(raw_data))
        self.stdin.write(raw_header + raw_data)
        self.stdin.seek(0)
        self.listener._handle_event()
        # check
        self.assertTrue(self.handler.check_error(
            "Unable to handle event type '%s' - %r" % ('None', raw_data)))

    def test_unhandled_event(self):
        """A unhandled event type."""
        payload_dict = {u"time": time.time(), "type": "ping"}
        raw_data = 'processname:ticker groupname:ticker pid:42\n' + \
            json.dumps(payload_dict)
        raw_header = "ver:3.0 server:supervisor serial:1 pool:heartbeat " + \
            "poolserial:1 eventname:UNKNOWN len:%s\n" % len(raw_data)
        self.stdin.write(raw_header + raw_data)
        self.stdin.seek(0)
        self.listener._handle_event()
        # check
        self.assertTrue(self.handler.check_warning(
            "Received unsupported event: %s - %r" % ('UNKNOWN', raw_data)))

    def test_check_interval(self):
        """Check that we properly check on the specified interval."""
        header = "ver:3.0 server:supervisor serial:1 pool:heartbeat " + \
                 "poolserial:1 eventname:TICK_5 len:0\n"
        expect(self.rpc.supervisor.getAllProcessInfo()).result([])
        self.stdin.write(header)
        self.stdin.seek(0)
        self.listener._handle_event()
        self.assertEqual(self.listener.tick_count, 1)
        self.stdin.seek(0)
        with self.mocker:
            self.listener._handle_event()
class SignalBroadcasterTestCase(TestCase):
    """Test the signal broadcaster code."""

    @defer.inlineCallbacks
    def setUp(self):
        yield super(SignalBroadcasterTestCase, self).setUp()
        self.client = FakeRemoteClient()
        self.sb = ipc.SignalBroadcaster()
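        # SignalBroadcaster is expected to fan each emit_signal() call out to
        # every remote client registered for that signal name (see tests below).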

        self.memento = MementoHandler()
        ipc.logger.addHandler(self.memento)
        ipc.logger.setLevel(logging.DEBUG)
        self.addCleanup(ipc.logger.removeHandler, self.memento)

    def test_remote_register_to_signals(self):
        """Assert that the client was added."""
        signals = ["demo_signal1", "demo_signal2"]
        self.sb.remote_register_to_signals(self.client, signals)
        for signal in signals:
            clients = self.sb.clients_per_signal[signal]
            self.assertTrue(self.client in clients)

    def test_emit_signal(self):
        """Assert that the client method was called."""
        first = 1
        second = 2
        word = 'word'
        signal_name = 'on_test'

        self.client.callRemote(signal_name, first, second, word=word)

        signals = [signal_name]
        self.sb.remote_register_to_signals(self.client, signals)
        self.sb.emit_signal(signal_name, first, second, foo=word)

        self.assertEqual(self.client.called, ((first, second), dict(foo=word)))

    def test_emit_signal_dead_reference(self):
        """Test dead reference while emitting a signal."""
        sample_signal = "sample_signal"
        fake_remote_client = FakeRemoteClient(dead_remote=True)

        self.sb.remote_register_to_signals(fake_remote_client, [sample_signal])
        self.assertIn(fake_remote_client,
                      self.sb.clients_per_signal[sample_signal])

        self.sb.emit_signal(sample_signal)
        self.assertNotIn(fake_remote_client,
                         self.sb.clients_per_signal[sample_signal])

    def test_emit_signal_some_dead_some_not(self):
        """Test a clean reference after a dead one."""
        sample_signal = "sample_signal"
        fake_dead_remote = FakeRemoteClient(dead_remote=True)
        fake_alive_remote = FakeRemoteClient()

        self.sb.remote_register_to_signals(fake_dead_remote, [sample_signal])
        self.sb.remote_register_to_signals(fake_alive_remote, [sample_signal])
        self.sb.emit_signal(sample_signal)

        self.assertTrue(fake_alive_remote.called, "The alive must be called.")

    def test_emit_signal_ignore_missing_handlers(self):
        """A missing signal handler should just log a debug line."""
        fake_remote_client = FakeRemoteClient()

        signal = fake_remote_client.missing_signal
        signals = [signal]
        self.sb.remote_register_to_signals(fake_remote_client, signals)
        sb_clients = self.sb.clients_per_signal[signal]
        self.assertIn(fake_remote_client, sb_clients)
        self.sb.emit_signal(signal)

        expected = ipc.SignalBroadcaster.MSG_NO_SIGNAL_HANDLER % (
            signal,
            fake_remote_client,
        )
        self.assertTrue(self.memento.check_debug(*expected))

    def test_emit_signal_log_other_errors(self):
        """Other errors should be logged as warnings."""
        fake_remote_client = FakeRemoteClient()

        signal = fake_remote_client.failing_signal
        signals = [signal]
        self.sb.remote_register_to_signals(fake_remote_client, signals)
        sb_clients = self.sb.clients_per_signal[signal]
        self.assertIn(fake_remote_client, sb_clients)
        self.sb.emit_signal(signal)

        expected = ipc.SignalBroadcaster.MSG_COULD_NOT_EMIT_SIGNAL % (
            signal,
            fake_remote_client,
            fake_remote_client.random_exception,
        )
        self.assertTrue(self.memento.check_warning(*expected))
class ReactorInspectorTestCase(TwistedTestCase):
    """Test the ReactorInspector class."""

    def setUp(self):
        """Set up."""
        class Helper(object):
            """Fake object with a controllable call."""
            def __init__(self):
                self.call_count = 1
                self.calls = []
                self.ri = None

            def call(self, func):
                """Call function when counter is 0, then stop running."""
                self.call_count -= 1
                self.calls.append(func)
                if self.call_count == 0:
                    for f in self.calls:
                        f()
                if self.call_count <= 0:
                    self.ri.stop()

        class FakeMetrics(object):
            """Fake Metrics object that records calls."""
            def __init__(self):
                """Initialize calls."""
                self.calls = []

            def meter(self, name, count):
                """Record call to meter()."""
                self.calls.append(("meter", name, count))

            def gauge(self, name, val):
                """Record call to gauge()."""
                self.calls.append(("gauge", name, val))

        logger = logging.getLogger("storage.server")
        logger.propagate = False
        logger.setLevel(TRACE)
        self.handler = MementoHandler()
        self.handler.setLevel(TRACE)
        logger.addHandler(self.handler)
        self.addCleanup(logger.removeHandler, self.handler)
        self.helper = Helper()
        self.fake_metrics = FakeMetrics()
        MetricsConnector.register_metrics("reactor_inspector",
                                          instance=self.fake_metrics)
        self.addCleanup(MetricsConnector.unregister_metrics)
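        # As exercised below, ReactorInspector runs in its own thread: it
        # schedules probes on the reactor (here via self.helper.call), measures
        # how long they take, and reports the delay through metrics and logging.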
        self.ri = ReactorInspector(logger, self.helper.call, loop_time=.1)
        self.helper.ri = self.ri

    def run_ri(self, call_count=None, join=True):
        """Set the call count and then run the ReactorInspector."""
        if call_count is not None:
            self.helper.call_count = call_count
        self.start_ts = time.time()
        self.ri.start()
        # Reactor will stop after call_count calls, thanks to helper
        if join:
            self.ri.join()

    def test_stop(self):
        """It stops."""
        self.run_ri(1000, join=False)
        assert self.ri.is_alive()
        self.ri.stop()
        self.ri.join()
        self.assertFalse(self.ri.is_alive())

    @defer.inlineCallbacks
    def test_dump_frames(self):
        """Test how frames are dumped.

        Rules:
        - own frame must not be logged
        - must log all other threads
        - main reactor thread must have special title
        """
        # other thread, whose frame must be logged
        waitingd = defer.Deferred()

        def waiting_function():
            """Function with funny name to be checked later."""
            reactor.callFromThread(waitingd.callback, True)
            # wait() with the default (no) timeout: block until the event is set
            event.wait()

        event = threading.Event()
        threading.Thread(target=waiting_function).start()
        # Make sure the thread has entered the waiting_function
        yield waitingd

        # Set reactor_thread since we're not starting the ReactorInspector
        # thread here.
        self.ri.reactor_thread = threading.currentThread().ident

        # dump frames in other thread, also
        def dumping_function():
            """Function with funny name to be checked later."""
            time.sleep(.1)
            self.ri.dump_frames()
            reactor.callFromThread(d.callback, True)

        d = defer.Deferred()
        threading.Thread(target=dumping_function).start()
        yield d
        event.set()

        # check
        self.assertFalse(self.handler.check_debug("dumping_function"))
        self.assertTrue(self.handler.check_debug("Dumping Python frame",
                                                 "waiting_function"))
        self.assertTrue(self.handler.check_debug("Dumping Python frame",
                                                 "reactor main thread"))

    def test_reactor_ok(self):
        """Reactor working fast."""
        self.run_ri()
        ok_line = self.handler.check(TRACE, "ReactorInspector: ok")
        self.assertTrue(ok_line)
        self.assertTrue(ok_line.args[-1] >= 0)  # Should be near zero delay
        # Check the metrics
        expected_metric = ("gauge", "delay", ok_line.args[-1])
        self.assertEqual([expected_metric], self.fake_metrics.calls)
        self.assertTrue(self.ri.last_responsive_ts >= self.start_ts)

    @defer.inlineCallbacks
    def test_reactor_blocked(self):
        """Reactor not working fast."""
        dump_frames_called = defer.Deferred()
        self.ri.dump_frames = lambda: dump_frames_called.callback(True)
        self.run_ri(0)
        yield dump_frames_called
        log_line = self.handler.check(logging.CRITICAL, "ReactorInspector",
                                      "detected unresponsive")
        self.assertTrue(log_line)
        self.assertTrue(log_line.args[-1] >= .1)  # waited for entire loop time
        # Check the metrics
        expected_metric = ("gauge", "delay", log_line.args[-1])
        self.assertEqual([expected_metric], self.fake_metrics.calls)

        self.assertTrue(self.ri.last_responsive_ts < self.start_ts)

    def test_reactor_back_alive(self):
        """Reactor resurrects after some loops."""
        self.run_ri(3)
        late_line = self.handler.check_warning("ReactorInspector: late",
                                               "got: 0")
        self.assertTrue(late_line)
        self.assertTrue(late_line.args[-1] >= .2)  # At least 2 cycles of delay
        # Check the metrics
        expected_metric = ("gauge", "delay", late_line.args[-1])
        self.assertEqual(expected_metric, self.fake_metrics.calls[-1])

        self.assertTrue(self.ri.queue.empty())
        # A late reactor is not considered responsive (until a successful loop)
        self.assertTrue(self.ri.last_responsive_ts < self.start_ts)
class OffloadQueueTestCase(TwistedTestCase):
    """Tests the OffloadQueue class."""

    def setUp(self):
        """Set up."""
        self.handler = MementoHandler()
        self.handler.setLevel(logging.DEBUG)
        logger = logging.getLogger("ubuntuone.SyncDaemon.OffloadQueue")
        logger.setLevel(logging.DEBUG)
        logger.addHandler(self.handler)
        self.addCleanup(logger.removeHandler, self.handler)
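        # OffloadQueue behaves as a disk-backed FIFO: items are pickled into a
        # temporary file that is rotated when the size limits tested below are
        # exceeded.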
        self.oq = OffloadQueue()
        self.addCleanup(self.oq._tempfile.close)
        return super(OffloadQueueTestCase, self).setUp()

    def test_serialization_tuple(self):
        """Check that it can store tuples of strings."""
        data = ("foo", "bar")
        self.oq.push(data)
        retrieved = self.oq.pop()
        self.assertEqual(data, retrieved)

    def test_serialization_markers(self):
        """Check that it can store markers."""
        marker = MDMarker("foo")
        self.oq.push(marker)
        retrieved = self.oq.pop()
        self.assertTrue(IMarker.providedBy(retrieved))

    def test_fifo_simple(self):
        """Check FIFO queue with one silly value."""
        data = "data"
        self.oq.push(data)
        self.assertEqual(len(self.oq), 1)
        retrieved = self.oq.pop()
        self.assertEqual(data, retrieved)
        self.assertEqual(len(self.oq), 0)

    def test_fifo_double(self):
        """Check FIFO queue with two values."""
        data1, data2 = "data1", "data2"
        self.oq.push(data1)
        self.oq.push(data2)
        self.assertEqual(len(self.oq), 2)
        retrieved = self.oq.pop()
        self.assertEqual(data1, retrieved)
        self.assertEqual(len(self.oq), 1)
        retrieved = self.oq.pop()
        self.assertEqual(data2, retrieved)
        self.assertEqual(len(self.oq), 0)

    def test_fifo_mixed(self):
        """Check FIFO queue with more values."""
        data1, data2, data3 = "data1", "data2", "data3"
        self.oq.push(data1)
        self.oq.push(data2)
        self.assertEqual(data1, self.oq.pop())
        self.oq.push(data3)
        self.assertEqual(data2, self.oq.pop())
        self.assertEqual(data3, self.oq.pop())

    def test_rotate_limit_not_reached(self):
        """File does not rotate if limits are not reached."""
        orig_temp = self.oq._tempfile
        self.oq.push("data")
        self.assertEqual(self.oq._tempfile, orig_temp)
        self.oq.pop()
        self.assertEqual(self.oq._tempfile, orig_temp)

    def _get_data(self, data="data"):
        """Return data to store and it's item size in disk."""
        pickled = pickle.dumps(data, pickle.HIGHEST_PROTOCOL)
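        # STRUCT_SIZE is assumed to be the fixed-size length header stored in
        # front of each pickled item, hence part of the on-disk item size.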
        item_size = len(pickled) + STRUCT_SIZE
        return data, item_size

    def test_rotate_soft_limit_on_push(self):
        """Rotation happens with soft limit on push."""
        # set and check rotation limits
        data, item_size = self._get_data()
        self.oq._rotation_soft_limit = item_size * 2.5
        assert self.oq._rotation_too_big_size > item_size * 10
        assert self.oq._rotation_hard_limit > item_size * 10

        # put two items, removing one so we can save space on rotation
        orig_temp = self.oq._tempfile
        self.oq.push(data)
        self.oq.pop()
        self.oq.push(data)
        self.assertEqual(self.oq._tempfile, orig_temp)

        # push another, now we're over again, but now we can save space
        self.oq.push(data)
        self.assertNotEqual(self.oq._tempfile, orig_temp)

    def test_rotate_soft_limit_on_pop(self):
        """Rotation happens with soft limit on pop."""
        # set and check rotation limits
        data, item_size = self._get_data()
        self.oq._rotation_soft_limit = item_size * 2.5
        assert self.oq._rotation_too_big_size > item_size * 10
        assert self.oq._rotation_hard_limit > item_size * 10

        # put four items
        orig_temp = self.oq._tempfile
        self.oq.push(data)
        self.oq.push(data)
        self.oq.push(data)
        self.oq.push(data)
        self.assertEqual(self.oq._tempfile, orig_temp)

        # pop the first one, we make room and still have more than soft limit
        # and min size, so we rotate
        self.oq.pop()
        self.assertNotEqual(self.oq._tempfile, orig_temp)

    def test_rotate_too_much_data(self):
        """Soft rotation doesn't happen if we have more than the max size."""
        # set and check rotation limits
        data, item_size = self._get_data()
        self.oq._rotation_soft_limit = item_size * 2.5
        self.oq._rotation_too_big_size = item_size
        assert self.oq._rotation_hard_limit > item_size * 10

        # put two items, removing one so we can save space on rotation
        orig_temp = self.oq._tempfile
        self.oq.push(data)
        self.oq.pop()
        self.oq.push(data)
        self.assertEqual(self.oq._tempfile, orig_temp)

        # push another, now we're over but we have too much data to move,
        # so rotation should not happen
        self.oq.push(data)
        self.assertEqual(self.oq._tempfile, orig_temp)

    def test_rotate_hard_limit(self):
        """We rotate on hard limit, no matter what."""
        # set and check rotation limits
        data, item_size = self._get_data()
        self.oq._rotation_soft_limit = item_size * 2.5
        self.oq._rotation_too_big_size = item_size
        self.oq._rotation_hard_limit = item_size * 3.5

        # put two items, removing one so we can save space on rotation
        orig_temp = self.oq._tempfile
        self.oq.push(data)
        self.oq.pop()
        self.oq.push(data)
        self.assertEqual(self.oq._tempfile, orig_temp)

        # push another, now we're over but we have too much data to move,
        # so rotation should not happen
        self.oq.push(data)
        self.assertEqual(self.oq._tempfile, orig_temp)

        # push another one, and check that after going over the hard limit
        # it will rotate no matter what
        self.oq.push(data)
        self.assertNotEqual(self.oq._tempfile, orig_temp)

    def test_rotate_keep_working(self):
        """Just check that all is normal after rotation."""
        data = []
        size = 0
        for i in xrange(10):
            d, s = self._get_data("data" + str(i))
            data.append(d)
            size += s
        self.oq._rotation_soft_limit = size * 0.7
        orig_temp = self.oq._tempfile

        # put one item and remove it, just to make the file rotatable
        results = []
        self.oq.push(data[0])
        results.append(self.oq.pop())

        # push the rest of the data, it should rotate at some point
        for d in data[1:]:
            self.oq.push(d)
        assert self.oq._tempfile != orig_temp

        # pop everything and compare
        while len(self.oq):
            results.append(self.oq.pop())
        self.assertEqual(data, results)

    def test_rotate_removes_old_file(self):
        """Rotation should start a new file and remove the previous one."""
        data, item_size = self._get_data()
        self.oq._rotation_soft_limit = item_size * 2.5
        orig_fname = self.oq._tempfile_name

        self.oq.push(data)
        self.oq.pop()
        self.oq.push(data)
        self.oq.push(data)

        self.assertFalse(os.path.exists(orig_fname))

    def test_log_init_tempfile(self):
        """Log the initial temp file used."""
        self.assertTrue(self.handler.check_debug("Using temporary file", repr(self.oq._tempfile_name)))

    def test_log_rotate(self):
        """Log new file in rotation."""
        data, item_size = self._get_data()
        self.oq._rotation_soft_limit = item_size * 2.5

        self.oq.push(data)
        self.oq.pop()
        self.oq.push(data)
        self.oq.push(data)

        self.assertTrue(self.handler.check_debug("Rotation into", "moving", repr(self.oq._tempfile_name)))

    def test_safe_rotate_crash(self):
        """All is ok even after rotation crashes when getting temp file."""

        def crash(*a):
            """Will crash."""
            raise NameError("ugly")

        self.patch(tempfile, "mkstemp", crash)

        # do a lot of things, rotating in the middle, checking all is ok
        self.test_rotate_keep_working()
        self.assertTrue(self.handler.check_exception(NameError))
        self.assertTrue(self.oq._in_memory)

    def test_safe_rotate_unlink(self):
        """All is ok after failing to unlink old file."""

        def crash(*a):
            """Will crash."""
            raise NameError("ugly")

        self.patch(os, "unlink", crash)

        # do a lot of things, rotating in the middle, checking all is ok
        self.test_rotate_keep_working()
        self.assertTrue(self.handler.check_warning("Error when removing old tempfile", "NameError"))

    def _test_safe_push_write(self, count):
        """Fail when pushing an item will leave it all ok."""

        class CrashingFile(StringIO.StringIO):
            """File-like object that crashes in second write."""

            def __init__(self):
                self._fail_counter = 0
                StringIO.StringIO.__init__(self)

            def write(self, *a):
                """Crashing write."""
                self._fail_counter += 1
                if self._fail_counter == count:
                    raise ValueError("broken")
                else:
                    StringIO.StringIO.write(self, *a)

        self.oq._tempfile = CrashingFile()

        # will try three items, checking all is ok
        self.test_fifo_mixed()
        self.assertTrue(self.handler.check_exception(ValueError))
        self.assertTrue(self.oq._in_memory)

    def test_safe_push_write_first(self):
        """Fail when pushing an item, on first write."""
        self._test_safe_push_write(1)

    def test_safe_push_write_second(self):
        """Fail when pushing an item, on second write."""
        self._test_safe_push_write(2)
class LoggingTests(TwistedTestCase):
    """Test the logging."""

    @defer.inlineCallbacks
    def setUp(self):
        """Set up."""
        yield super(LoggingTests, self).setUp()
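        # PathLockingTree hands out per-path locks: acquire() returns a deferred
        # that fires with a release() callable, as exercised in the tests below.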
        self.plt = PathLockingTree()

        self.handler = MementoHandler()
        self.plt.logger.setLevel(logging.DEBUG)
        self.plt.logger.propagate = False
        self.plt.logger.addHandler(self.handler)
        self.addCleanup(self.plt.logger.removeHandler, self.handler)

    @defer.inlineCallbacks
    def test_logger_can_be_given(self):
        """Accept an external logger."""
        logger = logging.getLogger("ubuntuone.SyncDaemon.Test")
        handler = MementoHandler()
        logger.addHandler(handler)
        logger.setLevel(logging.DEBUG)
        logger.propagate = False

        # acquire and test
        release = yield self.plt.acquire('path', logger=logger)
        self.assertTrue(handler.check_debug("acquiring on"))

        # release and test
        release()
        self.assertTrue(handler.check_debug("releasing"))

    def test_acquire_single_default(self):
        """Single path, full check."""
        self.plt.acquire('path')
        self.assertTrue(self.handler.check_debug(
                        "acquiring on", "path",
                        "(on_parent=False, on_children=False)", "wait for: 0"))

    def test_acquire_single_on_parent(self):
        """Single path, on parent."""
        self.plt.acquire('path', on_parent=True)
        self.assertTrue(self.handler.check_debug("on_parent=True"))

    def test_acquire_single_on_children(self):
        """Single path, on children."""
        self.plt.acquire('path', on_children=True)
        self.assertTrue(self.handler.check_debug("on_children=True"))

    def test_acquire_single_on_both(self):
        """Single path, on both."""
        self.plt.acquire('path', on_parent=True, on_children=True)
        self.assertTrue(self.handler.check_debug(
                        "(on_parent=True, on_children=True)"))

    def test_acquire_multiple(self):
        """Single path, on both."""
        self.plt.acquire('1', '2', *"abc")
        self.assertTrue(self.handler.check_debug("'1', '2', 'a', 'b', 'c'"))

    def test_acquire_waiting(self):
        """Single path, on both."""
        self.plt.acquire('path')
        self.assertTrue(self.handler.check_debug("wait for: 0"))

        self.plt.acquire('path')
        self.assertTrue(self.handler.check_debug("wait for: 1"))

        self.plt.acquire('path')
        self.assertTrue(self.handler.check_debug("wait for: 2"))

    @defer.inlineCallbacks
    def test_release_simple(self):
        """Single release."""
        release = yield self.plt.acquire("path")
        release()
        self.assertTrue(self.handler.check_debug("releasing",
                                                 "path", "remaining: 0"))

    @defer.inlineCallbacks
    def test_release_double(self):
        """Double release."""
        release1 = yield self.plt.acquire("path1")
        release2 = yield self.plt.acquire("path2")
        release1()
        self.assertTrue(self.handler.check_debug("releasing",
                                                 "path1", "remaining: 1"))
        release2()
        self.assertTrue(self.handler.check_debug("releasing",
                                                 "path2", "remaining: 0"))

    @defer.inlineCallbacks
    def test_release_longer_branches(self):
        """Longer branches."""
        release = yield self.plt.acquire(*"abcde")
        self.plt.acquire(*"abc")
        self.plt.acquire(*"abcdefg")
        self.plt.acquire(*"abklop")
        self.plt.acquire(*"foobar")
        release()
        self.assertTrue(self.handler.check_debug("releasing",
                                                 "'a', 'b', 'c', 'd', 'e'",
                                                 "remaining: 4"))