Example #1
    def test_timeout(self):
        """
        If a L{ProcessTimeLimitReachedError} is fired back, the
        operation-result should have a failed status.
        """
        factory = StubProcessFactory()
        self.manager.add(ScriptExecutionPlugin(process_factory=factory))

        result = self._send_script(sys.executable, "bar", time_limit=30)
        self._verify_script(factory.spawns[0][1], sys.executable, "bar")

        protocol = factory.spawns[0][0]
        protocol.makeConnection(DummyProcess())
        protocol.childDataReceived(2, b"ONOEZ")
        self.manager.reactor.advance(31)
        protocol.processEnded(Failure(ProcessDone(0)))

        def got_result(r):
            self.assertMessages(
                self.broker_service.message_store.get_pending_messages(),
                [{
                    "type": "operation-result",
                    "operation-id": 123,
                    "status": FAILED,
                    "result-text": u"ONOEZ",
                    "result-code": 102
                }])

        result.addCallback(got_result)
        return result
Example #2
    def _run_script(self, username, uid, gid, path):
        expected_uid = uid if uid != os.getuid() else None
        expected_gid = gid if gid != os.getgid() else None

        factory = StubProcessFactory()
        self.plugin.process_factory = factory

        # ignore the call to chown!
        patch_chown = mock.patch("os.chown")
        mock_chown = patch_chown.start()

        result = self.plugin.run_script("/bin/sh", "echo hi", user=username)

        self.assertEqual(len(factory.spawns), 1)
        spawn = factory.spawns[0]
        self.assertEqual(spawn[4], path)
        self.assertEqual(spawn[5], expected_uid)
        self.assertEqual(spawn[6], expected_gid)

        protocol = spawn[0]
        protocol.childDataReceived(1, b"foobar")
        for fd in (0, 1, 2):
            protocol.childConnectionLost(fd)
        protocol.processEnded(Failure(ProcessDone(0)))

        def check(result):
            mock_chown.assert_called_with()
            self.assertEqual(result, "foobar")

        def cleanup(result):
            patch_chown.stop()
            return result

        return result.addErrback(check).addBoth(cleanup)
Example #3
    def test_run_timeout(self):
        filename = self.makeFile("some content")
        self.store.add_graph(123, filename, None)
        factory = StubProcessFactory()
        self.graph_manager.process_factory = factory
        result = self.graph_manager.run()

        self.assertEqual(len(factory.spawns), 1)
        spawn = factory.spawns[0]
        protocol = spawn[0]
        protocol.makeConnection(DummyProcess())
        self.assertEqual(spawn[1], filename)

        self.manager.reactor.advance(110)
        protocol.processEnded(Failure(ProcessDone(0)))

        def check(ignore):
            self.graph_manager.exchange()
            self.assertMessages(
                self.broker_service.message_store.get_pending_messages(),
                [{"data": {
                    123: {
                        "error": u"Process exceeded the 10 seconds limit",
                        "script-hash": b"9893532233caff98cd083a116b013c0b",
                        "values": []},
                    },
                  "type": "custom-graph"}])

        return result.addCallback(check)
Example #4
    def test_success(self):
        """
        When a C{execute-script} message is received from the server, the
        specified script will be run and an operation-result will be sent back
        to the server.
        """
        # Let's use a stub process factory, because otherwise we don't have
        # access to the deferred.
        factory = StubProcessFactory()

        self.manager.add(ScriptExecutionPlugin(process_factory=factory))

        result = self._send_script(sys.executable, "print 'hi'")

        self._verify_script(factory.spawns[0][1], sys.executable, "print 'hi'")
        self.assertMessages(
            self.broker_service.message_store.get_pending_messages(), [])

        # Now let's simulate the completion of the process
        factory.spawns[0][0].childDataReceived(1, b"hi!\n")
        factory.spawns[0][0].processEnded(Failure(ProcessDone(0)))

        def got_result(r):
            self.assertMessages(
                self.broker_service.message_store.get_pending_messages(),
                [{
                    "type": "operation-result",
                    "operation-id": 123,
                    "status": SUCCEEDED,
                    "result-text": u"hi!\n"
                }])

        result.addCallback(got_result)
        return result
Example #5
 def test_outputReceivedPartialLine(self):
     """
     Getting a partial line results in no events until the process ends.
     """
     events = []
     self.addCleanup(globalLogPublisher.removeObserver, events.append)
     globalLogPublisher.addObserver(events.append)
     self.pm.addProcess("foo", ["foo"])
     # Schedule the process to start
     self.pm.startService()
     # Advance the reactor to start the process
     self.reactor.advance(0)
     self.assertIn("foo", self.pm.protocols)
     # Long time passes
     self.reactor.advance(self.pm.threshold)
     # Process greets
     self.pm.protocols["foo"].outReceived(b"hello world!")
     self.assertEquals(len(events), 0)
     self.pm.protocols["foo"].processEnded(Failure(ProcessDone(0)))
     self.assertEquals(len(events), 1)
     namespace = events[0]["log_namespace"]
     stream = events[0]["stream"]
     tag = events[0]["tag"]
     line = events[0]["line"]
     self.assertEquals(namespace, "twisted.runner.procmon.ProcessMonitor")
     self.assertEquals(stream, "stdout")
     self.assertEquals(tag, "foo")
     self.assertEquals(line, "hello world!")
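
The test above depends on the monitored process's output protocol buffering a partial line and only flushing it when the process ends. Below is a minimal sketch of that behaviour, assuming a hypothetical LineFlushingProtocol with an emit callback; it is not the actual twisted.runner.procmon logging protocol.

from twisted.internet.error import ProcessDone
from twisted.internet.protocol import ProcessProtocol
from twisted.python.failure import Failure


class LineFlushingProtocol(ProcessProtocol):
    """Hypothetical protocol: emit complete lines, flush leftovers on exit."""

    def __init__(self, emit):
        self._emit = emit      # called once per complete line of output
        self._buffer = b""

    def outReceived(self, data):
        # Keep the trailing partial line in the buffer; emit complete lines.
        self._buffer += data
        *lines, self._buffer = self._buffer.split(b"\n")
        for line in lines:
            self._emit(line.decode("utf-8"))

    def processEnded(self, reason):
        # A partial line is only emitted once the process has ended.
        if self._buffer:
            self._emit(self._buffer.decode("utf-8"))


events = []
proto = LineFlushingProtocol(events.append)
proto.outReceived(b"hello world!")            # partial line: nothing emitted
assert events == []
proto.processEnded(Failure(ProcessDone(0)))   # flushes the buffered line
assert events == ["hello world!"]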
Example #6
 def test_connectionLost(self):
     """
     When connectionLost() is called, it should call loseConnection()
     on the session channel.
     """
     self.pp.connectionLost(failure.Failure(
             ProcessDone(0)))
Example #7
    def request_exit_status(self, data):
        stat = struct.unpack('>L', data)[0]
        if stat:
            res = ProcessTerminated(exitCode=stat)
        else:
            res = ProcessDone(stat)

        self._protocol.commandExited(Failure(res))
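
For context, the exit-status payload handled above is a big-endian 32-bit integer (the same value Example #8 expects to see packed with struct.pack(">I", 0)). The sketch below, using a made-up helper name, shows the same mapping without the protocol plumbing: status 0 becomes ProcessDone, anything else becomes ProcessTerminated.

import struct

from twisted.internet.error import ProcessDone, ProcessTerminated
from twisted.python.failure import Failure


def exit_status_to_failure(data):
    # Mirrors request_exit_status above, minus the protocol plumbing.
    status = struct.unpack(">L", data)[0]
    if status:
        return Failure(ProcessTerminated(exitCode=status))
    return Failure(ProcessDone(status))


print(exit_status_to_failure(struct.pack(">L", 0)).value)    # a ProcessDone message
print(exit_status_to_failure(struct.pack(">L", 12)).value)   # a ProcessTerminated message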
Example #8
 def test_processEndedWithExitCode(self):
     """
     When processEnded is called, if there is an exit code in the reason
     it should be sent in an exit-status method.  The connection should be
     closed.
     """
     self.pp.processEnded(Failure(ProcessDone(None)))
     self.assertRequestsEqual([(b"exit-status", struct.pack(">I", 0), False)])
     self.assertSessionClosed()
Example #9
 def closed(self):
     if self._exitCode or self._signal:
         reason = failure.Failure(
             ProcessTerminated(self._exitCode, self._signal, self.status))
     else:
         reason = failure.Failure(ProcessDone(status=self.status))
     processProtocol = self.processProtocol
     del self.processProtocol
     processProtocol.processEnded(reason)
Example #10
 def test__logs_stderr_at_process_end(self):
     message = factory.make_name("message")
     callback = Mock()
     proto = JSONPerLineProtocol(callback=callback)
     proto.connectionMade()
     with TwistedLoggerFixture() as logger:
         proto.errReceived(message.encode("ascii"))
         self.assertThat(logger.output, Equals(""))
         proto.processEnded(Failure(ProcessDone(0)))
     self.assertThat(logger.output, Equals(message))
Example #11
    def test_unregisters_killer_success(self):
        """
        When the process ends successfully, the before-shutdown event is
        unregistered.
        """
        reactor = ProcessCoreReactor()
        run(reactor, ['command', 'and', 'args'])
        [process] = reactor.processes

        process.processProtocol.processEnded(Failure(ProcessDone(0)))
        self.assertEqual(reactor._triggers['shutdown'].before, [])
Example #12
 def test_create_result(self):
     """
     The result of ``ZFSSnapshots.create()`` is a ``Deferred`` that fires
     when creation has finished.
     """
     reactor = FakeProcessReactor()
     snapshots = ZFSSnapshots(reactor, Filesystem(b"mypool", None))
     d = snapshots.create(b"name")
     reactor.processes[0].processProtocol.processEnded(
         Failure(ProcessDone(0)))
     self.assertEqual(self.successResultOf(d), None)
Example #13
 def test_normal_exit(self):
     """If the subprocess exits with exit code 0, the bytes output by its
     stdout are returned as the result of the ``Deferred`` returned from
     ``zfs_command``.
     """
     reactor = FakeProcessReactor()
     result = zfs_command(reactor, [b"-H", b"lalala"])
     process_protocol = reactor.processes[0].processProtocol
     process_protocol.childDataReceived(1, b"abc")
     process_protocol.childDataReceived(1, b"def")
     process_protocol.processEnded(Failure(ProcessDone(0)))
     self.assertEqual(self.successResultOf(result), b"abcdef")
Example #14
    def test_process_success(self):
        """
        If the process ends with a success, the returned deferred fires with
        a successful result.
        """

        reactor = ProcessCoreReactor()
        d = run(reactor, ['command', 'and', 'args'])
        [process] = reactor.processes

        expected_failure = Failure(ProcessDone(0))
        process.processProtocol.processEnded(expected_failure)
        self.successResultOf(d)
Example #15
 def test_connectionLostBackoffDelayDoubles(self):
     """
     L{ProcessMonitor.connectionLost} doubles the restart delay each time
     the process dies too quickly.
     """
     self.pm.startService()
     self.pm.addProcess("foo", ["foo"])
     self.reactor.advance(self.pm.threshold - 1) #9s
     self.assertIn("foo", self.pm.protocols)
     self.assertEqual(self.pm.delay["foo"], self.pm.minRestartDelay)
     # process dies within the threshold and should not restart immediately
     self.pm.protocols["foo"].processEnded(Failure(ProcessDone(0)))
     self.assertEqual(self.pm.delay["foo"], self.pm.minRestartDelay * 2)
Example #16
    def test_process_shutdown_unregister(self):
        """
        If the process is killed after shutting down, an error
        isn't raised.

        In particular, removing the killer doesn't cause an error.
        """

        reactor = ProcessCoreReactor()
        run(reactor, ['command', 'and', 'args'])
        [process] = reactor.processes

        reactor.fireSystemEvent('shutdown')
        process.processProtocol.processEnded(Failure(ProcessDone(0)))
Example #17
 def test_time_limit_canceled_after_success(self):
     """
     The timeout call is cancelled after the script terminates.
     """
     factory = StubProcessFactory()
     self.plugin.process_factory = factory
     self.plugin.run_script("/bin/sh", "", time_limit=500)
     protocol = factory.spawns[0][0]
     transport = DummyProcess()
     protocol.makeConnection(transport)
     protocol.childDataReceived(1, b"hi\n")
     protocol.processEnded(Failure(ProcessDone(0)))
     self.manager.reactor.advance(501)
     self.assertEqual(transport.signals, [])
Example #18
    def test_connectionLostMinMaxRestartDelay(self):
        """
        L{ProcessMonitor.connectionLost} will wait at least minRestartDelay s
        and at most maxRestartDelay s
        """
        self.pm.minRestartDelay = 2
        self.pm.maxRestartDelay = 3

        self.pm.startService()
        self.pm.addProcess("foo", ["foo"])

        self.assertEqual(self.pm.delay["foo"], self.pm.minRestartDelay)
        self.reactor.advance(self.pm.threshold - 1)
        self.pm.protocols["foo"].processEnded(Failure(ProcessDone(0)))
        self.assertEqual(self.pm.delay["foo"], self.pm.maxRestartDelay)
Example #19
    def test_list_result_child_dataset(self):
        """
        ``ZFSSnapshots.list`` parses out the snapshot names of a non-root
        dataset from the results of the command.
        """
        reactor = FakeProcessReactor()
        snapshots = ZFSSnapshots(reactor, Filesystem(b"mypool", b"myfs"))

        d = snapshots.list()
        process_protocol = reactor.processes[0].processProtocol
        process_protocol.childDataReceived(1, b"mypool/myfs@name\n")
        process_protocol.childDataReceived(1, b"mypool/myfs@name2\n")
        reactor.processes[0].processProtocol.processEnded(
            Failure(ProcessDone(0)))
        self.assertEqual(self.successResultOf(d), [b"name", b"name2"])
Example #20
    def test_list_result_ignores_other_pools(self):
        """
        ``ZFSSnapshots.list`` skips snapshots of other pools.

        In particular, we are likely to see snapshot names of sub-pools in
        the output.
        """
        reactor = FakeProcessReactor()
        snapshots = ZFSSnapshots(reactor, Filesystem(b"mypool", None))

        d = snapshots.list()
        process_protocol = reactor.processes[0].processProtocol
        process_protocol.childDataReceived(1, b"mypool/child@name\n")
        process_protocol.childDataReceived(1, b"mypool@name2\n")
        reactor.processes[0].processProtocol.processEnded(
            Failure(ProcessDone(0)))
        self.assertEqual(self.successResultOf(d), [b"name2"])
Example #21
    def test_list_ignores_undecodable_snapshots(self):
        """
        ``ZFSSnapshots.list`` skips snapshots whose names cannot be decoded.

        These are presumably snapshots not being managed by Flocker.
        """
        reactor = FakeProcessReactor()
        snapshots = ZFSSnapshots(reactor, Filesystem(b"mypool", None))
        name = SnapshotName(datetime.now(UTC), b"node")

        d = snapshots.list()
        process_protocol = reactor.processes[0].processProtocol
        process_protocol.childDataReceived(1, b"mypool@alalalalal\n")
        process_protocol.childDataReceived(
            1, b"mypool@%s\n" % (name.to_bytes(), ))
        reactor.processes[0].processProtocol.processEnded(
            Failure(ProcessDone(0)))
        self.assertEqual(self.successResultOf(d), [name])
Example #22
    def test_stopServiceCancelRestarts(self):
        """
        L{ProcessMonitor.stopService} should cancel any scheduled process
        restarts.
        """
        self.pm.addProcess("foo", ["foo"])
        # Schedule the process to start
        self.pm.startService()
        # advance the reactor to start the processes
        self.reactor.advance(self.pm.threshold)
        self.assertIn("foo", self.pm.protocols)

        self.reactor.advance(1)
        # Kill the process early
        self.pm.protocols["foo"].processEnded(Failure(ProcessDone(0)))
        self.assertTrue(self.pm.restart['foo'].active())
        self.pm.stopService()
        # Scheduled restart should have been cancelled
        self.assertFalse(self.pm.restart['foo'].active())
Example #23
    def test_list_result(self):
        """
        ``ZFSSnapshots.list`` parses out the snapshot names from the results of
        the command.
        """
        reactor = FakeProcessReactor()
        snapshots = ZFSSnapshots(reactor, Filesystem(b"mypool", None))
        name = SnapshotName(datetime.now(UTC), b"node")
        name2 = SnapshotName(datetime.now(UTC), b"node2")

        d = snapshots.list()
        process_protocol = reactor.processes[0].processProtocol
        process_protocol.childDataReceived(
            1, b"mypool@%s\n" % (name.to_bytes(), ))
        process_protocol.childDataReceived(
            1, b"mypool@%s\n" % (name2.to_bytes(), ))
        reactor.processes[0].processProtocol.processEnded(
            Failure(ProcessDone(0)))
        self.assertEqual(self.successResultOf(d), [name, name2])
Example #24
 def test_connectionLostLongLivedProcess(self):
     """
     L{ProcessMonitor.connectionLost} should immediately restart a process
     if it has been running longer than L{ProcessMonitor.threshold} seconds.
     """
     self.pm.addProcess("foo", ["foo"])
     # Schedule the process to start
     self.pm.startService()
     # advance the reactor to start the process
     self.reactor.advance(0)
     self.assertIn("foo", self.pm.protocols)
     # Long time passes
     self.reactor.advance(self.pm.threshold)
     # Process dies after threshold
     self.pm.protocols["foo"].processEnded(Failure(ProcessDone(0)))
     self.assertNotIn("foo", self.pm.protocols)
     # Process should be restarted immediately
     self.reactor.advance(0)
     self.assertIn("foo", self.pm.protocols)
Example #25
    def test_restart_stops_exchanger(self):
        """
        After a successful shutdown, the broker stops processing new messages.
        """
        message = {"type": "shutdown", "reboot": False, "operation-id": 100}
        self.plugin.perform_shutdown(message)

        [arguments] = self.process_factory.spawns
        protocol = arguments[0]
        protocol.processEnded(Failure(ProcessDone(status=0)))
        self.broker_service.reactor.advance(100)
        self.manager.reactor.advance(100)

        # No new messages will be exchanged while the shutdown/reboot is in
        # progress.
        self.manager.broker.exchanger.schedule_exchange()
        payloads = self.manager.broker.exchanger._transport.payloads
        self.assertEqual(0, len(payloads))
        return protocol.result
Example #26
    def test_user_with_attachments(self):
        uid = os.getuid()
        info = pwd.getpwuid(uid)
        username = info.pw_name
        gid = info.pw_gid

        patch_chown = mock.patch("os.chown")
        mock_chown = patch_chown.start()

        factory = StubProcessFactory()
        self.plugin.process_factory = factory

        result = self.plugin.run_script("/bin/sh",
                                        "echo hi",
                                        user=username,
                                        attachments={u"file 1": "some data"})

        self.assertEqual(len(factory.spawns), 1)
        spawn = factory.spawns[0]
        self.assertIn("LANDSCAPE_ATTACHMENTS", spawn[3])
        attachment_dir = spawn[3]["LANDSCAPE_ATTACHMENTS"]
        self.assertEqual(stat.S_IMODE(os.stat(attachment_dir).st_mode), 0o700)
        filename = os.path.join(attachment_dir, "file 1")
        self.assertEqual(stat.S_IMODE(os.stat(filename).st_mode), 0o600)

        protocol = spawn[0]
        protocol.childDataReceived(1, b"foobar")
        for fd in (0, 1, 2):
            protocol.childConnectionLost(fd)
        protocol.processEnded(Failure(ProcessDone(0)))

        def check(data):
            self.assertEqual(data, "foobar")
            self.assertFalse(os.path.exists(attachment_dir))
            mock_chown.assert_has_calls(
                [mock.call(mock.ANY, uid, gid) for x in range(3)])

        def cleanup(result):
            patch_chown.stop()
            return result

        return result.addCallback(check).addBoth(cleanup)
Example #27
    def test_limit_time_accumulates_data(self):
        """
        Data from processes that time out should still be accumulated and
        available from the exception object that is raised.
        """
        factory = StubProcessFactory()
        self.plugin.process_factory = factory
        result = self.plugin.run_script("/bin/sh", "", time_limit=500)
        protocol = factory.spawns[0][0]
        protocol.makeConnection(DummyProcess())
        protocol.childDataReceived(1, b"hi\n")
        self.manager.reactor.advance(501)
        protocol.processEnded(Failure(ProcessDone(0)))

        def got_error(f):
            self.assertTrue(f.check(ProcessTimeLimitReachedError))
            self.assertEqual(f.value.data, "hi\n")

        result.addErrback(got_error)
        return result
Example #28
    def test_cancel_doesnt_blow_after_success(self):
        """
        When the process ends successfully and the timeout fires immediately
        afterwards, the output should still be delivered and nothing bad will
        happen!
        [regression test: killing of the already-dead process would blow up.]
        """
        factory = StubProcessFactory()
        self.plugin.process_factory = factory
        result = self.plugin.run_script("/bin/sh", "", time_limit=500)
        protocol = factory.spawns[0][0]
        protocol.makeConnection(DummyProcess())
        protocol.childDataReceived(1, b"hi")
        protocol.processEnded(Failure(ProcessDone(0)))
        self.manager.reactor.advance(501)

        def got_result(output):
            self.assertEqual(output, "hi")

        result.addCallback(got_result)
        return result
Example #29
    def test_list_result_ignores_other_pools(self):
        """
        ``ZFSSnapshots.list`` skips snapshots of other pools.

        In particular, we are likely to see snapshot names of sub-pools in
        the output.
        """
        reactor = FakeProcessReactor()
        snapshots = ZFSSnapshots(reactor, Filesystem(b"mypool", None))
        name = SnapshotName(datetime.now(UTC), b"node")
        name2 = SnapshotName(datetime.now(UTC), b"node2")

        d = snapshots.list()
        process_protocol = reactor.processes[0].processProtocol
        process_protocol.childDataReceived(
            1, b"mypool/child@%s\n" % (name.to_bytes(), ))
        process_protocol.childDataReceived(
            1, b"mypool@%s\n" % (name2.to_bytes(), ))
        reactor.processes[0].processProtocol.processEnded(
            Failure(ProcessDone(0)))
        self.assertEqual(self.successResultOf(d), [name2])
Example #30
    def test_limit_size(self):
        """Data returned from the command is limited."""
        factory = StubProcessFactory()
        self.plugin.process_factory = factory
        self.plugin.size_limit = 100
        result = self.plugin.run_script("/bin/sh", "")

        # Ultimately we assert that the resulting output is limited to
        # 100 bytes and indicates its truncation.
        result.addCallback(self.assertEqual,
                           ("x" * 79) + "\n**OUTPUT TRUNCATED**")

        protocol = factory.spawns[0][0]

        # Push 200 bytes of output, so we trigger truncation.
        protocol.childDataReceived(1, b"x" * 200)

        for fd in (0, 1, 2):
            protocol.childConnectionLost(fd)
        protocol.processEnded(Failure(ProcessDone(0)))

        return result
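
All of the examples above share one testing pattern: rather than spawning a real child process, the test drives a ProcessProtocol by hand, feeds it stdout data, and ends it with Failure(ProcessDone(0)). The following self-contained sketch illustrates that pattern with a hypothetical CollectingProtocol; it is not taken from any of the projects above.

from twisted.internet.defer import Deferred
from twisted.internet.error import ProcessDone
from twisted.internet.protocol import ProcessProtocol
from twisted.python.failure import Failure


class CollectingProtocol(ProcessProtocol):
    """Accumulate stdout and fire a Deferred with it when the process ends."""

    def __init__(self):
        self.done = Deferred()
        self._out = []

    def childDataReceived(self, fd, data):
        if fd == 1:                       # fd 1 is the child's stdout
            self._out.append(data)

    def processEnded(self, reason):
        if reason.check(ProcessDone):     # clean exit (status 0)
            self.done.callback(b"".join(self._out))
        else:
            self.done.errback(reason)     # e.g. ProcessTerminated


# No reactor or real process is needed in a test; feed the protocol directly:
protocol = CollectingProtocol()
protocol.childDataReceived(1, b"abc")
protocol.childDataReceived(1, b"def")
protocol.processEnded(Failure(ProcessDone(0)))
protocol.done.addCallback(print)          # prints b'abcdef'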