Example #1
 def test_simple_queue_read_write_from_different_proc(self):
     # One process writes, another process reads and then writes to q_out.
     # Then master process reads from q_out.
     n = 10
     q = SimpleQueue()
     q_out = SimpleQueue()
     p1 = fiber.Process(target=put_queue, args=(q, [i for i in range(n)]))
     p2 = fiber.Process(target=get_queue, args=(q, q_out, n))
     p1.start()
     p2.start()
     for i in range(n):
         data = q_out.get()
         assert data == i
     p1.join()
     p2.join()
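
The helpers put_queue and get_queue are not shown in this listing. A minimal sketch consistent with how the examples call them (a single object, or a list that is pushed item by item) could look like the following; only the names and call signatures come from the examples, the bodies are assumptions:

def put_queue(q, obj):
    # Called either with a single object (e.g. 10) or with a list of
    # objects; a list is pushed item by item, anything else as-is.
    if isinstance(obj, list):
        for item in obj:
            q.put(item)
    else:
        q.put(obj)


def get_queue(q, q_out, n):
    # Read n items from q and forward each of them to q_out.
    for _ in range(n):
        q_out.put(q.get())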
Example #2
    def test_queue_balance(self):
        # We only test SimpleQueuePush because SimpleQueuePull doesn't guarantee
        # balance.
        inqueue = fiber.queues.SimpleQueuePush()
        outqueue = fiber.queues.SimpleQueuePush()
        num_workers = 4
        multiplier = 600
        workers = []
        results = []
        for i in range(num_workers):
            print("create worker", i)
            p = fiber.Process(target=worker, args=(inqueue, outqueue, i), daemon=True)
            workers.append(p)
        for i in range(num_workers):
            workers[i].start()

        # wait for all workers to connect
        time.sleep(1)
        for i in range(num_workers * multiplier):
            inqueue.put("work")
        for i in range(num_workers * multiplier):
            results.append(outqueue.get())
        stats = collections.Counter(results)
        total = num_workers * multiplier
        # send quit signals to all workers
        for i in range(num_workers * multiplier):
            inqueue.put("quit")
        for i in range(num_workers):
            workers[i].join()
        for i in range(num_workers):
            # print("{}: {} {:.2f}".format(i, stats[i], stats[i] / float(total)))
            # work should be distributed evenly across workers
            assert stats[i] == multiplier
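
The worker target is not part of the excerpt. Judging from the assertions (the counter is keyed by worker index, and each worker should handle exactly multiplier items), a plausible sketch is:

def worker(inqueue, outqueue, index):
    # Pull work items until a "quit" sentinel arrives; report this
    # worker's index for every item so the parent can count how the
    # work was balanced across workers.
    while True:
        task = inqueue.get()
        if task == "quit":
            break
        outqueue.put(index)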
Example #3
 def test_simple_queue_fiber(self):
     q = SimpleQueue()
     p = fiber.Process(target=put_queue, args=(q, 10))
     p.start()
     p.join()
     data = q.get()
     assert data == 10
Example #4
    def test_launch_fiber_background_thread(self):
        # reset global variable
        fiber.popen_fiber_spawn._fiber_background_thread = None

        fp = io.StringIO()
        handler = logging.StreamHandler(fp)
        logger = logging.getLogger("fiber")
        logger.setLevel(level=logging.DEBUG)
        logger.addHandler(handler)

        try:
            procs = []
            for i in range(4):
                p = fiber.Process(
                    target=time.sleep,
                    args=(1, ),
                    name="test_launch_fiber_background_thread_{}".format(i))
                procs.append(p)
            for p in procs:
                p.start()

            logs = fp.getvalue()
            times = len(list(re.finditer('creating background thread', logs)))
            assert (times == 1)
        finally:
            for p in procs:
                p.join()
            logger.removeHandler(handler)
Example #5
 def test_subprocess_with_pipe(self):
     reader, writer = Pipe()
     p = fiber.Process(target=write_pipe, args=(writer, b"fiber pipe"))
     p.start()
     msg = reader.recv()
     p.join()
     assert msg == b"fiber pipe"
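
write_pipe is not shown; given that the parent only calls reader.recv() once, it presumably just sends its payload, roughly:

def write_pipe(conn, data):
    # Send the payload to the parent over the pipe, then close this end.
    conn.send(data)
    conn.close()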
Example #6
    def run(self):
        """
        Start this Ring. This will start the ring 0 process on the same machine
        and start all the other ring nodes with Fiber processes.
        """
        if self.size <= 0:
            return

        procs = []
        rank = self.rank
        # Start process rank 0
        self.rank = 0
        ctx = mp.get_context("spawn")
        p = ctx.Process(target=self._target)
        p.start()
        procs.append(p)

        for i in range(1, self.size):
            self.rank = i
            p = fiber.Process(target=self._target)
            p.start()
            procs.append(p)

        self.rank = rank
        # wait for all processes to finish
        for i in range(self.size):
            procs[i].join()
Example #7
    def test_managers_basic(self):
        manager = fiber.Manager()
        ns = manager.Namespace()
        ns.x = 1
        ns.y = [1]
        ns.z = [1]
        ls = manager.list([1, [1], [1], manager.list([1])])
        di = manager.dict({0: 1, 1: [1], 2: [1], 3: manager.list([1])})

        p = fiber.Process(target=f,
                          args=(ns, ls, di),
                          name="test_managers_basic")
        p.start()
        p.join()
        assert ns.x == 2
        assert ns.y == [1]
        assert ns.z == [2]

        assert ls[0] == 2
        assert ls[1] == [1]
        assert ls[2] == [2]
        assert ls[3][0] == 2
        assert len(ls) == 4

        assert di[0] == 2
        assert di[1] == [1]
        assert di[2] == [2]
        assert di[3][0] == 2
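
The target f is not included here. A sketch consistent with the assertions follows; the point it exercises is standard manager-proxy behaviour: reassigning an attribute or item propagates back to the manager, mutating a plain container obtained through a proxy does not, and nested manager.list proxies do propagate mutations. The body below is an assumption reconstructed from the asserts:

def f(ns, ls, di):
    # Namespace proxy: attribute reassignment propagates; in-place
    # mutation of a plain list stored on the namespace does not.
    ns.x += 1
    ns.y.append(2)   # lost: mutates a local copy of the list
    ns.z = [2]       # kept: the attribute is reassigned

    # Manager list proxy: the same rule applies per item.
    ls[0] += 1       # kept: item reassignment through the proxy
    ls[1].append(2)  # lost
    ls[2] = [2]      # kept
    ls[3][0] += 1    # kept: ls[3] is itself a manager.list proxy

    # Manager dict proxy: same pattern per key.
    di[0] += 1
    di[1].append(2)  # lost
    di[2] = [2]
    di[3][0] += 1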
Example #8
    def test_sharing_pipe_with_fiber_and_multiprocessing(self):
        reader, writer = Pipe()

        p = fiber.Process(target=pipe_mp_worker, args=(writer,))
        p.start()
        msg = reader.recv()
        p.join()
        assert msg == b"hello"
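
pipe_mp_worker is not shown. The test name suggests the Fiber child hands the shared pipe end to an ordinary multiprocessing child; a hypothetical sketch (both helper names below are assumptions) is:

import multiprocessing


def _mp_send_hello(conn):
    # Hypothetical helper: runs in a plain multiprocessing child.
    conn.send(b"hello")


def pipe_mp_worker(writer):
    # Runs inside the Fiber child: let a regular multiprocessing.Process
    # do the actual send through the shared pipe end.
    p = multiprocessing.Process(target=_mp_send_hello, args=(writer,))
    p.start()
    p.join()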
Example #9
 def test_pipe_duplex_over_fiber_process(self):
     conn1, conn2 = Pipe(duplex=True)
     p = fiber.Process(target=pipe_worker, args=(conn2,))
     p.start()
     conn1.send(b"hello")
     data = conn1.recv()
     p.join()
     assert data == b"ack"
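
pipe_worker (the duplex case) is also not shown; based on the send/recv pattern in the parent it is presumably a small acknowledger:

def pipe_worker(conn):
    # Wait for a message from the parent, then acknowledge it.
    conn.recv()
    conn.send(b"ack")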
Example #10
 def test_simple_queue_fiber_multi(self):
     # Read and write multiple objects
     n = 10
     q = SimpleQueue()
     p = fiber.Process(target=put_queue, args=(q, [i for i in range(n)]))
     p.start()
     p.join()
     for i in range(n):
         data = q.get()
         assert data == i
Example #11
    def test_get_job(self):
        p = fiber.Process(name="test_get_job")
        popen = fspawn.Popen(p, launch=False)

        job = popen._get_job(["echo", "hello fiber"])
        assert job.command == ["echo", "hello fiber"]
        #assert job.image == "fiber-test:latest"
        # By default, no image should be provided. The backend should decide
        # whether to use the default image or the current container image.
        assert job.image is None
Example #12
    def test_popen_method(self):
        # make sure that fiber works with multiprocessing
        p = fiber.Process(name="test_popen_method")
        popen = p._Popen(p)
        assert isinstance(popen, fiber.popen_fiber_spawn.Popen)

        p = mp.Process(name="test_popen_method2")
        popen = p._Popen(p)
        assert popen.method == "fork"
        assert isinstance(popen, mp.popen_fork.Popen)
Example #13
    def test_config_sync(self):
        # child process should have the same config as parent process
        r, w = fiber.Pipe()
        p = fiber.Process(target=config_worker, args=(w,))
        p.start()

        got = r.recv()
        expected = fiber.config.get_dict()

        p.join()
        assert got == expected
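
config_worker is not part of the excerpt; given that the parent compares the received value against fiber.config.get_dict(), a minimal sketch is:

import fiber.config


def config_worker(writer):
    # Report the configuration seen by the child back to the parent.
    writer.send(fiber.config.get_dict())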
Example #14
    def test_interactive_shell_cloudpickle(self):
        def add(a, b):
            return a + b

        # This will fail with "AttributeError: Can't pickle local object"
        # because the default pickler cannot pickle local objects.
        with pytest.raises(AttributeError):
            p = fiber.Process(target=add, args=(1, 2),
                              name="test_interactive_shell_cloudpickle")
            p.start()

        # This will work. When running in an interactive console, the pickler
        # is set to cloudpickle.

        with mock.patch("fiber.util.is_in_interactive_console") as func:
            func.return_value = True

            p = fiber.Process(target=add, args=(1, 2),
                              name="test_interactive_shell_cloudpickle2")
            p.start()
            p.join()
Example #15
def run_manager_standard():
    manager = fiber.Manager()
    ns = manager.Namespace()
    ns.x = 1
    ns.y = [1]
    ns.z = [1]
    ls = manager.list([1, [1], [1], manager.list([1])])
    di = manager.dict({0: 1, 1: [1], 2: [1], 3: manager.list([1])})
    print('before', ns, ls, ls[2], di, di[2], sep='\n')
    p = fiber.Process(target=f, args=(ns, ls, di))
    p.start()
    p.join()
    print('after', ns, ls, ls[2], di, di[2], sep='\n')
Example #16
    def test_image_not_found(self):
        if fiber.config.default_backend != "docker":
            pytest.skip("skipped because current backend is not docker")

        try:
            with pytest.raises(multiprocessing.ProcessError):
                fiber.init(
                    image='this-image-does-not-exist-and-is-only-used-for-testing'
                )
                p = fiber.Process(name="test_image_not_found")
                p.start()
        finally:
            fiber.reset()
Example #17
    def test_command_line(self):
        fiber.init(use_bash=False)
        try:
            p = fiber.Process(name="test_command_line")
            popen = fspawn.Popen(p, backend="docker", launch=False)

            cmd = popen.get_command_line(host="127.0.0.1", port="8000", id=1)
        finally:
            fiber.init(use_bash=True)
        assert cmd == [
            "/usr/local/bin/python", "-c",
            'import sys;import os;import socket;import struct;import fiber;import fiber.util;from multiprocessing import spawn, reduction;sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM);sock.connect(("127.0.0.1", 8000));conn = sock;conn.send(struct.pack("<I", 1));fd = conn.fileno();exitcode = fiber.util.spawn_prepare(fd);sys.exit(exitcode)',
            '--multiprocessing-fork'
        ]
Example #18
    def test_get_listen_addr_fp(self):
        ifce = get_ifce("docker0")
        network = ipaddress.IPv4Network((ifce.address, ifce.netmask),
                                        strict=False)

        q = fiber.SimpleQueue()
        p = fiber.Process(target=wrap_result,
                          args=(q, ),
                          name="test_get_listen_addr")
        p.start()
        res = q.get()
        ip = res[0]
        assert ipaddress.IPv4Address(ip) in network

        p.join()
        assert p.exitcode == 0
Example #19
    def test_init_image(self, mock_create_job):
        if fiber.config.default_backend != "docker":
            pytest.skip("skipped because current backend is not docker")

        try:
            fiber.init(image='python:3.6-alpine', backend='docker')
            p = fiber.Process(name="test_init_image")
            p.start()
        except NotImplementedError:
            pass
        finally:
            fiber.reset()
        args = mock_create_job.call_args
        # https://docs.python.org/3/library/unittest.mock.html#unittest.mock.Mock.call_args # noqa E501
        job_spec_arg = args[0][0]
        assert job_spec_arg.image == "python:3.6-alpine"
Example #20
 def test_simple_queue_fiber2(self):
     q = SimpleQueue()
     n = 3
     procs = []
     try:
         for i in range(n):
             procs.append(fiber.Process(target=put_queue, args=(q, 10)))
         for p in procs:
             p.start()
         for p in procs:
             p.join()
         for p in procs:
             data = q.get()
             assert data == 10
     finally:
         for p in procs:
             p.join()
Example #21
    def test_no_python3_inside_image(self):
        if fiber.config.default_backend != "docker":
            pytest.skip("skipped because current backend is not docker")
        '''
        fp = io.StringIO()
        handler = logging.StreamHandler(fp)
        logger = logging.getLogger("fiber")
        logger.setLevel(level=logging.DEBUG)
        logger.addHandler(handler)
        '''

        with pytest.raises(multiprocessing.ProcessError):
            try:
                fiber.init(image='ubuntu:18.04')
                p = fiber.Process(name="test_no_python3_inside_image")
                p.start()
            finally:
                fiber.reset()
                #logger.removeHandler(handler)
Example #22
    def test_fiber_with_more_than_1024_fds(self):
        # select.select can't handle fd >= 1024
        # https://stackoverflow.com/questions/7328165/how-do-you-compile-python-with-1024-file-descriptors
        import resource
        resource.setrlimit(resource.RLIMIT_NOFILE, (8192, 8192))

        socks = []
        for i in range(1023):
            s = socket.socket(socket.AF_INET)
            socks.append(s)

        p = fiber.Process(target=time.sleep,
                          args=(10, ),
                          name="test_fiber_with_more_than_1024_fds")
        p.start()

        # if select is used, we should see errors like:
        # `ValueError: filedescriptor out of range in select()`
        print(p.exitcode)

        p.terminate()
Example #23
    def test_log_file_path(self):
        if fiber.config.default_backend != "docker":
            pytest.skip("skipped because current backend is not docker")

        # make sure the workers and the master don't write to the same log file
        try:
            log_file = fiber_config.log_file
            log_level = fiber_config.log_level
            # clean up
            if os.path.isdir("tests/logs"):
                files = glob.glob("tests/logs/*")
                for f in files:
                    os.remove(f)
            files = glob.glob("tests/logs/*")
            assert files == []

            fiber.init(log_file="tests/logs/fiber.log", log_level="DEBUG")
            q = fiber.SimpleQueue()
            p = fiber.Process(target=dummy_q_worker, args=(q,))
            p.start()
            msg = q.get()

            assert msg == "DUMMY_WORKER_DONE"

            files = glob.glob("tests/logs/*")
            files.sort()
            assert len(files) == 2
            assert files[0] == "tests/logs/fiber.log.MainProcess"
            assert "tests/logs/fiber.log.Process-" in files[1]

            regex = r'\w+:Process-\d+\(\d+\):'
            with open("tests/logs/fiber.log.MainProcess") as f:
                content = f.read()
                assert re.search(regex, content) is None, (
                    "Fiber subprocess log found in master log file")

            p.join()

        finally:
            fiber.init(log_file=log_file, log_level=log_level)
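
dummy_q_worker only has to give Fiber a chance to create the child's per-process log file and then signal completion; a sketch consistent with the assertion on msg is:

def dummy_q_worker(q):
    # The child just needs to run long enough for Fiber to set up its
    # own per-process log file, then tell the parent it is done.
    q.put("DUMMY_WORKER_DONE")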
Example #24
    def test_no_python3_inside_image(self):
        if fiber.config.default_backend != "docker":
            pytest.skip("skipped because current backend is not docker")

        fp = io.StringIO()
        handler = logging.StreamHandler(fp)
        logger = logging.getLogger("fiber")
        logger.setLevel(level=logging.DEBUG)
        logger.addHandler(handler)

        try:
            fiber.init(image='ubuntu:18.04')
            p = fiber.Process(name="test_no_python3_inside_image")
            p.start()

            logs = fp.getvalue()
            times = len(list(re.finditer('Failed to start Fiber process',
                                         logs)))
            assert times == 1
        finally:
            fiber.reset()
            logger.removeHandler(handler)