Example #1
0
    def test_multiprocessing(self):
        """Run the paxos system across three pipelines with the
        multiprocess runner, propose one value per pipeline, and check
        every process application converges on the final value.
        """
        set_db_uri()

        key1, key2, key3 = uuid4(), uuid4(), uuid4()
        value1, value2, value3 = 11111, 22222, 33333

        self.close_connections_before_forking()

        pipeline_ids = [1, 2, 3]
        runner = MultiprocessRunner(
            system=self.system,
            pipeline_ids=pipeline_ids,
            infrastructure_class=self.infrastructure_class,
        )

        # Start running operating system processes.
        with runner:
            # Get local application object.
            paxosapplication0 = runner.get(
                runner.system.process_classes["paxosapplication0"]
            )
            assert isinstance(paxosapplication0, PaxosApplication)

            # Start proposing values on the different system pipelines,
            # recording when each proposal was made.
            proposals = [(1, key1, value1), (2, key2, value2), (3, key3, value3)]
            started_times = []
            for pipeline_id, key, value in proposals:
                paxosapplication0.change_pipeline(pipeline_id)
                started_times.append(datetime.datetime.now())
                paxosapplication0.propose_value(key, value)

            # Check all the process applications have expected final values.
            paxosapplication1 = runner.get(
                runner.system.process_classes["paxosapplication1"]
            )
            paxosapplication2 = runner.get(
                runner.system.process_classes["paxosapplication2"]
            )
            assert isinstance(paxosapplication1, PaxosApplication)
            # Fix: the original asserted only paxosapplication1's type here.
            assert isinstance(paxosapplication2, PaxosApplication)

            # Don't use the cache, so assertions read the actual database.
            paxosapplication0.repository.use_cache = False
            paxosapplication1.repository.use_cache = False
            paxosapplication2.repository.use_cache = False

            for started, (pipeline_id, key, value) in zip(started_times, proposals):
                self.assert_final_value(paxosapplication0, key, value)
                self.assert_final_value(paxosapplication1, key, value)
                self.assert_final_value(paxosapplication2, key, value)
                duration = (datetime.datetime.now() - started).total_seconds()
                print(
                    "Resolved paxos %s with multiprocessing in %ss"
                    % (pipeline_id, duration)
                )
    def test_multiprocessing_performance(self):
        """Propose many values round-robin across two pipelines and
        time how long the paxos system takes to resolve them all.
        """
        set_db_uri()

        num_pipelines = 2
        pipeline_ids = range(num_pipelines)

        runner = MultiprocessRunner(
            system=self.system,
            pipeline_ids=pipeline_ids,
            infrastructure_class=self.infrastructure_class,
            setup_tables=True,
        )

        num_proposals = 50

        self.close_connections_before_forking()

        with runner:
            # Give the child processes a moment to start up.
            sleep(1)

            # Construct an application instance in this process.
            paxosapplication0 = runner.get(
                runner.system.process_classes["paxosapplication0"]
            )
            assert isinstance(paxosapplication0, PaxosApplication)

            # Don't use the cache, so as to keep checking actual database.
            paxosapplication0.repository.use_cache = False

            # Start timing (just for fun).
            started = datetime.datetime.now()

            # Propose values (list comprehension instead of list(genexpr)).
            proposals = [(uuid4(), i) for i in range(num_proposals)]
            for key, value in proposals:
                # Round-robin the proposals over the pipelines.
                paxosapplication0.change_pipeline(value % num_pipelines)
                print("Proposing key {} value {}".format(key, value))
                paxosapplication0.propose_value(key, str(value))

            # Check final values.
            for key, value in proposals:
                print("Asserting final value for key {} value {}".format(
                    key, value))
                self.assert_final_value(paxosapplication0, key, str(value))

            # Print timing information (just for fun).
            duration = (datetime.datetime.now() - started).total_seconds()
            print(
                "Resolved {} paxoses with multiprocessing in {:.4f}s ({:.4f}s "
                "each)".format(num_proposals, duration,
                               duration / num_proposals))
Example #3
0
    def test_multiprocessing_multiapp_system(self):
        """Run the orders-reservations-payments system with the
        multiprocess runner and wait for a new order to become
        reserved and then paid.
        """
        system = System(
            Orders | Reservations | Orders,
            Orders | Payments | Orders,
            setup_tables=True,
            infrastructure_class=self.infrastructure_class,
        )

        self.set_db_uri()

        # Create a new order and check it exists in the repository
        # before the runner forks its worker processes.
        with system.construct_app(Orders) as app:
            order_id = create_new_order()
            assert order_id in app.repository

        self.close_connections_before_forking()

        with MultiprocessRunner(system):
            with system.construct_app(Orders) as app:
                remaining = 50

                # Poll until the reservation policy marks the order reserved.
                while not app.repository[order_id].is_reserved:
                    sleep(0.1)
                    remaining -= 1
                    assert remaining, "Failed set order.is_reserved"

                # Then poll until the payments policy marks it paid.
                while remaining and not app.repository[order_id].is_paid:
                    sleep(0.1)
                    remaining -= 1
                    assert remaining, "Failed set order.is_paid"
Example #4
0
    def test_multiprocessing_singleapp_system(self):
        """Run a single-application system (Examples following itself)
        with the multiprocess runner and wait for the aggregate to be
        moved on by the downstream policy.
        """
        system = System(
            Examples | Examples,
            setup_tables=True,
            infrastructure_class=self.infrastructure_class,
        )

        self.set_db_uri()
        self.close_connections_before_forking()

        with MultiprocessRunner(system) as runner:
            examples = runner.get(Examples)

            # Create and save a new aggregate, then check it was stored.
            aggregate = ExampleAggregate.__create__()
            aggregate.__save__()
            assert aggregate.id in examples.repository

            # Poll until the downstream process moves the aggregate on.
            attempts_left = 50
            while not examples.repository[aggregate.id].is_moved_on:
                sleep(0.1)
                attempts_left -= 1
                assert attempts_left, "Failed to move"
Example #5
0
    def test_multiprocess_runner_with_single_application_class(self):
        """Check the multiprocess runner works when the system is
        constructed from a single application class.
        """
        system = System(
            Orders,
            setup_tables=True,
            infrastructure_class=self.infrastructure_class,
        )

        self.set_db_uri()
        self.close_connections_before_forking()

        with MultiprocessRunner(system) as runner:
            orders_app = runner.get(Orders)
            order_id = orders_app.create_new_order()
            # The stored aggregate should round-trip with the same ID.
            self.assertEqual(orders_app.repository[order_id].id, order_id)
Example #6
0
    def test_multipipeline_multiprocessing_multiapp(self):
        """Run the orders system over multiple pipelines with the
        multiprocess runner, wait for every order to be reserved and
        paid, and report throughput statistics.
        """
        self.set_db_uri()

        system = System(
            (Orders, Reservations, Orders, Payments, Orders),
            setup_tables=True,
            infrastructure_class=self.infrastructure_class,
        )

        num_pipelines = 2
        pipeline_ids = range(num_pipelines)

        multiprocess_runner = MultiprocessRunner(system, pipeline_ids=pipeline_ids)

        num_orders_per_pipeline = 5
        order_ids = []

        self.close_connections_before_forking()

        # Start multiprocessing system.
        with multiprocess_runner:

            # Get the Orders application once — it is loop-invariant
            # (the original re-fetched it on every outer iteration).
            orders = multiprocess_runner.get(Orders)

            # Create some new orders, spread across the pipelines.
            for _ in range(num_orders_per_pipeline):
                for pipeline_id in pipeline_ids:
                    orders.change_pipeline(pipeline_id)
                    order_ids.append(create_new_order())
                    sleep(0.05)

            # Wait for orders to be reserved and paid; the retry budget
            # is shared across all orders.
            retries = 10 + 10 * num_orders_per_pipeline * len(pipeline_ids)
            for i, order_id in enumerate(order_ids):

                while not orders.repository[order_id].is_reserved:
                    sleep(0.1)
                    retries -= 1
                    assert retries, "Failed set order.is_reserved {} ({})".format(
                        order_id, i
                    )

                while retries and not orders.repository[order_id].is_paid:
                    sleep(0.1)
                    retries -= 1
                    assert retries, "Failed set order.is_paid ({})".format(i)

            # Calculate timings from event timestamps (generator args
            # to min/max instead of throwaway lists).
            order_aggregates = [orders.repository[oid] for oid in order_ids]
            first_timestamp = min(o.__created_on__ for o in order_aggregates)
            last_timestamp = max(o.__last_modified__ for o in order_aggregates)
            duration = last_timestamp - first_timestamp
            rate = len(order_ids) / float(duration)
            period = 1 / rate
            print(
                "Orders system processed {} orders in {:.3f}s at rate of {:.1f} "
                "orders/s, {:.3f}s each".format(len(order_ids), duration, rate, period)
            )

            # Print min, average, max duration.
            durations = [
                o.__last_modified__ - o.__created_on__ for o in order_aggregates
            ]
            print("Min order processing time: {:.3f}s".format(min(durations)))
            print(
                "Mean order processing time: {:.3f}s".format(
                    sum(durations) / len(durations)
                )
            )
            print("Max order processing time: {:.3f}s".format(max(durations)))